Repo created
This commit is contained in:
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions
142 TMessagesProj/jni/voip/tgcalls/AudioDeviceHelper.cpp Normal file
@@ -0,0 +1,142 @@
#include "AudioDeviceHelper.h"

#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/logging.h"

namespace tgcalls {
namespace {

bool SkipDefaultDevice(const char *name) {
    const auto utfName = std::string(name);
#ifdef WEBRTC_WIN
    return (utfName.rfind("Default - ", 0) == 0)
        || (utfName.rfind("Communication - ", 0) == 0);
#elif defined WEBRTC_MAC
    return (utfName.rfind("default (", 0) == 0)
        && (utfName.find(")", utfName.size() - 1) == utfName.size() - 1);
#else
    return false;
#endif // WEBRTC_WIN || WEBRTC_MAC
}

} // namespace

void SetAudioInputDeviceById(webrtc::AudioDeviceModule *adm, const std::string &id) {
    const auto recording = adm->Recording() || adm->RecordingIsInitialized();
    if (recording) {
        adm->StopRecording();
    }
    auto specific = false;
    const auto finish = [&] {
        if (!specific) {
            if (const auto result = adm->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultCommunicationDevice)) {
                RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << "): SetRecordingDevice(kDefaultCommunicationDevice) failed: " << result << ".";
            } else {
                RTC_LOG(LS_INFO) << "setAudioInputDevice(" << id << "): SetRecordingDevice(kDefaultCommunicationDevice) success.";
            }
        }
        if (recording && adm->InitRecording() == 0) {
            adm->StartRecording();
        }
    };
    if (id == "default" || id.empty()) {
        return finish();
    }
#ifdef TGCALLS_UWP_DESKTOP
    const auto result = adm->SetRecordingDevice(id);
    if (result != 0) {
        RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << ") failed: " << result << ".";
    } else {
        RTC_LOG(LS_INFO) << "setAudioInputDevice(" << id << ") success.";
        specific = true;
    }
    return finish();
#else // TGCALLS_UWP_DESKTOP
    const auto count = adm
        ? adm->RecordingDevices()
        : int16_t(-666);
    if (count <= 0) {
        RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << "): Could not get recording devices count: " << count << ".";
        return finish();
    }

    int16_t order = !id.empty() && id[0] == '#' ? static_cast<int16_t>(std::stoi(id.substr(1))) : -1;
    for (auto i = 0; i != count; ++i) {
        char name[webrtc::kAdmMaxDeviceNameSize + 1] = { 0 };
        char guid[webrtc::kAdmMaxGuidSize + 1] = { 0 };
        adm->RecordingDeviceName(i, name, guid);
        if ((!SkipDefaultDevice(name) && id == guid) || order == i) {
            const auto result = adm->SetRecordingDevice(i);
            if (result != 0) {
                RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << ") name '" << std::string(name) << "' failed: " << result << ".";
            } else {
                RTC_LOG(LS_INFO) << "setAudioInputDevice(" << id << ") name '" << std::string(name) << "' success.";
                specific = true;
            }
            return finish();
        }
    }
    RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << "): Could not find recording device.";
    return finish();
#endif // TGCALLS_UWP_DESKTOP
}

void SetAudioOutputDeviceById(webrtc::AudioDeviceModule *adm, const std::string &id) {
    if (adm->Playing()) {
        adm->StopPlayout();
    }
    auto specific = false;
    const auto finish = [&] {
        if (!specific) {
            if (const auto result = adm->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultCommunicationDevice)) {
                RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << "): SetPlayoutDevice(kDefaultCommunicationDevice) failed: " << result << ".";
            } else {
                RTC_LOG(LS_INFO) << "setAudioOutputDevice(" << id << "): SetPlayoutDevice(kDefaultCommunicationDevice) success.";
            }
        }
        if (adm->InitPlayout() == 0) {
            adm->StartPlayout();
        }
    };
    if (id == "default" || id.empty()) {
        return finish();
    }
#ifdef TGCALLS_UWP_DESKTOP
    const auto result = adm->SetPlayoutDevice(id);
    if (result != 0) {
        RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << ") failed: " << result << ".";
    } else {
        RTC_LOG(LS_INFO) << "setAudioOutputDevice(" << id << ") success.";
        specific = true;
    }
    return finish();
#else // TGCALLS_UWP_DESKTOP
    const auto count = adm
        ? adm->PlayoutDevices()
        : int16_t(-666);
    if (count <= 0) {
        RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << "): Could not get playout devices count: " << count << ".";
        return finish();
    }
    int16_t order = !id.empty() && id[0] == '#' ? static_cast<int16_t>(std::stoi(id.substr(1))) : -1;
    for (auto i = 0; i != count; ++i) {
        char name[webrtc::kAdmMaxDeviceNameSize + 1] = { 0 };
        char guid[webrtc::kAdmMaxGuidSize + 1] = { 0 };
        adm->PlayoutDeviceName(i, name, guid);
        if ((!SkipDefaultDevice(name) && id == guid) || order == i) {
            const auto result = adm->SetPlayoutDevice(i);
            if (result != 0) {
                RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << ") name '" << std::string(name) << "' failed: " << result << ".";
            } else {
                RTC_LOG(LS_INFO) << "setAudioOutputDevice(" << id << ") name '" << std::string(name) << "' success.";
                specific = true;
            }
            return finish();
        }
    }
    RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << "): Could not find playout device.";
    return finish();
#endif // TGCALLS_UWP_DESKTOP
}

} // namespace tgcalls

17 TMessagesProj/jni/voip/tgcalls/AudioDeviceHelper.h Normal file
@@ -0,0 +1,17 @@
#ifndef TGCALLS_AUDIO_DEVICE_HELPER_H
#define TGCALLS_AUDIO_DEVICE_HELPER_H

#include <string>

namespace webrtc {
class AudioDeviceModule;
} // namespace webrtc

namespace tgcalls {

void SetAudioInputDeviceById(webrtc::AudioDeviceModule *adm, const std::string &id);
void SetAudioOutputDeviceById(webrtc::AudioDeviceModule *adm, const std::string &id);

} // namespace tgcalls

#endif
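
A minimal usage sketch (not part of the commit): how an embedder holding a live ADM might drive these helpers; device ids are GUIDs, "#<index>" strings, or "default".

#include "AudioDeviceHelper.h"

// Assumes `adm` points at an initialized webrtc::AudioDeviceModule created elsewhere.
void ApplyDeviceSelection(webrtc::AudioDeviceModule *adm) {
    tgcalls::SetAudioInputDeviceById(adm, "#0");        // recording device by index
    tgcalls::SetAudioOutputDeviceById(adm, "default");  // keep the default playout device
}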

16 TMessagesProj/jni/voip/tgcalls/AudioFrame.h Normal file
@@ -0,0 +1,16 @@
#pragma once

#include <cinttypes>
#include <cstring>

namespace tgcalls {
struct AudioFrame {
    const int16_t *audio_samples;
    size_t num_samples;
    size_t bytes_per_sample;
    size_t num_channels;
    uint32_t samples_per_sec;
    int64_t elapsed_time_ms;
    int64_t ntp_time_ms;
};
} // namespace tgcalls
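
AudioFrame is a non-owning view over interleaved PCM, so consumers compute buffer sizes from its fields. A minimal sketch (assuming bytes_per_sample describes a single channel's sample width, which should be verified against the producer):

#include "AudioFrame.h"

// E.g. a 10 ms frame of 48 kHz stereo int16:
// 480 samples * 2 bytes * 2 channels = 1920 bytes.
inline size_t PayloadBytes(const tgcalls::AudioFrame &frame) {
    return frame.num_samples * frame.bytes_per_sample * frame.num_channels;
}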

161 TMessagesProj/jni/voip/tgcalls/ChannelManager.cpp Normal file
@@ -0,0 +1,161 @@
#include "ChannelManager.h"

#include <utility>

#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "api/media_types.h"
#include "api/sequence_checker.h"
#include "media/base/media_constants.h"
#include "rtc_base/checks.h"
#include "rtc_base/trace_event.h"

namespace tgcalls {

// static
std::unique_ptr<ChannelManager> ChannelManager::Create(
    std::unique_ptr<cricket::MediaEngineInterface> media_engine,
    rtc::Thread* worker_thread,
    rtc::Thread* network_thread) {
  RTC_DCHECK(network_thread);
  RTC_DCHECK(worker_thread);
  return absl::WrapUnique(new ChannelManager(
      std::move(media_engine), worker_thread, network_thread));
}

ChannelManager::ChannelManager(
    std::unique_ptr<cricket::MediaEngineInterface> media_engine,
    rtc::Thread* worker_thread,
    rtc::Thread* network_thread)
    : media_engine_(std::move(media_engine)),
      signaling_thread_(rtc::Thread::Current()),
      worker_thread_(worker_thread),
      network_thread_(network_thread) {
  RTC_DCHECK_RUN_ON(signaling_thread_);
  RTC_DCHECK(worker_thread_);
  RTC_DCHECK(network_thread_);
  if (media_engine_) {
    // TODO(tommi): Change VoiceEngine to do ctor time initialization so that
    // this isn't necessary.
    worker_thread_->BlockingCall([&] { media_engine_->Init(); });
  }
}

ChannelManager::~ChannelManager() {
  RTC_DCHECK_RUN_ON(signaling_thread_);
  worker_thread_->BlockingCall([&] {
    RTC_DCHECK_RUN_ON(worker_thread_);
    RTC_DCHECK(voice_channels_.empty());
    RTC_DCHECK(video_channels_.empty());
    // While `media_engine_` is const throughout the ChannelManager's lifetime,
    // it requires destruction to happen on the worker thread. Instead of
    // marking the pointer as non-const, we live with this const_cast<> in the
    // destructor.
    const_cast<std::unique_ptr<cricket::MediaEngineInterface>&>(media_engine_).reset();
  });
}

cricket::VoiceChannel* ChannelManager::CreateVoiceChannel(
    webrtc::Call* call,
    const cricket::MediaConfig& media_config,
    const std::string& mid,
    bool srtp_required,
    const webrtc::CryptoOptions& crypto_options,
    const cricket::AudioOptions& options) {
  RTC_DCHECK(call);
  RTC_DCHECK(media_engine_);
  // TODO(bugs.webrtc.org/11992): Remove this workaround after updates in
  // PeerConnection and add the expectation that we're already on the right
  // thread.
  if (!worker_thread_->IsCurrent()) {
    cricket::VoiceChannel* temp = nullptr;
    worker_thread_->BlockingCall([&] {
      temp = CreateVoiceChannel(call, media_config, mid, srtp_required,
                                crypto_options, options);
    });
    return temp;
  }
  RTC_DCHECK_RUN_ON(worker_thread_);
  std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_media_channel = media_engine_->voice().CreateSendChannel(
      call, media_config, options, crypto_options, webrtc::AudioCodecPairId::Create());
  if (!send_media_channel) {
    return nullptr;
  }
  std::unique_ptr<cricket::VoiceMediaReceiveChannelInterface> receive_media_channel = media_engine_->voice().CreateReceiveChannel(
      call, media_config, options, crypto_options, webrtc::AudioCodecPairId::Create());
  if (!receive_media_channel) {
    return nullptr;
  }
  auto voice_channel = std::make_unique<cricket::VoiceChannel>(
      worker_thread_, network_thread_, signaling_thread_,
      std::move(send_media_channel), std::move(receive_media_channel), mid, srtp_required, crypto_options,
      &ssrc_generator_);
  cricket::VoiceChannel* voice_channel_ptr = voice_channel.get();
  voice_channels_.push_back(std::move(voice_channel));
  return voice_channel_ptr;
}

void ChannelManager::DestroyVoiceChannel(cricket::VoiceChannel* channel) {
  TRACE_EVENT0("webrtc", "ChannelManager::DestroyVoiceChannel");
  RTC_DCHECK_RUN_ON(worker_thread_);
  voice_channels_.erase(absl::c_find_if(
      voice_channels_, [&](const auto& p) { return p.get() == channel; }));
}

cricket::VideoChannel* ChannelManager::CreateVideoChannel(
    webrtc::Call* call,
    const cricket::MediaConfig& media_config,
    const std::string& mid,
    bool srtp_required,
    const webrtc::CryptoOptions& crypto_options,
    const cricket::VideoOptions& options,
    webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
  RTC_DCHECK(call);
  RTC_DCHECK(media_engine_);
  // TODO(bugs.webrtc.org/11992): Remove this workaround after updates in
  // PeerConnection and add the expectation that we're already on the right
  // thread.
  if (!worker_thread_->IsCurrent()) {
    cricket::VideoChannel* temp = nullptr;
    worker_thread_->BlockingCall([&] {
      temp = CreateVideoChannel(call, media_config, mid, srtp_required,
                                crypto_options, options,
                                video_bitrate_allocator_factory);
    });
    return temp;
  }
  RTC_DCHECK_RUN_ON(worker_thread_);
  std::unique_ptr<cricket::VideoMediaSendChannelInterface> send_media_channel = media_engine_->video().CreateSendChannel(
      call, media_config, options, crypto_options,
      video_bitrate_allocator_factory);
  if (!send_media_channel) {
    return nullptr;
  }
  std::unique_ptr<cricket::VideoMediaReceiveChannelInterface> receive_media_channel = media_engine_->video().CreateReceiveChannel(
      call, media_config, options, crypto_options);
  if (!receive_media_channel) {
    return nullptr;
  }
  auto video_channel = std::make_unique<cricket::VideoChannel>(
      worker_thread_, network_thread_, signaling_thread_,
      std::move(send_media_channel), std::move(receive_media_channel), mid, srtp_required, crypto_options,
      &ssrc_generator_);
  cricket::VideoChannel* video_channel_ptr = video_channel.get();
  video_channels_.push_back(std::move(video_channel));
  return video_channel_ptr;
}

void ChannelManager::DestroyVideoChannel(cricket::VideoChannel* channel) {
  TRACE_EVENT0("webrtc", "ChannelManager::DestroyVideoChannel");
  RTC_DCHECK_RUN_ON(worker_thread_);
  video_channels_.erase(absl::c_find_if(
      video_channels_, [&](const auto& p) { return p.get() == channel; }));
}

void ChannelManager::DestroyChannel(cricket::ChannelInterface* channel) {
  RTC_DCHECK(channel);
  if (!worker_thread_->IsCurrent()) {
    // TODO(tommi): Do this asynchronously when we have a way to make sure that
    // the call to DestroyChannel runs before ~Call() runs, which today happens
    // inside an Invoke from the signaling thread in PeerConnection::Close().
    worker_thread_->BlockingCall([&] { DestroyChannel(channel); });
    return;
  }
  if (channel->media_type() == cricket::MEDIA_TYPE_AUDIO) {
    DestroyVoiceChannel(static_cast<cricket::VoiceChannel*>(channel));
  } else {
    RTC_DCHECK_EQ(channel->media_type(), cricket::MEDIA_TYPE_VIDEO);
    DestroyVideoChannel(static_cast<cricket::VideoChannel*>(channel));
  }
}

} // namespace tgcalls

95 TMessagesProj/jni/voip/tgcalls/ChannelManager.h Normal file
@@ -0,0 +1,95 @@
#ifndef TGCALLS_CHANNEL_MANAGER_H_
#define TGCALLS_CHANNEL_MANAGER_H_

#include <stdint.h>

#include <memory>
#include <string>
#include <vector>

#include "api/audio_options.h"
#include "api/crypto/crypto_options.h"
#include "api/rtp_parameters.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "call/call.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_config.h"
#include "media/base/media_engine.h"
#include "pc/channel.h"
#include "pc/channel_interface.h"
#include "pc/rtp_transport_internal.h"
#include "pc/session_description.h"
#include "rtc_base/system/file_wrapper.h"
#include "rtc_base/thread.h"
#include "rtc_base/thread_annotations.h"
#include "rtc_base/unique_id_generator.h"

namespace tgcalls {

// ChannelManager allows the MediaEngine to run on a separate thread, and takes
// care of marshalling calls between threads. It also creates and keeps track of
// voice and video channels; by doing so, it can temporarily pause all the
// channels when a new audio or video device is chosen. The voice and video
// channels are stored in separate vectors, to easily allow operations on just
// voice or just video channels.
// ChannelManager also allows the application to discover what devices it has
// using device manager.
class ChannelManager {
 public:
  // Returns an initialized instance of ChannelManager.
  // If media_engine is non-nullptr, then the returned ChannelManager instance
  // will own that reference and media engine initialization will happen on the
  // worker thread.
  static std::unique_ptr<ChannelManager> Create(
      std::unique_ptr<cricket::MediaEngineInterface> media_engine,
      rtc::Thread* worker_thread,
      rtc::Thread* network_thread);

  ChannelManager() = delete;
  ~ChannelManager();

  rtc::Thread* worker_thread() const { return worker_thread_; }
  rtc::Thread* network_thread() const { return network_thread_; }
  cricket::MediaEngineInterface* media_engine() { return media_engine_.get(); }
  rtc::UniqueRandomIdGenerator& ssrc_generator() { return ssrc_generator_; }

  // The operations below all occur on the worker thread.
  // ChannelManager retains ownership of the created channels, so clients should
  // call the appropriate Destroy*Channel method when done.

  // Creates a voice channel, to be associated with the specified session.
  cricket::VoiceChannel* CreateVoiceChannel(webrtc::Call* call,
                                            const cricket::MediaConfig& media_config,
                                            const std::string& mid,
                                            bool srtp_required,
                                            const webrtc::CryptoOptions& crypto_options,
                                            const cricket::AudioOptions& options);

  // Creates a video channel, synced with the specified voice channel, and
  // associated with the specified session.
  // Version of the above that takes PacketTransportInternal.
  cricket::VideoChannel* CreateVideoChannel(
      webrtc::Call* call,
      const cricket::MediaConfig& media_config,
      const std::string& mid,
      bool srtp_required,
      const webrtc::CryptoOptions& crypto_options,
      const cricket::VideoOptions& options,
      webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory);

  void DestroyChannel(cricket::ChannelInterface* channel);

 protected:
  ChannelManager(std::unique_ptr<cricket::MediaEngineInterface> media_engine,
                 rtc::Thread* worker_thread,
                 rtc::Thread* network_thread);

  // Destroys a voice channel created by CreateVoiceChannel.
  void DestroyVoiceChannel(cricket::VoiceChannel* voice_channel);
  // Destroys a video channel created by CreateVideoChannel.
  void DestroyVideoChannel(cricket::VideoChannel* video_channel);

 private:
  const std::unique_ptr<cricket::MediaEngineInterface> media_engine_;  // Nullable.
  rtc::Thread* const signaling_thread_;
  rtc::Thread* const worker_thread_;
  rtc::Thread* const network_thread_;

  // This object should be used to generate any SSRC that is not explicitly
  // specified by the user (or by the remote party).
  // TODO(bugs.webrtc.org/12666): This variable is used from both the signaling
  // and worker threads. See if we can't restrict usage to a single thread.
  rtc::UniqueRandomIdGenerator ssrc_generator_;

  // Vector contents are non-null.
  std::vector<std::unique_ptr<cricket::VoiceChannel>> voice_channels_
      RTC_GUARDED_BY(worker_thread_);
  std::vector<std::unique_ptr<cricket::VideoChannel>> video_channels_
      RTC_GUARDED_BY(worker_thread_);
};

} // namespace tgcalls

#endif  // TGCALLS_CHANNEL_MANAGER_H_
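
A hedged sketch of the intended call pattern; all of the inputs (threads, media engine, call, configs) are created elsewhere by tgcalls, and the variable names here are illustrative:

// Illustrative only; Create() must run on the signaling thread, since the
// constructor captures rtc::Thread::Current() as signaling_thread_.
auto manager = tgcalls::ChannelManager::Create(
    std::move(media_engine), worker_thread, network_thread);
cricket::VoiceChannel *voice = manager->CreateVoiceChannel(
    call, media_config, "audio0", /*srtp_required=*/true,
    crypto_options, audio_options);
// ... use the channel ...
manager->DestroyChannel(voice);  // marshals to the worker thread if needed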

304 TMessagesProj/jni/voip/tgcalls/CodecSelectHelper.cpp Normal file
@@ -0,0 +1,304 @@
#include "CodecSelectHelper.h"

#include "platform/PlatformInterface.h"

#include "media/base/media_constants.h"
#include "media/base/codec.h"
#include "absl/strings/match.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"

namespace tgcalls {
namespace {

using VideoFormat = webrtc::SdpVideoFormat;

bool CompareFormats(const VideoFormat &a, const VideoFormat &b) {
    if (a.name < b.name) {
        return true;
    } else if (b.name < a.name) {
        return false;
    } else {
        return a.parameters < b.parameters;
    }
}

int FormatPriority(const VideoFormat &format, const std::vector<std::string> &preferredCodecs, std::shared_ptr<PlatformContext> platformContext) {
    static const auto kCodecs = {
        std::string(cricket::kAv1CodecName),
#ifndef WEBRTC_DISABLE_H265
        std::string(cricket::kH265CodecName),
#endif
        std::string(cricket::kH264CodecName),
        std::string(cricket::kVp8CodecName),
        std::string(cricket::kVp9CodecName),
    };
    static const auto kSupported = [platformContext] {
        const auto platform = PlatformInterface::SharedInstance();

        auto result = std::vector<std::string>();
        result.reserve(kCodecs.size());
        for (const auto &codec : kCodecs) {
            if (platform->supportsEncoding(codec, platformContext)) {
                result.push_back(codec);
            }
        }
        return result;
    }();

    for (int i = 0; i < preferredCodecs.size(); i++) {
        for (const auto &name : kSupported) {
            if (absl::EqualsIgnoreCase(format.name, preferredCodecs[i]) && absl::EqualsIgnoreCase(format.name, name)) {
                return i;
            }
        }
    }

    auto result = (int)preferredCodecs.size();
    for (const auto &name : kSupported) {
        if (absl::EqualsIgnoreCase(format.name, name)) {
            return result;
        }
        ++result;
    }
    return -1;
}

bool ComparePriorities(const VideoFormat &a, const VideoFormat &b, const std::vector<std::string> &preferredCodecs, std::shared_ptr<PlatformContext> platformContext) {
    return FormatPriority(a, preferredCodecs, platformContext) < FormatPriority(b, preferredCodecs, platformContext);
}

std::vector<VideoFormat> FilterAndSortEncoders(std::vector<VideoFormat> list, const std::vector<std::string> &preferredCodecs, std::shared_ptr<PlatformContext> platformContext) {
    const auto listBegin = begin(list);
    const auto listEnd = end(list);
    std::sort(listBegin, listEnd, [&preferredCodecs, platformContext](const VideoFormat &lhs, const VideoFormat &rhs) {
        return ComparePriorities(lhs, rhs, preferredCodecs, platformContext);
    });
    auto eraseFrom = listBegin;
    auto eraseTill = eraseFrom;
    while (eraseTill != listEnd && FormatPriority(*eraseTill, preferredCodecs, platformContext) == -1) {
        ++eraseTill;
    }
    if (eraseTill != eraseFrom) {
        list.erase(eraseFrom, eraseTill);
    }
    return list;
}

std::vector<VideoFormat> AppendUnique(
        std::vector<VideoFormat> list,
        std::vector<VideoFormat> other) {
    if (list.empty()) {
        return other;
    }
    list.reserve(list.size() + other.size());
    const auto oldBegin = &list[0];
    const auto oldEnd = oldBegin + list.size();
    for (auto &format : other) {
        if (std::find(oldBegin, oldEnd, format) == oldEnd) {
            list.push_back(std::move(format));
        }
    }
    return list;
}

std::vector<VideoFormat>::const_iterator FindEqualFormat(
        const std::vector<VideoFormat> &list,
        const VideoFormat &format) {
    return std::find_if(list.begin(), list.end(), [&](const VideoFormat &other) {
        return format.IsSameCodec(other);
    });
}

void AddDefaultFeedbackParams(cricket::VideoCodec *codec) {
    // Don't add any feedback params for RED and ULPFEC.
    if (codec->name == cricket::kRedCodecName || codec->name == cricket::kUlpfecCodecName)
        return;
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty));
    codec->AddFeedbackParam(
        cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty));
    // Don't add any more feedback params for FLEXFEC.
    if (codec->name == cricket::kFlexfecCodecName)
        return;
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir));
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kRtcpFbNackParamPli));
    if (codec->name == cricket::kVp8CodecName &&
        webrtc::field_trial::IsEnabled("WebRTC-RtcpLossNotification")) {
        codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamLntf, cricket::kParamValueEmpty));
    }
}

std::vector<VideoFormat> RemoveScalabilityModes(
        std::vector<VideoFormat> list) {
    auto changed = false;
    for (auto &entry : list) {
        if (!entry.scalability_modes.empty()) {
            entry.scalability_modes = {};
            changed = true;
        }
    }
    if (changed && list.size() > 1) {
        for (auto i = list.end() - 1; i != list.begin(); --i) {
            if (std::find(list.begin(), i, *i) != i) {
                i = list.erase(i);
            }
        }
    }
    return list;
}

} // namespace

VideoFormatsMessage ComposeSupportedFormats(
        std::vector<VideoFormat> encoders,
        std::vector<VideoFormat> decoders,
        const std::vector<std::string> &preferredCodecs,
        std::shared_ptr<PlatformContext> platformContext) {
    // We don't pass scalability_modes through signaling,
    // so we have to remove them here, otherwise the lists differ.
    encoders = RemoveScalabilityModes(std::move(encoders));
    decoders = RemoveScalabilityModes(std::move(decoders));

    encoders = FilterAndSortEncoders(std::move(encoders), preferredCodecs, platformContext);

    auto result = VideoFormatsMessage();
    result.encodersCount = (int)encoders.size();
    result.formats = AppendUnique(std::move(encoders), std::move(decoders));
    for (const auto &format : result.formats) {
        RTC_LOG(LS_INFO) << "Format: " << format.ToString();
    }
    RTC_LOG(LS_INFO) << "First " << result.encodersCount << " formats are supported encoders.";
    return result;
}

CommonFormats ComputeCommonFormats(
        const VideoFormatsMessage &my,
        VideoFormatsMessage their) {
    assert(my.encodersCount <= my.formats.size());
    assert(their.encodersCount <= their.formats.size());

    for (const auto &format : their.formats) {
        RTC_LOG(LS_INFO) << "Their format: " << format.ToString();
    }
    RTC_LOG(LS_INFO) << "Their first " << their.encodersCount << " formats are supported encoders.";

    const auto myEncodersBegin = begin(my.formats);
    const auto myEncodersEnd = myEncodersBegin + my.encodersCount;
    const auto theirEncodersBegin = begin(their.formats);
    const auto theirEncodersEnd = theirEncodersBegin + their.encodersCount;

    auto result = CommonFormats();
    const auto addUnique = [&](const VideoFormat &format) {
        const auto already = std::find(
            result.list.begin(),
            result.list.end(),
            format);
        if (already == result.list.end()) {
            result.list.push_back(format);
        }
    };
    const auto addCommonAndFindFirst = [&](
            std::vector<VideoFormat>::const_iterator begin,
            std::vector<VideoFormat>::const_iterator end,
            const std::vector<VideoFormat> &decoders) {
        auto first = VideoFormat(std::string());
        for (auto i = begin; i != end; ++i) {
            const auto &format = *i;
            const auto j = FindEqualFormat(decoders, format);
            if (j != decoders.end()) {
                if (first.name.empty()) {
                    first = format;
                }
                addUnique(format);
                addUnique(*j);
            }
        }
        return first;
    };

    result.list.reserve(my.formats.size() + their.formats.size());
    auto myEncoderFormat = addCommonAndFindFirst(
        myEncodersBegin,
        myEncodersEnd,
        their.formats);
    auto theirEncoderFormat = addCommonAndFindFirst(
        theirEncodersBegin,
        theirEncodersEnd,
        my.formats);
    std::sort(begin(result.list), end(result.list), CompareFormats);
    if (!myEncoderFormat.name.empty()) {
        const auto i = std::find(begin(result.list), end(result.list), myEncoderFormat);
        assert(i != end(result.list));
        result.myEncoderIndex = (i - begin(result.list));
    }

    for (const auto &format : result.list) {
        RTC_LOG(LS_INFO) << "Common format: " << format.ToString();
    }
    RTC_LOG(LS_INFO) << "My encoder: " << (result.myEncoderIndex >= 0 ? result.list[result.myEncoderIndex].ToString() : "(null)");
    RTC_LOG(LS_INFO) << "Their encoder: " << (!theirEncoderFormat.name.empty() ? theirEncoderFormat.ToString() : "(null)");

    return result;
}

CommonCodecs AssignPayloadTypesAndDefaultCodecs(CommonFormats &&formats) {
    if (formats.list.empty()) {
        return CommonCodecs();
    }

    constexpr int kFirstDynamicPayloadType = 96;
    constexpr int kLastDynamicPayloadType = 127;

    int payload_type = kFirstDynamicPayloadType;

    formats.list.push_back(webrtc::SdpVideoFormat(cricket::kRedCodecName));
    formats.list.push_back(webrtc::SdpVideoFormat(cricket::kUlpfecCodecName));

    if (true) {
        webrtc::SdpVideoFormat flexfec_format(cricket::kFlexfecCodecName);
        // This value is currently arbitrarily set to 10 seconds. (The unit
        // is microseconds.) This parameter MUST be present in the SDP, but
        // we never use the actual value anywhere in our code.
        // TODO(brandtr): Consider honouring this value in the sender and receiver.
        flexfec_format.parameters = { {cricket::kFlexfecFmtpRepairWindow, "10000000"} };
        formats.list.push_back(flexfec_format);
    }

    auto inputIndex = 0;
    auto result = CommonCodecs();
    result.list.reserve(2 * formats.list.size() - 2);
    for (const auto &format : formats.list) {
        cricket::VideoCodec codec = cricket::CreateVideoCodec(format);
        codec.id = payload_type;
        AddDefaultFeedbackParams(&codec);

        if (inputIndex++ == formats.myEncoderIndex) {
            result.myEncoderIndex = (int)result.list.size();
        }
        result.list.push_back(codec);

        // Increment payload type.
        ++payload_type;
        if (payload_type > kLastDynamicPayloadType) {
            RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest.";
            break;
        }

        // Add associated RTX codec for non-FEC codecs.
        if (!absl::EqualsIgnoreCase(codec.name, cricket::kUlpfecCodecName) &&
            !absl::EqualsIgnoreCase(codec.name, cricket::kFlexfecCodecName)) {
            result.list.push_back(cricket::CreateVideoRtxCodec(payload_type, codec.id));

            // Increment payload type.
            ++payload_type;
            if (payload_type > kLastDynamicPayloadType) {
                RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest.";
                break;
            }
        }
    }
    return result;
}

} // namespace tgcalls

35 TMessagesProj/jni/voip/tgcalls/CodecSelectHelper.h Normal file
@@ -0,0 +1,35 @@
#ifndef TGCALLS_CODEC_SELECT_HELPER_H
#define TGCALLS_CODEC_SELECT_HELPER_H

#include "Message.h"
#include "media/base/codec.h"

namespace tgcalls {

class PlatformContext;

struct CommonFormats {
    std::vector<webrtc::SdpVideoFormat> list;
    int myEncoderIndex = -1;
};

struct CommonCodecs {
    std::vector<cricket::VideoCodec> list;
    int myEncoderIndex = -1;
};

VideoFormatsMessage ComposeSupportedFormats(
    std::vector<webrtc::SdpVideoFormat> encoders,
    std::vector<webrtc::SdpVideoFormat> decoders,
    const std::vector<std::string> &preferredCodecs,
    std::shared_ptr<PlatformContext> platformContext);

CommonFormats ComputeCommonFormats(
    const VideoFormatsMessage &my,
    VideoFormatsMessage theirs);

CommonCodecs AssignPayloadTypesAndDefaultCodecs(CommonFormats &&formats);

} // namespace tgcalls

#endif
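
The three declarations above are meant to be chained during call setup. A sketch of that flow, assuming the remote side's VideoFormatsMessage arrives over signaling and that the encoder/decoder lists come from the platform layer (accessor names are illustrative):

auto myFormats = tgcalls::ComposeSupportedFormats(
    platform->supportedVideoEncoders(),   // illustrative accessors
    platform->supportedVideoDecoders(),
    preferredCodecs, platformContext);
// ... send myFormats, receive theirFormats via signaling ...
auto common = tgcalls::ComputeCommonFormats(myFormats, std::move(theirFormats));
auto codecs = tgcalls::AssignPayloadTypesAndDefaultCodecs(std::move(common));
// codecs.list now holds dynamic payload types 96..127: each media codec is
// followed by its RTX twin (e.g. VP8 -> 96, RTX -> 97), plus RED/ULPFEC/FlexFEC.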

61 TMessagesProj/jni/voip/tgcalls/CryptoHelper.cpp Normal file
@@ -0,0 +1,61 @@
#include "CryptoHelper.h"

#include <cstring>
#include <limits.h>

namespace tgcalls {

AesKeyIv PrepareAesKeyIv(const uint8_t *key, const uint8_t *msgKey, int x) {
    auto result = AesKeyIv();

    const auto sha256a = ConcatSHA256(
        MemorySpan{ msgKey, 16 },
        MemorySpan{ key + x, 36 });
    const auto sha256b = ConcatSHA256(
        MemorySpan{ key + 40 + x, 36 },
        MemorySpan{ msgKey, 16 });
    const auto aesKey = result.key.data();
    const auto aesIv = result.iv.data();
    memcpy(aesKey, sha256a.data(), 8);
    memcpy(aesKey + 8, sha256b.data() + 8, 16);
    memcpy(aesKey + 8 + 16, sha256a.data() + 24, 8);
    memcpy(aesIv, sha256b.data(), 4);
    memcpy(aesIv + 4, sha256a.data() + 8, 8);
    memcpy(aesIv + 4 + 8, sha256b.data() + 24, 4);

    return result;
}

void AesProcessCtr(MemorySpan from, void *to, AesKeyIv &&aesKeyIv) {
    auto aes = AES_KEY();
    AES_set_encrypt_key(
        reinterpret_cast<const unsigned char*>(aesKeyIv.key.data()),
        aesKeyIv.key.size() * CHAR_BIT,
        &aes);

    unsigned char ecountBuf[16] = { 0 };
    unsigned int offsetInBlock = 0;

#ifdef OPENSSL_IS_BORINGSSL
    AES_ctr128_encrypt(
        reinterpret_cast<const unsigned char*>(from.data),
        reinterpret_cast<unsigned char*>(to),
        from.size,
        &aes,
        reinterpret_cast<unsigned char*>(aesKeyIv.iv.data()),
        ecountBuf,
        &offsetInBlock);
#else
    CRYPTO_ctr128_encrypt(
        reinterpret_cast<const unsigned char*>(from.data),
        reinterpret_cast<unsigned char*>(to),
        from.size,
        &aes,
        reinterpret_cast<unsigned char*>(aesKeyIv.iv.data()),
        ecountBuf,
        &offsetInBlock,
        block128_f(AES_encrypt));
#endif
}

} // namespace tgcalls

68 TMessagesProj/jni/voip/tgcalls/CryptoHelper.h Normal file
@@ -0,0 +1,68 @@
#ifndef TGCALLS_CRYPTO_HELPER_H
#define TGCALLS_CRYPTO_HELPER_H

extern "C" {
#include <openssl/sha.h>
#include <openssl/aes.h>
#ifndef OPENSSL_IS_BORINGSSL
#include <openssl/modes.h>
#endif
#include <openssl/rand.h>
#include <openssl/crypto.h>
} // extern "C"

#include <array>

namespace tgcalls {

struct MemorySpan {
    MemorySpan(const void *data, size_t size) :
        data(data),
        size(size) {
    }

    const void *data = nullptr;
    size_t size = 0;
};

struct AesKeyIv {
    std::array<uint8_t, 32> key;
    std::array<uint8_t, 16> iv;
};

constexpr auto kSha256Size = size_t(SHA256_DIGEST_LENGTH);

template <typename ...Parts>
void SHA256Update(SHA256_CTX*, Parts &&...parts);

inline void SHA256Update(SHA256_CTX*) {
}

template <typename First, typename ...Others>
inline void SHA256Update(SHA256_CTX *context, First &&span, Others &&...others) {
    static_assert(
        std::is_same<std::decay_t<First>, MemorySpan>::value,
        "Pass some MemorySpan-s here.");

    SHA256_Update(context, span.data, span.size);
    SHA256Update(context, std::forward<Others>(others)...);
}

template <typename ...Parts>
inline std::array<uint8_t, kSha256Size> ConcatSHA256(Parts &&... parts) {
    static_assert(sizeof...(parts) > 0, "empty list");

    auto result = std::array<uint8_t, kSha256Size>();
    auto context = SHA256_CTX();
    SHA256_Init(&context);
    SHA256Update(&context, std::forward<Parts>(parts)...);
    SHA256_Final(result.data(), &context);
    return result;
}

AesKeyIv PrepareAesKeyIv(const uint8_t *key, const uint8_t *msgKey, int x);
void AesProcessCtr(MemorySpan from, void *to, AesKeyIv &&aesKeyIv);

} // namespace tgcalls

#endif
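
A sketch of how these primitives compose into the MTProto-style packet protection used later by EncryptedConnection: the 16-byte msgKey is the middle of a SHA-256 over key material and plaintext, and (key, msgKey, x) derive the AES-256-CTR key/iv. Here `key` is the raw 256-byte shared key, `x` the direction/purpose offset, and `out` has room for 16 bytes of msgKey plus the payload (names are illustrative).

// Sketch, mirroring EncryptedConnection::encryptPrepared() below.
void SealPacket(const uint8_t *key, int x, const rtc::Buffer &plain, uint8_t *out) {
    const auto msgKeyLarge = tgcalls::ConcatSHA256(
        tgcalls::MemorySpan{ key + 88 + x, 32 },
        tgcalls::MemorySpan{ plain.data(), plain.size() });
    memcpy(out, msgKeyLarge.data() + 8, 16);               // msgKey = middle 16 bytes
    auto aesKeyIv = tgcalls::PrepareAesKeyIv(key, out, x); // derive AES-256-CTR key/iv
    tgcalls::AesProcessCtr(
        tgcalls::MemorySpan{ plain.data(), plain.size() },
        out + 16, std::move(aesKeyIv));
}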

21 TMessagesProj/jni/voip/tgcalls/DirectConnectionChannel.h Normal file
@@ -0,0 +1,21 @@
#ifndef TGCALLS_DIRECT_CONNECTION_CHANNEL_H
#define TGCALLS_DIRECT_CONNECTION_CHANNEL_H

#include <functional>
#include <memory>
#include <vector>

namespace tgcalls {

class DirectConnectionChannel {
public:
    virtual ~DirectConnectionChannel() = default;

    virtual std::vector<uint8_t> addOnIncomingPacket(std::function<void(std::shared_ptr<std::vector<uint8_t>>)> &&) = 0;
    virtual void removeOnIncomingPacket(std::vector<uint8_t> &token) = 0;
    virtual void sendPacket(std::unique_ptr<std::vector<uint8_t>> &&packet) = 0;
};

} // namespace tgcalls

#endif
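
The interface leaves the actual transport to the embedder. A minimal in-process loopback stub (illustrative, not part of tgcalls) shows the token-based subscription contract:

#include "DirectConnectionChannel.h"

#include <algorithm>
#include <utility>

class LoopbackChannel : public tgcalls::DirectConnectionChannel {
public:
    std::vector<uint8_t> addOnIncomingPacket(
            std::function<void(std::shared_ptr<std::vector<uint8_t>>)> &&handler) override {
        std::vector<uint8_t> token{ _nextToken++ };  // sketch: single-byte tokens
        _handlers.emplace_back(token, std::move(handler));
        return token;
    }
    void removeOnIncomingPacket(std::vector<uint8_t> &token) override {
        _handlers.erase(
            std::remove_if(_handlers.begin(), _handlers.end(),
                [&](const auto &entry) { return entry.first == token; }),
            _handlers.end());
    }
    void sendPacket(std::unique_ptr<std::vector<uint8_t>> &&packet) override {
        auto shared = std::make_shared<std::vector<uint8_t>>(std::move(*packet));
        for (const auto &entry : _handlers) {
            entry.second(shared);  // loop every outgoing packet straight back
        }
    }

private:
    uint8_t _nextToken = 0;
    std::vector<std::pair<
        std::vector<uint8_t>,
        std::function<void(std::shared_ptr<std::vector<uint8_t>>)>>> _handlers;
};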

754 TMessagesProj/jni/voip/tgcalls/EncryptedConnection.cpp Normal file
@@ -0,0 +1,754 @@
#include "EncryptedConnection.h"

#include "CryptoHelper.h"
#include "rtc_base/logging.h"
#include "rtc_base/byte_buffer.h"
#include "rtc_base/time_utils.h"

namespace tgcalls {
namespace {

constexpr auto kSingleMessagePacketSeqBit = (uint32_t(1) << 31);
constexpr auto kMessageRequiresAckSeqBit = (uint32_t(1) << 30);
constexpr auto kMaxAllowedCounter = std::numeric_limits<uint32_t>::max()
    & ~kSingleMessagePacketSeqBit
    & ~kMessageRequiresAckSeqBit;

static_assert(kMaxAllowedCounter < kSingleMessagePacketSeqBit, "bad");
static_assert(kMaxAllowedCounter < kMessageRequiresAckSeqBit, "bad");
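
// Layout of a 32-bit seq, as pinned down by the constants above:
//
//   bit 31: kSingleMessagePacketSeqBit - packet carries exactly one message
//   bit 30: kMessageRequiresAckSeqBit  - message is re-sent until acked
//   bits 0..29: monotonically growing counter (kMaxAllowedCounter masks both flags)
//
// Worked example: seq = 0xC0000005 has both flags set and counter 5;
// CounterFromSeq() below recovers the 5.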
constexpr auto kAckSerializedSize = sizeof(uint32_t) + sizeof(uint8_t);
constexpr auto kNotAckedMessagesLimit = 64 * 1024;
constexpr auto kMaxIncomingPacketSize = 128 * 1024; // don't try decrypting more
constexpr auto kKeepIncomingCountersCount = 64;
constexpr auto kMaxFullPacketSize = 1500; // IP_PACKET_SIZE from webrtc.

// Max seen turn_overhead is around 36.
constexpr auto kMaxOuterPacketSize = kMaxFullPacketSize - 48;

constexpr auto kMaxSignalingPacketSize = 16 * 1024;

constexpr auto kServiceCauseAcks = 1;
constexpr auto kServiceCauseResend = 2;

static constexpr uint8_t kAckId = uint8_t(-1);
static constexpr uint8_t kEmptyId = uint8_t(-2);
static constexpr uint8_t kCustomId = uint8_t(127);

void AppendSeq(rtc::CopyOnWriteBuffer &buffer, uint32_t seq) {
    const auto bytes = rtc::HostToNetwork32(seq);
    buffer.AppendData(reinterpret_cast<const char*>(&bytes), sizeof(bytes));
}

void WriteSeq(void *bytes, uint32_t seq) {
    *reinterpret_cast<uint32_t*>(bytes) = rtc::HostToNetwork32(seq);
}

uint32_t ReadSeq(const void *bytes) {
    return rtc::NetworkToHost32(*reinterpret_cast<const uint32_t*>(bytes));
}

uint32_t CounterFromSeq(uint32_t seq) {
    return seq & ~kSingleMessagePacketSeqBit & ~kMessageRequiresAckSeqBit;
}

absl::nullopt_t LogError(
        const char *message,
        const std::string &additional = std::string()) {
    RTC_LOG(LS_ERROR) << "ERROR! " << message << additional;
    return absl::nullopt;
}

bool ConstTimeIsDifferent(const void *a, const void *b, size_t size) {
    auto ca = reinterpret_cast<const char*>(a);
    auto cb = reinterpret_cast<const char*>(b);
    volatile auto different = false;
    for (const auto ce = ca + size; ca != ce; ++ca, ++cb) {
        different = different | (*ca != *cb);
    }
    return different;
}

rtc::CopyOnWriteBuffer SerializeRawMessageWithSeq(
        const rtc::CopyOnWriteBuffer &message,
        uint32_t seq,
        bool singleMessagePacket) {
    rtc::ByteBufferWriter writer;
    writer.WriteUInt32(seq);
    writer.WriteUInt8(kCustomId);
    writer.WriteUInt32((uint32_t)message.size());
    writer.WriteBytes((const uint8_t *)message.data(), message.size());

    auto result = rtc::CopyOnWriteBuffer();
    result.AppendData(writer.Data(), writer.Length());

    return result;
}

} // namespace

EncryptedConnection::EncryptedConnection(
    Type type,
    const EncryptionKey &key,
    std::function<void(int delayMs, int cause)> requestSendService) :
_type(type),
_key(key),
_delayIntervals(DelayIntervalsByType(type)),
_requestSendService(std::move(requestSendService)) {
    assert(_key.value != nullptr);
}

absl::optional<rtc::CopyOnWriteBuffer> EncryptedConnection::encryptRawPacket(rtc::CopyOnWriteBuffer const &buffer) {
    auto seq = ++_counter;

    rtc::ByteBufferWriter writer;
    writer.WriteUInt32(seq);

    auto result = rtc::CopyOnWriteBuffer();
    result.AppendData(writer.Data(), writer.Length());

    result.AppendData(buffer);

    auto encryptedPacket = encryptPrepared(result);

    rtc::CopyOnWriteBuffer encryptedBuffer;
    encryptedBuffer.AppendData(encryptedPacket.bytes.data(), encryptedPacket.bytes.size());
    return encryptedBuffer;
}

absl::optional<rtc::CopyOnWriteBuffer> EncryptedConnection::decryptRawPacket(rtc::CopyOnWriteBuffer const &buffer) {
    if (buffer.size() < 21 || buffer.size() > kMaxIncomingPacketSize) {
        return absl::nullopt;
    }

    const auto x = (_key.isOutgoing ? 8 : 0) + (_type == Type::Signaling ? 128 : 0);
    const auto key = _key.value->data();
    const auto msgKey = reinterpret_cast<const uint8_t*>(buffer.data());
    const auto encryptedData = msgKey + 16;
    const auto dataSize = buffer.size() - 16;

    auto aesKeyIv = PrepareAesKeyIv(key, msgKey, x);

    auto decryptionBuffer = rtc::Buffer(dataSize);
    AesProcessCtr(
        MemorySpan{ encryptedData, dataSize },
        decryptionBuffer.data(),
        std::move(aesKeyIv));

    const auto msgKeyLarge = ConcatSHA256(
        MemorySpan{ key + 88 + x, 32 },
        MemorySpan{ decryptionBuffer.data(), decryptionBuffer.size() });
    if (ConstTimeIsDifferent(msgKeyLarge.data() + 8, msgKey, 16)) {
        return absl::nullopt;
    }

    const auto incomingSeq = ReadSeq(decryptionBuffer.data());
    const auto incomingCounter = CounterFromSeq(incomingSeq);
    if (!registerIncomingCounter(incomingCounter)) {
        // We've received that packet already.
        return absl::nullopt;
    }

    rtc::CopyOnWriteBuffer resultBuffer;
    resultBuffer.AppendData(decryptionBuffer.data() + 4, decryptionBuffer.size() - 4);
    return resultBuffer;
}

auto EncryptedConnection::prepareForSending(const Message &message)
-> absl::optional<EncryptedPacket> {
    const auto messageRequiresAck = absl::visit([](const auto &data) {
        return std::decay_t<decltype(data)>::kRequiresAck;
    }, message.data);

    // If message requires ack, then we can't serialize it as a single
    // message packet, because later it may be sent as a part of big packet.
    const auto singleMessagePacket = !haveAdditionalMessages() && !messageRequiresAck;
    const auto maybeSeq = computeNextSeq(messageRequiresAck, singleMessagePacket);
    if (!maybeSeq) {
        return absl::nullopt;
    }
    const auto seq = *maybeSeq;
    auto serialized = SerializeMessageWithSeq(message, seq, singleMessagePacket);

    return prepareForSendingMessageInternal(serialized, seq, messageRequiresAck);
}

absl::optional<EncryptedConnection::EncryptedPacket> EncryptedConnection::prepareForSendingRawMessage(rtc::CopyOnWriteBuffer &message, bool messageRequiresAck) {
    // If message requires ack, then we can't serialize it as a single
    // message packet, because later it may be sent as a part of big packet.
    const auto singleMessagePacket = !haveAdditionalMessages() && !messageRequiresAck;
    const auto maybeSeq = computeNextSeq(messageRequiresAck, singleMessagePacket);
    if (!maybeSeq) {
        return absl::nullopt;
    }
    const auto seq = *maybeSeq;
    auto serialized = SerializeRawMessageWithSeq(message, seq, singleMessagePacket);

    return prepareForSendingMessageInternal(serialized, seq, messageRequiresAck);
}

absl::optional<EncryptedConnection::EncryptedPacket> EncryptedConnection::prepareForSendingMessageInternal(rtc::CopyOnWriteBuffer &serialized, uint32_t seq, bool messageRequiresAck) {
    if (!enoughSpaceInPacket(serialized, 0)) {
        return LogError("Too large packet: ", std::to_string(serialized.size()));
    }
    const auto notYetAckedCopy = messageRequiresAck
        ? serialized
        : rtc::CopyOnWriteBuffer();
    if (!messageRequiresAck) {
        appendAdditionalMessages(serialized);
        return encryptPrepared(serialized);
    }
    const auto type = uint8_t(serialized.cdata()[4]);
    const auto sendEnqueued = !_myNotYetAckedMessages.empty();
    if (sendEnqueued) {
        // All requiring ack messages should always be sent in order within
        // one packet, starting with the least not-yet-acked one.
        // So if we still have those, we send an empty message with all
        // requiring ack messages that will fit in correct order.
        RTC_LOG(LS_INFO) << logHeader()
            << "Enqueue SEND:type" << type << "#" << CounterFromSeq(seq);
    } else {
        RTC_LOG(LS_INFO) << logHeader()
            << "Add SEND:type" << type << "#" << CounterFromSeq(seq);
        appendAdditionalMessages(serialized);
    }
    _myNotYetAckedMessages.push_back({ notYetAckedCopy, rtc::TimeMillis() });
    if (!sendEnqueued) {
        return encryptPrepared(serialized);
    }
    for (auto &queued : _myNotYetAckedMessages) {
        queued.lastSent = 0;
    }
    return prepareForSendingService(0);
}

auto EncryptedConnection::prepareForSendingService(int cause)
-> absl::optional<EncryptedPacket> {
    if (cause == kServiceCauseAcks) {
        _sendAcksTimerActive = false;
    } else if (cause == kServiceCauseResend) {
        _resendTimerActive = false;
    }
    if (!haveAdditionalMessages()) {
        return absl::nullopt;
    }
    const auto messageRequiresAck = false;
    const auto singleMessagePacket = false;
    const auto seq = computeNextSeq(messageRequiresAck, singleMessagePacket);
    if (!seq) {
        return absl::nullopt;
    }
    auto serialized = SerializeEmptyMessageWithSeq(*seq);
    assert(enoughSpaceInPacket(serialized, 0));

    RTC_LOG(LS_INFO) << logHeader()
        << "SEND:empty#" << CounterFromSeq(*seq);

    appendAdditionalMessages(serialized);
    return encryptPrepared(serialized);
}

bool EncryptedConnection::haveAdditionalMessages() const {
    return !_myNotYetAckedMessages.empty() || !_acksToSendSeqs.empty();
}

absl::optional<uint32_t> EncryptedConnection::computeNextSeq(
        bool messageRequiresAck,
        bool singleMessagePacket) {
    if (messageRequiresAck && _myNotYetAckedMessages.size() >= kNotAckedMessagesLimit) {
        return LogError("Too many not ACKed messages.");
    } else if (_counter == kMaxAllowedCounter) {
        return LogError("Outgoing packet limit reached.");
    }

    return (++_counter)
        | (singleMessagePacket ? kSingleMessagePacketSeqBit : 0)
        | (messageRequiresAck ? kMessageRequiresAckSeqBit : 0);
}

size_t EncryptedConnection::packetLimit() const {
    switch (_type) {
    case Type::Signaling:
        return kMaxSignalingPacketSize;
    default:
        return kMaxOuterPacketSize;
    }
}

bool EncryptedConnection::enoughSpaceInPacket(const rtc::CopyOnWriteBuffer &buffer, size_t amount) const {
    const auto limit = packetLimit();
    return (amount < limit)
        && (16 + buffer.size() + amount <= limit);
}

void EncryptedConnection::appendAcksToSend(rtc::CopyOnWriteBuffer &buffer) {
    auto i = _acksToSendSeqs.begin();
    while ((i != _acksToSendSeqs.end())
        && enoughSpaceInPacket(
            buffer,
            kAckSerializedSize)) {

        RTC_LOG(LS_INFO) << logHeader()
            << "Add ACK#" << CounterFromSeq(*i);

        AppendSeq(buffer, *i);
        buffer.AppendData(&kAckId, 1);
        ++i;
    }
    _acksToSendSeqs.erase(_acksToSendSeqs.begin(), i);
    for (const auto seq : _acksToSendSeqs) {
        RTC_LOG(LS_INFO) << logHeader()
            << "Skip ACK#" << CounterFromSeq(seq)
            << " (no space, length: " << kAckSerializedSize << ", already: " << buffer.size() << ")";
    }
}

size_t EncryptedConnection::fullNotAckedLength() const {
    assert(_myNotYetAckedMessages.size() < kNotAckedMessagesLimit);

    auto result = size_t();
    for (const auto &message : _myNotYetAckedMessages) {
        result += message.data.size();
    }
    return result;
}

void EncryptedConnection::appendAdditionalMessages(rtc::CopyOnWriteBuffer &buffer) {
    appendAcksToSend(buffer);

    if (_myNotYetAckedMessages.empty()) {
        return;
    }

    const auto now = rtc::TimeMillis();
    for (auto &resending : _myNotYetAckedMessages) {
        const auto sent = resending.lastSent;
        const auto when = sent
            ? (sent + _delayIntervals.minDelayBeforeMessageResend)
            : 0;

        assert(resending.data.size() >= 5);
        const auto counter = CounterFromSeq(ReadSeq(resending.data.data()));
        const auto type = uint8_t(resending.data.data()[4]);
        if (when > now) {
            RTC_LOG(LS_INFO) << logHeader()
                << "Skip RESEND:type" << type << "#" << counter
                << " (wait " << (when - now) << "ms).";
            break;
        } else if (enoughSpaceInPacket(buffer, resending.data.size())) {
            RTC_LOG(LS_INFO) << logHeader()
                << "Add RESEND:type" << type << "#" << counter;
            buffer.AppendData(resending.data);
            resending.lastSent = now;
        } else {
            RTC_LOG(LS_INFO) << logHeader()
                << "Skip RESEND:type" << type << "#" << counter
                << " (no space, length: " << resending.data.size() << ", already: " << buffer.size() << ")";
            break;
        }
    }
    if (!_resendTimerActive) {
        _resendTimerActive = true;
        _requestSendService(
            _delayIntervals.maxDelayBeforeMessageResend,
            kServiceCauseResend);
    }
}

auto EncryptedConnection::encryptPrepared(const rtc::CopyOnWriteBuffer &buffer)
-> EncryptedPacket {
    auto result = EncryptedPacket();
    result.counter = CounterFromSeq(ReadSeq(buffer.data()));
    result.bytes.resize(16 + buffer.size());

    const auto x = (_key.isOutgoing ? 0 : 8) + (_type == Type::Signaling ? 128 : 0);
    const auto key = _key.value->data();

    const auto msgKeyLarge = ConcatSHA256(
        MemorySpan{ key + 88 + x, 32 },
        MemorySpan{ buffer.data(), buffer.size() });
    const auto msgKey = result.bytes.data();
    memcpy(msgKey, msgKeyLarge.data() + 8, 16);

    auto aesKeyIv = PrepareAesKeyIv(key, msgKey, x);

    AesProcessCtr(
        MemorySpan{ buffer.data(), buffer.size() },
        result.bytes.data() + 16,
        std::move(aesKeyIv));

    return result;
}

bool EncryptedConnection::registerIncomingCounter(uint32_t incomingCounter) {
    auto &list = _largestIncomingCounters;

    const auto position = std::lower_bound(list.begin(), list.end(), incomingCounter);
    const auto largest = list.empty() ? 0 : list.back();
    if (position != list.end() && *position == incomingCounter) {
        // The packet is in the list already.
        return false;
    } else if (incomingCounter + kKeepIncomingCountersCount <= largest) {
        // The packet is too old.
        return false;
    }
    const auto eraseTill = std::find_if(list.begin(), list.end(), [&](uint32_t counter) {
        return (counter + kKeepIncomingCountersCount > incomingCounter);
    });
    const auto eraseCount = eraseTill - list.begin();
    const auto positionIndex = (position - list.begin()) - eraseCount;
    list.erase(list.begin(), eraseTill);

    assert(positionIndex >= 0 && positionIndex <= list.size());
    list.insert(list.begin() + positionIndex, incomingCounter);
    return true;
}

auto EncryptedConnection::handleIncomingPacket(const char *bytes, size_t size)
-> absl::optional<DecryptedPacket> {
    if (size < 21 || size > kMaxIncomingPacketSize) {
        return LogError("Bad incoming packet size: ", std::to_string(size));
    }

    const auto x = (_key.isOutgoing ? 8 : 0) + (_type == Type::Signaling ? 128 : 0);
    const auto key = _key.value->data();
    const auto msgKey = reinterpret_cast<const uint8_t*>(bytes);
    const auto encryptedData = msgKey + 16;
    const auto dataSize = size - 16;

    auto aesKeyIv = PrepareAesKeyIv(key, msgKey, x);

    auto decryptionBuffer = rtc::Buffer(dataSize);
    AesProcessCtr(
        MemorySpan{ encryptedData, dataSize },
        decryptionBuffer.data(),
        std::move(aesKeyIv));

    const auto msgKeyLarge = ConcatSHA256(
        MemorySpan{ key + 88 + x, 32 },
        MemorySpan{ decryptionBuffer.data(), decryptionBuffer.size() });
    if (ConstTimeIsDifferent(msgKeyLarge.data() + 8, msgKey, 16)) {
        return LogError("Bad incoming data hash.");
    }

    const auto incomingSeq = ReadSeq(decryptionBuffer.data());
    const auto incomingCounter = CounterFromSeq(incomingSeq);
    if (!registerIncomingCounter(incomingCounter)) {
        // We've received that packet already.
        return LogError("Already handled packet received.", std::to_string(incomingCounter));
    }
    return processPacket(decryptionBuffer, incomingSeq);
}

absl::optional<EncryptedConnection::DecryptedRawPacket> EncryptedConnection::handleIncomingRawPacket(const char *bytes, size_t size) {
    if (size < 21 || size > kMaxIncomingPacketSize) {
        return LogError("Bad incoming packet size: ", std::to_string(size));
    }

    const auto x = (_key.isOutgoing ? 8 : 0) + (_type == Type::Signaling ? 128 : 0);
    const auto key = _key.value->data();
    const auto msgKey = reinterpret_cast<const uint8_t*>(bytes);
    const auto encryptedData = msgKey + 16;
    const auto dataSize = size - 16;

    auto aesKeyIv = PrepareAesKeyIv(key, msgKey, x);

    auto decryptionBuffer = rtc::Buffer(dataSize);
    AesProcessCtr(
        MemorySpan{ encryptedData, dataSize },
        decryptionBuffer.data(),
        std::move(aesKeyIv));

    const auto msgKeyLarge = ConcatSHA256(
        MemorySpan{ key + 88 + x, 32 },
        MemorySpan{ decryptionBuffer.data(), decryptionBuffer.size() });
    if (ConstTimeIsDifferent(msgKeyLarge.data() + 8, msgKey, 16)) {
        return LogError("Bad incoming data hash.");
    }

    const auto incomingSeq = ReadSeq(decryptionBuffer.data());
    const auto incomingCounter = CounterFromSeq(incomingSeq);
    if (!registerIncomingCounter(incomingCounter)) {
        // We've received that packet already.
        return LogError("Already handled packet received.", std::to_string(incomingCounter));
    }
    return processRawPacket(decryptionBuffer, incomingSeq);
}

auto EncryptedConnection::processPacket(
    const rtc::Buffer &fullBuffer,
    uint32_t packetSeq)
-> absl::optional<DecryptedPacket> {
    assert(fullBuffer.size() >= 5);

    auto additionalMessage = false;
    auto firstMessageRequiringAck = true;
    auto newRequiringAckReceived = false;

    auto currentSeq = packetSeq;
    auto currentCounter = CounterFromSeq(currentSeq);
    rtc::ByteBufferReader reader(rtc::ArrayView<const uint8_t>(
        reinterpret_cast<const uint8_t *>(fullBuffer.data() + 4), // Skip seq.
        fullBuffer.size() - 4));

    auto result = absl::optional<DecryptedPacket>();
    while (true) {
        const auto type = uint8_t(*reader.Data());
        const auto singleMessagePacket = ((currentSeq & kSingleMessagePacketSeqBit) != 0);
        if (singleMessagePacket && additionalMessage) {
            return LogError("Single message packet bit in not first message.");
        }

        if (type == kEmptyId) {
            if (additionalMessage) {
                return LogError("Empty message should be only the first one in the packet.");
            }
            RTC_LOG(LS_INFO) << logHeader()
                << "Got RECV:empty" << "#" << currentCounter;
            reader.Consume(1);
        } else if (type == kAckId) {
            if (!additionalMessage) {
                return LogError("Ack message must not be the first one in the packet.");
            }
            ackMyMessage(currentSeq);
            reader.Consume(1);
        } else if (auto message = DeserializeMessage(reader, singleMessagePacket)) {
            const auto messageRequiresAck = ((currentSeq & kMessageRequiresAckSeqBit) != 0);
            const auto skipMessage = messageRequiresAck
                ? !registerSentAck(currentCounter, firstMessageRequiringAck)
                : (additionalMessage && !registerIncomingCounter(currentCounter));
            if (messageRequiresAck) {
                firstMessageRequiringAck = false;
                if (!skipMessage) {
                    newRequiringAckReceived = true;
                }
                sendAckPostponed(currentSeq);
                RTC_LOG(LS_INFO) << logHeader()
                    << (skipMessage ? "Repeated RECV:type" : "Got RECV:type") << type << "#" << currentCounter;
            }
            if (!skipMessage) {
                appendReceivedMessage(result, std::move(*message), currentSeq);
            }
        } else {
            return LogError("Could not parse message from packet, type: ", std::to_string(type));
        }
        if (!reader.Length()) {
            break;
        } else if (singleMessagePacket) {
            return LogError("Single message didn't fill the entire packet.");
        } else if (reader.Length() < 5) {
            return LogError("Bad remaining data size: ", std::to_string(reader.Length()));
        }
        const auto success = reader.ReadUInt32(&currentSeq);
        assert(success);
        (void)success;
        currentCounter = CounterFromSeq(currentSeq);

        additionalMessage = true;
    }

    if (!_acksToSendSeqs.empty()) {
        if (newRequiringAckReceived) {
            _requestSendService(0, 0);
|
||||
} else if (!_sendAcksTimerActive) {
|
||||
_sendAcksTimerActive = true;
|
||||
_requestSendService(
|
||||
_delayIntervals.maxDelayBeforeAckResend,
|
||||
kServiceCauseAcks);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
auto EncryptedConnection::processRawPacket(
|
||||
const rtc::Buffer &fullBuffer,
|
||||
uint32_t packetSeq)
|
||||
-> absl::optional<DecryptedRawPacket> {
|
||||
assert(fullBuffer.size() >= 5);
|
||||
|
||||
auto additionalMessage = false;
|
||||
auto firstMessageRequiringAck = true;
|
||||
auto newRequiringAckReceived = false;
|
||||
|
||||
auto currentSeq = packetSeq;
|
||||
auto currentCounter = CounterFromSeq(currentSeq);
|
||||
rtc::ByteBufferReader reader(rtc::ArrayView<const uint8_t>(
|
||||
reinterpret_cast<const uint8_t *>(fullBuffer.data() + 4), // Skip seq.
|
||||
fullBuffer.size() - 4));
|
||||
|
||||
auto result = absl::optional<DecryptedRawPacket>();
|
||||
while (true) {
|
||||
const auto type = uint8_t(*reader.Data());
|
||||
const auto singleMessagePacket = ((currentSeq & kSingleMessagePacketSeqBit) != 0);
|
||||
if (singleMessagePacket && additionalMessage) {
|
||||
return LogError("Single message packet bit in not first message.");
|
||||
}
|
||||
|
||||
if (type == kEmptyId) {
|
||||
if (additionalMessage) {
|
||||
return LogError("Empty message should be only the first one in the packet.");
|
||||
}
|
||||
RTC_LOG(LS_INFO) << logHeader()
|
||||
<< "Got RECV:empty" << "#" << currentCounter;
|
||||
reader.Consume(1);
|
||||
} else if (type == kAckId) {
|
||||
if (!additionalMessage) {
|
||||
return LogError("Ack message must not be the first one in the packet.");
|
||||
}
|
||||
ackMyMessage(currentSeq);
|
||||
reader.Consume(1);
|
||||
} else if (type == kCustomId) {
|
||||
reader.Consume(1);
|
||||
|
||||
if (auto message = DeserializeRawMessage(reader, singleMessagePacket)) {
|
||||
const auto messageRequiresAck = ((currentSeq & kMessageRequiresAckSeqBit) != 0);
|
||||
const auto skipMessage = messageRequiresAck
|
||||
? !registerSentAck(currentCounter, firstMessageRequiringAck)
|
||||
: (additionalMessage && !registerIncomingCounter(currentCounter));
|
||||
if (messageRequiresAck) {
|
||||
firstMessageRequiringAck = false;
|
||||
if (!skipMessage) {
|
||||
newRequiringAckReceived = true;
|
||||
}
|
||||
sendAckPostponed(currentSeq);
|
||||
RTC_LOG(LS_INFO) << logHeader()
|
||||
<< (skipMessage ? "Repeated RECV:type" : "Got RECV:type") << type << "#" << currentCounter;
|
||||
}
|
||||
if (!skipMessage) {
|
||||
appendReceivedRawMessage(result, std::move(*message), currentSeq);
|
||||
}
|
||||
} else {
|
||||
return LogError("Could not parse message from packet, type: ", std::to_string(type));
|
||||
}
|
||||
} else {
|
||||
return LogError("Could not parse message from packet, type: ", std::to_string(type));
|
||||
}
|
||||
if (!reader.Length()) {
|
||||
break;
|
||||
} else if (singleMessagePacket) {
|
||||
return LogError("Single message didn't fill the entire packet.");
|
||||
} else if (reader.Length() < 5) {
|
||||
return LogError("Bad remaining data size: ", std::to_string(reader.Length()));
|
||||
}
|
||||
const auto success = reader.ReadUInt32(¤tSeq);
|
||||
assert(success);
|
||||
(void)success;
|
||||
currentCounter = CounterFromSeq(currentSeq);
|
||||
|
||||
additionalMessage = true;
|
||||
}
|
||||
|
||||
if (!_acksToSendSeqs.empty()) {
|
||||
if (newRequiringAckReceived) {
|
||||
_requestSendService(0, 0);
|
||||
} else if (!_sendAcksTimerActive) {
|
||||
_sendAcksTimerActive = true;
|
||||
_requestSendService(
|
||||
_delayIntervals.maxDelayBeforeAckResend,
|
||||
kServiceCauseAcks);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void EncryptedConnection::appendReceivedMessage(
|
||||
absl::optional<DecryptedPacket> &to,
|
||||
Message &&message,
|
||||
uint32_t incomingSeq) {
|
||||
auto decrypted = DecryptedMessage{
|
||||
std::move(message),
|
||||
CounterFromSeq(incomingSeq)
|
||||
};
|
||||
if (to) {
|
||||
to->additional.push_back(std::move(decrypted));
|
||||
} else {
|
||||
to = DecryptedPacket{ std::move(decrypted) };
|
||||
}
|
||||
}
|
||||
|
||||
void EncryptedConnection::appendReceivedRawMessage(
|
||||
absl::optional<DecryptedRawPacket> &to,
|
||||
rtc::CopyOnWriteBuffer &&message,
|
||||
uint32_t incomingSeq) {
|
||||
auto decrypted = DecryptedRawMessage{
|
||||
std::move(message),
|
||||
CounterFromSeq(incomingSeq)
|
||||
};
|
||||
if (to) {
|
||||
to->additional.push_back(std::move(decrypted));
|
||||
} else {
|
||||
to = DecryptedRawPacket{ std::move(decrypted) };
|
||||
}
|
||||
}
|
||||
|
||||
const char *EncryptedConnection::logHeader() const {
|
||||
return (_type == Type::Signaling) ? "(signaling) " : "(transport) ";
|
||||
}
|
||||
|
||||
bool EncryptedConnection::registerSentAck(uint32_t counter, bool firstInPacket) {
|
||||
auto &list = _acksSentCounters;
|
||||
|
||||
const auto position = std::lower_bound(list.begin(), list.end(), counter);
|
||||
const auto already = (position != list.end()) && (*position == counter);
|
||||
|
||||
const auto was = list;
|
||||
if (firstInPacket) {
|
||||
list.erase(list.begin(), position);
|
||||
if (!already) {
|
||||
list.insert(list.begin(), counter);
|
||||
}
|
||||
} else if (!already) {
|
||||
list.insert(position, counter);
|
||||
}
|
||||
return !already;
|
||||
}
|
||||
|
||||
void EncryptedConnection::sendAckPostponed(uint32_t incomingSeq) {
|
||||
auto &list = _acksToSendSeqs;
|
||||
const auto already = std::find(list.begin(), list.end(), incomingSeq);
|
||||
if (already == list.end()) {
|
||||
list.push_back(incomingSeq);
|
||||
}
|
||||
}
|
||||
|
||||
void EncryptedConnection::ackMyMessage(uint32_t seq) {
|
||||
auto type = uint8_t(0);
|
||||
auto &list = _myNotYetAckedMessages;
|
||||
for (auto i = list.begin(), e = list.end(); i != e; ++i) {
|
||||
assert(i->data.size() >= 5);
|
||||
if (ReadSeq(i->data.cdata()) == seq) {
|
||||
type = uint8_t(i->data.cdata()[4]);
|
||||
list.erase(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
RTC_LOG(LS_INFO) << logHeader()
|
||||
<< (type ? "Got ACK:type" + std::to_string(type) + "#" : "Repeated ACK#")
|
||||
<< CounterFromSeq(seq);
|
||||
}
|
||||
|
||||
auto EncryptedConnection::DelayIntervalsByType(Type type) -> DelayIntervals {
|
||||
auto result = DelayIntervals();
|
||||
const auto signaling = (type == Type::Signaling);
|
||||
|
||||
// Don't resend faster than min delay even if we have a packet we can attach to.
|
||||
result.minDelayBeforeMessageResend = signaling ? 3000 : 300;
|
||||
|
||||
// When max delay elapsed we resend anyway, in a dedicated packet.
|
||||
result.maxDelayBeforeMessageResend = signaling ? 5000 : 1000;
|
||||
result.maxDelayBeforeAckResend = signaling ? 5000 : 1000;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
rtc::CopyOnWriteBuffer EncryptedConnection::SerializeEmptyMessageWithSeq(uint32_t seq) {
|
||||
auto result = rtc::CopyOnWriteBuffer(5);
|
||||
auto bytes = result.MutableData();
|
||||
WriteSeq(bytes, seq);
|
||||
bytes[4] = kEmptyId;
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
104
TMessagesProj/jni/voip/tgcalls/EncryptedConnection.h
Normal file
@ -0,0 +1,104 @@
#ifndef TGCALLS_ENCRYPTED_CONNECTION_H
#define TGCALLS_ENCRYPTED_CONNECTION_H

#include "Instance.h"
#include "Message.h"

namespace rtc {
class ByteBufferReader;
} // namespace rtc

namespace tgcalls {

class EncryptedConnection final {
public:
    enum class Type : uint8_t {
        Signaling,
        Transport,
    };
    EncryptedConnection(
        Type type,
        const EncryptionKey &key,
        std::function<void(int delayMs, int cause)> requestSendService);

    struct EncryptedPacket {
        std::vector<uint8_t> bytes;
        uint32_t counter = 0;
    };
    absl::optional<EncryptedPacket> prepareForSending(const Message &message);
    absl::optional<EncryptedPacket> prepareForSendingRawMessage(rtc::CopyOnWriteBuffer &serialized, bool messageRequiresAck);
    absl::optional<EncryptedPacket> prepareForSendingService(int cause);

    struct DecryptedPacket {
        DecryptedMessage main;
        std::vector<DecryptedMessage> additional;
    };
    struct DecryptedRawPacket {
        DecryptedRawMessage main;
        std::vector<DecryptedRawMessage> additional;
    };
    absl::optional<DecryptedPacket> handleIncomingPacket(const char *bytes, size_t size);
    absl::optional<DecryptedRawPacket> handleIncomingRawPacket(const char *bytes, size_t size);

    absl::optional<rtc::CopyOnWriteBuffer> encryptRawPacket(rtc::CopyOnWriteBuffer const &buffer);
    absl::optional<rtc::CopyOnWriteBuffer> decryptRawPacket(rtc::CopyOnWriteBuffer const &buffer);

private:
    struct DelayIntervals {
        // In milliseconds.
        int minDelayBeforeMessageResend = 0;
        int maxDelayBeforeMessageResend = 0;
        int maxDelayBeforeAckResend = 0;
    };
    struct MessageForResend {
        rtc::CopyOnWriteBuffer data;
        int64_t lastSent = 0;
    };

    bool enoughSpaceInPacket(const rtc::CopyOnWriteBuffer &buffer, size_t amount) const;
    size_t packetLimit() const;
    size_t fullNotAckedLength() const;
    void appendAcksToSend(rtc::CopyOnWriteBuffer &buffer);
    void appendAdditionalMessages(rtc::CopyOnWriteBuffer &buffer);
    EncryptedPacket encryptPrepared(const rtc::CopyOnWriteBuffer &buffer);
    bool registerIncomingCounter(uint32_t incomingCounter);
    absl::optional<DecryptedPacket> processPacket(const rtc::Buffer &fullBuffer, uint32_t packetSeq);
    absl::optional<DecryptedRawPacket> processRawPacket(const rtc::Buffer &fullBuffer, uint32_t packetSeq);
    bool registerSentAck(uint32_t counter, bool firstInPacket);
    void ackMyMessage(uint32_t counter);
    void sendAckPostponed(uint32_t incomingSeq);
    bool haveAdditionalMessages() const;
    absl::optional<uint32_t> computeNextSeq(bool messageRequiresAck, bool singleMessagePacket);
    void appendReceivedMessage(
        absl::optional<DecryptedPacket> &to,
        Message &&message,
        uint32_t incomingSeq);
    void appendReceivedRawMessage(
        absl::optional<DecryptedRawPacket> &to,
        rtc::CopyOnWriteBuffer &&message,
        uint32_t incomingSeq);
    absl::optional<EncryptedPacket> prepareForSendingMessageInternal(rtc::CopyOnWriteBuffer &serialized, uint32_t seq, bool messageRequiresAck);

    const char *logHeader() const;

    static DelayIntervals DelayIntervalsByType(Type type);
    static rtc::CopyOnWriteBuffer SerializeEmptyMessageWithSeq(uint32_t seq);

    Type _type = Type();
    EncryptionKey _key;
    uint32_t _counter = 0;
    DelayIntervals _delayIntervals;
    std::vector<uint32_t> _largestIncomingCounters;
    std::vector<uint32_t> _ackedIncomingCounters;
    std::vector<uint32_t> _acksToSendSeqs;
    std::vector<uint32_t> _acksSentCounters;
    std::vector<MessageForResend> _myNotYetAckedMessages;
    std::function<void(int delayMs, int cause)> _requestSendService;
    bool _resendTimerActive = false;
    bool _sendAcksTimerActive = false;

};

} // namespace tgcalls

#endif
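For orientation, a minimal round-trip sketch of this interface (illustrative only, not part of the commit): it assumes both sides share the same 256-byte key and that `message` was built per Message.h, which is not shown in this excerpt.

// Illustrative only: both ends share the same key; `message` is a
// tgcalls::Message built per Message.h (not shown in this commit excerpt).
void roundTrip(std::shared_ptr<std::array<uint8_t, tgcalls::EncryptionKey::kSize>> key,
        const tgcalls::Message &message) {
    tgcalls::EncryptedConnection sender(
        tgcalls::EncryptedConnection::Type::Signaling,
        tgcalls::EncryptionKey(key, /*isOutgoing=*/true),
        [](int delayMs, int cause) { /* schedule prepareForSendingService(cause) */ });
    tgcalls::EncryptedConnection receiver(
        tgcalls::EncryptedConnection::Type::Signaling,
        tgcalls::EncryptionKey(key, /*isOutgoing=*/false),
        [](int delayMs, int cause) {});

    if (const auto packet = sender.prepareForSending(message)) {
        const auto decrypted = receiver.handleIncomingPacket(
            reinterpret_cast<const char*>(packet->bytes.data()),
            packet->bytes.size());
        // On success, decrypted->main holds the first message and
        // decrypted->additional any further messages packed into the packet.
    }
}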
257
TMessagesProj/jni/voip/tgcalls/FakeAudioDeviceModule.cpp
Normal file
@ -0,0 +1,257 @@
#include "FakeAudioDeviceModule.h"
|
||||
|
||||
#include "modules/audio_device/include/audio_device_default.h"
|
||||
#include "rtc_base/ref_counted_object.h"
|
||||
#include "rtc_base/platform_thread.h"
|
||||
#include "rtc_base/time_utils.h"
|
||||
|
||||
#include <thread>
|
||||
#include <mutex>
|
||||
#include <condition_variable>
|
||||
|
||||
namespace tgcalls {
|
||||
class FakeAudioDeviceModuleImpl : public webrtc::webrtc_impl::AudioDeviceModuleDefault<webrtc::AudioDeviceModule> {
|
||||
public:
|
||||
static webrtc::scoped_refptr<webrtc::AudioDeviceModule> Create(webrtc::TaskQueueFactory* taskQueueFactory,
|
||||
std::shared_ptr<FakeAudioDeviceModule::Renderer> renderer,
|
||||
std::shared_ptr<FakeAudioDeviceModule::Recorder> recorder,
|
||||
FakeAudioDeviceModule::Options options) {
|
||||
return webrtc::scoped_refptr<webrtc::AudioDeviceModule>(
|
||||
new rtc::RefCountedObject<FakeAudioDeviceModuleImpl>(taskQueueFactory, options, std::move(renderer), std::move(recorder)));
|
||||
}
|
||||
|
||||
FakeAudioDeviceModuleImpl(webrtc::TaskQueueFactory*, FakeAudioDeviceModule::Options options,
|
||||
std::shared_ptr<FakeAudioDeviceModule::Renderer> renderer,
|
||||
std::shared_ptr<FakeAudioDeviceModule::Recorder> recorder)
|
||||
: num_channels_{options.num_channels}, samples_per_sec_{options.samples_per_sec}, scheduler_(options.scheduler_),
|
||||
renderer_(std::move(renderer)), recorder_(std::move(recorder)) {
|
||||
if (!scheduler_) {
|
||||
scheduler_ = [](auto f) {
|
||||
std::thread([f = std::move(f)]() {
|
||||
while (true) {
|
||||
double wait = f();
|
||||
if (wait < 0) {
|
||||
return;
|
||||
}
|
||||
std::this_thread::sleep_for(std::chrono::microseconds (static_cast<int64_t>(wait * 1000000)));
|
||||
}
|
||||
}).detach();
|
||||
};
|
||||
}
|
||||
RTC_CHECK(num_channels_ == 1 || num_channels_ == 2);
|
||||
auto good_sample_rate = [](size_t sr) {
|
||||
return sr == 8000 || sr == 16000 || sr == 32000 || sr == 44100 || sr == 48000;
|
||||
};
|
||||
RTC_CHECK(good_sample_rate(samples_per_sec_));
|
||||
samples_per_frame_ = samples_per_sec_ / 100;
|
||||
playout_buffer_.resize(samples_per_frame_ * 2 /* 2 in case stereo will be turned on later */, 0);
|
||||
}
|
||||
|
||||
~FakeAudioDeviceModuleImpl() override {
|
||||
StopPlayout();
|
||||
}
|
||||
|
||||
int32_t PlayoutIsAvailable(bool* available) override {
|
||||
if (available) {
|
||||
*available = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t StereoPlayoutIsAvailable(bool* available) const override {
|
||||
if (available) {
|
||||
*available = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
int32_t StereoPlayout(bool* enabled) const override {
|
||||
if (enabled) {
|
||||
*enabled = num_channels_ == 2;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
int32_t SetStereoPlayout(bool enable) override {
|
||||
size_t new_num_channels = enable ? 2 : 1;
|
||||
if (new_num_channels != num_channels_) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t Init() override {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t RegisterAudioCallback(webrtc::AudioTransport* callback) override {
|
||||
std::unique_lock<std::mutex> lock(render_mutex_);
|
||||
audio_callback_ = callback;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t StartPlayout() override {
|
||||
std::unique_lock<std::mutex> lock(render_mutex_);
|
||||
if (!renderer_) {
|
||||
return 0;
|
||||
}
|
||||
if (rendering_) {
|
||||
return 0;
|
||||
}
|
||||
need_rendering_ = true;
|
||||
rendering_ = true;
|
||||
scheduler_([this]{
|
||||
return Render() / 1000000.0;
|
||||
});
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t StopPlayout() override {
|
||||
if (!rendering_) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
need_rendering_ = false;
|
||||
std::unique_lock<std::mutex> lock(render_mutex_);
|
||||
render_cond_.wait(lock, [this]{ return !rendering_; });
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool Playing() const override {
|
||||
return rendering_;
|
||||
}
|
||||
|
||||
int32_t StartRecording() override {
|
||||
std::unique_lock<std::mutex> lock(record_mutex_);
|
||||
if (!recorder_) {
|
||||
return 0;
|
||||
}
|
||||
if (recording_) {
|
||||
return 0;
|
||||
}
|
||||
need_recording_ = true;
|
||||
recording_ = true;
|
||||
scheduler_([this]{
|
||||
return Record() / 1000000.0;
|
||||
});
|
||||
return 0;
|
||||
}
|
||||
int32_t StopRecording() override {
|
||||
if (!recording_) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
need_recording_ = false;
|
||||
std::unique_lock<std::mutex> lock(record_mutex_);
|
||||
record_cond_.wait(lock, [this]{ return !recording_; });
|
||||
|
||||
return 0;
|
||||
}
|
||||
bool Recording() const override {
|
||||
return recording_;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
int32_t Render() {
|
||||
std::unique_lock<std::mutex> lock(render_mutex_);
|
||||
if (!need_rendering_) {
|
||||
rendering_ = false;
|
||||
render_cond_.notify_all();
|
||||
return -1;
|
||||
}
|
||||
|
||||
size_t samples_out = 0;
|
||||
int64_t elapsed_time_ms = -1;
|
||||
int64_t ntp_time_ms = -1;
|
||||
size_t bytes_per_sample = 2 * num_channels_;
|
||||
|
||||
RTC_CHECK(audio_callback_);
|
||||
if (renderer_) {
|
||||
renderer_->BeginFrame(0);
|
||||
}
|
||||
audio_callback_->NeedMorePlayData(samples_per_frame_, bytes_per_sample, num_channels_, samples_per_sec_,
|
||||
playout_buffer_.data(), samples_out, &elapsed_time_ms, &ntp_time_ms);
|
||||
if (renderer_) {
|
||||
renderer_->EndFrame();
|
||||
}
|
||||
if (samples_out != 0 && renderer_) {
|
||||
AudioFrame frame;
|
||||
frame.audio_samples = playout_buffer_.data();
|
||||
frame.num_samples = samples_out;
|
||||
frame.bytes_per_sample = bytes_per_sample;
|
||||
frame.num_channels = num_channels_;
|
||||
frame.samples_per_sec = samples_per_sec_;
|
||||
frame.elapsed_time_ms = elapsed_time_ms;
|
||||
frame.ntp_time_ms = ntp_time_ms;
|
||||
renderer_->Render(frame);
|
||||
}
|
||||
int32_t wait_for_us = -1;
|
||||
if (renderer_) {
|
||||
wait_for_us = renderer_->WaitForUs();
|
||||
}
|
||||
return wait_for_us;
|
||||
}
|
||||
|
||||
int32_t Record() {
|
||||
std::unique_lock<std::mutex> lock(record_mutex_);
|
||||
if (!need_recording_) {
|
||||
recording_ = false;
|
||||
record_cond_.notify_all();
|
||||
return -1;
|
||||
}
|
||||
|
||||
auto frame = recorder_->Record();
|
||||
if (frame.num_samples != 0) {
|
||||
uint32_t new_mic_level;
|
||||
audio_callback_->RecordedDataIsAvailable(frame.audio_samples,
|
||||
frame.num_samples, frame.bytes_per_sample, frame.num_channels,
|
||||
frame.samples_per_sec, 0, 0, 0, false, new_mic_level);
|
||||
}
|
||||
|
||||
int32_t wait_for_us = -1;
|
||||
if (recorder_) {
|
||||
wait_for_us = recorder_->WaitForUs();
|
||||
}
|
||||
return wait_for_us;
|
||||
}
|
||||
|
||||
size_t num_channels_;
|
||||
const uint32_t samples_per_sec_;
|
||||
size_t samples_per_frame_{0};
|
||||
|
||||
std::function<void(FakeAudioDeviceModule::Task)> scheduler_;
|
||||
|
||||
mutable std::mutex render_mutex_;
|
||||
std::atomic<bool> need_rendering_{false};
|
||||
std::atomic<bool> rendering_{false};
|
||||
std::condition_variable render_cond_;
|
||||
std::unique_ptr<rtc::PlatformThread> renderThread_;
|
||||
|
||||
mutable std::mutex record_mutex_;
|
||||
std::atomic<bool> need_recording_{false};
|
||||
std::atomic<bool> recording_{false};
|
||||
std::condition_variable record_cond_;
|
||||
std::unique_ptr<rtc::PlatformThread> recordThread_;
|
||||
|
||||
|
||||
webrtc::AudioTransport* audio_callback_{nullptr};
|
||||
const std::shared_ptr<FakeAudioDeviceModule::Renderer> renderer_;
|
||||
const std::shared_ptr<FakeAudioDeviceModule::Recorder> recorder_;
|
||||
std::vector<int16_t> playout_buffer_;
|
||||
};
|
||||
|
||||
std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> FakeAudioDeviceModule::Creator(
|
||||
std::shared_ptr<Renderer> renderer, std::shared_ptr<Recorder> recorder, Options options) {
|
||||
bool is_renderer_empty = bool(renderer);
|
||||
auto boxed_renderer = std::make_shared<std::shared_ptr<Renderer>>(std::move(renderer));
|
||||
bool is_recorder_empty = bool(recorder);
|
||||
auto boxed_recorder = std::make_shared<std::shared_ptr<Recorder>>(std::move(recorder));
|
||||
return
|
||||
[boxed_renderer = std::move(boxed_renderer), is_renderer_empty,
|
||||
boxed_recorder = std::move(boxed_recorder), is_recorder_empty, options](webrtc::TaskQueueFactory* task_factory) {
|
||||
RTC_CHECK(is_renderer_empty == bool(*boxed_renderer)); // call only once if renderer exists
|
||||
RTC_CHECK(is_recorder_empty == bool(*boxed_recorder)); // call only once if recorder exists
|
||||
return FakeAudioDeviceModuleImpl::Create(task_factory, std::move(*boxed_renderer), std::move(*boxed_recorder), options);
|
||||
};
|
||||
}
|
||||
} // namespace tgcalls
|
||||
54
TMessagesProj/jni/voip/tgcalls/FakeAudioDeviceModule.h
Normal file
@ -0,0 +1,54 @@
#pragma once

#include <functional>
#include <memory>

#include "AudioFrame.h"

namespace webrtc {
class AudioDeviceModule;
class TaskQueueFactory;
} // namespace webrtc

namespace webrtc {
template <class T>
class scoped_refptr;
}

namespace tgcalls {
class FakeAudioDeviceModule {
public:
    class Renderer {
    public:
        virtual ~Renderer() = default;
        virtual bool Render(const AudioFrame &samples) = 0;
        virtual void BeginFrame(double timestamp) {
        }
        virtual void AddFrameChannel(uint32_t ssrc, const tgcalls::AudioFrame &frame) {
        }
        virtual void EndFrame() {
        }
        virtual int32_t WaitForUs() {
            return 10000;
        }
    };
    class Recorder {
    public:
        virtual ~Recorder() = default;
        virtual AudioFrame Record() = 0;
        virtual int32_t WaitForUs() {
            return 10000;
        }
    };
    using Task = std::function<double()>;
    struct Options {
        uint32_t samples_per_sec{48000};
        uint32_t num_channels{2};
        std::function<void(Task)> scheduler_;
    };
    static std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory *)> Creator(
        std::shared_ptr<Renderer> renderer,
        std::shared_ptr<Recorder> recorder,
        Options options);
};
} // namespace tgcalls
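A plausible way to wire this module into a call descriptor (illustrative only, not part of the commit; MyRenderer and MyRecorder stand for hypothetical implementations, and Descriptor is declared in Instance.h further down):

// Illustrative only: the renderer/recorder arguments are hypothetical
// implementations of FakeAudioDeviceModule::Renderer and ::Recorder.
void useFakeAudio(tgcalls::Descriptor &descriptor,
        std::shared_ptr<tgcalls::FakeAudioDeviceModule::Renderer> renderer,
        std::shared_ptr<tgcalls::FakeAudioDeviceModule::Recorder> recorder) {
    tgcalls::FakeAudioDeviceModule::Options options;
    options.samples_per_sec = 48000;
    options.num_channels = 2;
    // The returned factory moves the renderer/recorder out on its first call,
    // so it is intended to be invoked exactly once (see the RTC_CHECKs in
    // FakeAudioDeviceModule.cpp above).
    descriptor.createAudioDeviceModule = tgcalls::FakeAudioDeviceModule::Creator(
        std::move(renderer), std::move(recorder), options);
}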
173
TMessagesProj/jni/voip/tgcalls/FakeVideoTrackSource.cpp
Normal file
@ -0,0 +1,173 @@
#include "FakeVideoTrackSource.h"
|
||||
|
||||
#include "api/video/i420_buffer.h"
|
||||
#include "media/base/video_broadcaster.h"
|
||||
#include "pc/video_track_source.h"
|
||||
|
||||
#include "libyuv.h"
|
||||
|
||||
#include <thread>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
int WIDTH = 1280;
|
||||
int HEIGHT = 720;
|
||||
|
||||
class ChessFrameSource : public FrameSource {
|
||||
public:
|
||||
ChessFrameSource() {
|
||||
int N = 100;
|
||||
frames_.reserve(N);
|
||||
for (int i = 0; i < N; i++) {
|
||||
frames_.push_back(genFrame(i, N));
|
||||
}
|
||||
}
|
||||
Info info() const override{
|
||||
return Info{WIDTH, HEIGHT};
|
||||
}
|
||||
// webrtc::VideoFrame next_frame() override {
|
||||
// i = (i + 1) % frames_.size();
|
||||
// return frames_[i].frame;
|
||||
// }
|
||||
void next_frame_rgb0(char *buf, double *pts) override {
|
||||
*pts = 0;
|
||||
i = (i + 1) % frames_.size();
|
||||
size_t size = WIDTH * HEIGHT * 4;
|
||||
memcpy(buf, frames_[i].rbga.get(), size);
|
||||
}
|
||||
|
||||
private:
|
||||
struct Frame {
|
||||
webrtc::VideoFrame frame;
|
||||
std::unique_ptr<std::uint8_t[]> rbga;
|
||||
};
|
||||
std::vector<Frame> frames_;
|
||||
size_t i = 0;
|
||||
Frame genFrame(int i, int n) {
|
||||
int width = WIDTH;
|
||||
int height = HEIGHT;
|
||||
auto bytes_ptr = std::make_unique<std::uint8_t[]>(width * height * 4);
|
||||
auto bytes = bytes_ptr.get();
|
||||
auto set_rgb = [&](int x, int y, std::uint8_t r, std::uint8_t g, std::uint8_t b) {
|
||||
auto dest = bytes + (x * width + y) * 4;
|
||||
dest[0] = r;
|
||||
dest[1] = g;
|
||||
dest[2] = b;
|
||||
dest[3] = 0;
|
||||
};
|
||||
auto angle = (double)i / n * M_PI;
|
||||
auto co = cos(angle);
|
||||
auto si = sin(angle);
|
||||
|
||||
for (int i = 0; i < height; i++) {
|
||||
for (int j = 0; j < width; j++) {
|
||||
double sx = (i - height / 2) * 20.0 / HEIGHT;
|
||||
double sy = (j - width / 2) * 20.0 / HEIGHT;
|
||||
|
||||
int x, y;
|
||||
if (sx * sx + sy * sy < 10) {
|
||||
x = int(floor(sx * co - sy * si));
|
||||
y = int(floor(sx * si + sy * co));
|
||||
} else {
|
||||
x = int(floor(sx));
|
||||
y = int(floor(sy));
|
||||
}
|
||||
std::uint8_t color = ((y & 1) ^ (x & 1)) * 255;
|
||||
set_rgb(i, j, color, color, color);
|
||||
}
|
||||
}
|
||||
|
||||
webrtc::scoped_refptr<webrtc::I420Buffer> buffer = webrtc::I420Buffer::Create(width, height);
|
||||
|
||||
libyuv::RGBAToI420(bytes, width * 4, buffer->MutableDataY(), buffer->StrideY(), buffer->MutableDataU(),
|
||||
buffer->StrideU(), buffer->MutableDataV(), buffer->StrideV(), width, height);
|
||||
|
||||
return Frame{webrtc::VideoFrame::Builder().set_video_frame_buffer(buffer).build(), std::move(bytes_ptr)};
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
webrtc::VideoFrame FrameSource::next_frame() {
|
||||
auto info = this->info();
|
||||
auto height = info.height;
|
||||
auto width = info.width;
|
||||
auto bytes_ptr = std::make_unique<std::uint8_t[]>(width * height * 4);
|
||||
double pts;
|
||||
next_frame_rgb0(reinterpret_cast<char *>(bytes_ptr.get()), &pts);
|
||||
webrtc::scoped_refptr<webrtc::I420Buffer> buffer = webrtc::I420Buffer::Create(width, height);
|
||||
libyuv::ABGRToI420(bytes_ptr.get(), width * 4, buffer->MutableDataY(), buffer->StrideY(), buffer->MutableDataU(),
|
||||
buffer->StrideU(), buffer->MutableDataV(), buffer->StrideV(), width, height);
|
||||
return webrtc::VideoFrame::Builder().set_timestamp_us(static_cast<int64_t>(pts * 1000000)).set_video_frame_buffer(buffer).build();
|
||||
}
|
||||
|
||||
class FakeVideoSource : public rtc::VideoSourceInterface<webrtc::VideoFrame> {
|
||||
public:
|
||||
FakeVideoSource(std::unique_ptr<FrameSource> source) {
|
||||
data_ = std::make_shared<Data>();
|
||||
std::thread([data = data_, source = std::move(source)] {
|
||||
std::uint32_t step = 0;
|
||||
while (!data->flag_) {
|
||||
step++;
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1000 / 30));
|
||||
auto frame = source->next_frame();
|
||||
frame.set_id(static_cast<std::uint16_t>(step));
|
||||
frame.set_timestamp_us(rtc::TimeMicros());
|
||||
data->broadcaster_.OnFrame(frame);
|
||||
}
|
||||
}).detach();
|
||||
}
|
||||
~FakeVideoSource() {
|
||||
data_->flag_ = true;
|
||||
}
|
||||
using VideoFrameT = webrtc::VideoFrame;
|
||||
void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrameT> *sink, const rtc::VideoSinkWants &wants) override {
|
||||
RTC_LOG(LS_WARNING) << "ADD";
|
||||
data_->broadcaster_.AddOrUpdateSink(sink, wants);
|
||||
}
|
||||
// RemoveSink must guarantee that at the time the method returns,
|
||||
// there is no current and no future calls to VideoSinkInterface::OnFrame.
|
||||
void RemoveSink(rtc::VideoSinkInterface<VideoFrameT> *sink) override {
|
||||
RTC_LOG(LS_WARNING) << "REMOVE";
|
||||
data_->broadcaster_.RemoveSink(sink);
|
||||
}
|
||||
|
||||
private:
|
||||
struct Data {
|
||||
std::atomic<bool> flag_;
|
||||
rtc::VideoBroadcaster broadcaster_;
|
||||
};
|
||||
std::shared_ptr<Data> data_;
|
||||
};
|
||||
|
||||
class FakeVideoTrackSourceImpl : public webrtc::VideoTrackSource {
|
||||
public:
|
||||
static webrtc::scoped_refptr<FakeVideoTrackSourceImpl> Create(std::unique_ptr<FrameSource> source) {
|
||||
return webrtc::scoped_refptr<FakeVideoTrackSourceImpl>(new rtc::RefCountedObject<FakeVideoTrackSourceImpl>(std::move(source)));
|
||||
}
|
||||
|
||||
explicit FakeVideoTrackSourceImpl(std::unique_ptr<FrameSource> source) : VideoTrackSource(false), source_(std::move(source)) {
|
||||
}
|
||||
|
||||
protected:
|
||||
FakeVideoSource source_;
|
||||
rtc::VideoSourceInterface<webrtc::VideoFrame> *source() override {
|
||||
return &source_;
|
||||
}
|
||||
};
|
||||
|
||||
std::function<webrtc::VideoTrackSourceInterface*()> FakeVideoTrackSource::create(std::unique_ptr<FrameSource> frame_source) {
|
||||
auto source = FakeVideoTrackSourceImpl::Create(std::move(frame_source));
|
||||
return [source] {
|
||||
return source.get();
|
||||
};
|
||||
}
|
||||
std::unique_ptr<FrameSource> FrameSource::chess(){
|
||||
return std::make_unique<ChessFrameSource>();
|
||||
}
|
||||
|
||||
void FrameSource::video_frame_to_rgb0(const webrtc::VideoFrame & src, char *dest){
|
||||
auto buffer = src.video_frame_buffer()->ToI420();
|
||||
libyuv::I420ToABGR(buffer->DataY(), buffer->StrideY(), buffer->DataU(),
|
||||
buffer->StrideU(), buffer->DataV(), buffer->StrideV( ), reinterpret_cast<uint8_t *>(dest), src.width() * 4, src.width(), src.height());
|
||||
}
|
||||
}
|
||||
35
TMessagesProj/jni/voip/tgcalls/FakeVideoTrackSource.h
Normal file
@ -0,0 +1,35 @@
#pragma once

#include <functional>
#include <memory>
#include <string>

namespace webrtc {
class VideoTrackSourceInterface;
class VideoFrame;
}

namespace tgcalls {
class FrameSource {
public:
    struct Info {
        int32_t width;
        int32_t height;
    };

    virtual ~FrameSource() = default;

    virtual Info info() const = 0;
    virtual webrtc::VideoFrame next_frame();
    static void video_frame_to_rgb0(const webrtc::VideoFrame &src, char *dest);
    virtual void next_frame_rgb0(char *buf, double *pt_in_seconds) = 0;

    static std::unique_ptr<FrameSource> chess();
    static std::unique_ptr<FrameSource> from_file(std::string path);
};

class FakeVideoTrackSource {
public:
    static std::function<webrtc::VideoTrackSourceInterface*()> create(std::unique_ptr<FrameSource> source);
};
}
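For reference, a minimal sketch of driving the fake source with the built-in chess pattern (illustrative only, not part of the commit):

// Illustrative only. The returned factory captures one shared source; every
// call yields the same webrtc::VideoTrackSourceInterface*, which is fed
// ~30 fps chessboard frames from a detached thread (see FakeVideoSource above).
auto makeSource = tgcalls::FakeVideoTrackSource::create(
    tgcalls::FrameSource::chess());
webrtc::VideoTrackSourceInterface *source = makeSource();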
7
TMessagesProj/jni/voip/tgcalls/FieldTrialsConfig.cpp
Normal file
@ -0,0 +1,7 @@
#include "FieldTrialsConfig.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
webrtc::FieldTrialBasedConfig fieldTrialsBasedConfig;
|
||||
|
||||
}
|
||||
12
TMessagesProj/jni/voip/tgcalls/FieldTrialsConfig.h
Normal file
@ -0,0 +1,12 @@
#ifndef TGCALLS_FIELD_TRIALS_CONFIG_H
#define TGCALLS_FIELD_TRIALS_CONFIG_H

#include "api/transport/field_trial_based_config.h"

namespace tgcalls {

extern webrtc::FieldTrialBasedConfig fieldTrialsBasedConfig;

} // namespace tgcalls

#endif
66
TMessagesProj/jni/voip/tgcalls/Instance.cpp
Normal file
@ -0,0 +1,66 @@
#include "Instance.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <stdarg.h>
|
||||
|
||||
namespace tgcalls {
|
||||
namespace {
|
||||
|
||||
std::function<void(std::string const &)> globalLoggingFunction;
|
||||
|
||||
std::map<std::string, std::shared_ptr<Meta>> &MetaMap() {
|
||||
static auto result = std::map<std::string, std::shared_ptr<Meta>>();
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
std::vector<std::string> Meta::Versions() {
|
||||
auto &map = MetaMap();
|
||||
auto result = std::vector<std::string>();
|
||||
result.reserve(map.size());
|
||||
for (const auto &entry : map) {
|
||||
result.push_back(entry.first);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int Meta::MaxLayer() {
|
||||
auto result = 0;
|
||||
for (const auto &entry : MetaMap()) {
|
||||
result = std::max(result, entry.second->connectionMaxLayer());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::unique_ptr<Instance> Meta::Create(
|
||||
const std::string &version,
|
||||
Descriptor &&descriptor) {
|
||||
const auto i = MetaMap().find(version);
|
||||
|
||||
// Enforce correct protocol version.
|
||||
if (version == "2.7.7") {
|
||||
descriptor.config.protocolVersion = ProtocolVersion::V0;
|
||||
} else if (version == "5.0.0") {
|
||||
descriptor.config.protocolVersion = ProtocolVersion::V1;
|
||||
}
|
||||
|
||||
return (i != MetaMap().end())
|
||||
? i->second->construct(std::move(descriptor))
|
||||
: nullptr;
|
||||
}
|
||||
|
||||
void Meta::RegisterOne(std::shared_ptr<Meta> meta) {
|
||||
if (meta) {
|
||||
const auto versions = meta->versions();
|
||||
for (auto &it : versions) {
|
||||
MetaMap().emplace(it, meta);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SetLoggingFunction(std::function<void(std::string const &)> loggingFunction) {
|
||||
globalLoggingFunction = loggingFunction;
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
296
TMessagesProj/jni/voip/tgcalls/Instance.h
Normal file
@ -0,0 +1,296 @@
#ifndef TGCALLS_INSTANCE_H
#define TGCALLS_INSTANCE_H

#include <functional>
#include <vector>
#include <string>
#include <memory>
#include <map>

#include "Stats.h"
#include "DirectConnectionChannel.h"

#include "platform/PlatformInterface.h"

namespace rtc {
template <typename VideoFrameT>
class VideoSinkInterface;
} // namespace rtc

namespace webrtc {
class VideoFrame;
class AudioDeviceModule;
class TaskQueueFactory;
template <class T>
class scoped_refptr;
} // namespace webrtc

namespace tgcalls {

class WrappedAudioDeviceModule;
class VideoCaptureInterface;

struct FilePath {
#ifndef _WIN32
    std::string data;
#else
    std::wstring data;
#endif
};

struct Proxy {
    std::string host;
    uint16_t port = 0;
    std::string login;
    std::string password;
};

struct RtcServer {
    uint8_t id = 0;
    std::string host;
    uint16_t port = 0;
    std::string login;
    std::string password;
    bool isTurn = false;
    bool isTcp = false;
};

enum class EndpointType {
    Inet,
    Lan,
    UdpRelay,
    TcpRelay
};

struct EndpointHost {
    std::string ipv4;
    std::string ipv6;
};

struct Endpoint {
    int64_t endpointId = 0;
    EndpointHost host;
    uint16_t port = 0;
    EndpointType type = EndpointType{};
    unsigned char peerTag[16] = { 0 };
};

enum class ProtocolVersion {
    V0,
    V1 // Low-cost network negotiation
};

enum class NetworkType {
    Unknown,
    Gprs,
    Edge,
    ThirdGeneration,
    Hspa,
    Lte,
    WiFi,
    Ethernet,
    OtherHighSpeed,
    OtherLowSpeed,
    OtherMobile,
    Dialup
};

enum class DataSaving {
    Never,
    Mobile,
    Always
};

struct PersistentState {
    std::vector<uint8_t> value;
};

struct Config {
    double initializationTimeout = 0.;
    double receiveTimeout = 0.;
    DataSaving dataSaving = DataSaving::Never;
    bool enableP2P = false;
    bool allowTCP = false;
    bool enableStunMarking = false;
    bool enableAEC = false;
    bool enableNS = false;
    bool enableAGC = false;
    bool enableCallUpgrade = false;
    bool enableVolumeControl = false;
    FilePath logPath;
    FilePath statsLogPath;
    int maxApiLayer = 0;
    bool enableHighBitrateVideo = false;
    std::vector<std::string> preferredVideoCodecs;
    ProtocolVersion protocolVersion = ProtocolVersion::V0;
    std::string customParameters = "";
};

struct EncryptionKey {
    static constexpr int kSize = 256;

    std::shared_ptr<std::array<uint8_t, kSize>> value;
    bool isOutgoing = false;

    EncryptionKey(
        std::shared_ptr<std::array<uint8_t, kSize>> value,
        bool isOutgoing
    ) : value(value), isOutgoing(isOutgoing) {
    }
};

enum class State {
    WaitInit,
    WaitInitAck,
    Established,
    Failed,
    Reconnecting
};

// Defined in VideoCaptureInterface.h
enum class VideoState;

enum class AudioState {
    Muted,
    Active,
};

struct TrafficStats {
    uint64_t bytesSentWifi = 0;
    uint64_t bytesReceivedWifi = 0;
    uint64_t bytesSentMobile = 0;
    uint64_t bytesReceivedMobile = 0;
};

struct FinalState {
    PersistentState persistentState;
    std::string debugLog;
    TrafficStats trafficStats;
    CallStats callStats;
    bool isRatingSuggested = false;
};

struct MediaDevicesConfig {
    std::string audioInputId;
    std::string audioOutputId;
    float inputVolume = 1.f;
    float outputVolume = 1.f;
};

class Instance {
protected:
    Instance() = default;

public:
    virtual ~Instance() = default;

    virtual void setNetworkType(NetworkType networkType) = 0;
    virtual void setMuteMicrophone(bool muteMicrophone) = 0;
    virtual void setAudioOutputGainControlEnabled(bool enabled) = 0;
    virtual void setEchoCancellationStrength(int strength) = 0;

    virtual bool supportsVideo() = 0;
    virtual void setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;

    virtual void setAudioInputDevice(std::string id) = 0;
    virtual void setAudioOutputDevice(std::string id) = 0;
    virtual void setInputVolume(float level) = 0;
    virtual void setOutputVolume(float level) = 0;
    virtual void setAudioOutputDuckingEnabled(bool enabled) = 0;
    virtual void addExternalAudioSamples(std::vector<uint8_t> &&samples) {
    }

    virtual void setIsLowBatteryLevel(bool isLowBatteryLevel) = 0;

    virtual std::string getLastError() = 0;
    virtual std::string getDebugInfo() = 0;
    virtual int64_t getPreferredRelayId() = 0;
    virtual TrafficStats getTrafficStats() = 0;
    virtual PersistentState getPersistentState() = 0;

    virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
    virtual void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) = 0;
    virtual void sendVideoDeviceUpdated() = 0;
    virtual void setRequestedVideoAspect(float aspect) = 0;

    virtual void stop(std::function<void(FinalState)> completion) = 0;

};

template <typename Implementation>
bool Register();

struct Descriptor {
    std::string version;
    Config config;
    PersistentState persistentState;
    std::vector<Endpoint> endpoints;
    std::unique_ptr<Proxy> proxy;
    std::vector<RtcServer> rtcServers;
    NetworkType initialNetworkType = NetworkType();
    EncryptionKey encryptionKey;
    MediaDevicesConfig mediaDevicesConfig;
    std::shared_ptr<VideoCaptureInterface> videoCapture;
    std::function<void(State)> stateUpdated;
    std::function<void(int)> signalBarsUpdated;
    std::function<void(float, float)> audioLevelsUpdated;
    std::function<void(bool)> remoteBatteryLevelIsLowUpdated;
    std::function<void(AudioState, VideoState)> remoteMediaStateUpdated;
    std::function<void(float)> remotePrefferedAspectRatioUpdated;
    std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted;
    std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> createAudioDeviceModule;
    std::function<webrtc::scoped_refptr<WrappedAudioDeviceModule>(webrtc::TaskQueueFactory*)> createWrappedAudioDeviceModule;
    std::string initialInputDeviceId;
    std::string initialOutputDeviceId;
    std::shared_ptr<DirectConnectionChannel> directConnectionChannel;

    std::shared_ptr<PlatformContext> platformContext;
};

class Meta {
public:
    virtual ~Meta() = default;

    virtual std::unique_ptr<Instance> construct(Descriptor &&descriptor) = 0;
    virtual int connectionMaxLayer() = 0;
    virtual std::vector<std::string> versions() = 0;

    static std::unique_ptr<Instance> Create(
        const std::string &version,
        Descriptor &&descriptor);
    static std::vector<std::string> Versions();
    static int MaxLayer();

private:
    template <typename Implementation>
    friend bool Register();

    template <typename Implementation>
    static bool RegisterOne();
    static void RegisterOne(std::shared_ptr<Meta> meta);

};

template <typename Implementation>
bool Meta::RegisterOne() {
    class MetaImpl final : public Meta {
    public:
        int connectionMaxLayer() override {
            return Implementation::GetConnectionMaxLayer();
        }
        std::vector<std::string> versions() override {
            return Implementation::GetVersions();
        }
        std::unique_ptr<Instance> construct(Descriptor &&descriptor) override {
            return std::make_unique<Implementation>(std::move(descriptor));
        }
    };
    RegisterOne(std::make_shared<MetaImpl>());
    return true;
}

void SetLoggingFunction(std::function<void(std::string const &)> loggingFunction);

} // namespace tgcalls

#endif
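Implementations register through Meta (see Register<InstanceImpl>() in InstanceImpl.cpp below); a hedged sketch of how a caller would construct an instance (illustrative only, the descriptor is assumed fully populated elsewhere):

// Illustrative only: `descriptor` must already carry the encryption key,
// endpoints and callbacks; Meta::Create returns nullptr for unknown versions.
std::unique_ptr<tgcalls::Instance> makeCall(tgcalls::Descriptor &&descriptor) {
    // "2.7.7" is mapped to ProtocolVersion::V0 inside Meta::Create.
    return tgcalls::Meta::Create("2.7.7", std::move(descriptor));
}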
202
TMessagesProj/jni/voip/tgcalls/InstanceImpl.cpp
Normal file
@ -0,0 +1,202 @@
#include "InstanceImpl.h"
|
||||
|
||||
#include "LogSinkImpl.h"
|
||||
#include "Manager.h"
|
||||
#include "MediaManager.h"
|
||||
#include "VideoCaptureInterfaceImpl.h"
|
||||
#include "VideoCapturerInterface.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
rtc::Thread *makeManagerThread() {
|
||||
static std::unique_ptr<rtc::Thread> value = rtc::Thread::Create();
|
||||
value->SetName("WebRTC-Manager", nullptr);
|
||||
value->Start();
|
||||
return value.get();
|
||||
}
|
||||
|
||||
|
||||
rtc::Thread *getManagerThread() {
|
||||
static rtc::Thread *value = makeManagerThread();
|
||||
return value;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
InstanceImpl::InstanceImpl(Descriptor &&descriptor)
|
||||
: _logSink(std::make_unique<LogSinkImpl>(descriptor.config.logPath)) {
|
||||
rtc::LogMessage::LogToDebug(rtc::LS_INFO);
|
||||
rtc::LogMessage::SetLogToStderr(false);
|
||||
rtc::LogMessage::AddLogToStream(_logSink.get(), rtc::LS_INFO);
|
||||
|
||||
auto networkType = descriptor.initialNetworkType;
|
||||
|
||||
_manager.reset(new ThreadLocalObject<Manager>(getManagerThread(), [descriptor = std::move(descriptor)]() mutable {
|
||||
return std::make_shared<Manager>(getManagerThread(), std::move(descriptor));
|
||||
}));
|
||||
_manager->perform([](Manager *manager) {
|
||||
manager->start();
|
||||
});
|
||||
|
||||
setNetworkType(networkType);
|
||||
}
|
||||
|
||||
InstanceImpl::~InstanceImpl() {
|
||||
rtc::LogMessage::RemoveLogToStream(_logSink.get());
|
||||
}
|
||||
|
||||
void InstanceImpl::receiveSignalingData(const std::vector<uint8_t> &data) {
|
||||
_manager->perform([data](Manager *manager) {
|
||||
manager->receiveSignalingData(data);
|
||||
});
|
||||
};
|
||||
|
||||
void InstanceImpl::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) {
|
||||
_manager->perform([videoCapture](Manager *manager) {
|
||||
manager->setVideoCapture(videoCapture);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::sendVideoDeviceUpdated() {
|
||||
_manager->perform([](Manager *manager) {
|
||||
manager->sendVideoDeviceUpdated();
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setRequestedVideoAspect(float aspect) {
|
||||
_manager->perform([aspect](Manager *manager) {
|
||||
manager->setRequestedVideoAspect(aspect);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setNetworkType(NetworkType networkType) {
|
||||
bool isLowCostNetwork = false;
|
||||
switch (networkType) {
|
||||
case NetworkType::WiFi:
|
||||
case NetworkType::Ethernet:
|
||||
isLowCostNetwork = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
_manager->perform([isLowCostNetwork](Manager *manager) {
|
||||
manager->setIsLocalNetworkLowCost(isLowCostNetwork);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setMuteMicrophone(bool muteMicrophone) {
|
||||
_manager->perform([muteMicrophone](Manager *manager) {
|
||||
manager->setMuteOutgoingAudio(muteMicrophone);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
|
||||
_manager->perform([sink](Manager *manager) {
|
||||
manager->setIncomingVideoOutput(sink);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setAudioOutputGainControlEnabled(bool enabled) {
|
||||
}
|
||||
|
||||
void InstanceImpl::setEchoCancellationStrength(int strength) {
|
||||
}
|
||||
|
||||
void InstanceImpl::setAudioInputDevice(std::string id) {
|
||||
_manager->perform([id](Manager *manager) {
|
||||
manager->setAudioInputDevice(id);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setAudioOutputDevice(std::string id) {
|
||||
_manager->perform([id](Manager *manager) {
|
||||
manager->setAudioOutputDevice(id);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setInputVolume(float level) {
|
||||
_manager->perform([level](Manager *manager) {
|
||||
manager->setInputVolume(level);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setOutputVolume(float level) {
|
||||
_manager->perform([level](Manager *manager) {
|
||||
manager->setOutputVolume(level);
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setAudioOutputDuckingEnabled(bool enabled) {
|
||||
// TODO: not implemented
|
||||
}
|
||||
|
||||
void InstanceImpl::addExternalAudioSamples(std::vector<uint8_t> &&samples) {
|
||||
_manager->perform([samples = std::move(samples)](Manager *manager) mutable {
|
||||
manager->addExternalAudioSamples(std::move(samples));
|
||||
});
|
||||
}
|
||||
|
||||
void InstanceImpl::setIsLowBatteryLevel(bool isLowBatteryLevel) {
|
||||
_manager->perform([isLowBatteryLevel](Manager *manager) {
|
||||
manager->setIsLowBatteryLevel(isLowBatteryLevel);
|
||||
});
|
||||
}
|
||||
|
||||
std::string InstanceImpl::getLastError() {
|
||||
return ""; // TODO: not implemented
|
||||
}
|
||||
|
||||
std::string InstanceImpl::getDebugInfo() {
|
||||
return ""; // TODO: not implemented
|
||||
}
|
||||
|
||||
int64_t InstanceImpl::getPreferredRelayId() {
|
||||
return 0; // we don't have endpoint ids
|
||||
}
|
||||
|
||||
TrafficStats InstanceImpl::getTrafficStats() {
|
||||
return TrafficStats{}; // TODO: not implemented
|
||||
}
|
||||
|
||||
PersistentState InstanceImpl::getPersistentState() {
|
||||
return PersistentState{}; // we dont't have such information
|
||||
}
|
||||
|
||||
void InstanceImpl::stop(std::function<void(FinalState)> completion) {
|
||||
RTC_LOG(LS_INFO) << "Stopping InstanceImpl";
|
||||
|
||||
std::string debugLog = _logSink->result();
|
||||
|
||||
_manager->perform([completion, debugLog = std::move(debugLog)](Manager *manager) {
|
||||
manager->getNetworkStats([completion, debugLog = std::move(debugLog)](TrafficStats stats, CallStats callStats) {
|
||||
FinalState finalState;
|
||||
finalState.debugLog = debugLog;
|
||||
finalState.isRatingSuggested = false;
|
||||
finalState.trafficStats = stats;
|
||||
finalState.callStats = callStats;
|
||||
|
||||
completion(finalState);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
int InstanceImpl::GetConnectionMaxLayer() {
|
||||
return 92;
|
||||
}
|
||||
|
||||
std::vector<std::string> InstanceImpl::GetVersions() {
|
||||
std::vector<std::string> result;
|
||||
result.push_back("2.7.7");
|
||||
result.push_back("5.0.0");
|
||||
return result;
|
||||
}
|
||||
|
||||
template <>
|
||||
bool Register<InstanceImpl>() {
|
||||
return Meta::RegisterOne<InstanceImpl>();
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
56
TMessagesProj/jni/voip/tgcalls/InstanceImpl.h
Normal file
@ -0,0 +1,56 @@
#ifndef TGCALLS_INSTANCE_IMPL_H
#define TGCALLS_INSTANCE_IMPL_H

#include "Instance.h"

namespace tgcalls {

class LogSinkImpl;

class Manager;
template <typename T>
class ThreadLocalObject;

class InstanceImpl final : public Instance {
public:
    explicit InstanceImpl(Descriptor &&descriptor);
    ~InstanceImpl() override;

    static int GetConnectionMaxLayer();
    static std::vector<std::string> GetVersions();

    void receiveSignalingData(const std::vector<uint8_t> &data) override;
    void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) override;
    void sendVideoDeviceUpdated() override;
    void setRequestedVideoAspect(float aspect) override;
    void setNetworkType(NetworkType networkType) override;
    void setMuteMicrophone(bool muteMicrophone) override;
    bool supportsVideo() override {
        return true;
    }
    void setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
    void setAudioOutputGainControlEnabled(bool enabled) override;
    void setEchoCancellationStrength(int strength) override;
    void setAudioInputDevice(std::string id) override;
    void setAudioOutputDevice(std::string id) override;
    void setInputVolume(float level) override;
    void setOutputVolume(float level) override;
    void setAudioOutputDuckingEnabled(bool enabled) override;
    void addExternalAudioSamples(std::vector<uint8_t> &&samples) override;
    void setIsLowBatteryLevel(bool isLowBatteryLevel) override;
    std::string getLastError() override;
    std::string getDebugInfo() override;
    int64_t getPreferredRelayId() override;
    TrafficStats getTrafficStats() override;
    PersistentState getPersistentState() override;
    void stop(std::function<void(FinalState)> completion) override;

private:
    std::unique_ptr<ThreadLocalObject<Manager>> _manager;
    std::unique_ptr<LogSinkImpl> _logSink;

};

} // namespace tgcalls

#endif
71
TMessagesProj/jni/voip/tgcalls/LogSinkImpl.cpp
Normal file
@ -0,0 +1,71 @@
#include "LogSinkImpl.h"
|
||||
|
||||
#include "Instance.h"
|
||||
|
||||
#ifdef WEBRTC_WIN
|
||||
#include "windows.h"
|
||||
#include <ctime>
|
||||
#else // WEBRTC_WIN
|
||||
#include <sys/time.h>
|
||||
#endif // WEBRTC_WIN
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
LogSinkImpl::LogSinkImpl(const FilePath &logPath) {
|
||||
if (!logPath.data.empty()) {
|
||||
_file.open(logPath.data);
|
||||
}
|
||||
}
|
||||
|
||||
void LogSinkImpl::OnLogMessage(const std::string &msg, rtc::LoggingSeverity severity, const char *tag) {
|
||||
OnLogMessage(std::string(tag) + ": " + msg);
|
||||
}
|
||||
|
||||
void LogSinkImpl::OnLogMessage(const std::string &message, rtc::LoggingSeverity severity) {
|
||||
OnLogMessage(message);
|
||||
}
|
||||
|
||||
void LogSinkImpl::OnLogMessage(const std::string &message) {
|
||||
time_t rawTime;
|
||||
time(&rawTime);
|
||||
struct tm timeinfo;
|
||||
|
||||
#ifdef WEBRTC_WIN
|
||||
localtime_s(&timeinfo, &rawTime);
|
||||
|
||||
FILETIME ft;
|
||||
unsigned __int64 full = 0;
|
||||
GetSystemTimeAsFileTime(&ft);
|
||||
|
||||
full |= ft.dwHighDateTime;
|
||||
full <<= 32;
|
||||
full |= ft.dwLowDateTime;
|
||||
|
||||
const auto deltaEpochInMicrosecs = 11644473600000000Ui64;
|
||||
full -= deltaEpochInMicrosecs;
|
||||
full /= 10;
|
||||
int32_t milliseconds = (long)(full % 1000000UL) / 1000;
|
||||
#else
|
||||
timeval curTime = { 0 };
|
||||
localtime_r(&rawTime, &timeinfo);
|
||||
gettimeofday(&curTime, nullptr);
|
||||
int32_t milliseconds = curTime.tv_usec / 1000;
|
||||
#endif
|
||||
|
||||
auto &stream = _file.is_open() ? (std::ostream&)_file : _data;
|
||||
stream
|
||||
<< (timeinfo.tm_year + 1900)
|
||||
<< "-" << (timeinfo.tm_mon + 1)
|
||||
<< "-" << (timeinfo.tm_mday)
|
||||
<< " " << timeinfo.tm_hour
|
||||
<< ":" << timeinfo.tm_min
|
||||
<< ":" << timeinfo.tm_sec
|
||||
<< ":" << milliseconds
|
||||
<< " " << message;
|
||||
|
||||
#if DEBUG
|
||||
printf("%d-%d-%d %d:%d:%d:%d %s\n", timeinfo.tm_year + 1900, timeinfo.tm_mon + 1, timeinfo.tm_mday, timeinfo.tm_hour, timeinfo.tm_min, timeinfo.tm_sec, milliseconds, message.c_str());
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
31
TMessagesProj/jni/voip/tgcalls/LogSinkImpl.h
Normal file
@ -0,0 +1,31 @@
#ifndef TGCALLS_LOG_SINK_IMPL_H
#define TGCALLS_LOG_SINK_IMPL_H

#include "rtc_base/logging.h"
#include <fstream>
#include <sstream>

namespace tgcalls {

struct FilePath;

class LogSinkImpl final : public rtc::LogSink {
public:
    LogSinkImpl(const FilePath &logPath);

    void OnLogMessage(const std::string &msg, rtc::LoggingSeverity severity, const char *tag) override;
    void OnLogMessage(const std::string &message, rtc::LoggingSeverity severity) override;
    void OnLogMessage(const std::string &message) override;

    std::string result() const {
        return _data.str();
    }

private:
    std::ofstream _file;
    std::ostringstream _data;

};

} // namespace tgcalls

#endif
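A minimal usage sketch (editorial, not part of the commit): the sink is registered with WebRTC's logger via `rtc::LogMessage::AddLogToStream`, which is how Instance implementations typically wire it up. The path below is illustrative, and `FilePath::data` is assumed to be a narrow string on the target platform:

#include "LogSinkImpl.h"

#include "Instance.h"
#include "rtc_base/logging.h"

void AttachTgcallsLogSink() {
    tgcalls::FilePath logPath;
    logPath.data = "/tmp/tgcalls.log"; // hypothetical location
    // The sink must outlive the registration, hence static here.
    static tgcalls::LogSinkImpl sink(logPath);
    rtc::LogMessage::AddLogToStream(&sink, rtc::LS_INFO);
}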
467
TMessagesProj/jni/voip/tgcalls/Manager.cpp
Normal file

@ -0,0 +1,467 @@
#include "Manager.h"
|
||||
|
||||
#include "rtc_base/byte_buffer.h"
|
||||
#include "StaticThreads.h"
|
||||
|
||||
#include <fstream>
|
||||
|
||||
namespace tgcalls {
|
||||
namespace {
|
||||
|
||||
void dumpStatsLog(const FilePath &path, const CallStats &stats) {
|
||||
if (path.data.empty()) {
|
||||
return;
|
||||
}
|
||||
std::ofstream file;
|
||||
file.open(path.data);
|
||||
|
||||
file << "{";
|
||||
file << "\"v\":\"" << 1 << "\"";
|
||||
file << ",";
|
||||
|
||||
file << "\"codec\":\"" << stats.outgoingCodec << "\"";
|
||||
file << ",";
|
||||
|
||||
file << "\"bitrate\":[";
|
||||
bool addComma = false;
|
||||
for (auto &it : stats.bitrateRecords) {
|
||||
if (addComma) {
|
||||
file << ",";
|
||||
}
|
||||
file << "{";
|
||||
file << "\"t\":\"" << it.timestamp << "\"";
|
||||
file << ",";
|
||||
file << "\"b\":\"" << it.bitrate << "\"";
|
||||
file << "}";
|
||||
addComma = true;
|
||||
}
|
||||
file << "]";
|
||||
file << ",";
|
||||
|
||||
file << "\"network\":[";
|
||||
addComma = false;
|
||||
for (auto &it : stats.networkRecords) {
|
||||
if (addComma) {
|
||||
file << ",";
|
||||
}
|
||||
file << "{";
|
||||
file << "\"t\":\"" << it.timestamp << "\"";
|
||||
file << ",";
|
||||
file << "\"e\":\"" << (int)(it.endpointType) << "\"";
|
||||
file << ",";
|
||||
file << "\"w\":\"" << (it.isLowCost ? 1 : 0) << "\"";
|
||||
file << "}";
|
||||
addComma = true;
|
||||
}
|
||||
file << "]";
|
||||
|
||||
file << "}";
|
||||
|
||||
file.close();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
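// Editorial note: dumpStatsLog writes a single JSON object shaped like the
// following (values illustrative):
//   {"v":"1","codec":"VP8","bitrate":[{"t":"1620000000","b":"512"}],
//    "network":[{"t":"1620000000","e":"0","w":"1"}]}
// "t" is a unix timestamp in seconds, "b" a sampled bitrate, "e" the endpoint
// type as an integer, and "w" whether the local network was low-cost.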

bool Manager::ResolvedNetworkStatus::operator==(const ResolvedNetworkStatus &rhs) const {
    if (rhs.isLowCost != isLowCost) {
        return false;
    }
    if (rhs.isLowDataRequested != isLowDataRequested) {
        return false;
    }
    return true;
}

bool Manager::ResolvedNetworkStatus::operator!=(const ResolvedNetworkStatus &rhs) const {
    return !(*this == rhs);
}

Manager::Manager(rtc::Thread *thread, Descriptor &&descriptor) :
_thread(thread),
_encryptionKey(descriptor.encryptionKey),
_signaling(
    EncryptedConnection::Type::Signaling,
    _encryptionKey,
    [=](int delayMs, int cause) { sendSignalingAsync(delayMs, cause); }),
_enableP2P(descriptor.config.enableP2P),
_enableTCP(descriptor.config.allowTCP),
_enableStunMarking(descriptor.config.enableStunMarking),
_protocolVersion(descriptor.config.protocolVersion),
_statsLogPath(descriptor.config.statsLogPath),
_rtcServers(std::move(descriptor.rtcServers)),
_proxy(std::move(descriptor.proxy)),
_mediaDevicesConfig(std::move(descriptor.mediaDevicesConfig)),
_videoCapture(std::move(descriptor.videoCapture)),
_stateUpdated(std::move(descriptor.stateUpdated)),
_remoteMediaStateUpdated(std::move(descriptor.remoteMediaStateUpdated)),
_remoteBatteryLevelIsLowUpdated(std::move(descriptor.remoteBatteryLevelIsLowUpdated)),
_remotePrefferedAspectRatioUpdated(std::move(descriptor.remotePrefferedAspectRatioUpdated)),
_signalingDataEmitted(std::move(descriptor.signalingDataEmitted)),
_signalBarsUpdated(std::move(descriptor.signalBarsUpdated)),
_audioLevelsUpdated(std::move(descriptor.audioLevelsUpdated)),
_createAudioDeviceModule(std::move(descriptor.createAudioDeviceModule)),
_enableHighBitrateVideo(descriptor.config.enableHighBitrateVideo),
_dataSaving(descriptor.config.dataSaving),
_platformContext(descriptor.platformContext) {
    assert(_thread->IsCurrent());
    assert(_stateUpdated != nullptr);
    assert(_signalingDataEmitted != nullptr);

    _preferredCodecs = descriptor.config.preferredVideoCodecs;

    _sendSignalingMessage = [=](const Message &message) {
        if (const auto prepared = _signaling.prepareForSending(message)) {
            _signalingDataEmitted(prepared->bytes);
            return prepared->counter;
        }
        return uint32_t(0);
    };
    _sendTransportMessage = [=](Message &&message) {
        _networkManager->perform([message = std::move(message)](NetworkManager *networkManager) {
            networkManager->sendMessage(message);
        });
    };
}

Manager::~Manager() {
    assert(_thread->IsCurrent());
}

void Manager::sendSignalingAsync(int delayMs, int cause) {
    auto task = [weak = std::weak_ptr<Manager>(shared_from_this()), cause] {
        const auto strong = weak.lock();
        if (!strong) {
            return;
        }
        if (const auto prepared = strong->_signaling.prepareForSendingService(cause)) {
            strong->_signalingDataEmitted(prepared->bytes);
        }
    };
    if (delayMs) {
        _thread->PostDelayedTask(std::move(task), webrtc::TimeDelta::Millis(delayMs));
    } else {
        _thread->PostTask(std::move(task));
    }
}

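// Editorial note: the pattern above recurs throughout Manager. Work is posted
// to _thread with a std::weak_ptr captured from shared_from_this(), and each
// task re-locks the pointer before touching members, so tasks that outlive
// the Manager become harmless no-ops:
//
//   auto task = [weak = std::weak_ptr<Manager>(shared_from_this())] {
//       if (const auto strong = weak.lock()) {
//           // safe to use strong-> here
//       }
//   };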
void Manager::start() {
    const auto weak = std::weak_ptr<Manager>(shared_from_this());
    const auto thread = _thread;
    const auto sendSignalingMessage = [=](Message &&message) {
        thread->PostTask([=, message = std::move(message)]() mutable {
            const auto strong = weak.lock();
            if (!strong) {
                return;
            }
            strong->_sendSignalingMessage(std::move(message));
        });
    };
    _networkManager.reset(new ThreadLocalObject<NetworkManager>(StaticThreads::getNetworkThread(), [weak, thread, sendSignalingMessage, encryptionKey = _encryptionKey, enableP2P = _enableP2P, enableTCP = _enableTCP, enableStunMarking = _enableStunMarking, rtcServers = _rtcServers, proxy = std::move(_proxy)] () mutable {
        return std::make_shared<NetworkManager>(
            StaticThreads::getNetworkThread(),
            encryptionKey,
            enableP2P,
            enableTCP,
            enableStunMarking,
            rtcServers,
            std::move(proxy),
            [=](const NetworkManager::State &state) {
                thread->PostTask([=] {
                    const auto strong = weak.lock();
                    if (!strong) {
                        return;
                    }
                    State mappedState;
                    if (state.isFailed) {
                        mappedState = State::Failed;
                    } else {
                        mappedState = state.isReadyToSendData
                            ? State::Established
                            : State::Reconnecting;
                    }
                    bool isFirstConnection = false;
                    if (state.isReadyToSendData) {
                        if (!strong->_didConnectOnce) {
                            strong->_didConnectOnce = true;
                            isFirstConnection = true;
                        }
                    }
                    strong->_state = mappedState;
                    strong->_stateUpdated(mappedState);

                    strong->_mediaManager->perform([=](MediaManager *mediaManager) {
                        mediaManager->setIsConnected(state.isReadyToSendData);
                    });

                    if (isFirstConnection) {
                        strong->sendInitialSignalingMessages();
                    }
                });
            },
            [=](DecryptedMessage &&message) {
                thread->PostTask([=, message = std::move(message)]() mutable {
                    if (const auto strong = weak.lock()) {
                        strong->receiveMessage(std::move(message));
                    }
                });
            },
            sendSignalingMessage,
            [=](int delayMs, int cause) {
                const auto task = [=] {
                    if (const auto strong = weak.lock()) {
                        strong->_networkManager->perform([=](NetworkManager *networkManager) {
                            networkManager->sendTransportService(cause);
                        });
                    }
                };
                if (delayMs) {
                    thread->PostDelayedTask(task, webrtc::TimeDelta::Millis(delayMs));
                } else {
                    thread->PostTask(task);
                }
            });
    }));
    bool isOutgoing = _encryptionKey.isOutgoing;
    _mediaManager.reset(new ThreadLocalObject<MediaManager>(StaticThreads::getMediaThread(), [weak, isOutgoing, protocolVersion = _protocolVersion, thread, sendSignalingMessage, videoCapture = _videoCapture, mediaDevicesConfig = _mediaDevicesConfig, enableHighBitrateVideo = _enableHighBitrateVideo, signalBarsUpdated = _signalBarsUpdated, audioLevelsUpdated = _audioLevelsUpdated, preferredCodecs = _preferredCodecs, createAudioDeviceModule = _createAudioDeviceModule, platformContext = _platformContext]() {
        return std::make_shared<MediaManager>(
            StaticThreads::getMediaThread(),
            isOutgoing,
            protocolVersion,
            mediaDevicesConfig,
            videoCapture,
            sendSignalingMessage,
            [=](Message &&message) {
                thread->PostTask([=, message = std::move(message)]() mutable {
                    const auto strong = weak.lock();
                    if (!strong) {
                        return;
                    }
                    strong->_sendTransportMessage(std::move(message));
                });
            },
            signalBarsUpdated,
            audioLevelsUpdated,
            createAudioDeviceModule,
            enableHighBitrateVideo,
            preferredCodecs,
            platformContext);
    }));
    _networkManager->perform([](NetworkManager *networkManager) {
        networkManager->start();
    });
    _mediaManager->perform([](MediaManager *mediaManager) {
        mediaManager->start();
    });
}

void Manager::receiveSignalingData(const std::vector<uint8_t> &data) {
    if (auto decrypted = _signaling.handleIncomingPacket((const char*)data.data(), data.size())) {
        receiveMessage(std::move(decrypted->main));
        for (auto &message : decrypted->additional) {
            receiveMessage(std::move(message));
        }
    }
}

void Manager::receiveMessage(DecryptedMessage &&message) {
    const auto data = &message.message.data;
    if (const auto candidatesList = absl::get_if<CandidatesListMessage>(data)) {
        _networkManager->perform([message = std::move(message)](NetworkManager *networkManager) mutable {
            networkManager->receiveSignalingMessage(std::move(message));
        });
    } else if (const auto videoFormats = absl::get_if<VideoFormatsMessage>(data)) {
        _mediaManager->perform([message = std::move(message)](MediaManager *mediaManager) mutable {
            mediaManager->receiveMessage(std::move(message));
        });
    } else if (const auto remoteMediaState = absl::get_if<RemoteMediaStateMessage>(data)) {
        if (_remoteMediaStateUpdated) {
            _remoteMediaStateUpdated(
                remoteMediaState->audio,
                remoteMediaState->video);
        }
        _mediaManager->perform([video = remoteMediaState->video](MediaManager *mediaManager) {
            mediaManager->remoteVideoStateUpdated(video);
        });
    } else if (const auto remoteBatteryLevelIsLow = absl::get_if<RemoteBatteryLevelIsLowMessage>(data)) {
        if (_remoteBatteryLevelIsLowUpdated) {
            _remoteBatteryLevelIsLowUpdated(remoteBatteryLevelIsLow->batteryLow);
        }
    } else if (const auto remoteNetworkStatus = absl::get_if<RemoteNetworkStatusMessage>(data)) {
        _remoteNetworkIsLowCost = remoteNetworkStatus->isLowCost;
        _remoteIsLowDataRequested = remoteNetworkStatus->isLowDataRequested;
        updateCurrentResolvedNetworkStatus();
    } else {
        if (const auto videoParameters = absl::get_if<VideoParametersMessage>(data)) {
            // The aspect ratio is transmitted as an integer scaled by 1000.
            float value = ((float)videoParameters->aspectRatio) / 1000.0f;
            if (_remotePrefferedAspectRatioUpdated) {
                _remotePrefferedAspectRatioUpdated(value);
            }
        }
        _mediaManager->perform([=, message = std::move(message)](MediaManager *mediaManager) mutable {
            mediaManager->receiveMessage(std::move(message));
        });
    }
}

void Manager::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) {
    assert(_didConnectOnce);

    if (_videoCapture == videoCapture) {
        return;
    }
    _videoCapture = videoCapture;
    _mediaManager->perform([videoCapture](MediaManager *mediaManager) {
        mediaManager->setSendVideo(videoCapture);
    });
}

void Manager::sendVideoDeviceUpdated() {
    _mediaManager->perform([](MediaManager *mediaManager) {
        mediaManager->sendVideoDeviceUpdated();
    });
}

void Manager::setRequestedVideoAspect(float aspect) {
    _mediaManager->perform([aspect](MediaManager *mediaManager) {
        mediaManager->setRequestedVideoAspect(aspect);
    });
}

void Manager::setMuteOutgoingAudio(bool mute) {
    _mediaManager->perform([mute](MediaManager *mediaManager) {
        mediaManager->setMuteOutgoingAudio(mute);
    });
}

void Manager::setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _mediaManager->perform([sink](MediaManager *mediaManager) {
        mediaManager->setIncomingVideoOutput(sink);
    });
}

void Manager::setIsLowBatteryLevel(bool isLowBatteryLevel) {
    _sendTransportMessage({ RemoteBatteryLevelIsLowMessage{ isLowBatteryLevel } });
}

void Manager::setIsLocalNetworkLowCost(bool isLocalNetworkLowCost) {
    if (isLocalNetworkLowCost != _localNetworkIsLowCost) {
        _networkManager->perform([isLocalNetworkLowCost](NetworkManager *networkManager) {
            networkManager->setIsLocalNetworkLowCost(isLocalNetworkLowCost);
        });

        _localNetworkIsLowCost = isLocalNetworkLowCost;
        updateCurrentResolvedNetworkStatus();
    }
}

void Manager::getNetworkStats(std::function<void (TrafficStats, CallStats)> completion) {
    _networkManager->perform([thread = _thread, weak = std::weak_ptr<Manager>(shared_from_this()), completion = std::move(completion), statsLogPath = _statsLogPath](NetworkManager *networkManager) {
        auto networkStats = networkManager->getNetworkStats();

        CallStats callStats;
        networkManager->fillCallStats(callStats);

        thread->PostTask([weak, networkStats, completion = std::move(completion), callStats = std::move(callStats), statsLogPath = statsLogPath] {
            const auto strong = weak.lock();
            if (!strong) {
                return;
            }

            strong->_mediaManager->perform([networkStats, completion = std::move(completion), callStatsValue = std::move(callStats), statsLogPath = statsLogPath](MediaManager *mediaManager) {
                CallStats callStats = std::move(callStatsValue);
                mediaManager->fillCallStats(callStats);
                dumpStatsLog(statsLogPath, callStats);
                completion(networkStats, callStats);
            });
        });
    });
}

void Manager::updateCurrentResolvedNetworkStatus() {
    bool localIsLowDataRequested = false;
    switch (_dataSaving) {
        case DataSaving::Never:
            localIsLowDataRequested = false;
            break;
        case DataSaving::Mobile:
            localIsLowDataRequested = !_localNetworkIsLowCost;
            break;
        case DataSaving::Always:
            localIsLowDataRequested = true;
            break;
        default:
            break;
    }

    ResolvedNetworkStatus localStatus;
    localStatus.isLowCost = _localNetworkIsLowCost;
    localStatus.isLowDataRequested = localIsLowDataRequested;

    if (!_currentResolvedLocalNetworkStatus.has_value() || *_currentResolvedLocalNetworkStatus != localStatus) {
        _currentResolvedLocalNetworkStatus = localStatus;

        switch (_protocolVersion) {
            case ProtocolVersion::V1:
                if (_didConnectOnce) {
                    _sendTransportMessage({ RemoteNetworkStatusMessage{ localStatus.isLowCost, localStatus.isLowDataRequested } });
                }
                break;
            default:
                break;
        }
    }

    ResolvedNetworkStatus status;
    status.isLowCost = _localNetworkIsLowCost && _remoteNetworkIsLowCost;
    status.isLowDataRequested = localIsLowDataRequested || _remoteIsLowDataRequested;

    if (!_currentResolvedNetworkStatus.has_value() || *_currentResolvedNetworkStatus != status) {
        _currentResolvedNetworkStatus = status;
        _mediaManager->perform([status](MediaManager *mediaManager) {
            mediaManager->setNetworkParameters(status.isLowCost, status.isLowDataRequested);
        });
    }
}

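// Editorial summary of the DataSaving mapping above:
//   Never  -> low data never requested
//   Mobile -> low data requested only when the local network is not low-cost
//   Always -> low data always requested
// The resolved status then ANDs isLowCost and ORs isLowDataRequested across
// both peers before being handed to the MediaManager.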
void Manager::sendInitialSignalingMessages() {
    if (_currentResolvedLocalNetworkStatus.has_value()) {
        switch (_protocolVersion) {
            case ProtocolVersion::V1:
                _sendTransportMessage({ RemoteNetworkStatusMessage{ _currentResolvedLocalNetworkStatus->isLowCost, _currentResolvedLocalNetworkStatus->isLowDataRequested } });
                break;
            default:
                break;
        }
    }
}

void Manager::setAudioInputDevice(std::string id) {
    _mediaManager->perform([id](MediaManager *mediaManager) {
        mediaManager->setAudioInputDevice(id);
    });
}

void Manager::setAudioOutputDevice(std::string id) {
    _mediaManager->perform([id](MediaManager *mediaManager) {
        mediaManager->setAudioOutputDevice(id);
    });
}

void Manager::setInputVolume(float level) {
    _mediaManager->perform([level](MediaManager *mediaManager) {
        mediaManager->setInputVolume(level);
    });
}

void Manager::setOutputVolume(float level) {
    _mediaManager->perform([level](MediaManager *mediaManager) {
        mediaManager->setOutputVolume(level);
    });
}

void Manager::addExternalAudioSamples(std::vector<uint8_t> &&samples) {
    _mediaManager->perform([samples = std::move(samples)](MediaManager *mediaManager) mutable {
        mediaManager->addExternalAudioSamples(std::move(samples));
    });
}

} // namespace tgcalls
95
TMessagesProj/jni/voip/tgcalls/Manager.h
Normal file

@ -0,0 +1,95 @@
#ifndef TGCALLS_MANAGER_H
#define TGCALLS_MANAGER_H

#include "ThreadLocalObject.h"
#include "EncryptedConnection.h"
#include "NetworkManager.h"
#include "MediaManager.h"
#include "Instance.h"
#include "Stats.h"

namespace tgcalls {

class Manager final : public std::enable_shared_from_this<Manager> {
private:
    struct ResolvedNetworkStatus {
        bool isLowCost = false;
        bool isLowDataRequested = false;

        bool operator==(const ResolvedNetworkStatus &rhs) const;
        bool operator!=(const ResolvedNetworkStatus &rhs) const;
    };

public:
    static rtc::Thread *getMediaThread();

    Manager(rtc::Thread *thread, Descriptor &&descriptor);
    ~Manager();

    void start();
    void receiveSignalingData(const std::vector<uint8_t> &data);
    void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture);
    void sendVideoDeviceUpdated();
    void setRequestedVideoAspect(float aspect);
    void setMuteOutgoingAudio(bool mute);
    void setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setIsLowBatteryLevel(bool isLowBatteryLevel);
    void setIsLocalNetworkLowCost(bool isLocalNetworkLowCost);
    void getNetworkStats(std::function<void(TrafficStats, CallStats)> completion);

    void setAudioInputDevice(std::string id);
    void setAudioOutputDevice(std::string id);
    void setInputVolume(float level);
    void setOutputVolume(float level);

    void addExternalAudioSamples(std::vector<uint8_t> &&samples);

private:
    void sendSignalingAsync(int delayMs, int cause);
    void receiveMessage(DecryptedMessage &&message);
    void updateCurrentResolvedNetworkStatus();
    void sendInitialSignalingMessages();

    rtc::Thread *_thread;
    EncryptionKey _encryptionKey;
    EncryptedConnection _signaling;
    bool _enableP2P = false;
    bool _enableTCP = false;
    bool _enableStunMarking = false;
    ProtocolVersion _protocolVersion = ProtocolVersion::V0;
    FilePath _statsLogPath;
    std::vector<RtcServer> _rtcServers;
    std::unique_ptr<Proxy> _proxy;
    MediaDevicesConfig _mediaDevicesConfig;
    std::shared_ptr<VideoCaptureInterface> _videoCapture;
    std::function<void(State)> _stateUpdated;
    std::function<void(AudioState, VideoState)> _remoteMediaStateUpdated;
    std::function<void(bool)> _remoteBatteryLevelIsLowUpdated;
    std::function<void(float)> _remotePrefferedAspectRatioUpdated;
    std::function<void(const std::vector<uint8_t> &)> _signalingDataEmitted;
    std::function<void(int)> _signalBarsUpdated;
    std::function<void(float, float)> _audioLevelsUpdated;
    std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> _createAudioDeviceModule;
    std::function<uint32_t(const Message &)> _sendSignalingMessage;
    std::function<void(Message&&)> _sendTransportMessage;
    std::unique_ptr<ThreadLocalObject<NetworkManager>> _networkManager;
    std::unique_ptr<ThreadLocalObject<MediaManager>> _mediaManager;
    State _state = State::Reconnecting;
    bool _didConnectOnce = false;
    bool _enableHighBitrateVideo = false;
    DataSaving _dataSaving = DataSaving::Never;
    std::vector<std::string> _preferredCodecs;
    bool _localNetworkIsLowCost = false;
    bool _remoteNetworkIsLowCost = false;
    bool _remoteIsLowDataRequested = false;
    absl::optional<ResolvedNetworkStatus> _currentResolvedLocalNetworkStatus;
    absl::optional<ResolvedNetworkStatus> _currentResolvedNetworkStatus;

    std::shared_ptr<PlatformContext> _platformContext;

};

} // namespace tgcalls

#endif
1177
TMessagesProj/jni/voip/tgcalls/MediaManager.cpp
Normal file
File diff suppressed because it is too large

197
TMessagesProj/jni/voip/tgcalls/MediaManager.h
Normal file

@ -0,0 +1,197 @@
#ifndef TGCALLS_MEDIA_MANAGER_H
#define TGCALLS_MEDIA_MANAGER_H

#include "rtc_base/thread.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "api/transport/field_trial_based_config.h"
#include "pc/rtp_sender.h"
#include "media/base/media_channel.h"
#include "pc/media_factory.h"
#include "api/environment/environment.h"

#include "Instance.h"
#include "Message.h"
#include "VideoCaptureInterface.h"
#include "Stats.h"

#include <functional>
#include <memory>

namespace webrtc {
class Call;
class RtcEventLogNull;
class TaskQueueFactory;
class VideoBitrateAllocatorFactory;
class VideoTrackSourceInterface;
class AudioDeviceModule;
} // namespace webrtc

namespace cricket {
class MediaEngineInterface;
class VoiceMediaChannel;
class VideoMediaChannel;
} // namespace cricket

namespace tgcalls {

class VideoSinkInterfaceProxyImpl;

class MediaManager : public sigslot::has_slots<>, public std::enable_shared_from_this<MediaManager> {
public:
    static rtc::Thread *getWorkerThread();

    MediaManager(
        rtc::Thread *thread,
        bool isOutgoing,
        ProtocolVersion protocolVersion,
        const MediaDevicesConfig &devicesConfig,
        std::shared_ptr<VideoCaptureInterface> videoCapture,
        std::function<void(Message &&)> sendSignalingMessage,
        std::function<void(Message &&)> sendTransportMessage,
        std::function<void(int)> signalBarsUpdated,
        std::function<void(float, float)> audioLevelUpdated,
        std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> createAudioDeviceModule,
        bool enableHighBitrateVideo,
        std::vector<std::string> preferredCodecs,
        std::shared_ptr<PlatformContext> platformContext);
    ~MediaManager();

    void start();
    void setIsConnected(bool isConnected);
    void notifyPacketSent(const rtc::SentPacket &sentPacket);
    void setSendVideo(std::shared_ptr<VideoCaptureInterface> videoCapture);
    void sendVideoDeviceUpdated();
    void setRequestedVideoAspect(float aspect);
    void setMuteOutgoingAudio(bool mute);
    void setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void receiveMessage(DecryptedMessage &&message);
    void remoteVideoStateUpdated(VideoState videoState);
    void setNetworkParameters(bool isLowCost, bool isDataSavingActive);
    void fillCallStats(CallStats &callStats);

    void setAudioInputDevice(std::string id);
    void setAudioOutputDevice(std::string id);
    void setInputVolume(float level);
    void setOutputVolume(float level);

    void addExternalAudioSamples(std::vector<uint8_t> &&samples);

private:
    struct SSRC {
        uint32_t incoming = 0;
        uint32_t outgoing = 0;
        uint32_t fecIncoming = 0;
        uint32_t fecOutgoing = 0;
    };

    class NetworkInterfaceImpl : public cricket::MediaChannelNetworkInterface {
    public:
        NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo);

        bool SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override;
        bool SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override;
        int SetOption(SocketType type, rtc::Socket::Option opt, int option) override;

    private:
        bool sendTransportMessage(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options);

        MediaManager *_mediaManager = nullptr;
        bool _isVideo = false;

    };

    friend class MediaManager::NetworkInterfaceImpl;

    void setPeerVideoFormats(VideoFormatsMessage &&peerFormats);

    bool computeIsSendingVideo() const;
    void configureSendingVideoIfNeeded();
    void checkIsSendingVideoChanged(bool wasSending);
    bool videoCodecsNegotiated() const;

    int getMaxVideoBitrate() const;
    int getMaxAudioBitrate() const;
    void adjustBitratePreferences(bool resetStartBitrate);
    bool computeIsReceivingVideo() const;
    void checkIsReceivingVideoChanged(bool wasReceiving);

    void setOutgoingVideoState(VideoState state);
    void setOutgoingAudioState(AudioState state);
    void sendVideoParametersMessage();
    void sendOutgoingMediaStateMessage();

    webrtc::scoped_refptr<webrtc::AudioDeviceModule> createAudioDeviceModule();

    void beginStatsTimer(int timeoutMs);
    void beginLevelsTimer(int timeoutMs);
    void collectStats();

    rtc::Thread *_thread = nullptr;
    std::unique_ptr<webrtc::RtcEventLogNull> _eventLog;

    std::function<void(Message &&)> _sendSignalingMessage;
    std::function<void(Message &&)> _sendTransportMessage;
    std::function<void(int)> _signalBarsUpdated;
    std::function<void(float, float)> _audioLevelsUpdated;
    std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> _createAudioDeviceModule;

    SSRC _ssrcAudio;
    SSRC _ssrcVideo;
    bool _enableFlexfec = true;

    ProtocolVersion _protocolVersion;

    bool _isConnected = false;
    bool _didConnectOnce = false;
    bool _readyToReceiveVideo = false;
    bool _didConfigureVideo = false;
    AudioState _outgoingAudioState = AudioState::Active;
    VideoState _outgoingVideoState = VideoState::Inactive;

    VideoFormatsMessage _myVideoFormats;
    std::vector<cricket::VideoCodec> _videoCodecs;
    absl::optional<cricket::VideoCodec> _videoCodecOut;

    webrtc::Environment _webrtcEnvironment;
    std::unique_ptr<webrtc::MediaFactory> _mediaFactory;
    std::unique_ptr<cricket::MediaEngineInterface> _mediaEngine;
    std::unique_ptr<webrtc::Call> _call;
    webrtc::LocalAudioSinkAdapter _audioSource;
    webrtc::scoped_refptr<webrtc::AudioDeviceModule> _audioDeviceModule;
    std::unique_ptr<cricket::VoiceMediaSendChannelInterface> _audioSendChannel;
    std::unique_ptr<cricket::VoiceMediaReceiveChannelInterface> _audioReceiveChannel;
    std::unique_ptr<cricket::VideoMediaSendChannelInterface> _videoSendChannel;
    bool _haveVideoSendChannel = false;
    std::unique_ptr<cricket::VideoMediaReceiveChannelInterface> _videoReceiveChannel;
    std::unique_ptr<webrtc::VideoBitrateAllocatorFactory> _videoBitrateAllocatorFactory;
    std::shared_ptr<VideoCaptureInterface> _videoCapture;
    std::shared_ptr<bool> _videoCaptureGuard;
    bool _isScreenCapture = false;
    std::shared_ptr<VideoSinkInterfaceProxyImpl> _incomingVideoSinkProxy;
    webrtc::RtpHeaderExtensionMap _audioRtpHeaderExtensionMap;
    webrtc::RtpHeaderExtensionMap _videoRtpHeaderExtensionMap;

    float _localPreferredVideoAspectRatio = 0.0f;
    float _preferredAspectRatio = 0.0f;
    bool _enableHighBitrateVideo = false;
    bool _isLowCostNetwork = false;
    bool _isDataSavingActive = false;

    float _currentAudioLevel = 0.0f;
    float _currentMyAudioLevel = 0.0f;

    std::unique_ptr<MediaManager::NetworkInterfaceImpl> _audioNetworkInterface;
    std::unique_ptr<MediaManager::NetworkInterfaceImpl> _videoNetworkInterface;

    std::vector<CallStatsBitrateRecord> _bitrateRecords;

    std::vector<float> _externalAudioSamples;
    webrtc::Mutex _externalAudioSamplesMutex;

    std::shared_ptr<PlatformContext> _platformContext;
};

} // namespace tgcalls

#endif
406
TMessagesProj/jni/voip/tgcalls/Message.cpp
Normal file

@ -0,0 +1,406 @@
#include "Message.h"
|
||||
|
||||
#include "rtc_base/byte_buffer.h"
|
||||
#include "api/jsep_ice_candidate.h"
|
||||
|
||||
namespace tgcalls {
|
||||
namespace {
|
||||
|
||||
constexpr auto kMaxStringLength = 65536;
|
||||
|
||||
void Serialize(rtc::ByteBufferWriter &to, const std::string &from) {
|
||||
assert(from.size() < kMaxStringLength);
|
||||
|
||||
to.WriteUInt32(uint32_t(from.size()));
|
||||
to.WriteString(from);
|
||||
}
|
||||
|
||||
bool Deserialize(std::string &to, rtc::ByteBufferReader &from) {
|
||||
uint32_t length = 0;
|
||||
if (!from.ReadUInt32(&length)) {
|
||||
RTC_LOG(LS_ERROR) << "Could not read string length.";
|
||||
return false;
|
||||
} else if (length >= kMaxStringLength) {
|
||||
RTC_LOG(LS_ERROR) << "Invalid string length: " << length;
|
||||
return false;
|
||||
} else if (!from.ReadString(&to, length)) {
|
||||
RTC_LOG(LS_ERROR) << "Could not read string data.";
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
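// Editorial note: with rtc::ByteBufferWriter's default byte order (assumed to
// be network order here), a string is encoded as a 4-byte big-endian length
// prefix followed by the raw bytes, so "abc" becomes 00 00 00 03 61 62 63.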

void Serialize(rtc::ByteBufferWriter &to, const webrtc::SdpVideoFormat &from) {
    assert(from.parameters.size() < std::numeric_limits<uint8_t>::max());

    Serialize(to, from.name);
    to.WriteUInt8(uint8_t(from.parameters.size()));
    for (const auto &pair : from.parameters) {
        Serialize(to, pair.first);
        Serialize(to, pair.second);
    }
}

bool Deserialize(webrtc::SdpVideoFormat &to, rtc::ByteBufferReader &from) {
    if (!Deserialize(to.name, from)) {
        RTC_LOG(LS_ERROR) << "Could not read video format name.";
        return false;
    }
    auto count = uint8_t();
    if (!from.ReadUInt8(&count)) {
        RTC_LOG(LS_ERROR) << "Could not read video format parameters count.";
        return false;
    }
    for (uint32_t i = 0; i != count; ++i) {
        auto key = std::string();
        auto value = std::string();
        if (!Deserialize(key, from)) {
            RTC_LOG(LS_ERROR) << "Could not read video format parameter key.";
            return false;
        } else if (!Deserialize(value, from)) {
            RTC_LOG(LS_ERROR) << "Could not read video format parameter value.";
            return false;
        }
        to.parameters.emplace(key, value);
    }
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const cricket::Candidate &from) {
    webrtc::JsepIceCandidate iceCandidate{ std::string(), 0 };
    iceCandidate.SetCandidate(from);
    std::string serialized;
    const auto success = iceCandidate.ToString(&serialized);
    assert(success);
    (void)success;
    Serialize(to, serialized);
}

bool Deserialize(cricket::Candidate &to, rtc::ByteBufferReader &from) {
    std::string candidate;
    if (!Deserialize(candidate, from)) {
        RTC_LOG(LS_ERROR) << "Could not read candidate string.";
        return false;
    }
    webrtc::JsepIceCandidate parseCandidate{ std::string(), 0 };
    if (!parseCandidate.Initialize(candidate, nullptr)) {
        RTC_LOG(LS_ERROR) << "Could not parse candidate: " << candidate;
        return false;
    }
    to = parseCandidate.candidate();
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const RequestVideoMessage &from, bool singleMessagePacket) {
    // RequestVideoMessage carries no payload; only its kId byte is written.
}

bool Deserialize(RequestVideoMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const RemoteMediaStateMessage &from, bool singleMessagePacket) {
    uint8_t state = (uint8_t(from.video) << 1) | uint8_t(from.audio);
    to.WriteUInt8(state);
}

bool Deserialize(RemoteMediaStateMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
    uint8_t state = 0;
    if (!reader.ReadUInt8(&state)) {
        RTC_LOG(LS_ERROR) << "Could not read remote media state.";
        return false;
    }
    to.audio = AudioState(state & 0x01);
    to.video = VideoState((state >> 1) & 0x03);
    if (to.video == VideoState(0x03)) {
        RTC_LOG(LS_ERROR) << "Invalid value for remote video state.";
        return false;
    }
    return true;
}
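// Editorial note on the packing above: the state byte holds the audio state
// in bit 0 and the video state in bits 1..2, with the value 3 rejected as
// invalid. For example, audio state 1 with video state 2 is encoded as
// (2 << 1) | 1 == 0x05.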

void Serialize(rtc::ByteBufferWriter &to, const CandidatesListMessage &from, bool singleMessagePacket) {
    assert(from.candidates.size() < std::numeric_limits<uint8_t>::max());

    to.WriteUInt8(uint8_t(from.candidates.size()));
    for (const auto &candidate : from.candidates) {
        Serialize(to, candidate);
    }

    Serialize(to, from.iceParameters.ufrag);
    Serialize(to, from.iceParameters.pwd);
}

bool Deserialize(CandidatesListMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
    auto count = uint8_t();
    if (!reader.ReadUInt8(&count)) {
        RTC_LOG(LS_ERROR) << "Could not read candidates count.";
        return false;
    }
    for (uint32_t i = 0; i != count; ++i) {
        auto candidate = cricket::Candidate();
        if (!Deserialize(candidate, reader)) {
            RTC_LOG(LS_ERROR) << "Could not read candidate.";
            return false;
        }
        to.candidates.push_back(std::move(candidate));
    }
    if (!Deserialize(to.iceParameters.ufrag, reader)) {
        return false;
    }
    if (!Deserialize(to.iceParameters.pwd, reader)) {
        return false;
    }
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const VideoFormatsMessage &from, bool singleMessagePacket) {
    assert(from.formats.size() < std::numeric_limits<uint8_t>::max());
    assert(from.encodersCount <= from.formats.size());

    to.WriteUInt8(uint8_t(from.formats.size()));
    for (const auto &format : from.formats) {
        Serialize(to, format);
    }
    to.WriteUInt8(uint8_t(from.encodersCount));
}

bool Deserialize(VideoFormatsMessage &to, rtc::ByteBufferReader &from, bool singleMessagePacket) {
    auto count = uint8_t();
    if (!from.ReadUInt8(&count)) {
        RTC_LOG(LS_ERROR) << "Could not read video formats count.";
        return false;
    }
    for (uint32_t i = 0; i != count; ++i) {
        auto format = webrtc::SdpVideoFormat(std::string());
        if (!Deserialize(format, from)) {
            RTC_LOG(LS_ERROR) << "Could not read video format.";
            return false;
        }
        to.formats.push_back(std::move(format));
    }
    auto encoders = uint8_t();
    if (!from.ReadUInt8(&encoders)) {
        RTC_LOG(LS_ERROR) << "Could not read encoders count.";
        return false;
    } else if (encoders > to.formats.size()) {
        RTC_LOG(LS_ERROR) << "Invalid encoders count: " << encoders << ", full formats count: " << to.formats.size();
        return false;
    }
    to.encodersCount = encoders;
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const rtc::CopyOnWriteBuffer &from, bool singleMessagePacket) {
    if (!singleMessagePacket) {
        assert(from.size() <= UINT16_MAX);
        to.WriteUInt16(from.size());
    }
    to.WriteBytes(reinterpret_cast<const uint8_t*>(from.cdata()), from.size());
}

bool Deserialize(rtc::CopyOnWriteBuffer &to, rtc::ByteBufferReader &from, bool singleMessagePacket) {
    auto length = uint16_t(from.Length());
    if (!singleMessagePacket) {
        if (!from.ReadUInt16(&length)) {
            RTC_LOG(LS_ERROR) << "Could not read buffer length.";
            return false;
        } else if (from.Length() < length) {
            RTC_LOG(LS_ERROR) << "Invalid buffer length: " << length << ", available: " << from.Length();
            return false;
        }
    }
    to.AppendData(from.Data(), length);
    from.Consume(length);
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const AudioDataMessage &from, bool singleMessagePacket) {
    Serialize(to, from.data, singleMessagePacket);
}

bool Deserialize(AudioDataMessage &to, rtc::ByteBufferReader &from, bool singleMessagePacket) {
    return Deserialize(to.data, from, singleMessagePacket);
}

void Serialize(rtc::ByteBufferWriter &to, const VideoDataMessage &from, bool singleMessagePacket) {
    Serialize(to, from.data, singleMessagePacket);
}

bool Deserialize(VideoDataMessage &to, rtc::ByteBufferReader &from, bool singleMessagePacket) {
    return Deserialize(to.data, from, singleMessagePacket);
}

void Serialize(rtc::ByteBufferWriter &to, const UnstructuredDataMessage &from, bool singleMessagePacket) {
    Serialize(to, from.data, singleMessagePacket);
}

bool Deserialize(UnstructuredDataMessage &to, rtc::ByteBufferReader &from, bool singleMessagePacket) {
    return Deserialize(to.data, from, singleMessagePacket);
}

void Serialize(rtc::ByteBufferWriter &to, const VideoParametersMessage &from, bool singleMessagePacket) {
    to.WriteUInt32(from.aspectRatio);
}

bool Deserialize(VideoParametersMessage &to, rtc::ByteBufferReader &from, bool singleMessagePacket) {
    uint32_t aspectRatio = 0;
    if (!from.ReadUInt32(&aspectRatio)) {
        return false;
    }
    to.aspectRatio = aspectRatio;
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const RemoteBatteryLevelIsLowMessage &from, bool singleMessagePacket) {
    to.WriteUInt8(from.batteryLow ? 1 : 0);
}

bool Deserialize(RemoteBatteryLevelIsLowMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
    uint8_t value = 0;
    if (!reader.ReadUInt8(&value)) {
        RTC_LOG(LS_ERROR) << "Could not read batteryLevelIsLow.";
        return false;
    }
    to.batteryLow = (value != 0);
    return true;
}

void Serialize(rtc::ByteBufferWriter &to, const RemoteNetworkStatusMessage &from, bool singleMessagePacket) {
    to.WriteUInt8(from.isLowCost ? 1 : 0);
    to.WriteUInt8(from.isLowDataRequested ? 1 : 0);
}

bool Deserialize(RemoteNetworkStatusMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
    uint8_t value = 0;
    if (!reader.ReadUInt8(&value)) {
        RTC_LOG(LS_ERROR) << "Could not read isLowCost.";
        return false;
    }
    to.isLowCost = (value != 0);
    // A missing second byte is tolerated; isLowDataRequested keeps its default.
    if (reader.ReadUInt8(&value)) {
        to.isLowDataRequested = (value != 0);
    }
    return true;
}

enum class TryResult : uint8_t {
    Success,
    TryNext,
    Abort,
};

template <typename T>
TryResult TryDeserialize(
        absl::optional<Message> &to,
        rtc::ByteBufferReader &reader,
        bool singleMessagePacket) {
    assert(reader.Length() != 0);

    constexpr auto id = T::kId;
    if (uint8_t(*reader.Data()) != id) {
        return TryResult::TryNext;
    }
    reader.Consume(1);
    auto parsed = T();
    if (!Deserialize(parsed, reader, singleMessagePacket)) {
        RTC_LOG(LS_ERROR) << "Could not read message with kId: " << id;
        return TryResult::Abort;
    }
    to = Message{ parsed };
    return TryResult::Success;
}

template <typename ...Types>
struct TryDeserializeNext;

template <>
struct TryDeserializeNext<> {
    static bool Call(
            absl::optional<Message> &to,
            rtc::ByteBufferReader &reader,
            bool singleMessagePacket) {
        return false;
    }
};

template <typename T, typename ...Other>
struct TryDeserializeNext<T, Other...> {
    static bool Call(
            absl::optional<Message> &to,
            rtc::ByteBufferReader &reader,
            bool singleMessagePacket) {
        const auto result = TryDeserialize<T>(to, reader, singleMessagePacket);
        return (result == TryResult::TryNext)
            ? TryDeserializeNext<Other...>::Call(to, reader, singleMessagePacket)
            : (result == TryResult::Success);
    }
};

template <typename ...Types>
bool TryDeserializeRecursive(
        absl::optional<Message> &to,
        rtc::ByteBufferReader &reader,
        bool singleMessagePacket,
        absl::variant<Types...> *) {
    return TryDeserializeNext<Types...>::Call(to, reader, singleMessagePacket);
}

} // namespace
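// Editorial note: TryDeserializeNext<Types...> unrolls at compile time into a
// dispatch chain over every alternative of the Message variant, equivalent to
//
//   if (TryDeserialize<CandidatesListMessage>(...) != TryNext) ...
//   if (TryDeserialize<VideoFormatsMessage>(...) != TryNext) ...
//   ... and so on,
//
// succeeding on the first kId match, failing on a malformed payload, and
// returning false if no alternative matches. TryDeserializeRecursive exists
// only to deduce Types... from the variant inside Message, so extending the
// variant automatically extends the chain.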

rtc::CopyOnWriteBuffer SerializeMessageWithSeq(
        const Message &message,
        uint32_t seq,
        bool singleMessagePacket) {
    rtc::ByteBufferWriter writer;
    writer.WriteUInt32(seq);
    absl::visit([&](const auto &data) {
        writer.WriteUInt8(std::decay_t<decltype(data)>::kId);
        Serialize(writer, data, singleMessagePacket);
    }, message.data);

    auto result = rtc::CopyOnWriteBuffer();
    result.AppendData(writer.Data(), writer.Length());

    return result;
}

absl::optional<Message> DeserializeMessage(
        rtc::ByteBufferReader &reader,
        bool singleMessagePacket) {
    if (!reader.Length()) {
        return absl::nullopt;
    }
    using Variant = decltype(std::declval<Message>().data);
    auto result = absl::make_optional<Message>();
    return TryDeserializeRecursive(result, reader, singleMessagePacket, (Variant*)nullptr)
        ? result
        : absl::nullopt;
}
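// Minimal round-trip sketch (editorial, not part of the commit; reader
// construction varies across WebRTC revisions):
//
//   Message message{ RemoteBatteryLevelIsLowMessage{ true } };
//   auto packet = SerializeMessageWithSeq(message, /*seq=*/1, /*singleMessagePacket=*/true);
//   rtc::ByteBufferReader reader(rtc::MakeArrayView(packet.data(), packet.size()));
//   uint32_t seq = 0;
//   reader.ReadUInt32(&seq); // consume the sequence number written above
//   auto parsed = DeserializeMessage(reader, /*singleMessagePacket=*/true);
//   // parsed, if set, holds RemoteBatteryLevelIsLowMessage{ batteryLow = true }.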

absl::optional<rtc::CopyOnWriteBuffer> DeserializeRawMessage(
        rtc::ByteBufferReader &reader,
        bool singleMessagePacket) {
    if (!reader.Length()) {
        return absl::nullopt;
    }

    uint32_t length = 0;
    if (!reader.ReadUInt32(&length)) {
        return absl::nullopt;
    }

    if (length > 1024 * 1024) {
        return absl::nullopt;
    }

    rtc::CopyOnWriteBuffer result;
    result.SetSize(length);
    if (!reader.ReadBytes(rtc::ArrayView<uint8_t>((uint8_t *)result.MutableData(), result.size()))) {
        return absl::nullopt;
    }

    return result;
}

} // namespace tgcalls
148
TMessagesProj/jni/voip/tgcalls/Message.h
Normal file

@ -0,0 +1,148 @@
#ifndef TGCALLS_MESSAGE_H
#define TGCALLS_MESSAGE_H

#include "api/candidate.h"
#include "api/video_codecs/sdp_video_format.h"
#include "absl/types/variant.h"
#include "absl/types/optional.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/byte_buffer.h"

#include <vector>

namespace tgcalls {

enum class VideoState;
enum class AudioState;

struct PeerIceParameters {
    std::string ufrag;
    std::string pwd;
    bool supportsRenomination = false;

    PeerIceParameters() = default;

    PeerIceParameters(std::string ufrag_, std::string pwd_, bool supportsRenomination_) :
    ufrag(ufrag_),
    pwd(pwd_),
    supportsRenomination(supportsRenomination_) {
    }

};

struct CandidatesListMessage {
    static constexpr uint8_t kId = 1;
    static constexpr bool kRequiresAck = true;

    std::vector<cricket::Candidate> candidates;
    PeerIceParameters iceParameters;
};

struct VideoFormatsMessage {
    static constexpr uint8_t kId = 2;
    static constexpr bool kRequiresAck = true;

    std::vector<webrtc::SdpVideoFormat> formats;
    int encodersCount = 0;
};

struct RequestVideoMessage {
    static constexpr uint8_t kId = 3;
    static constexpr bool kRequiresAck = true;
};

struct RemoteMediaStateMessage {
    static constexpr uint8_t kId = 4;
    static constexpr bool kRequiresAck = true;

    AudioState audio = AudioState();
    VideoState video = VideoState();
};

struct AudioDataMessage {
    static constexpr uint8_t kId = 5;
    static constexpr bool kRequiresAck = false;

    rtc::CopyOnWriteBuffer data;
};

struct VideoDataMessage {
    static constexpr uint8_t kId = 6;
    static constexpr bool kRequiresAck = false;

    rtc::CopyOnWriteBuffer data;
};

struct UnstructuredDataMessage {
    static constexpr uint8_t kId = 7;
    static constexpr bool kRequiresAck = true;

    rtc::CopyOnWriteBuffer data;
};

struct VideoParametersMessage {
    static constexpr uint8_t kId = 8;
    static constexpr bool kRequiresAck = true;

    uint32_t aspectRatio = 0;
};

struct RemoteBatteryLevelIsLowMessage {
    static constexpr uint8_t kId = 9;
    static constexpr bool kRequiresAck = true;

    bool batteryLow = false;
};

struct RemoteNetworkStatusMessage {
    static constexpr uint8_t kId = 10;
    static constexpr bool kRequiresAck = true;

    bool isLowCost = false;
    bool isLowDataRequested = false;
};

// To add a new message you should:
// 1. Add the message struct.
// 2. Add the message to the variant in the Message struct.
// 3. Add Serialize/Deserialize overloads in the Message module (Message.cpp).

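// For illustration only (hypothetical, not part of the protocol), a new
// message following the three steps above could look like:
//
//   struct PingMessage {
//       static constexpr uint8_t kId = 11; // next unused id, hypothetical
//       static constexpr bool kRequiresAck = true;
//
//       uint32_t token = 0;
//   };
//
// plus PingMessage listed in the variant below and Serialize/Deserialize
// overloads in Message.cpp mirroring, e.g., VideoParametersMessage.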
struct Message {
    absl::variant<
        CandidatesListMessage,
        VideoFormatsMessage,
        RequestVideoMessage,
        RemoteMediaStateMessage,
        AudioDataMessage,
        VideoDataMessage,
        UnstructuredDataMessage,
        VideoParametersMessage,
        RemoteBatteryLevelIsLowMessage,
        RemoteNetworkStatusMessage> data;
};

rtc::CopyOnWriteBuffer SerializeMessageWithSeq(
    const Message &message,
    uint32_t seq,
    bool singleMessagePacket);
absl::optional<Message> DeserializeMessage(
    rtc::ByteBufferReader &reader,
    bool singleMessagePacket);
absl::optional<rtc::CopyOnWriteBuffer> DeserializeRawMessage(
    rtc::ByteBufferReader &reader,
    bool singleMessagePacket);

struct DecryptedMessage {
    Message message;
    uint32_t counter = 0;
};

struct DecryptedRawMessage {
    rtc::CopyOnWriteBuffer message;
    uint32_t counter = 0;
};

} // namespace tgcalls

#endif
397
TMessagesProj/jni/voip/tgcalls/NetworkManager.cpp
Normal file

@ -0,0 +1,397 @@
#include "NetworkManager.h"
|
||||
|
||||
#include "Message.h"
|
||||
|
||||
#include "p2p/base/basic_packet_socket_factory.h"
|
||||
#include "v2/ReflectorRelayPortFactory.h"
|
||||
#include "p2p/client/basic_port_allocator.h"
|
||||
#include "p2p/base/p2p_transport_channel.h"
|
||||
#include "p2p/base/basic_async_resolver_factory.h"
|
||||
#include "api/packet_socket_factory.h"
|
||||
#include "p2p/base/ice_credentials_iterator.h"
|
||||
#include "api/jsep_ice_candidate.h"
|
||||
#include "rtc_base/network_monitor_factory.h"
|
||||
|
||||
#include "TurnCustomizerImpl.h"
|
||||
#include "platform/PlatformInterface.h"
|
||||
|
||||
extern "C" {
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/aes.h>
|
||||
#ifndef OPENSSL_IS_BORINGSSL
|
||||
#include <openssl/modes.h>
|
||||
#endif
|
||||
#include <openssl/rand.h>
|
||||
#include <openssl/crypto.h>
|
||||
} // extern "C"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class TgCallsCryptStringImpl : public rtc::CryptStringImpl {
|
||||
public:
|
||||
TgCallsCryptStringImpl(std::string const &value) :
|
||||
_value(value) {
|
||||
}
|
||||
|
||||
virtual ~TgCallsCryptStringImpl() override {
|
||||
}
|
||||
|
||||
virtual size_t GetLength() const override {
|
||||
return _value.size();
|
||||
}
|
||||
|
||||
virtual void CopyTo(char* dest, bool nullterminate) const override {
|
||||
memcpy(dest, _value.data(), _value.size());
|
||||
if (nullterminate) {
|
||||
dest[_value.size()] = 0;
|
||||
}
|
||||
}
|
||||
virtual std::string UrlEncode() const override {
|
||||
return _value;
|
||||
}
|
||||
virtual CryptStringImpl* Copy() const override {
|
||||
return new TgCallsCryptStringImpl(_value);
|
||||
}
|
||||
|
||||
virtual void CopyRawTo(std::vector<unsigned char>* dest) const override {
|
||||
dest->resize(_value.size());
|
||||
memcpy(dest->data(), _value.data(), _value.size());
|
||||
}
|
||||
|
||||
private:
|
||||
std::string _value;
|
||||
};
|
||||
|
||||
NetworkManager::NetworkManager(
    rtc::Thread *thread,
    EncryptionKey encryptionKey,
    bool enableP2P,
    bool enableTCP,
    bool enableStunMarking,
    std::vector<RtcServer> const &rtcServers,
    std::unique_ptr<Proxy> proxy,
    std::function<void(const NetworkManager::State &)> stateUpdated,
    std::function<void(DecryptedMessage &&)> transportMessageReceived,
    std::function<void(Message &&)> sendSignalingMessage,
    std::function<void(int delayMs, int cause)> sendTransportServiceAsync) :
_thread(thread),
_enableP2P(enableP2P),
_enableTCP(enableTCP),
_enableStunMarking(enableStunMarking),
_rtcServers(rtcServers),
_proxy(std::move(proxy)),
_transport(
    EncryptedConnection::Type::Transport,
    encryptionKey,
    [=](int delayMs, int cause) { sendTransportServiceAsync(delayMs, cause); }),
_isOutgoing(encryptionKey.isOutgoing),
_stateUpdated(std::move(stateUpdated)),
_transportMessageReceived(std::move(transportMessageReceived)),
_sendSignalingMessage(std::move(sendSignalingMessage)),
_localIceParameters(rtc::CreateRandomString(cricket::ICE_UFRAG_LENGTH), rtc::CreateRandomString(cricket::ICE_PWD_LENGTH), false) {
    assert(_thread->IsCurrent());

    _networkMonitorFactory = PlatformInterface::SharedInstance()->createNetworkMonitorFactory();
}

NetworkManager::~NetworkManager() {
    assert(_thread->IsCurrent());

    RTC_LOG(LS_INFO) << "NetworkManager::~NetworkManager()";

    _transportChannel.reset();
    _asyncResolverFactory.reset();
    _portAllocator.reset();
    _networkManager.reset();
    _socketFactory.reset();
    _networkMonitorFactory.reset();
}

void NetworkManager::start() {
    _socketFactory.reset(new rtc::BasicPacketSocketFactory(_thread->socketserver()));

    _networkManager = std::make_unique<rtc::BasicNetworkManager>(_networkMonitorFactory.get(), _thread->socketserver());

    if (_enableStunMarking) {
        _turnCustomizer.reset(new TurnCustomizerImpl());
    }

    _relayPortFactory.reset(new ReflectorRelayPortFactory(_rtcServers, false, 0, _thread->socketserver()));

    _portAllocator.reset(new cricket::BasicPortAllocator(_networkManager.get(), _socketFactory.get(), _turnCustomizer.get(), _relayPortFactory.get()));

    uint32_t flags = _portAllocator->flags();

    flags |=
        //cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET |
        cricket::PORTALLOCATOR_ENABLE_IPV6 |
        cricket::PORTALLOCATOR_ENABLE_IPV6_ON_WIFI;

    if (!_enableTCP) {
        flags |= cricket::PORTALLOCATOR_DISABLE_TCP;
    }
    if (!_enableP2P) {
        flags |= cricket::PORTALLOCATOR_DISABLE_UDP;
        flags |= cricket::PORTALLOCATOR_DISABLE_STUN;
        uint32_t candidateFilter = _portAllocator->candidate_filter();
        candidateFilter &= ~(cricket::CF_REFLEXIVE);
        _portAllocator->SetCandidateFilter(candidateFilter);
    }

    _portAllocator->set_step_delay(cricket::kMinimumStepDelay);

    if (_proxy) {
        rtc::ProxyInfo proxyInfo;
        proxyInfo.type = rtc::ProxyType::PROXY_SOCKS5;
        proxyInfo.address = rtc::SocketAddress(_proxy->host, _proxy->port);
        proxyInfo.username = _proxy->login;
        proxyInfo.password = rtc::CryptString(TgCallsCryptStringImpl(_proxy->password));
        _portAllocator->set_proxy("t/1.0", proxyInfo);
    }

    _portAllocator->set_flags(flags);
    _portAllocator->Initialize();

    cricket::ServerAddresses stunServers;
    std::vector<cricket::RelayServerConfig> turnServers;

    for (auto &server : _rtcServers) {
        if (server.isTcp) {
            continue;
        }

        if (server.isTurn) {
            turnServers.push_back(cricket::RelayServerConfig(
                rtc::SocketAddress(server.host, server.port),
                server.login,
                server.password,
                cricket::PROTO_UDP
            ));
        } else {
            rtc::SocketAddress stunAddress = rtc::SocketAddress(server.host, server.port);
            stunServers.insert(stunAddress);
        }
    }

    _portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE, _turnCustomizer.get());

    _asyncResolverFactory = std::make_unique<webrtc::BasicAsyncDnsResolverFactory>();

    webrtc::IceTransportInit iceTransportInit;
    iceTransportInit.set_port_allocator(_portAllocator.get());
    iceTransportInit.set_async_dns_resolver_factory(_asyncResolverFactory.get());

    _transportChannel = cricket::P2PTransportChannel::Create("transport", 0, std::move(iceTransportInit));

    cricket::IceConfig iceConfig;
    iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY;
    iceConfig.prioritize_most_likely_candidate_pairs = true;
    iceConfig.regather_on_failed_networks_interval = cricket::REGATHER_ON_FAILED_NETWORKS_INTERVAL;
    _transportChannel->SetIceConfig(iceConfig);

    cricket::IceParameters localIceParameters(
        _localIceParameters.ufrag,
        _localIceParameters.pwd,
        false
    );

    _transportChannel->SetIceParameters(localIceParameters);
    _transportChannel->SetIceRole(_isOutgoing ? cricket::ICEROLE_CONTROLLING : cricket::ICEROLE_CONTROLLED);

    _transportChannel->SignalCandidateGathered.connect(this, &NetworkManager::candidateGathered);
    _transportChannel->SignalGatheringState.connect(this, &NetworkManager::candidateGatheringState);
    _transportChannel->SignalIceTransportStateChanged.connect(this, &NetworkManager::transportStateChanged);
    _transportChannel->SignalReadPacket.connect(this, &NetworkManager::transportPacketReceived);
    _transportChannel->SignalNetworkRouteChanged.connect(this, &NetworkManager::transportRouteChanged);

    _transportChannel->MaybeStartGathering();

    _transportChannel->SetRemoteIceMode(cricket::ICEMODE_FULL);

    _lastNetworkActivityMs = rtc::TimeMillis();

    checkConnectionTimeout();
}

void NetworkManager::receiveSignalingMessage(DecryptedMessage &&message) {
|
||||
const auto list = absl::get_if<CandidatesListMessage>(&message.message.data);
|
||||
assert(list != nullptr);
|
||||
|
||||
if (!_remoteIceParameters.has_value()) {
|
||||
PeerIceParameters parameters(list->iceParameters.ufrag, list->iceParameters.pwd, false);
|
||||
_remoteIceParameters = parameters;
|
||||
|
||||
cricket::IceParameters remoteIceParameters(
|
||||
parameters.ufrag,
|
||||
parameters.pwd,
|
||||
false
|
||||
);
|
||||
|
||||
_transportChannel->SetRemoteIceParameters(remoteIceParameters);
|
||||
}
|
||||
|
||||
for (const auto &candidate : list->candidates) {
|
||||
_transportChannel->AddRemoteCandidate(candidate);
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t NetworkManager::sendMessage(const Message &message) {
|
||||
if (const auto prepared = _transport.prepareForSending(message)) {
|
||||
rtc::PacketOptions packetOptions;
|
||||
_transportChannel->SendPacket((const char *)prepared->bytes.data(), prepared->bytes.size(), packetOptions, 0);
|
||||
addTrafficStats(prepared->bytes.size(), false);
|
||||
return prepared->counter;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void NetworkManager::sendTransportService(int cause) {
|
||||
if (const auto prepared = _transport.prepareForSendingService(cause)) {
|
||||
rtc::PacketOptions packetOptions;
|
||||
_transportChannel->SendPacket((const char *)prepared->bytes.data(), prepared->bytes.size(), packetOptions, 0);
|
||||
addTrafficStats(prepared->bytes.size(), false);
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkManager::setIsLocalNetworkLowCost(bool isLocalNetworkLowCost) {
|
||||
_isLocalNetworkLowCost = isLocalNetworkLowCost;
|
||||
|
||||
logCurrentNetworkState();
|
||||
}
|
||||
|
||||
TrafficStats NetworkManager::getNetworkStats() {
|
||||
TrafficStats stats;
|
||||
stats.bytesSentWifi = _trafficStatsWifi.outgoing;
|
||||
stats.bytesReceivedWifi = _trafficStatsWifi.incoming;
|
||||
stats.bytesSentMobile = _trafficStatsCellular.outgoing;
|
||||
stats.bytesReceivedMobile = _trafficStatsCellular.incoming;
|
||||
return stats;
|
||||
}
|
||||
|
||||
void NetworkManager::fillCallStats(CallStats &callStats) {
|
||||
callStats.networkRecords = _networkRecords;
|
||||
}
|
||||
|
||||
void NetworkManager::logCurrentNetworkState() {
|
||||
if (!_currentEndpointType.has_value()) {
|
||||
return;
|
||||
}
|
||||
|
||||
CallStatsNetworkRecord record;
|
||||
record.timestamp = (int32_t)(rtc::TimeMillis() / 1000);
|
||||
record.endpointType = *_currentEndpointType;
|
||||
record.isLowCost = _isLocalNetworkLowCost;
|
||||
_networkRecords.push_back(std::move(record));
|
||||
}
|
||||
|
||||
void NetworkManager::checkConnectionTimeout() {
|
||||
const auto weak = std::weak_ptr<NetworkManager>(shared_from_this());
|
||||
_thread->PostDelayedTask([weak]() {
|
||||
auto strong = weak.lock();
|
||||
if (!strong) {
|
||||
return;
|
||||
}
|
||||
|
||||
int64_t currentTimestamp = rtc::TimeMillis();
|
||||
const int64_t maxTimeout = 20000;
|
||||
|
||||
if (strong->_lastNetworkActivityMs + maxTimeout < currentTimestamp) {
|
||||
NetworkManager::State emitState;
|
||||
emitState.isReadyToSendData = false;
|
||||
emitState.isFailed = true;
|
||||
strong->_stateUpdated(emitState);
|
||||
}
|
||||
|
||||
strong->checkConnectionTimeout();
|
||||
}, webrtc::TimeDelta::Millis(1000));
|
||||
}
|
||||
|
||||
void NetworkManager::candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate) {
|
||||
assert(_thread->IsCurrent());
|
||||
_sendSignalingMessage({ CandidatesListMessage{ { 1, candidate }, _localIceParameters } });
|
||||
}
|
||||
|
||||
void NetworkManager::candidateGatheringState(cricket::IceTransportInternal *transport) {
|
||||
assert(_thread->IsCurrent());
|
||||
}
|
||||
|
||||
void NetworkManager::transportStateChanged(cricket::IceTransportInternal *transport) {
|
||||
assert(_thread->IsCurrent());
|
||||
|
||||
auto state = transport->GetIceTransportState();
|
||||
bool isConnected = false;
|
||||
switch (state) {
|
||||
case webrtc::IceTransportState::kConnected:
|
||||
case webrtc::IceTransportState::kCompleted:
|
||||
isConnected = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
NetworkManager::State emitState;
|
||||
emitState.isReadyToSendData = isConnected;
|
||||
_stateUpdated(emitState);
|
||||
}
|
||||
|
||||
void NetworkManager::transportReadyToSend(cricket::IceTransportInternal *transport) {
|
||||
assert(_thread->IsCurrent());
|
||||
}
|
||||
|
||||
void NetworkManager::transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t ×tamp, int unused) {
|
||||
assert(_thread->IsCurrent());
|
||||
|
||||
_lastNetworkActivityMs = rtc::TimeMillis();
|
||||
|
||||
addTrafficStats(size, true);
|
||||
|
||||
if (auto decrypted = _transport.handleIncomingPacket(bytes, size)) {
|
||||
if (_transportMessageReceived) {
|
||||
_transportMessageReceived(std::move(decrypted->main));
|
||||
for (auto &message : decrypted->additional) {
|
||||
_transportMessageReceived(std::move(message));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkManager::transportRouteChanged(absl::optional<rtc::NetworkRoute> route) {
|
||||
assert(_thread->IsCurrent());
|
||||
|
||||
if (route.has_value()) {
|
||||
RTC_LOG(LS_INFO) << "NetworkManager route changed: " << route->DebugString();
|
||||
|
||||
bool localIsWifi = route->local.adapter_type() == rtc::AdapterType::ADAPTER_TYPE_WIFI;
|
||||
bool remoteIsWifi = route->remote.adapter_type() == rtc::AdapterType::ADAPTER_TYPE_WIFI;
|
||||
|
||||
RTC_LOG(LS_INFO) << "NetworkManager is wifi: local=" << localIsWifi << ", remote=" << remoteIsWifi;
|
||||
|
||||
CallStatsConnectionEndpointType endpointType;
|
||||
if (route->local.uses_turn()) {
|
||||
endpointType = CallStatsConnectionEndpointType::ConnectionEndpointTURN;
|
||||
} else {
|
||||
endpointType = CallStatsConnectionEndpointType::ConnectionEndpointP2P;
|
||||
}
|
||||
if (!_currentEndpointType.has_value() || _currentEndpointType != endpointType) {
|
||||
_currentEndpointType = endpointType;
|
||||
logCurrentNetworkState();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkManager::addTrafficStats(int64_t byteCount, bool isIncoming) {
|
||||
if (_isLocalNetworkLowCost) {
|
||||
if (isIncoming) {
|
||||
_trafficStatsWifi.incoming += byteCount;
|
||||
} else {
|
||||
_trafficStatsWifi.outgoing += byteCount;
|
||||
}
|
||||
} else {
|
||||
if (isIncoming) {
|
||||
_trafficStatsCellular.incoming += byteCount;
|
||||
} else {
|
||||
_trafficStatsCellular.outgoing += byteCount;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
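A note on checkConnectionTimeout() above: it is a self-rescheduling watchdog that re-arms itself every second via PostDelayedTask and captures only a weak_ptr, so a pending tick can never outlive the manager. A minimal standalone sketch of the same idiom, assuming only rtc::Thread and webrtc::TimeDelta (the Watchdog class and its poll() method are hypothetical names for illustration):

#include <memory>

#include "rtc_base/thread.h"
#include "api/units/time_delta.h"

class Watchdog : public std::enable_shared_from_this<Watchdog> {
public:
    explicit Watchdog(rtc::Thread *thread) : _thread(thread) {}

    // Must be owned by a std::shared_ptr before poll() is first called,
    // otherwise shared_from_this() is undefined behavior.
    void poll() {
        const auto weak = std::weak_ptr<Watchdog>(shared_from_this());
        _thread->PostDelayedTask([weak]() {
            auto strong = weak.lock();
            if (!strong) {
                return; // Owner destroyed; the timer chain stops here.
            }
            // ... inspect activity timestamps, emit a failure state, ...
            strong->poll(); // Re-arm for the next tick.
        }, webrtc::TimeDelta::Millis(1000));
    }

private:
    rtc::Thread *_thread = nullptr;
};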
122
TMessagesProj/jni/voip/tgcalls/NetworkManager.h
Normal file
@ -0,0 +1,122 @@
#ifndef TGCALLS_NETWORK_MANAGER_H
#define TGCALLS_NETWORK_MANAGER_H

#include "rtc_base/thread.h"

#include "EncryptedConnection.h"
#include "Instance.h"
#include "Message.h"
#include "Stats.h"

#include "rtc_base/copy_on_write_buffer.h"
#include "api/candidate.h"
#include "rtc_base/network_monitor_factory.h"
#include "api/async_dns_resolver.h"

#include <functional>
#include <memory>

namespace rtc {
class BasicPacketSocketFactory;
class BasicNetworkManager;
class PacketTransportInternal;
struct NetworkRoute;
} // namespace rtc

namespace cricket {
class BasicPortAllocator;
class P2PTransportChannel;
class IceTransportInternal;
class RelayPortFactoryInterface;
} // namespace cricket

namespace webrtc {
class BasicAsyncResolverFactory;
class TurnCustomizer;
} // namespace webrtc

namespace tgcalls {

struct Message;

class NetworkManager : public sigslot::has_slots<>, public std::enable_shared_from_this<NetworkManager> {
public:
    struct State {
        bool isReadyToSendData = false;
        bool isFailed = false;
    };

    struct InterfaceTrafficStats {
        int64_t incoming = 0;
        int64_t outgoing = 0;
    };

    NetworkManager(
        rtc::Thread *thread,
        EncryptionKey encryptionKey,
        bool enableP2P,
        bool enableTCP,
        bool enableStunMarking,
        std::vector<RtcServer> const &rtcServers,
        std::unique_ptr<Proxy> proxy,
        std::function<void(const State &)> stateUpdated,
        std::function<void(DecryptedMessage &&)> transportMessageReceived,
        std::function<void(Message &&)> sendSignalingMessage,
        std::function<void(int delayMs, int cause)> sendTransportServiceAsync);
    ~NetworkManager();

    void start();
    void receiveSignalingMessage(DecryptedMessage &&message);
    uint32_t sendMessage(const Message &message);
    void sendTransportService(int cause);
    void setIsLocalNetworkLowCost(bool isLocalNetworkLowCost);
    TrafficStats getNetworkStats();
    void fillCallStats(CallStats &callStats);
    void logCurrentNetworkState();

private:
    void checkConnectionTimeout();
    void candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate);
    void candidateGatheringState(cricket::IceTransportInternal *transport);
    void transportStateChanged(cricket::IceTransportInternal *transport);
    void transportReadyToSend(cricket::IceTransportInternal *transport);
    void transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused);
    void transportRouteChanged(absl::optional<rtc::NetworkRoute> route);
    void addTrafficStats(int64_t byteCount, bool isIncoming);

    rtc::Thread *_thread = nullptr;
    bool _enableP2P = false;
    bool _enableTCP = false;
    bool _enableStunMarking = false;
    std::vector<RtcServer> _rtcServers;
    std::unique_ptr<Proxy> _proxy;
    EncryptedConnection _transport;
    bool _isOutgoing = false;
    std::function<void(const NetworkManager::State &)> _stateUpdated;
    std::function<void(DecryptedMessage &&)> _transportMessageReceived;
    std::function<void(Message &&)> _sendSignalingMessage;

    std::unique_ptr<rtc::NetworkMonitorFactory> _networkMonitorFactory;
    std::unique_ptr<rtc::BasicPacketSocketFactory> _socketFactory;
    std::unique_ptr<rtc::BasicNetworkManager> _networkManager;
    std::unique_ptr<webrtc::TurnCustomizer> _turnCustomizer;
    std::unique_ptr<cricket::RelayPortFactoryInterface> _relayPortFactory;
    std::unique_ptr<cricket::BasicPortAllocator> _portAllocator;
    std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface> _asyncResolverFactory;
    std::unique_ptr<cricket::P2PTransportChannel> _transportChannel;

    PeerIceParameters _localIceParameters;
    absl::optional<PeerIceParameters> _remoteIceParameters;

    bool _isLocalNetworkLowCost = false;
    int64_t _lastNetworkActivityMs = 0;
    InterfaceTrafficStats _trafficStatsWifi;
    InterfaceTrafficStats _trafficStatsCellular;

    absl::optional<CallStatsConnectionEndpointType> _currentEndpointType;
    std::vector<CallStatsNetworkRecord> _networkRecords;
};

} // namespace tgcalls

#endif
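Given the constructor declared above, a rough wiring sketch. Every callback body is a placeholder, and thread, key, and servers stand for whatever the calling Instance supplies; only the argument order and the start() call mirror the header:

auto manager = std::make_shared<NetworkManager>(
    thread,                 // rtc::Thread the manager lives on
    std::move(key),         // EncryptionKey for the EncryptedConnection
    /*enableP2P=*/true,
    /*enableTCP=*/true,
    /*enableStunMarking=*/false,
    servers,                // std::vector<RtcServer>
    nullptr,                // no proxy
    [](const NetworkManager::State &state) { /* react to connectivity */ },
    [](DecryptedMessage &&message) { /* deliver to call logic */ },
    [](Message &&message) { /* encrypt and send over signaling */ },
    [](int delayMs, int cause) { /* schedule sendTransportService() */ });
thread->PostTask([manager]() {
    manager->start(); // must run on the network thread
});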
15
TMessagesProj/jni/voip/tgcalls/PlatformContext.h
Normal file
@ -0,0 +1,15 @@
#ifndef TGCALLS_PLATFORM_CONTEXT_H
#define TGCALLS_PLATFORM_CONTEXT_H

namespace tgcalls {

class PlatformContext {
public:
    virtual ~PlatformContext() = default;
};

} // namespace tgcalls

#endif
160
TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.cpp
Normal file
@ -0,0 +1,160 @@
#include "SctpDataChannelProviderInterfaceImpl.h"

#include "p2p/base/dtls_transport.h"
#include "api/transport/field_trial_based_config.h"
#include "FieldTrialsConfig.h"

namespace tgcalls {

SctpDataChannelProviderInterfaceImpl::SctpDataChannelProviderInterfaceImpl(
    rtc::PacketTransportInternal *transportChannel,
    bool isOutgoing,
    std::function<void(bool)> onStateChanged,
    std::function<void()> onTerminated,
    std::function<void(std::string const &)> onMessageReceived,
    std::shared_ptr<Threads> threads
) :
_weakFactory(this),
_threads(std::move(threads)),
_onStateChanged(onStateChanged),
_onTerminated(onTerminated),
_onMessageReceived(onMessageReceived) {
    assert(_threads->getNetworkThread()->IsCurrent());

    _sctpTransportFactory.reset(new cricket::SctpTransportFactory(_threads->getNetworkThread()));

    _sctpTransport = _sctpTransportFactory->CreateSctpTransport(transportChannel);
    _sctpTransport->SetDataChannelSink(this);

    // TODO: should we disconnect the data channel sink?

    webrtc::InternalDataChannelInit dataChannelInit;
    dataChannelInit.id = 0;
    dataChannelInit.open_handshake_role = isOutgoing ? webrtc::InternalDataChannelInit::kOpener : webrtc::InternalDataChannelInit::kAcker;

    _dataChannel = webrtc::SctpDataChannel::Create(
        _weakFactory.GetWeakPtr(),
        "data",
        true,
        dataChannelInit,
        _threads->getNetworkThread(),
        _threads->getNetworkThread()
    );

    _dataChannel->RegisterObserver(this);

    AddSctpDataStream(webrtc::StreamId(0));
}

SctpDataChannelProviderInterfaceImpl::~SctpDataChannelProviderInterfaceImpl() {
    assert(_threads->getNetworkThread()->IsCurrent());

    _weakFactory.InvalidateWeakPtrs();

    _dataChannel->UnregisterObserver();
    _dataChannel->Close();
    _dataChannel = nullptr;

    _sctpTransport = nullptr;
    _sctpTransportFactory.reset();
}

bool SctpDataChannelProviderInterfaceImpl::IsOkToCallOnTheNetworkThread() {
    return true;
}

void SctpDataChannelProviderInterfaceImpl::sendDataChannelMessage(std::string const &message) {
    assert(_threads->getNetworkThread()->IsCurrent());

    if (_isDataChannelOpen) {
        RTC_LOG(LS_INFO) << "Outgoing DataChannel message: " << message;

        webrtc::DataBuffer buffer(message);
        _dataChannel->Send(buffer);
    } else {
        RTC_LOG(LS_INFO) << "Could not send an outgoing DataChannel message: the channel is not open";
    }
}

void SctpDataChannelProviderInterfaceImpl::OnStateChange() {
    assert(_threads->getNetworkThread()->IsCurrent());

    auto state = _dataChannel->state();
    bool isDataChannelOpen = state == webrtc::DataChannelInterface::DataState::kOpen;
    if (_isDataChannelOpen != isDataChannelOpen) {
        _isDataChannelOpen = isDataChannelOpen;
        _onStateChanged(_isDataChannelOpen);
    }
}

void SctpDataChannelProviderInterfaceImpl::OnMessage(const webrtc::DataBuffer& buffer) {
    assert(_threads->getNetworkThread()->IsCurrent());

    if (!buffer.binary) {
        std::string messageText(buffer.data.data(), buffer.data.data() + buffer.data.size());
        RTC_LOG(LS_INFO) << "Incoming DataChannel message: " << messageText;

        _onMessageReceived(messageText);
    }
}

void SctpDataChannelProviderInterfaceImpl::updateIsConnected(bool isConnected) {
    assert(_threads->getNetworkThread()->IsCurrent());

    if (isConnected) {
        if (!_isSctpTransportStarted) {
            _isSctpTransportStarted = true;
            _sctpTransport->Start(5000, 5000, 262144);
        }
    }
}

void SctpDataChannelProviderInterfaceImpl::OnReadyToSend() {
    assert(_threads->getNetworkThread()->IsCurrent());

    _dataChannel->OnTransportReady();
}

void SctpDataChannelProviderInterfaceImpl::OnTransportClosed(webrtc::RTCError error) {
    assert(_threads->getNetworkThread()->IsCurrent());

    if (_onTerminated) {
        _onTerminated();
    }
}

void SctpDataChannelProviderInterfaceImpl::OnDataReceived(int channel_id, webrtc::DataMessageType type, const rtc::CopyOnWriteBuffer& buffer) {
    assert(_threads->getNetworkThread()->IsCurrent());

    _dataChannel->OnDataReceived(type, buffer);
}

webrtc::RTCError SctpDataChannelProviderInterfaceImpl::SendData(
    webrtc::StreamId sid,
    const webrtc::SendDataParams& params,
    const rtc::CopyOnWriteBuffer& payload
) {
    assert(_threads->getNetworkThread()->IsCurrent());

    return _sctpTransport->SendData(sid.stream_id_int(), params, payload);
}

void SctpDataChannelProviderInterfaceImpl::AddSctpDataStream(webrtc::StreamId sid) {
    assert(_threads->getNetworkThread()->IsCurrent());

    _sctpTransport->OpenStream(sid.stream_id_int());
}

void SctpDataChannelProviderInterfaceImpl::RemoveSctpDataStream(webrtc::StreamId sid) {
    assert(_threads->getNetworkThread()->IsCurrent());

    _threads->getNetworkThread()->BlockingCall([this, sid]() {
        _sctpTransport->ResetStream(sid.stream_id_int());
    });
}

void SctpDataChannelProviderInterfaceImpl::OnChannelStateChanged(webrtc::SctpDataChannel *data_channel, webrtc::DataChannelInterface::DataState state) {
}

} // namespace tgcalls
74
TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.h
Normal file
@ -0,0 +1,74 @@
#ifndef TGCALLS_SCTP_DATA_CHANNEL_PROVIDER_IMPL_H
#define TGCALLS_SCTP_DATA_CHANNEL_PROVIDER_IMPL_H

#include "rtc_base/weak_ptr.h"
#include "api/turn_customizer.h"
#include "api/data_channel_interface.h"
#include "pc/sctp_data_channel.h"
#include "media/sctp/sctp_transport_factory.h"

#include "StaticThreads.h"

namespace cricket {
class DtlsTransport;
} // namespace cricket

namespace tgcalls {

class SctpDataChannelProviderInterfaceImpl : public sigslot::has_slots<>, public webrtc::SctpDataChannelControllerInterface, public webrtc::DataChannelObserver, public webrtc::DataChannelSink {
public:
    SctpDataChannelProviderInterfaceImpl(
        rtc::PacketTransportInternal *transportChannel,
        bool isOutgoing,
        std::function<void(bool)> onStateChanged,
        std::function<void()> onTerminated,
        std::function<void(std::string const &)> onMessageReceived,
        std::shared_ptr<Threads> threads
    );
    virtual ~SctpDataChannelProviderInterfaceImpl();

    virtual bool IsOkToCallOnTheNetworkThread() override;

    void updateIsConnected(bool isConnected);
    void sendDataChannelMessage(std::string const &message);

    virtual void OnStateChange() override;
    virtual void OnMessage(const webrtc::DataBuffer& buffer) override;
    virtual webrtc::RTCError SendData(
        webrtc::StreamId sid,
        const webrtc::SendDataParams& params,
        const rtc::CopyOnWriteBuffer& payload) override;

    virtual void AddSctpDataStream(webrtc::StreamId sid) override;
    virtual void RemoveSctpDataStream(webrtc::StreamId sid) override;
    virtual void OnChannelStateChanged(webrtc::SctpDataChannel *data_channel, webrtc::DataChannelInterface::DataState state) override;

    virtual void OnDataReceived(int channel_id,
                                webrtc::DataMessageType type,
                                const rtc::CopyOnWriteBuffer& buffer) override;
    virtual void OnReadyToSend() override;
    virtual void OnTransportClosed(webrtc::RTCError error) override;

    // Unused
    virtual void OnChannelClosing(int channel_id) override {}
    virtual void OnChannelClosed(int channel_id) override {}

private:
    rtc::WeakPtrFactory<SctpDataChannelProviderInterfaceImpl> _weakFactory;
    std::shared_ptr<Threads> _threads;
    std::function<void(bool)> _onStateChanged;
    std::function<void()> _onTerminated;
    std::function<void(std::string const &)> _onMessageReceived;

    std::unique_ptr<cricket::SctpTransportFactory> _sctpTransportFactory;
    std::unique_ptr<cricket::SctpTransportInternal> _sctpTransport;
    webrtc::scoped_refptr<webrtc::SctpDataChannel> _dataChannel;

    bool _isSctpTransportStarted = false;
    bool _isDataChannelOpen = false;
};

} // namespace tgcalls

#endif
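A rough usage sketch for the provider declared above, assuming it is created on the network thread over an already-gathered ICE transport; iceTransport and threads are placeholders supplied by the caller:

auto provider = std::make_unique<SctpDataChannelProviderInterfaceImpl>(
    iceTransport,           // rtc::PacketTransportInternal*
    /*isOutgoing=*/true,    // opener side of the SCTP handshake
    [](bool open) { /* channel became (un)usable */ },
    []() { /* underlying transport closed */ },
    [](std::string const &text) { /* incoming message */ },
    threads);

// Once ICE reports connectivity, start SCTP; messages flow after kOpen.
provider->updateIsConnected(true);
provider->sendDataChannelMessage("hello");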
144
TMessagesProj/jni/voip/tgcalls/StaticThreads.cpp
Normal file
@ -0,0 +1,144 @@
#include "StaticThreads.h"

#include "rtc_base/thread.h"
#include "call/call.h"

#include <mutex>
#include <algorithm>

namespace tgcalls {

template <class ValueT, class CreatorT>
class Pool : public std::enable_shared_from_this<Pool<ValueT, CreatorT>> {
    struct Entry {
        std::unique_ptr<ValueT> value;
        size_t refcnt;

        bool operator<(const Entry &other) const {
            return refcnt < other.refcnt;
        }
    };

public:
    explicit Pool(CreatorT creator) : creator_(std::move(creator)) {
    }
    std::shared_ptr<ValueT> get() {
        std::unique_lock<std::mutex> lock(mutex_);
        set_pool_size_locked(1);
        auto i = std::min_element(entries_.begin(), entries_.end()) - entries_.begin();
        entries_[i].refcnt++; // mark the entry as used; balances dec_ref in the deleter
        return std::shared_ptr<ValueT>(entries_[i].value.get(),
                                       [i, self = this->shared_from_this()](auto *ptr) {
            self->dec_ref(i);
        });
    }

    void set_pool_size(size_t size) {
        std::unique_lock<std::mutex> lock(mutex_);
        set_pool_size_locked(size);
    }

    void dec_ref(size_t i) {
        std::unique_lock<std::mutex> lock(mutex_);
        entries_.at(i).refcnt--;
    }

private:
    std::mutex mutex_;
    std::vector<Entry> entries_;

    CreatorT creator_;

    void set_pool_size_locked(size_t size) {
        for (size_t i = entries_.size(); i < size; i++) {
            entries_.emplace_back(Entry{creator_(i + 1), 0});
        }
    }
};

class ThreadsImpl : public Threads {
    using Thread = std::unique_ptr<rtc::Thread>;
public:
    explicit ThreadsImpl(size_t i) {
        auto suffix = i == 0 ? "" : "#" + std::to_string(i);
        media_ = create("tgc-media" + suffix);
        worker_ = create("tgc-work" + suffix);
        network_ = create_network("tgc-net" + suffix);

        media_->AllowInvokesToThread(worker_.get());
        media_->AllowInvokesToThread(network_.get());
        worker_->AllowInvokesToThread(network_.get());

        //network_->DisallowAllInvokes();
        //worker_->DisallowAllInvokes();
        //worker_->AllowInvokesToThread(network_.get());
    }

    rtc::Thread *getNetworkThread() override {
        return network_.get();
    }
    rtc::Thread *getMediaThread() override {
        return media_.get();
    }
    rtc::Thread *getWorkerThread() override {
        return worker_.get();
    }

private:
    Thread network_;
    Thread media_;
    Thread worker_;

    static Thread create(const std::string &name) {
        return init(std::unique_ptr<rtc::Thread>(rtc::Thread::Create()), name);
    }
    static Thread create_network(const std::string &name) {
        return init(std::unique_ptr<rtc::Thread>(rtc::Thread::CreateWithSocketServer()), name);
    }

    static Thread init(Thread value, const std::string &name) {
        value->SetName(name, nullptr);
        value->Start();
        return value;
    }
};

class ThreadsCreator {
public:
    std::unique_ptr<Threads> operator()(size_t i) {
        return std::make_unique<ThreadsImpl>(i);
    }
};

Pool<Threads, ThreadsCreator> &get_pool() {
    static auto pool = std::make_shared<Pool<Threads, ThreadsCreator>>(ThreadsCreator());
    return *pool;
}

void Threads::setPoolSize(size_t size) {
    get_pool().set_pool_size(size);
}
std::shared_ptr<Threads> Threads::getThreads() {
    return get_pool().get();
}

namespace StaticThreads {

rtc::Thread *getNetworkThread() {
    return getThreads()->getNetworkThread();
}

rtc::Thread *getMediaThread() {
    return getThreads()->getMediaThread();
}

rtc::Thread *getWorkerThread() {
    return getThreads()->getWorkerThread();
}

std::shared_ptr<Threads> &getThreads() {
    static std::shared_ptr<Threads> threads = std::make_shared<ThreadsImpl>(0);
    return threads;
}

} // namespace StaticThreads

} // namespace tgcalls
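Pool hands out aliasing shared_ptrs whose deleter only decrements the entry's reference count, so the underlying thread sets are reused rather than destroyed, and get() always picks the least-referenced entry. A short usage sketch, assuming the refcount is bumped on get() as above:

// setPoolSize() can only grow the pool (see the header note below).
tgcalls::Threads::setPoolSize(2);

auto threadsA = tgcalls::Threads::getThreads(); // least-referenced: entry 0
auto threadsB = tgcalls::Threads::getThreads(); // entry 1 now has fewer refs

threadsA->getWorkerThread()->PostTask([]() {
    // Runs on the thread named "tgc-work#1".
});

// Dropping the pointer only decrements the entry's refcount; the
// rtc::Thread objects stay alive inside the pool for the next call.
threadsB = nullptr;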
31
TMessagesProj/jni/voip/tgcalls/StaticThreads.h
Normal file
@ -0,0 +1,31 @@
#pragma once

#include <cstddef>
#include <memory>

namespace rtc {
class Thread;
}

namespace tgcalls {

class Threads {
public:
    virtual ~Threads() = default;
    virtual rtc::Thread *getNetworkThread() = 0;
    virtual rtc::Thread *getMediaThread() = 0;
    virtual rtc::Thread *getWorkerThread() = 0;

    // It is not possible to decrease the pool size.
    static void setPoolSize(size_t size);
    static std::shared_ptr<Threads> getThreads();
};

namespace StaticThreads {
rtc::Thread *getNetworkThread();
rtc::Thread *getMediaThread();
rtc::Thread *getWorkerThread();
std::shared_ptr<Threads> &getThreads();
}

} // namespace tgcalls
30
TMessagesProj/jni/voip/tgcalls/Stats.h
Normal file
@ -0,0 +1,30 @@
#ifndef TGCALLS_STATS_H
#define TGCALLS_STATS_H

#include <cstdint>
#include <string>
#include <vector>

namespace tgcalls {

enum class CallStatsConnectionEndpointType {
    ConnectionEndpointP2P = 0,
    ConnectionEndpointTURN = 1
};

struct CallStatsNetworkRecord {
    int32_t timestamp = 0;
    CallStatsConnectionEndpointType endpointType = CallStatsConnectionEndpointType::ConnectionEndpointP2P;
    bool isLowCost = false;
};

struct CallStatsBitrateRecord {
    int32_t timestamp = 0;
    int32_t bitrate = 0;
};

struct CallStats {
    std::string outgoingCodec;
    std::vector<CallStatsNetworkRecord> networkRecords;
    std::vector<CallStatsBitrateRecord> bitrateRecords;
};

} // namespace tgcalls

#endif
1
TMessagesProj/jni/voip/tgcalls/ThreadLocalObject.cpp
Normal file
@ -0,0 +1 @@
58
TMessagesProj/jni/voip/tgcalls/ThreadLocalObject.h
Normal file
@ -0,0 +1,58 @@
#ifndef TGCALLS_THREAD_LOCAL_OBJECT_H
#define TGCALLS_THREAD_LOCAL_OBJECT_H

#include "rtc_base/thread.h"

#include <functional>
#include <memory>

namespace tgcalls {

template <typename T>
class ThreadLocalObject {
public:
    template <
        typename Generator,
        typename = std::enable_if_t<std::is_same<std::shared_ptr<T>, decltype(std::declval<Generator>()())>::value>>
    ThreadLocalObject(rtc::Thread *thread, Generator &&generator) :
    _thread(thread),
    _valueHolder(std::make_unique<ValueHolder>()) {
        assert(_thread != nullptr);
        _thread->PostTask([valueHolder = _valueHolder.get(), generator = std::forward<Generator>(generator)]() mutable {
            valueHolder->_value = generator();
        });
    }

    ~ThreadLocalObject() {
        _thread->PostTask([valueHolder = std::move(_valueHolder)](){
            valueHolder->_value.reset();
        });
    }

    template <typename FunctorT>
    void perform(FunctorT &&functor) {
        _thread->PostTask([valueHolder = _valueHolder.get(), f = std::forward<FunctorT>(functor)]() mutable {
            assert(valueHolder->_value != nullptr);
            f(valueHolder->_value.get());
        });
    }

    T *getSyncAssumingSameThread() {
        assert(_thread->IsCurrent());
        assert(_valueHolder->_value != nullptr);
        return _valueHolder->_value.get();
    }

private:
    struct ValueHolder {
        std::shared_ptr<T> _value;
    };

    rtc::Thread *_thread = nullptr;
    std::unique_ptr<ValueHolder> _valueHolder;
};

} // namespace tgcalls

#endif
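ThreadLocalObject pins an object's entire lifecycle to one rtc::Thread: the generator, every perform() functor, and the teardown all run there. A small sketch; the Counter type and the thread variable are hypothetical:

struct Counter {
    int value = 0;
};

tgcalls::ThreadLocalObject<Counter> counter(thread, []() {
    return std::make_shared<Counter>(); // constructed on "thread"
});
counter.perform([](Counter *c) {
    c->value++; // also runs on "thread", so no races with construction
});
// The destructor posts teardown to "thread" as well, so the Counter is
// never destroyed from the wrong thread.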
21
TMessagesProj/jni/voip/tgcalls/TurnCustomizerImpl.cpp
Normal file
@ -0,0 +1,21 @@
#include "TurnCustomizerImpl.h"

#include "api/transport/stun.h"

namespace tgcalls {

TurnCustomizerImpl::TurnCustomizerImpl() {
}

TurnCustomizerImpl::~TurnCustomizerImpl() {
}

void TurnCustomizerImpl::MaybeModifyOutgoingStunMessage(cricket::PortInterface* port, cricket::StunMessage* message) {
    message->AddAttribute(std::make_unique<cricket::StunByteStringAttribute>(cricket::STUN_ATTR_SOFTWARE, "Telegram "));
}

bool TurnCustomizerImpl::AllowChannelData(cricket::PortInterface* port, const void *data, size_t size, bool payload) {
    return true;
}

} // namespace tgcalls
19
TMessagesProj/jni/voip/tgcalls/TurnCustomizerImpl.h
Normal file
@ -0,0 +1,19 @@
#ifndef TGCALLS_TURN_CUSTOMIZER_H
#define TGCALLS_TURN_CUSTOMIZER_H

#include "api/turn_customizer.h"

namespace tgcalls {

class TurnCustomizerImpl : public webrtc::TurnCustomizer {
public:
    TurnCustomizerImpl();
    virtual ~TurnCustomizerImpl();

    void MaybeModifyOutgoingStunMessage(cricket::PortInterface* port, cricket::StunMessage* message) override;
    bool AllowChannelData(cricket::PortInterface* port, const void *data, size_t size, bool payload) override;
};

} // namespace tgcalls

#endif
16
TMessagesProj/jni/voip/tgcalls/VideoCaptureInterface.cpp
Normal file
@ -0,0 +1,16 @@
#include "VideoCaptureInterface.h"

#include "VideoCaptureInterfaceImpl.h"
#include "tgnet/FileLog.h"

namespace tgcalls {

std::unique_ptr<VideoCaptureInterface> VideoCaptureInterface::Create(
    std::shared_ptr<Threads> threads, std::string deviceId, bool isScreenCapture,
    std::shared_ptr<PlatformContext> platformContext) {
    return std::make_unique<VideoCaptureInterfaceImpl>(deviceId, isScreenCapture, platformContext, std::move(threads));
}

VideoCaptureInterface::~VideoCaptureInterface() = default;

} // namespace tgcalls
66
TMessagesProj/jni/voip/tgcalls/VideoCaptureInterface.h
Normal file
@ -0,0 +1,66 @@
#ifndef TGCALLS_VIDEO_CAPTURE_INTERFACE_H
#define TGCALLS_VIDEO_CAPTURE_INTERFACE_H

#include <string>
#include <memory>
#include <functional>

namespace rtc {
template <typename VideoFrameT>
class VideoSinkInterface;
} // namespace rtc

namespace webrtc {
class VideoFrame;
} // namespace webrtc

namespace tgcalls {

class PlatformContext;
class Threads;

enum class VideoState {
    Inactive,
    Paused,
    Active,
};

class VideoCaptureInterface {
protected:
    VideoCaptureInterface() = default;

public:
    static std::unique_ptr<VideoCaptureInterface> Create(
        std::shared_ptr<Threads> threads,
        std::string deviceId = std::string(),
        bool isScreenCapture = false,
        std::shared_ptr<PlatformContext> platformContext = nullptr);

    virtual ~VideoCaptureInterface();

    virtual void switchToDevice(std::string deviceId, bool isScreenCapture) = 0;
    virtual void setState(VideoState state) = 0;
    virtual void setPreferredAspectRatio(float aspectRatio) = 0;
    virtual void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
    virtual void setOnFatalError(std::function<void()> error) {
        // TODO: make this function pure virtual when everybody implements it.
    }
    virtual void setOnPause(std::function<void(bool)> pause) {
        // TODO: make this function pure virtual when everybody implements it.
    }
    virtual void setOnIsActiveUpdated(std::function<void(bool)> onIsActiveUpdated) {
        // TODO: make this function pure virtual when everybody implements it.
    }
    virtual void withNativeImplementation(std::function<void(void *)> completion) {
        completion(nullptr);
    }

    virtual std::shared_ptr<PlatformContext> getPlatformContext() {
        return nullptr;
    }
};

} // namespace tgcalls

#endif
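A usage sketch for the factory above; threads and previewSink are placeholders, and the "front" device id is an assumption about what the platform capturer accepts:

auto capture = tgcalls::VideoCaptureInterface::Create(
    threads,
    std::string(),              // empty id selects the default camera
    /*isScreenCapture=*/false);

capture->setOutput(previewSink); // std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>
capture->setState(tgcalls::VideoState::Active);

// Later: switch source without recreating the capture object.
capture->switchToDevice("front", /*isScreenCapture=*/false);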
230
TMessagesProj/jni/voip/tgcalls/VideoCaptureInterfaceImpl.cpp
Normal file
@ -0,0 +1,230 @@
#include "VideoCaptureInterfaceImpl.h"

#include "VideoCapturerInterface.h"
#include "Manager.h"
#include "MediaManager.h"
#include "platform/PlatformInterface.h"
#include "StaticThreads.h"

namespace tgcalls {

VideoCaptureInterfaceObject::VideoCaptureInterfaceObject(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, Threads &threads)
: _videoSource(PlatformInterface::SharedInstance()->makeVideoSource(threads.getMediaThread(), threads.getWorkerThread(), isScreenCapture)), _platformContext(platformContext) {
    switchToDevice(deviceId, isScreenCapture);
}

VideoCaptureInterfaceObject::~VideoCaptureInterfaceObject() {
    if (_videoCapturer) {
        _videoCapturer->setUncroppedOutput(nullptr);
    }
}

webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> VideoCaptureInterfaceObject::source() {
    return _videoSource;
}

int VideoCaptureInterfaceObject::getRotation() {
    if (_videoCapturer) {
        return _videoCapturer->getRotation();
    } else {
        return 0;
    }
}

bool VideoCaptureInterfaceObject::isScreenCapture() {
    return _isScreenCapture;
}

void VideoCaptureInterfaceObject::switchToDevice(std::string deviceId, bool isScreenCapture) {
    if (_videoCapturer) {
        _videoCapturer->setUncroppedOutput(nullptr);
    }
    _isScreenCapture = isScreenCapture;
    if (_videoSource) {
        //this should outlive the capturer
        _videoCapturer = nullptr;
        _videoCapturer = PlatformInterface::SharedInstance()->makeVideoCapturer(_videoSource, deviceId, [this](VideoState state) {
            if (this->_stateUpdated) {
                this->_stateUpdated(state);
            }
            if (this->_onIsActiveUpdated) {
                switch (state) {
                    case VideoState::Active: {
                        this->_onIsActiveUpdated(true);
                        break;
                    }
                    default: {
                        this->_onIsActiveUpdated(false);
                        break;
                    }
                }
            }
        }, [this](PlatformCaptureInfo info) {
            if (this->_shouldBeAdaptedToReceiverAspectRate != info.shouldBeAdaptedToReceiverAspectRate) {
                this->_shouldBeAdaptedToReceiverAspectRate = info.shouldBeAdaptedToReceiverAspectRate;
            }
            if (this->_rotationUpdated) {
                this->_rotationUpdated(info.rotation);
            }
            this->updateAspectRateAdaptation();
        }, _platformContext, _videoCapturerResolution);
    }
    if (_videoCapturer) {
        if (_preferredAspectRatio > 0) {
            _videoCapturer->setPreferredCaptureAspectRatio(_preferredAspectRatio);
        }
//        if (const auto currentUncroppedSink = _currentUncroppedSink.lock()) {
        _videoCapturer->setUncroppedOutput(_currentUncroppedSink);
//        }
        if (_onFatalError) {
            _videoCapturer->setOnFatalError(_onFatalError);
        }
        if (_onPause) {
            _videoCapturer->setOnPause(_onPause);
        }
        _videoCapturer->setState(_state);
    }
}

void VideoCaptureInterfaceObject::withNativeImplementation(std::function<void(void *)> completion) {
    if (_videoCapturer) {
        _videoCapturer->withNativeImplementation(completion);
    } else {
        completion(nullptr);
    }
}

void VideoCaptureInterfaceObject::setState(VideoState state) {
    if (_state != state) {
        _state = state;
        if (_videoCapturer) {
            _videoCapturer->setState(state);
        }
    }
}

void VideoCaptureInterfaceObject::setPreferredAspectRatio(float aspectRatio) {
    _preferredAspectRatio = aspectRatio;
    updateAspectRateAdaptation();
}

void VideoCaptureInterfaceObject::updateAspectRateAdaptation() {
    if (_videoCapturer) {
        if (_videoCapturerResolution.first != 0 && _videoCapturerResolution.second != 0) {
            if (_preferredAspectRatio > 0.01 && _shouldBeAdaptedToReceiverAspectRate) {
                float originalWidth = (float)_videoCapturerResolution.first;
                float originalHeight = (float)_videoCapturerResolution.second;

                float aspectRatio = _preferredAspectRatio;

                float width = (originalWidth > aspectRatio * originalHeight)
                    ? int(std::round(aspectRatio * originalHeight))
                    : originalWidth;
                float height = (originalWidth > aspectRatio * originalHeight)
                    ? originalHeight
                    : int(std::round(originalHeight / aspectRatio));

                PlatformInterface::SharedInstance()->adaptVideoSource(_videoSource, (int)width, (int)height, 25);
            } else {
                PlatformInterface::SharedInstance()->adaptVideoSource(_videoSource, _videoCapturerResolution.first, _videoCapturerResolution.second, 25);
            }
        }
    }
}

void VideoCaptureInterfaceObject::setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    if (_videoCapturer) {
        _videoCapturer->setUncroppedOutput(sink);
    }
    _currentUncroppedSink = sink;
}

void VideoCaptureInterfaceObject::setOnFatalError(std::function<void()> error) {
    if (_videoCapturer) {
        _videoCapturer->setOnFatalError(error);
    }
    _onFatalError = error;
}

void VideoCaptureInterfaceObject::setOnPause(std::function<void(bool)> pause) {
    if (_videoCapturer) {
        _videoCapturer->setOnPause(pause);
    }
    _onPause = pause;
}

void VideoCaptureInterfaceObject::setOnIsActiveUpdated(std::function<void(bool)> onIsActiveUpdated) {
    _onIsActiveUpdated = onIsActiveUpdated;
}

void VideoCaptureInterfaceObject::setStateUpdated(std::function<void(VideoState)> stateUpdated) {
    _stateUpdated = stateUpdated;
}

void VideoCaptureInterfaceObject::setRotationUpdated(std::function<void(int)> rotationUpdated) {
    _rotationUpdated = rotationUpdated;
}

VideoCaptureInterfaceImpl::VideoCaptureInterfaceImpl(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, std::shared_ptr<Threads> threads) :
_platformContext(platformContext),
_impl(threads->getMediaThread(), [deviceId, isScreenCapture, platformContext, threads]() {
    return std::make_shared<VideoCaptureInterfaceObject>(deviceId, isScreenCapture, platformContext, *threads);
}) {
}

VideoCaptureInterfaceImpl::~VideoCaptureInterfaceImpl() = default;

void VideoCaptureInterfaceImpl::switchToDevice(std::string deviceId, bool isScreenCapture) {
    _impl.perform([deviceId, isScreenCapture](VideoCaptureInterfaceObject *impl) {
        impl->switchToDevice(deviceId, isScreenCapture);
    });
}

void VideoCaptureInterfaceImpl::withNativeImplementation(std::function<void(void *)> completion) {
    _impl.perform([completion](VideoCaptureInterfaceObject *impl) {
        impl->withNativeImplementation(completion);
    });
}

void VideoCaptureInterfaceImpl::setState(VideoState state) {
    _impl.perform([state](VideoCaptureInterfaceObject *impl) {
        impl->setState(state);
    });
}

void VideoCaptureInterfaceImpl::setPreferredAspectRatio(float aspectRatio) {
    _impl.perform([aspectRatio](VideoCaptureInterfaceObject *impl) {
        impl->setPreferredAspectRatio(aspectRatio);
    });
}

void VideoCaptureInterfaceImpl::setOnFatalError(std::function<void()> error) {
    _impl.perform([error](VideoCaptureInterfaceObject *impl) {
        impl->setOnFatalError(error);
    });
}

void VideoCaptureInterfaceImpl::setOnPause(std::function<void(bool)> pause) {
    _impl.perform([pause](VideoCaptureInterfaceObject *impl) {
        impl->setOnPause(pause);
    });
}

void VideoCaptureInterfaceImpl::setOnIsActiveUpdated(std::function<void(bool)> onIsActiveUpdated) {
    _impl.perform([onIsActiveUpdated](VideoCaptureInterfaceObject *impl) {
        impl->setOnIsActiveUpdated(onIsActiveUpdated);
    });
}

void VideoCaptureInterfaceImpl::setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _impl.perform([sink](VideoCaptureInterfaceObject *impl) {
        impl->setOutput(sink);
    });
}

std::shared_ptr<PlatformContext> VideoCaptureInterfaceImpl::getPlatformContext() {
    return _platformContext;
}

ThreadLocalObject<VideoCaptureInterfaceObject> *VideoCaptureInterfaceImpl::object() {
    return &_impl;
}

} // namespace tgcalls
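The fitting math in updateAspectRateAdaptation() shrinks exactly one dimension so the source matches the receiver's preferred ratio. A worked example as comments; the numbers are illustrative, not from the source:

// Capture resolution 1280x720 with preferred aspect ratio 1.0 (square):
//   originalWidth = 1280, originalHeight = 720, aspectRatio = 1.0
//   1280 > 1.0 * 720, so width = round(1.0 * 720) = 720 and height = 720
//   -> adaptVideoSource(_videoSource, 720, 720, 25): width is cropped.
// Same source with a 9:16 portrait target (aspectRatio = 0.5625):
//   1280 > 0.5625 * 720 = 405, so width = 405 and height = 720.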
79
TMessagesProj/jni/voip/tgcalls/VideoCaptureInterfaceImpl.h
Normal file
@ -0,0 +1,79 @@
#ifndef TGCALLS_VIDEO_CAPTURE_INTERFACE_IMPL_H
#define TGCALLS_VIDEO_CAPTURE_INTERFACE_IMPL_H

#include "VideoCaptureInterface.h"
#include <memory>
#include "ThreadLocalObject.h"
#include "api/media_stream_interface.h"
#include "platform/PlatformInterface.h"

namespace tgcalls {

class VideoCapturerInterface;
class Threads;

class VideoCaptureInterfaceObject {
public:
    VideoCaptureInterfaceObject(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, Threads &threads);
    ~VideoCaptureInterfaceObject();

    void switchToDevice(std::string deviceId, bool isScreenCapture);
    void withNativeImplementation(std::function<void(void *)> completion);
    void setState(VideoState state);
    void setPreferredAspectRatio(float aspectRatio);
    void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setStateUpdated(std::function<void(VideoState)> stateUpdated);
    void setRotationUpdated(std::function<void(int)> rotationUpdated);
    void setOnFatalError(std::function<void()> error);
    void setOnPause(std::function<void(bool)> pause);
    void setOnIsActiveUpdated(std::function<void(bool)> onIsActiveUpdated);
    webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source();
    int getRotation();
    bool isScreenCapture();

private:
    void updateAspectRateAdaptation();

    webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _videoSource;
    std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _currentUncroppedSink;
    std::shared_ptr<PlatformContext> _platformContext;
    std::pair<int, int> _videoCapturerResolution;
    std::unique_ptr<VideoCapturerInterface> _videoCapturer;
    std::function<void(VideoState)> _stateUpdated;
    std::function<void()> _onFatalError;
    std::function<void(bool)> _onPause;
    std::function<void(bool)> _onIsActiveUpdated;
    std::function<void(int)> _rotationUpdated;
    VideoState _state = VideoState::Active;
    float _preferredAspectRatio = 0.0f;
    bool _shouldBeAdaptedToReceiverAspectRate = true;
    bool _isScreenCapture = false;
};

class VideoCaptureInterfaceImpl : public VideoCaptureInterface {
public:
    VideoCaptureInterfaceImpl(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, std::shared_ptr<Threads> threads);
    virtual ~VideoCaptureInterfaceImpl();

    void switchToDevice(std::string deviceId, bool isScreenCapture) override;
    void withNativeImplementation(std::function<void(void *)> completion) override;
    void setState(VideoState state) override;
    void setPreferredAspectRatio(float aspectRatio) override;
    void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
    void setOnFatalError(std::function<void()> error) override;
    void setOnPause(std::function<void(bool)> pause) override;
    void setOnIsActiveUpdated(std::function<void(bool)> onIsActiveUpdated) override;
    std::shared_ptr<PlatformContext> getPlatformContext() override;

    ThreadLocalObject<VideoCaptureInterfaceObject> *object();

private:
    ThreadLocalObject<VideoCaptureInterfaceObject> _impl;

    std::shared_ptr<PlatformContext> _platformContext;
};

} // namespace tgcalls

#endif
42
TMessagesProj/jni/voip/tgcalls/VideoCapturerInterface.h
Normal file
@ -0,0 +1,42 @@
#ifndef TGCALLS_VIDEO_CAPTURER_INTERFACE_H
#define TGCALLS_VIDEO_CAPTURER_INTERFACE_H

#include "Instance.h"

#include <memory>
#include <functional>

namespace rtc {
template <typename VideoFrameT>
class VideoSinkInterface;
} // namespace rtc

namespace webrtc {
class VideoFrame;
} // namespace webrtc

namespace tgcalls {

class VideoCapturerInterface {
public:
    virtual ~VideoCapturerInterface() = default;

    virtual void setState(VideoState state) = 0;
    virtual void setPreferredCaptureAspectRatio(float aspectRatio) = 0;
    virtual void setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
    virtual int getRotation() = 0;
    virtual void setOnFatalError(std::function<void()> error) {
        // TODO: make this function pure virtual when everybody implements it.
    }
    virtual void setOnPause(std::function<void(bool)> pause) {
        // TODO: make this function pure virtual when everybody implements it.
    }
    virtual void withNativeImplementation(std::function<void(void *)> completion) {
        completion(nullptr);
    }
};

} // namespace tgcalls

#endif
@ -0,0 +1,68 @@
//
// DesktopCaptureSource.m
// TgVoipWebrtc
//
// Created by Mikhail Filimonov on 29.12.2020.
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
//

#include "tgcalls/desktop_capturer/DesktopCaptureSource.h"

namespace tgcalls {

std::string DesktopCaptureSourceData::cachedKey() const {
    return std::to_string(aspectSize.width)
        + 'x'
        + std::to_string(aspectSize.height)
        + ':'
        + std::to_string(fps)
        + ':'
        + (captureMouse ? '1' : '0');
}

DesktopCaptureSource::DesktopCaptureSource(
    long long uniqueId,
    std::string title,
    bool isWindow)
: _uniqueId(uniqueId)
, _title(std::move(title))
, _isWindow(isWindow) {
}

long long DesktopCaptureSource::uniqueId() const {
    return _uniqueId;
}

bool DesktopCaptureSource::isWindow() const {
    return _isWindow;
}

std::string DesktopCaptureSource::deviceIdKey() const {
    return std::string("desktop_capturer_")
        + (_isWindow ? "window_" : "screen_")
        + std::to_string(uniqueId());
}

std::string DesktopCaptureSource::title() const {
    return _isWindow ? _title : "Screen";
}

std::string DesktopCaptureSource::uniqueKey() const {
    return std::to_string(_uniqueId)
        + ':'
        + (_isWindow ? "Window" : "Screen");
}

std::string DesktopCaptureSource::deviceIdKey() {
    return static_cast<const DesktopCaptureSource*>(this)->deviceIdKey();
}

std::string DesktopCaptureSource::title() {
    return static_cast<const DesktopCaptureSource*>(this)->title();
}

std::string DesktopCaptureSource::uniqueKey() {
    return static_cast<const DesktopCaptureSource*>(this)->uniqueKey();
}

} // namespace tgcalls
@ -0,0 +1,85 @@
//
// DesktopCaptureSource.h
// TgVoipWebrtc
//
// Created by Mikhail Filimonov on 29.12.2020.
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
//
#ifndef TGCALLS_DESKTOP_CAPTURE_SOURCE_H__
#define TGCALLS_DESKTOP_CAPTURE_SOURCE_H__

#include <string>

#ifdef WEBRTC_WIN
// Compiler errors in conflicting Windows headers if not included here.
#include <winsock2.h>
#endif // WEBRTC_WIN

namespace tgcalls {

class VideoSource {
public:
    virtual ~VideoSource() = default;

    virtual std::string deviceIdKey() = 0;
    virtual std::string title() = 0;
    virtual std::string uniqueKey() = 0;
};

struct DesktopSize {
    int width = 0;
    int height = 0;
};

struct DesktopCaptureSourceData {
    DesktopSize aspectSize;
    double fps = 24.;
    bool captureMouse = true;

    std::string cachedKey() const;
};

class DesktopCaptureSource : public VideoSource {
public:
    DesktopCaptureSource(
        long long uniqueId,
        std::string title,
        bool isWindow);

    static DesktopCaptureSource Invalid() {
        return InvalidTag{};
    }

    long long uniqueId() const;
    bool isWindow() const;

    std::string deviceIdKey() const;
    std::string title() const;
    std::string uniqueKey() const;

    bool valid() const {
        return _valid;
    }
    explicit operator bool() const {
        return _valid;
    }

private:
    struct InvalidTag {};
    DesktopCaptureSource(InvalidTag) : _valid(false) {
    }

    std::string deviceIdKey() override;
    std::string title() override;
    std::string uniqueKey() override;

    long long _uniqueId = 0;
    std::string _title;
    bool _isWindow = false;
    bool _valid = true;
};

} // namespace tgcalls

#endif // TGCALLS_DESKTOP_CAPTURE_SOURCE_H__
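For orientation, the string formats the key accessors above produce, shown with hypothetical values:

// A window source with uniqueId 42 and title "Editor":
//   deviceIdKey() -> "desktop_capturer_window_42"  (stable capturer id)
//   uniqueKey()   -> "42:Window"                   (identity key)
// DesktopCaptureSourceData{ {1280, 720}, 24., true }.cachedKey()
//   -> "1280x720:24.000000:1"                      (std::to_string on double)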
@ -0,0 +1,461 @@
//
// DesktopCaptureSourceHelper.m
// TgVoipWebrtc
//
// Created by Mikhail Filimonov on 28.12.2020.
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
//

#include "tgcalls/desktop_capturer/DesktopCaptureSourceHelper.h"

#include <iostream>
#include <memory>
#include <algorithm>
#include <chrono>
#include <vector>
#include <functional>

#include "tgcalls/desktop_capturer/DesktopCaptureSourceManager.h"
#include "rtc_base/thread.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_frame.h"
#include "modules/desktop_capture/desktop_and_cursor_composer.h"
#include "modules/desktop_capture/desktop_capturer.h"
#include "system_wrappers/include/clock.h"
#include "api/video/i420_buffer.h"
#include "third_party/libyuv/include/libyuv.h"

#ifdef WEBRTC_MAC
#import <QuartzCore/QuartzCore.h>
#endif // WEBRTC_MAC

namespace tgcalls {
namespace {

#ifdef WEBRTC_MAC
class CaptureScheduler {
public:
    void runAsync(std::function<void()> method) {
        dispatch_async(dispatch_get_main_queue(), ^{
            method();
        });
    }
    void runDelayed(int delayMs, std::function<void()> method) {
        const auto time = dispatch_time(
            DISPATCH_TIME_NOW,
            ((long long)delayMs * NSEC_PER_SEC) / 1000);
        dispatch_after(time, dispatch_get_main_queue(), ^{
            method();
        });
    }
};
#else // WEBRTC_MAC
rtc::Thread *GlobalCapturerThread() {
    static auto result = [] {
        auto thread = rtc::Thread::Create();
        thread->SetName("WebRTC-DesktopCapturer", nullptr);
        thread->Start();
        return thread;
    }();
    return result.get();
}

class CaptureScheduler {
public:
    CaptureScheduler() : _thread(GlobalCapturerThread()) {
    }

    void runAsync(std::function<void()> method) {
        _thread->PostTask(std::move(method));
    }
    void runDelayed(int delayMs, std::function<void()> method) {
        _thread->PostDelayedTask(std::move(method), webrtc::TimeDelta::Millis(delayMs));
    }

private:
    rtc::Thread *_thread;
};
#endif // WEBRTC_MAC

class SourceFrameCallbackImpl : public webrtc::DesktopCapturer::Callback {
public:
    SourceFrameCallbackImpl(DesktopSize size, int fps);

    void OnCaptureResult(
        webrtc::DesktopCapturer::Result result,
        std::unique_ptr<webrtc::DesktopFrame> frame) override;
    void setOutput(
        std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setSecondaryOutput(
        std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setOnFatalError(std::function<void ()>);
    void setOnPause(std::function<void (bool)>);

private:
    rtc::scoped_refptr<webrtc::I420Buffer> i420_buffer_;
    std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _sink;
    std::shared_ptr<
        rtc::VideoSinkInterface<webrtc::VideoFrame>> _secondarySink;
    DesktopSize size_;
    std::function<void ()> _onFatalError;
    std::function<void (bool)> _onPause;
};

class DesktopSourceRenderer {
public:
    DesktopSourceRenderer(
        CaptureScheduler &scheduler,
        DesktopCaptureSource source,
        DesktopCaptureSourceData data);

    void start();
    void stop();
    void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setSecondaryOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void loop();
    void setOnFatalError(std::function<void ()>);
    void setOnPause(std::function<void (bool)>);

private:
    CaptureScheduler &_scheduler;
    std::unique_ptr<webrtc::DesktopCapturer> _capturer;
    SourceFrameCallbackImpl _callback;
    std::shared_ptr<bool> _timerGuard;
    std::function<void()> _onFatalError;
    std::function<void(bool)> _onPause;
    bool _isRunning = false;
    bool _fatalError = false;
    bool _currentlyOnPause = false;
    double _delayMs = 0.;
};

SourceFrameCallbackImpl::SourceFrameCallbackImpl(DesktopSize size, int fps)
: size_(size) {
}

void SourceFrameCallbackImpl::OnCaptureResult(
    webrtc::DesktopCapturer::Result result,
    std::unique_ptr<webrtc::DesktopFrame> frame) {

    const auto failed = (result != webrtc::DesktopCapturer::Result::SUCCESS)
        || !frame
        || frame->size().equals({ 1, 1 });
    if (failed) {
        if (result == webrtc::DesktopCapturer::Result::ERROR_PERMANENT) {
            if (_onFatalError) {
                _onFatalError();
            }
        } else if (_onPause) {
            _onPause(true);
        }
        return;
    } else if (_onPause) {
        _onPause(false);
    }

    const auto frameSize = frame->size();
    auto fittedSize = (frameSize.width() >= size_.width * 2
        || frameSize.height() >= size_.height * 2)
        ? DesktopSize{ frameSize.width() / 2, frameSize.height() / 2 }
        : DesktopSize{ frameSize.width(), frameSize.height() };

    fittedSize.width -= (fittedSize.width % 4);
    fittedSize.height -= (fittedSize.height % 4);

    const auto outputSize = webrtc::DesktopSize{
        fittedSize.width,
        fittedSize.height
    };

    webrtc::BasicDesktopFrame outputFrame{ outputSize };

    const auto outputRect = webrtc::DesktopRect::MakeSize(outputSize);

    const auto outputRectData = outputFrame.data() +
        outputFrame.stride() * outputRect.top() +
        webrtc::DesktopFrame::kBytesPerPixel * outputRect.left();

    libyuv::ARGBScale(
        frame->data(),
        frame->stride(),
        frame->size().width(),
        frame->size().height(),
        outputRectData,
        outputFrame.stride(),
        outputSize.width(),
        outputSize.height(),
        libyuv::kFilterBilinear);

    int width = outputFrame.size().width();
    int height = outputFrame.size().height();
    int stride_y = width;
    int stride_uv = (width + 1) / 2;

    if (!i420_buffer_
        || i420_buffer_->width() != width
        || i420_buffer_->height() != height) {
        i420_buffer_ = webrtc::I420Buffer::Create(
            width,
            height,
            stride_y,
            stride_uv,
            stride_uv);
    }

    int i420Result = libyuv::ConvertToI420(
        outputFrame.data(),
        width * height,
        i420_buffer_->MutableDataY(), i420_buffer_->StrideY(),
        i420_buffer_->MutableDataU(), i420_buffer_->StrideU(),
        i420_buffer_->MutableDataV(), i420_buffer_->StrideV(),
        0, 0,
        width, height,
        width, height,
        libyuv::kRotate0,
        libyuv::FOURCC_ARGB);

    assert(i420Result == 0);
|
||||
(void)i420Result;
|
||||
webrtc::VideoFrame nativeVideoFrame = webrtc::VideoFrame(
|
||||
i420_buffer_,
|
||||
webrtc::kVideoRotation_0,
|
||||
webrtc::Clock::GetRealTimeClock()->CurrentTime().us());
|
||||
if (const auto sink = _sink.get()) {
|
||||
_sink->OnFrame(nativeVideoFrame);
|
||||
}
|
||||
if (const auto sink = _secondarySink.get()) {
|
||||
sink->OnFrame(nativeVideoFrame);
|
||||
}
|
||||
}
|
||||
|
||||
void SourceFrameCallbackImpl::setOutput(
|
||||
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
|
||||
_sink = std::move(sink);
|
||||
}
|
||||
|
||||
void SourceFrameCallbackImpl::setOnFatalError(std::function<void ()> error) {
|
||||
_onFatalError = error;
|
||||
}
|
||||
void SourceFrameCallbackImpl::setOnPause(std::function<void (bool)> pause) {
|
||||
_onPause = pause;
|
||||
}
|
||||
|
||||
void SourceFrameCallbackImpl::setSecondaryOutput(
|
||||
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
|
||||
_secondarySink = std::move(sink);
|
||||
}
|
||||
|
||||
DesktopSourceRenderer::DesktopSourceRenderer(
|
||||
CaptureScheduler &scheduler,
|
||||
DesktopCaptureSource source,
|
||||
DesktopCaptureSourceData data)
|
||||
: _scheduler(scheduler)
|
||||
, _callback(data.aspectSize, data.fps)
|
||||
, _delayMs(1000. / data.fps) {
|
||||
_callback.setOnFatalError([=] {
|
||||
stop();
|
||||
_fatalError = true;
|
||||
if (_onFatalError) _onFatalError();
|
||||
});
|
||||
|
||||
_callback.setOnPause([=] (bool pause) {
|
||||
bool previousOnPause = _currentlyOnPause;
|
||||
_currentlyOnPause = pause;
|
||||
if (previousOnPause != _currentlyOnPause) {
|
||||
if (_onPause) _onPause(pause);
|
||||
}
|
||||
});
|
||||
|
||||
auto options = webrtc::DesktopCaptureOptions::CreateDefault();
|
||||
options.set_disable_effects(true);
|
||||
options.set_detect_updated_region(true);
|
||||
|
||||
#ifdef WEBRTC_WIN
|
||||
options.set_allow_directx_capturer(true);
|
||||
#elif defined WEBRTC_MAC
|
||||
options.set_allow_iosurface(true);
|
||||
#elif defined WEBRTC_USE_PIPEWIRE
|
||||
options.set_allow_pipewire(true);
|
||||
#endif // WEBRTC_WIN || WEBRTC_MAC
|
||||
|
||||
_capturer = webrtc::DesktopCapturer::CreateGenericCapturer(options);
|
||||
if (!_capturer) {
|
||||
if (source.isWindow()) {
|
||||
_capturer = webrtc::DesktopCapturer::CreateWindowCapturer(options);
|
||||
} else {
|
||||
_capturer = webrtc::DesktopCapturer::CreateScreenCapturer(options);
|
||||
}
|
||||
if (!_capturer) {
|
||||
_fatalError = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (data.captureMouse) {
|
||||
_capturer = std::make_unique<webrtc::DesktopAndCursorComposer>(
|
||||
std::move(_capturer),
|
||||
options);
|
||||
}
|
||||
_capturer->SelectSource(source.uniqueId());
|
||||
_capturer->Start(&_callback);
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::start() {
|
||||
if (!_capturer || _isRunning) {
|
||||
return;
|
||||
}
|
||||
// ++GlobalCount;
|
||||
//#ifdef WEBRTC_MAC
|
||||
// NSLog(@"current capture count: %d", GlobalCount);
|
||||
//#endif // WEBRTC_MAC
|
||||
|
||||
_isRunning = true;
|
||||
_timerGuard = std::make_shared<bool>(true);
|
||||
loop();
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::stop() {
|
||||
// if (_isRunning) {
|
||||
// GlobalCount--;
|
||||
//
|
||||
//#ifdef WEBRTC_MAC
|
||||
// NSLog(@"current capture count: %d", GlobalCount);
|
||||
//#endif // WEBRTC_MAC
|
||||
// }
|
||||
_isRunning = false;
|
||||
_timerGuard = nullptr;
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::loop() {
|
||||
if (!_capturer || !_isRunning) {
|
||||
return;
|
||||
}
|
||||
|
||||
_capturer->CaptureFrame();
|
||||
const auto guard = std::weak_ptr<bool>(_timerGuard);
|
||||
_scheduler.runDelayed(_delayMs, [=] {
|
||||
if (guard.lock()) {
|
||||
loop();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::setOnFatalError(std::function<void ()> error) {
|
||||
if (_fatalError) {
|
||||
error();
|
||||
} else {
|
||||
_onFatalError = std::move(error);
|
||||
}
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::setOnPause(std::function<void (bool)> pause) {
|
||||
if (_currentlyOnPause) {
|
||||
pause(true);
|
||||
}
|
||||
_onPause = std::move(pause);
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::setOutput(
|
||||
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
|
||||
_callback.setOutput(std::move(sink));
|
||||
}
|
||||
|
||||
void DesktopSourceRenderer::setSecondaryOutput(
|
||||
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
|
||||
_callback.setSecondaryOutput(std::move(sink));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
struct DesktopCaptureSourceHelper::Renderer {
|
||||
CaptureScheduler scheduler;
|
||||
std::unique_ptr<DesktopSourceRenderer> renderer;
|
||||
};
|
||||
|
||||
DesktopCaptureSource DesktopCaptureSourceForKey(
|
||||
const std::string &uniqueKey) {
|
||||
if (!ShouldBeDesktopCapture(uniqueKey)) {
|
||||
return DesktopCaptureSource::Invalid();
|
||||
}
|
||||
if (uniqueKey == "desktop_capturer_pipewire") {
|
||||
return DesktopCaptureSource(0, "pipewire", false);
|
||||
}
|
||||
const auto windowPrefix = std::string("desktop_capturer_window_");
|
||||
const auto isWindow = (uniqueKey.find(windowPrefix) == 0);
|
||||
DesktopCaptureSourceManager manager(isWindow
|
||||
? DesktopCaptureType::Window
|
||||
: DesktopCaptureType::Screen);
|
||||
const auto sources = manager.sources();
|
||||
|
||||
// "desktop_capturer_window_".size() == "desktop_capturer_screen_".size()
|
||||
const auto keyId = std::stoll(uniqueKey.substr(windowPrefix.size()));
|
||||
for (const auto &source : sources) {
|
||||
if (source.uniqueId() == keyId) {
|
||||
return source;
|
||||
}
|
||||
}
|
||||
return DesktopCaptureSource::Invalid();
|
||||
}
|
||||
|
||||
bool ShouldBeDesktopCapture(const std::string &uniqueKey) {
|
||||
return (uniqueKey.find("desktop_capturer_") == 0);
|
||||
}
|
||||
|
||||
DesktopCaptureSourceHelper::DesktopCaptureSourceHelper(
|
||||
DesktopCaptureSource source,
|
||||
DesktopCaptureSourceData data)
|
||||
: _renderer(std::make_shared<Renderer>()) {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer, source, data] {
|
||||
renderer->renderer = std::make_unique<DesktopSourceRenderer>(
|
||||
renderer->scheduler,
|
||||
source,
|
||||
data);
|
||||
});
|
||||
}
|
||||
|
||||
DesktopCaptureSourceHelper::~DesktopCaptureSourceHelper() {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer] {
|
||||
});
|
||||
}
|
||||
|
||||
void DesktopCaptureSourceHelper::setOutput(
|
||||
std::shared_ptr<
|
||||
rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) const {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer, sink] {
|
||||
renderer->renderer->setOutput(sink);
|
||||
});
|
||||
}
|
||||
|
||||
void DesktopCaptureSourceHelper::setSecondaryOutput(
|
||||
std::shared_ptr<
|
||||
rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) const {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer, sink] {
|
||||
renderer->renderer->setSecondaryOutput(sink);
|
||||
});
|
||||
}
|
||||
|
||||
void DesktopCaptureSourceHelper::start() const {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer] {
|
||||
renderer->renderer->start();
|
||||
});
|
||||
}
|
||||
void DesktopCaptureSourceHelper::setOnFatalError(std::function<void ()> error) const {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer, error = error] {
|
||||
renderer->renderer->setOnFatalError(error);
|
||||
});
|
||||
}
|
||||
void DesktopCaptureSourceHelper::setOnPause(std::function<void (bool)> pause) const {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer, pause = pause] {
|
||||
renderer->renderer->setOnPause(pause);
|
||||
});
|
||||
}
|
||||
|
||||
void DesktopCaptureSourceHelper::stop() const {
|
||||
_renderer->scheduler.runAsync([renderer = _renderer] {
|
||||
renderer->renderer->stop();
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
|
|
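The capture loop above never cancels a pending runDelayed tick directly: stop() just drops _timerGuard, and each queued tick checks a std::weak_ptr<bool> before re-arming. A minimal standalone sketch of that guard idiom (not part of this commit; illustrative names only, plain C++ with no WebRTC dependencies):

#include <cassert>
#include <functional>
#include <memory>
#include <vector>

int main() {
    // Stand-in for the scheduler's queue of delayed tasks.
    std::vector<std::function<void()>> pendingTicks;

    auto timerGuard = std::make_shared<bool>(true); // owned by the "renderer"
    int ticks = 0;

    // loop(): arm the next tick, holding only a weak reference to the guard.
    std::weak_ptr<bool> guard = timerGuard;
    pendingTicks.push_back([guard, &ticks] {
        if (guard.lock()) { // still running -> capture a frame, re-arm
            ++ticks;
        }
    });

    timerGuard = nullptr; // stop(): one assignment cancels every queued tick

    for (const auto &tick : pendingTicks) {
        tick();
    }
    assert(ticks == 0); // the tick saw the expired guard and did nothing
    return 0;
}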
@@ -0,0 +1,56 @@
//
// DesktopCaptureSourceHelper.h
// TgVoipWebrtc
//
// Created by Mikhail Filimonov on 28.12.2020.
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
//
#ifndef TGCALLS_DESKTOP_CAPTURE_SOURCE_HELPER_H__
#define TGCALLS_DESKTOP_CAPTURE_SOURCE_HELPER_H__

#include "tgcalls/desktop_capturer/DesktopCaptureSource.h"

#include <memory>
#include <functional>
#include <string>

namespace webrtc {
class VideoFrame;
} // namespace webrtc

namespace rtc {
template <typename T>
class VideoSinkInterface;
} // namespace rtc

namespace tgcalls {

DesktopCaptureSource DesktopCaptureSourceForKey(
    const std::string &uniqueKey);
bool ShouldBeDesktopCapture(const std::string &uniqueKey);

class DesktopCaptureSourceHelper {
public:
    DesktopCaptureSourceHelper(
        DesktopCaptureSource source,
        DesktopCaptureSourceData data);
    ~DesktopCaptureSourceHelper();

    void setOutput(
        std::shared_ptr<
            rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) const;
    void setSecondaryOutput(
        std::shared_ptr<
            rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) const;
    void start() const;
    void stop() const;
    void setOnFatalError(std::function<void ()>) const;
    void setOnPause(std::function<void (bool)>) const;
private:
    struct Renderer;
    std::shared_ptr<Renderer> _renderer;

};

} // namespace tgcalls

#endif // TGCALLS_DESKTOP_CAPTURE_SOURCE_HELPER_H__
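A usage sketch for the interface above (not part of the commit): the key format and the DesktopCaptureSourceData fields (aspectSize, fps, captureMouse) are taken from DesktopCaptureSourceHelper.cpp earlier in this diff; the frame-counting sink is hypothetical.

#include "tgcalls/desktop_capturer/DesktopCaptureSourceHelper.h"

#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"

#include <memory>

// Hypothetical sink that only counts delivered frames.
class CountingSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
    void OnFrame(const webrtc::VideoFrame &frame) override {
        ++_frames;
    }
    int frames() const { return _frames; }

private:
    int _frames = 0;
};

void captureFirstScreen() {
    const auto source = tgcalls::DesktopCaptureSourceForKey(
        "desktop_capturer_screen_0"); // Invalid() is returned for unknown keys

    tgcalls::DesktopCaptureSourceData data;
    data.aspectSize.width = 1280;  // frames are downscaled toward this size
    data.aspectSize.height = 720;
    data.fps = 30;                 // loop() re-arms every 1000 / fps ms
    data.captureMouse = true;      // wraps the capturer in DesktopAndCursorComposer

    tgcalls::DesktopCaptureSourceHelper helper(source, data);
    helper.setOutput(std::make_shared<CountingSink>());
    helper.start();
    // ... capture runs on the scheduler thread until:
    helper.stop();
}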
@@ -0,0 +1,65 @@
//
// DesktopCaptureSourceManager.m
// TgVoipWebrtc
//
// Created by Mikhail Filimonov on 28.12.2020.
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
//

#include "tgcalls/desktop_capturer/DesktopCaptureSourceManager.h"

#include "modules/desktop_capture/desktop_and_cursor_composer.h"
#include "modules/desktop_capture/desktop_capturer_differ_wrapper.h"
#include "third_party/libyuv/include/libyuv.h"
#include "api/video/i420_buffer.h"
#include "tgcalls/desktop_capturer/DesktopCaptureSourceHelper.h"

namespace tgcalls {

DesktopCaptureSourceManager::DesktopCaptureSourceManager(
    DesktopCaptureType type)
: _capturer(CreateForType(type))
, _type(type) {
}

DesktopCaptureSourceManager::~DesktopCaptureSourceManager() = default;

webrtc::DesktopCaptureOptions DesktopCaptureSourceManager::OptionsForType(
        DesktopCaptureType type) {
    auto result = webrtc::DesktopCaptureOptions::CreateDefault();
#ifdef WEBRTC_WIN
    result.set_allow_directx_capturer(true);
#elif defined WEBRTC_MAC
    result.set_allow_iosurface(type == DesktopCaptureType::Screen);
#elif defined WEBRTC_USE_PIPEWIRE
    result.set_allow_pipewire(true);
#endif // WEBRTC_WIN || WEBRTC_MAC || WEBRTC_USE_PIPEWIRE
    result.set_detect_updated_region(true);
    return result;
}

auto DesktopCaptureSourceManager::CreateForType(DesktopCaptureType type)
-> std::unique_ptr<webrtc::DesktopCapturer> {
    const auto options = OptionsForType(type);
    if (auto result = webrtc::DesktopCapturer::CreateGenericCapturer(options)) {
        return result;
    }
    return (type == DesktopCaptureType::Screen)
        ? webrtc::DesktopCapturer::CreateScreenCapturer(options)
        : webrtc::DesktopCapturer::CreateWindowCapturer(options);
}

std::vector<DesktopCaptureSource> DesktopCaptureSourceManager::sources() {
    auto result = std::vector<DesktopCaptureSource>();
    auto list = webrtc::DesktopCapturer::SourceList();
    if (_capturer && _capturer->GetSourceList(&list)) {
        const auto isWindow = (_type == DesktopCaptureType::Window);
        for (const auto &source : list) {
            result.emplace_back(source.id, source.title, isWindow);
        }
    }
    return result;
}

} // namespace tgcalls
@@ -0,0 +1,49 @@
//
// DesktopCaptureSourceManager.h
// TgVoipWebrtc
//
// Created by Mikhail Filimonov on 28.12.2020.
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
//
#ifndef TGCALLS_DESKTOP_CAPTURE_SOURCE_MANAGER_H__
#define TGCALLS_DESKTOP_CAPTURE_SOURCE_MANAGER_H__

#include "tgcalls/desktop_capturer/DesktopCaptureSource.h"
#include "tgcalls/desktop_capturer/DesktopCaptureSourceHelper.h"

#include <map>
#include <memory>
#include <vector>

namespace webrtc {
class DesktopCapturer;
class DesktopCaptureOptions;
} // namespace webrtc

namespace tgcalls {

enum class DesktopCaptureType {
    Screen,
    Window,
};

class DesktopCaptureSourceManager {
public:
    explicit DesktopCaptureSourceManager(DesktopCaptureType type);
    ~DesktopCaptureSourceManager();

    std::vector<DesktopCaptureSource> sources();

private:
    static webrtc::DesktopCaptureOptions OptionsForType(
        DesktopCaptureType type);
    static std::unique_ptr<webrtc::DesktopCapturer> CreateForType(
        DesktopCaptureType type);

    std::unique_ptr<webrtc::DesktopCapturer> _capturer;
    DesktopCaptureType _type = DesktopCaptureType::Screen;

};

} // namespace tgcalls

#endif // TGCALLS_DESKTOP_CAPTURE_SOURCE_MANAGER_H__
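A short enumeration sketch against the manager declared above (not part of the commit); the uniqueId() accessor on the returned sources is assumed from its use in DesktopCaptureSourceForKey:

#include "tgcalls/desktop_capturer/DesktopCaptureSourceManager.h"

#include <iostream>

void listWindowSources() {
    tgcalls::DesktopCaptureSourceManager manager(
        tgcalls::DesktopCaptureType::Window);
    for (const auto &source : manager.sources()) {
        // Each entry corresponds to one capturable window (or one screen,
        // when constructed with DesktopCaptureType::Screen instead).
        std::cout << source.uniqueId() << std::endl;
    }
}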
64
TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp
Normal file
@@ -0,0 +1,64 @@
#include "AVIOContextImpl.h"

#include "rtc_base/logging.h"
#include "rtc_base/third_party/base64/base64.h"
#include "api/video/i420_buffer.h"

#include <algorithm>
#include <cstring>
#include <string>
#include <set>
#include <map>

namespace tgcalls {

namespace {

int AVIOContextImplRead(void *opaque, unsigned char *buffer, int bufferSize) {
    AVIOContextImpl *instance = static_cast<AVIOContextImpl *>(opaque);

    int bytesToRead = std::min(bufferSize, ((int)instance->_fileData.size()) - instance->_fileReadPosition);
    if (bytesToRead < 0) {
        bytesToRead = 0;
    }

    if (bytesToRead > 0) {
        memcpy(buffer, instance->_fileData.data() + instance->_fileReadPosition, bytesToRead);
        instance->_fileReadPosition += bytesToRead;

        return bytesToRead;
    } else {
        return AVERROR_EOF;
    }
}

int64_t AVIOContextImplSeek(void *opaque, int64_t offset, int whence) {
    AVIOContextImpl *instance = static_cast<AVIOContextImpl *>(opaque);

    if (whence == AVSEEK_SIZE) {
        return (int64_t)instance->_fileData.size();
    } else {
        int64_t seekOffset = std::min(offset, (int64_t)instance->_fileData.size());
        if (seekOffset < 0) {
            seekOffset = 0;
        }
        instance->_fileReadPosition = (int)seekOffset;
        return seekOffset;
    }
}

} // namespace

AVIOContextImpl::AVIOContextImpl(std::vector<uint8_t> &&fileData) :
_fileData(std::move(fileData)) {
    _buffer.resize(4 * 1024);
    _context = avio_alloc_context(_buffer.data(), (int)_buffer.size(), 0, this, &AVIOContextImplRead, NULL, &AVIOContextImplSeek);
}

AVIOContextImpl::~AVIOContextImpl() {
    avio_context_free(&_context);
}

AVIOContext *AVIOContextImpl::getContext() const {
    return _context;
}

} // namespace tgcalls
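AudioStreamingPartInternal later in this diff is the real consumer of this class; as a self-contained illustration, here is a sketch (not part of the commit) that routes libavformat probing through the in-memory callbacks above. The "ogg" container name is an assumption, and FFmpeg versions before 59 declare AVInputFormat without const:

#include "AVIOContextImpl.h"

#include <vector>

// Probe an in-memory container through the custom read/seek callbacks.
bool probeFromMemory(std::vector<uint8_t> &&bytes) {
    tgcalls::AVIOContextImpl io(std::move(bytes));

    AVFormatContext *context = avformat_alloc_context();
    if (!context) {
        return false;
    }
    context->pb = io.getContext(); // reads/seeks now hit AVIOContextImplRead/Seek

    const AVInputFormat *format = av_find_input_format("ogg");
    if (!format) {
        avformat_free_context(context);
        return false;
    }
    if (avformat_open_input(&context, "", format, nullptr) < 0) {
        // avformat_open_input frees the user-supplied context on failure.
        return false;
    }
    avformat_close_input(&context);
    return true;
}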
40
TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h
Normal file
@@ -0,0 +1,40 @@
#ifndef TGCALLS_AVIOCONTEXTIMPL_H
#define TGCALLS_AVIOCONTEXTIMPL_H

#include "absl/types/optional.h"
#include <vector>
#include <stdint.h>

#include "api/video/video_frame.h"

// Fix build on Windows - this should appear before FFmpeg timestamp include.
#define _USE_MATH_DEFINES
#include <math.h>

extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

namespace tgcalls {

class AVIOContextImpl {
public:
    AVIOContextImpl(std::vector<uint8_t> &&fileData);
    ~AVIOContextImpl();

    AVIOContext *getContext() const;

public:
    std::vector<uint8_t> _fileData;
    int _fileReadPosition = 0;

    std::vector<uint8_t> _buffer;
    AVIOContext *_context = nullptr;
};

}

#endif
181
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp
Normal file
@@ -0,0 +1,181 @@
#include "AudioStreamingPart.h"

#include "AudioStreamingPartInternal.h"

#include "rtc_base/logging.h"
#include "rtc_base/third_party/base64/base64.h"

#include <string>
#include <bitset>
#include <set>
#include <map>

namespace tgcalls {

class AudioStreamingPartState {
    struct ChannelMapping {
        uint32_t ssrc = 0;
        int channelIndex = 0;

        ChannelMapping(uint32_t ssrc_, int channelIndex_) :
        ssrc(ssrc_), channelIndex(channelIndex_) {
        }
    };

public:
    AudioStreamingPartState(std::vector<uint8_t> &&data, std::string const &container, bool isSingleChannel) :
    _isSingleChannel(isSingleChannel),
    _parsedPart(std::move(data), container) {
        if (_parsedPart.getChannelUpdates().size() == 0 && !isSingleChannel) {
            _didReadToEnd = true;
            return;
        }

        _remainingMilliseconds = _parsedPart.getDurationInMilliseconds();

        for (const auto &it : _parsedPart.getChannelUpdates()) {
            _allSsrcs.insert(it.ssrc);
        }
    }

    ~AudioStreamingPartState() {
    }

    std::map<std::string, int32_t> getEndpointMapping() const {
        return _parsedPart.getEndpointMapping();
    }

    int getRemainingMilliseconds() const {
        return _remainingMilliseconds;
    }

    std::vector<AudioStreamingPart::StreamingPartChannel> get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
        if (_didReadToEnd) {
            return {};
        }

        for (const auto &update : _parsedPart.getChannelUpdates()) {
            if (update.frameIndex == _frameIndex) {
                updateCurrentMapping(update.ssrc, update.id);
            }
        }

        auto readResult = _parsedPart.readPcm(persistentDecoder, _pcm10ms);
        if (readResult.numSamples <= 0) {
            _didReadToEnd = true;
            return {};
        }

        std::vector<AudioStreamingPart::StreamingPartChannel> resultChannels;

        if (_isSingleChannel) {
            for (int i = 0; i < readResult.numChannels; i++) {
                AudioStreamingPart::StreamingPartChannel emptyPart;
                emptyPart.ssrc = i + 1;
                resultChannels.push_back(emptyPart);
            }

            for (int i = 0; i < readResult.numChannels; i++) {
                auto channel = resultChannels.begin() + i;
                int sourceChannelIndex = i;
                for (int j = 0; j < readResult.numSamples; j++) {
                    channel->pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]);
                }
                channel->numSamples += readResult.numSamples;
            }
        } else {
            for (const auto ssrc : _allSsrcs) {
                AudioStreamingPart::StreamingPartChannel emptyPart;
                emptyPart.ssrc = ssrc;
                resultChannels.push_back(emptyPart);
            }

            for (auto &channel : resultChannels) {
                auto mappedChannelIndex = getCurrentMappedChannelIndex(channel.ssrc);

                if (mappedChannelIndex) {
                    int sourceChannelIndex = mappedChannelIndex.value();
                    for (int j = 0; j < readResult.numSamples; j++) {
                        channel.pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]);
                    }
                    channel.numSamples += readResult.numSamples;
                } else {
                    for (int j = 0; j < readResult.numSamples; j++) {
                        channel.pcmData.push_back(0);
                    }
                    channel.numSamples += readResult.numSamples;
                }
            }
        }

        _remainingMilliseconds -= 10;
        if (_remainingMilliseconds < 0) {
            _remainingMilliseconds = 0;
        }
        _frameIndex++;

        return resultChannels;
    }

private:
    absl::optional<int> getCurrentMappedChannelIndex(uint32_t ssrc) {
        for (const auto &it : _currentChannelMapping) {
            if (it.ssrc == ssrc) {
                return it.channelIndex;
            }
        }
        return absl::nullopt;
    }

    void updateCurrentMapping(uint32_t ssrc, int channelIndex) {
        for (int i = (int)_currentChannelMapping.size() - 1; i >= 0; i--) {
            const auto &entry = _currentChannelMapping[i];
            if (entry.ssrc == ssrc && entry.channelIndex == channelIndex) {
                return;
            } else if (entry.ssrc == ssrc || entry.channelIndex == channelIndex) {
                _currentChannelMapping.erase(_currentChannelMapping.begin() + i);
            }
        }
        _currentChannelMapping.emplace_back(ssrc, channelIndex);
    }

private:
    bool _isSingleChannel = false;
    AudioStreamingPartInternal _parsedPart;
    std::set<uint32_t> _allSsrcs;

    std::vector<int16_t> _pcm10ms;
    std::vector<ChannelMapping> _currentChannelMapping;
    int _frameIndex = 0;
    int _remainingMilliseconds = 0;

    bool _didReadToEnd = false;
};

AudioStreamingPart::AudioStreamingPart(std::vector<uint8_t> &&data, std::string const &container, bool isSingleChannel) {
    if (!data.empty()) {
        _state = new AudioStreamingPartState(std::move(data), container, isSingleChannel);
    }
}

AudioStreamingPart::~AudioStreamingPart() {
    if (_state) {
        delete _state;
    }
}

std::map<std::string, int32_t> AudioStreamingPart::getEndpointMapping() const {
    return _state ? _state->getEndpointMapping() : std::map<std::string, int32_t>();
}

int AudioStreamingPart::getRemainingMilliseconds() const {
    return _state ? _state->getRemainingMilliseconds() : 0;
}

std::vector<AudioStreamingPart::StreamingPartChannel> AudioStreamingPart::get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
    return _state
        ? _state->get10msPerChannel(persistentDecoder)
        : std::vector<AudioStreamingPart::StreamingPartChannel>();
}

}
45
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h
Normal file
@@ -0,0 +1,45 @@
#ifndef TGCALLS_AUDIO_STREAMING_PART_H
#define TGCALLS_AUDIO_STREAMING_PART_H

#include "absl/types/optional.h"
#include <vector>
#include <string>
#include <map>
#include <stdint.h>

#include "AudioStreamingPartPersistentDecoder.h"

namespace tgcalls {

class AudioStreamingPartState;

class AudioStreamingPart {
public:
    struct StreamingPartChannel {
        uint32_t ssrc = 0;
        std::vector<int16_t> pcmData;
        int numSamples = 0;
    };

    explicit AudioStreamingPart(std::vector<uint8_t> &&data, std::string const &container, bool isSingleChannel);
    ~AudioStreamingPart();

    AudioStreamingPart(const AudioStreamingPart&) = delete;
    AudioStreamingPart(AudioStreamingPart&& other) {
        _state = other._state;
        other._state = nullptr;
    }
    AudioStreamingPart& operator=(const AudioStreamingPart&) = delete;
    AudioStreamingPart& operator=(AudioStreamingPart&&) = delete;

    std::map<std::string, int32_t> getEndpointMapping() const;
    int getRemainingMilliseconds() const;
    std::vector<StreamingPartChannel> get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder);

private:
    AudioStreamingPartState *_state = nullptr;
};

}

#endif
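A consumption sketch for the interface above (not part of the commit), mirroring how streaming code would pull audio: one persistent decoder is shared across parts, and each part is drained in 10 ms steps. The "ogg" container name and the single-channel flag value are assumptions:

#include "AudioStreamingPart.h"

#include <vector>

void drainPart(std::vector<uint8_t> &&bytes,
               tgcalls::AudioStreamingPartPersistentDecoder &decoder) {
    tgcalls::AudioStreamingPart part(std::move(bytes), "ogg", false);

    while (part.getRemainingMilliseconds() > 0) {
        const auto channels = part.get10msPerChannel(decoder);
        if (channels.empty()) {
            break; // end of data reached before the declared duration
        }
        for (const auto &channel : channels) {
            // channel.ssrc identifies the participant; pcmData holds
            // channel.numSamples mono 16-bit samples for these 10 ms.
            (void)channel;
        }
    }
}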
@@ -0,0 +1,404 @@
#include "AudioStreamingPartInternal.h"

#include "rtc_base/logging.h"
#include "rtc_base/third_party/base64/base64.h"

extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

#include <cstring>
#include <iterator>
#include <sstream>
#include <string>
#include <bitset>
#include <set>
#include <map>

namespace tgcalls {

namespace {

int16_t sampleFloatToInt16(float sample) {
    return av_clip_int16(static_cast<int32_t>(lrint(sample * 32767)));
}

uint32_t stringToUInt32(std::string const &string) {
    std::stringstream stringStream(string);
    uint32_t value = 0;
    stringStream >> value;
    return value;
}

template <typename Out>
void splitString(const std::string &s, char delim, Out result) {
    std::istringstream iss(s);
    std::string item;
    while (std::getline(iss, item, delim)) {
        *result++ = item;
    }
}

std::vector<std::string> splitString(const std::string &s, char delim) {
    std::vector<std::string> elems;
    splitString(s, delim, std::back_inserter(elems));
    return elems;
}

static absl::optional<uint32_t> readInt32(std::string const &data, int &offset) {
    if (offset + 4 > data.length()) {
        return absl::nullopt;
    }

    int32_t value = 0;
    memcpy(&value, data.data() + offset, 4);
    offset += 4;

    return value;
}

std::vector<AudioStreamingPartInternal::ChannelUpdate> parseChannelUpdates(std::string const &data, int &offset) {
    std::vector<AudioStreamingPartInternal::ChannelUpdate> result;

    auto channels = readInt32(data, offset);
    if (!channels) {
        return {};
    }

    auto count = readInt32(data, offset);
    if (!count) {
        return {};
    }

    for (int i = 0; i < count.value(); i++) {
        auto frameIndex = readInt32(data, offset);
        if (!frameIndex) {
            return {};
        }

        auto channelId = readInt32(data, offset);
        if (!channelId) {
            return {};
        }

        auto ssrc = readInt32(data, offset);
        if (!ssrc) {
            return {};
        }

        AudioStreamingPartInternal::ChannelUpdate update;
        update.frameIndex = frameIndex.value();
        update.id = channelId.value();
        update.ssrc = ssrc.value();

        result.push_back(update);
    }

    return result;
}

} // namespace

AudioStreamingPartInternal::AudioStreamingPartInternal(std::vector<uint8_t> &&fileData, std::string const &container) :
_avIoContext(std::move(fileData)) {
    int ret = 0;

    _frame = av_frame_alloc();

#if LIBAVFORMAT_VERSION_MAJOR >= 59
    const
#endif
    AVInputFormat *inputFormat = av_find_input_format(container.c_str());
    if (!inputFormat) {
        _didReadToEnd = true;
        return;
    }

    _inputFormatContext = avformat_alloc_context();
    if (!_inputFormatContext) {
        _didReadToEnd = true;
        return;
    }

    _inputFormatContext->pb = _avIoContext.getContext();

    if ((ret = avformat_open_input(&_inputFormatContext, "", inputFormat, nullptr)) < 0) {
        _didReadToEnd = true;
        return;
    }

    if ((ret = avformat_find_stream_info(_inputFormatContext, nullptr)) < 0) {
        _didReadToEnd = true;

        avformat_close_input(&_inputFormatContext);
        _inputFormatContext = nullptr;
        return;
    }

    for (int i = 0; i < _inputFormatContext->nb_streams; i++) {
        AVStream *inStream = _inputFormatContext->streams[i];

        AVCodecParameters *inCodecpar = inStream->codecpar;
        if (inCodecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
            continue;
        }

        _audioCodecParameters = avcodec_parameters_alloc();
        avcodec_parameters_copy(_audioCodecParameters, inCodecpar);

        _streamId = i;

        _durationInMilliseconds = (int)(inStream->duration * av_q2d(inStream->time_base) * 1000);

        if (inStream->metadata) {
            AVDictionaryEntry *entry = av_dict_get(inStream->metadata, "TG_META", nullptr, 0);
            if (entry && entry->value) {
                std::string result;
                size_t data_used = 0;
                std::string sourceBase64 = (const char *)entry->value;
                rtc::Base64::Decode(sourceBase64, rtc::Base64::DO_LAX, &result, &data_used);

                if (result.size() != 0) {
                    int offset = 0;
                    _channelUpdates = parseChannelUpdates(result, offset);
                }
            }

            uint32_t videoChannelMask = 0;
            entry = av_dict_get(inStream->metadata, "ACTIVE_MASK", nullptr, 0);
            if (entry && entry->value) {
                std::string sourceString = (const char *)entry->value;
                videoChannelMask = stringToUInt32(sourceString);
            }

            std::vector<std::string> endpointList;
            entry = av_dict_get(inStream->metadata, "ENDPOINTS", nullptr, 0);
            if (entry && entry->value) {
                std::string sourceString = (const char *)entry->value;
                endpointList = splitString(sourceString, ' ');
            }

            std::bitset<32> videoChannels(videoChannelMask);
            size_t endpointIndex = 0;
            if (videoChannels.count() == endpointList.size()) {
                for (size_t i = 0; i < videoChannels.size(); i++) {
                    if (videoChannels[i]) {
                        _endpointMapping.insert(std::make_pair(endpointList[endpointIndex], i));
                        endpointIndex++;
                    }
                }
            }
        }

        break;
    }

    if (_streamId == -1) {
        _didReadToEnd = true;
    }
}

AudioStreamingPartInternal::~AudioStreamingPartInternal() {
    if (_frame) {
        av_frame_free(&_frame);
    }
    if (_inputFormatContext) {
        avformat_close_input(&_inputFormatContext);
    }
    if (_audioCodecParameters) {
        avcodec_parameters_free(&_audioCodecParameters);
    }
}

AudioStreamingPartInternal::ReadPcmResult AudioStreamingPartInternal::readPcm(AudioStreamingPartPersistentDecoder &persistentDecoder, std::vector<int16_t> &outPcm) {
    if (_didReadToEnd) {
        return AudioStreamingPartInternal::ReadPcmResult();
    }

    int outPcmSampleOffset = 0;
    ReadPcmResult result;

    if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) {
        fillPcmBuffer(persistentDecoder);
    }

    if (outPcm.size() != 480 * _channelCount) {
        outPcm.resize(480 * _channelCount);
    }
    int readSamples = 0;
    if (_channelCount != 0) {
        readSamples = (int)outPcm.size() / _channelCount;
    }

    while (outPcmSampleOffset < readSamples) {
        if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) {
            fillPcmBuffer(persistentDecoder);

            if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) {
                break;
            }
        }

        int readFromPcmBufferSamples = std::min(_pcmBufferSampleSize - _pcmBufferSampleOffset, readSamples - outPcmSampleOffset);
        if (readFromPcmBufferSamples != 0) {
            std::copy(_pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount, _pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount + readFromPcmBufferSamples * _channelCount, outPcm.begin() + outPcmSampleOffset * _channelCount);
            _pcmBufferSampleOffset += readFromPcmBufferSamples;
            outPcmSampleOffset += readFromPcmBufferSamples;
            result.numSamples += readFromPcmBufferSamples;
            _readSampleCount += readFromPcmBufferSamples;
        }
    }

    result.numChannels = _channelCount;

    // Uncomment for debugging incomplete frames
    /*if (result.numSamples != 480 && result.numSamples != 0) {
        RTC_LOG(LS_INFO) << "result.numSamples = " << result.numSamples << ", _readSampleCount = " << _readSampleCount << ", duration = " << _inputFormatContext->streams[_streamId]->duration;
    }*/

    return result;
}

int AudioStreamingPartInternal::getDurationInMilliseconds() const {
    return _durationInMilliseconds;
}

std::vector<AudioStreamingPartInternal::ChannelUpdate> const &AudioStreamingPartInternal::getChannelUpdates() const {
    return _channelUpdates;
}

std::map<std::string, int32_t> AudioStreamingPartInternal::getEndpointMapping() const {
    return _endpointMapping;
}

void AudioStreamingPartInternal::fillPcmBuffer(AudioStreamingPartPersistentDecoder &persistentDecoder) {
    _pcmBufferSampleSize = 0;
    _pcmBufferSampleOffset = 0;

    if (_didReadToEnd) {
        return;
    }
    if (!_inputFormatContext) {
        _didReadToEnd = true;
        return;
    }

    int ret = 0;
    while (true) {
        ret = av_read_frame(_inputFormatContext, &_packet);
        if (ret < 0) {
            _didReadToEnd = true;
            return;
        }

        if (_packet.stream_index != _streamId) {
            av_packet_unref(&_packet);
            continue;
        }

        ret = persistentDecoder.decode(_audioCodecParameters, _inputFormatContext->streams[_streamId]->time_base, _packet, _frame);
        av_packet_unref(&_packet);

        if (ret == AVERROR(EAGAIN)) {
            continue;
        }

        break;
    }

    if (ret != 0) {
        _didReadToEnd = true;
        return;
    }

    if (_channelCount == 0) {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
        _channelCount = _frame->ch_layout.nb_channels;
#else
        _channelCount = _frame->channels;
#endif
    }

    if (_channelCount == 0) {
        _didReadToEnd = true;
        return;
    }

#if LIBAVFORMAT_VERSION_MAJOR >= 59
    if (_frame->ch_layout.nb_channels != _channelCount || _frame->ch_layout.nb_channels > 8) {
#else
    if (_frame->channels != _channelCount || _frame->channels > 8) {
#endif
        _didReadToEnd = true;
        return;
    }

#if LIBAVFORMAT_VERSION_MAJOR >= 59
    if (_pcmBuffer.size() < _frame->nb_samples * _frame->ch_layout.nb_channels) {
        _pcmBuffer.resize(_frame->nb_samples * _frame->ch_layout.nb_channels);
    }
#else
    if (_pcmBuffer.size() < _frame->nb_samples * _frame->channels) {
        _pcmBuffer.resize(_frame->nb_samples * _frame->channels);
    }
#endif

    switch (_frame->format) {
    case AV_SAMPLE_FMT_S16: {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
        memcpy(_pcmBuffer.data(), _frame->data[0], _frame->nb_samples * 2 * _frame->ch_layout.nb_channels);
#else
        memcpy(_pcmBuffer.data(), _frame->data[0], _frame->nb_samples * 2 * _frame->channels);
#endif
    } break;

    case AV_SAMPLE_FMT_S16P: {
        int16_t *to = _pcmBuffer.data();
        for (int sample = 0; sample < _frame->nb_samples; ++sample) {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
            for (int channel = 0; channel < _frame->ch_layout.nb_channels; ++channel) {
#else
            for (int channel = 0; channel < _frame->channels; ++channel) {
#endif
                int16_t *shortChannel = (int16_t*)_frame->data[channel];
                *to++ = shortChannel[sample];
            }
        }
    } break;

    case AV_SAMPLE_FMT_FLT: {
        // data[0] holds the interleaved float samples.
        float *floatData = (float *)_frame->data[0];
#if LIBAVFORMAT_VERSION_MAJOR >= 59
        for (int i = 0; i < _frame->nb_samples * _frame->ch_layout.nb_channels; i++) {
            _pcmBuffer[i] = sampleFloatToInt16(floatData[i]);
        }
#else
        for (int i = 0; i < _frame->nb_samples * _frame->channels; i++) {
            _pcmBuffer[i] = sampleFloatToInt16(floatData[i]);
        }
#endif
    } break;

    case AV_SAMPLE_FMT_FLTP: {
        int16_t *to = _pcmBuffer.data();
        for (int sample = 0; sample < _frame->nb_samples; ++sample) {
#if LIBAVFORMAT_VERSION_MAJOR >= 59
            for (int channel = 0; channel < _frame->ch_layout.nb_channels; ++channel) {
#else
            for (int channel = 0; channel < _frame->channels; ++channel) {
#endif
                float *floatChannel = (float*)_frame->data[channel];
                *to++ = sampleFloatToInt16(floatChannel[sample]);
            }
        }
    } break;

    default: {
        RTC_FATAL() << "Unexpected sample_fmt";
    } break;
    }

    _pcmBufferSampleSize = _frame->nb_samples;
    _pcmBufferSampleOffset = 0;
}

}
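The S16P/FLTP branches above de-interleave FFmpeg's planar layout (one array per channel) into the packed buffer the rest of the pipeline expects. The same transform in isolation (not part of the commit), with the float clipping done the way sampleFloatToInt16 does it — std::lrint plus clamping standing in for av_clip_int16:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

int16_t floatSampleToS16(float sample) {
    const long scaled = std::lrint(sample * 32767.);
    return (int16_t)std::max(-32768L, std::min(32767L, scaled));
}

// Planar float channels -> packed (interleaved) 16-bit PCM.
std::vector<int16_t> interleave(const std::vector<std::vector<float>> &planes) {
    const size_t channels = planes.size();
    const size_t samples = channels ? planes[0].size() : 0;
    std::vector<int16_t> packed;
    packed.reserve(samples * channels);
    for (size_t sample = 0; sample < samples; ++sample) {
        for (size_t channel = 0; channel < channels; ++channel) {
            packed.push_back(floatSampleToS16(planes[channel][sample]));
        }
    }
    return packed;
}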
@@ -0,0 +1,65 @@
#ifndef TGCALLS_AUDIO_STREAMING_PART_INTERNAL_H
#define TGCALLS_AUDIO_STREAMING_PART_INTERNAL_H

#include "absl/types/optional.h"
#include <vector>
#include <string>
#include <map>
#include <stdint.h>

#include "AVIOContextImpl.h"
#include "AudioStreamingPartPersistentDecoder.h"

namespace tgcalls {

class AudioStreamingPartInternal {
public:
    struct ReadPcmResult {
        int numSamples = 0;
        int numChannels = 0;
    };

    struct ChannelUpdate {
        int frameIndex = 0;
        int id = 0;
        uint32_t ssrc = 0;
    };

public:
    AudioStreamingPartInternal(std::vector<uint8_t> &&fileData, std::string const &container);
    ~AudioStreamingPartInternal();

    ReadPcmResult readPcm(AudioStreamingPartPersistentDecoder &persistentDecoder, std::vector<int16_t> &outPcm);
    int getDurationInMilliseconds() const;
    std::vector<ChannelUpdate> const &getChannelUpdates() const;
    std::map<std::string, int32_t> getEndpointMapping() const;

private:
    void fillPcmBuffer(AudioStreamingPartPersistentDecoder &persistentDecoder);

private:
    AVIOContextImpl _avIoContext;

    AVFormatContext *_inputFormatContext = nullptr;
    AVPacket _packet;
    AVFrame *_frame = nullptr;
    AVCodecParameters *_audioCodecParameters = nullptr;

    bool _didReadToEnd = false;

    int _durationInMilliseconds = 0;
    int _streamId = -1;
    int _channelCount = 0;

    std::vector<ChannelUpdate> _channelUpdates;
    std::map<std::string, int32_t> _endpointMapping;

    std::vector<int16_t> _pcmBuffer;
    int _pcmBufferSampleOffset = 0;
    int _pcmBufferSampleSize = 0;
    int _readSampleCount = 0;
};

}

#endif
@@ -0,0 +1,136 @@
#include "AudioStreamingPartPersistentDecoder.h"

#include "rtc_base/logging.h"
#include "rtc_base/third_party/base64/base64.h"

namespace tgcalls {

WrappedCodecParameters::WrappedCodecParameters(AVCodecParameters const *codecParameters) {
    _value = avcodec_parameters_alloc();
    avcodec_parameters_copy(_value, codecParameters);
}

WrappedCodecParameters::~WrappedCodecParameters() {
    avcodec_parameters_free(&_value);
}

bool WrappedCodecParameters::isEqual(AVCodecParameters const *other) {
    if (_value->codec_id != other->codec_id) {
        return false;
    }
    if (_value->format != other->format) {
        return false;
    }
#if LIBAVFORMAT_VERSION_MAJOR >= 59
    if (_value->ch_layout.nb_channels != other->ch_layout.nb_channels) {
#else
    if (_value->channels != other->channels) {
#endif
        return false;
    }

    return true;
}

class AudioStreamingPartPersistentDecoderState {
public:
    AudioStreamingPartPersistentDecoderState(AVCodecParameters const *codecParameters, AVRational timeBase) :
    _codecParameters(codecParameters),
    _timeBase(timeBase) {
        const AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
        if (codec) {
            _codecContext = avcodec_alloc_context3(codec);
            int ret = avcodec_parameters_to_context(_codecContext, codecParameters);
            if (ret < 0) {
                avcodec_free_context(&_codecContext);
                _codecContext = nullptr;
            } else {
                _codecContext->pkt_timebase = timeBase;

#if LIBAVFORMAT_VERSION_MAJOR >= 59
                _channelCount = _codecContext->ch_layout.nb_channels;
#else
                _channelCount = _codecContext->channels;
#endif

                ret = avcodec_open2(_codecContext, codec, nullptr);
                if (ret < 0) {
                    avcodec_free_context(&_codecContext);
                    _codecContext = nullptr;
                }
            }
        }
    }

    ~AudioStreamingPartPersistentDecoderState() {
        if (_codecContext) {
            avcodec_close(_codecContext);
            avcodec_free_context(&_codecContext);
        }
    }

    int decode(AVPacket &packet, AVFrame *frame) {
        if (!_codecContext) {
            // Codec setup failed in the constructor; nothing to decode with.
            return -1;
        }

        int ret = avcodec_send_packet(_codecContext, &packet);
        if (ret < 0) {
            return ret;
        }

        int bytesPerSample = av_get_bytes_per_sample(_codecContext->sample_fmt);
        if (bytesPerSample != 2 && bytesPerSample != 4) {
            return -1;
        }

        ret = avcodec_receive_frame(_codecContext, frame);
        return ret;
    }

public:
    WrappedCodecParameters _codecParameters;
    AVRational _timeBase;
    AVCodecContext *_codecContext = nullptr;
    int _channelCount = 0;
};

AudioStreamingPartPersistentDecoder::AudioStreamingPartPersistentDecoder() {
}

AudioStreamingPartPersistentDecoder::~AudioStreamingPartPersistentDecoder() {
    if (_state) {
        delete _state;
        _state = nullptr;
    }
}

void AudioStreamingPartPersistentDecoder::maybeReset(AVCodecParameters const *codecParameters, AVRational timeBase) {
    if (_state) {
        bool isUpdated = false;
        if (!_state->_codecParameters.isEqual(codecParameters)) {
            isUpdated = true;
        }
        if (_state->_timeBase.num != timeBase.num || _state->_timeBase.den != timeBase.den) {
            isUpdated = true;
        }
        if (!isUpdated) {
            return;
        }
    }

    if (_state) {
        delete _state;
        _state = nullptr;
    }

    _state = new AudioStreamingPartPersistentDecoderState(codecParameters, timeBase);
}

int AudioStreamingPartPersistentDecoder::decode(AVCodecParameters const *codecParameters, AVRational timeBase, AVPacket &packet, AVFrame *frame) {
    maybeReset(codecParameters, timeBase);

    if (!_state) {
        return -1;
    }

    return _state->decode(packet, frame);
}

}
@@ -0,0 +1,51 @@
#ifndef TGCALLS_AUDIO_STREAMING_PART_PERSISTENT_DECODER_H
#define TGCALLS_AUDIO_STREAMING_PART_PERSISTENT_DECODER_H

#include "absl/types/optional.h"
#include <vector>
#include <string>
#include <map>
#include <stdint.h>

// Fix build on Windows - this should appear before FFmpeg timestamp include.
#define _USE_MATH_DEFINES
#include <math.h>

extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

namespace tgcalls {

class AudioStreamingPartPersistentDecoderState;

class WrappedCodecParameters {
public:
    WrappedCodecParameters(AVCodecParameters const *codecParameters);
    ~WrappedCodecParameters();

    bool isEqual(AVCodecParameters const *other);

private:
    AVCodecParameters *_value = nullptr;
};

class AudioStreamingPartPersistentDecoder {
public:
    AudioStreamingPartPersistentDecoder();
    ~AudioStreamingPartPersistentDecoder();

    int decode(AVCodecParameters const *codecParameters, AVRational timeBase, AVPacket &packet, AVFrame *frame);

private:
    void maybeReset(AVCodecParameters const *codecParameters, AVRational timeBase);

private:
    AudioStreamingPartPersistentDecoderState *_state = nullptr;
};

}

#endif
4523
TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,59 @@
#ifndef TGCALLS_GROUP_INSTANCE_CUSTOM_IMPL_H
#define TGCALLS_GROUP_INSTANCE_CUSTOM_IMPL_H

#include <functional>
#include <vector>
#include <string>
#include <memory>
#include <map>

#include "../Instance.h"
#include "GroupInstanceImpl.h"

namespace tgcalls {

class LogSinkImpl;
class GroupInstanceCustomInternal;
class Threads;

class GroupInstanceCustomImpl final : public GroupInstanceInterface {
public:
    explicit GroupInstanceCustomImpl(GroupInstanceDescriptor &&descriptor);
    ~GroupInstanceCustomImpl();

    void stop(std::function<void()> completion);

    void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast);

    void emitJoinPayload(std::function<void(GroupJoinPayload const &)> completion);
    void setJoinResponsePayload(std::string const &payload);
    void removeSsrcs(std::vector<uint32_t> ssrcs);
    void removeIncomingVideoSource(uint32_t ssrc);

    void setIsMuted(bool isMuted);
    void setIsNoiseSuppressionEnabled(bool isNoiseSuppressionEnabled);
    void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture);
    void setVideoSource(std::function<webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>()> getVideoSource);
    void setAudioOutputDevice(std::string id);
    void setAudioInputDevice(std::string id);
    void addExternalAudioSamples(std::vector<uint8_t> &&samples);

    void addOutgoingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

    void setVolume(uint32_t ssrc, double volume);
    void setRequestedVideoChannels(std::vector<VideoChannelDescription> &&requestedVideoChannels);

    void getStats(std::function<void(GroupInstanceStats)> completion);
    void internal_addCustomNetworkEvent(bool isRemoteConnected);

private:
    std::shared_ptr<Threads> _threads;
    std::unique_ptr<ThreadLocalObject<GroupInstanceCustomInternal>> _internal;
    std::unique_ptr<LogSinkImpl> _logSink;

};

} // namespace tgcalls

#endif
238
TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h
Normal file
@@ -0,0 +1,238 @@
#ifndef TGCALLS_GROUP_INSTANCE_IMPL_H
|
||||
#define TGCALLS_GROUP_INSTANCE_IMPL_H
|
||||
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include <optional>
|
||||
|
||||
#include "../Instance.h"
|
||||
|
||||
#include "../StaticThreads.h"
|
||||
#include "GroupJoinPayload.h"
|
||||
|
||||
namespace webrtc {
|
||||
class AudioDeviceModule;
|
||||
class TaskQueueFactory;
|
||||
class VideoTrackSourceInterface;
|
||||
}
|
||||
|
||||
namespace webrtc {
|
||||
template <class T>
|
||||
class scoped_refptr;
|
||||
}
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
class LogSinkImpl;
|
||||
class GroupInstanceManager;
|
||||
class WrappedAudioDeviceModule;
|
||||
struct AudioFrame;
|
||||
|
||||
struct GroupConfig {
|
||||
bool need_log{true};
|
||||
FilePath logPath;
|
||||
};
|
||||
|
||||
struct GroupLevelValue {
|
||||
float level = 0.;
|
||||
bool voice = false;
|
||||
bool isMuted = false;
|
||||
};
|
||||
|
||||
struct GroupLevelUpdate {
|
||||
uint32_t ssrc = 0;
|
||||
GroupLevelValue value;
|
||||
};
|
||||
|
||||
struct GroupLevelsUpdate {
|
||||
std::vector<GroupLevelUpdate> updates;
|
||||
};
|
||||
|
||||
struct GroupActivityUpdate {
|
||||
uint32_t ssrc = 0;
|
||||
};
|
||||
|
||||
struct GroupActivitiesUpdate {
|
||||
std::vector<GroupActivityUpdate> updates;
|
||||
};
|
||||
|
||||
|
||||
class BroadcastPartTask {
|
||||
public:
|
||||
virtual ~BroadcastPartTask() = default;
|
||||
|
||||
virtual void cancel() = 0;
|
||||
};
|
||||
|
||||
struct BroadcastPart {
|
||||
struct VideoParams {
|
||||
};
|
||||
|
||||
enum class Status {
|
||||
Success,
|
||||
NotReady,
|
||||
ResyncNeeded
    };

    int64_t timestampMilliseconds = 0;
    double responseTimestamp = 0;
    Status status = Status::NotReady;
    std::vector<uint8_t> data;
};

enum class GroupConnectionMode {
    GroupConnectionModeNone,
    GroupConnectionModeRtc,
    GroupConnectionModeBroadcast
};

struct GroupNetworkState {
    bool isConnected = false;
    bool isTransitioningFromBroadcastToRtc = false;
};

enum class VideoContentType {
    None,
    Screencast,
    Generic
};

enum class VideoCodecName {
    VP8,
    VP9,
    H264
};

class RequestMediaChannelDescriptionTask {
public:
    virtual ~RequestMediaChannelDescriptionTask() = default;

    virtual void cancel() = 0;
};

struct MediaChannelDescription {
    enum class Type {
        Audio,
        Video
    };

    Type type = Type::Audio;
    uint32_t audioSsrc = 0;
    int64_t userId = 0;
    std::string videoInformation;
};

struct MediaSsrcGroup {
    std::string semantics;
    std::vector<uint32_t> ssrcs;
};

struct VideoChannelDescription {
    enum class Quality {
        Thumbnail,
        Medium,
        Full
    };
    uint32_t audioSsrc = 0;
    int64_t userId = 0;
    std::string endpointId;
    std::vector<MediaSsrcGroup> ssrcGroups;
    Quality minQuality = Quality::Thumbnail;
    Quality maxQuality = Quality::Thumbnail;
};

struct GroupInstanceStats {
    struct IncomingVideoStats {
        int receivingQuality = 0;
        int availableQuality = 0;
    };

    std::vector<std::pair<std::string, IncomingVideoStats>> incomingVideoStats;
};

struct GroupInstanceDescriptor {
    std::shared_ptr<Threads> threads;
    GroupConfig config;
    std::string statsLogPath;
    std::function<void(GroupNetworkState)> networkStateUpdated;
    std::function<void(int)> signalBarsUpdated;
    std::function<void(GroupLevelsUpdate const &)> audioLevelsUpdated;
    std::function<void(uint32_t, const AudioFrame &)> onAudioFrame;
    std::function<void(GroupActivitiesUpdate const &)> ssrcActivityUpdated;
    std::string initialInputDeviceId;
    std::string initialOutputDeviceId;
    bool useDummyChannel{true};
    bool disableIncomingChannels{false};
    std::function<webrtc::scoped_refptr<webrtc::AudioDeviceModule>(webrtc::TaskQueueFactory*)> createAudioDeviceModule;
    std::function<webrtc::scoped_refptr<WrappedAudioDeviceModule>(webrtc::TaskQueueFactory*)> createWrappedAudioDeviceModule;
    std::shared_ptr<VideoCaptureInterface> videoCapture; // deprecated
    std::function<webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>()> getVideoSource;
    std::function<std::shared_ptr<BroadcastPartTask>(std::function<void(int64_t)>)> requestCurrentTime;
    std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, std::function<void(BroadcastPart &&)>)> requestAudioBroadcastPart;
    std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function<void(BroadcastPart &&)>)> requestVideoBroadcastPart;
    int outgoingAudioBitrateKbit{32};
    bool disableOutgoingAudioProcessing{false};
    bool disableAudioInput{false};
    bool ios_enableSystemMute{false};
    VideoContentType videoContentType{VideoContentType::None};
    bool initialEnableNoiseSuppression{false};
    std::vector<VideoCodecName> videoCodecPreferences;
    std::function<std::shared_ptr<RequestMediaChannelDescriptionTask>(std::vector<uint32_t> const &, std::function<void(std::vector<MediaChannelDescription> &&)>)> requestMediaChannelDescriptions;
    int minOutgoingVideoBitrateKbit{100};
    std::function<void(bool)> onMutedSpeechActivityDetected;
    std::function<std::vector<uint8_t>(std::vector<uint8_t> const &, int64_t, bool, int32_t)> e2eEncryptDecrypt;
    bool isConference{false};

    std::shared_ptr<PlatformContext> platformContext;
};

template <typename T>
class ThreadLocalObject;

class GroupInstanceInterface {
protected:
    GroupInstanceInterface() = default;

public:
    virtual ~GroupInstanceInterface() = default;

    virtual void stop(std::function<void()> completion) = 0;

    virtual void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast) = 0;

    virtual void emitJoinPayload(std::function<void(GroupJoinPayload const &)> completion) = 0;
    virtual void setJoinResponsePayload(std::string const &payload) = 0;
    virtual void removeSsrcs(std::vector<uint32_t> ssrcs) = 0;
    virtual void removeIncomingVideoSource(uint32_t ssrc) = 0;

    virtual void setIsMuted(bool isMuted) = 0;
    virtual void setIsNoiseSuppressionEnabled(bool isNoiseSuppressionEnabled) = 0;
    virtual void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) = 0;
    virtual void setVideoSource(std::function<webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>()> getVideoSource) = 0;
    virtual void setAudioOutputDevice(std::string id) = 0;
    virtual void setAudioInputDevice(std::string id) = 0;
    virtual void addExternalAudioSamples(std::vector<uint8_t> &&samples) = 0;

    virtual void addOutgoingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
    virtual void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;

    virtual void setVolume(uint32_t ssrc, double volume) = 0;
    virtual void setRequestedVideoChannels(std::vector<VideoChannelDescription> &&requestedVideoChannels) = 0;

    virtual void getStats(std::function<void(GroupInstanceStats)> completion) = 0;
    virtual void internal_addCustomNetworkEvent(bool isRemoteConnected) = 0;

    struct AudioDevice {
        enum class Type {Input, Output};
        std::string name;
        std::string guid;
    };
    static std::vector<GroupInstanceInterface::AudioDevice> getAudioDevices(AudioDevice::Type type);
};

} // namespace tgcalls

#endif
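A minimal sketch of how a caller might populate this descriptor. Only the struct and its fields come from the header above; the thread accessor and the callback bodies are assumptions made for illustration.

// Hypothetical usage sketch. StaticThreads::getThreads() is assumed to be
// available from the library's StaticThreads helper; every literal value
// here is a placeholder, and the defaults shown match the struct above.
tgcalls::GroupInstanceDescriptor descriptor;
descriptor.threads = tgcalls::StaticThreads::getThreads(); // assumption: shared thread set
descriptor.networkStateUpdated = [](tgcalls::GroupNetworkState state) {
    // react to state.isConnected / state.isTransitioningFromBroadcastToRtc
};
descriptor.audioLevelsUpdated = [](tgcalls::GroupLevelsUpdate const &update) {
    // drive per-participant speaking indicators
};
descriptor.outgoingAudioBitrateKbit = 32; // same as the field's default
descriptor.videoContentType = tgcalls::VideoContentType::Generic;
descriptor.videoCodecPreferences = { tgcalls::VideoCodecName::VP8 };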
78
TMessagesProj/jni/voip/tgcalls/group/GroupJoinPayload.h
Normal file

@@ -0,0 +1,78 @@
#ifndef TGCALLS_GROUP_JOIN_PAYLOAD_H
#define TGCALLS_GROUP_JOIN_PAYLOAD_H

#include <vector>
#include <string>
#include <stdint.h>

namespace tgcalls {

struct GroupJoinPayloadVideoSourceGroup {
    std::vector<uint32_t> ssrcs;
    std::string semantics;
};

struct GroupJoinPayloadVideoPayloadType {
    struct FeedbackType {
        std::string type;
        std::string subtype;
    };

    uint32_t id = 0;
    std::string name;
    uint32_t clockrate = 0;
    uint32_t channels = 0;
    std::vector<FeedbackType> feedbackTypes;
    std::vector<std::pair<std::string, std::string>> parameters;
};

struct GroupJoinTransportDescription {
    struct Fingerprint {
        std::string hash;
        std::string setup;
        std::string fingerprint;
    };

    struct Candidate {
        std::string port;
        std::string protocol;
        std::string network;
        std::string generation;
        std::string id;
        std::string component;
        std::string foundation;
        std::string priority;
        std::string ip;
        std::string type;

        std::string tcpType;
        std::string relAddr;
        std::string relPort;
    };

    std::string ufrag;
    std::string pwd;
    std::vector<Fingerprint> fingerprints;
    std::vector<Candidate> candidates;
};

struct GroupJoinVideoInformation {
    uint32_t serverVideoBandwidthProbingSsrc = 0;
    std::string endpointId;
    std::vector<GroupJoinPayloadVideoPayloadType> payloadTypes;
    std::vector<std::pair<uint32_t, std::string>> extensionMap;
};

struct GroupParticipantVideoInformation {
    std::string endpointId;
    std::vector<GroupJoinPayloadVideoSourceGroup> ssrcGroups;
};

struct GroupJoinPayload {
    uint32_t audioSsrc = 0;
    std::string json;
};

}

#endif
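The transport description above is what ends up serialized into the group-call "join" JSON. A brief sketch of filling the local side, assuming the GroupJoinInternalPayload helper declared in GroupJoinPayloadInternal.h later in this commit; all literal values are placeholders.

// Sketch: building a local join payload. Field names come from the structs
// above; the SSRC, ICE credentials, and fingerprint are illustrative only.
tgcalls::GroupJoinInternalPayload payload;
payload.audioSsrc = 12345;              // placeholder SSRC
payload.transport.ufrag = "localUfrag"; // placeholder ICE credentials
payload.transport.pwd = "localPwd";
tgcalls::GroupJoinTransportDescription::Fingerprint fingerprint;
fingerprint.hash = "sha-256";
fingerprint.setup = "active";
fingerprint.fingerprint = "AB:CD:..."; // elided DTLS fingerprint
payload.transport.fingerprints.push_back(fingerprint);
std::string json = payload.serialize(); // {"ssrc":...,"ufrag":...,"pwd":...,"fingerprints":[...]}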
@@ -0,0 +1,373 @@
#include "GroupJoinPayloadInternal.h"
|
||||
|
||||
#include "third-party/json11.hpp"
|
||||
#include <sstream>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
absl::optional<int32_t> parseInt(json11::Json::object const &object, std::string const &key) {
|
||||
const auto value = object.find(key);
|
||||
if (value == object.end() || !value->second.is_number()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
return value->second.int_value();
|
||||
}
|
||||
|
||||
absl::optional<std::string> parseString(json11::Json::object const &object, std::string const &key) {
|
||||
const auto value = object.find(key);
|
||||
if (value == object.end() || !value->second.is_string()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
return value->second.string_value();
|
||||
}
|
||||
|
||||
template <typename Out>
|
||||
void splitString(const std::string &s, char delim, Out result) {
|
||||
std::istringstream iss(s);
|
||||
std::string item;
|
||||
while (std::getline(iss, item, delim)) {
|
||||
*result++ = item;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> splitString(const std::string &s, char delim) {
|
||||
std::vector<std::string> elems;
|
||||
splitString(s, delim, std::back_inserter(elems));
|
||||
return elems;
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinTransportDescription> parseTransportDescription(json11::Json::object const &object) {
|
||||
GroupJoinTransportDescription result;
|
||||
|
||||
if (const auto pwd = parseString(object, "pwd")) {
|
||||
result.pwd = pwd.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto ufrag = parseString(object, "ufrag")) {
|
||||
result.ufrag = ufrag.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
const auto fingerprints = object.find("fingerprints");
|
||||
if (fingerprints == object.end() || !fingerprints->second.is_array()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
for (const auto &fingerprint : fingerprints->second.array_items()) {
|
||||
if (!fingerprint.is_object()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
GroupJoinTransportDescription::Fingerprint parsedFingerprint;
|
||||
|
||||
if (const auto hash = parseString(fingerprint.object_items(), "hash")) {
|
||||
parsedFingerprint.hash = hash.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto fingerprintValue = parseString(fingerprint.object_items(), "fingerprint")) {
|
||||
parsedFingerprint.fingerprint = fingerprintValue.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto setup = parseString(fingerprint.object_items(), "setup")) {
|
||||
parsedFingerprint.setup = setup.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
result.fingerprints.push_back(std::move(parsedFingerprint));
|
||||
}
|
||||
|
||||
const auto candidates = object.find("candidates");
|
||||
if (candidates == object.end() || !candidates->second.is_array()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
for (const auto &candidate : candidates->second.array_items()) {
|
||||
if (!candidate.is_object()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
GroupJoinTransportDescription::Candidate parsedCandidate;
|
||||
|
||||
if (const auto port = parseString(candidate.object_items(), "port")) {
|
||||
parsedCandidate.port = port.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto protocol = parseString(candidate.object_items(), "protocol")) {
|
||||
parsedCandidate.protocol = protocol.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto network = parseString(candidate.object_items(), "network")) {
|
||||
parsedCandidate.network = network.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto generation = parseString(candidate.object_items(), "generation")) {
|
||||
parsedCandidate.generation = generation.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto id = parseString(candidate.object_items(), "id")) {
|
||||
parsedCandidate.id = id.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto component = parseString(candidate.object_items(), "component")) {
|
||||
parsedCandidate.component = component.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto foundation = parseString(candidate.object_items(), "foundation")) {
|
||||
parsedCandidate.foundation = foundation.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto priority = parseString(candidate.object_items(), "priority")) {
|
||||
parsedCandidate.priority = priority.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto ip = parseString(candidate.object_items(), "ip")) {
|
||||
parsedCandidate.ip = ip.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto type = parseString(candidate.object_items(), "type")) {
|
||||
parsedCandidate.type = type.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto tcpType = parseString(candidate.object_items(), "tcptype")) {
|
||||
parsedCandidate.tcpType = tcpType.value();
|
||||
}
|
||||
|
||||
if (const auto relAddr = parseString(candidate.object_items(), "rel-addr")) {
|
||||
parsedCandidate.relAddr = relAddr.value();
|
||||
}
|
||||
|
||||
if (const auto relPort = parseString(candidate.object_items(), "rel-port")) {
|
||||
parsedCandidate.relPort = relPort.value();
|
||||
}
|
||||
|
||||
result.candidates.push_back(std::move(parsedCandidate));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinPayloadVideoPayloadType> parsePayloadType(json11::Json::object const &object) {
|
||||
GroupJoinPayloadVideoPayloadType result;
|
||||
|
||||
if (const auto id = parseInt(object, "id")) {
|
||||
result.id = (uint32_t)id.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto name = parseString(object, "name")) {
|
||||
result.name = name.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (const auto clockrate = parseInt(object, "clockrate")) {
|
||||
result.clockrate = (uint32_t)clockrate.value();
|
||||
} else {
|
||||
result.clockrate = 0;
|
||||
}
|
||||
|
||||
if (const auto channels = parseInt(object, "channels")) {
|
||||
result.channels = (uint32_t)channels.value();
|
||||
} else {
|
||||
result.channels = 1;
|
||||
}
|
||||
|
||||
const auto parameters = object.find("parameters");
|
||||
if (parameters != object.end() && parameters->second.is_object()) {
|
||||
for (const auto ¶meter : parameters->second.object_items()) {
|
||||
if (parameter.second.is_string()) {
|
||||
result.parameters.push_back(std::make_pair(parameter.first, parameter.second.string_value()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto rtcpFbs = object.find("rtcp-fbs");
|
||||
if (rtcpFbs != object.end() && rtcpFbs->second.is_array()) {
|
||||
for (const auto &item : rtcpFbs->second.array_items()) {
|
||||
if (item.is_object()) {
|
||||
const auto type = item.object_items().find("type");
|
||||
if (type != item.object_items().end() && type->second.is_string()) {
|
||||
GroupJoinPayloadVideoPayloadType::FeedbackType parsedFeedbackType;
|
||||
|
||||
const auto typeString = type->second.string_value();
|
||||
|
||||
const auto subtype = item.object_items().find("subtype");
|
||||
if (subtype != item.object_items().end() && subtype->second.is_string()) {
|
||||
parsedFeedbackType.type = typeString;
|
||||
parsedFeedbackType.subtype = subtype->second.string_value();
|
||||
} else {
|
||||
auto components = splitString(typeString, ' ');
|
||||
if (components.size() == 1) {
|
||||
parsedFeedbackType.type = components[0];
|
||||
} else if (components.size() == 2) {
|
||||
parsedFeedbackType.type = components[0];
|
||||
parsedFeedbackType.subtype = components[1];
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
result.feedbackTypes.push_back(std::move(parsedFeedbackType));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinVideoInformation> parseVideoInformation(json11::Json::object const &object) {
|
||||
GroupJoinVideoInformation result;
|
||||
|
||||
const auto serverSources = object.find("server_sources");
|
||||
if (serverSources != object.end() && serverSources->second.is_array()) {
|
||||
for (const auto &item : serverSources->second.array_items()) {
|
||||
if (item.is_number()) {
|
||||
int32_t value = item.int_value();
|
||||
uint32_t unsignedValue = *(uint32_t *)&value;
|
||||
result.serverVideoBandwidthProbingSsrc = unsignedValue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto payloadTypes = object.find("payload-types");
|
||||
if (payloadTypes != object.end() && payloadTypes->second.is_array()) {
|
||||
for (const auto &payloadType : payloadTypes->second.array_items()) {
|
||||
if (payloadType.is_object()) {
|
||||
if (const auto parsedPayloadType = parsePayloadType(payloadType.object_items())) {
|
||||
result.payloadTypes.push_back(parsedPayloadType.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto rtpHdrexts = object.find("rtp-hdrexts");
|
||||
if (rtpHdrexts != object.end() && rtpHdrexts->second.is_array()) {
|
||||
for (const auto &rtpHdrext : rtpHdrexts->second.array_items()) {
|
||||
if (rtpHdrext.is_object()) {
|
||||
const auto id = rtpHdrext.object_items().find("id");
|
||||
if (id == rtpHdrext.object_items().end() || !id->second.is_number()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto uri = rtpHdrext.object_items().find("uri");
|
||||
if (uri == rtpHdrext.object_items().end() || !uri->second.is_string()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
result.extensionMap.push_back(std::make_pair(id->second.int_value(), uri->second.string_value()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto endpointId = object.find("endpoint");
|
||||
if (endpointId != object.end() && endpointId->second.is_string()) {
|
||||
result.endpointId = endpointId->second.string_value();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::string GroupJoinInternalPayload::serialize() {
|
||||
json11::Json::object object;
|
||||
|
||||
int32_t signedSsrc = *(int32_t *)&audioSsrc;
|
||||
|
||||
object.insert(std::make_pair("ssrc", json11::Json(signedSsrc)));
|
||||
object.insert(std::make_pair("ufrag", json11::Json(transport.ufrag)));
|
||||
object.insert(std::make_pair("pwd", json11::Json(transport.pwd)));
|
||||
|
||||
json11::Json::array fingerprints;
|
||||
for (const auto &fingerprint : transport.fingerprints) {
|
||||
json11::Json::object fingerprintJson;
|
||||
|
||||
fingerprintJson.insert(std::make_pair("hash", json11::Json(fingerprint.hash)));
|
||||
fingerprintJson.insert(std::make_pair("fingerprint", json11::Json(fingerprint.fingerprint)));
|
||||
fingerprintJson.insert(std::make_pair("setup", json11::Json(fingerprint.setup)));
|
||||
|
||||
fingerprints.push_back(json11::Json(std::move(fingerprintJson)));
|
||||
}
|
||||
object.insert(std::make_pair("fingerprints", json11::Json(std::move(fingerprints))));
|
||||
|
||||
if (videoInformation) {
|
||||
json11::Json::array ssrcGroups;
|
||||
for (const auto &ssrcGroup : videoInformation->ssrcGroups) {
|
||||
json11::Json::object ssrcGroupJson;
|
||||
|
||||
json11::Json::array ssrcGroupSources;
|
||||
for (auto ssrc : ssrcGroup.ssrcs) {
|
||||
int32_t signedValue = *(int32_t *)&ssrc;
|
||||
ssrcGroupSources.push_back(json11::Json(signedValue));
|
||||
}
|
||||
|
||||
ssrcGroupJson.insert(std::make_pair("sources", json11::Json(std::move(ssrcGroupSources))));
|
||||
ssrcGroupJson.insert(std::make_pair("semantics", json11::Json(ssrcGroup.semantics)));
|
||||
|
||||
ssrcGroups.push_back(json11::Json(std::move(ssrcGroupJson)));
|
||||
}
|
||||
object.insert(std::make_pair("ssrc-groups", json11::Json(std::move(ssrcGroups))));
|
||||
}
|
||||
|
||||
auto json = json11::Json(std::move(object));
|
||||
return json.dump();
|
||||
}
|
||||
|
||||
absl::optional<GroupJoinResponsePayload> GroupJoinResponsePayload::parse(std::string const &data) {
|
||||
std::string parsingError;
|
||||
auto json = json11::Json::parse(std::string(data.begin(), data.end()), parsingError);
|
||||
if (json.type() != json11::Json::OBJECT) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
tgcalls::GroupJoinResponsePayload result;
|
||||
|
||||
const auto transport = json.object_items().find("transport");
|
||||
if (transport == json.object_items().end() || !transport->second.is_object()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
if (const auto parsedTransport = parseTransportDescription(transport->second.object_items())) {
|
||||
result.transport = parsedTransport.value();
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
const auto video = json.object_items().find("video");
|
||||
if (video != json.object_items().end() && video->second.is_object()) {
|
||||
result.videoInformation = parseVideoInformation(video->second.object_items());
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
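For reference, a response that parse() above accepts has the shape sketched below. Every value is a placeholder, but the keys ("transport", "ufrag", "pwd", "fingerprints", "candidates") are exactly the ones the parsers look up; real server responses may carry additional fields, which the parser ignores.

// Sketch: feeding an illustrative server response through the parser above.
const std::string responseJson = R"({
    "transport": {
        "ufrag": "remoteUfrag",
        "pwd": "remotePwd",
        "fingerprints": [
            {"hash": "sha-256", "setup": "passive", "fingerprint": "AB:CD:..."}
        ],
        "candidates": [
            {"port": "443", "protocol": "udp", "network": "1", "generation": "0",
             "id": "1", "component": "1", "foundation": "1", "priority": "100",
             "ip": "203.0.113.1", "type": "host"}
        ]
    }
})";
if (const auto parsed = tgcalls::GroupJoinResponsePayload::parse(responseJson)) {
    // parsed->transport.candidates[0].ip == "203.0.113.1"
}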
@@ -0,0 +1,32 @@
#ifndef TGCALLS_GROUP_JOIN_PAYLOAD_INTERNAL_H
#define TGCALLS_GROUP_JOIN_PAYLOAD_INTERNAL_H

#include "GroupJoinPayload.h"

#include <vector>
#include <string>
#include <stdint.h>

#include "absl/types/optional.h"

namespace tgcalls {

struct GroupJoinResponsePayload {
    GroupJoinTransportDescription transport;
    absl::optional<GroupJoinVideoInformation> videoInformation;

    static absl::optional<GroupJoinResponsePayload> parse(std::string const &data);
};

struct GroupJoinInternalPayload {
    GroupJoinTransportDescription transport;

    uint32_t audioSsrc = 0;
    absl::optional<GroupParticipantVideoInformation> videoInformation;

    std::string serialize();
};

}

#endif
680
TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp
Normal file

@@ -0,0 +1,680 @@
#include "group/GroupNetworkManager.h"
|
||||
|
||||
#include "p2p/base/basic_packet_socket_factory.h"
|
||||
#include "p2p/client/basic_port_allocator.h"
|
||||
#include "p2p/base/p2p_transport_channel.h"
|
||||
#include "p2p/base/basic_async_resolver_factory.h"
|
||||
#include "api/packet_socket_factory.h"
|
||||
#include "rtc_base/rtc_certificate_generator.h"
|
||||
#include "p2p/base/ice_credentials_iterator.h"
|
||||
#include "api/jsep_ice_candidate.h"
|
||||
#include "p2p/base/dtls_transport.h"
|
||||
#include "p2p/base/dtls_transport_factory.h"
|
||||
#include "pc/dtls_srtp_transport.h"
|
||||
#include "pc/dtls_transport.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_util.h"
|
||||
#include "modules/rtp_rtcp/source/byte_io.h"
|
||||
#include "platform/PlatformInterface.h"
|
||||
#include "TurnCustomizerImpl.h"
|
||||
#include "SctpDataChannelProviderInterfaceImpl.h"
|
||||
#include "StaticThreads.h"
|
||||
#include "call/rtp_packet_sink_interface.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
|
||||
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
enum {
|
||||
kRtcpExpectedVersion = 2,
|
||||
kRtcpMinHeaderLength = 4,
|
||||
kRtcpMinParseLength = 8,
|
||||
|
||||
kRtpExpectedVersion = 2,
|
||||
kRtpMinParseLength = 12
|
||||
};
|
||||
|
||||
static void updateHeaderWithVoiceActivity(rtc::CopyOnWriteBuffer *packet, const uint8_t* ptrRTPDataExtensionEnd, const uint8_t* ptr, bool voiceActivity, bool zeroAudioLevel) {
|
||||
while (ptrRTPDataExtensionEnd - ptr > 0) {
|
||||
// 0
|
||||
// 0 1 2 3 4 5 6 7
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
// | ID | len |
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
|
||||
// Note that 'len' is the header extension element length, which is the
|
||||
// number of bytes - 1.
|
||||
const int id = (*ptr & 0xf0) >> 4;
|
||||
const int len = (*ptr & 0x0f);
|
||||
ptr++;
|
||||
|
||||
if (id == 0) {
|
||||
// Padding byte, skip ignoring len.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (id == 15) {
|
||||
RTC_LOG(LS_VERBOSE)
|
||||
<< "RTP extension header 15 encountered. Terminate parsing.";
|
||||
return;
|
||||
}
|
||||
|
||||
if (ptrRTPDataExtensionEnd - ptr < (len + 1)) {
|
||||
RTC_LOG(LS_WARNING) << "Incorrect one-byte extension len: " << (len + 1)
|
||||
<< ", bytes left in buffer: "
|
||||
<< (ptrRTPDataExtensionEnd - ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (id == 1) { // kAudioLevelUri
|
||||
uint8_t audioLevel = ptr[0] & 0x7f;
|
||||
if (zeroAudioLevel) {
|
||||
if (audioLevel < 47) {
|
||||
audioLevel = 0;
|
||||
} else if (audioLevel < 107) {
|
||||
audioLevel = 106;
|
||||
} else {
|
||||
audioLevel = 127;
|
||||
}
|
||||
}
|
||||
bool parsedVoiceActivity = (ptr[0] & 0x80) != 0;
|
||||
|
||||
if (parsedVoiceActivity != voiceActivity) {
|
||||
ptrdiff_t byteOffset = ptr - packet->data();
|
||||
uint8_t *mutableBytes = packet->MutableData();
|
||||
uint8_t audioActivityBit = voiceActivity ? 0x80 : 0;
|
||||
mutableBytes[byteOffset] = audioLevel | audioActivityBit;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
ptr += (len + 1);
|
||||
}
|
||||
}
|
||||
|
||||
#if 0 // Currently unused.
|
||||
static void readHeaderVoiceActivity(const uint8_t* ptrRTPDataExtensionEnd, const uint8_t* ptr, bool &didRead, uint8_t &audioLevel, bool &voiceActivity) {
|
||||
while (ptrRTPDataExtensionEnd - ptr > 0) {
|
||||
// 0
|
||||
// 0 1 2 3 4 5 6 7
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
// | ID | len |
|
||||
// +-+-+-+-+-+-+-+-+
|
||||
|
||||
// Note that 'len' is the header extension element length, which is the
|
||||
// number of bytes - 1.
|
||||
const int id = (*ptr & 0xf0) >> 4;
|
||||
const int len = (*ptr & 0x0f);
|
||||
ptr++;
|
||||
|
||||
if (id == 0) {
|
||||
// Padding byte, skip ignoring len.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (id == 15) {
|
||||
RTC_LOG(LS_VERBOSE)
|
||||
<< "RTP extension header 15 encountered. Terminate parsing.";
|
||||
return;
|
||||
}
|
||||
|
||||
if (ptrRTPDataExtensionEnd - ptr < (len + 1)) {
|
||||
RTC_LOG(LS_WARNING) << "Incorrect one-byte extension len: " << (len + 1)
|
||||
<< ", bytes left in buffer: "
|
||||
<< (ptrRTPDataExtensionEnd - ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (id == 1) { // kAudioLevelUri
|
||||
didRead = true;
|
||||
audioLevel = ptr[0] & 0x7f;
|
||||
voiceActivity = (ptr[0] & 0x80) != 0;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
ptr += (len + 1);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static void maybeUpdateRtpVoiceActivity(rtc::CopyOnWriteBuffer *packet, bool voiceActivity, bool zeroAudioLevel) {
|
||||
const uint8_t *_ptrRTPDataBegin = packet->data();
|
||||
const uint8_t *_ptrRTPDataEnd = packet->data() + packet->size();
|
||||
|
||||
const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin;
|
||||
if (length < kRtpMinParseLength) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Version
|
||||
const uint8_t V = _ptrRTPDataBegin[0] >> 6;
|
||||
// eXtension
|
||||
const bool X = ((_ptrRTPDataBegin[0] & 0x10) == 0) ? false : true;
|
||||
const uint8_t CC = _ptrRTPDataBegin[0] & 0x0f;
|
||||
|
||||
const uint8_t PT = _ptrRTPDataBegin[1] & 0x7f;
|
||||
|
||||
const uint8_t* ptr = &_ptrRTPDataBegin[4];
|
||||
|
||||
ptr += 4;
|
||||
|
||||
ptr += 4;
|
||||
|
||||
if (V != kRtpExpectedVersion) {
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t CSRCocts = CC * 4;
|
||||
|
||||
if ((ptr + CSRCocts) > _ptrRTPDataEnd) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (PT != 111) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (uint8_t i = 0; i < CC; ++i) {
|
||||
ptr += 4;
|
||||
}
|
||||
|
||||
if (X) {
|
||||
/* RTP header extension, RFC 3550.
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| defined by profile | length |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| header extension |
|
||||
| .... |
|
||||
*/
|
||||
const ptrdiff_t remain = _ptrRTPDataEnd - ptr;
|
||||
if (remain < 4) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint16_t definedByProfile = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
|
||||
ptr += 2;
|
||||
|
||||
// in 32 bit words
|
||||
size_t XLen = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
|
||||
ptr += 2;
|
||||
XLen *= 4; // in bytes
|
||||
|
||||
if (static_cast<size_t>(remain) < (4 + XLen)) {
|
||||
return;
|
||||
}
|
||||
static constexpr uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE;
|
||||
if (definedByProfile == kRtpOneByteHeaderExtensionId) {
|
||||
const uint8_t* ptrRTPDataExtensionEnd = ptr + XLen;
|
||||
updateHeaderWithVoiceActivity(packet, ptrRTPDataExtensionEnd, ptr, voiceActivity, zeroAudioLevel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if 0 // Currently unused.
|
||||
static void maybeReadRtpVoiceActivity(rtc::CopyOnWriteBuffer *packet, bool &didRead, uint32_t &ssrc, uint8_t &audioLevel, bool &voiceActivity) {
|
||||
const uint8_t *_ptrRTPDataBegin = packet->data();
|
||||
const uint8_t *_ptrRTPDataEnd = packet->data() + packet->size();
|
||||
|
||||
const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin;
|
||||
if (length < kRtpMinParseLength) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Version
|
||||
const uint8_t V = _ptrRTPDataBegin[0] >> 6;
|
||||
// eXtension
|
||||
const bool X = ((_ptrRTPDataBegin[0] & 0x10) == 0) ? false : true;
|
||||
const uint8_t CC = _ptrRTPDataBegin[0] & 0x0f;
|
||||
|
||||
const uint8_t PT = _ptrRTPDataBegin[1] & 0x7f;
|
||||
|
||||
const uint8_t* ptr = &_ptrRTPDataBegin[4];
|
||||
|
||||
ptr += 4;
|
||||
|
||||
ssrc = webrtc::ByteReader<uint32_t>::ReadBigEndian(ptr);
|
||||
ptr += 4;
|
||||
|
||||
if (V != kRtpExpectedVersion) {
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t CSRCocts = CC * 4;
|
||||
|
||||
if ((ptr + CSRCocts) > _ptrRTPDataEnd) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (PT != 111) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (uint8_t i = 0; i < CC; ++i) {
|
||||
ptr += 4;
|
||||
}
|
||||
|
||||
if (X) {
|
||||
/* RTP header extension, RFC 3550.
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| defined by profile | length |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| header extension |
|
||||
| .... |
|
||||
*/
|
||||
const ptrdiff_t remain = _ptrRTPDataEnd - ptr;
|
||||
if (remain < 4) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint16_t definedByProfile = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
|
||||
ptr += 2;
|
||||
|
||||
// in 32 bit words
|
||||
size_t XLen = webrtc::ByteReader<uint16_t>::ReadBigEndian(ptr);
|
||||
ptr += 2;
|
||||
XLen *= 4; // in bytes
|
||||
|
||||
if (static_cast<size_t>(remain) < (4 + XLen)) {
|
||||
return;
|
||||
}
|
||||
static constexpr uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE;
|
||||
if (definedByProfile == kRtpOneByteHeaderExtensionId) {
|
||||
const uint8_t* ptrRTPDataExtensionEnd = ptr + XLen;
|
||||
readHeaderVoiceActivity(ptrRTPDataExtensionEnd, ptr, didRead, audioLevel, voiceActivity);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
class WrappedDtlsSrtpTransport : public webrtc::DtlsSrtpTransport {
|
||||
public:
|
||||
bool _voiceActivity = false;
|
||||
|
||||
public:
|
||||
WrappedDtlsSrtpTransport(bool rtcp_mux_enabled, const webrtc::FieldTrialsView& fieldTrials, std::function<void(webrtc::RtpPacketReceived const &, bool)> &&processRtpPacket, bool zeroAudioLevel) :
|
||||
webrtc::DtlsSrtpTransport(rtcp_mux_enabled, fieldTrials),
|
||||
_processRtpPacket(std::move(processRtpPacket)),
|
||||
_zeroAudioLevel(zeroAudioLevel) {
|
||||
}
|
||||
|
||||
virtual ~WrappedDtlsSrtpTransport() {
|
||||
}
|
||||
|
||||
bool SendRtpPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options, int flags) override {
|
||||
maybeUpdateRtpVoiceActivity(packet, _voiceActivity, _zeroAudioLevel);
|
||||
return webrtc::DtlsSrtpTransport::SendRtpPacket(packet, options, flags);
|
||||
}
|
||||
|
||||
void ProcessRtpPacket(webrtc::RtpPacketReceived const &packet, bool isUnresolved) override {
|
||||
_processRtpPacket(packet, isUnresolved);
|
||||
}
|
||||
|
||||
private:
|
||||
std::function<void(webrtc::RtpPacketReceived const &, bool)> _processRtpPacket;
|
||||
bool _zeroAudioLevel;
|
||||
};
|
||||
|
||||
webrtc::CryptoOptions GroupNetworkManager::getDefaulCryptoOptions() {
|
||||
auto options = webrtc::CryptoOptions();
|
||||
options.srtp.enable_aes128_sha1_80_crypto_cipher = false;
|
||||
options.srtp.enable_gcm_crypto_suites = true;
|
||||
return options;
|
||||
}
|
||||
|
||||
GroupNetworkManager::GroupNetworkManager(
|
||||
const webrtc::FieldTrialsView &fieldTrials,
|
||||
std::function<void(const State &)> stateUpdated,
|
||||
std::function<void(uint32_t, int)> unknownSsrcPacketReceived,
|
||||
std::function<void(bool)> dataChannelStateUpdated,
|
||||
std::function<void(std::string const &)> dataChannelMessageReceived,
|
||||
std::function<void(uint32_t, uint8_t, bool)> audioActivityUpdated,
|
||||
bool zeroAudioLevel,
|
||||
std::function<void(uint32_t)> anyActivityUpdated,
|
||||
std::shared_ptr<Threads> threads) :
|
||||
_threads(std::move(threads)),
|
||||
_stateUpdated(std::move(stateUpdated)),
|
||||
_unknownSsrcPacketReceived(std::move(unknownSsrcPacketReceived)),
|
||||
_dataChannelStateUpdated(dataChannelStateUpdated),
|
||||
_dataChannelMessageReceived(dataChannelMessageReceived),
|
||||
_audioActivityUpdated(audioActivityUpdated),
|
||||
_zeroAudioLevel(zeroAudioLevel),
|
||||
_anyActivityUpdated(anyActivityUpdated) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
_localIceParameters = PeerIceParameters(rtc::CreateRandomString(cricket::ICE_UFRAG_LENGTH), rtc::CreateRandomString(cricket::ICE_PWD_LENGTH), false);
|
||||
|
||||
_localCertificate = rtc::RTCCertificateGenerator::GenerateCertificate(rtc::KeyParams(rtc::KT_ECDSA), absl::nullopt);
|
||||
|
||||
_networkMonitorFactory = PlatformInterface::SharedInstance()->createNetworkMonitorFactory();
|
||||
|
||||
_socketFactory.reset(new rtc::BasicPacketSocketFactory(_threads->getNetworkThread()->socketserver()));
|
||||
_networkManager = std::make_unique<rtc::BasicNetworkManager>(_networkMonitorFactory.get(), _threads->getNetworkThread()->socketserver());
|
||||
_asyncResolverFactory = std::make_unique<webrtc::BasicAsyncDnsResolverFactory>();
|
||||
|
||||
_dtlsSrtpTransport = std::make_unique<WrappedDtlsSrtpTransport>(true, fieldTrials, [this](webrtc::RtpPacketReceived const &packet, bool isUnresolved) {
|
||||
this->RtpPacketReceived_n(packet, isUnresolved);
|
||||
}, _zeroAudioLevel);
|
||||
_dtlsSrtpTransport->SetDtlsTransports(nullptr, nullptr);
|
||||
_dtlsSrtpTransport->SetActiveResetSrtpParams(false);
|
||||
_dtlsSrtpTransport->SubscribeReadyToSend(this, [this](bool value) {
|
||||
this->DtlsReadyToSend(value);
|
||||
});
|
||||
|
||||
resetDtlsSrtpTransport();
|
||||
}
|
||||
|
||||
GroupNetworkManager::~GroupNetworkManager() {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
RTC_LOG(LS_INFO) << "GroupNetworkManager::~GroupNetworkManager()";
|
||||
|
||||
_dataChannelInterface.reset();
|
||||
_dtlsSrtpTransport.reset();
|
||||
_dtlsTransport.reset();
|
||||
_transportChannel.reset();
|
||||
_asyncResolverFactory.reset();
|
||||
_portAllocator.reset();
|
||||
_networkManager.reset();
|
||||
_socketFactory.reset();
|
||||
}
|
||||
|
||||
void GroupNetworkManager::resetDtlsSrtpTransport() {
|
||||
std::unique_ptr<cricket::BasicPortAllocator> portAllocator = std::make_unique<cricket::BasicPortAllocator>(_networkManager.get(), _socketFactory.get(), _turnCustomizer.get(), nullptr);
|
||||
|
||||
uint32_t flags = portAllocator->flags();
|
||||
|
||||
flags |=
|
||||
cricket::PORTALLOCATOR_ENABLE_IPV6 |
|
||||
cricket::PORTALLOCATOR_ENABLE_IPV6_ON_WIFI;
|
||||
|
||||
portAllocator->set_flags(flags);
|
||||
portAllocator->Initialize();
|
||||
|
||||
portAllocator->SetConfiguration({}, {}, 2, webrtc::NO_PRUNE, _turnCustomizer.get());
|
||||
|
||||
webrtc::IceTransportInit iceTransportInit;
|
||||
iceTransportInit.set_port_allocator(portAllocator.get());
|
||||
iceTransportInit.set_async_dns_resolver_factory(_asyncResolverFactory.get());
|
||||
|
||||
auto transportChannel = cricket::P2PTransportChannel::Create("transport", 0, std::move(iceTransportInit));
|
||||
|
||||
cricket::IceConfig iceConfig;
|
||||
iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY;
|
||||
iceConfig.prioritize_most_likely_candidate_pairs = true;
|
||||
iceConfig.regather_on_failed_networks_interval = 2000;
|
||||
transportChannel->SetIceConfig(iceConfig);
|
||||
|
||||
cricket::IceParameters localIceParameters(
|
||||
_localIceParameters.ufrag,
|
||||
_localIceParameters.pwd,
|
||||
false
|
||||
);
|
||||
|
||||
transportChannel->SetIceParameters(localIceParameters);
|
||||
const bool isOutgoing = false;
|
||||
transportChannel->SetIceRole(isOutgoing ? cricket::ICEROLE_CONTROLLING : cricket::ICEROLE_CONTROLLED);
|
||||
transportChannel->SetRemoteIceMode(cricket::ICEMODE_LITE);
|
||||
|
||||
transportChannel->SignalIceTransportStateChanged.connect(this, &GroupNetworkManager::transportStateChanged);
|
||||
transportChannel->SignalReadPacket.connect(this, &GroupNetworkManager::transportPacketReceived);
|
||||
|
||||
webrtc::CryptoOptions cryptoOptions = GroupNetworkManager::getDefaulCryptoOptions();
|
||||
|
||||
auto dtlsTransport = std::make_unique<cricket::DtlsTransport>(transportChannel.get(), cryptoOptions, nullptr);
|
||||
|
||||
dtlsTransport->SignalWritableState.connect(
|
||||
this, &GroupNetworkManager::OnTransportWritableState_n);
|
||||
dtlsTransport->SignalReceivingState.connect(
|
||||
this, &GroupNetworkManager::OnTransportReceivingState_n);
|
||||
|
||||
dtlsTransport->SetDtlsRole(rtc::SSLRole::SSL_SERVER);
|
||||
dtlsTransport->SetLocalCertificate(_localCertificate);
|
||||
|
||||
_dtlsSrtpTransport->SetDtlsTransports(dtlsTransport.get(), nullptr);
|
||||
|
||||
_dtlsTransport = std::move(dtlsTransport);
|
||||
_transportChannel = std::move(transportChannel);
|
||||
_portAllocator = std::move(portAllocator);
|
||||
}
|
||||
|
||||
void GroupNetworkManager::start() {
|
||||
_transportChannel->MaybeStartGathering();
|
||||
|
||||
restartDataChannel();
|
||||
}
|
||||
|
||||
void GroupNetworkManager::restartDataChannel() {
|
||||
_dataChannelStateUpdated(false);
|
||||
|
||||
const auto weak = std::weak_ptr<GroupNetworkManager>(shared_from_this());
|
||||
_dataChannelInterface.reset(new SctpDataChannelProviderInterfaceImpl(
|
||||
_dtlsTransport.get(),
|
||||
true,
|
||||
[weak, threads = _threads](bool state) {
|
||||
assert(threads->getNetworkThread()->IsCurrent());
|
||||
const auto strong = weak.lock();
|
||||
if (!strong) {
|
||||
return;
|
||||
}
|
||||
strong->_dataChannelStateUpdated(state);
|
||||
},
|
||||
[weak, threads = _threads]() {
|
||||
assert(threads->getNetworkThread()->IsCurrent());
|
||||
const auto strong = weak.lock();
|
||||
if (!strong) {
|
||||
return;
|
||||
}
|
||||
strong->restartDataChannel();
|
||||
},
|
||||
[weak, threads = _threads](std::string const &message) {
|
||||
assert(threads->getNetworkThread()->IsCurrent());
|
||||
const auto strong = weak.lock();
|
||||
if (!strong) {
|
||||
return;
|
||||
}
|
||||
strong->_dataChannelMessageReceived(message);
|
||||
},
|
||||
_threads
|
||||
));
|
||||
|
||||
_dataChannelInterface->updateIsConnected(_isConnected);
|
||||
}
|
||||
|
||||
void GroupNetworkManager::stop() {
|
||||
_transportChannel->SignalIceTransportStateChanged.disconnect(this);
|
||||
_transportChannel->SignalReadPacket.disconnect(this);
|
||||
|
||||
_dtlsTransport->SignalWritableState.disconnect(this);
|
||||
_dtlsTransport->SignalReceivingState.disconnect(this);
|
||||
|
||||
_dataChannelInterface.reset();
|
||||
|
||||
_localIceParameters = PeerIceParameters(rtc::CreateRandomString(cricket::ICE_UFRAG_LENGTH), rtc::CreateRandomString(cricket::ICE_PWD_LENGTH), false);
|
||||
|
||||
_localCertificate = rtc::RTCCertificateGenerator::GenerateCertificate(rtc::KeyParams(rtc::KT_ECDSA), absl::nullopt);
|
||||
|
||||
resetDtlsSrtpTransport();
|
||||
}
|
||||
|
||||
PeerIceParameters GroupNetworkManager::getLocalIceParameters() {
|
||||
return _localIceParameters;
|
||||
}
|
||||
|
||||
std::unique_ptr<rtc::SSLFingerprint> GroupNetworkManager::getLocalFingerprint() {
|
||||
auto certificate = _localCertificate;
|
||||
if (!certificate) {
|
||||
return nullptr;
|
||||
}
|
||||
return rtc::SSLFingerprint::CreateFromCertificate(*certificate);
|
||||
}
|
||||
|
||||
void GroupNetworkManager::setRemoteParams(PeerIceParameters const &remoteIceParameters, std::vector<cricket::Candidate> const &iceCandidates, rtc::SSLFingerprint *fingerprint) {
|
||||
_remoteIceParameters = remoteIceParameters;
|
||||
|
||||
cricket::IceParameters parameters(
|
||||
remoteIceParameters.ufrag,
|
||||
remoteIceParameters.pwd,
|
||||
true
|
||||
);
|
||||
|
||||
_transportChannel->SetRemoteIceParameters(parameters);
|
||||
|
||||
for (const auto &candidate : iceCandidates) {
|
||||
_transportChannel->AddRemoteCandidate(candidate);
|
||||
}
|
||||
|
||||
if (fingerprint) {
|
||||
_dtlsTransport->SetRemoteFingerprint(fingerprint->algorithm, fingerprint->digest.data(), fingerprint->digest.size());
|
||||
}
|
||||
}
|
||||
|
||||
void GroupNetworkManager::sendDataChannelMessage(std::string const &message) {
|
||||
if (_dataChannelInterface) {
|
||||
_dataChannelInterface->sendDataChannelMessage(message);
|
||||
}
|
||||
}
|
||||
|
||||
void GroupNetworkManager::setOutgoingVoiceActivity(bool isSpeech) {
|
||||
if (_dtlsSrtpTransport) {
|
||||
((WrappedDtlsSrtpTransport *)_dtlsSrtpTransport.get())->_voiceActivity = isSpeech;
|
||||
}
|
||||
}
|
||||
|
||||
webrtc::RtpTransport *GroupNetworkManager::getRtpTransport() {
|
||||
return _dtlsSrtpTransport.get();
|
||||
}
|
||||
|
||||
void GroupNetworkManager::checkConnectionTimeout() {
|
||||
const auto weak = std::weak_ptr<GroupNetworkManager>(shared_from_this());
|
||||
_threads->getNetworkThread()->PostDelayedTask([weak]() {
|
||||
auto strong = weak.lock();
|
||||
if (!strong) {
|
||||
return;
|
||||
}
|
||||
|
||||
int64_t currentTimestamp = rtc::TimeMillis();
|
||||
const int64_t maxTimeout = 20000;
|
||||
|
||||
if (strong->_lastNetworkActivityMs + maxTimeout < currentTimestamp) {
|
||||
GroupNetworkManager::State emitState;
|
||||
emitState.isReadyToSendData = false;
|
||||
emitState.isFailed = true;
|
||||
strong->_stateUpdated(emitState);
|
||||
}
|
||||
|
||||
strong->checkConnectionTimeout();
|
||||
}, webrtc::TimeDelta::Millis(1000));
|
||||
}
|
||||
|
||||
void GroupNetworkManager::candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
}
|
||||
|
||||
void GroupNetworkManager::candidateGatheringState(cricket::IceTransportInternal *transport) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
}
|
||||
|
||||
void GroupNetworkManager::OnTransportWritableState_n(rtc::PacketTransportInternal *transport) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
UpdateAggregateStates_n();
|
||||
}
|
||||
void GroupNetworkManager::OnTransportReceivingState_n(rtc::PacketTransportInternal *transport) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
UpdateAggregateStates_n();
|
||||
}
|
||||
|
||||
void GroupNetworkManager::DtlsReadyToSend(bool isReadyToSend) {
|
||||
UpdateAggregateStates_n();
|
||||
|
||||
if (isReadyToSend) {
|
||||
const auto weak = std::weak_ptr<GroupNetworkManager>(shared_from_this());
|
||||
_threads->getNetworkThread()->PostTask([weak]() {
|
||||
const auto strong = weak.lock();
|
||||
if (!strong) {
|
||||
return;
|
||||
}
|
||||
strong->UpdateAggregateStates_n();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void GroupNetworkManager::transportStateChanged(cricket::IceTransportInternal *transport) {
|
||||
UpdateAggregateStates_n();
|
||||
}
|
||||
|
||||
void GroupNetworkManager::transportReadyToSend(cricket::IceTransportInternal *transport) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
}
|
||||
|
||||
void GroupNetworkManager::transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t ×tamp, int unused) {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
_lastNetworkActivityMs = rtc::TimeMillis();
|
||||
}
|
||||
|
||||
void GroupNetworkManager::RtpPacketReceived_n(webrtc::RtpPacketReceived const &packet, bool isUnresolved) {
|
||||
if (packet.HasExtension(webrtc::kRtpExtensionAudioLevel)) {
|
||||
uint8_t audioLevel = 0;
|
||||
bool isSpeech = false;
|
||||
|
||||
if (packet.GetExtension<webrtc::AudioLevel>(&isSpeech, &audioLevel)) {
|
||||
if (_audioActivityUpdated) {
|
||||
_audioActivityUpdated(packet.Ssrc(), audioLevel, isSpeech);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (_anyActivityUpdated) {
|
||||
_anyActivityUpdated(packet.Ssrc());
|
||||
}
|
||||
|
||||
if (isUnresolved && _unknownSsrcPacketReceived) {
|
||||
uint32_t ssrc = packet.Ssrc();
|
||||
int payloadType = packet.PayloadType();
|
||||
|
||||
_unknownSsrcPacketReceived(ssrc, payloadType);
|
||||
}
|
||||
}
|
||||
|
||||
void GroupNetworkManager::UpdateAggregateStates_n() {
|
||||
assert(_threads->getNetworkThread()->IsCurrent());
|
||||
|
||||
auto state = _transportChannel->GetIceTransportState();
|
||||
bool isConnected = false;
|
||||
switch (state) {
|
||||
case webrtc::IceTransportState::kConnected:
|
||||
case webrtc::IceTransportState::kCompleted:
|
||||
isConnected = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!_dtlsSrtpTransport->IsWritable(false)) {
|
||||
isConnected = false;
|
||||
}
|
||||
|
||||
if (_isConnected != isConnected) {
|
||||
_isConnected = isConnected;
|
||||
|
||||
GroupNetworkManager::State emitState;
|
||||
emitState.isReadyToSendData = isConnected;
|
||||
_stateUpdated(emitState);
|
||||
|
||||
if (_dataChannelInterface) {
|
||||
_dataChannelInterface->updateIsConnected(isConnected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void GroupNetworkManager::sctpReadyToSendData() {
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
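The element that updateHeaderWithVoiceActivity() rewrites above is the RFC 6464 audio-level one-byte header extension: a single payload byte whose top bit is the voice-activity flag and whose low 7 bits are the level in -dBov. A small self-contained sketch of that byte layout, independent of the WebRTC types used above:

// Sketch: packing/unpacking the RFC 6464 audio-level byte that the code
// above patches in place on outgoing packets.
#include <cstdint>

static uint8_t packAudioLevel(bool voiceActivity, uint8_t levelDbov /* 0..127 */) {
    return (voiceActivity ? 0x80 : 0x00) | (levelDbov & 0x7f);
}

static void unpackAudioLevel(uint8_t byte, bool &voiceActivity, uint8_t &levelDbov) {
    voiceActivity = (byte & 0x80) != 0; // top bit: VAD flag
    levelDbov = byte & 0x7f;            // low 7 bits: 0 = loudest, 127 = silence
}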
133
TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h
Normal file

@@ -0,0 +1,133 @@
#ifndef TGCALLS_GROUP_NETWORK_MANAGER_H
#define TGCALLS_GROUP_NETWORK_MANAGER_H

#ifdef WEBRTC_WIN
// Compiler errors in conflicting Windows headers if not included here.
#include <winsock2.h>
#endif // WEBRTC_WIN

#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "api/candidate.h"
#include "media/base/media_channel.h"
#include "pc/sctp_transport.h"
#include "pc/sctp_data_channel.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"

#include <functional>
#include <memory>

#include "Message.h"
#include "ThreadLocalObject.h"

namespace rtc {
class BasicPacketSocketFactory;
class BasicNetworkManager;
class PacketTransportInternal;
struct NetworkRoute;
} // namespace rtc

namespace cricket {
class BasicPortAllocator;
class P2PTransportChannel;
class IceTransportInternal;
class DtlsTransport;
} // namespace cricket

namespace webrtc {
class BasicAsyncResolverFactory;
class TurnCustomizer;
class DtlsSrtpTransport;
class RtpTransport;
} // namespace webrtc

namespace tgcalls {

struct Message;
class SctpDataChannelProviderInterfaceImpl;
class Threads;

class GroupNetworkManager : public sigslot::has_slots<>, public std::enable_shared_from_this<GroupNetworkManager> {
public:
    struct State {
        bool isReadyToSendData = false;
        bool isFailed = false;
    };

    static webrtc::CryptoOptions getDefaulCryptoOptions();

    GroupNetworkManager(
        const webrtc::FieldTrialsView& fieldTrials,
        std::function<void(const State &)> stateUpdated,
        std::function<void(uint32_t, int)> unknownSsrcPacketReceived,
        std::function<void(bool)> dataChannelStateUpdated,
        std::function<void(std::string const &)> dataChannelMessageReceived,
        std::function<void(uint32_t, uint8_t, bool)> audioActivityUpdated,
        bool zeroAudioLevel,
        std::function<void(uint32_t)> anyActivityUpdated,
        std::shared_ptr<Threads> threads);
    ~GroupNetworkManager();

    void start();
    void stop();

    PeerIceParameters getLocalIceParameters();
    std::unique_ptr<rtc::SSLFingerprint> getLocalFingerprint();
    void setRemoteParams(PeerIceParameters const &remoteIceParameters, std::vector<cricket::Candidate> const &iceCandidates, rtc::SSLFingerprint *fingerprint);

    void sendDataChannelMessage(std::string const &message);

    void setOutgoingVoiceActivity(bool isSpeech);

    webrtc::RtpTransport *getRtpTransport();

private:
    void resetDtlsSrtpTransport();
    void restartDataChannel();
    void checkConnectionTimeout();
    void candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate);
    void candidateGatheringState(cricket::IceTransportInternal *transport);
    void OnTransportWritableState_n(rtc::PacketTransportInternal *transport);
    void OnTransportReceivingState_n(rtc::PacketTransportInternal *transport);
    void transportStateChanged(cricket::IceTransportInternal *transport);
    void transportReadyToSend(cricket::IceTransportInternal *transport);
    void transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused);
    void DtlsReadyToSend(bool DtlsReadyToSend);
    void UpdateAggregateStates_n();
    void RtpPacketReceived_n(webrtc::RtpPacketReceived const &packet, bool isUnresolved);
    void OnRtcpPacketReceived_n(rtc::CopyOnWriteBuffer *packet, int64_t packet_time_us);

    void sctpReadyToSendData();

    std::shared_ptr<Threads> _threads;
    std::function<void(const GroupNetworkManager::State &)> _stateUpdated;
    std::function<void(uint32_t, int)> _unknownSsrcPacketReceived;
    std::function<void(bool)> _dataChannelStateUpdated;
    std::function<void(std::string const &)> _dataChannelMessageReceived;
    std::function<void(uint32_t, uint8_t, bool)> _audioActivityUpdated;
    bool _zeroAudioLevel = false;
    std::function<void(uint32_t)> _anyActivityUpdated;

    std::unique_ptr<rtc::NetworkMonitorFactory> _networkMonitorFactory;
    std::unique_ptr<rtc::BasicPacketSocketFactory> _socketFactory;
    std::unique_ptr<rtc::BasicNetworkManager> _networkManager;
    std::unique_ptr<webrtc::TurnCustomizer> _turnCustomizer;
    std::unique_ptr<cricket::BasicPortAllocator> _portAllocator;
    std::unique_ptr<webrtc::AsyncDnsResolverFactoryInterface> _asyncResolverFactory;
    std::unique_ptr<cricket::P2PTransportChannel> _transportChannel;
    std::unique_ptr<cricket::DtlsTransport> _dtlsTransport;
    std::unique_ptr<webrtc::DtlsSrtpTransport> _dtlsSrtpTransport;

    std::unique_ptr<SctpDataChannelProviderInterfaceImpl> _dataChannelInterface;

    webrtc::scoped_refptr<rtc::RTCCertificate> _localCertificate;
    PeerIceParameters _localIceParameters;
    absl::optional<PeerIceParameters> _remoteIceParameters;

    bool _isConnected = false;
    int64_t _lastNetworkActivityMs = 0;
};

} // namespace tgcalls

#endif
1109
TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp
Normal file
File diff suppressed because it is too large
54
TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h
Normal file

@@ -0,0 +1,54 @@
#ifndef TGCALLS_STREAMING_MEDIA_CONTEXT_H
#define TGCALLS_STREAMING_MEDIA_CONTEXT_H

#include "GroupInstanceImpl.h"
#include <stdint.h>
#include "../StaticThreads.h"

namespace tgcalls {

class StreamingMediaContextPrivate;

class StreamingMediaContext {
public:
    struct VideoChannel {
        VideoChannelDescription::Quality quality = VideoChannelDescription::Quality::Thumbnail;
        std::string endpoint;

        VideoChannel(VideoChannelDescription::Quality quality_, std::string endpoint_) :
        quality(quality_),
        endpoint(endpoint_) {
        }
    };

public:
    struct StreamingMediaContextArguments {
        std::shared_ptr<Threads> threads;
        bool isUnifiedBroadcast = false;
        std::function<std::shared_ptr<BroadcastPartTask>(std::function<void(int64_t)>)> requestCurrentTime;
        std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, std::function<void(BroadcastPart &&)>)> requestAudioBroadcastPart;
        std::function<std::shared_ptr<BroadcastPartTask>(std::shared_ptr<PlatformContext>, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function<void(BroadcastPart &&)>)> requestVideoBroadcastPart;
        std::function<void(uint32_t, float, bool)> updateAudioLevel;
        std::shared_ptr<PlatformContext> platformContext;
    };

public:
    StreamingMediaContext(StreamingMediaContextArguments &&arguments);
    ~StreamingMediaContext();

    StreamingMediaContext& operator=(const StreamingMediaContext&) = delete;
    StreamingMediaContext& operator=(StreamingMediaContext&&) = delete;

    void setActiveVideoChannels(std::vector<VideoChannel> const &videoChannels);
    void setVolume(uint32_t ssrc, double volume);
    void addVideoSink(std::string const &endpointId, std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

    void getAudio(int16_t *audio_samples, const size_t num_samples, const size_t num_channels, const uint32_t samples_per_sec);

private:
    std::shared_ptr<StreamingMediaContextPrivate> _private;
};

}

#endif
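getAudio() is a pull-style render callback: the audio device asks for a fixed number of samples at its own cadence and the context fills the buffer from downloaded broadcast parts. A sketch of the expected call pattern, assuming a 10 ms quantum at 48 kHz stereo; the real parameters come from whatever audio device module drives playout.

// Sketch: how a playout callback might drain the streaming context.
// The 480/2/48000 values are assumptions (10 ms at 48 kHz stereo).
void renderTenMilliseconds(tgcalls::StreamingMediaContext &context) {
    int16_t buffer[480 * 2] = {0};
    context.getAudio(buffer, 480, 2, 48000);
    // hand `buffer` to the audio device
}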
923
TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp
Normal file

@@ -0,0 +1,923 @@
#include "VideoStreamingPart.h"
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/third_party/base64/base64.h"
|
||||
#include "api/video/i420_buffer.h"
|
||||
|
||||
#include "AVIOContextImpl.h"
|
||||
#include "platform/PlatformInterface.h"
|
||||
|
||||
#include <string>
|
||||
#include <set>
|
||||
#include <map>
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
namespace {
|
||||
|
||||
class MediaDataPacket {
|
||||
public:
|
||||
MediaDataPacket() : _packet(av_packet_alloc()) {
|
||||
}
|
||||
|
||||
MediaDataPacket(MediaDataPacket &&other) : _packet(other._packet) {
|
||||
other._packet = nullptr;
|
||||
}
|
||||
|
||||
~MediaDataPacket() {
|
||||
if (_packet) {
|
||||
av_packet_free(&_packet);
|
||||
}
|
||||
}
|
||||
|
||||
AVPacket *packet() {
|
||||
return _packet;
|
||||
}
|
||||
|
||||
private:
|
||||
AVPacket *_packet = nullptr;
|
||||
};
|
||||
|
||||
class DecodableFrame {
|
||||
public:
|
||||
DecodableFrame(MediaDataPacket packet, int64_t pts, int64_t dts):
|
||||
_packet(std::move(packet)),
|
||||
_pts(pts),
|
||||
_dts(dts) {
|
||||
}
|
||||
|
||||
~DecodableFrame() {
|
||||
}
|
||||
|
||||
MediaDataPacket &packet() {
|
||||
return _packet;
|
||||
}
|
||||
|
||||
int64_t pts() {
|
||||
return _pts;
|
||||
}
|
||||
|
||||
int64_t dts() {
|
||||
return _dts;
|
||||
}
|
||||
|
||||
private:
|
||||
MediaDataPacket _packet;
|
||||
int64_t _pts = 0;
|
||||
int64_t _dts = 0;
|
||||
};
|
||||
|
||||
class Frame {
|
||||
public:
|
||||
Frame() {
|
||||
_frame = av_frame_alloc();
|
||||
}
|
||||
|
||||
Frame(Frame &&other) {
|
||||
_frame = other._frame;
|
||||
other._frame = nullptr;
|
||||
}
|
||||
|
||||
~Frame() {
|
||||
if (_frame) {
|
||||
av_frame_free(&_frame);
|
||||
}
|
||||
}
|
||||
|
||||
AVFrame *frame() {
|
||||
return _frame;
|
||||
}
|
||||
|
||||
double pts(AVStream *stream, double &firstFramePts) {
|
||||
int64_t framePts = _frame->pts;
|
||||
double spf = av_q2d(stream->time_base);
|
||||
double value = ((double)framePts) * spf;
|
||||
|
||||
if (firstFramePts < 0.0) {
|
||||
firstFramePts = value;
|
||||
}
|
||||
|
||||
return value - firstFramePts;
|
||||
}
|
||||
|
||||
private:
|
||||
AVFrame *_frame = nullptr;
|
||||
};
|
||||
|
||||
struct VideoStreamEvent {
|
||||
int32_t offset = 0;
|
||||
std::string endpointId;
|
||||
int32_t rotation = 0;
|
||||
int32_t extra = 0;
|
||||
};
|
||||
|
||||
struct VideoStreamInfo {
|
||||
std::string container;
|
||||
int32_t activeMask = 0;
|
||||
std::vector<VideoStreamEvent> events;
|
||||
};
|
||||
|
||||
absl::optional<int32_t> readInt32(std::vector<uint8_t> const &data, int &offset) {
|
||||
if (offset + 4 > data.size()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
int32_t value = 0;
|
||||
memcpy(&value, data.data() + offset, 4);
|
||||
offset += 4;
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
absl::optional<uint8_t> readBytesAsInt32(std::vector<uint8_t> const &data, int &offset, int count) {
|
||||
if (offset + count > data.size()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (count == 0) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
if (count <= 4) {
|
||||
int32_t value = 0;
|
||||
memcpy(&value, data.data() + offset, count);
|
||||
offset += count;
|
||||
return value;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t roundUp(int32_t numToRound, int32_t multiple) {
|
||||
if (multiple == 0) {
|
||||
return numToRound;
|
||||
}
|
||||
|
||||
int32_t remainder = numToRound % multiple;
|
||||
if (remainder == 0) {
|
||||
return numToRound;
|
||||
}
|
||||
|
||||
return numToRound + multiple - remainder;
|
||||
}
|
||||
|
||||
absl::optional<std::string> readSerializedString(std::vector<uint8_t> const &data, int &offset) {
|
||||
if (const auto tmp = readBytesAsInt32(data, offset, 1)) {
|
||||
int paddingBytes = 0;
|
||||
int length = 0;
|
||||
if (tmp.value() == 254) {
|
||||
if (const auto len = readBytesAsInt32(data, offset, 3)) {
|
||||
length = len.value();
|
||||
paddingBytes = roundUp(length, 4) - length;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
else {
|
||||
length = tmp.value();
|
||||
paddingBytes = roundUp(length + 1, 4) - (length + 1);
|
||||
}
|
||||
|
||||
if (offset + length > data.size()) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
std::string result(data.data() + offset, data.data() + offset + length);
|
||||
|
||||
offset += length;
|
||||
offset += paddingBytes;
|
||||
|
||||
return result;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
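// The string layout above mirrors TL-style serialization: a single length
// byte, or the marker byte 254 followed by a 3-byte little-endian length for
// longer strings, with the whole field padded to a 4-byte boundary. For
// instance, the 5-byte string "hello" is stored as 0x05 'h' 'e' 'l' 'l' 'o'
// plus two padding bytes, 8 bytes in total (roundUp(5 + 1, 4) == 8).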
absl::optional<VideoStreamEvent> readVideoStreamEvent(std::vector<uint8_t> const &data, int &offset) {
    VideoStreamEvent event;

    if (const auto offsetValue = readInt32(data, offset)) {
        event.offset = offsetValue.value();
    } else {
        return absl::nullopt;
    }

    if (const auto endpointId = readSerializedString(data, offset)) {
        event.endpointId = endpointId.value();
    } else {
        return absl::nullopt;
    }

    if (const auto rotation = readInt32(data, offset)) {
        event.rotation = rotation.value();
    } else {
        return absl::nullopt;
    }

    if (const auto extra = readInt32(data, offset)) {
        event.extra = extra.value();
    } else {
        return absl::nullopt;
    }

    return event;
}
absl::optional<VideoStreamInfo> consumeVideoStreamInfo(std::vector<uint8_t> &data) {
    int offset = 0;
    if (const auto signature = readInt32(data, offset)) {
        if (signature.value() != 0xa12e810d) {
            return absl::nullopt;
        }
    } else {
        return absl::nullopt;
    }

    VideoStreamInfo info;

    if (const auto container = readSerializedString(data, offset)) {
        info.container = container.value();
    } else {
        return absl::nullopt;
    }

    if (const auto activeMask = readInt32(data, offset)) {
        info.activeMask = activeMask.value();
    } else {
        return absl::nullopt;
    }

    if (const auto eventCount = readInt32(data, offset)) {
        if (eventCount > 0) {
            if (const auto event = readVideoStreamEvent(data, offset)) {
                info.events.push_back(event.value());
            } else {
                return absl::nullopt;
            }
        } else {
            return absl::nullopt;
        }
    } else {
        return absl::nullopt;
    }

    data.erase(data.begin(), data.begin() + offset);

    return info;
}
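// consumeVideoStreamInfo() strips the serialized header from the front of the
// buffer and leaves the raw media payload behind. The header is: the 0xa12e810d
// signature, the container name (for example "mp4"), an int32 active mask, an
// event count, and one serialized VideoStreamEvent. Note that only the first
// event is consumed here even when the count is larger.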
bool areCodecParametersEqual(AVCodecParameters const &lhs, AVCodecParameters const &rhs) {
    if (lhs.codec_id != rhs.codec_id) {
        return false;
    }
    if (lhs.extradata_size != rhs.extradata_size) {
        return false;
    }
    if (lhs.extradata_size != 0) {
        if (memcmp(lhs.extradata, rhs.extradata, lhs.extradata_size)) {
            return false;
        }
    }
    if (lhs.format != rhs.format) {
        return false;
    }
    if (lhs.profile != rhs.profile) {
        return false;
    }
    if (lhs.level != rhs.level) {
        return false;
    }
    if (lhs.width != rhs.width) {
        return false;
    }
    if (lhs.height != rhs.height) {
        return false;
    }
    if (lhs.sample_aspect_ratio.num != rhs.sample_aspect_ratio.num) {
        return false;
    }
    if (lhs.sample_aspect_ratio.den != rhs.sample_aspect_ratio.den) {
        return false;
    }
    if (lhs.field_order != rhs.field_order) {
        return false;
    }
    if (lhs.color_range != rhs.color_range) {
        return false;
    }
    if (lhs.color_primaries != rhs.color_primaries) {
        return false;
    }
    if (lhs.color_trc != rhs.color_trc) {
        return false;
    }
    if (lhs.color_space != rhs.color_space) {
        return false;
    }
    if (lhs.chroma_location != rhs.chroma_location) {
        return false;
    }

    return true;
}
class VideoStreamingDecoderState {
public:
    static std::unique_ptr<VideoStreamingDecoderState> create(
        AVCodecParameters const *codecParameters,
        AVRational pktTimebase
    ) {
        AVCodec const *codec = nullptr;
        if (!codec) {
            codec = avcodec_find_decoder(codecParameters->codec_id);
        }
        if (!codec) {
            return nullptr;
        }
        AVCodecContext *codecContext = avcodec_alloc_context3(codec);
        int ret = avcodec_parameters_to_context(codecContext, codecParameters);
        if (ret < 0) {
            avcodec_free_context(&codecContext);
            return nullptr;
        } else {
            codecContext->pkt_timebase = pktTimebase;

            PlatformInterface::SharedInstance()->setupVideoDecoding(codecContext);

            ret = avcodec_open2(codecContext, codec, nullptr);
            if (ret < 0) {
                avcodec_free_context(&codecContext);
                return nullptr;
            }
        }

        return std::make_unique<VideoStreamingDecoderState>(
            codecContext,
            codecParameters,
            pktTimebase
        );
    }

public:
    VideoStreamingDecoderState(
        AVCodecContext *codecContext,
        AVCodecParameters const *codecParameters,
        AVRational pktTimebase
    ) {
        _codecContext = codecContext;
        _codecParameters = avcodec_parameters_alloc();
        avcodec_parameters_copy(_codecParameters, codecParameters);
        _pktTimebase = pktTimebase;
    }

    ~VideoStreamingDecoderState() {
        if (_codecContext) {
            avcodec_close(_codecContext);
            avcodec_free_context(&_codecContext);
        }
        if (_codecParameters) {
            avcodec_parameters_free(&_codecParameters);
        }
    }

    bool supportsDecoding(
        AVCodecParameters const *codecParameters,
        AVRational pktTimebase
    ) const {
        if (!areCodecParametersEqual(*_codecParameters, *codecParameters)) {
            return false;
        }
        if (_pktTimebase.num != pktTimebase.num) {
            return false;
        }
        if (_pktTimebase.den != pktTimebase.den) {
            return false;
        }
        return true;
    }

    int sendFrame(std::shared_ptr<DecodableFrame> frame) {
        if (frame) {
            int status = avcodec_send_packet(_codecContext, frame->packet().packet());
            return status;
        } else {
            int status = avcodec_send_packet(_codecContext, nullptr);
            return status;
        }
    }

    int receiveFrame(Frame &frame) {
        int status = avcodec_receive_frame(_codecContext, frame.frame());
        return status;
    }

    void reset() {
        avcodec_flush_buffers(_codecContext);
    }

private:
    AVCodecContext *_codecContext = nullptr;
    AVCodecParameters *_codecParameters = nullptr;
    AVRational _pktTimebase;
};
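// A minimal usage sketch (illustrative, not part of the original sources): the
// class follows FFmpeg's send/receive decoding model, so a caller is expected
// to do roughly
//
//     auto state = VideoStreamingDecoderState::create(params, timebase);
//     if (state && state->sendFrame(decodableFrame) == 0) {
//         Frame out;
//         while (state->receiveFrame(out) == 0) {
//             // consume out.frame()
//         }
//     }
//
// where `params`, `timebase` and `decodableFrame` stand for values obtained
// from the demuxer, as getNextFrame() further below does.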

} // namespace

class VideoStreamingSharedStateInternal {
public:
    VideoStreamingSharedStateInternal() {
    }

    ~VideoStreamingSharedStateInternal() {
    }

    void updateDecoderState(
        AVCodecParameters const *codecParameters,
        AVRational pktTimebase
    ) {
        if (_decoderState && _decoderState->supportsDecoding(codecParameters, pktTimebase)) {
            return;
        }

        _decoderState.reset();
        _decoderState = VideoStreamingDecoderState::create(codecParameters, pktTimebase);
    }

    int sendFrame(std::shared_ptr<DecodableFrame> frame) {
        if (!_decoderState) {
            return AVERROR(EIO);
        }
        return _decoderState->sendFrame(frame);
    }

    int receiveFrame(Frame &frame) {
        if (!_decoderState) {
            return AVERROR(EIO);
        }
        return _decoderState->receiveFrame(frame);
    }

    void reset() {
        if (!_decoderState) {
            return;
        }
        _decoderState->reset();
    }

private:
    std::unique_ptr<VideoStreamingDecoderState> _decoderState;
};
VideoStreamingSharedState::VideoStreamingSharedState() {
    _impl = new VideoStreamingSharedStateInternal();
}

VideoStreamingSharedState::~VideoStreamingSharedState() {
    delete _impl;
}
class VideoStreamingPartInternal {
public:
    VideoStreamingPartInternal(std::string endpointId, webrtc::VideoRotation rotation, std::vector<uint8_t> &&fileData, std::string const &container) :
    _endpointId(endpointId),
    _rotation(rotation) {
        _avIoContext = std::make_unique<AVIOContextImpl>(std::move(fileData));

        int ret = 0;

#if LIBAVFORMAT_VERSION_MAJOR >= 59
        const
#endif
        AVInputFormat *inputFormat = av_find_input_format(container.c_str());
        if (!inputFormat) {
            _didReadToEnd = true;
            return;
        }

        _inputFormatContext = avformat_alloc_context();
        if (!_inputFormatContext) {
            _didReadToEnd = true;
            return;
        }

        _inputFormatContext->pb = _avIoContext->getContext();

        if ((ret = avformat_open_input(&_inputFormatContext, "", inputFormat, nullptr)) < 0) {
            _didReadToEnd = true;
            return;
        }

        if ((ret = avformat_find_stream_info(_inputFormatContext, nullptr)) < 0) {
            _didReadToEnd = true;

            avformat_close_input(&_inputFormatContext);
            _inputFormatContext = nullptr;
            return;
        }

        AVCodecParameters *videoCodecParameters = nullptr;
        AVStream *videoStream = nullptr;
        for (int i = 0; i < _inputFormatContext->nb_streams; i++) {
            AVStream *inStream = _inputFormatContext->streams[i];

            AVCodecParameters *inCodecpar = inStream->codecpar;
            if (inCodecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
                continue;
            }
            videoCodecParameters = inCodecpar;
            videoStream = inStream;

            break;
        }

        if (videoCodecParameters && videoStream) {
            _videoCodecParameters = avcodec_parameters_alloc();
            avcodec_parameters_copy(_videoCodecParameters, videoCodecParameters);
            _videoStream = videoStream;

            /*const AVCodec *codec = avcodec_find_decoder(videoCodecParameters->codec_id);
            if (codec) {
                _codecContext = avcodec_alloc_context3(codec);
                ret = avcodec_parameters_to_context(_codecContext, videoCodecParameters);
                if (ret < 0) {
                    _didReadToEnd = true;

                    avcodec_free_context(&_codecContext);
                    _codecContext = nullptr;
                } else {
                    _codecContext->pkt_timebase = videoStream->time_base;

                    ret = avcodec_open2(_codecContext, codec, nullptr);
                    if (ret < 0) {
                        _didReadToEnd = true;

                        avcodec_free_context(&_codecContext);
                        _codecContext = nullptr;
                    } else {
                        _videoStream = videoStream;
                    }
                }
            }*/
        }
    }

    ~VideoStreamingPartInternal() {
        if (_videoCodecParameters) {
            avcodec_parameters_free(&_videoCodecParameters);
        }
        if (_inputFormatContext) {
            avformat_close_input(&_inputFormatContext);
        }
    }

    std::string endpointId() {
        return _endpointId;
    }

    absl::optional<MediaDataPacket> readPacket() {
        if (_didReadToEnd) {
            return absl::nullopt;
        }
        if (!_inputFormatContext) {
            return absl::nullopt;
        }

        MediaDataPacket packet;
        int result = av_read_frame(_inputFormatContext, packet.packet());
        if (result < 0) {
            return absl::nullopt;
        }

        return packet;
    }

    std::shared_ptr<DecodableFrame> readNextDecodableFrame() {
        while (true) {
            absl::optional<MediaDataPacket> packet = readPacket();
            if (packet) {
                if (_videoStream && packet->packet()->stream_index == _videoStream->index) {
                    return std::make_shared<DecodableFrame>(std::move(packet.value()), packet->packet()->pts, packet->packet()->dts);
                }
            } else {
                return nullptr;
            }
        }
    }

    absl::optional<VideoStreamingPartFrame> convertCurrentFrame() {
        auto platformFrameBuffer = PlatformInterface::SharedInstance()->createPlatformFrameFromData(_frame.frame());
        if (platformFrameBuffer) {
            auto videoFrame = webrtc::VideoFrame::Builder()
                .set_video_frame_buffer(platformFrameBuffer)
                .set_rotation(_rotation)
                .build();

            return VideoStreamingPartFrame(_endpointId, videoFrame, _frame.pts(_videoStream, _firstFramePts), _frameIndex);
        } else {
            webrtc::scoped_refptr<webrtc::I420Buffer> i420Buffer = webrtc::I420Buffer::Copy(
                _frame.frame()->width,
                _frame.frame()->height,
                _frame.frame()->data[0],
                _frame.frame()->linesize[0],
                _frame.frame()->data[1],
                _frame.frame()->linesize[1],
                _frame.frame()->data[2],
                _frame.frame()->linesize[2]
            );
            if (i420Buffer) {
                auto videoFrame = webrtc::VideoFrame::Builder()
                    .set_video_frame_buffer(i420Buffer)
                    .set_rotation(_rotation)
                    .build();

                return VideoStreamingPartFrame(_endpointId, videoFrame, _frame.pts(_videoStream, _firstFramePts), _frameIndex);
            } else {
                return absl::nullopt;
            }
        }
    }

    absl::optional<VideoStreamingPartFrame> getNextFrame(VideoStreamingSharedState const *sharedState) {
        if (!_videoStream) {
            return {};
        }
        if (!_videoCodecParameters) {
            return {};
        }

        sharedState->impl()->updateDecoderState(_videoCodecParameters, _videoStream->time_base);

        while (true) {
            if (_didReadToEnd) {
                if (!_finalFrames.empty()) {
                    auto frame = _finalFrames[0];
                    _finalFrames.erase(_finalFrames.begin());
                    return frame;
                } else {
                    break;
                }
            } else {
                const auto frame = readNextDecodableFrame();
                if (frame) {
                    int sendStatus = sharedState->impl()->sendFrame(frame);
                    if (sendStatus == 0) {
                        int receiveStatus = sharedState->impl()->receiveFrame(_frame);
                        if (receiveStatus == 0) {
                            auto convertedFrame = convertCurrentFrame();
                            if (convertedFrame) {
                                _frameIndex++;
                                return convertedFrame;
                            }
                        } else if (receiveStatus == AVERROR(EAGAIN)) {
                            // more data needed
                        } else {
                            RTC_LOG(LS_ERROR) << "avcodec_receive_frame failed with result: " << receiveStatus;
                            _didReadToEnd = true;
                            break;
                        }
                    } else {
                        RTC_LOG(LS_ERROR) << "avcodec_send_packet failed with result: " << sendStatus;
                        _didReadToEnd = true;
                        return {};
                    }
                } else {
                    _didReadToEnd = true;
                    int sendStatus = sharedState->impl()->sendFrame(nullptr);
                    if (sendStatus == 0) {
                        while (true) {
                            int receiveStatus = sharedState->impl()->receiveFrame(_frame);
                            if (receiveStatus == 0) {
                                auto convertedFrame = convertCurrentFrame();
                                if (convertedFrame) {
                                    _frameIndex++;
                                    _finalFrames.push_back(convertedFrame.value());
                                }
                            } else {
                                if (receiveStatus != AVERROR_EOF) {
                                    RTC_LOG(LS_ERROR) << "avcodec_receive_frame (drain) failed with result: " << receiveStatus;
                                }
                                break;
                            }
                        }
                    } else {
                        RTC_LOG(LS_ERROR) << "avcodec_send_packet (drain) failed with result: " << sendStatus;
                    }
                    sharedState->impl()->reset();
                }
            }
        }

        return {};
    }

private:
    std::string _endpointId;
    webrtc::VideoRotation _rotation = webrtc::VideoRotation::kVideoRotation_0;

    std::unique_ptr<AVIOContextImpl> _avIoContext;

    AVFormatContext *_inputFormatContext = nullptr;
    AVStream *_videoStream = nullptr;
    Frame _frame;

    AVCodecParameters *_videoCodecParameters = nullptr;

    std::vector<VideoStreamingPartFrame> _finalFrames;

    int _frameIndex = 0;
    double _firstFramePts = -1.0;
    bool _didReadToEnd = false;
};
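// Decoding note: getNextFrame() follows the standard avcodec draining
// protocol. While packets remain, AVERROR(EAGAIN) from avcodec_receive_frame()
// simply means "feed more input"; once the demuxer is exhausted, a null packet
// is sent and the decoder is drained until AVERROR_EOF, with the flushed
// frames queued in _finalFrames and the shared decoder reset for the next part.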
class VideoStreamingPartState {
public:
    VideoStreamingPartState(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType) {
        _videoStreamInfo = consumeVideoStreamInfo(data);
        if (!_videoStreamInfo) {
            return;
        }

        for (size_t i = 0; i < _videoStreamInfo->events.size(); i++) {
            if (_videoStreamInfo->events[i].offset < 0) {
                continue;
            }
            size_t endOffset = 0;
            if (i == _videoStreamInfo->events.size() - 1) {
                endOffset = data.size();
            } else {
                endOffset = _videoStreamInfo->events[i + 1].offset;
            }
            if (endOffset <= _videoStreamInfo->events[i].offset) {
                continue;
            }
            if (endOffset > data.size()) {
                continue;
            }
            std::vector<uint8_t> dataSlice(data.begin() + _videoStreamInfo->events[i].offset, data.begin() + endOffset);
            webrtc::VideoRotation rotation = webrtc::VideoRotation::kVideoRotation_0;
            switch (_videoStreamInfo->events[i].rotation) {
                case 0: {
                    rotation = webrtc::VideoRotation::kVideoRotation_0;
                    break;
                }
                case 90: {
                    rotation = webrtc::VideoRotation::kVideoRotation_90;
                    break;
                }
                case 180: {
                    rotation = webrtc::VideoRotation::kVideoRotation_180;
                    break;
                }
                case 270: {
                    rotation = webrtc::VideoRotation::kVideoRotation_270;
                    break;
                }
                default: {
                    break;
                }
            }

            switch (contentType) {
                case VideoStreamingPart::ContentType::Audio: {
                    auto part = std::make_unique<AudioStreamingPart>(std::move(dataSlice), _videoStreamInfo->container, true);
                    _parsedAudioParts.push_back(std::move(part));

                    break;
                }
                case VideoStreamingPart::ContentType::Video: {
                    auto part = std::make_unique<VideoStreamingPartInternal>(_videoStreamInfo->events[i].endpointId, rotation, std::move(dataSlice), _videoStreamInfo->container);
                    _parsedVideoParts.push_back(std::move(part));

                    break;
                }
                default: {
                    break;
                }
            }
        }
    }

    ~VideoStreamingPartState() {
    }

    absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(VideoStreamingSharedState const *sharedState, double timestamp) {
        while (true) {
            while (_availableFrames.size() >= 2) {
                if (timestamp >= _availableFrames[1].pts) {
                    _availableFrames.erase(_availableFrames.begin());
                } else {
                    break;
                }
            }

            if (_availableFrames.size() < 2) {
                if (!_parsedVideoParts.empty()) {
                    auto result = _parsedVideoParts[0]->getNextFrame(sharedState);
                    if (result) {
                        _availableFrames.push_back(result.value());
                    } else {
                        _parsedVideoParts.erase(_parsedVideoParts.begin());
                    }
                    continue;
                }
            }

            if (!_availableFrames.empty()) {
                for (size_t i = 1; i < _availableFrames.size(); i++) {
                    if (timestamp < _availableFrames[i].pts) {
                        return _availableFrames[i - 1];
                    }
                }
                return _availableFrames[_availableFrames.size() - 1];
            } else {
                return absl::nullopt;
            }
        }
    }

    absl::optional<std::string> getActiveEndpointId() const {
        if (!_parsedVideoParts.empty()) {
            return _parsedVideoParts[0]->endpointId();
        } else {
            return absl::nullopt;
        }
    }

    bool hasRemainingFrames() {
        return !_parsedVideoParts.empty() || getAudioRemainingMilliseconds() > 0;
    }

    int getAudioRemainingMilliseconds() {
        while (!_parsedAudioParts.empty()) {
            auto firstPartResult = _parsedAudioParts[0]->getRemainingMilliseconds();
            if (firstPartResult <= 0) {
                _parsedAudioParts.erase(_parsedAudioParts.begin());
            } else {
                return firstPartResult;
            }
        }
        return 0;
    }

    std::vector<AudioStreamingPart::StreamingPartChannel> getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
        while (!_parsedAudioParts.empty()) {
            auto firstPartResult = _parsedAudioParts[0]->get10msPerChannel(persistentDecoder);
            if (firstPartResult.empty()) {
                _parsedAudioParts.erase(_parsedAudioParts.begin());
            } else {
                return firstPartResult;
            }
        }
        return {};
    }

private:
    absl::optional<VideoStreamInfo> _videoStreamInfo;
    std::vector<std::unique_ptr<VideoStreamingPartInternal>> _parsedVideoParts;
    std::vector<VideoStreamingPartFrame> _availableFrames;

    std::vector<std::unique_ptr<AudioStreamingPart>> _parsedAudioParts;
};
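// Timing sketch: getFrameAtRelativeTimestamp() keeps a small window of decoded
// frames and returns the last frame whose pts does not exceed the requested
// timestamp. For instance, with frames at pts 0.0 and 0.033 a request for
// t = 0.02 returns the 0.0 frame; once t reaches 0.033 the older frame is
// dropped from _availableFrames and the next one is decoded.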
VideoStreamingPart::VideoStreamingPart(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType) {
    if (!data.empty()) {
        _state = new VideoStreamingPartState(std::move(data), contentType);
    }
}

VideoStreamingPart::~VideoStreamingPart() {
    if (_state) {
        delete _state;
    }
}

absl::optional<VideoStreamingPartFrame> VideoStreamingPart::getFrameAtRelativeTimestamp(VideoStreamingSharedState const *sharedState, double timestamp) {
    return _state
        ? _state->getFrameAtRelativeTimestamp(sharedState, timestamp)
        : absl::nullopt;
}

absl::optional<std::string> VideoStreamingPart::getActiveEndpointId() const {
    return _state
        ? _state->getActiveEndpointId()
        : absl::nullopt;
}

bool VideoStreamingPart::hasRemainingFrames() const {
    return _state
        ? _state->hasRemainingFrames()
        : false;
}

int VideoStreamingPart::getAudioRemainingMilliseconds() {
    return _state
        ? _state->getAudioRemainingMilliseconds()
        : 0;
}

std::vector<AudioStreamingPart::StreamingPartChannel> VideoStreamingPart::getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) {
    return _state
        ? _state->getAudio10msPerChannel(persistentDecoder)
        : std::vector<AudioStreamingPart::StreamingPartChannel>();
}

} // namespace tgcalls

78
TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h
Normal file
@ -0,0 +1,78 @@
#ifndef TGCALLS_VIDEO_STREAMING_PART_H
#define TGCALLS_VIDEO_STREAMING_PART_H

#include "absl/types/optional.h"
#include <vector>
#include <string>
#include <stdint.h>

#include "api/video/video_frame.h"

#include "AudioStreamingPart.h"
#include "AudioStreamingPartInternal.h"

namespace tgcalls {

class VideoStreamingPartState;
class VideoStreamingSharedStateInternal;

struct VideoStreamingPartFrame {
    std::string endpointId;
    webrtc::VideoFrame frame;
    double pts = 0;
    int index = 0;

    VideoStreamingPartFrame(std::string endpointId_, webrtc::VideoFrame const &frame_, double pts_, int index_) :
    endpointId(endpointId_),
    frame(frame_),
    pts(pts_),
    index(index_) {
    }
};

class VideoStreamingSharedState {
public:
    VideoStreamingSharedState();
    ~VideoStreamingSharedState();

    VideoStreamingSharedStateInternal *impl() const {
        return _impl;
    }

private:
    VideoStreamingSharedStateInternal *_impl = nullptr;
};

class VideoStreamingPart {
public:
    enum class ContentType {
        Audio,
        Video
    };

public:
    explicit VideoStreamingPart(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType);
    ~VideoStreamingPart();

    VideoStreamingPart(const VideoStreamingPart&) = delete;
    VideoStreamingPart(VideoStreamingPart&& other) {
        _state = other._state;
        other._state = nullptr;
    }
    VideoStreamingPart& operator=(const VideoStreamingPart&) = delete;
    VideoStreamingPart& operator=(VideoStreamingPart&&) = delete;

    absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(VideoStreamingSharedState const *sharedState, double timestamp);
    absl::optional<std::string> getActiveEndpointId() const;
    bool hasRemainingFrames() const;

    int getAudioRemainingMilliseconds();
    std::vector<AudioStreamingPart::StreamingPartChannel> getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder);

private:
    VideoStreamingPartState *_state = nullptr;
};

} // namespace tgcalls

#endif
352
TMessagesProj/jni/voip/tgcalls/legacy/InstanceImplLegacy.cpp
Normal file
@ -0,0 +1,352 @@
#include "InstanceImplLegacy.h"
|
||||
|
||||
#include <stdarg.h>
|
||||
|
||||
extern "C" {
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/aes.h>
|
||||
#ifndef OPENSSL_IS_BORINGSSL
|
||||
#include <openssl/modes.h>
|
||||
#endif
|
||||
#include <openssl/rand.h>
|
||||
}
|
||||
|
||||
void tgvoip_openssl_aes_ige_encrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
|
||||
AES_KEY akey;
|
||||
AES_set_encrypt_key(key, 32*8, &akey);
|
||||
AES_ige_encrypt(in, out, length, &akey, iv, AES_ENCRYPT);
|
||||
}
|
||||
|
||||
void tgvoip_openssl_aes_ige_decrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
|
||||
AES_KEY akey;
|
||||
AES_set_decrypt_key(key, 32*8, &akey);
|
||||
AES_ige_encrypt(in, out, length, &akey, iv, AES_DECRYPT);
|
||||
}
|
||||
|
||||
void tgvoip_openssl_rand_bytes(uint8_t* buffer, size_t len){
|
||||
RAND_bytes(buffer, len);
|
||||
}
|
||||
|
||||
void tgvoip_openssl_sha1(uint8_t* msg, size_t len, uint8_t* output){
|
||||
SHA1(msg, len, output);
|
||||
}
|
||||
|
||||
void tgvoip_openssl_sha256(uint8_t* msg, size_t len, uint8_t* output){
|
||||
SHA256(msg, len, output);
|
||||
}
|
||||
|
||||
void tgvoip_openssl_aes_ctr_encrypt(uint8_t* inout, size_t length, uint8_t* key, uint8_t* iv, uint8_t* ecount, uint32_t* num){
|
||||
AES_KEY akey;
|
||||
AES_set_encrypt_key(key, 32*8, &akey);
|
||||
#ifdef OPENSSL_IS_BORINGSSL
|
||||
AES_ctr128_encrypt(inout, inout, length, &akey, iv, ecount, num);
|
||||
#else
|
||||
CRYPTO_ctr128_encrypt(inout, inout, length, &akey, iv, ecount, num, (block128_f) AES_encrypt);
|
||||
#endif
|
||||
}
|
||||
|
||||
void tgvoip_openssl_aes_cbc_encrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
|
||||
AES_KEY akey;
|
||||
AES_set_encrypt_key(key, 256, &akey);
|
||||
AES_cbc_encrypt(in, out, length, &akey, iv, AES_ENCRYPT);
|
||||
}
|
||||
|
||||
void tgvoip_openssl_aes_cbc_decrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
|
||||
AES_KEY akey;
|
||||
AES_set_decrypt_key(key, 256, &akey);
|
||||
AES_cbc_encrypt(in, out, length, &akey, iv, AES_DECRYPT);
|
||||
}
|
||||
|
||||
tgvoip::CryptoFunctions tgvoip::VoIPController::crypto = {
|
||||
tgvoip_openssl_rand_bytes,
|
||||
tgvoip_openssl_sha1,
|
||||
tgvoip_openssl_sha256,
|
||||
tgvoip_openssl_aes_ige_encrypt,
|
||||
tgvoip_openssl_aes_ige_decrypt,
|
||||
tgvoip_openssl_aes_ctr_encrypt,
|
||||
tgvoip_openssl_aes_cbc_encrypt,
|
||||
tgvoip_openssl_aes_cbc_decrypt
|
||||
};
|
||||
|
||||
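// libtgvoip does not link a crypto backend directly; it receives its
// primitives through this function table (VoIPController::crypto), so the
// wrappers above are the single place where SHA-1/SHA-256, AES-IGE, AES-CTR
// and AES-CBC are bound to the OpenSSL (or BoringSSL) implementations.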
namespace tgcalls {

InstanceImplLegacy::InstanceImplLegacy(Descriptor &&descriptor) :
onStateUpdated_(std::move(descriptor.stateUpdated)),
onSignalBarsUpdated_(std::move(descriptor.signalBarsUpdated)) {
    controller_ = new tgvoip::VoIPController();
    controller_->implData = this;

    controller_->SetPersistentState(descriptor.persistentState.value);

    if (const auto proxy = descriptor.proxy.get()) {
        controller_->SetProxy(tgvoip::PROXY_SOCKS5, proxy->host, proxy->port, proxy->login, proxy->password);
    }

    auto callbacks = tgvoip::VoIPController::Callbacks();
    callbacks.connectionStateChanged = &InstanceImplLegacy::ControllerStateCallback;
    callbacks.groupCallKeyReceived = nullptr;
    callbacks.groupCallKeySent = nullptr;
    callbacks.signalBarCountChanged = &InstanceImplLegacy::SignalBarsCallback;
    callbacks.upgradeToGroupCallRequested = nullptr;
    controller_->SetCallbacks(callbacks);

    std::vector<tgvoip::Endpoint> mappedEndpoints;
    for (auto endpoint : descriptor.endpoints) {
        tgvoip::Endpoint::Type mappedType;
        switch (endpoint.type) {
            case EndpointType::UdpRelay:
                mappedType = tgvoip::Endpoint::Type::UDP_RELAY;
                break;
            case EndpointType::Lan:
                mappedType = tgvoip::Endpoint::Type::UDP_P2P_LAN;
                break;
            case EndpointType::Inet:
                mappedType = tgvoip::Endpoint::Type::UDP_P2P_INET;
                break;
            case EndpointType::TcpRelay:
                mappedType = tgvoip::Endpoint::Type::TCP_RELAY;
                break;
            default:
                mappedType = tgvoip::Endpoint::Type::UDP_RELAY;
                break;
        }

        tgvoip::IPv4Address address(endpoint.host.ipv4);
        tgvoip::IPv6Address addressv6(endpoint.host.ipv6);

        mappedEndpoints.emplace_back(endpoint.endpointId, endpoint.port, address, addressv6, mappedType, endpoint.peerTag);
    }

    const auto mappedDataSaving = [&] {
        switch (descriptor.config.dataSaving) {
            case DataSaving::Mobile:
                return tgvoip::DATA_SAVING_MOBILE;
            case DataSaving::Always:
                return tgvoip::DATA_SAVING_ALWAYS;
            default:
                return tgvoip::DATA_SAVING_NEVER;
        }
    }();

    tgvoip::VoIPController::Config mappedConfig(
        descriptor.config.initializationTimeout,
        descriptor.config.receiveTimeout,
        mappedDataSaving,
        descriptor.config.enableAEC,
        descriptor.config.enableNS,
        descriptor.config.enableAGC,
        descriptor.config.enableCallUpgrade
    );
    mappedConfig.enableVolumeControl = descriptor.config.enableVolumeControl;
    mappedConfig.logFilePath = descriptor.config.logPath.data;
    mappedConfig.statsDumpFilePath = {};

    controller_->SetConfig(mappedConfig);

    setNetworkType(descriptor.initialNetworkType);

    controller_->SetEncryptionKey((char *)(descriptor.encryptionKey.value->data()), descriptor.encryptionKey.isOutgoing);
    controller_->SetRemoteEndpoints(mappedEndpoints, descriptor.config.enableP2P, descriptor.config.maxApiLayer);

    controller_->Start();

    controller_->Connect();

    controller_->SetCurrentAudioInput(descriptor.mediaDevicesConfig.audioInputId);
    controller_->SetCurrentAudioOutput(descriptor.mediaDevicesConfig.audioOutputId);
    controller_->SetInputVolume(descriptor.mediaDevicesConfig.inputVolume);
    controller_->SetOutputVolume(descriptor.mediaDevicesConfig.outputVolume);
}

InstanceImplLegacy::~InstanceImplLegacy() {
    if (controller_) {
        stop([](FinalState state){});
    }
}
void InstanceImplLegacy::setNetworkType(NetworkType networkType) {
    const auto mappedType = [&] {
        switch (networkType) {
            case NetworkType::Unknown:
                return tgvoip::NET_TYPE_UNKNOWN;
            case NetworkType::Gprs:
                return tgvoip::NET_TYPE_GPRS;
            case NetworkType::Edge:
                return tgvoip::NET_TYPE_EDGE;
            case NetworkType::ThirdGeneration:
                return tgvoip::NET_TYPE_3G;
            case NetworkType::Hspa:
                return tgvoip::NET_TYPE_HSPA;
            case NetworkType::Lte:
                return tgvoip::NET_TYPE_LTE;
            case NetworkType::WiFi:
                return tgvoip::NET_TYPE_WIFI;
            case NetworkType::Ethernet:
                return tgvoip::NET_TYPE_ETHERNET;
            case NetworkType::OtherHighSpeed:
                return tgvoip::NET_TYPE_OTHER_HIGH_SPEED;
            case NetworkType::OtherLowSpeed:
                return tgvoip::NET_TYPE_OTHER_LOW_SPEED;
            case NetworkType::OtherMobile:
                return tgvoip::NET_TYPE_OTHER_MOBILE;
            case NetworkType::Dialup:
                return tgvoip::NET_TYPE_DIALUP;
            default:
                return tgvoip::NET_TYPE_UNKNOWN;
        }
    }();

    controller_->SetNetworkType(mappedType);
}

void InstanceImplLegacy::setMuteMicrophone(bool muteMicrophone) {
    controller_->SetMicMute(muteMicrophone);
}

void InstanceImplLegacy::receiveSignalingData(const std::vector<uint8_t> &data) {
}

void InstanceImplLegacy::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) {
}

void InstanceImplLegacy::sendVideoDeviceUpdated() {
}

void InstanceImplLegacy::setRequestedVideoAspect(float aspect) {
}

void InstanceImplLegacy::setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
}

void InstanceImplLegacy::setAudioOutputGainControlEnabled(bool enabled) {
    controller_->SetAudioOutputGainControlEnabled(enabled);
}

void InstanceImplLegacy::setEchoCancellationStrength(int strength) {
    controller_->SetEchoCancellationStrength(strength);
}

void InstanceImplLegacy::setAudioInputDevice(std::string id) {
    controller_->SetCurrentAudioInput(id);
}

void InstanceImplLegacy::setAudioOutputDevice(std::string id) {
    controller_->SetCurrentAudioOutput(id);
}

void InstanceImplLegacy::setInputVolume(float level) {
    controller_->SetInputVolume(level);
}

void InstanceImplLegacy::setOutputVolume(float level) {
    controller_->SetOutputVolume(level);
}

void InstanceImplLegacy::setAudioOutputDuckingEnabled(bool enabled) {
#if defined(__APPLE__) && TARGET_OS_OSX
    controller_->SetAudioOutputDuckingEnabled(enabled);
#endif // TARGET_OS_OSX
}

void InstanceImplLegacy::setIsLowBatteryLevel(bool isLowBatteryLevel) {
}

std::string InstanceImplLegacy::getLastError() {
    switch (controller_->GetLastError()) {
        case tgvoip::ERROR_INCOMPATIBLE: return "ERROR_INCOMPATIBLE";
        case tgvoip::ERROR_TIMEOUT: return "ERROR_TIMEOUT";
        case tgvoip::ERROR_AUDIO_IO: return "ERROR_AUDIO_IO";
        case tgvoip::ERROR_PROXY: return "ERROR_PROXY";
        default: return "ERROR_UNKNOWN";
    }
}

std::string InstanceImplLegacy::getDebugInfo() {
    return controller_->GetDebugString();
}

int64_t InstanceImplLegacy::getPreferredRelayId() {
    return controller_->GetPreferredRelayID();
}

TrafficStats InstanceImplLegacy::getTrafficStats() {
    tgvoip::VoIPController::TrafficStats stats;
    controller_->GetStats(&stats);
    auto result = TrafficStats();
    result.bytesSentWifi = stats.bytesSentWifi;
    result.bytesReceivedWifi = stats.bytesRecvdWifi;
    result.bytesSentMobile = stats.bytesSentMobile;
    result.bytesReceivedMobile = stats.bytesRecvdMobile;
    return result;
}

PersistentState InstanceImplLegacy::getPersistentState() {
    return {controller_->GetPersistentState()};
}

void InstanceImplLegacy::stop(std::function<void(FinalState)> completion) {
    controller_->Stop();

    auto result = FinalState();
    result.persistentState = getPersistentState();
    result.debugLog = controller_->GetDebugLog();
    result.trafficStats = getTrafficStats();
    result.isRatingSuggested = controller_->NeedRate();

    delete controller_;
    controller_ = nullptr;

    completion(result);
}

void InstanceImplLegacy::ControllerStateCallback(tgvoip::VoIPController *controller, int state) {
    const auto self = static_cast<InstanceImplLegacy*>(controller->implData);
    if (self->onStateUpdated_) {
        const auto mappedState = [&] {
            switch (state) {
                case tgvoip::STATE_WAIT_INIT:
                    return State::WaitInit;
                case tgvoip::STATE_WAIT_INIT_ACK:
                    return State::WaitInitAck;
                case tgvoip::STATE_ESTABLISHED:
                    return State::Established;
                case tgvoip::STATE_FAILED:
                    return State::Failed;
                case tgvoip::STATE_RECONNECTING:
                    return State::Reconnecting;
                default:
                    return State::Established;
            }
        }();

        self->onStateUpdated_(mappedState);
    }
}

void InstanceImplLegacy::SignalBarsCallback(tgvoip::VoIPController *controller, int signalBars) {
    const auto self = static_cast<InstanceImplLegacy*>(controller->implData);
    if (self->onSignalBarsUpdated_) {
        self->onSignalBarsUpdated_(signalBars);
    }
}

int InstanceImplLegacy::GetConnectionMaxLayer() {
    return tgvoip::VoIPController::GetConnectionMaxLayer();
}

std::vector<std::string> InstanceImplLegacy::GetVersions() {
    std::vector<std::string> result;
    result.push_back("2.4.4");
    return result;
}

template <>
bool Register<InstanceImplLegacy>() {
    return Meta::RegisterOne<InstanceImplLegacy>();
}

void SetLegacyGlobalServerConfig(const std::string &serverConfig) {
    tgvoip::ServerConfig::GetSharedInstance()->Update(serverConfig);
}

} // namespace tgcalls

59
TMessagesProj/jni/voip/tgcalls/legacy/InstanceImplLegacy.h
Normal file
@ -0,0 +1,59 @@
#ifndef TGCALLS_INSTANCE_IMPL_LEGACY_H
#define TGCALLS_INSTANCE_IMPL_LEGACY_H

#include "Instance.h"

#include "VoIPController.h"
#include "VoIPServerConfig.h"

namespace tgcalls {

class InstanceImplLegacy : public Instance {
public:
    explicit InstanceImplLegacy(Descriptor &&descriptor);
    ~InstanceImplLegacy();

    static int GetConnectionMaxLayer();
    static std::vector<std::string> GetVersions();

    void receiveSignalingData(const std::vector<uint8_t> &data) override;
    void setNetworkType(NetworkType networkType) override;
    void setMuteMicrophone(bool muteMicrophone) override;
    void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) override;
    void sendVideoDeviceUpdated() override;
    void setRequestedVideoAspect(float aspect) override;
    bool supportsVideo() override {
        return false;
    }
    void setIncomingVideoOutput(std::weak_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
    void setAudioOutputGainControlEnabled(bool enabled) override;
    void setEchoCancellationStrength(int strength) override;
    void setAudioInputDevice(std::string id) override;
    void setAudioOutputDevice(std::string id) override;
    void setInputVolume(float level) override;
    void setOutputVolume(float level) override;
    void setAudioOutputDuckingEnabled(bool enabled) override;
    void setIsLowBatteryLevel(bool isLowBatteryLevel) override;

    std::string getLastError() override;
    std::string getDebugInfo() override;
    int64_t getPreferredRelayId() override;
    TrafficStats getTrafficStats() override;
    PersistentState getPersistentState() override;
    void stop(std::function<void(FinalState)> completion) override;

private:
    tgvoip::VoIPController *controller_;
    std::function<void(State)> onStateUpdated_;
    std::function<void(int)> onSignalBarsUpdated_;

    static void ControllerStateCallback(tgvoip::VoIPController *controller, int state);
    static void SignalBarsCallback(tgvoip::VoIPController *controller, int signalBars);
};

void SetLegacyGlobalServerConfig(const std::string &serverConfig);

} // namespace tgcalls

#endif
363
TMessagesProj/jni/voip/tgcalls/platform/PlatformInterface.h
Normal file
@ -0,0 +1,363 @@
#ifndef TGCALLS_PLATFORM_INTERFACE_H
#define TGCALLS_PLATFORM_INTERFACE_H

#include "rtc_base/thread.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/media_stream_interface.h"
#include "rtc_base/network_monitor_factory.h"
#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/ref_counted_object.h"
#include <string>
#include <map>

struct AVFrame;
struct AVCodecContext;

namespace tgcalls {

enum class VideoState;

class VideoCapturerInterface;
class PlatformContext;

struct PlatformCaptureInfo {
    bool shouldBeAdaptedToReceiverAspectRate = false;
    int rotation = 0;
};

class WrappedAudioDeviceModule : public webrtc::AudioDeviceModule {
public:
    virtual void Stop() = 0;
    virtual void setIsActive(bool isActive) = 0;
};

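// DefaultWrappedAudioDeviceModule below is a plain decorator: every
// webrtc::AudioDeviceModule call is forwarded to the wrapped _impl, while
// Stop()/setIsActive() are no-ops that platform-specific subclasses can
// override to pause the device without tearing the module down.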
class DefaultWrappedAudioDeviceModule : public WrappedAudioDeviceModule {
public:
    DefaultWrappedAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> impl) :
    _impl(impl) {
    }

    virtual ~DefaultWrappedAudioDeviceModule() {
    }

    virtual void Stop() override {
    }

    virtual void setIsActive(bool isActive) override {
    }

    virtual int32_t ActiveAudioLayer(AudioLayer *audioLayer) const override {
        return _impl->ActiveAudioLayer(audioLayer);
    }

    virtual int32_t RegisterAudioCallback(webrtc::AudioTransport *audioCallback) override {
        return _impl->RegisterAudioCallback(audioCallback);
    }

    virtual int32_t Init() override {
        return _impl->Init();
    }

    virtual int32_t Terminate() override {
        return _impl->Terminate();
    }

    virtual bool Initialized() const override {
        return _impl->Initialized();
    }

    virtual int16_t PlayoutDevices() override {
        return _impl->PlayoutDevices();
    }

    virtual int16_t RecordingDevices() override {
        return _impl->RecordingDevices();
    }

    virtual int32_t PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override {
        return _impl->PlayoutDeviceName(index, name, guid);
    }

    virtual int32_t RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override {
        return _impl->RecordingDeviceName(index, name, guid);
    }

    virtual int32_t SetPlayoutDevice(uint16_t index) override {
        return _impl->SetPlayoutDevice(index);
    }

#ifdef TGCALLS_UWP_DESKTOP
    virtual int32_t SetPlayoutDevice(std::string deviceId) override {
        return _impl->SetPlayoutDevice(deviceId);
    }
#endif

    virtual int32_t SetPlayoutDevice(WindowsDeviceType device) override {
        return _impl->SetPlayoutDevice(device);
    }

    virtual int32_t SetRecordingDevice(uint16_t index) override {
        return _impl->SetRecordingDevice(index);
    }

#ifdef TGCALLS_UWP_DESKTOP
    virtual int32_t SetRecordingDevice(std::string deviceId) override {
        return _impl->SetRecordingDevice(deviceId);
    }
#endif

    virtual int32_t SetRecordingDevice(WindowsDeviceType device) override {
        return _impl->SetRecordingDevice(device);
    }

    virtual int32_t PlayoutIsAvailable(bool *available) override {
        return _impl->PlayoutIsAvailable(available);
    }

    virtual int32_t InitPlayout() override {
        return _impl->InitPlayout();
    }

    virtual bool PlayoutIsInitialized() const override {
        return _impl->PlayoutIsInitialized();
    }

    virtual int32_t RecordingIsAvailable(bool *available) override {
        return _impl->RecordingIsAvailable(available);
    }

    virtual int32_t InitRecording() override {
        return _impl->InitRecording();
    }

    virtual bool RecordingIsInitialized() const override {
        return _impl->RecordingIsInitialized();
    }

    virtual int32_t StartPlayout() override {
        return _impl->StartPlayout();
    }

    virtual int32_t StopPlayout() override {
        return _impl->StopPlayout();
    }

    virtual bool Playing() const override {
        return _impl->Playing();
    }

    virtual int32_t StartRecording() override {
        return _impl->StartRecording();
    }

    virtual int32_t StopRecording() override {
        return _impl->StopRecording();
    }

    virtual bool Recording() const override {
        return _impl->Recording();
    }

    virtual int32_t InitSpeaker() override {
        return _impl->InitSpeaker();
    }

    virtual bool SpeakerIsInitialized() const override {
        return _impl->SpeakerIsInitialized();
    }

    virtual int32_t InitMicrophone() override {
        return _impl->InitMicrophone();
    }

    virtual bool MicrophoneIsInitialized() const override {
        return _impl->MicrophoneIsInitialized();
    }

    virtual int32_t SpeakerVolumeIsAvailable(bool *available) override {
        return _impl->SpeakerVolumeIsAvailable(available);
    }

    virtual int32_t SetSpeakerVolume(uint32_t volume) override {
        return _impl->SetSpeakerVolume(volume);
    }

    virtual int32_t SpeakerVolume(uint32_t* volume) const override {
        return _impl->SpeakerVolume(volume);
    }

    virtual int32_t MaxSpeakerVolume(uint32_t *maxVolume) const override {
        return _impl->MaxSpeakerVolume(maxVolume);
    }

    virtual int32_t MinSpeakerVolume(uint32_t *minVolume) const override {
        return _impl->MinSpeakerVolume(minVolume);
    }

    virtual int32_t MicrophoneVolumeIsAvailable(bool *available) override {
        return _impl->MicrophoneVolumeIsAvailable(available);
    }

    virtual int32_t SetMicrophoneVolume(uint32_t volume) override {
        return _impl->SetMicrophoneVolume(volume);
    }

    virtual int32_t MicrophoneVolume(uint32_t *volume) const override {
        return _impl->MicrophoneVolume(volume);
    }

    virtual int32_t MaxMicrophoneVolume(uint32_t *maxVolume) const override {
        return _impl->MaxMicrophoneVolume(maxVolume);
    }

    virtual int32_t MinMicrophoneVolume(uint32_t *minVolume) const override {
        return _impl->MinMicrophoneVolume(minVolume);
    }

    virtual int32_t SpeakerMuteIsAvailable(bool *available) override {
        return _impl->SpeakerMuteIsAvailable(available);
    }

    virtual int32_t SetSpeakerMute(bool enable) override {
        return _impl->SetSpeakerMute(enable);
    }

    virtual int32_t SpeakerMute(bool *enabled) const override {
        return _impl->SpeakerMute(enabled);
    }

    virtual int32_t MicrophoneMuteIsAvailable(bool *available) override {
        return _impl->MicrophoneMuteIsAvailable(available);
    }

    virtual int32_t SetMicrophoneMute(bool enable) override {
        return _impl->SetMicrophoneMute(enable);
    }

    virtual int32_t MicrophoneMute(bool *enabled) const override {
        return _impl->MicrophoneMute(enabled);
    }

    virtual int32_t StereoPlayoutIsAvailable(bool *available) const override {
        return _impl->StereoPlayoutIsAvailable(available);
    }

    virtual int32_t SetStereoPlayout(bool enable) override {
        return _impl->SetStereoPlayout(enable);
    }

    virtual int32_t StereoPlayout(bool *enabled) const override {
        return _impl->StereoPlayout(enabled);
    }

    virtual int32_t StereoRecordingIsAvailable(bool *available) const override {
        return _impl->StereoRecordingIsAvailable(available);
    }

    virtual int32_t SetStereoRecording(bool enable) override {
        return _impl->SetStereoRecording(enable);
    }

    virtual int32_t StereoRecording(bool *enabled) const override {
        return _impl->StereoRecording(enabled);
    }

    virtual int32_t PlayoutDelay(uint16_t* delayMS) const override {
        return _impl->PlayoutDelay(delayMS);
    }

    virtual bool BuiltInAECIsAvailable() const override {
        return _impl->BuiltInAECIsAvailable();
    }

    virtual bool BuiltInAGCIsAvailable() const override {
        return _impl->BuiltInAGCIsAvailable();
    }

    virtual bool BuiltInNSIsAvailable() const override {
        return _impl->BuiltInNSIsAvailable();
    }

    virtual int32_t EnableBuiltInAEC(bool enable) override {
        return _impl->EnableBuiltInAEC(enable);
    }

    virtual int32_t EnableBuiltInAGC(bool enable) override {
        return _impl->EnableBuiltInAGC(enable);
    }

    virtual int32_t EnableBuiltInNS(bool enable) override {
        return _impl->EnableBuiltInNS(enable);
    }

    virtual int32_t GetPlayoutUnderrunCount() const override {
        return _impl->GetPlayoutUnderrunCount();
    }

#if defined(WEBRTC_IOS)
    virtual int GetPlayoutAudioParameters(webrtc::AudioParameters *params) const override {
        return _impl->GetPlayoutAudioParameters(params);
    }
    virtual int GetRecordAudioParameters(webrtc::AudioParameters *params) const override {
        return _impl->GetRecordAudioParameters(params);
    }
#endif // WEBRTC_IOS

    webrtc::scoped_refptr<webrtc::AudioDeviceModule> WrappedInstance() const {
        return _impl;
    }

private:
    webrtc::scoped_refptr<webrtc::AudioDeviceModule> _impl;
};

class PlatformVideoFrame {
public:
    PlatformVideoFrame() {
    }

    virtual ~PlatformVideoFrame() = default;
};

class PlatformInterface {
public:
    static PlatformInterface *SharedInstance();
    virtual ~PlatformInterface() = default;

    virtual void configurePlatformAudio(int numChannels = 1) {
    }

    virtual std::unique_ptr<rtc::NetworkMonitorFactory> createNetworkMonitorFactory() {
        return nullptr;
    }

    virtual std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext, bool preferHardwareEncoding = false, bool isScreencast = false) = 0;
    virtual std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) = 0;
    virtual bool supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) = 0;
    virtual webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread, bool screencapture) = 0;
    virtual void adaptVideoSource(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) = 0;
    virtual std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) = 0;
    virtual webrtc::scoped_refptr<WrappedAudioDeviceModule> wrapAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> module) {
        return rtc::make_ref_counted<DefaultWrappedAudioDeviceModule>(module);
    }
    virtual void setupVideoDecoding(AVCodecContext *codecContext) {
    }
    virtual webrtc::scoped_refptr<webrtc::VideoFrameBuffer> createPlatformFrameFromData(AVFrame const *frame) {
        return nullptr;
    }

public:
    bool preferX264 = false;
};

std::unique_ptr<PlatformInterface> CreatePlatformInterface();

inline PlatformInterface *PlatformInterface::SharedInstance() {
    static const auto result = CreatePlatformInterface();
    return result.get();
}

} // namespace tgcalls

#endif
@ -0,0 +1,62 @@
#include "AndroidContext.h"
|
||||
|
||||
#include "sdk/android/native_api/jni/jvm.h"
|
||||
#include "tgnet/FileLog.h"
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
AndroidContext::AndroidContext(JNIEnv *env, jobject peerInstance, jobject groupInstance, bool screencast) {
|
||||
VideoCapturerDeviceClass = (jclass) env->NewGlobalRef(env->FindClass("org/telegram/messenger/voip/VideoCapturerDevice"));
|
||||
jmethodID initMethodId = env->GetMethodID(VideoCapturerDeviceClass, "<init>", "(Z)V");
|
||||
javaCapturer = env->NewGlobalRef(env->NewObject(VideoCapturerDeviceClass, initMethodId, screencast));
|
||||
if (peerInstance) {
|
||||
javaPeerInstance = env->NewGlobalRef(peerInstance);
|
||||
}
|
||||
if (groupInstance) {
|
||||
javaGroupInstance = env->NewGlobalRef(groupInstance);
|
||||
}
|
||||
}
|
||||
|
||||
AndroidContext::~AndroidContext() {
|
||||
JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();
|
||||
|
||||
jmethodID onDestroyMethodId = env->GetMethodID(VideoCapturerDeviceClass, "onDestroy", "()V");
|
||||
env->CallVoidMethod(javaCapturer, onDestroyMethodId);
|
||||
env->DeleteGlobalRef(javaCapturer);
|
||||
javaCapturer = nullptr;
|
||||
|
||||
env->DeleteGlobalRef(VideoCapturerDeviceClass);
|
||||
|
||||
if (javaPeerInstance) {
|
||||
env->DeleteGlobalRef(javaPeerInstance);
|
||||
}
|
||||
if (javaGroupInstance) {
|
||||
env->DeleteGlobalRef(javaGroupInstance);
|
||||
}
|
||||
}
|
||||
|
||||
void AndroidContext::setJavaPeerInstance(JNIEnv *env, jobject instance) {
|
||||
javaPeerInstance = env->NewGlobalRef(instance);
|
||||
}
|
||||
|
||||
void AndroidContext::setJavaGroupInstance(JNIEnv *env, jobject instance) {
|
||||
javaGroupInstance = env->NewGlobalRef(instance);
|
||||
}
|
||||
|
||||
jobject AndroidContext::getJavaPeerInstance() {
|
||||
return javaPeerInstance;
|
||||
}
|
||||
|
||||
jobject AndroidContext::getJavaGroupInstance() {
|
||||
return javaGroupInstance;
|
||||
}
|
||||
|
||||
jobject AndroidContext::getJavaCapturer() {
|
||||
return javaCapturer;
|
||||
}
|
||||
|
||||
jclass AndroidContext::getJavaCapturerClass() {
|
||||
return VideoCapturerDeviceClass;
|
||||
}
|
||||
|
||||
} // namespace tgcalls
|
||||
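// JNI lifetime note: every jobject/jclass kept past the current JNI frame is
// promoted with NewGlobalRef() in the constructor (and in the setters above)
// and released with DeleteGlobalRef() in the destructor, after
// VideoCapturerDevice.onDestroy() has been invoked on the Java side.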
@ -0,0 +1,39 @@
#ifndef TGCALLS_ANDROID_CONTEXT_H
#define TGCALLS_ANDROID_CONTEXT_H

#include "PlatformContext.h"

#include <jni.h>
#include <voip/tgcalls/group/GroupInstanceImpl.h>

namespace tgcalls {

class AndroidContext final : public PlatformContext {
public:
    AndroidContext(JNIEnv *env, jobject peerInstance, jobject groupInstance, bool screencast);
    ~AndroidContext() override;

    jobject getJavaCapturer();
    jobject getJavaPeerInstance();
    jobject getJavaGroupInstance();
    jclass getJavaCapturerClass();

    void setJavaPeerInstance(JNIEnv *env, jobject instance);
    void setJavaGroupInstance(JNIEnv *env, jobject instance);

    std::vector<std::shared_ptr<BroadcastPartTask>> audioStreamTasks;
    std::vector<std::shared_ptr<BroadcastPartTask>> videoStreamTasks;
    std::vector<std::shared_ptr<RequestMediaChannelDescriptionTask>> descriptionTasks;

private:
    jclass VideoCapturerDeviceClass = nullptr;
    jobject javaCapturer = nullptr;

    jobject javaPeerInstance = nullptr;
    jobject javaGroupInstance = nullptr;
};

} // namespace tgcalls

#endif
@ -0,0 +1,160 @@
#include "AndroidInterface.h"
|
||||
|
||||
#include <rtc_base/ssl_adapter.h>
|
||||
#include <modules/utility/include/jvm_android.h>
|
||||
#include <sdk/android/src/jni/android_video_track_source.h>
|
||||
#include <media/base/media_constants.h>
|
||||
|
||||
#include "VideoCapturerInterfaceImpl.h"
|
||||
|
||||
#include "sdk/android/native_api/base/init.h"
|
||||
#include "sdk/android/native_api/codecs/wrapper.h"
|
||||
#include "sdk/android/native_api/jni/class_loader.h"
|
||||
#include "sdk/android/native_api/jni/jvm.h"
|
||||
#include "sdk/android/native_api/jni/scoped_java_ref.h"
|
||||
#include "sdk/android/native_api/video/video_source.h"
|
||||
#include "api/video_codecs/builtin_video_encoder_factory.h"
|
||||
#include "api/video_codecs/builtin_video_decoder_factory.h"
|
||||
#include "pc/video_track_source_proxy.h"
|
||||
#include "sdk/android/src/jni/android_network_monitor.h"
|
||||
#include "api/video_track_source_proxy_factory.h"
|
||||
#include "AndroidContext.h"
|
||||
#include "media/engine/simulcast_encoder_adapter.h"
|
||||
|
||||
|
||||
namespace tgcalls {
|
||||
|
||||
void AndroidInterface::configurePlatformAudio(int numChannels) {
|
||||
|
||||
}
|
||||
|
||||
class SimulcastVideoEncoderFactory : public webrtc::VideoEncoderFactory {
|
||||
public:
|
||||
|
||||
std::unique_ptr<webrtc::VideoEncoderFactory> main_factory;
|
||||
std::unique_ptr<webrtc::SimulcastEncoderAdapter> simulcast_adapter;
|
||||
|
||||
SimulcastVideoEncoderFactory(
|
||||
std::unique_ptr<webrtc::VideoEncoderFactory> main_factory
|
||||
): main_factory(std::move(main_factory)) {}
|
||||
|
||||
std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override {
|
||||
return main_factory->GetSupportedFormats();
|
||||
}
|
||||
|
||||
std::vector<webrtc::SdpVideoFormat> GetImplementations() const override {
|
||||
return main_factory->GetImplementations();
|
||||
}
|
||||
|
||||
std::unique_ptr<EncoderSelectorInterface> GetEncoderSelector() const override {
|
||||
return main_factory->GetEncoderSelector();
|
||||
}
|
||||
|
||||
std::unique_ptr<webrtc::VideoEncoder> CreateVideoEncoder(const webrtc::SdpVideoFormat& format) override {
|
||||
return std::make_unique<webrtc::SimulcastEncoderAdapter>(main_factory.get(), format);
|
||||
}
|
||||
|
||||
CodecSupport QueryCodecSupport(
|
||||
const webrtc::SdpVideoFormat& format,
|
||||
absl::optional<std::string> scalability_mode) const override {
|
||||
return main_factory->QueryCodecSupport(format, scalability_mode);
|
||||
}
|
||||
};
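
// Minimal usage sketch (an assumption for illustration, not code from this
// commit): wrapping WebRTC's built-in factory so that every encoder created
// through the wrapper is a SimulcastEncoderAdapter.
//
//   auto base = webrtc::CreateBuiltinVideoEncoderFactory();
//   auto factory = std::make_unique<SimulcastVideoEncoderFactory>(std::move(base));
//   auto encoder = factory->CreateVideoEncoder(webrtc::SdpVideoFormat(cricket::kVp8CodecName));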

std::unique_ptr<webrtc::VideoEncoderFactory> AndroidInterface::makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext, bool preferHardwareEncoding, bool isScreencast) {
    JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();

    // AndroidContext *context = (AndroidContext *) platformContext.get();
    // jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "getSharedEGLContext", "()Lorg/webrtc/EglBase$Context;");
    // jobject eglContext = env->CallObjectMethod(context->getJavaCapturer(), methodId);

    webrtc::ScopedJavaLocalRef<jclass> factory_class =
        webrtc::GetClass(env, "org/webrtc/DefaultVideoEncoderFactory");
    jmethodID factory_constructor = env->GetMethodID(
        factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;ZZ)V");
    webrtc::ScopedJavaLocalRef<jobject> factory_object(
        env, env->NewObject(factory_class.obj(), factory_constructor,
                            nullptr /* shared_context */,
                            false /* enable_intel_vp8_encoder */,
                            true /* enable_h264_high_profile */));

    // return webrtc::JavaToNativeVideoEncoderFactory(env, factory_object.obj());

    return std::make_unique<SimulcastVideoEncoderFactory>(webrtc::JavaToNativeVideoEncoderFactory(env, factory_object.obj()));
}

std::unique_ptr<webrtc::VideoDecoderFactory> AndroidInterface::makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) {
    JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();

    // AndroidContext *context = (AndroidContext *) platformContext.get();
    // jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "getSharedEGLContext", "()Lorg/webrtc/EglBase$Context;");
    // jobject eglContext = env->CallObjectMethod(context->getJavaCapturer(), methodId);

    webrtc::ScopedJavaLocalRef<jclass> factory_class =
        webrtc::GetClass(env, "org/webrtc/DefaultVideoDecoderFactory");
    jmethodID factory_constructor = env->GetMethodID(
        factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;)V");
    webrtc::ScopedJavaLocalRef<jobject> factory_object(
        env, env->NewObject(factory_class.obj(), factory_constructor,
                            nullptr /* shared_context */));
    return webrtc::JavaToNativeVideoDecoderFactory(env, factory_object.obj());
}

void AndroidInterface::adaptVideoSource(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) {

}

rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> AndroidInterface::makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread, bool screencapture) {
    JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();
    _source[screencapture ? 1 : 0] = webrtc::CreateJavaVideoSource(env, signalingThread, false, false);
    return webrtc::CreateVideoTrackSourceProxy(signalingThread, workerThread, _source[screencapture ? 1 : 0].get());
}

bool AndroidInterface::supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) {
    if (hardwareVideoEncoderFactory == nullptr) {
        JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();

        // AndroidContext *context = (AndroidContext *) platformContext.get();
        // jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "getSharedEGLContext", "()Lorg/webrtc/EglBase$Context;");
        // jobject eglContext = env->CallObjectMethod(context->getJavaCapturer(), methodId);

        webrtc::ScopedJavaLocalRef<jclass> factory_class =
            webrtc::GetClass(env, "org/webrtc/HardwareVideoEncoderFactory");
        jmethodID factory_constructor = env->GetMethodID(
            factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;ZZ)V");
        webrtc::ScopedJavaLocalRef<jobject> factory_object(
            env, env->NewObject(factory_class.obj(), factory_constructor,
                                nullptr,
                                false,
                                true));
        hardwareVideoEncoderFactory = webrtc::JavaToNativeVideoEncoderFactory(env, factory_object.obj());
    }
    auto formats = hardwareVideoEncoderFactory->GetSupportedFormats();
    for (auto format : formats) {
        if (format.name == codecName) {
            return true;
        }
    }
    return codecName == cricket::kVp8CodecName;
}

std::unique_ptr<VideoCapturerInterface> AndroidInterface::makeVideoCapturer(
        rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source,
        std::string deviceId,
        std::function<void(VideoState)> stateUpdated,
        std::function<void(PlatformCaptureInfo)> captureInfoUpdated,
        std::shared_ptr<PlatformContext> platformContext,
        std::pair<int, int> &outResolution
) {
    return std::make_unique<VideoCapturerInterfaceImpl>(_source[deviceId == "screen" ? 1 : 0], deviceId, stateUpdated, platformContext);
}

std::unique_ptr<rtc::NetworkMonitorFactory> AndroidInterface::createNetworkMonitorFactory() {
    return std::make_unique<webrtc::jni::AndroidNetworkMonitorFactory>();
}

std::unique_ptr<PlatformInterface> CreatePlatformInterface() {
    return std::make_unique<AndroidInterface>();
}

} // namespace tgcalls

@@ -0,0 +1,30 @@
#ifndef TGCALLS_ANDROID_INTERFACE_H
#define TGCALLS_ANDROID_INTERFACE_H

#include "sdk/android/native_api/video/video_source.h"
#include "platform/PlatformInterface.h"
#include "VideoCapturerInterface.h"

namespace tgcalls {

class AndroidInterface : public PlatformInterface {
public:
    void configurePlatformAudio(int numChannels = 1) override;
    std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext, bool preferHardwareEncoding = false, bool isScreencast = false) override;
    std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) override;
    bool supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) override;
    rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread, bool screencapture) override;
    void adaptVideoSource(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) override;
    std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) override;
    std::unique_ptr<rtc::NetworkMonitorFactory> createNetworkMonitorFactory() override;

private:
    rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> _source[2];
    std::unique_ptr<webrtc::VideoEncoderFactory> hardwareVideoEncoderFactory;
    std::unique_ptr<webrtc::VideoEncoderFactory> softwareVideoEncoderFactory;

};

} // namespace tgcalls

#endif

@@ -0,0 +1,62 @@
#include "VideoCameraCapturer.h"

#include <stdint.h>
#include <memory>
#include <algorithm>

#include "AndroidInterface.h"
#include "AndroidContext.h"
#include "sdk/android/native_api/jni/jvm.h"

namespace tgcalls {

VideoCameraCapturer::VideoCameraCapturer(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) : _source(source), _stateUpdated(stateUpdated), _platformContext(platformContext) {
    AndroidContext *context = (AndroidContext *) platformContext.get();
    JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();
    jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "init", "(JLjava/lang/String;)V");
    env->CallVoidMethod(context->getJavaCapturer(), methodId, (jlong) (intptr_t) this, env->NewStringUTF(deviceId.c_str()));
}

void VideoCameraCapturer::setState(VideoState state) {
    _state = state;
    if (_stateUpdated) {
        _stateUpdated(_state);
    }
    JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();
    auto context = (AndroidContext *) _platformContext.get();
    jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "onStateChanged", "(JI)V");
    env->CallVoidMethod(context->getJavaCapturer(), methodId, (jlong) (intptr_t) this, (jint) state);
}

void VideoCameraCapturer::setPreferredCaptureAspectRatio(float aspectRatio) {
    _aspectRatio = aspectRatio;
    JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();
    auto context = (AndroidContext *) _platformContext.get();
    jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "onAspectRatioRequested", "(F)V");
    env->CallVoidMethod(context->getJavaCapturer(), methodId, (jfloat) aspectRatio);
}

void VideoCameraCapturer::setUncroppedSink(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    if (_uncroppedSink != nullptr) {
        _source->RemoveSink(_uncroppedSink.get());
    }
    if (sink != nullptr) {
        _source->AddOrUpdateSink(sink.get(), rtc::VideoSinkWants());
    }
    _uncroppedSink = sink;
}

webrtc::ScopedJavaLocalRef<jobject> VideoCameraCapturer::GetJavaVideoCapturerObserver(JNIEnv *env) {
    return _source->GetJavaVideoCapturerObserver(env);
}

} // namespace tgcalls

extern "C" {

JNIEXPORT jobject Java_org_telegram_messenger_voip_VideoCapturerDevice_nativeGetJavaVideoCapturerObserver(JNIEnv *env, jclass clazz, jlong ptr) {
    tgcalls::VideoCameraCapturer *capturer = (tgcalls::VideoCameraCapturer *) (intptr_t) ptr;
    return capturer->GetJavaVideoCapturerObserver(env).Release();
}

}
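
// Round-trip sketch (illustrative; the Java side is an assumption): the
// constructor above hands `this` to VideoCapturerDevice.init() as a jlong, and
// the Java capturer later passes the same value back through
// nativeGetJavaVideoCapturerObserver(), where it is cast back to the C++ object.
//
//   auto capturer = std::make_unique<tgcalls::VideoCameraCapturer>(
//       source, "0" /* hypothetical camera id */, stateUpdated, platformContext);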

@@ -0,0 +1,45 @@
#ifndef TGCALLS_VIDEO_CAMERA_CAPTURER_H
#define TGCALLS_VIDEO_CAMERA_CAPTURER_H

#include "api/scoped_refptr.h"
#include "api/media_stream_interface.h"
#include "modules/video_capture/video_capture.h"
#include "sdk/android/native_api/jni/scoped_java_ref.h"
#include "sdk/android/native_api/video/video_source.h"
#include "VideoCaptureInterface.h"

#include <memory>
#include <vector>
#include <stddef.h>
#include <jni.h>

namespace tgcalls {

class VideoCameraCapturer;

class VideoCameraCapturer {

public:
    VideoCameraCapturer(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext);

    void setState(VideoState state);
    void setPreferredCaptureAspectRatio(float aspectRatio);
    void setUncroppedSink(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

    webrtc::ScopedJavaLocalRef<jobject> GetJavaVideoCapturerObserver(JNIEnv* env);

private:
    rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> _source;

    std::function<void(VideoState)> _stateUpdated;
    VideoState _state;

    std::shared_ptr<PlatformContext> _platformContext;

    float _aspectRatio;
    std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _uncroppedSink;
};

} // namespace tgcalls

#endif

@@ -0,0 +1,31 @@
#include "VideoCapturerInterfaceImpl.h"

#include "VideoCameraCapturer.h"

namespace tgcalls {

VideoCapturerInterfaceImpl::VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) {
    _capturer = std::make_unique<VideoCameraCapturer>(source, deviceId, stateUpdated, platformContext);
}

void VideoCapturerInterfaceImpl::setState(VideoState state) {
    _capturer->setState(state);
}

void VideoCapturerInterfaceImpl::setPreferredCaptureAspectRatio(float aspectRatio) {
    _capturer->setPreferredCaptureAspectRatio(aspectRatio);
}

void VideoCapturerInterfaceImpl::setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _capturer->setUncroppedSink(sink);
}

int VideoCapturerInterfaceImpl::getRotation() {
    return 0;
}

void VideoCapturerInterfaceImpl::setOnFatalError(std::function<void()> error) {

}

} // namespace tgcalls

@@ -0,0 +1,32 @@
#ifndef TGCALLS_VIDEO_CAPTURER_INTERFACE_IMPL_H
#define TGCALLS_VIDEO_CAPTURER_INTERFACE_IMPL_H

#include "VideoCapturerInterface.h"
#include "VideoCameraCapturer.h"
#include "api/media_stream_interface.h"

namespace tgcalls {

class VideoCapturerInterfaceImpl final : public VideoCapturerInterface {
public:
    VideoCapturerInterfaceImpl(
        rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source,
        std::string deviceId,
        std::function<void(VideoState)> stateUpdated,
        std::shared_ptr<PlatformContext> platformContext
    );

    void setState(VideoState state) override;
    void setPreferredCaptureAspectRatio(float aspectRatio) override;
    void setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
    int getRotation() override;
    void setOnFatalError(std::function<void()> error) override;

private:
    std::unique_ptr<VideoCameraCapturer> _capturer;

};

} // namespace tgcalls

#endif

@@ -0,0 +1,39 @@
#ifndef TGCALLS_AUDIO_DEVICE_MODULE_IOS
#define TGCALLS_AUDIO_DEVICE_MODULE_IOS

#include "platform/PlatformInterface.h"

namespace tgcalls {

class AudioDeviceModuleIOS : public DefaultWrappedAudioDeviceModule {
public:
    AudioDeviceModuleIOS(webrtc::scoped_refptr<webrtc::AudioDeviceModule> impl) :
        DefaultWrappedAudioDeviceModule(impl) {
    }

    virtual ~AudioDeviceModuleIOS() {
    }

    virtual int32_t StopPlayout() override {
        return 0;
    }

    virtual int32_t StopRecording() override {
        return 0;
    }

    virtual int32_t Terminate() override {
        return 0;
    }

    virtual void Stop() override {
        WrappedInstance()->StopPlayout();
        WrappedInstance()->StopRecording();
        WrappedInstance()->Terminate();
    }
};

} // namespace tgcalls

#endif
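
// Behavior sketch (illustrative, not from the original sources): the wrapper
// swallows individual stop/terminate calls and only tears the device down when
// Stop() is invoked explicitly. The `adm` value is assumed to exist already.
//
//   auto wrapped = rtc::make_ref_counted<tgcalls::AudioDeviceModuleIOS>(adm);
//   wrapped->StopPlayout();   // no-op, returns 0
//   wrapped->Stop();          // actually stops playout/recording and terminates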

@@ -0,0 +1,31 @@

#ifndef TGCALLS_AUDIO_DEVICE_MODULE_MACOS
#define TGCALLS_AUDIO_DEVICE_MODULE_MACOS

#include "platform/PlatformInterface.h"

namespace tgcalls {

class AudioDeviceModuleMacos : public DefaultWrappedAudioDeviceModule {
public:
    AudioDeviceModuleMacos(webrtc::scoped_refptr<webrtc::AudioDeviceModule> impl) :
        DefaultWrappedAudioDeviceModule(impl) {
    }

    virtual ~AudioDeviceModuleMacos() {
    }

    virtual int32_t SetStereoPlayout(bool enable) override {
        return WrappedInstance()->SetStereoPlayout(enable);
    }

    virtual void Stop() override {
        WrappedInstance()->StopPlayout();
        WrappedInstance()->StopRecording();
        WrappedInstance()->Terminate();
    }
};

} // namespace tgcalls

#endif

@@ -0,0 +1,21 @@
#ifndef TGCALLS_CUSTOM_EXTERNAL_CAPTURER_H
#define TGCALLS_CUSTOM_EXTERNAL_CAPTURER_H
#ifdef WEBRTC_IOS
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>

#include <memory>
#include "api/scoped_refptr.h"
#include "api/media_stream_interface.h"
#import "base/RTCVideoFrame.h"
#include "Instance.h"

@interface CustomExternalCapturer : NSObject

- (instancetype)initWithSource:(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source;

+ (void)passPixelBuffer:(CVPixelBufferRef)pixelBuffer sampleBufferReference:(CMSampleBufferRef)sampleBufferReference rotation:(RTCVideoRotation)rotation toSource:(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source croppingBuffer:(std::vector<uint8_t> &)croppingBuffer;

@end
#endif // WEBRTC_IOS
#endif

@@ -0,0 +1,90 @@
#include "CustomExternalCapturer.h"

#import <AVFoundation/AVFoundation.h>

#include "rtc_base/logging.h"
#import "base/RTCLogging.h"
#import "base/RTCVideoFrameBuffer.h"
#import "TGRTCCVPixelBuffer.h"
#import "sdk/objc/native/src/objc_video_track_source.h"
#import "sdk/objc/native/src/objc_frame_buffer.h"
//#import "api/video_track_source_proxy.h"

#import "helpers/UIDevice+RTCDevice.h"

#import "helpers/AVCaptureSession+DevicePosition.h"
#import "helpers/RTCDispatcher+Private.h"
#import "base/RTCVideoFrame.h"

#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "rtc_base/checks.h"
#include "third_party/libyuv/include/libyuv.h"
#include "pc/video_track_source_proxy.h"

#include "DarwinVideoSource.h"

static const int64_t kNanosecondsPerSecond = 1000000000;

static tgcalls::DarwinVideoTrackSource *getObjCVideoSource(const webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
    webrtc::VideoTrackSourceProxy *proxy_source =
        static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
    return static_cast<tgcalls::DarwinVideoTrackSource *>(proxy_source->internal());
}

@interface CustomExternalCapturer () {
    webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;
}

@end

@implementation CustomExternalCapturer

- (instancetype)initWithSource:(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source {
    self = [super init];
    if (self != nil) {
        _source = source;
    }
    return self;
}

- (void)dealloc {
}

+ (void)passPixelBuffer:(CVPixelBufferRef)pixelBuffer sampleBufferReference:(CMSampleBufferRef)sampleBufferReference rotation:(RTCVideoRotation)rotation toSource:(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source croppingBuffer:(std::vector<uint8_t> &)croppingBuffer {
    TGRTCCVPixelBuffer *rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer];
    if (sampleBufferReference) {
        [rtcPixelBuffer storeSampleBufferReference:sampleBufferReference];
    }
    rtcPixelBuffer.deviceRelativeVideoRotation = -1;

    int width = rtcPixelBuffer.width;
    int height = rtcPixelBuffer.height;

    // Round the dimensions down to a multiple of 4 so the buffer can be cropped and scaled safely.
    width -= width % 4;
    height -= height % 4;

    if (width != rtcPixelBuffer.width || height != rtcPixelBuffer.height) {
        CVPixelBufferRef outputPixelBufferRef = NULL;
        OSType pixelFormat = CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
        CVPixelBufferCreate(NULL, width, height, pixelFormat, NULL, &outputPixelBufferRef);
        if (outputPixelBufferRef) {
            int bufferSize = [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:width height:height];
            if (croppingBuffer.size() < static_cast<size_t>(bufferSize)) {
                croppingBuffer.resize(bufferSize);
            }
            if ([rtcPixelBuffer cropAndScaleTo:outputPixelBufferRef withTempBuffer:croppingBuffer.data()]) {
                rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:outputPixelBufferRef];
                rtcPixelBuffer.deviceRelativeVideoRotation = -1;
            }
            CVPixelBufferRelease(outputPixelBufferRef);
        }
    }

    int64_t timeStampNs = CACurrentMediaTime() * kNanosecondsPerSecond;
    RTCVideoFrame *videoFrame = [[RTCVideoFrame alloc] initWithBuffer:(id<RTCVideoFrameBuffer>)[rtcPixelBuffer toI420] rotation:rotation timeStampNs:timeStampNs];

    getObjCVideoSource(source)->OnCapturedFrame(videoFrame);
}

@end
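
// Call-site sketch (an assumption for illustration; names are hypothetical):
// a capture callback would hand each CVPixelBuffer to the class method above,
// reusing one scratch buffer across frames.
//
//   static std::vector<uint8_t> croppingBuffer;
//   [CustomExternalCapturer passPixelBuffer:pixelBuffer
//                     sampleBufferReference:nil
//                                  rotation:RTCVideoRotation_0
//                                  toSource:nativeSource
//                            croppingBuffer:croppingBuffer];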

File diff suppressed because it is too large

@@ -0,0 +1,200 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 */

#ifndef TGCALLS_CUSTOM_SIMULCAST_ENCODER_ADAPTER_H_
#define TGCALLS_CUSTOM_SIMULCAST_ENCODER_ADAPTER_H_

#include <atomic>
#include <list>
#include <memory>
#include <stack>
#include <string>
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/sequence_checker.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "common_video/framerate_controller.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/experiments/encoder_info_settings.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/system/rtc_export.h"
#include "api/field_trials_view.h"

namespace webrtc {

// CustomSimulcastEncoderAdapter implements simulcast support by creating
// multiple webrtc::VideoEncoder instances with the given VideoEncoderFactory.
// The object is created and destroyed on the worker thread, but all public
// interfaces should be called from the encoder task queue.
class RTC_EXPORT CustomSimulcastEncoderAdapter : public VideoEncoder {
 public:
  // TODO(bugs.webrtc.org/11000): Remove when downstream usage is gone.
  CustomSimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
                                const SdpVideoFormat& format);
  // `primary_factory` produces the first-choice encoders to use.
  // `hardware_factory`, if non-null, is used to create a fallback encoder
  // that will be used if InitEncode() fails for the primary encoder.
  CustomSimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
                                VideoEncoderFactory* hardware_factory,
                                const SdpVideoFormat& format,
                                const FieldTrialsView& field_trials);
  ~CustomSimulcastEncoderAdapter() override;

  // Implements VideoEncoder.
  void SetFecControllerOverride(
      FecControllerOverride* fec_controller_override) override;
  int Release() override;
  int InitEncode(const VideoCodec* codec_settings,
                 const VideoEncoder::Settings& settings) override;
  int Encode(const VideoFrame& input_image,
             const std::vector<VideoFrameType>* frame_types) override;
  int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
  void SetRates(const RateControlParameters& parameters) override;
  void OnPacketLossRateUpdate(float packet_loss_rate) override;
  void OnRttUpdate(int64_t rtt_ms) override;
  void OnLossNotification(const LossNotification& loss_notification) override;

  EncoderInfo GetEncoderInfo() const override;

 private:
  class EncoderContext {
   public:
    EncoderContext(std::unique_ptr<VideoEncoder> encoder,
                   bool prefer_temporal_support,
                   VideoEncoder::EncoderInfo primary_info,
                   VideoEncoder::EncoderInfo fallback_info);
    EncoderContext& operator=(EncoderContext&&) = delete;

    VideoEncoder& encoder() { return *encoder_; }
    bool prefer_temporal_support() { return prefer_temporal_support_; }
    void Release();

    const VideoEncoder::EncoderInfo& PrimaryInfo() { return primary_info_; }

    const VideoEncoder::EncoderInfo& FallbackInfo() { return fallback_info_; }

   private:
    std::unique_ptr<VideoEncoder> encoder_;
    bool prefer_temporal_support_;
    const VideoEncoder::EncoderInfo primary_info_;
    const VideoEncoder::EncoderInfo fallback_info_;
  };

  class StreamContext : public EncodedImageCallback {
   public:
    StreamContext(CustomSimulcastEncoderAdapter* parent,
                  std::unique_ptr<EncoderContext> encoder_context,
                  std::unique_ptr<FramerateController> framerate_controller,
                  int stream_idx,
                  uint16_t width,
                  uint16_t height,
                  bool send_stream);
    StreamContext(StreamContext&& rhs);
    StreamContext& operator=(StreamContext&&) = delete;
    ~StreamContext() override;

    Result OnEncodedImage(
        const EncodedImage& encoded_image,
        const CodecSpecificInfo* codec_specific_info) override;
    void OnDroppedFrame(DropReason reason) override;

    VideoEncoder& encoder() { return encoder_context_->encoder(); }
    const VideoEncoder& encoder() const { return encoder_context_->encoder(); }
    int stream_idx() const { return stream_idx_; }
    uint16_t width() const { return width_; }
    uint16_t height() const { return height_; }
    bool is_keyframe_needed() const {
      return !is_paused_ && is_keyframe_needed_;
    }
    void set_is_keyframe_needed() { is_keyframe_needed_ = true; }
    bool is_paused() const { return is_paused_; }
    void set_is_paused(bool is_paused) { is_paused_ = is_paused; }
    absl::optional<double> target_fps() const {
      return framerate_controller_ == nullptr
                 ? absl::nullopt
                 : absl::optional<double>(
                       framerate_controller_->GetMaxFramerate());
    }

    std::unique_ptr<EncoderContext> ReleaseEncoderContext() &&;
    void OnKeyframe(Timestamp timestamp);
    bool ShouldDropFrame(Timestamp timestamp);

   private:
    CustomSimulcastEncoderAdapter* const parent_;
    std::unique_ptr<EncoderContext> encoder_context_;
    std::unique_ptr<FramerateController> framerate_controller_;
    const int stream_idx_;
    const uint16_t width_;
    const uint16_t height_;
    bool is_keyframe_needed_;
    bool is_paused_;
  };

  bool Initialized() const;

  void DestroyStoredEncoders();

  // This method creates an encoder. It may reuse previously created encoders
  // from `cached_encoder_contexts_`. It's const because it's used from
  // const GetEncoderInfo().
  std::unique_ptr<EncoderContext> FetchOrCreateEncoderContext(
      bool is_lowest_quality_stream, bool is_highest_quality_stream) const;

  webrtc::VideoCodec MakeStreamCodec(const webrtc::VideoCodec& codec,
                                     int stream_idx,
                                     uint32_t start_bitrate_kbps,
                                     bool is_lowest_quality_stream,
                                     bool is_highest_quality_stream);

  EncodedImageCallback::Result OnEncodedImage(
      size_t stream_idx,
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info);

  void OnDroppedFrame(size_t stream_idx);

  void OverrideFromFieldTrial(VideoEncoder::EncoderInfo* info) const;

  std::atomic<int> inited_;
  VideoEncoderFactory* const primary_encoder_factory_;
  VideoEncoderFactory* const hardware_encoder_factory_;
  const SdpVideoFormat video_format_;
  VideoCodec codec_;
  int total_streams_count_;
  bool bypass_mode_;
  std::vector<StreamContext> stream_contexts_;
  EncodedImageCallback* encoded_complete_callback_;

  // Used for checking the single-threaded access of the encoder interface.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker encoder_queue_;

  // Store previously created and released encoders, so they don't have to be
  // recreated. Remaining encoders are destroyed by the destructor.
  // Marked as `mutable` because we may need to temporarily create an encoder
  // in GetEncoderInfo(), which is const.
  mutable std::list<std::unique_ptr<EncoderContext>> cached_encoder_contexts_;

  const absl::optional<unsigned int> experimental_boosted_screenshare_qp_;
  const bool boost_base_layer_quality_;
  const bool prefer_temporal_support_on_base_layer_;

  const SimulcastEncoderAdapterEncoderInfoSettings encoder_info_override_;
};

}  // namespace webrtc

#endif  // TGCALLS_CUSTOM_SIMULCAST_ENCODER_ADAPTER_H_
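
// Construction sketch (illustrative assumption, not from this commit): the
// adapter is built from two factories and the negotiated format, then used
// like any other webrtc::VideoEncoder.
//
//   webrtc::CustomSimulcastEncoderAdapter adapter(
//       primary_factory.get(), hardware_factory.get(),
//       webrtc::SdpVideoFormat(cricket::kVp8CodecName), field_trials);
//   adapter.InitEncode(&codec_settings, encoder_settings);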

@@ -0,0 +1,13 @@
#ifndef TGCALLS_DARWIN_FFMPEG_H
#define TGCALLS_DARWIN_FFMPEG_H

#include "platform/PlatformInterface.h"

namespace tgcalls {

void setupDarwinVideoDecoding(AVCodecContext *codecContext);
webrtc::scoped_refptr<webrtc::VideoFrameBuffer> createDarwinPlatformFrameFromData(AVFrame const *frame);

} // namespace tgcalls

#endif

@@ -0,0 +1,41 @@
#include "DarwinFFMpeg.h"

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <libavcodec/avcodec.h>
}

#import "ExtractCVPixelBuffer.h"

namespace tgcalls {

static enum AVPixelFormat getDarwinPreferredPixelFormat(__unused AVCodecContext *ctx, __unused const enum AVPixelFormat *pix_fmts) {
    return AV_PIX_FMT_VIDEOTOOLBOX;
}

void setupDarwinVideoDecoding(AVCodecContext *codecContext) {
    // VideoToolbox hardware decoding is disabled here: the early return makes
    // the setup below unreachable; it is kept for reference.
    return;

#if TARGET_IPHONE_SIMULATOR
#else
    if (!codecContext) {
        return;
    }
    av_hwdevice_ctx_create(&codecContext->hw_device_ctx, AV_HWDEVICE_TYPE_VIDEOTOOLBOX, nullptr, nullptr, 0);
    codecContext->get_format = getDarwinPreferredPixelFormat;
#endif
}

webrtc::scoped_refptr<webrtc::VideoFrameBuffer> createDarwinPlatformFrameFromData(AVFrame const *frame) {
    if (!frame) {
        return nullptr;
    }
    if (frame->format == AV_PIX_FMT_VIDEOTOOLBOX && frame->data[3]) {
        // For VideoToolbox frames, FFmpeg stores the CVPixelBufferRef in data[3].
        return extractCVPixelBuffer((void *)frame->data[3]);
    } else {
        return nullptr;
    }
}

} // namespace tgcalls

@@ -0,0 +1,40 @@
#ifndef TGCALLS_DARWIN_INTERFACE_H
#define TGCALLS_DARWIN_INTERFACE_H

#include "platform/PlatformInterface.h"

#import <CoreVideo/CoreVideo.h>

namespace tgcalls {

class DarwinVideoFrame : public PlatformVideoFrame {
public:
    DarwinVideoFrame(CVPixelBufferRef pixelBuffer);
    virtual ~DarwinVideoFrame();

    CVPixelBufferRef pixelBuffer() const {
        return _pixelBuffer;
    }

private:
    CVPixelBufferRef _pixelBuffer = nullptr;
};

class DarwinInterface : public PlatformInterface {
public:
    std::unique_ptr<rtc::NetworkMonitorFactory> createNetworkMonitorFactory() override;
    void configurePlatformAudio(int numChannels) override;
    std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(bool preferHardwareEncoding, bool isScreencast) override;
    std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory() override;
    bool supportsEncoding(const std::string &codecName) override;
    webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) override;
    virtual void adaptVideoSource(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) override;
    std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) override;
    virtual webrtc::scoped_refptr<WrappedAudioDeviceModule> wrapAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> module) override;
    virtual void setupVideoDecoding(AVCodecContext *codecContext) override;
    virtual webrtc::scoped_refptr<webrtc::VideoFrameBuffer> createPlatformFrameFromData(AVFrame const *frame) override;
};

} // namespace tgcalls

#endif

@@ -0,0 +1,158 @@
#include "DarwinInterface.h"

#include "VideoCapturerInterfaceImpl.h"
#include "sdk/objc/native/src/objc_video_track_source.h"
#include "sdk/objc/native/api/network_monitor_factory.h"

#include "media/base/media_constants.h"
#include "TGRTCDefaultVideoEncoderFactory.h"
#include "TGRTCDefaultVideoDecoderFactory.h"
#include "sdk/objc/native/api/video_encoder_factory.h"
#include "sdk/objc/native/api/video_decoder_factory.h"
#include "pc/video_track_source_proxy.h"
#import "base/RTCLogging.h"
#include "AudioDeviceModuleIOS.h"
#include "AudioDeviceModuleMacos.h"
#include "DarwinVideoSource.h"
#include "objc_video_encoder_factory.h"
#include "objc_video_decoder_factory.h"
#include "sdk/objc/native/src/objc_frame_buffer.h"
#import "sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h"

#import "DarwinFFMpeg.h"

#ifdef WEBRTC_IOS
#include "platform/darwin/iOS/RTCAudioSession.h"
#include "platform/darwin/iOS/RTCAudioSessionConfiguration.h"
#import <UIKit/UIKit.h>
#endif // WEBRTC_IOS

#import <AVFoundation/AVFoundation.h>

#include <sys/sysctl.h>

namespace tgcalls {

std::unique_ptr<webrtc::VideoDecoderFactory> CustomObjCToNativeVideoDecoderFactory(
    id<RTC_OBJC_TYPE(RTCVideoDecoderFactory)> objc_video_decoder_factory) {
    return std::make_unique<webrtc::CustomObjCVideoDecoderFactory>(objc_video_decoder_factory);
}

static DarwinVideoTrackSource *getObjCVideoSource(const webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
    webrtc::VideoTrackSourceProxy *proxy_source =
        static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
    return static_cast<DarwinVideoTrackSource *>(proxy_source->internal());
}

[[maybe_unused]] static NSString *getPlatformInfo() {
    const char *typeSpecifier = "hw.machine";

    size_t size;
    sysctlbyname(typeSpecifier, NULL, &size, NULL, 0);

    char *answer = (char *)malloc(size);
    sysctlbyname(typeSpecifier, answer, &size, NULL, 0);

    NSString *results = [NSString stringWithCString:answer encoding:NSUTF8StringEncoding];

    free(answer);
    return results;
}

std::unique_ptr<rtc::NetworkMonitorFactory> DarwinInterface::createNetworkMonitorFactory() {
    return webrtc::CreateNetworkMonitorFactory();
}

void DarwinInterface::configurePlatformAudio(int numChannels) {
}

std::unique_ptr<webrtc::VideoEncoderFactory> DarwinInterface::makeVideoEncoderFactory(bool preferHardwareEncoding, bool isScreencast) {
    auto nativeFactory = std::make_unique<webrtc::CustomObjCVideoEncoderFactory>([[TGRTCDefaultVideoEncoderFactory alloc] initWithPreferHardwareH264:preferHardwareEncoding preferX264:false]);
    if (!preferHardwareEncoding) {
        auto nativeHardwareFactory = std::make_unique<webrtc::CustomObjCVideoEncoderFactory>([[TGRTCDefaultVideoEncoderFactory alloc] initWithPreferHardwareH264:true preferX264:false]);
        return std::make_unique<webrtc::SimulcastVideoEncoderFactory>(std::move(nativeFactory), std::move(nativeHardwareFactory));
    }
    return nativeFactory;
}

std::unique_ptr<webrtc::VideoDecoderFactory> DarwinInterface::makeVideoDecoderFactory() {
    return CustomObjCToNativeVideoDecoderFactory([[TGRTCDefaultVideoDecoderFactory alloc] init]);
}

bool DarwinInterface::supportsEncoding(const std::string &codecName) {
    if (false) {
    }
#ifndef WEBRTC_DISABLE_H265
    else if (codecName == cricket::kH265CodecName) {
#ifdef WEBRTC_IOS
        if (@available(iOS 11.0, *)) {
            return [[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality];
        }
#elif defined WEBRTC_MAC // WEBRTC_IOS

#ifdef __x86_64__
        return NO;
#else
        return YES;
#endif
#endif // WEBRTC_IOS || WEBRTC_MAC
    }
#endif
    else if (codecName == cricket::kH264CodecName) {
#ifdef __x86_64__
        return YES;
#else
        return NO;
#endif
    } else if (codecName == cricket::kVp8CodecName) {
        return true;
    } else if (codecName == cricket::kVp9CodecName) {
        return true;
    }
    return false;
}

webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> DarwinInterface::makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) {
    webrtc::scoped_refptr<tgcalls::DarwinVideoTrackSource> objCVideoTrackSource(new rtc::RefCountedObject<tgcalls::DarwinVideoTrackSource>());
    return webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, objCVideoTrackSource);
}

void DarwinInterface::adaptVideoSource(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) {
    getObjCVideoSource(videoSource)->OnOutputFormatRequest(width, height, fps);
}

std::unique_ptr<VideoCapturerInterface> DarwinInterface::makeVideoCapturer(webrtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) {
    return std::make_unique<VideoCapturerInterfaceImpl>(source, deviceId, stateUpdated, captureInfoUpdated, outResolution);
}

webrtc::scoped_refptr<WrappedAudioDeviceModule> DarwinInterface::wrapAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> module) {
#ifdef WEBRTC_IOS
    return rtc::make_ref_counted<AudioDeviceModuleIOS>(module);
#else
    return rtc::make_ref_counted<AudioDeviceModuleMacos>(module);
#endif
}

void DarwinInterface::setupVideoDecoding(AVCodecContext *codecContext) {
    return setupDarwinVideoDecoding(codecContext);
}

webrtc::scoped_refptr<webrtc::VideoFrameBuffer> DarwinInterface::createPlatformFrameFromData(AVFrame const *frame) {
    return createDarwinPlatformFrameFromData(frame);
}

std::unique_ptr<PlatformInterface> CreatePlatformInterface() {
    return std::make_unique<DarwinInterface>();
}

DarwinVideoFrame::DarwinVideoFrame(CVPixelBufferRef pixelBuffer) {
    _pixelBuffer = CVPixelBufferRetain(pixelBuffer);
}

DarwinVideoFrame::~DarwinVideoFrame() {
    if (_pixelBuffer) {
        CVPixelBufferRelease(_pixelBuffer);
    }
}

} // namespace tgcalls

@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef TGCALLS_DARWIN_VIDEO_SOURCE_H
#define TGCALLS_DARWIN_VIDEO_SOURCE_H
//#ifdef WEBRTC_IOS

#import "base/RTCVideoCapturer.h"

#include "base/RTCMacros.h"
#include "media/base/adapted_video_track_source.h"
#include "rtc_base/timestamp_aligner.h"

RTC_FWD_DECL_OBJC_CLASS(RTC_OBJC_TYPE(RTCVideoFrame));

namespace tgcalls {

class DarwinVideoTrackSource : public rtc::AdaptedVideoTrackSource {
public:
    DarwinVideoTrackSource();

    // This class cannot be used for implementing screen casting. Hopefully,
    // this function will be removed before we add that to iOS/Mac.
    bool is_screencast() const override;

    // Indicates that the encoder should denoise video before encoding it.
    // If it is not set, the default configuration is used, which differs
    // depending on the video codec.
    absl::optional<bool> needs_denoising() const override;

    SourceState state() const override;

    bool remote() const override;

    void OnCapturedFrame(RTC_OBJC_TYPE(RTCVideoFrame) * frame);
    bool OnCapturedFrame(const webrtc::VideoFrame& frame);

    // Called by RTCVideoSource.
    void OnOutputFormatRequest(int width, int height, int fps);

private:
    rtc::VideoBroadcaster broadcaster_;
    rtc::TimestampAligner timestamp_aligner_;

};

} // namespace tgcalls

//#endif //WEBRTC_IOS
#endif

@@ -0,0 +1,165 @@
/*
 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "DarwinVideoSource.h"

#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"

#include "api/video/i420_buffer.h"
#include "sdk/objc/native/src/objc_frame_buffer.h"

namespace tgcalls {

DarwinVideoTrackSource::DarwinVideoTrackSource()
    : AdaptedVideoTrackSource(/* required resolution alignment */ 2) {}

bool DarwinVideoTrackSource::is_screencast() const {
    return false;
}

absl::optional<bool> DarwinVideoTrackSource::needs_denoising() const {
    return false;
}

webrtc::MediaSourceInterface::SourceState DarwinVideoTrackSource::state() const {
    return SourceState::kLive;
}

bool DarwinVideoTrackSource::remote() const {
    return false;
}

void DarwinVideoTrackSource::OnOutputFormatRequest(int width, int height, int fps) {
    cricket::VideoFormat format(width, height, cricket::VideoFormat::FpsToInterval(fps), 0);
    video_adapter()->OnOutputFormatRequest(format);
}

void DarwinVideoTrackSource::OnCapturedFrame(RTC_OBJC_TYPE(RTCVideoFrame) * frame) {
    const int64_t timestamp_us = frame.timeStampNs / rtc::kNumNanosecsPerMicrosec;
    const int64_t translated_timestamp_us =
        timestamp_aligner_.TranslateTimestamp(timestamp_us, rtc::TimeMicros());

    int adapted_width;
    int adapted_height;
    int crop_width;
    int crop_height;
    int crop_x;
    int crop_y;
    if (!AdaptFrame(frame.width,
                    frame.height,
                    timestamp_us,
                    &adapted_width,
                    &adapted_height,
                    &crop_width,
                    &crop_height,
                    &crop_x,
                    &crop_y)) {
        return;
    }

    webrtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer;
    if (adapted_width == frame.width && adapted_height == frame.height) {
        // No adaptation - optimized path.
        @autoreleasepool {
            buffer = new rtc::RefCountedObject<webrtc::ObjCFrameBuffer>(frame.buffer);
        }
    } else if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
        // Adapted CVPixelBuffer frame.
        @autoreleasepool {
            RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
                (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
            buffer = new rtc::RefCountedObject<webrtc::ObjCFrameBuffer>([[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc]
                initWithPixelBuffer:rtcPixelBuffer.pixelBuffer
                       adaptedWidth:adapted_width
                      adaptedHeight:adapted_height
                          cropWidth:crop_width
                         cropHeight:crop_height
                              cropX:crop_x + rtcPixelBuffer.cropX
                              cropY:crop_y + rtcPixelBuffer.cropY]);
        }
    } else {
        @autoreleasepool {
            // Adapted I420 frame.
            // TODO(magjed): Optimize this I420 path.
            webrtc::scoped_refptr<webrtc::I420Buffer> i420_buffer = webrtc::I420Buffer::Create(adapted_width, adapted_height);
            buffer = new rtc::RefCountedObject<webrtc::ObjCFrameBuffer>(frame.buffer);
            i420_buffer->CropAndScaleFrom(*buffer->ToI420(), crop_x, crop_y, crop_width, crop_height);
            buffer = i420_buffer;
        }
    }

    // Applying rotation is only supported for legacy reasons and performance is
    // not critical here.
    webrtc::VideoRotation rotation = static_cast<webrtc::VideoRotation>(frame.rotation);
    if (apply_rotation() && rotation != webrtc::kVideoRotation_0) {
        buffer = webrtc::I420Buffer::Rotate(*buffer->ToI420(), rotation);
        rotation = webrtc::kVideoRotation_0;
    }

    OnFrame(webrtc::VideoFrame::Builder()
                .set_video_frame_buffer(buffer)
                .set_rotation(rotation)
                .set_timestamp_us(translated_timestamp_us)
                .build());
}

bool DarwinVideoTrackSource::OnCapturedFrame(const webrtc::VideoFrame& frame) {
    // frame.timestamp_us() is already in microseconds; no conversion needed.
    const int64_t timestamp_us = frame.timestamp_us();
    const int64_t translated_timestamp_us =
        timestamp_aligner_.TranslateTimestamp(timestamp_us, rtc::TimeMicros());

    int adapted_width;
    int adapted_height;
    int crop_width;
    int crop_height;
    int crop_x;
    int crop_y;
    if (!AdaptFrame(frame.width(),
                    frame.height(),
                    timestamp_us,
                    &adapted_width,
                    &adapted_height,
                    &crop_width,
                    &crop_height,
                    &crop_x,
                    &crop_y)) {
        return false;
    }

    webrtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer;
    if (adapted_width == frame.width() && adapted_height == frame.height()) {
        buffer = frame.video_frame_buffer();
    } else {
        webrtc::scoped_refptr<webrtc::I420Buffer> i420_buffer = webrtc::I420Buffer::Create(adapted_width, adapted_height);
        buffer = frame.video_frame_buffer();
        i420_buffer->CropAndScaleFrom(*buffer->ToI420(), crop_x, crop_y, crop_width, crop_height);
        buffer = i420_buffer;
    }

    // Applying rotation is only supported for legacy reasons and performance is
    // not critical here.
    webrtc::VideoRotation rotation = frame.rotation();
    if (apply_rotation() && rotation != webrtc::kVideoRotation_0) {
        buffer = webrtc::I420Buffer::Rotate(*buffer->ToI420(), rotation);
        rotation = webrtc::kVideoRotation_0;
    }

    OnFrame(webrtc::VideoFrame::Builder()
                .set_video_frame_buffer(buffer)
                .set_rotation(rotation)
                .set_timestamp_us(translated_timestamp_us)
                .build());

    return true;
}

} // namespace tgcalls
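
// Feeding sketch (assumed caller code, not from this commit): a capturer
// pushes frames into the source, which crops/scales them according to the
// most recent OnOutputFormatRequest() before delivering to sinks.
//
//   darwinSource->OnOutputFormatRequest(1280, 720, 30);
//   bool delivered = darwinSource->OnCapturedFrame(nativeFrame);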

@@ -0,0 +1,55 @@
//
//  DesktopCaptureSourceView.h
//  TgVoipWebrtc
//
//  Created by Mikhail Filimonov on 28.12.2020.
//  Copyright © 2020 Mikhail Filimonov. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <AppKit/AppKit.h>


NS_ASSUME_NONNULL_BEGIN

@protocol VideoSourceMac
- (NSString *)deviceIdKey;
- (NSString *)title;
- (NSString *)uniqueKey;
- (BOOL)isEqual:(id)another;
@end

@interface DesktopCaptureSourceDataMac : NSObject
@property CGSize aspectSize;
@property double fps;
@property bool captureMouse;
- (id)initWithSize:(CGSize)size fps:(double)fps captureMouse:(bool)captureMouse;

- (NSString *)cachedKey;
@end

@interface DesktopCaptureSourceMac : NSObject <VideoSourceMac>
- (long)uniqueId;
- (BOOL)isWindow;
@end

@interface DesktopCaptureSourceScopeMac : NSObject
@property (nonatomic, strong, readonly) DesktopCaptureSourceDataMac *data;
@property (nonatomic, strong, readonly) DesktopCaptureSourceMac *source;
- (id)initWithSource:(DesktopCaptureSourceMac *)source data:(DesktopCaptureSourceDataMac *)data;
- (NSString *)cachedKey;
@end

@interface DesktopCaptureSourceManagerMac : NSObject

- (instancetype)init_s;
- (instancetype)init_w;
- (NSArray<DesktopCaptureSourceMac *> *)list;

- (NSView *)createForScope:(DesktopCaptureSourceScopeMac *)scope;
- (void)start:(DesktopCaptureSourceScopeMac *)scope;
- (void)stop:(DesktopCaptureSourceScopeMac *)scope;

@end

NS_ASSUME_NONNULL_END
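
// Usage sketch (Objective-C++, an illustrative assumption): enumerate screens,
// build a capture scope, and start it.
//
//   DesktopCaptureSourceManagerMac *manager = [[DesktopCaptureSourceManagerMac alloc] init_s];
//   DesktopCaptureSourceMac *screen = [manager list].firstObject;
//   DesktopCaptureSourceDataMac *data = [[DesktopCaptureSourceDataMac alloc] initWithSize:CGSizeMake(1280, 720) fps:30 captureMouse:true];
//   DesktopCaptureSourceScopeMac *scope = [[DesktopCaptureSourceScopeMac alloc] initWithSource:screen data:data];
//   NSView *preview = [manager createForScope:scope];
//   [manager start:scope];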
|
||||
|
|
@ -0,0 +1,207 @@
|
|||
//
|
||||
// DesktopCaptureSourceView.m
|
||||
// TgVoipWebrtc
|
||||
//
|
||||
// Created by Mikhail Filimonov on 28.12.2020.
|
||||
// Copyright © 2020 Mikhail Filimonov. All rights reserved.
|
||||
//
|
||||
#import <Cocoa/Cocoa.h>
|
||||
#import "DesktopCaptureSourceViewMac.h"
|
||||
#import "platform/darwin/VideoMetalViewMac.h"
|
||||
#import "tgcalls/desktop_capturer/DesktopCaptureSource.h"
|
||||
#import "tgcalls/desktop_capturer/DesktopCaptureSourceHelper.h"
|
||||
#import "tgcalls/desktop_capturer/DesktopCaptureSourceManager.h"
|
||||
#import "platform/darwin/VideoMetalViewMac.h"
|
||||
|
||||
|
||||
|
||||
@interface DesktopCaptureSourceViewMetal : VideoMetalView
|
||||
@end
|
||||
|
||||
|
||||
@implementation DesktopCaptureSourceViewMetal
|
||||
|
||||
-(id)initWithHelper:(tgcalls::DesktopCaptureSourceHelper)helper {
|
||||
if (self = [super initWithFrame:CGRectZero]) {
|
||||
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [self getSink];
|
||||
helper.setOutput(sink);
|
||||
[self setVideoContentMode:kCAGravityResizeAspectFill];
|
||||
}
|
||||
return self;
|
||||
}
|
||||
@end
|
||||
|
||||
|
||||
@implementation DesktopCaptureSourceDataMac
|
||||
-(id)initWithSize:(CGSize)size fps:(double)fps captureMouse:(bool)captureMouse {
|
||||
if (self = [super init]) {
|
||||
self.aspectSize = size;
|
||||
self.fps = fps;
|
||||
self.captureMouse = captureMouse;
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
-(NSString *)cachedKey {
|
||||
return [[NSString alloc] initWithFormat:@"%@:%f:%d", NSStringFromSize(self.aspectSize), self.fps, self.captureMouse];
|
||||
}
|
||||
@end
|
||||
|
||||
@interface DesktopCaptureSourceMac ()
|
||||
{
|
||||
absl::optional<tgcalls::DesktopCaptureSource> _source;
|
||||
BOOL _isWindow;
|
||||
}
|
||||
-(id)initWithSource:(tgcalls::DesktopCaptureSource)source isWindow:(BOOL)isWindow;
|
||||
-(tgcalls::DesktopCaptureSource)getSource;
|
||||
|
||||
@end
|
||||
|
||||
@implementation DesktopCaptureSourceMac
|
||||
|
||||
-(tgcalls::DesktopCaptureSource)getSource {
|
||||
return _source.value();
|
||||
}
|
||||
|
||||
-(NSString *)title {
|
||||
if (_isWindow) {
|
||||
const tgcalls::DesktopCaptureSource source = _source.value();
|
||||
return [[NSString alloc] initWithCString:source.title().c_str() encoding:NSUTF8StringEncoding];
|
||||
}
|
||||
else
|
||||
return [[NSString alloc] initWithFormat:@"Screen"];
|
||||
}
|
||||
|
||||
-(long)uniqueId {
|
||||
return _source.value().uniqueId();
|
||||
}
|
||||
-(BOOL)isWindow {
|
||||
return _isWindow;
|
||||
}
|
||||
-(NSString *)uniqueKey {
|
||||
return [[NSString alloc] initWithFormat:@"%ld:%@", self.uniqueId, _isWindow ? @"Window" : @"Screen"];
|
||||
}
|
||||
|
||||
-(NSString *)deviceIdKey {
|
||||
return [[NSString alloc] initWithFormat:@"desktop_capturer_%@_%ld", _isWindow ? @"window" : @"screen", self.uniqueId];
|
||||
}
|
||||
|
||||
-(BOOL)isEqual:(id)object {
|
||||
return [[((DesktopCaptureSourceMac *)object) uniqueKey] isEqualToString:[self uniqueKey]];
|
||||
}
|
||||
- (BOOL)isEqualTo:(id)object {
|
||||
return [[((DesktopCaptureSourceMac *)object) uniqueKey] isEqualToString:[self uniqueKey]];
|
||||
}
|
||||
|
||||
-(id)initWithSource:(tgcalls::DesktopCaptureSource)source isWindow:(BOOL)isWindow {
|
||||
if (self = [super init]) {
|
||||
_source = source;
|
||||
_isWindow = isWindow;
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
@end
|
||||
|
||||
|
||||
@interface DesktopCaptureSourceScopeMac ()
-(tgcalls::DesktopCaptureSourceData)getData;
-(tgcalls::DesktopCaptureSource)getSource;
@end

@implementation DesktopCaptureSourceScopeMac

-(id)initWithSource:(DesktopCaptureSourceMac *)source data:(DesktopCaptureSourceDataMac *)data {
    if (self = [super init]) {
        _data = data;
        _source = source;
    }
    return self;
}

-(NSString *)cachedKey {
    return [[NSString alloc] initWithFormat:@"%@:%@", _source.uniqueKey, _data.cachedKey];
}

-(tgcalls::DesktopCaptureSourceData)getData {
    tgcalls::DesktopCaptureSourceData data{
        /*.aspectSize = */{ (int)_data.aspectSize.width, (int)_data.aspectSize.height },
        /*.fps = */_data.fps,
        /*.captureMouse = */_data.captureMouse,
    };
    return data;
}

-(tgcalls::DesktopCaptureSource)getSource {
    return [_source getSource];
}

@end
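
// Enumerates the available sources of one kind (screens or windows) and caches
// one DesktopCaptureSourceHelper per scope, so preview views, start and stop
// all drive the same capturer.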
@implementation DesktopCaptureSourceManagerMac
{
    std::map<std::string, tgcalls::DesktopCaptureSourceHelper> _cached;
    std::unique_ptr<tgcalls::DesktopCaptureSourceManager> _manager;
    BOOL _isWindow;
}

-(instancetype)init_s {
    if (self = [super init]) {
        _manager = std::make_unique<tgcalls::DesktopCaptureSourceManager>(tgcalls::DesktopCaptureType::Screen);
        _isWindow = NO;
    }
    return self;
}

-(instancetype)init_w {
    if (self = [super init]) {
        _manager = std::make_unique<tgcalls::DesktopCaptureSourceManager>(tgcalls::DesktopCaptureType::Window);
        _isWindow = YES;
    }
    return self;
}

-(NSArray<DesktopCaptureSourceMac *> *)list {
    std::vector<tgcalls::DesktopCaptureSource> sources = _manager->sources();
    NSMutableArray<DesktopCaptureSourceMac *> *macSources = [[NSMutableArray alloc] init];
    for (auto i = sources.begin(); i != sources.end(); ++i) {
        [macSources addObject:[[DesktopCaptureSourceMac alloc] initWithSource:*i isWindow:_isWindow]];
    }
    return macSources;
}
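
// Returns a preview view bound to the scope's capture helper, creating and
// caching the helper on first use.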
-(NSView *)createForScope:(DesktopCaptureSourceScopeMac *)scope {
    auto i = _cached.find(std::string([scope.cachedKey UTF8String]));
    if (i == _cached.end()) {
        i = _cached.emplace(
            std::string([scope.cachedKey UTF8String]),
            tgcalls::DesktopCaptureSourceHelper([scope getSource], [scope getData])).first;
    }

    DesktopCaptureSourceViewMetal *view = [[DesktopCaptureSourceViewMetal alloc] initWithHelper:i->second];
    if (scope.data.captureMouse) {
        [view setVideoContentMode:kCAGravityResizeAspect];
    }
    return view;
}

-(void)start:(DesktopCaptureSourceScopeMac *)scope {
    const auto i = _cached.find(std::string([scope.cachedKey UTF8String]));
    if (i != _cached.end()) {
        i->second.start();
    }
}

-(void)stop:(DesktopCaptureSourceScopeMac *)scope {
    const auto i = _cached.find(std::string([scope.cachedKey UTF8String]));
    if (i != _cached.end()) {
        i->second.stop();
    }
}
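
// Stop every cached capture helper before releasing the native manager.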
-(void)dealloc {
    for (auto &[key, helper] : _cached) {
        helper.stop();
    }
    _manager.reset();
}

@end
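
// Usage sketch (illustrative only, not part of the original file): enumerate
// screens and embed a live preview of the first one. `init_s` builds a screen
// manager; use `init_w` for windows. The size/fps values here are arbitrary.
//
//   DesktopCaptureSourceManagerMac *manager = [[DesktopCaptureSourceManagerMac alloc] init_s];
//   DesktopCaptureSourceMac *source = manager.list.firstObject;
//   DesktopCaptureSourceDataMac *data = [[DesktopCaptureSourceDataMac alloc] initWithSize:CGSizeMake(1280, 720) fps:30 captureMouse:false];
//   DesktopCaptureSourceScopeMac *scope = [[DesktopCaptureSourceScopeMac alloc] initWithSource:source data:data];
//   NSView *preview = [manager createForScope:scope];
//   [manager start:scope];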

@ -0,0 +1,27 @@
#ifndef SCREEN_CAPTURER_H
#define SCREEN_CAPTURER_H
#ifndef WEBRTC_IOS
#import <Foundation/Foundation.h>

#import "api/video/video_sink_interface.h"
#import "api/media_stream_interface.h"
#import "rtc_base/time_utils.h"

#import "sdk/objc/native/src/objc_video_track_source.h"
#import "sdk/objc/native/src/objc_frame_buffer.h"
#import "pc/video_track_source_proxy.h"
#import "tgcalls/platform/darwin/VideoCameraCapturerMac.h"
#import "tgcalls/desktop_capturer/DesktopCaptureSource.h"
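
// Capturer that feeds desktop frames into a WebRTC video track. It adopts
// CapturerInterface, so it can be driven through the same interface as the
// other capturers.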
@interface DesktopSharingCapturer : NSObject<CapturerInterface>
- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)trackSource captureSource:(tgcalls::DesktopCaptureSource)captureSource;

@end

#endif //WEBRTC_IOS
#endif

@ -0,0 +1,117 @@
#import "DesktopSharingCapturer.h"
|
||||
|
||||
#include "modules/desktop_capture/mac/screen_capturer_mac.h"
|
||||
#include "modules/desktop_capture/desktop_and_cursor_composer.h"
|
||||
#include "modules/desktop_capture/desktop_capturer_differ_wrapper.h"
|
||||
#include "third_party/libyuv/include/libyuv.h"
|
||||
#include "api/video/i420_buffer.h"
|
||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "third_party/libyuv/include/libyuv.h"
|
||||
|
||||
#include "tgcalls/desktop_capturer/DesktopCaptureSource.h"
|
||||
#include "tgcalls/desktop_capturer/DesktopCaptureSourceHelper.h"
|
||||
#include "tgcalls/desktop_capturer/DesktopCaptureSourceManager.h"
|
||||
#include "DarwinVideoSource.h"
|
||||
|
||||
#import "helpers/RTCDispatcher+Private.h"
|
||||
#import <QuartzCore/QuartzCore.h>
|
||||
|
||||
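
// Wraps a native webrtc::VideoFrame in an RTCVideoFrame, reporting the frame's
// rotation through the out-parameter.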
static RTCVideoFrame *customToObjCVideoFrame(const webrtc::VideoFrame &frame, RTCVideoRotation &rotation) {
    rotation = RTCVideoRotation(frame.rotation());
    RTCVideoFrame *videoFrame =
        [[RTCVideoFrame alloc] initWithBuffer:webrtc::ToObjCVideoFrameBuffer(frame.video_frame_buffer())
                                     rotation:rotation
                                  timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
    videoFrame.timeStamp = frame.timestamp();

    return videoFrame;
}
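
// The track source is handed out behind a VideoTrackSourceProxy; unwrap it to
// reach the underlying DarwinVideoTrackSource.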
static tgcalls::DarwinVideoTrackSource *getObjCVideoSource(const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
    webrtc::VideoTrackSourceProxy *proxy_source =
        static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
    return static_cast<tgcalls::DarwinVideoTrackSource *>(proxy_source->internal());
}
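
// Video-sink adapter: converts each incoming native frame to an RTCVideoFrame
// and forwards it, together with its display size and rotation, to an Obj-C block.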
class RendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
    RendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation)) {
        _frameReceived = [frameReceived copy];
    }

    void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
        RTCVideoRotation rotation = RTCVideoRotation_0;
        RTCVideoFrame* videoFrame = customToObjCVideoFrame(nativeVideoFrame, rotation);

        // Swap width and height for 90/270-degree rotations so the reported
        // size matches the upright frame.
        CGSize currentSize = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);

        if (_frameReceived) {
            _frameReceived(currentSize, videoFrame, rotation);
        }
    }

private:
    void (^_frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation);
};
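
// The capturer owns one DesktopCaptureSourceHelper (`renderer`) plus the sink
// adapter that pipes its frames into the WebRTC track source.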
@implementation DesktopSharingCapturer {
    absl::optional<tgcalls::DesktopCaptureSourceHelper> renderer;
    std::shared_ptr<RendererAdapterImpl> _sink;
    BOOL _isPaused;
}
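
// Capture runs at a fixed 1920x1080 at 25 fps with the cursor composited in.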
- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)trackSource captureSource:(tgcalls::DesktopCaptureSource)captureSource {
    self = [super init];
    if (self != nil) {
        _sink.reset(new RendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame, RTCVideoRotation rotation) {
            getObjCVideoSource(trackSource)->OnCapturedFrame(videoFrame);
        }));

        tgcalls::DesktopCaptureSourceData data{
            /*.aspectSize = */{ 1920, 1080 },
            /*.fps = */25,
            /*.captureMouse = */true,
        };
        renderer.emplace(captureSource, data);
        renderer->setOutput(_sink);
    }
    return self;
}

-(void)setOnFatalError:(std::function<void ()>)error {
    renderer->setOnFatalError(error);
}

-(void)setOnPause:(std::function<void (bool)>)pause {
    renderer->setOnPause(pause);
}

-(void)start {
    renderer->start();
}

-(void)stop {
    renderer->stop();
}
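
// Pausing maps to stopping the underlying capture; act only on actual
// state transitions.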
- (void)setIsEnabled:(bool)isEnabled {
    BOOL updated = _isPaused != !isEnabled;
    _isPaused = !isEnabled;
    if (updated) {
        if (isEnabled) {
            renderer->start();
        } else {
            renderer->stop();
        }
    }
}

// No-op: a preferred aspect ratio is not applied to desktop capture.
- (void)setPreferredCaptureAspectRatio:(float)aspectRatio {
}

- (void)setUncroppedSink:(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink {
    renderer->setSecondaryOutput(sink);
}

@end