Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

media/engine/adm_helpers.cc
@@ -0,0 +1,82 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/adm_helpers.h"
#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace adm_helpers {
// On Windows Vista and newer, Microsoft introduced the concept of "Default
// Communications Device". This means that there are two types of default
// devices (old Wave Audio style default and Default Communications Device).
//
// On Windows systems which only support the Wave Audio style default, use
// either -1 or 0 to select the default device.
//
// Using a #define for AUDIO_DEVICE since we will call *different* versions of
// the ADM functions, depending on the ID type.
#if defined(WEBRTC_WIN)
#define AUDIO_DEVICE_ID \
(AudioDeviceModule::WindowsDeviceType::kDefaultCommunicationDevice)
#else
#define AUDIO_DEVICE_ID (0u)
#endif // defined(WEBRTC_WIN)
void Init(AudioDeviceModule* adm) {
RTC_DCHECK(adm);
RTC_CHECK_EQ(0, adm->Init()) << "Failed to initialize the ADM.";
// Playout device.
{
if (adm->SetPlayoutDevice(AUDIO_DEVICE_ID) != 0) {
RTC_LOG(LS_ERROR) << "Unable to set playout device.";
return;
}
if (adm->InitSpeaker() != 0) {
RTC_LOG(LS_ERROR) << "Unable to access speaker.";
}
// Set number of channels
bool available = false;
if (adm->StereoPlayoutIsAvailable(&available) != 0) {
RTC_LOG(LS_ERROR) << "Failed to query stereo playout.";
}
if (adm->SetStereoPlayout(available) != 0) {
RTC_LOG(LS_ERROR) << "Failed to set stereo playout mode.";
}
}
// Recording device.
{
if (adm->SetRecordingDevice(AUDIO_DEVICE_ID) != 0) {
RTC_LOG(LS_ERROR) << "Unable to set recording device.";
return;
}
if (adm->InitMicrophone() != 0) {
RTC_LOG(LS_ERROR) << "Unable to access microphone.";
}
// Set number of channels
bool available = false;
if (adm->StereoRecordingIsAvailable(&available) != 0) {
RTC_LOG(LS_ERROR) << "Failed to query stereo recording.";
}
if (adm->SetStereoRecording(available) != 0) {
RTC_LOG(LS_ERROR) << "Failed to set stereo recording mode.";
}
}
}
} // namespace adm_helpers
} // namespace webrtc
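
A minimal usage sketch (not part of this commit): how a client might drive adm_helpers::Init(), assuming the stock WebRTC helpers AudioDeviceModule::Create() and CreateDefaultTaskQueueFactory() are available:

#include <memory>

#include "api/task_queue/default_task_queue_factory.h"
#include "media/engine/adm_helpers.h"
#include "modules/audio_device/include/audio_device.h"

void SetUpDefaultAudioDevices() {
  std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
      webrtc::CreateDefaultTaskQueueFactory();
  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::AudioDeviceModule::Create(
          webrtc::AudioDeviceModule::kPlatformDefaultAudio,
          task_queue_factory.get());
  // Selects the default (communications) device and enables stereo playout
  // and recording where the hardware reports them as available.
  webrtc::adm_helpers::Init(adm.get());
}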

media/engine/adm_helpers.h
@@ -0,0 +1,25 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_ADM_HELPERS_H_
#define MEDIA_ENGINE_ADM_HELPERS_H_
namespace webrtc {
class AudioDeviceModule;
namespace adm_helpers {
void Init(AudioDeviceModule* adm);
} // namespace adm_helpers
} // namespace webrtc
#endif // MEDIA_ENGINE_ADM_HELPERS_H_

media/engine/fake_video_codec_factory.cc
@@ -0,0 +1,69 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/fake_video_codec_factory.h"
#include <memory>
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "test/fake_decoder.h"
#include "test/fake_encoder.h"
namespace {
static const char kFakeCodecFactoryCodecName[] = "FakeCodec";
} // anonymous namespace
namespace webrtc {
FakeVideoEncoderFactory::FakeVideoEncoderFactory() = default;
// static
std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder() {
return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
}
std::vector<SdpVideoFormat> FakeVideoEncoderFactory::GetSupportedFormats()
const {
return std::vector<SdpVideoFormat>(
1, SdpVideoFormat(kFakeCodecFactoryCodecName));
}
std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder(
const SdpVideoFormat& format) {
return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
}
FakeVideoDecoderFactory::FakeVideoDecoderFactory() = default;
// static
std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::CreateVideoDecoder() {
return std::make_unique<test::FakeDecoder>();
}
std::vector<SdpVideoFormat> FakeVideoDecoderFactory::GetSupportedFormats()
const {
return std::vector<SdpVideoFormat>(
1, SdpVideoFormat(kFakeCodecFactoryCodecName));
}
std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::CreateVideoDecoder(
const SdpVideoFormat& format) {
return std::make_unique<test::FakeDecoder>();
}
} // namespace webrtc
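
A short test-side sketch (not part of this commit): the fake factory advertises a single "FakeCodec" format, and any advertised format can be passed back to create an encoder:

#include <memory>
#include <vector>

#include "media/engine/fake_video_codec_factory.h"

void CreateFakeEncoderForTest() {
  webrtc::FakeVideoEncoderFactory encoder_factory;
  std::vector<webrtc::SdpVideoFormat> formats =
      encoder_factory.GetSupportedFormats();  // One entry: "FakeCodec".
  std::unique_ptr<webrtc::VideoEncoder> encoder =
      encoder_factory.CreateVideoEncoder(formats[0]);
}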

media/engine/fake_video_codec_factory.h
@@ -0,0 +1,53 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
#define MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
#include <memory>
#include <vector>
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Provides a fake video encoder instance that produces frames large enough for
// the given bitrate constraints.
class RTC_EXPORT FakeVideoEncoderFactory : public VideoEncoderFactory {
public:
FakeVideoEncoderFactory();
static std::unique_ptr<VideoEncoder> CreateVideoEncoder();
// VideoEncoderFactory implementation
std::vector<SdpVideoFormat> GetSupportedFormats() const override;
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
const SdpVideoFormat& format) override;
};
// Provides a fake video decoder instance that ignores the given bitstream and
// produces frames.
class RTC_EXPORT FakeVideoDecoderFactory : public VideoDecoderFactory {
public:
FakeVideoDecoderFactory();
static std::unique_ptr<VideoDecoder> CreateVideoDecoder();
// VideoDecoderFactory implementation
std::vector<SdpVideoFormat> GetSupportedFormats() const override;
std::unique_ptr<VideoDecoder> CreateVideoDecoder(
const SdpVideoFormat& format) override;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_

media/engine/fake_webrtc_call.cc
@@ -0,0 +1,763 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/fake_webrtc_call.h"
#include <cstdint>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "api/call/audio_sink.h"
#include "api/units/timestamp.h"
#include "call/packet_receiver.h"
#include "media/base/media_channel.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/checks.h"
#include "rtc_base/gunit.h"
#include "rtc_base/thread.h"
#include "video/config/encoder_stream_factory.h"
namespace cricket {
using ::webrtc::ParseRtpSsrc;
FakeAudioSendStream::FakeAudioSendStream(
int id,
const webrtc::AudioSendStream::Config& config)
: id_(id), config_(config) {}
void FakeAudioSendStream::Reconfigure(
const webrtc::AudioSendStream::Config& config,
webrtc::SetParametersCallback callback) {
config_ = config;
webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
}
const webrtc::AudioSendStream::Config& FakeAudioSendStream::GetConfig() const {
return config_;
}
void FakeAudioSendStream::SetStats(
const webrtc::AudioSendStream::Stats& stats) {
stats_ = stats;
}
FakeAudioSendStream::TelephoneEvent
FakeAudioSendStream::GetLatestTelephoneEvent() const {
return latest_telephone_event_;
}
bool FakeAudioSendStream::SendTelephoneEvent(int payload_type,
int payload_frequency,
int event,
int duration_ms) {
latest_telephone_event_.payload_type = payload_type;
latest_telephone_event_.payload_frequency = payload_frequency;
latest_telephone_event_.event_code = event;
latest_telephone_event_.duration_ms = duration_ms;
return true;
}
void FakeAudioSendStream::SetMuted(bool muted) {
muted_ = muted;
}
webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats() const {
return stats_;
}
webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats(
bool /*has_remote_tracks*/) const {
return stats_;
}
FakeAudioReceiveStream::FakeAudioReceiveStream(
int id,
const webrtc::AudioReceiveStreamInterface::Config& config)
: id_(id), config_(config) {}
const webrtc::AudioReceiveStreamInterface::Config&
FakeAudioReceiveStream::GetConfig() const {
return config_;
}
void FakeAudioReceiveStream::SetStats(
const webrtc::AudioReceiveStreamInterface::Stats& stats) {
stats_ = stats;
}
bool FakeAudioReceiveStream::VerifyLastPacket(const uint8_t* data,
size_t length) const {
return last_packet_ == rtc::Buffer(data, length);
}
bool FakeAudioReceiveStream::DeliverRtp(const uint8_t* packet,
size_t length,
int64_t /* packet_time_us */) {
++received_packets_;
last_packet_.SetData(packet, length);
return true;
}
void FakeAudioReceiveStream::SetDepacketizerToDecoderFrameTransformer(
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
config_.frame_transformer = std::move(frame_transformer);
}
void FakeAudioReceiveStream::SetDecoderMap(
std::map<int, webrtc::SdpAudioFormat> decoder_map) {
config_.decoder_map = std::move(decoder_map);
}
void FakeAudioReceiveStream::SetNackHistory(int history_ms) {
config_.rtp.nack.rtp_history_ms = history_ms;
}
void FakeAudioReceiveStream::SetNonSenderRttMeasurement(bool enabled) {
config_.enable_non_sender_rtt = enabled;
}
void FakeAudioReceiveStream::SetFrameDecryptor(
rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
config_.frame_decryptor = std::move(frame_decryptor);
}
webrtc::AudioReceiveStreamInterface::Stats FakeAudioReceiveStream::GetStats(
bool get_and_clear_legacy_stats) const {
return stats_;
}
void FakeAudioReceiveStream::SetSink(webrtc::AudioSinkInterface* sink) {
sink_ = sink;
}
void FakeAudioReceiveStream::SetGain(float gain) {
gain_ = gain;
}
FakeVideoSendStream::FakeVideoSendStream(
webrtc::VideoSendStream::Config config,
webrtc::VideoEncoderConfig encoder_config)
: sending_(false),
config_(std::move(config)),
codec_settings_set_(false),
resolution_scaling_enabled_(false),
framerate_scaling_enabled_(false),
source_(nullptr),
num_swapped_frames_(0) {
RTC_DCHECK(config_.encoder_settings.encoder_factory != nullptr);
RTC_DCHECK(config_.encoder_settings.bitrate_allocator_factory != nullptr);
ReconfigureVideoEncoder(std::move(encoder_config));
}
FakeVideoSendStream::~FakeVideoSendStream() {
if (source_)
source_->RemoveSink(this);
}
const webrtc::VideoSendStream::Config& FakeVideoSendStream::GetConfig() const {
return config_;
}
const webrtc::VideoEncoderConfig& FakeVideoSendStream::GetEncoderConfig()
const {
return encoder_config_;
}
const std::vector<webrtc::VideoStream>& FakeVideoSendStream::GetVideoStreams()
const {
return video_streams_;
}
bool FakeVideoSendStream::IsSending() const {
return sending_;
}
bool FakeVideoSendStream::GetVp8Settings(
webrtc::VideoCodecVP8* settings) const {
if (!codec_settings_set_) {
return false;
}
*settings = codec_specific_settings_.vp8;
return true;
}
bool FakeVideoSendStream::GetVp9Settings(
webrtc::VideoCodecVP9* settings) const {
if (!codec_settings_set_) {
return false;
}
*settings = codec_specific_settings_.vp9;
return true;
}
bool FakeVideoSendStream::GetH264Settings(
webrtc::VideoCodecH264* settings) const {
if (!codec_settings_set_) {
return false;
}
*settings = codec_specific_settings_.h264;
return true;
}
bool FakeVideoSendStream::GetAv1Settings(
webrtc::VideoCodecAV1* settings) const {
if (!codec_settings_set_) {
return false;
}
*settings = codec_specific_settings_.av1;
return true;
}
int FakeVideoSendStream::GetNumberOfSwappedFrames() const {
return num_swapped_frames_;
}
int FakeVideoSendStream::GetLastWidth() const {
return last_frame_->width();
}
int FakeVideoSendStream::GetLastHeight() const {
return last_frame_->height();
}
int64_t FakeVideoSendStream::GetLastTimestamp() const {
RTC_DCHECK(last_frame_->ntp_time_ms() == 0);
return last_frame_->render_time_ms();
}
void FakeVideoSendStream::OnFrame(const webrtc::VideoFrame& frame) {
++num_swapped_frames_;
if (!last_frame_ || frame.width() != last_frame_->width() ||
frame.height() != last_frame_->height() ||
frame.rotation() != last_frame_->rotation()) {
if (encoder_config_.video_stream_factory) {
// Note: only tests set their own EncoderStreamFactory...
video_streams_ =
encoder_config_.video_stream_factory->CreateEncoderStreams(
frame.width(), frame.height(), encoder_config_);
} else {
webrtc::VideoEncoder::EncoderInfo encoder_info;
rtc::scoped_refptr<
webrtc::VideoEncoderConfig::VideoStreamFactoryInterface>
factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
encoder_config_.video_format.name, encoder_config_.max_qp,
encoder_config_.content_type ==
webrtc::VideoEncoderConfig::ContentType::kScreen,
encoder_config_.legacy_conference_mode, encoder_info);
video_streams_ = factory->CreateEncoderStreams(
frame.width(), frame.height(), encoder_config_);
}
}
last_frame_ = frame;
}
void FakeVideoSendStream::SetStats(
const webrtc::VideoSendStream::Stats& stats) {
stats_ = stats;
}
webrtc::VideoSendStream::Stats FakeVideoSendStream::GetStats() {
return stats_;
}
void FakeVideoSendStream::ReconfigureVideoEncoder(
webrtc::VideoEncoderConfig config) {
ReconfigureVideoEncoder(std::move(config), nullptr);
}
void FakeVideoSendStream::ReconfigureVideoEncoder(
webrtc::VideoEncoderConfig config,
webrtc::SetParametersCallback callback) {
int width, height;
if (last_frame_) {
width = last_frame_->width();
height = last_frame_->height();
} else {
width = height = 0;
}
if (config.video_stream_factory) {
// Note: only tests set their own EncoderStreamFactory...
video_streams_ = config.video_stream_factory->CreateEncoderStreams(
width, height, config);
} else {
webrtc::VideoEncoder::EncoderInfo encoder_info;
rtc::scoped_refptr<webrtc::VideoEncoderConfig::VideoStreamFactoryInterface>
factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
config.video_format.name, config.max_qp,
config.content_type ==
webrtc::VideoEncoderConfig::ContentType::kScreen,
config.legacy_conference_mode, encoder_info);
video_streams_ = factory->CreateEncoderStreams(width, height, config);
}
if (config.encoder_specific_settings != nullptr) {
const unsigned char num_temporal_layers = static_cast<unsigned char>(
video_streams_.back().num_temporal_layers.value_or(1));
if (config_.rtp.payload_name == "VP8") {
config.encoder_specific_settings->FillVideoCodecVp8(
&codec_specific_settings_.vp8);
if (!video_streams_.empty()) {
codec_specific_settings_.vp8.numberOfTemporalLayers =
num_temporal_layers;
}
} else if (config_.rtp.payload_name == "VP9") {
config.encoder_specific_settings->FillVideoCodecVp9(
&codec_specific_settings_.vp9);
if (!video_streams_.empty()) {
codec_specific_settings_.vp9.numberOfTemporalLayers =
num_temporal_layers;
}
} else if (config_.rtp.payload_name == "H264") {
codec_specific_settings_.h264.numberOfTemporalLayers =
num_temporal_layers;
} else if (config_.rtp.payload_name == "AV1") {
config.encoder_specific_settings->FillVideoCodecAv1(
&codec_specific_settings_.av1);
} else {
ADD_FAILURE() << "Unsupported encoder payload: "
<< config_.rtp.payload_name;
}
}
codec_settings_set_ = config.encoder_specific_settings != nullptr;
encoder_config_ = std::move(config);
++num_encoder_reconfigurations_;
webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
}
void FakeVideoSendStream::Start() {
sending_ = true;
}
void FakeVideoSendStream::Stop() {
sending_ = false;
}
void FakeVideoSendStream::AddAdaptationResource(
rtc::scoped_refptr<webrtc::Resource> resource) {}
std::vector<rtc::scoped_refptr<webrtc::Resource>>
FakeVideoSendStream::GetAdaptationResources() {
return {};
}
void FakeVideoSendStream::SetSource(
rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
const webrtc::DegradationPreference& degradation_preference) {
if (source_)
source_->RemoveSink(this);
source_ = source;
switch (degradation_preference) {
case webrtc::DegradationPreference::MAINTAIN_FRAMERATE:
resolution_scaling_enabled_ = true;
framerate_scaling_enabled_ = false;
break;
case webrtc::DegradationPreference::MAINTAIN_RESOLUTION:
resolution_scaling_enabled_ = false;
framerate_scaling_enabled_ = true;
break;
case webrtc::DegradationPreference::BALANCED:
resolution_scaling_enabled_ = true;
framerate_scaling_enabled_ = true;
break;
case webrtc::DegradationPreference::DISABLED:
resolution_scaling_enabled_ = false;
framerate_scaling_enabled_ = false;
break;
}
if (source)
source->AddOrUpdateSink(this, resolution_scaling_enabled_
? sink_wants_
: rtc::VideoSinkWants());
}
void FakeVideoSendStream::GenerateKeyFrame(
const std::vector<std::string>& rids) {
keyframes_requested_by_rid_ = rids;
}
void FakeVideoSendStream::InjectVideoSinkWants(
const rtc::VideoSinkWants& wants) {
sink_wants_ = wants;
source_->AddOrUpdateSink(this, wants);
}
FakeVideoReceiveStream::FakeVideoReceiveStream(
webrtc::VideoReceiveStreamInterface::Config config)
: config_(std::move(config)), receiving_(false) {}
const webrtc::VideoReceiveStreamInterface::Config&
FakeVideoReceiveStream::GetConfig() const {
return config_;
}
bool FakeVideoReceiveStream::IsReceiving() const {
return receiving_;
}
void FakeVideoReceiveStream::InjectFrame(const webrtc::VideoFrame& frame) {
config_.renderer->OnFrame(frame);
}
webrtc::VideoReceiveStreamInterface::Stats FakeVideoReceiveStream::GetStats()
const {
return stats_;
}
void FakeVideoReceiveStream::Start() {
receiving_ = true;
}
void FakeVideoReceiveStream::Stop() {
receiving_ = false;
}
void FakeVideoReceiveStream::SetStats(
const webrtc::VideoReceiveStreamInterface::Stats& stats) {
stats_ = stats;
}
FakeFlexfecReceiveStream::FakeFlexfecReceiveStream(
const webrtc::FlexfecReceiveStream::Config config)
: config_(std::move(config)) {}
const webrtc::FlexfecReceiveStream::Config&
FakeFlexfecReceiveStream::GetConfig() const {
return config_;
}
void FakeFlexfecReceiveStream::OnRtpPacket(const webrtc::RtpPacketReceived&) {
RTC_DCHECK_NOTREACHED() << "Not implemented.";
}
FakeCall::FakeCall(webrtc::test::ScopedKeyValueConfig* field_trials)
: FakeCall(rtc::Thread::Current(), rtc::Thread::Current(), field_trials) {}
FakeCall::FakeCall(webrtc::TaskQueueBase* worker_thread,
webrtc::TaskQueueBase* network_thread,
webrtc::test::ScopedKeyValueConfig* field_trials)
: network_thread_(network_thread),
worker_thread_(worker_thread),
audio_network_state_(webrtc::kNetworkUp),
video_network_state_(webrtc::kNetworkUp),
num_created_send_streams_(0),
num_created_receive_streams_(0),
trials_(field_trials ? field_trials : &fallback_trials_) {}
FakeCall::~FakeCall() {
EXPECT_EQ(0u, video_send_streams_.size());
EXPECT_EQ(0u, audio_send_streams_.size());
EXPECT_EQ(0u, video_receive_streams_.size());
EXPECT_EQ(0u, audio_receive_streams_.size());
}
const std::vector<FakeVideoSendStream*>& FakeCall::GetVideoSendStreams() {
return video_send_streams_;
}
const std::vector<FakeVideoReceiveStream*>& FakeCall::GetVideoReceiveStreams() {
return video_receive_streams_;
}
const FakeVideoReceiveStream* FakeCall::GetVideoReceiveStream(uint32_t ssrc) {
for (const auto* p : GetVideoReceiveStreams()) {
if (p->GetConfig().rtp.remote_ssrc == ssrc) {
return p;
}
}
return nullptr;
}
const std::vector<FakeAudioSendStream*>& FakeCall::GetAudioSendStreams() {
return audio_send_streams_;
}
const FakeAudioSendStream* FakeCall::GetAudioSendStream(uint32_t ssrc) {
for (const auto* p : GetAudioSendStreams()) {
if (p->GetConfig().rtp.ssrc == ssrc) {
return p;
}
}
return nullptr;
}
const std::vector<FakeAudioReceiveStream*>& FakeCall::GetAudioReceiveStreams() {
return audio_receive_streams_;
}
const FakeAudioReceiveStream* FakeCall::GetAudioReceiveStream(uint32_t ssrc) {
for (const auto* p : GetAudioReceiveStreams()) {
if (p->GetConfig().rtp.remote_ssrc == ssrc) {
return p;
}
}
return nullptr;
}
const std::vector<FakeFlexfecReceiveStream*>&
FakeCall::GetFlexfecReceiveStreams() {
return flexfec_receive_streams_;
}
webrtc::NetworkState FakeCall::GetNetworkState(webrtc::MediaType media) const {
switch (media) {
case webrtc::MediaType::AUDIO:
return audio_network_state_;
case webrtc::MediaType::VIDEO:
return video_network_state_;
case webrtc::MediaType::DATA:
case webrtc::MediaType::ANY:
ADD_FAILURE() << "GetNetworkState called with unknown parameter.";
return webrtc::kNetworkDown;
}
// Even though all the values for the enum class are listed above, the
// compiler will emit a warning as the method may be called with a value
// outside of the valid enum range, unless this case is also handled.
ADD_FAILURE() << "GetNetworkState called with unknown parameter.";
return webrtc::kNetworkDown;
}
webrtc::AudioSendStream* FakeCall::CreateAudioSendStream(
const webrtc::AudioSendStream::Config& config) {
FakeAudioSendStream* fake_stream =
new FakeAudioSendStream(next_stream_id_++, config);
audio_send_streams_.push_back(fake_stream);
++num_created_send_streams_;
return fake_stream;
}
void FakeCall::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) {
auto it = absl::c_find(audio_send_streams_,
static_cast<FakeAudioSendStream*>(send_stream));
if (it == audio_send_streams_.end()) {
ADD_FAILURE() << "DestroyAudioSendStream called with unknown parameter.";
} else {
delete *it;
audio_send_streams_.erase(it);
}
}
webrtc::AudioReceiveStreamInterface* FakeCall::CreateAudioReceiveStream(
const webrtc::AudioReceiveStreamInterface::Config& config) {
audio_receive_streams_.push_back(
new FakeAudioReceiveStream(next_stream_id_++, config));
++num_created_receive_streams_;
return audio_receive_streams_.back();
}
void FakeCall::DestroyAudioReceiveStream(
webrtc::AudioReceiveStreamInterface* receive_stream) {
auto it = absl::c_find(audio_receive_streams_,
static_cast<FakeAudioReceiveStream*>(receive_stream));
if (it == audio_receive_streams_.end()) {
ADD_FAILURE() << "DestroyAudioReceiveStream called with unknown parameter.";
} else {
delete *it;
audio_receive_streams_.erase(it);
}
}
webrtc::VideoSendStream* FakeCall::CreateVideoSendStream(
webrtc::VideoSendStream::Config config,
webrtc::VideoEncoderConfig encoder_config) {
FakeVideoSendStream* fake_stream =
new FakeVideoSendStream(std::move(config), std::move(encoder_config));
video_send_streams_.push_back(fake_stream);
++num_created_send_streams_;
return fake_stream;
}
void FakeCall::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) {
auto it = absl::c_find(video_send_streams_,
static_cast<FakeVideoSendStream*>(send_stream));
if (it == video_send_streams_.end()) {
ADD_FAILURE() << "DestroyVideoSendStream called with unknown parameter.";
} else {
delete *it;
video_send_streams_.erase(it);
}
}
webrtc::VideoReceiveStreamInterface* FakeCall::CreateVideoReceiveStream(
webrtc::VideoReceiveStreamInterface::Config config) {
video_receive_streams_.push_back(
new FakeVideoReceiveStream(std::move(config)));
++num_created_receive_streams_;
return video_receive_streams_.back();
}
void FakeCall::DestroyVideoReceiveStream(
webrtc::VideoReceiveStreamInterface* receive_stream) {
auto it = absl::c_find(video_receive_streams_,
static_cast<FakeVideoReceiveStream*>(receive_stream));
if (it == video_receive_streams_.end()) {
ADD_FAILURE() << "DestroyVideoReceiveStream called with unknown parameter.";
} else {
delete *it;
video_receive_streams_.erase(it);
}
}
webrtc::FlexfecReceiveStream* FakeCall::CreateFlexfecReceiveStream(
const webrtc::FlexfecReceiveStream::Config config) {
FakeFlexfecReceiveStream* fake_stream =
new FakeFlexfecReceiveStream(std::move(config));
flexfec_receive_streams_.push_back(fake_stream);
++num_created_receive_streams_;
return fake_stream;
}
void FakeCall::DestroyFlexfecReceiveStream(
webrtc::FlexfecReceiveStream* receive_stream) {
auto it =
absl::c_find(flexfec_receive_streams_,
static_cast<FakeFlexfecReceiveStream*>(receive_stream));
if (it == flexfec_receive_streams_.end()) {
ADD_FAILURE()
<< "DestroyFlexfecReceiveStream called with unknown parameter.";
} else {
delete *it;
flexfec_receive_streams_.erase(it);
}
}
void FakeCall::AddAdaptationResource(
rtc::scoped_refptr<webrtc::Resource> resource) {}
webrtc::PacketReceiver* FakeCall::Receiver() {
return this;
}
void FakeCall::DeliverRtpPacket(
webrtc::MediaType media_type,
webrtc::RtpPacketReceived packet,
OnUndemuxablePacketHandler undemuxable_packet_handler) {
if (!DeliverPacketInternal(media_type, packet.Ssrc(), packet.Buffer(),
packet.arrival_time())) {
if (undemuxable_packet_handler(packet)) {
DeliverPacketInternal(media_type, packet.Ssrc(), packet.Buffer(),
packet.arrival_time());
}
}
last_received_rtp_packet_ = packet;
}
bool FakeCall::DeliverPacketInternal(webrtc::MediaType media_type,
uint32_t ssrc,
const rtc::CopyOnWriteBuffer& packet,
webrtc::Timestamp arrival_time) {
EXPECT_GE(packet.size(), 12u);
RTC_DCHECK(arrival_time.IsFinite());
RTC_DCHECK(media_type == webrtc::MediaType::AUDIO ||
media_type == webrtc::MediaType::VIDEO);
if (media_type == webrtc::MediaType::VIDEO) {
for (auto receiver : video_receive_streams_) {
if (receiver->GetConfig().rtp.remote_ssrc == ssrc ||
receiver->GetConfig().rtp.rtx_ssrc == ssrc) {
++delivered_packets_by_ssrc_[ssrc];
return true;
}
}
}
if (media_type == webrtc::MediaType::AUDIO) {
for (auto receiver : audio_receive_streams_) {
if (receiver->GetConfig().rtp.remote_ssrc == ssrc) {
receiver->DeliverRtp(packet.cdata(), packet.size(), arrival_time.us());
++delivered_packets_by_ssrc_[ssrc];
return true;
}
}
}
return false;
}
void FakeCall::SetStats(const webrtc::Call::Stats& stats) {
stats_ = stats;
}
int FakeCall::GetNumCreatedSendStreams() const {
return num_created_send_streams_;
}
int FakeCall::GetNumCreatedReceiveStreams() const {
return num_created_receive_streams_;
}
webrtc::Call::Stats FakeCall::GetStats() const {
return stats_;
}
webrtc::TaskQueueBase* FakeCall::network_thread() const {
return network_thread_;
}
webrtc::TaskQueueBase* FakeCall::worker_thread() const {
return worker_thread_;
}
void FakeCall::SignalChannelNetworkState(webrtc::MediaType media,
webrtc::NetworkState state) {
switch (media) {
case webrtc::MediaType::AUDIO:
audio_network_state_ = state;
break;
case webrtc::MediaType::VIDEO:
video_network_state_ = state;
break;
case webrtc::MediaType::DATA:
case webrtc::MediaType::ANY:
ADD_FAILURE()
<< "SignalChannelNetworkState called with unknown parameter.";
}
}
void FakeCall::OnAudioTransportOverheadChanged(
int transport_overhead_per_packet) {}
void FakeCall::OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
uint32_t local_ssrc) {
auto& fake_stream = static_cast<FakeAudioReceiveStream&>(stream);
fake_stream.SetLocalSsrc(local_ssrc);
}
void FakeCall::OnLocalSsrcUpdated(webrtc::VideoReceiveStreamInterface& stream,
uint32_t local_ssrc) {
auto& fake_stream = static_cast<FakeVideoReceiveStream&>(stream);
fake_stream.SetLocalSsrc(local_ssrc);
}
void FakeCall::OnLocalSsrcUpdated(webrtc::FlexfecReceiveStream& stream,
uint32_t local_ssrc) {
auto& fake_stream = static_cast<FakeFlexfecReceiveStream&>(stream);
fake_stream.SetLocalSsrc(local_ssrc);
}
void FakeCall::OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
absl::string_view sync_group) {
auto& fake_stream = static_cast<FakeAudioReceiveStream&>(stream);
fake_stream.SetSyncGroup(sync_group);
}
void FakeCall::OnSentPacket(const rtc::SentPacket& sent_packet) {
last_sent_packet_ = sent_packet;
if (sent_packet.packet_id >= 0) {
last_sent_nonnegative_packet_id_ = sent_packet.packet_id;
}
}
} // namespace cricket
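
A minimal gtest-style sketch (not part of this commit) of how FakeCall is typically exercised in tests; the SSRC value is arbitrary:

webrtc::test::ScopedKeyValueConfig field_trials;
cricket::FakeCall call(&field_trials);

webrtc::AudioSendStream::Config config(/*send_transport=*/nullptr);
config.rtp.ssrc = 123;
webrtc::AudioSendStream* stream = call.CreateAudioSendStream(config);
EXPECT_EQ(1, call.GetNumCreatedSendStreams());
EXPECT_EQ(stream, call.GetAudioSendStream(123));
// FakeCall's destructor EXPECTs that every stream has been destroyed.
call.DestroyAudioSendStream(stream);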

media/engine/fake_webrtc_call.h
@@ -0,0 +1,518 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains fake implementations, for use in unit tests, of the
// following classes:
//
// webrtc::Call
// webrtc::AudioSendStream
// webrtc::AudioReceiveStreamInterface
// webrtc::VideoSendStream
// webrtc::VideoReceiveStreamInterface
#ifndef MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
#define MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/video_frame.h"
#include "call/audio_receive_stream.h"
#include "call/audio_send_stream.h"
#include "call/call.h"
#include "call/flexfec_receive_stream.h"
#include "call/test/mock_rtp_transport_controller_send.h"
#include "call/video_receive_stream.h"
#include "call/video_send_stream.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/buffer.h"
#include "test/scoped_key_value_config.h"
namespace cricket {
class FakeAudioSendStream final : public webrtc::AudioSendStream {
public:
struct TelephoneEvent {
int payload_type = -1;
int payload_frequency = -1;
int event_code = 0;
int duration_ms = 0;
};
explicit FakeAudioSendStream(int id,
const webrtc::AudioSendStream::Config& config);
int id() const { return id_; }
const webrtc::AudioSendStream::Config& GetConfig() const override;
void SetStats(const webrtc::AudioSendStream::Stats& stats);
TelephoneEvent GetLatestTelephoneEvent() const;
bool IsSending() const { return sending_; }
bool muted() const { return muted_; }
private:
// webrtc::AudioSendStream implementation.
void Reconfigure(const webrtc::AudioSendStream::Config& config,
webrtc::SetParametersCallback callback) override;
void Start() override { sending_ = true; }
void Stop() override { sending_ = false; }
void SendAudioData(std::unique_ptr<webrtc::AudioFrame> audio_frame) override {
}
bool SendTelephoneEvent(int payload_type,
int payload_frequency,
int event,
int duration_ms) override;
void SetMuted(bool muted) override;
webrtc::AudioSendStream::Stats GetStats() const override;
webrtc::AudioSendStream::Stats GetStats(
bool has_remote_tracks) const override;
int id_ = -1;
TelephoneEvent latest_telephone_event_;
webrtc::AudioSendStream::Config config_;
webrtc::AudioSendStream::Stats stats_;
bool sending_ = false;
bool muted_ = false;
};
class FakeAudioReceiveStream final
: public webrtc::AudioReceiveStreamInterface {
public:
explicit FakeAudioReceiveStream(
int id,
const webrtc::AudioReceiveStreamInterface::Config& config);
int id() const { return id_; }
const webrtc::AudioReceiveStreamInterface::Config& GetConfig() const;
void SetStats(const webrtc::AudioReceiveStreamInterface::Stats& stats);
int received_packets() const { return received_packets_; }
bool VerifyLastPacket(const uint8_t* data, size_t length) const;
const webrtc::AudioSinkInterface* sink() const { return sink_; }
float gain() const { return gain_; }
bool DeliverRtp(const uint8_t* packet, size_t length, int64_t packet_time_us);
bool started() const { return started_; }
int base_mininum_playout_delay_ms() const {
return base_mininum_playout_delay_ms_;
}
void SetLocalSsrc(uint32_t local_ssrc) {
config_.rtp.local_ssrc = local_ssrc;
}
void SetSyncGroup(absl::string_view sync_group) {
config_.sync_group = std::string(sync_group);
}
uint32_t remote_ssrc() const override { return config_.rtp.remote_ssrc; }
void Start() override { started_ = true; }
void Stop() override { started_ = false; }
bool IsRunning() const override { return started_; }
void SetDepacketizerToDecoderFrameTransformer(
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override;
void SetDecoderMap(
std::map<int, webrtc::SdpAudioFormat> decoder_map) override;
void SetNackHistory(int history_ms) override;
void SetNonSenderRttMeasurement(bool enabled) override;
void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
frame_decryptor) override;
webrtc::AudioReceiveStreamInterface::Stats GetStats(
bool get_and_clear_legacy_stats) const override;
void SetSink(webrtc::AudioSinkInterface* sink) override;
void SetGain(float gain) override;
bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override {
base_mininum_playout_delay_ms_ = delay_ms;
return true;
}
int GetBaseMinimumPlayoutDelayMs() const override {
return base_mininum_playout_delay_ms_;
}
std::vector<webrtc::RtpSource> GetSources() const override {
return std::vector<webrtc::RtpSource>();
}
private:
int id_ = -1;
webrtc::AudioReceiveStreamInterface::Config config_;
webrtc::AudioReceiveStreamInterface::Stats stats_;
int received_packets_ = 0;
webrtc::AudioSinkInterface* sink_ = nullptr;
float gain_ = 1.0f;
rtc::Buffer last_packet_;
bool started_ = false;
int base_mininum_playout_delay_ms_ = 0;
};
class FakeVideoSendStream final
: public webrtc::VideoSendStream,
public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
FakeVideoSendStream(webrtc::VideoSendStream::Config config,
webrtc::VideoEncoderConfig encoder_config);
~FakeVideoSendStream() override;
const webrtc::VideoSendStream::Config& GetConfig() const;
const webrtc::VideoEncoderConfig& GetEncoderConfig() const;
const std::vector<webrtc::VideoStream>& GetVideoStreams() const;
bool IsSending() const;
bool GetVp8Settings(webrtc::VideoCodecVP8* settings) const;
bool GetVp9Settings(webrtc::VideoCodecVP9* settings) const;
bool GetH264Settings(webrtc::VideoCodecH264* settings) const;
bool GetAv1Settings(webrtc::VideoCodecAV1* settings) const;
int GetNumberOfSwappedFrames() const;
int GetLastWidth() const;
int GetLastHeight() const;
int64_t GetLastTimestamp() const;
void SetStats(const webrtc::VideoSendStream::Stats& stats);
int num_encoder_reconfigurations() const {
return num_encoder_reconfigurations_;
}
bool resolution_scaling_enabled() const {
return resolution_scaling_enabled_;
}
bool framerate_scaling_enabled() const { return framerate_scaling_enabled_; }
void InjectVideoSinkWants(const rtc::VideoSinkWants& wants);
rtc::VideoSourceInterface<webrtc::VideoFrame>* source() const {
return source_;
}
void GenerateKeyFrame(const std::vector<std::string>& rids);
const std::vector<std::string>& GetKeyFramesRequested() const {
return keyframes_requested_by_rid_;
}
private:
// rtc::VideoSinkInterface<VideoFrame> implementation.
void OnFrame(const webrtc::VideoFrame& frame) override;
// webrtc::VideoSendStream implementation.
void Start() override;
void Stop() override;
bool started() override { return IsSending(); }
void AddAdaptationResource(
rtc::scoped_refptr<webrtc::Resource> resource) override;
std::vector<rtc::scoped_refptr<webrtc::Resource>> GetAdaptationResources()
override;
void SetSource(
rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
const webrtc::DegradationPreference& degradation_preference) override;
webrtc::VideoSendStream::Stats GetStats() override;
void ReconfigureVideoEncoder(webrtc::VideoEncoderConfig config) override;
void ReconfigureVideoEncoder(webrtc::VideoEncoderConfig config,
webrtc::SetParametersCallback callback) override;
bool sending_;
webrtc::VideoSendStream::Config config_;
webrtc::VideoEncoderConfig encoder_config_;
std::vector<webrtc::VideoStream> video_streams_;
rtc::VideoSinkWants sink_wants_;
bool codec_settings_set_;
union CodecSpecificSettings {
webrtc::VideoCodecVP8 vp8;
webrtc::VideoCodecVP9 vp9;
webrtc::VideoCodecH264 h264;
webrtc::VideoCodecAV1 av1;
} codec_specific_settings_;
bool resolution_scaling_enabled_;
bool framerate_scaling_enabled_;
rtc::VideoSourceInterface<webrtc::VideoFrame>* source_;
int num_swapped_frames_;
absl::optional<webrtc::VideoFrame> last_frame_;
webrtc::VideoSendStream::Stats stats_;
int num_encoder_reconfigurations_ = 0;
std::vector<std::string> keyframes_requested_by_rid_;
};
class FakeVideoReceiveStream final
: public webrtc::VideoReceiveStreamInterface {
public:
explicit FakeVideoReceiveStream(
webrtc::VideoReceiveStreamInterface::Config config);
const webrtc::VideoReceiveStreamInterface::Config& GetConfig() const;
bool IsReceiving() const;
void InjectFrame(const webrtc::VideoFrame& frame);
void SetStats(const webrtc::VideoReceiveStreamInterface::Stats& stats);
std::vector<webrtc::RtpSource> GetSources() const override {
return std::vector<webrtc::RtpSource>();
}
int base_mininum_playout_delay_ms() const {
return base_mininum_playout_delay_ms_;
}
void SetLocalSsrc(uint32_t local_ssrc) {
config_.rtp.local_ssrc = local_ssrc;
}
void UpdateRtxSsrc(uint32_t ssrc) { config_.rtp.rtx_ssrc = ssrc; }
void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
frame_decryptor) override {}
void SetDepacketizerToDecoderFrameTransformer(
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override {}
RecordingState SetAndGetRecordingState(RecordingState state,
bool generate_key_frame) override {
return RecordingState();
}
void GenerateKeyFrame() override {}
void SetRtcpMode(webrtc::RtcpMode mode) override {
config_.rtp.rtcp_mode = mode;
}
void SetFlexFecProtection(webrtc::RtpPacketSinkInterface* sink) override {
config_.rtp.packet_sink_ = sink;
config_.rtp.protected_by_flexfec = (sink != nullptr);
}
void SetLossNotificationEnabled(bool enabled) override {
config_.rtp.lntf.enabled = enabled;
}
void SetNackHistory(webrtc::TimeDelta history) override {
config_.rtp.nack.rtp_history_ms = history.ms();
}
void SetProtectionPayloadTypes(int red_payload_type,
int ulpfec_payload_type) override {
config_.rtp.red_payload_type = red_payload_type;
config_.rtp.ulpfec_payload_type = ulpfec_payload_type;
}
void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) override {
config_.rtp.rtcp_xr = rtcp_xr;
}
void SetAssociatedPayloadTypes(std::map<int, int> associated_payload_types) {
config_.rtp.rtx_associated_payload_types =
std::move(associated_payload_types);
}
void Start() override;
void Stop() override;
webrtc::VideoReceiveStreamInterface::Stats GetStats() const override;
bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override {
base_mininum_playout_delay_ms_ = delay_ms;
return true;
}
int GetBaseMinimumPlayoutDelayMs() const override {
return base_mininum_playout_delay_ms_;
}
private:
webrtc::VideoReceiveStreamInterface::Config config_;
bool receiving_;
webrtc::VideoReceiveStreamInterface::Stats stats_;
int base_mininum_playout_delay_ms_ = 0;
};
class FakeFlexfecReceiveStream final : public webrtc::FlexfecReceiveStream {
public:
explicit FakeFlexfecReceiveStream(
const webrtc::FlexfecReceiveStream::Config config);
void SetLocalSsrc(uint32_t local_ssrc) {
config_.rtp.local_ssrc = local_ssrc;
}
void SetRtcpMode(webrtc::RtcpMode mode) override { config_.rtcp_mode = mode; }
int payload_type() const override { return config_.payload_type; }
void SetPayloadType(int payload_type) override {
config_.payload_type = payload_type;
}
const webrtc::FlexfecReceiveStream::Config& GetConfig() const;
uint32_t remote_ssrc() const { return config_.rtp.remote_ssrc; }
const webrtc::ReceiveStatistics* GetStats() const override { return nullptr; }
private:
void OnRtpPacket(const webrtc::RtpPacketReceived& packet) override;
webrtc::FlexfecReceiveStream::Config config_;
};
class FakeCall final : public webrtc::Call, public webrtc::PacketReceiver {
public:
explicit FakeCall(webrtc::test::ScopedKeyValueConfig* field_trials = nullptr);
FakeCall(webrtc::TaskQueueBase* worker_thread,
webrtc::TaskQueueBase* network_thread,
webrtc::test::ScopedKeyValueConfig* field_trials = nullptr);
~FakeCall() override;
webrtc::MockRtpTransportControllerSend* GetMockTransportControllerSend() {
return &transport_controller_send_;
}
const std::vector<FakeVideoSendStream*>& GetVideoSendStreams();
const std::vector<FakeVideoReceiveStream*>& GetVideoReceiveStreams();
const std::vector<FakeAudioSendStream*>& GetAudioSendStreams();
const FakeAudioSendStream* GetAudioSendStream(uint32_t ssrc);
const std::vector<FakeAudioReceiveStream*>& GetAudioReceiveStreams();
const FakeAudioReceiveStream* GetAudioReceiveStream(uint32_t ssrc);
const FakeVideoReceiveStream* GetVideoReceiveStream(uint32_t ssrc);
const std::vector<FakeFlexfecReceiveStream*>& GetFlexfecReceiveStreams();
rtc::SentPacket last_sent_packet() const { return last_sent_packet_; }
const webrtc::RtpPacketReceived& last_received_rtp_packet() const {
return last_received_rtp_packet_;
}
size_t GetDeliveredPacketsForSsrc(uint32_t ssrc) const {
auto it = delivered_packets_by_ssrc_.find(ssrc);
return it != delivered_packets_by_ssrc_.end() ? it->second : 0u;
}
// This is useful if we care about the last media packet (with id populated)
// but not the last ICE packet (with -1 ID).
int last_sent_nonnegative_packet_id() const {
return last_sent_nonnegative_packet_id_;
}
webrtc::NetworkState GetNetworkState(webrtc::MediaType media) const;
int GetNumCreatedSendStreams() const;
int GetNumCreatedReceiveStreams() const;
void SetStats(const webrtc::Call::Stats& stats);
void SetClientBitratePreferences(
const webrtc::BitrateSettings& preferences) override {}
void SetFieldTrial(const std::string& field_trial_string) {
trials_overrides_ = std::make_unique<webrtc::test::ScopedKeyValueConfig>(
*trials_, field_trial_string);
}
const webrtc::FieldTrialsView& trials() const override { return *trials_; }
private:
webrtc::AudioSendStream* CreateAudioSendStream(
const webrtc::AudioSendStream::Config& config) override;
void DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) override;
webrtc::AudioReceiveStreamInterface* CreateAudioReceiveStream(
const webrtc::AudioReceiveStreamInterface::Config& config) override;
void DestroyAudioReceiveStream(
webrtc::AudioReceiveStreamInterface* receive_stream) override;
webrtc::VideoSendStream* CreateVideoSendStream(
webrtc::VideoSendStream::Config config,
webrtc::VideoEncoderConfig encoder_config) override;
void DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) override;
webrtc::VideoReceiveStreamInterface* CreateVideoReceiveStream(
webrtc::VideoReceiveStreamInterface::Config config) override;
void DestroyVideoReceiveStream(
webrtc::VideoReceiveStreamInterface* receive_stream) override;
webrtc::FlexfecReceiveStream* CreateFlexfecReceiveStream(
const webrtc::FlexfecReceiveStream::Config config) override;
void DestroyFlexfecReceiveStream(
webrtc::FlexfecReceiveStream* receive_stream) override;
void AddAdaptationResource(
rtc::scoped_refptr<webrtc::Resource> resource) override;
webrtc::PacketReceiver* Receiver() override;
void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override {}
void DeliverRtpPacket(
webrtc::MediaType media_type,
webrtc::RtpPacketReceived packet,
OnUndemuxablePacketHandler un_demuxable_packet_handler) override;
bool DeliverPacketInternal(webrtc::MediaType media_type,
uint32_t ssrc,
const rtc::CopyOnWriteBuffer& packet,
webrtc::Timestamp arrival_time);
webrtc::RtpTransportControllerSendInterface* GetTransportControllerSend()
override {
return &transport_controller_send_;
}
webrtc::Call::Stats GetStats() const override;
webrtc::TaskQueueBase* network_thread() const override;
webrtc::TaskQueueBase* worker_thread() const override;
void SignalChannelNetworkState(webrtc::MediaType media,
webrtc::NetworkState state) override;
void OnAudioTransportOverheadChanged(
int transport_overhead_per_packet) override;
void OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
uint32_t local_ssrc) override;
void OnLocalSsrcUpdated(webrtc::VideoReceiveStreamInterface& stream,
uint32_t local_ssrc) override;
void OnLocalSsrcUpdated(webrtc::FlexfecReceiveStream& stream,
uint32_t local_ssrc) override;
void OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
absl::string_view sync_group) override;
void OnSentPacket(const rtc::SentPacket& sent_packet) override;
webrtc::TaskQueueBase* const network_thread_;
webrtc::TaskQueueBase* const worker_thread_;
::testing::NiceMock<webrtc::MockRtpTransportControllerSend>
transport_controller_send_;
webrtc::NetworkState audio_network_state_;
webrtc::NetworkState video_network_state_;
rtc::SentPacket last_sent_packet_;
webrtc::RtpPacketReceived last_received_rtp_packet_;
int last_sent_nonnegative_packet_id_ = -1;
int next_stream_id_ = 665;
webrtc::Call::Stats stats_;
std::vector<FakeVideoSendStream*> video_send_streams_;
std::vector<FakeAudioSendStream*> audio_send_streams_;
std::vector<FakeVideoReceiveStream*> video_receive_streams_;
std::vector<FakeAudioReceiveStream*> audio_receive_streams_;
std::vector<FakeFlexfecReceiveStream*> flexfec_receive_streams_;
std::map<uint32_t, size_t> delivered_packets_by_ssrc_;
int num_created_send_streams_;
int num_created_receive_streams_;
// The field trials that are in use, either supplied by caller
// or pointer to &fallback_trials_.
webrtc::test::ScopedKeyValueConfig* trials_;
// fallback_trials_ is used if caller does not provide any field trials.
webrtc::test::ScopedKeyValueConfig fallback_trials_;
// An extra field trial that can be set using SetFieldTrial.
std::unique_ptr<webrtc::test::ScopedKeyValueConfig> trials_overrides_;
};
} // namespace cricket
#endif // MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
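
A short sketch (not part of this commit) of layering a field-trial override onto a FakeCall via SetFieldTrial(); the trial name is a hypothetical example:

cricket::FakeCall call;  // Falls back to fallback_trials_ when none are given.
call.SetFieldTrial("WebRTC-SomeExperiment/Enabled/");
// trials() reflects the override for as long as the ScopedKeyValueConfig
// created by SetFieldTrial() is alive,
// e.g. trials().IsEnabled("WebRTC-SomeExperiment").
const webrtc::FieldTrialsView& trials = call.trials();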

media/engine/fake_webrtc_video_engine.cc
@@ -0,0 +1,304 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/fake_webrtc_video_engine.h"
#include <algorithm>
#include <memory>
#include "absl/strings/match.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/simulcast_encoder_adapter.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/time_utils.h"
namespace cricket {
namespace {
static constexpr webrtc::TimeDelta kEventTimeout =
webrtc::TimeDelta::Seconds(10);
bool IsScalabilityModeSupported(
const std::vector<webrtc::SdpVideoFormat>& formats,
absl::optional<std::string> scalability_mode) {
if (!scalability_mode.has_value()) {
return true;
}
for (const auto& format : formats) {
for (const auto& mode : format.scalability_modes) {
if (ScalabilityModeToString(mode) == scalability_mode)
return true;
}
}
return false;
}
} // namespace
// Decoder.
FakeWebRtcVideoDecoder::FakeWebRtcVideoDecoder(
FakeWebRtcVideoDecoderFactory* factory)
: num_frames_received_(0), factory_(factory) {}
FakeWebRtcVideoDecoder::~FakeWebRtcVideoDecoder() {
if (factory_) {
factory_->DecoderDestroyed(this);
}
}
bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) {
return true;
}
int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&,
int64_t) {
num_frames_received_++;
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoDecoder::RegisterDecodeCompleteCallback(
webrtc::DecodedImageCallback*) {
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoDecoder::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
int FakeWebRtcVideoDecoder::GetNumFramesReceived() const {
return num_frames_received_;
}
// Decoder factory.
FakeWebRtcVideoDecoderFactory::FakeWebRtcVideoDecoderFactory()
: num_created_decoders_(0) {}
std::vector<webrtc::SdpVideoFormat>
FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const {
std::vector<webrtc::SdpVideoFormat> formats;
for (const webrtc::SdpVideoFormat& format : supported_codec_formats_) {
// Don't add same codec twice.
if (!format.IsCodecInList(formats))
formats.push_back(format);
}
return formats;
}
std::unique_ptr<webrtc::VideoDecoder>
FakeWebRtcVideoDecoderFactory::CreateVideoDecoder(
const webrtc::SdpVideoFormat& format) {
if (format.IsCodecInList(supported_codec_formats_)) {
num_created_decoders_++;
std::unique_ptr<FakeWebRtcVideoDecoder> decoder =
std::make_unique<FakeWebRtcVideoDecoder>(this);
decoders_.push_back(decoder.get());
return decoder;
}
return nullptr;
}
void FakeWebRtcVideoDecoderFactory::DecoderDestroyed(
FakeWebRtcVideoDecoder* decoder) {
decoders_.erase(std::remove(decoders_.begin(), decoders_.end(), decoder),
decoders_.end());
}
void FakeWebRtcVideoDecoderFactory::AddSupportedVideoCodecType(
const std::string& name) {
// This is to match the default H264 params of cricket::VideoCodec.
cricket::VideoCodec video_codec = cricket::CreateVideoCodec(name);
supported_codec_formats_.push_back(
webrtc::SdpVideoFormat(video_codec.name, video_codec.params));
}
int FakeWebRtcVideoDecoderFactory::GetNumCreatedDecoders() {
return num_created_decoders_;
}
const std::vector<FakeWebRtcVideoDecoder*>&
FakeWebRtcVideoDecoderFactory::decoders() {
return decoders_;
}
// Encoder.
FakeWebRtcVideoEncoder::FakeWebRtcVideoEncoder(
FakeWebRtcVideoEncoderFactory* factory)
: num_frames_encoded_(0), factory_(factory) {}
FakeWebRtcVideoEncoder::~FakeWebRtcVideoEncoder() {
if (factory_) {
factory_->EncoderDestroyed(this);
}
}
void FakeWebRtcVideoEncoder::SetFecControllerOverride(
webrtc::FecControllerOverride* fec_controller_override) {
// Ignored.
}
int32_t FakeWebRtcVideoEncoder::InitEncode(
const webrtc::VideoCodec* codecSettings,
const VideoEncoder::Settings& settings) {
webrtc::MutexLock lock(&mutex_);
codec_settings_ = *codecSettings;
init_encode_event_.Set();
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoEncoder::Encode(
const webrtc::VideoFrame& inputImage,
const std::vector<webrtc::VideoFrameType>* frame_types) {
webrtc::MutexLock lock(&mutex_);
++num_frames_encoded_;
init_encode_event_.Set();
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoEncoder::RegisterEncodeCompleteCallback(
webrtc::EncodedImageCallback* callback) {
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoEncoder::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
void FakeWebRtcVideoEncoder::SetRates(const RateControlParameters& parameters) {
}
webrtc::VideoEncoder::EncoderInfo FakeWebRtcVideoEncoder::GetEncoderInfo()
const {
EncoderInfo info;
info.is_hardware_accelerated = true;
return info;
}
bool FakeWebRtcVideoEncoder::WaitForInitEncode() {
return init_encode_event_.Wait(kEventTimeout);
}
webrtc::VideoCodec FakeWebRtcVideoEncoder::GetCodecSettings() {
webrtc::MutexLock lock(&mutex_);
return codec_settings_;
}
int FakeWebRtcVideoEncoder::GetNumEncodedFrames() {
webrtc::MutexLock lock(&mutex_);
return num_frames_encoded_;
}
// Video encoder factory.
FakeWebRtcVideoEncoderFactory::FakeWebRtcVideoEncoderFactory()
: num_created_encoders_(0), vp8_factory_mode_(false) {}
std::vector<webrtc::SdpVideoFormat>
FakeWebRtcVideoEncoderFactory::GetSupportedFormats() const {
std::vector<webrtc::SdpVideoFormat> formats;
for (const webrtc::SdpVideoFormat& format : formats_) {
// Don't add same codec twice.
if (!format.IsCodecInList(formats))
formats.push_back(format);
}
return formats;
}
webrtc::VideoEncoderFactory::CodecSupport
FakeWebRtcVideoEncoderFactory::QueryCodecSupport(
const webrtc::SdpVideoFormat& format,
absl::optional<std::string> scalability_mode) const {
std::vector<webrtc::SdpVideoFormat> supported_formats;
for (const auto& f : formats_) {
if (format.IsSameCodec(f))
supported_formats.push_back(f);
}
if (format.IsCodecInList(formats_)) {
return {.is_supported = IsScalabilityModeSupported(supported_formats,
scalability_mode)};
}
return {.is_supported = false};
}
std::unique_ptr<webrtc::VideoEncoder>
FakeWebRtcVideoEncoderFactory::CreateVideoEncoder(
const webrtc::SdpVideoFormat& format) {
webrtc::MutexLock lock(&mutex_);
std::unique_ptr<webrtc::VideoEncoder> encoder;
if (format.IsCodecInList(formats_)) {
if (absl::EqualsIgnoreCase(format.name, kVp8CodecName) &&
!vp8_factory_mode_) {
// The simulcast adapter will ask this factory for multiple VP8
// encoders. Enter vp8_factory_mode so that we now create these encoders
// instead of more adapters.
vp8_factory_mode_ = true;
encoder = std::make_unique<webrtc::SimulcastEncoderAdapter>(this, format);
} else {
num_created_encoders_++;
created_video_encoder_event_.Set();
encoder = std::make_unique<FakeWebRtcVideoEncoder>(this);
encoders_.push_back(static_cast<FakeWebRtcVideoEncoder*>(encoder.get()));
}
}
return encoder;
}
bool FakeWebRtcVideoEncoderFactory::WaitForCreatedVideoEncoders(
int num_encoders) {
int64_t start_offset_ms = rtc::TimeMillis();
int64_t wait_time = kEventTimeout.ms();
do {
if (GetNumCreatedEncoders() >= num_encoders)
return true;
wait_time = kEventTimeout.ms() - (rtc::TimeMillis() - start_offset_ms);
} while (wait_time > 0 && created_video_encoder_event_.Wait(
webrtc::TimeDelta::Millis(wait_time)));
return false;
}
void FakeWebRtcVideoEncoderFactory::EncoderDestroyed(
FakeWebRtcVideoEncoder* encoder) {
webrtc::MutexLock lock(&mutex_);
encoders_.erase(std::remove(encoders_.begin(), encoders_.end(), encoder),
encoders_.end());
}
void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodec(
const webrtc::SdpVideoFormat& format) {
formats_.push_back(format);
}
void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodecType(
const std::string& name,
const std::vector<webrtc::ScalabilityMode>& scalability_modes) {
// This is to match the default H264 params of cricket::VideoCodec.
cricket::VideoCodec video_codec = cricket::CreateVideoCodec(name);
formats_.push_back(webrtc::SdpVideoFormat(
video_codec.name, video_codec.params,
{scalability_modes.begin(), scalability_modes.end()}));
}
int FakeWebRtcVideoEncoderFactory::GetNumCreatedEncoders() {
webrtc::MutexLock lock(&mutex_);
return num_created_encoders_;
}
const std::vector<FakeWebRtcVideoEncoder*>
FakeWebRtcVideoEncoderFactory::encoders() {
webrtc::MutexLock lock(&mutex_);
return encoders_;
}
} // namespace cricket
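
A test-side sketch (not part of this commit) of the VP8 factory-mode behavior described above: the first VP8 CreateVideoEncoder() call returns a SimulcastEncoderAdapter, and the adapter then re-enters this same factory to create the individual fake encoders:

cricket::FakeWebRtcVideoEncoderFactory factory;
factory.AddSupportedVideoCodecType("VP8");
std::unique_ptr<webrtc::VideoEncoder> encoder =
    factory.CreateVideoEncoder(webrtc::SdpVideoFormat("VP8"));
// factory.encoders() tracks the fake encoders the adapter creates lazily
// (e.g. once InitEncode() configures the simulcast streams).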

media/engine/fake_webrtc_video_engine.h
@@ -0,0 +1,144 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
#define MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "api/fec_controller_override.h"
#include "api/video/encoded_image.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/event.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace cricket {
class FakeWebRtcVideoDecoderFactory;
class FakeWebRtcVideoEncoderFactory;
// Fake class for mocking out webrtc::VideoDecoder
class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
public:
explicit FakeWebRtcVideoDecoder(FakeWebRtcVideoDecoderFactory* factory);
~FakeWebRtcVideoDecoder();
bool Configure(const Settings& settings) override;
int32_t Decode(const webrtc::EncodedImage&, int64_t) override;
int32_t RegisterDecodeCompleteCallback(
webrtc::DecodedImageCallback*) override;
int32_t Release() override;
int GetNumFramesReceived() const;
private:
int num_frames_received_;
FakeWebRtcVideoDecoderFactory* factory_;
};
// Fake class for mocking out webrtc::VideoDecoderFactory.
class FakeWebRtcVideoDecoderFactory : public webrtc::VideoDecoderFactory {
public:
FakeWebRtcVideoDecoderFactory();
std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
std::unique_ptr<webrtc::VideoDecoder> CreateVideoDecoder(
const webrtc::SdpVideoFormat& format) override;
void DecoderDestroyed(FakeWebRtcVideoDecoder* decoder);
void AddSupportedVideoCodecType(const std::string& name);
int GetNumCreatedDecoders();
const std::vector<FakeWebRtcVideoDecoder*>& decoders();
private:
std::vector<webrtc::SdpVideoFormat> supported_codec_formats_;
std::vector<FakeWebRtcVideoDecoder*> decoders_;
int num_created_decoders_;
};
// Fake class for mocking out webrtc::VideoEncoder.
class FakeWebRtcVideoEncoder : public webrtc::VideoEncoder {
public:
explicit FakeWebRtcVideoEncoder(FakeWebRtcVideoEncoderFactory* factory);
~FakeWebRtcVideoEncoder();
void SetFecControllerOverride(
webrtc::FecControllerOverride* fec_controller_override) override;
int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
const VideoEncoder::Settings& settings) override;
int32_t Encode(
const webrtc::VideoFrame& inputImage,
const std::vector<webrtc::VideoFrameType>* frame_types) override;
int32_t RegisterEncodeCompleteCallback(
webrtc::EncodedImageCallback* callback) override;
int32_t Release() override;
void SetRates(const RateControlParameters& parameters) override;
webrtc::VideoEncoder::EncoderInfo GetEncoderInfo() const override;
bool WaitForInitEncode();
webrtc::VideoCodec GetCodecSettings();
int GetNumEncodedFrames();
private:
webrtc::Mutex mutex_;
rtc::Event init_encode_event_;
int num_frames_encoded_ RTC_GUARDED_BY(mutex_);
webrtc::VideoCodec codec_settings_ RTC_GUARDED_BY(mutex_);
FakeWebRtcVideoEncoderFactory* factory_;
};
// Fake class for mocking out webrtc::VideoEncoderFactory.
class FakeWebRtcVideoEncoderFactory : public webrtc::VideoEncoderFactory {
public:
FakeWebRtcVideoEncoderFactory();
std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
webrtc::VideoEncoderFactory::CodecSupport QueryCodecSupport(
const webrtc::SdpVideoFormat& format,
absl::optional<std::string> scalability_mode) const override;
std::unique_ptr<webrtc::VideoEncoder> CreateVideoEncoder(
const webrtc::SdpVideoFormat& format) override;
bool WaitForCreatedVideoEncoders(int num_encoders);
void EncoderDestroyed(FakeWebRtcVideoEncoder* encoder);
void set_encoders_have_internal_sources(bool internal_source);
void AddSupportedVideoCodec(const webrtc::SdpVideoFormat& format);
void AddSupportedVideoCodecType(
const std::string& name,
const std::vector<webrtc::ScalabilityMode>& scalability_modes = {});
int GetNumCreatedEncoders();
const std::vector<FakeWebRtcVideoEncoder*> encoders();
private:
webrtc::Mutex mutex_;
rtc::Event created_video_encoder_event_;
std::vector<webrtc::SdpVideoFormat> formats_;
std::vector<FakeWebRtcVideoEncoder*> encoders_ RTC_GUARDED_BY(mutex_);
int num_created_encoders_ RTC_GUARDED_BY(mutex_);
bool vp8_factory_mode_;
};
} // namespace cricket
#endif // MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_

View file

@ -0,0 +1,105 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/internal_decoder_factory.h"
#include "absl/strings/match.h"
#include "api/video_codecs/av1_profile.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/h264/include/h264.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"
#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
#include "modules/video_coding/codecs/av1/dav1d_decoder.h" // nogncheck
#endif
namespace webrtc {
namespace {
#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
constexpr bool kDav1dIsIncluded = true;
#else
constexpr bool kDav1dIsIncluded = false;
std::unique_ptr<VideoDecoder> CreateDav1dDecoder() {
return nullptr;
}
#endif
} // namespace
std::vector<SdpVideoFormat> InternalDecoderFactory::GetSupportedFormats()
const {
std::vector<SdpVideoFormat> formats;
formats.push_back(SdpVideoFormat(cricket::kVp8CodecName));
for (const SdpVideoFormat& format : SupportedVP9DecoderCodecs())
formats.push_back(format);
for (const SdpVideoFormat& h264_format : SupportedH264DecoderCodecs())
formats.push_back(h264_format);
if (kDav1dIsIncluded) {
formats.push_back(SdpVideoFormat(cricket::kAv1CodecName));
formats.push_back(
SdpVideoFormat(cricket::kAv1CodecName,
{{cricket::kAv1FmtpProfile,
AV1ProfileToString(AV1Profile::kProfile1).data()}}));
}
return formats;
}
VideoDecoderFactory::CodecSupport InternalDecoderFactory::QueryCodecSupport(
const SdpVideoFormat& format,
bool reference_scaling) const {
// Query for supported formats and check if the specified format is supported.
// Return unsupported if an invalid combination of format and
// reference_scaling is specified.
if (reference_scaling) {
VideoCodecType codec = PayloadStringToCodecType(format.name);
if (codec != kVideoCodecVP9 && codec != kVideoCodecAV1) {
return {/*is_supported=*/false, /*is_power_efficient=*/false};
}
}
CodecSupport codec_support;
codec_support.is_supported = format.IsCodecInList(GetSupportedFormats());
return codec_support;
}
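// Illustrative outcomes of the logic above (a sketch, not exhaustive):
//
//   InternalDecoderFactory factory;
//   // VP8 cannot do reference scaling, so this reports unsupported:
//   factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName),
//                             /*reference_scaling=*/true);
//   // VP9 allows reference scaling and is always built in, so this reports
//   // supported:
//   factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName),
//                             /*reference_scaling=*/true);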
std::unique_ptr<VideoDecoder> InternalDecoderFactory::CreateVideoDecoder(
const SdpVideoFormat& format) {
if (!format.IsCodecInList(GetSupportedFormats())) {
RTC_LOG(LS_WARNING) << "Trying to create decoder for unsupported format. "
<< format.ToString();
return nullptr;
}
if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName))
return VP8Decoder::Create();
if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName))
return VP9Decoder::Create();
if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName))
return H264Decoder::Create();
if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName) &&
kDav1dIsIncluded) {
return CreateDav1dDecoder();
}
RTC_DCHECK_NOTREACHED();
return nullptr;
}
} // namespace webrtc

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
#define MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
#include <memory>
#include <vector>
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class RTC_EXPORT InternalDecoderFactory : public VideoDecoderFactory {
public:
std::vector<SdpVideoFormat> GetSupportedFormats() const override;
CodecSupport QueryCodecSupport(const SdpVideoFormat& format,
bool reference_scaling) const override;
std::unique_ptr<VideoDecoder> CreateVideoDecoder(
const SdpVideoFormat& format) override;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/internal_encoder_factory.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_encoder_factory_template.h"
#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h" // nogncheck
#endif
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
#if defined(WEBRTC_USE_H264)
#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h" // nogncheck
#endif
namespace webrtc {
namespace {
using Factory =
VideoEncoderFactoryTemplate<webrtc::LibvpxVp8EncoderTemplateAdapter,
#if defined(WEBRTC_USE_H264)
webrtc::OpenH264EncoderTemplateAdapter,
#endif
#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
webrtc::LibaomAv1EncoderTemplateAdapter,
#endif
webrtc::LibvpxVp9EncoderTemplateAdapter>;
} // namespace
std::vector<SdpVideoFormat> InternalEncoderFactory::GetSupportedFormats()
const {
return Factory().GetSupportedFormats();
}
std::unique_ptr<VideoEncoder> InternalEncoderFactory::CreateVideoEncoder(
const SdpVideoFormat& format) {
auto original_format =
FuzzyMatchSdpVideoFormat(Factory().GetSupportedFormats(), format);
return original_format ? Factory().CreateVideoEncoder(*original_format)
: nullptr;
}
VideoEncoderFactory::CodecSupport InternalEncoderFactory::QueryCodecSupport(
const SdpVideoFormat& format,
absl::optional<std::string> scalability_mode) const {
auto original_format =
FuzzyMatchSdpVideoFormat(Factory().GetSupportedFormats(), format);
return original_format
? Factory().QueryCodecSupport(*original_format, scalability_mode)
: VideoEncoderFactory::CodecSupport{.is_supported = false};
}
} // namespace webrtc

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
#define MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
#include <memory>
#include <string>
#include <vector>
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class RTC_EXPORT InternalEncoderFactory : public VideoEncoderFactory {
public:
std::vector<SdpVideoFormat> GetSupportedFormats() const override;
CodecSupport QueryCodecSupport(
const SdpVideoFormat& format,
absl::optional<std::string> scalability_mode) const override;
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
const SdpVideoFormat& format) override;
};
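// A minimal usage sketch (assumes the built-in software codecs selected at
// compile time are linked in):
//
//   InternalEncoderFactory factory;
//   std::vector<SdpVideoFormat> formats = factory.GetSupportedFormats();
//   std::unique_ptr<VideoEncoder> encoder =
//       formats.empty() ? nullptr
//                       : factory.CreateVideoEncoder(formats.front());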
} // namespace webrtc
#endif // MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_

View file

@ -0,0 +1,117 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/multiplex_codec_factory.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
#include "rtc_base/logging.h"
namespace {
bool IsMultiplexCodec(const cricket::VideoCodec& codec) {
return absl::EqualsIgnoreCase(codec.name.c_str(),
cricket::kMultiplexCodecName);
}
} // anonymous namespace
namespace webrtc {
constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
MultiplexEncoderFactory::MultiplexEncoderFactory(
std::unique_ptr<VideoEncoderFactory> factory,
bool supports_augmenting_data)
: factory_(std::move(factory)),
supports_augmenting_data_(supports_augmenting_data) {}
std::vector<SdpVideoFormat> MultiplexEncoderFactory::GetSupportedFormats()
const {
std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
for (const auto& format : formats) {
if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
SdpVideoFormat multiplex_format = format;
multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
format.name;
multiplex_format.name = cricket::kMultiplexCodecName;
formats.push_back(multiplex_format);
break;
}
}
return formats;
}
std::unique_ptr<VideoEncoder> MultiplexEncoderFactory::CreateVideoEncoder(
const SdpVideoFormat& format) {
if (!IsMultiplexCodec(cricket::CreateVideoCodec(format)))
return factory_->CreateVideoEncoder(format);
const auto& it =
format.parameters.find(cricket::kCodecParamAssociatedCodecName);
if (it == format.parameters.end()) {
RTC_LOG(LS_ERROR) << "No assicated codec for multiplex.";
return nullptr;
}
SdpVideoFormat associated_format = format;
associated_format.name = it->second;
return std::unique_ptr<VideoEncoder>(new MultiplexEncoderAdapter(
factory_.get(), associated_format, supports_augmenting_data_));
}
MultiplexDecoderFactory::MultiplexDecoderFactory(
std::unique_ptr<VideoDecoderFactory> factory,
bool supports_augmenting_data)
: factory_(std::move(factory)),
supports_augmenting_data_(supports_augmenting_data) {}
std::vector<SdpVideoFormat> MultiplexDecoderFactory::GetSupportedFormats()
const {
std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
std::vector<SdpVideoFormat> augmented_formats = formats;
for (const auto& format : formats) {
if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
SdpVideoFormat multiplex_format = format;
multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
format.name;
multiplex_format.name = cricket::kMultiplexCodecName;
augmented_formats.push_back(multiplex_format);
}
}
return augmented_formats;
}
std::unique_ptr<VideoDecoder> MultiplexDecoderFactory::Create(
const Environment& env,
const SdpVideoFormat& format) {
if (!IsMultiplexCodec(cricket::CreateVideoCodec(format))) {
return factory_->Create(env, format);
}
auto it = format.parameters.find(cricket::kCodecParamAssociatedCodecName);
if (it == format.parameters.end()) {
RTC_LOG(LS_ERROR) << "No assicated codec for multiplex.";
return nullptr;
}
SdpVideoFormat associated_format = format;
associated_format.name = it->second;
return std::make_unique<MultiplexDecoderAdapter>(
env, factory_.get(), associated_format, supports_augmenting_data_);
}
} // namespace webrtc

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
#define MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
#include <memory>
#include <vector>
#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Multiplex codec is a completely modular/optional codec that allows users to
// send more than a frame's opaque content (RGB/YUV) over video channels.
// - Allows sending the alpha channel over the wire iff the input is
// I420ABufferInterface. Users can expect to receive I420ABufferInterface as the
// decoded video frame buffer. I420A data is split into YUV/AXX portions,
// encoded/decoded separately, and the bitstreams are concatenated.
// - Allows sending augmenting data over the wire attached to the frame. This
// attached data portion is not encoded in any way and sent as it is. Users can
// input AugmentedVideoFrameBuffer and can expect the same interface as the
// decoded video frame buffer.
// - Showcases an example of how to add a custom codec in webrtc video channel.
// How to use it end-to-end:
// - Wrap your existing VideoEncoderFactory implementation with
// MultiplexEncoderFactory and your VideoDecoderFactory implementation with
// MultiplexDecoderFactory below. For actual coding, multiplex creates encoder
// and decoder instance(s) using these factories.
// - Use Multiplex*coderFactory classes in CreatePeerConnectionFactory() calls.
// - Select "multiplex" codec in SDP negotiation.
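// A minimal wiring sketch (hypothetical; InternalEncoderFactory and
// InternalDecoderFactory stand in for whatever factories you already use):
//
//   auto encoder_factory = std::make_unique<webrtc::MultiplexEncoderFactory>(
//       std::make_unique<webrtc::InternalEncoderFactory>(),
//       /*supports_augmenting_data=*/false);
//   auto decoder_factory = std::make_unique<webrtc::MultiplexDecoderFactory>(
//       std::make_unique<webrtc::InternalDecoderFactory>(),
//       /*supports_augmenting_data=*/false);
//   // Pass both factories to CreatePeerConnectionFactory() and select the
//   // "multiplex" codec in SDP negotiation.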
class RTC_EXPORT MultiplexEncoderFactory : public VideoEncoderFactory {
public:
// `supports_augmenting_data` defines whether the encoder supports augmenting
// data. If set, the encoder expects to receive video frame buffers of type
// AugmentedVideoFrameBuffer.
MultiplexEncoderFactory(std::unique_ptr<VideoEncoderFactory> factory,
bool supports_augmenting_data = false);
std::vector<SdpVideoFormat> GetSupportedFormats() const override;
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
const SdpVideoFormat& format) override;
private:
std::unique_ptr<VideoEncoderFactory> factory_;
const bool supports_augmenting_data_;
};
class RTC_EXPORT MultiplexDecoderFactory : public VideoDecoderFactory {
public:
// `supports_augmenting_data` defines whether the decoder supports augmenting
// data. If set, the decoder is expected to output video frame buffers of type
// AugmentedVideoFrameBuffer.
MultiplexDecoderFactory(std::unique_ptr<VideoDecoderFactory> factory,
bool supports_augmenting_data = false);
std::vector<SdpVideoFormat> GetSupportedFormats() const override;
std::unique_ptr<VideoDecoder> Create(const Environment& env,
const SdpVideoFormat& format) override;
private:
std::unique_ptr<VideoDecoderFactory> factory_;
const bool supports_augmenting_data_;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
#define MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
#include <vector>
#include "media/base/media_channel.h"
#include "media/base/media_engine.h"
namespace webrtc {
class Call;
} // namespace webrtc
namespace cricket {
// Video engine implementation that does nothing and can be used in
// CompositeMediaEngine.
class NullWebRtcVideoEngine : public VideoEngineInterface {
public:
std::vector<VideoCodec> send_codecs(bool) const override {
return std::vector<VideoCodec>();
}
std::vector<VideoCodec> recv_codecs(bool) const override {
return std::vector<VideoCodec>();
}
std::vector<VideoCodec> send_codecs() const override {
return std::vector<VideoCodec>();
}
std::vector<VideoCodec> recv_codecs() const override {
return std::vector<VideoCodec>();
}
std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
const override {
return {};
}
};
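// A minimal usage sketch (assuming CompositeMediaEngine's two-argument
// (audio engine, video engine) constructor; `voice_engine` is hypothetical):
//
//   cricket::CompositeMediaEngine engine(
//       std::move(voice_engine),  // any VoiceEngineInterface implementation
//       std::make_unique<cricket::NullWebRtcVideoEngine>());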
} // namespace cricket
#endif // MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_

View file

@ -0,0 +1,160 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/payload_type_mapper.h"
#include <utility>
#include "absl/strings/ascii.h"
#include "api/audio_codecs/audio_format.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
namespace cricket {
webrtc::SdpAudioFormat AudioCodecToSdpAudioFormat(const AudioCodec& ac) {
return webrtc::SdpAudioFormat(ac.name, ac.clockrate, ac.channels, ac.params);
}
PayloadTypeMapper::PayloadTypeMapper()
// RFC 3551 reserves payload type numbers in the range 96-127 exclusively
// for dynamic assignment. Once those are used up, it is recommended that
// payload types unassigned by the RFC are used for dynamic payload type
// mapping, before any static payload ids. At this point, we only support
// mapping within the exclusive range.
: next_unused_payload_type_(96),
max_payload_type_(127),
mappings_(
{// Static payload type assignments according to RFC 3551.
{{kPcmuCodecName, 8000, 1}, 0},
{{"GSM", 8000, 1}, 3},
{{"G723", 8000, 1}, 4},
{{"DVI4", 8000, 1}, 5},
{{"DVI4", 16000, 1}, 6},
{{"LPC", 8000, 1}, 7},
{{kPcmaCodecName, 8000, 1}, 8},
{{kG722CodecName, 8000, 1}, 9},
{{kL16CodecName, 44100, 2}, 10},
{{kL16CodecName, 44100, 1}, 11},
{{"QCELP", 8000, 1}, 12},
{{kCnCodecName, 8000, 1}, 13},
// RFC 4566 is a bit ambiguous on the contents of the "encoding
// parameters" field, which, for audio, encodes the number of
// channels. It is "optional and may be omitted if the number of
// channels is one". Does that necessarily imply that an omitted
// encoding parameter means one channel? Since RFC 3551 doesn't
// specify a value for this parameter for MPA, I've included both 0
// and 1 here, to increase the chances it will be correctly used if
// someone implements an MPEG audio encoder/decoder.
{{"MPA", 90000, 0}, 14},
{{"MPA", 90000, 1}, 14},
{{"G728", 8000, 1}, 15},
{{"DVI4", 11025, 1}, 16},
{{"DVI4", 22050, 1}, 17},
{{"G729", 8000, 1}, 18},
// Payload type assignments currently used by WebRTC.
// Includes data to reduce collisions (and thus reassignments)
{{kIlbcCodecName, 8000, 1}, 102},
{{kCnCodecName, 16000, 1}, 105},
{{kCnCodecName, 32000, 1}, 106},
{{kOpusCodecName,
48000,
2,
{{kCodecParamMinPTime, "10"},
{kCodecParamUseInbandFec, kParamValueTrue}}},
111},
// RED for opus is assigned in the lower range, starting at the top.
// Note that the FMTP refers to the opus payload type.
{{kRedCodecName,
48000,
2,
{{kCodecParamNotInNameValueFormat, "111/111"}}},
63},
// TODO(solenberg): Remove the hard coded 16k,32k,48k DTMF once we
// assign payload types dynamically for send side as well.
{{kDtmfCodecName, 48000, 1}, 110},
{{kDtmfCodecName, 32000, 1}, 112},
{{kDtmfCodecName, 16000, 1}, 113},
{{kDtmfCodecName, 8000, 1}, 126}}) {
// TODO(ossu): Try to keep this as change-proof as possible until we're able
// to remove the payload type constants from everywhere in the code.
for (const auto& mapping : mappings_) {
used_payload_types_.insert(mapping.second);
}
}
PayloadTypeMapper::~PayloadTypeMapper() = default;
absl::optional<int> PayloadTypeMapper::GetMappingFor(
const webrtc::SdpAudioFormat& format) {
auto iter = mappings_.find(format);
if (iter != mappings_.end())
return iter->second;
for (; next_unused_payload_type_ <= max_payload_type_;
++next_unused_payload_type_) {
int payload_type = next_unused_payload_type_;
if (used_payload_types_.find(payload_type) == used_payload_types_.end()) {
used_payload_types_.insert(payload_type);
mappings_[format] = payload_type;
++next_unused_payload_type_;
return payload_type;
}
}
return absl::nullopt;
}
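// Worked example (hypothetical lookups): {"opus", 48000, 2} with minptime=10
// and useinbandfec=1 hits the static table above and returns 111. A format
// with no entry, e.g. {"my-codec", 16000, 1}, is assigned the lowest unused
// dynamic id (96 on a fresh mapper), and the same id is returned on all
// subsequent lookups of that format.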
absl::optional<int> PayloadTypeMapper::FindMappingFor(
const webrtc::SdpAudioFormat& format) const {
auto iter = mappings_.find(format);
if (iter != mappings_.end())
return iter->second;
return absl::nullopt;
}
absl::optional<AudioCodec> PayloadTypeMapper::ToAudioCodec(
const webrtc::SdpAudioFormat& format) {
// TODO(ossu): We can safely set bitrate to zero here, since that field is
// not present in the SDP. It is used to ferry around some target bitrate
// values for certain codecs (ISAC and Opus) and in ways it really
// shouldn't. It should be removed once we no longer use CodecInsts in the
// ACM or NetEq.
auto opt_payload_type = GetMappingFor(format);
if (opt_payload_type) {
AudioCodec codec =
cricket::CreateAudioCodec(*opt_payload_type, format.name,
format.clockrate_hz, format.num_channels);
codec.params = format.parameters;
return std::move(codec);
}
return absl::nullopt;
}
bool PayloadTypeMapper::SdpAudioFormatOrdering::operator()(
const webrtc::SdpAudioFormat& a,
const webrtc::SdpAudioFormat& b) const {
if (a.clockrate_hz == b.clockrate_hz) {
if (a.num_channels == b.num_channels) {
int name_cmp =
absl::AsciiStrToLower(a.name).compare(absl::AsciiStrToLower(b.name));
if (name_cmp == 0)
return a.parameters < b.parameters;
return name_cmp < 0;
}
return a.num_channels < b.num_channels;
}
return a.clockrate_hz < b.clockrate_hz;
}
} // namespace cricket

View file

@ -0,0 +1,57 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
#define MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
#include <map>
#include <set>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_format.h"
#include "media/base/codec.h"
namespace cricket {
webrtc::SdpAudioFormat AudioCodecToSdpAudioFormat(const AudioCodec& ac);
class PayloadTypeMapper {
public:
PayloadTypeMapper();
~PayloadTypeMapper();
// Finds the current payload type for `format` or assigns a new one, if no
// current mapping exists. Will return an empty value if it was unable to
// create a mapping, i.e. if all dynamic payload type ids have been used up.
absl::optional<int> GetMappingFor(const webrtc::SdpAudioFormat& format);
// Finds the current payload type for `format`, if any. Returns an empty value
// if no payload type mapping exists for the format.
absl::optional<int> FindMappingFor(
const webrtc::SdpAudioFormat& format) const;
// Like GetMappingFor, but fills in an AudioCodec structure with the necessary
// information instead.
absl::optional<AudioCodec> ToAudioCodec(const webrtc::SdpAudioFormat& format);
private:
struct SdpAudioFormatOrdering {
bool operator()(const webrtc::SdpAudioFormat& a,
const webrtc::SdpAudioFormat& b) const;
};
int next_unused_payload_type_;
int max_payload_type_;
std::map<webrtc::SdpAudioFormat, int, SdpAudioFormatOrdering> mappings_;
std::set<int> used_payload_types_;
};
} // namespace cricket
#endif // MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_

View file

@ -0,0 +1,995 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/simulcast_encoder_adapter.h"
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "api/field_trials_view.h"
#include "api/scoped_refptr.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_codec_constants.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_encoder_software_fallback_wrapper.h"
#include "media/base/media_constants.h"
#include "media/base/video_common.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/include/video_error_codes_utils.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/logging.h"
namespace {
// Max qp for lowest spatial resolution when doing simulcast.
const unsigned int kLowestResMaxQp = 45;
absl::optional<unsigned int> GetScreenshareBoostedQpValue(
const webrtc::FieldTrialsView& field_trials) {
std::string experiment_group =
field_trials.Lookup("WebRTC-BoostedScreenshareQp");
unsigned int qp;
if (sscanf(experiment_group.c_str(), "%u", &qp) != 1)
return absl::nullopt;
qp = std::min(qp, 63u);
qp = std::max(qp, 1u);
return qp;
}
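// For example (hypothetical trial configuration): the field trial string
// "WebRTC-BoostedScreenshareQp/45/" makes Lookup() return "45", which parses
// to a boosted max QP of 45; parsed values are clamped to the range [1, 63].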
uint32_t SumStreamMaxBitrate(int streams, const webrtc::VideoCodec& codec) {
uint32_t bitrate_sum = 0;
for (int i = 0; i < streams; ++i) {
bitrate_sum += codec.simulcastStream[i].maxBitrate;
}
return bitrate_sum;
}
int CountAllStreams(const webrtc::VideoCodec& codec) {
int total_streams_count =
codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
uint32_t simulcast_max_bitrate =
SumStreamMaxBitrate(total_streams_count, codec);
if (simulcast_max_bitrate == 0) {
total_streams_count = 1;
}
return total_streams_count;
}
int CountActiveStreams(const webrtc::VideoCodec& codec) {
if (codec.numberOfSimulcastStreams < 1) {
return 1;
}
int total_streams_count = CountAllStreams(codec);
int active_streams_count = 0;
for (int i = 0; i < total_streams_count; ++i) {
if (codec.simulcastStream[i].active) {
++active_streams_count;
}
}
return active_streams_count;
}
int VerifyCodec(const webrtc::VideoCodec* codec_settings) {
if (codec_settings == nullptr) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_settings->maxFramerate < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
// allow zero to represent an unspecified maxBitRate
if (codec_settings->maxBitrate > 0 &&
codec_settings->startBitrate > codec_settings->maxBitrate) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_settings->width <= 1 || codec_settings->height <= 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (codec_settings->codecType == webrtc::kVideoCodecVP8 &&
codec_settings->VP8().automaticResizeOn &&
CountActiveStreams(*codec_settings) > 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
return WEBRTC_VIDEO_CODEC_OK;
}
bool StreamQualityCompare(const webrtc::SimulcastStream& a,
const webrtc::SimulcastStream& b) {
return std::tie(a.height, a.width, a.maxBitrate, a.maxFramerate) <
std::tie(b.height, b.width, b.maxBitrate, b.maxFramerate);
}
void GetLowestAndHighestQualityStreamIndices(
rtc::ArrayView<webrtc::SimulcastStream> streams,
int* lowest_quality_stream_idx,
int* highest_quality_stream_idx) {
const auto lowest_highest_quality_streams =
absl::c_minmax_element(streams, StreamQualityCompare);
*lowest_quality_stream_idx =
std::distance(streams.begin(), lowest_highest_quality_streams.first);
*highest_quality_stream_idx =
std::distance(streams.begin(), lowest_highest_quality_streams.second);
}
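// For example (hypothetical configuration): with streams of 320x180, 640x360
// and 1280x720 configured in that order, the comparator above yields index 0
// as the lowest-quality stream and index 2 as the highest.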
std::vector<uint32_t> GetStreamStartBitratesKbps(
const webrtc::VideoCodec& codec) {
std::vector<uint32_t> start_bitrates;
std::unique_ptr<webrtc::VideoBitrateAllocator> rate_allocator =
std::make_unique<webrtc::SimulcastRateAllocator>(codec);
webrtc::VideoBitrateAllocation allocation =
rate_allocator->Allocate(webrtc::VideoBitrateAllocationParameters(
codec.startBitrate * 1000, codec.maxFramerate));
int total_streams_count = CountAllStreams(codec);
for (int i = 0; i < total_streams_count; ++i) {
uint32_t stream_bitrate = allocation.GetSpatialLayerSum(i) / 1000;
start_bitrates.push_back(stream_bitrate);
}
return start_bitrates;
}
} // namespace
namespace webrtc {
SimulcastEncoderAdapter::EncoderContext::EncoderContext(
std::unique_ptr<VideoEncoder> encoder,
bool prefer_temporal_support,
VideoEncoder::EncoderInfo primary_info,
VideoEncoder::EncoderInfo fallback_info)
: encoder_(std::move(encoder)),
prefer_temporal_support_(prefer_temporal_support),
primary_info_(std::move(primary_info)),
fallback_info_(std::move(fallback_info)) {}
void SimulcastEncoderAdapter::EncoderContext::Release() {
if (encoder_) {
encoder_->Release();
encoder_->RegisterEncodeCompleteCallback(nullptr);
}
}
SimulcastEncoderAdapter::StreamContext::StreamContext(
SimulcastEncoderAdapter* parent,
std::unique_ptr<EncoderContext> encoder_context,
std::unique_ptr<FramerateController> framerate_controller,
int stream_idx,
uint16_t width,
uint16_t height,
bool is_paused)
: parent_(parent),
encoder_context_(std::move(encoder_context)),
framerate_controller_(std::move(framerate_controller)),
stream_idx_(stream_idx),
width_(width),
height_(height),
is_keyframe_needed_(false),
is_paused_(is_paused) {
if (parent_) {
encoder_context_->encoder().RegisterEncodeCompleteCallback(this);
}
}
SimulcastEncoderAdapter::StreamContext::StreamContext(StreamContext&& rhs)
: parent_(rhs.parent_),
encoder_context_(std::move(rhs.encoder_context_)),
framerate_controller_(std::move(rhs.framerate_controller_)),
stream_idx_(rhs.stream_idx_),
width_(rhs.width_),
height_(rhs.height_),
is_keyframe_needed_(rhs.is_keyframe_needed_),
is_paused_(rhs.is_paused_) {
if (parent_) {
encoder_context_->encoder().RegisterEncodeCompleteCallback(this);
}
}
SimulcastEncoderAdapter::StreamContext::~StreamContext() {
if (encoder_context_) {
encoder_context_->Release();
}
}
std::unique_ptr<SimulcastEncoderAdapter::EncoderContext>
SimulcastEncoderAdapter::StreamContext::ReleaseEncoderContext() && {
encoder_context_->Release();
return std::move(encoder_context_);
}
void SimulcastEncoderAdapter::StreamContext::OnKeyframe(Timestamp timestamp) {
is_keyframe_needed_ = false;
if (framerate_controller_) {
framerate_controller_->KeepFrame(timestamp.us() * 1000);
}
}
bool SimulcastEncoderAdapter::StreamContext::ShouldDropFrame(
Timestamp timestamp) {
if (!framerate_controller_) {
return false;
}
return framerate_controller_->ShouldDropFrame(timestamp.us() * 1000);
}
EncodedImageCallback::Result
SimulcastEncoderAdapter::StreamContext::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info) {
RTC_CHECK(parent_); // If null, this method should never be called.
return parent_->OnEncodedImage(stream_idx_, encoded_image,
codec_specific_info);
}
void SimulcastEncoderAdapter::StreamContext::OnDroppedFrame(
DropReason /*reason*/) {
RTC_CHECK(parent_); // If null, this method should never be called.
parent_->OnDroppedFrame(stream_idx_);
}
SimulcastEncoderAdapter::SimulcastEncoderAdapter(VideoEncoderFactory* factory,
const SdpVideoFormat& format)
: SimulcastEncoderAdapter(factory,
nullptr,
format,
FieldTrialBasedConfig()) {}
SimulcastEncoderAdapter::SimulcastEncoderAdapter(
VideoEncoderFactory* primary_factory,
VideoEncoderFactory* fallback_factory,
const SdpVideoFormat& format,
const FieldTrialsView& field_trials)
: inited_(0),
primary_encoder_factory_(primary_factory),
fallback_encoder_factory_(fallback_factory),
video_format_(format),
total_streams_count_(0),
bypass_mode_(false),
encoded_complete_callback_(nullptr),
experimental_boosted_screenshare_qp_(
GetScreenshareBoostedQpValue(field_trials)),
boost_base_layer_quality_(
RateControlSettings::ParseFromKeyValueConfig(&field_trials)
.Vp8BoostBaseLayerQuality()),
prefer_temporal_support_on_base_layer_(field_trials.IsEnabled(
"WebRTC-Video-PreferTemporalSupportOnBaseLayer")) {
RTC_DCHECK(primary_factory);
// The adapter is typically created on the worker thread, but operated on
// the encoder task queue.
encoder_queue_.Detach();
}
SimulcastEncoderAdapter::~SimulcastEncoderAdapter() {
RTC_DCHECK(!Initialized());
DestroyStoredEncoders();
}
void SimulcastEncoderAdapter::SetFecControllerOverride(
FecControllerOverride* /*fec_controller_override*/) {
// Ignored.
}
int SimulcastEncoderAdapter::Release() {
RTC_DCHECK_RUN_ON(&encoder_queue_);
while (!stream_contexts_.empty()) {
// Move each encoder instance onto `cached_encoder_contexts_`, from where
// it may later be reused (ordering does not matter).
cached_encoder_contexts_.push_front(
std::move(stream_contexts_.back()).ReleaseEncoderContext());
stream_contexts_.pop_back();
}
bypass_mode_ = false;
// It's legal to move the encoder to another queue now.
encoder_queue_.Detach();
inited_.store(0);
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::InitEncode(
const VideoCodec* codec_settings,
const VideoEncoder::Settings& settings) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
if (settings.number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
int ret = VerifyCodec(codec_settings);
if (ret < 0) {
return ret;
}
Release();
codec_ = *codec_settings;
total_streams_count_ = CountAllStreams(*codec_settings);
bool is_legacy_singlecast = codec_.numberOfSimulcastStreams == 0;
int lowest_quality_stream_idx = 0;
int highest_quality_stream_idx = 0;
if (!is_legacy_singlecast) {
GetLowestAndHighestQualityStreamIndices(
rtc::ArrayView<SimulcastStream>(codec_.simulcastStream,
total_streams_count_),
&lowest_quality_stream_idx, &highest_quality_stream_idx);
}
std::unique_ptr<EncoderContext> encoder_context = FetchOrCreateEncoderContext(
/*is_lowest_quality_stream=*/(
is_legacy_singlecast ||
codec_.simulcastStream[lowest_quality_stream_idx].active));
if (encoder_context == nullptr) {
return WEBRTC_VIDEO_CODEC_MEMORY;
}
// Two distinct scenarios:
// * Singlecast (total_streams_count == 1) or simulcast with a simulcast-capable
// underlying encoder implementation if active_streams_count > 1. SEA
// operates in bypass mode: original settings are passed to the underlying
// encoder and the frame encode complete callback is not intercepted.
// * Multi-encoder simulcast or singlecast if layers are deactivated
// (active_streams_count >= 1). SEA creates N=active_streams_count encoders
// and configures each to produce a single stream.
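// For example (hypothetical configurations): three active streams on a
// simulcast-capable encoder take the bypass path below, while three configured
// streams of which only one is active get a single encoder configured for just
// that stream.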
int active_streams_count = CountActiveStreams(*codec_settings);
// If we only have a single active layer it is better to create an encoder
// with only one configured layer than creating it with all-but-one disabled
// layers because that way we control scaling.
bool separate_encoders_needed =
!encoder_context->encoder().GetEncoderInfo().supports_simulcast ||
active_streams_count == 1;
RTC_LOG(LS_INFO) << "[SEA] InitEncode: total_streams_count: "
<< total_streams_count_
<< ", active_streams_count: " << active_streams_count
<< ", separate_encoders_needed: "
<< (separate_encoders_needed ? "true" : "false");
// Singlecast or simulcast with a simulcast-capable underlying encoder.
if (total_streams_count_ == 1 || !separate_encoders_needed) {
RTC_LOG(LS_INFO) << "[SEA] InitEncode: Single-encoder mode";
int ret = encoder_context->encoder().InitEncode(&codec_, settings);
if (ret >= 0) {
stream_contexts_.emplace_back(
/*parent=*/nullptr, std::move(encoder_context),
/*framerate_controller=*/nullptr, /*stream_idx=*/0, codec_.width,
codec_.height, /*is_paused=*/active_streams_count == 0);
bypass_mode_ = true;
DestroyStoredEncoders();
inited_.store(1);
return WEBRTC_VIDEO_CODEC_OK;
}
encoder_context->Release();
if (total_streams_count_ == 1) {
RTC_LOG(LS_ERROR) << "[SEA] InitEncode: failed with error code: "
<< WebRtcVideoCodecErrorToString(ret);
return ret;
}
}
// Multi-encoder simulcast or singlecast (deactivated layers).
std::vector<uint32_t> stream_start_bitrate_kbps =
GetStreamStartBitratesKbps(codec_);
for (int stream_idx = 0; stream_idx < total_streams_count_; ++stream_idx) {
if (!is_legacy_singlecast && !codec_.simulcastStream[stream_idx].active) {
continue;
}
if (encoder_context == nullptr) {
encoder_context = FetchOrCreateEncoderContext(
/*is_lowest_quality_stream=*/stream_idx == lowest_quality_stream_idx);
}
if (encoder_context == nullptr) {
Release();
return WEBRTC_VIDEO_CODEC_MEMORY;
}
VideoCodec stream_codec = MakeStreamCodec(
codec_, stream_idx, stream_start_bitrate_kbps[stream_idx],
/*is_lowest_quality_stream=*/stream_idx == lowest_quality_stream_idx,
/*is_highest_quality_stream=*/stream_idx == highest_quality_stream_idx);
RTC_LOG(LS_INFO) << "[SEA] Multi-encoder mode: initializing stream: "
<< stream_idx << ", active: "
<< (codec_.simulcastStream[stream_idx].active ? "true"
: "false");
int ret = encoder_context->encoder().InitEncode(&stream_codec, settings);
if (ret < 0) {
encoder_context.reset();
Release();
RTC_LOG(LS_ERROR) << "[SEA] InitEncode: failed with error code: "
<< WebRtcVideoCodecErrorToString(ret);
return ret;
}
// Intercept frame encode complete callback only for upper streams, where
// we need to set a correct stream index. Set `parent` to nullptr for the
// lowest stream to bypass the callback.
SimulcastEncoderAdapter* parent = stream_idx > 0 ? this : nullptr;
bool is_paused = stream_start_bitrate_kbps[stream_idx] == 0;
stream_contexts_.emplace_back(
parent, std::move(encoder_context),
std::make_unique<FramerateController>(stream_codec.maxFramerate),
stream_idx, stream_codec.width, stream_codec.height, is_paused);
}
// To save memory, don't store encoders that we don't use.
DestroyStoredEncoders();
inited_.store(1);
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::Encode(
const VideoFrame& input_image,
const std::vector<VideoFrameType>* frame_types) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
if (!Initialized()) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (encoded_complete_callback_ == nullptr) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (encoder_info_override_.requested_resolution_alignment()) {
const int alignment =
*encoder_info_override_.requested_resolution_alignment();
if (input_image.width() % alignment != 0 ||
input_image.height() % alignment != 0) {
RTC_LOG(LS_WARNING) << "Frame " << input_image.width() << "x"
<< input_image.height() << " not divisible by "
<< alignment;
return WEBRTC_VIDEO_CODEC_ERROR;
}
if (encoder_info_override_.apply_alignment_to_all_simulcast_layers()) {
for (const auto& layer : stream_contexts_) {
if (layer.width() % alignment != 0 || layer.height() % alignment != 0) {
RTC_LOG(LS_WARNING)
<< "Codec " << layer.width() << "x" << layer.height()
<< " not divisible by " << alignment;
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
}
}
bool is_keyframe_needed = false;
for (const auto& layer : stream_contexts_) {
if (layer.is_keyframe_needed()) {
// This is legacy behavior, generating a keyframe on all layers
// when generating one for a layer that became active for the first time
// or after being disabled.
is_keyframe_needed = true;
break;
}
}
// A temporary that may hold the result of texture-to-I420 buffer conversion.
rtc::scoped_refptr<VideoFrameBuffer> src_buffer;
int src_width = input_image.width();
int src_height = input_image.height();
for (auto& layer : stream_contexts_) {
// Don't encode frames in resolutions that we don't intend to send.
if (layer.is_paused()) {
continue;
}
// Convert timestamp from RTP 90kHz clock.
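// For example, an RTP timestamp of 90000 ticks maps to
// 90000 * 1000 / 90 = 1000000 us, i.e. one second.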
const Timestamp frame_timestamp =
Timestamp::Micros((1000 * input_image.timestamp()) / 90);
// If the adapter is in bypass mode and a single simulcast-capable software
// encoder handles all streams, frame types for all streams are passed to that
// encoder unchanged. Otherwise a single per-encoder frame type is passed.
std::vector<VideoFrameType> stream_frame_types(
bypass_mode_
? std::max<unsigned char>(codec_.numberOfSimulcastStreams, 1)
: 1,
VideoFrameType::kVideoFrameDelta);
bool keyframe_requested = false;
if (is_keyframe_needed) {
std::fill(stream_frame_types.begin(), stream_frame_types.end(),
VideoFrameType::kVideoFrameKey);
keyframe_requested = true;
} else if (frame_types) {
if (bypass_mode_) {
// In bypass mode, we effectively pass on frame_types.
RTC_DCHECK_EQ(frame_types->size(), stream_frame_types.size());
stream_frame_types = *frame_types;
keyframe_requested =
absl::c_any_of(*frame_types, [](const VideoFrameType frame_type) {
return frame_type == VideoFrameType::kVideoFrameKey;
});
} else {
size_t stream_idx = static_cast<size_t>(layer.stream_idx());
if (frame_types->size() > stream_idx &&
(*frame_types)[stream_idx] == VideoFrameType::kVideoFrameKey) {
stream_frame_types[0] = VideoFrameType::kVideoFrameKey;
keyframe_requested = true;
}
}
}
if (keyframe_requested) {
layer.OnKeyframe(frame_timestamp);
} else if (layer.ShouldDropFrame(frame_timestamp)) {
continue;
}
// If scaling isn't required, because the input resolution
// matches the destination or the input image is empty (e.g.
// a keyframe request for encoders with internal camera
// sources) or the source image has a native handle, pass the image on
// directly. Otherwise, we'll scale it to match what the encoder expects
// (below).
// For texture frames, the underlying encoder is expected to be able to
// correctly sample/scale the source texture.
// TODO(perkj): ensure that works going forward, and figure out how this
// affects webrtc:5683.
if ((layer.width() == src_width && layer.height() == src_height) ||
(input_image.video_frame_buffer()->type() ==
VideoFrameBuffer::Type::kNative &&
layer.encoder().GetEncoderInfo().supports_native_handle)) {
int ret = layer.encoder().Encode(input_image, &stream_frame_types);
if (ret != WEBRTC_VIDEO_CODEC_OK) {
return ret;
}
} else {
if (src_buffer == nullptr) {
src_buffer = input_image.video_frame_buffer();
}
rtc::scoped_refptr<VideoFrameBuffer> dst_buffer =
src_buffer->Scale(layer.width(), layer.height());
if (!dst_buffer) {
RTC_LOG(LS_ERROR) << "Failed to scale video frame";
return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
}
// UpdateRect is not propagated to lower simulcast layers currently.
// TODO(ilnik): Consider scaling UpdateRect together with the buffer.
VideoFrame frame(input_image);
frame.set_video_frame_buffer(dst_buffer);
frame.set_rotation(webrtc::kVideoRotation_0);
frame.set_update_rect(
VideoFrame::UpdateRect{0, 0, frame.width(), frame.height()});
int ret = layer.encoder().Encode(frame, &stream_frame_types);
if (ret != WEBRTC_VIDEO_CODEC_OK) {
return ret;
}
}
}
return WEBRTC_VIDEO_CODEC_OK;
}
int SimulcastEncoderAdapter::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
encoded_complete_callback_ = callback;
if (!stream_contexts_.empty() && stream_contexts_.front().stream_idx() == 0) {
// Bypass frame encode complete callback for the lowest layer since there is
// no need to override frame's spatial index.
stream_contexts_.front().encoder().RegisterEncodeCompleteCallback(callback);
}
return WEBRTC_VIDEO_CODEC_OK;
}
void SimulcastEncoderAdapter::SetRates(
const RateControlParameters& parameters) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
if (!Initialized()) {
RTC_LOG(LS_WARNING) << "SetRates while not initialized";
return;
}
if (parameters.framerate_fps < 1.0) {
RTC_LOG(LS_WARNING) << "Invalid framerate: " << parameters.framerate_fps;
return;
}
codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
if (bypass_mode_) {
stream_contexts_.front().encoder().SetRates(parameters);
return;
}
for (StreamContext& layer_context : stream_contexts_) {
int stream_idx = layer_context.stream_idx();
uint32_t stream_bitrate_kbps =
parameters.bitrate.GetSpatialLayerSum(stream_idx) / 1000;
// Need a key frame if we have not sent this stream before.
if (stream_bitrate_kbps > 0 && layer_context.is_paused()) {
layer_context.set_is_keyframe_needed();
}
layer_context.set_is_paused(stream_bitrate_kbps == 0);
// Slice the temporal layers out of the full allocation and pass it on to
// the encoder handling the current simulcast stream.
RateControlParameters stream_parameters = parameters;
stream_parameters.bitrate = VideoBitrateAllocation();
for (int i = 0; i < kMaxTemporalStreams; ++i) {
if (parameters.bitrate.HasBitrate(stream_idx, i)) {
stream_parameters.bitrate.SetBitrate(
0, i, parameters.bitrate.GetBitrate(stream_idx, i));
}
}
// Assign link allocation proportionally to spatial layer allocation.
if (!parameters.bandwidth_allocation.IsZero() &&
parameters.bitrate.get_sum_bps() > 0) {
stream_parameters.bandwidth_allocation =
DataRate::BitsPerSec((parameters.bandwidth_allocation.bps() *
stream_parameters.bitrate.get_sum_bps()) /
parameters.bitrate.get_sum_bps());
// Make sure we don't allocate bandwidth lower than target bitrate.
if (stream_parameters.bandwidth_allocation.bps() <
stream_parameters.bitrate.get_sum_bps()) {
stream_parameters.bandwidth_allocation =
DataRate::BitsPerSec(stream_parameters.bitrate.get_sum_bps());
}
}
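// Worked example (hypothetical numbers): with a 1000 kbps total target, a
// 1200 kbps link allocation and a 300 kbps slice for this stream, the
// stream's link allocation becomes 1200 * 300 / 1000 = 360 kbps, which is
// already above the 300 kbps floor enforced above.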
stream_parameters.framerate_fps = std::min<double>(
parameters.framerate_fps,
layer_context.target_fps().value_or(parameters.framerate_fps));
layer_context.encoder().SetRates(stream_parameters);
}
}
void SimulcastEncoderAdapter::OnPacketLossRateUpdate(float packet_loss_rate) {
for (auto& c : stream_contexts_) {
c.encoder().OnPacketLossRateUpdate(packet_loss_rate);
}
}
void SimulcastEncoderAdapter::OnRttUpdate(int64_t rtt_ms) {
for (auto& c : stream_contexts_) {
c.encoder().OnRttUpdate(rtt_ms);
}
}
void SimulcastEncoderAdapter::OnLossNotification(
const LossNotification& loss_notification) {
for (auto& c : stream_contexts_) {
c.encoder().OnLossNotification(loss_notification);
}
}
// TODO(brandtr): Add task checker to this member function, when all encoder
// callbacks are coming in on the encoder queue.
EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
size_t stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo) {
EncodedImage stream_image(encodedImage);
CodecSpecificInfo stream_codec_specific = *codecSpecificInfo;
stream_image.SetSimulcastIndex(stream_idx);
return encoded_complete_callback_->OnEncodedImage(stream_image,
&stream_codec_specific);
}
void SimulcastEncoderAdapter::OnDroppedFrame(size_t stream_idx) {
// Not yet implemented.
}
bool SimulcastEncoderAdapter::Initialized() const {
return inited_.load() == 1;
}
void SimulcastEncoderAdapter::DestroyStoredEncoders() {
while (!cached_encoder_contexts_.empty()) {
cached_encoder_contexts_.pop_back();
}
}
std::unique_ptr<SimulcastEncoderAdapter::EncoderContext>
SimulcastEncoderAdapter::FetchOrCreateEncoderContext(
bool is_lowest_quality_stream) const {
bool prefer_temporal_support = fallback_encoder_factory_ != nullptr &&
is_lowest_quality_stream &&
prefer_temporal_support_on_base_layer_;
// Toggling of `prefer_temporal_support` requires encoder recreation. Find
// and reuse an encoder with the desired `prefer_temporal_support`; if there
// is no such encoder in the cache, create a new instance.
auto encoder_context_iter =
std::find_if(cached_encoder_contexts_.begin(),
cached_encoder_contexts_.end(), [&](auto& encoder_context) {
return encoder_context->prefer_temporal_support() ==
prefer_temporal_support;
});
std::unique_ptr<SimulcastEncoderAdapter::EncoderContext> encoder_context;
if (encoder_context_iter != cached_encoder_contexts_.end()) {
encoder_context = std::move(*encoder_context_iter);
cached_encoder_contexts_.erase(encoder_context_iter);
} else {
std::unique_ptr<VideoEncoder> primary_encoder =
primary_encoder_factory_->CreateVideoEncoder(video_format_);
std::unique_ptr<VideoEncoder> fallback_encoder;
if (fallback_encoder_factory_ != nullptr) {
fallback_encoder =
fallback_encoder_factory_->CreateVideoEncoder(video_format_);
}
std::unique_ptr<VideoEncoder> encoder;
VideoEncoder::EncoderInfo primary_info;
VideoEncoder::EncoderInfo fallback_info;
if (primary_encoder != nullptr) {
primary_info = primary_encoder->GetEncoderInfo();
fallback_info = primary_info;
if (fallback_encoder == nullptr) {
encoder = std::move(primary_encoder);
} else {
encoder = CreateVideoEncoderSoftwareFallbackWrapper(
std::move(fallback_encoder), std::move(primary_encoder),
prefer_temporal_support);
}
} else if (fallback_encoder != nullptr) {
RTC_LOG(LS_WARNING) << "Failed to create primary " << video_format_.name
<< " encoder. Use fallback encoder.";
fallback_info = fallback_encoder->GetEncoderInfo();
primary_info = fallback_info;
encoder = std::move(fallback_encoder);
} else {
RTC_LOG(LS_ERROR) << "Failed to create primary and fallback "
<< video_format_.name << " encoders.";
return nullptr;
}
encoder_context = std::make_unique<SimulcastEncoderAdapter::EncoderContext>(
std::move(encoder), prefer_temporal_support, primary_info,
fallback_info);
}
encoder_context->encoder().RegisterEncodeCompleteCallback(
encoded_complete_callback_);
return encoder_context;
}
webrtc::VideoCodec SimulcastEncoderAdapter::MakeStreamCodec(
const webrtc::VideoCodec& codec,
int stream_idx,
uint32_t start_bitrate_kbps,
bool is_lowest_quality_stream,
bool is_highest_quality_stream) {
webrtc::VideoCodec codec_params = codec;
const SimulcastStream& stream_params = codec.simulcastStream[stream_idx];
codec_params.numberOfSimulcastStreams = 0;
codec_params.width = stream_params.width;
codec_params.height = stream_params.height;
codec_params.maxBitrate = stream_params.maxBitrate;
codec_params.minBitrate = stream_params.minBitrate;
codec_params.maxFramerate = stream_params.maxFramerate;
codec_params.qpMax = stream_params.qpMax;
codec_params.active = stream_params.active;
// By default, `scalability_mode` comes from SimulcastStream when
// SimulcastEncoderAdapter is used. This allows multiple encodings of L1Tx,
// but SimulcastStream currently does not support multiple spatial layers.
ScalabilityMode scalability_mode = stream_params.GetScalabilityMode();
// To support the full set of scalability modes in the event that this is the
// only active encoding, prefer VideoCodec::GetScalabilityMode() if all other
// encodings are inactive.
if (codec.GetScalabilityMode().has_value()) {
bool only_active_stream = true;
for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
if (i != stream_idx && codec.simulcastStream[i].active) {
only_active_stream = false;
break;
}
}
if (only_active_stream) {
scalability_mode = codec.GetScalabilityMode().value();
}
}
codec_params.SetScalabilityMode(scalability_mode);
// Settings that are based on stream/resolution.
if (is_lowest_quality_stream) {
// Settings for lowest spatial resolutions.
if (codec.mode == VideoCodecMode::kScreensharing) {
if (experimental_boosted_screenshare_qp_) {
codec_params.qpMax = *experimental_boosted_screenshare_qp_;
}
} else if (boost_base_layer_quality_) {
codec_params.qpMax = kLowestResMaxQp;
}
}
if (codec.codecType == webrtc::kVideoCodecVP8) {
codec_params.VP8()->numberOfTemporalLayers =
stream_params.numberOfTemporalLayers;
if (!is_highest_quality_stream) {
// For resolutions below CIF, set the codec `complexity` parameter to
// kComplexityHigher, which maps to cpu_used = -4.
int pixels_per_frame = codec_params.width * codec_params.height;
if (pixels_per_frame < 352 * 288) {
codec_params.SetVideoEncoderComplexity(
webrtc::VideoCodecComplexity::kComplexityHigher);
}
// Turn off denoising for all streams but the highest resolution.
codec_params.VP8()->denoisingOn = false;
}
} else if (codec.codecType == webrtc::kVideoCodecH264) {
codec_params.H264()->numberOfTemporalLayers =
stream_params.numberOfTemporalLayers;
}
// Make sure the start bitrate is not below the stream's min bitrate, to
// avoid strange codec behavior.
codec_params.startBitrate =
std::max(stream_params.minBitrate, start_bitrate_kbps);
// Legacy screenshare mode is only enabled for the first simulcast layer
codec_params.legacy_conference_mode =
codec.legacy_conference_mode && stream_idx == 0;
return codec_params;
}
void SimulcastEncoderAdapter::OverrideFromFieldTrial(
VideoEncoder::EncoderInfo* info) const {
if (encoder_info_override_.requested_resolution_alignment()) {
info->requested_resolution_alignment = cricket::LeastCommonMultiple(
info->requested_resolution_alignment,
*encoder_info_override_.requested_resolution_alignment());
info->apply_alignment_to_all_simulcast_layers =
info->apply_alignment_to_all_simulcast_layers ||
encoder_info_override_.apply_alignment_to_all_simulcast_layers();
}
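// For example (hypothetical values): an encoder alignment of 4 merged with a
// field-trial alignment of 6 yields their least common multiple, 12.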
// Override resolution bitrate limits unless they're set already.
if (info->resolution_bitrate_limits.empty() &&
!encoder_info_override_.resolution_bitrate_limits().empty()) {
info->resolution_bitrate_limits =
encoder_info_override_.resolution_bitrate_limits();
}
}
VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const {
if (stream_contexts_.size() == 1) {
// Not using simulcast adapting functionality, just pass through.
VideoEncoder::EncoderInfo info =
stream_contexts_.front().encoder().GetEncoderInfo();
OverrideFromFieldTrial(&info);
return info;
}
VideoEncoder::EncoderInfo encoder_info;
encoder_info.implementation_name = "SimulcastEncoderAdapter";
encoder_info.requested_resolution_alignment = 1;
encoder_info.apply_alignment_to_all_simulcast_layers = false;
encoder_info.supports_native_handle = true;
encoder_info.scaling_settings.thresholds = absl::nullopt;
if (stream_contexts_.empty()) {
// GetEncoderInfo was queried before InitEncode, so only the alignment info
// needs to be filled in.
// Create one encoder and query it.
std::unique_ptr<SimulcastEncoderAdapter::EncoderContext> encoder_context =
FetchOrCreateEncoderContext(/*is_lowest_quality_stream=*/true);
if (encoder_context == nullptr) {
return encoder_info;
}
const VideoEncoder::EncoderInfo& primary_info =
encoder_context->PrimaryInfo();
const VideoEncoder::EncoderInfo& fallback_info =
encoder_context->FallbackInfo();
encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
primary_info.requested_resolution_alignment,
fallback_info.requested_resolution_alignment);
encoder_info.apply_alignment_to_all_simulcast_layers =
primary_info.apply_alignment_to_all_simulcast_layers ||
fallback_info.apply_alignment_to_all_simulcast_layers;
if (!primary_info.supports_simulcast || !fallback_info.supports_simulcast) {
encoder_info.apply_alignment_to_all_simulcast_layers = true;
}
cached_encoder_contexts_.emplace_back(std::move(encoder_context));
OverrideFromFieldTrial(&encoder_info);
return encoder_info;
}
encoder_info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
for (size_t i = 0; i < stream_contexts_.size(); ++i) {
VideoEncoder::EncoderInfo encoder_impl_info =
stream_contexts_[i].encoder().GetEncoderInfo();
if (i == 0) {
// The implementation name lists the names of all sub-encoders.
encoder_info.implementation_name += " (";
encoder_info.implementation_name += encoder_impl_info.implementation_name;
encoder_info.supports_native_handle =
encoder_impl_info.supports_native_handle;
encoder_info.has_trusted_rate_controller =
encoder_impl_info.has_trusted_rate_controller;
encoder_info.is_hardware_accelerated =
encoder_impl_info.is_hardware_accelerated;
encoder_info.is_qp_trusted = encoder_impl_info.is_qp_trusted;
} else {
encoder_info.implementation_name += ", ";
encoder_info.implementation_name += encoder_impl_info.implementation_name;
// Native handle supported if any encoder supports it.
encoder_info.supports_native_handle |=
encoder_impl_info.supports_native_handle;
// Trusted rate controller only if all encoders have it.
encoder_info.has_trusted_rate_controller &=
encoder_impl_info.has_trusted_rate_controller;
// Uses hardware support if any of the encoders uses it.
// For example, if we are having issues with down-scaling due to
// pipelining delay in HW encoders, we need higher encoder usage
// thresholds in CPU adaptation.
encoder_info.is_hardware_accelerated |=
encoder_impl_info.is_hardware_accelerated;
// Treat QP from frame/slice/tile header as average QP only if all
// encoders report it as average QP.
encoder_info.is_qp_trusted =
encoder_info.is_qp_trusted.value_or(true) &&
encoder_impl_info.is_qp_trusted.value_or(true);
}
encoder_info.fps_allocation[i] = encoder_impl_info.fps_allocation[0];
encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
encoder_info.requested_resolution_alignment,
encoder_impl_info.requested_resolution_alignment);
// Request alignment on all layers if any of the encoders may need it, or
// if any non-top-layer encoder requests a non-trivial alignment.
if (encoder_impl_info.apply_alignment_to_all_simulcast_layers ||
(encoder_impl_info.requested_resolution_alignment > 1 &&
(codec_.simulcastStream[i].height < codec_.height ||
codec_.simulcastStream[i].width < codec_.width))) {
encoder_info.apply_alignment_to_all_simulcast_layers = true;
}
}
encoder_info.implementation_name += ")";
OverrideFromFieldTrial(&encoder_info);
return encoder_info;
}
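// Illustrative example of the aggregation rules above (an editorial sketch,
// not part of the original file): with three sub-encoders reporting
//   requested_resolution_alignment = {2, 16, 4},
//   has_trusted_rate_controller    = {true, true, false},
//   is_hardware_accelerated        = {false, true, false},
// the adapter reports an alignment of lcm(2, 16, 4) = 16, a rate controller
// that is not trusted (all sub-encoders must be trusted), and
// is_hardware_accelerated = true (any hardware sub-encoder suffices).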
} // namespace webrtc


@ -0,0 +1,200 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
#define MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
#include <atomic>
#include <list>
#include <memory>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/field_trials_view.h"
#include "api/sequence_checker.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "common_video/framerate_controller.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/experiments/encoder_info_settings.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// SimulcastEncoderAdapter implements simulcast support by creating multiple
// webrtc::VideoEncoder instances with the given VideoEncoderFactory.
// The object is created and destroyed on the worker thread, but all public
// interfaces should be called from the encoder task queue.
class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder {
public:
// TODO(bugs.webrtc.org/11000): Remove when downstream usage is gone.
SimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
const SdpVideoFormat& format);
// `primary_factory` produces the first-choice encoders to use.
// `fallback_factory`, if non-null, is used to create fallback encoder that
// will be used if InitEncode() fails for the primary encoder.
SimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
VideoEncoderFactory* fallback_factory,
const SdpVideoFormat& format,
const FieldTrialsView& field_trials);
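// A minimal construction sketch (illustrative only; `primary_factory` here
// is an assumed std::unique_ptr<VideoEncoderFactory>, and the field-trial
// object is an assumption as well):
//
//   webrtc::FieldTrialBasedConfig field_trials;
//   auto adapter = std::make_unique<webrtc::SimulcastEncoderAdapter>(
//       primary_factory.get(), /*fallback_factory=*/nullptr,
//       webrtc::SdpVideoFormat("VP8"), field_trials);
//
// InitEncode() then creates one sub-encoder per configured simulcast stream.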
~SimulcastEncoderAdapter() override;
// Implements VideoEncoder.
void SetFecControllerOverride(
FecControllerOverride* fec_controller_override) override;
int Release() override;
int InitEncode(const VideoCodec* codec_settings,
const VideoEncoder::Settings& settings) override;
int Encode(const VideoFrame& input_image,
const std::vector<VideoFrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
void SetRates(const RateControlParameters& parameters) override;
void OnPacketLossRateUpdate(float packet_loss_rate) override;
void OnRttUpdate(int64_t rtt_ms) override;
void OnLossNotification(const LossNotification& loss_notification) override;
EncoderInfo GetEncoderInfo() const override;
private:
class EncoderContext {
public:
EncoderContext(std::unique_ptr<VideoEncoder> encoder,
bool prefer_temporal_support,
VideoEncoder::EncoderInfo primary_info,
VideoEncoder::EncoderInfo fallback_info);
EncoderContext& operator=(EncoderContext&&) = delete;
VideoEncoder& encoder() { return *encoder_; }
bool prefer_temporal_support() { return prefer_temporal_support_; }
void Release();
const VideoEncoder::EncoderInfo& PrimaryInfo() { return primary_info_; }
const VideoEncoder::EncoderInfo& FallbackInfo() { return fallback_info_; }
private:
std::unique_ptr<VideoEncoder> encoder_;
bool prefer_temporal_support_;
const VideoEncoder::EncoderInfo primary_info_;
const VideoEncoder::EncoderInfo fallback_info_;
};
class StreamContext : public EncodedImageCallback {
public:
StreamContext(SimulcastEncoderAdapter* parent,
std::unique_ptr<EncoderContext> encoder_context,
std::unique_ptr<FramerateController> framerate_controller,
int stream_idx,
uint16_t width,
uint16_t height,
bool send_stream);
StreamContext(StreamContext&& rhs);
StreamContext& operator=(StreamContext&&) = delete;
~StreamContext() override;
Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info) override;
void OnDroppedFrame(DropReason reason) override;
VideoEncoder& encoder() { return encoder_context_->encoder(); }
const VideoEncoder& encoder() const { return encoder_context_->encoder(); }
int stream_idx() const { return stream_idx_; }
uint16_t width() const { return width_; }
uint16_t height() const { return height_; }
bool is_keyframe_needed() const {
return !is_paused_ && is_keyframe_needed_;
}
void set_is_keyframe_needed() { is_keyframe_needed_ = true; }
bool is_paused() const { return is_paused_; }
void set_is_paused(bool is_paused) { is_paused_ = is_paused; }
absl::optional<double> target_fps() const {
return framerate_controller_ == nullptr
? absl::nullopt
: absl::optional<double>(
framerate_controller_->GetMaxFramerate());
}
std::unique_ptr<EncoderContext> ReleaseEncoderContext() &&;
void OnKeyframe(Timestamp timestamp);
bool ShouldDropFrame(Timestamp timestamp);
private:
SimulcastEncoderAdapter* const parent_;
std::unique_ptr<EncoderContext> encoder_context_;
std::unique_ptr<FramerateController> framerate_controller_;
const int stream_idx_;
const uint16_t width_;
const uint16_t height_;
bool is_keyframe_needed_;
bool is_paused_;
};
bool Initialized() const;
void DestroyStoredEncoders();
// This method creates an encoder. It may reuse previously created encoders
// from `cached_encoder_contexts_`. It's const because it's called from the
// const GetEncoderInfo().
std::unique_ptr<EncoderContext> FetchOrCreateEncoderContext(
bool is_lowest_quality_stream) const;
webrtc::VideoCodec MakeStreamCodec(const webrtc::VideoCodec& codec,
int stream_idx,
uint32_t start_bitrate_kbps,
bool is_lowest_quality_stream,
bool is_highest_quality_stream);
EncodedImageCallback::Result OnEncodedImage(
size_t stream_idx,
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info);
void OnDroppedFrame(size_t stream_idx);
void OverrideFromFieldTrial(VideoEncoder::EncoderInfo* info) const;
std::atomic<int> inited_;
VideoEncoderFactory* const primary_encoder_factory_;
VideoEncoderFactory* const fallback_encoder_factory_;
const SdpVideoFormat video_format_;
VideoCodec codec_;
int total_streams_count_;
bool bypass_mode_;
std::vector<StreamContext> stream_contexts_;
EncodedImageCallback* encoded_complete_callback_;
// Used for checking the single-threaded access of the encoder interface.
RTC_NO_UNIQUE_ADDRESS SequenceChecker encoder_queue_;
// Store previously created and released encoders, so they don't have to be
// recreated. Remaining encoders are destroyed by the destructor.
// Marked as `mutable` because we may need to temporarily create an encoder
// in GetEncoderInfo(), which is const.
mutable std::list<std::unique_ptr<EncoderContext>> cached_encoder_contexts_;
const absl::optional<unsigned int> experimental_boosted_screenshare_qp_;
const bool boost_base_layer_quality_;
const bool prefer_temporal_support_on_base_layer_;
const SimulcastEncoderAdapterEncoderInfoSettings encoder_info_override_;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_


@ -0,0 +1,185 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/webrtc_media_engine.h"
#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "media/base/media_constants.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace cricket {
namespace {
// Remove mutually exclusive extensions with lower priority.
void DiscardRedundantExtensions(
std::vector<webrtc::RtpExtension>* extensions,
rtc::ArrayView<const char* const> extensions_decreasing_prio) {
RTC_DCHECK(extensions);
bool found = false;
for (const char* uri : extensions_decreasing_prio) {
auto it = absl::c_find_if(
*extensions,
[uri](const webrtc::RtpExtension& rhs) { return rhs.uri == uri; });
if (it != extensions->end()) {
if (found) {
extensions->erase(it);
}
found = true;
}
}
}
} // namespace
bool ValidateRtpExtensions(
rtc::ArrayView<const webrtc::RtpExtension> extensions,
rtc::ArrayView<const webrtc::RtpExtension> old_extensions) {
bool id_used[1 + webrtc::RtpExtension::kMaxId] = {false};
for (const auto& extension : extensions) {
if (extension.id < webrtc::RtpExtension::kMinId ||
extension.id > webrtc::RtpExtension::kMaxId) {
RTC_LOG(LS_ERROR) << "Bad RTP extension ID: " << extension.ToString();
return false;
}
if (id_used[extension.id]) {
RTC_LOG(LS_ERROR) << "Duplicate RTP extension ID: "
<< extension.ToString();
return false;
}
id_used[extension.id] = true;
}
// Validate the extension list against the already negotiated extensions.
// Re-registering is OK, re-mapping (either same URL at new ID or same
// ID used with new URL) is an illegal remap.
// This is required in order to avoid a crash when registering an
// extension. A better structure would use the registered extensions
// in the RTPSender. This requires spinning through:
//
// WebRtcVoiceMediaChannel::WebRtcAudioSendStream::stream_ (pointer)
// AudioSendStream::rtp_rtcp_module_ (pointer)
// ModuleRtpRtcpImpl2::rtp_sender_ (pointer)
// RtpSenderContext::packet_generator (struct member)
// RTPSender::rtp_header_extension_map_ (class member)
//
// Getting at this seems like a hard slog.
if (!old_extensions.empty()) {
absl::string_view urimap[1 + webrtc::RtpExtension::kMaxId];
std::map<absl::string_view, int> idmap;
for (const auto& old_extension : old_extensions) {
urimap[old_extension.id] = old_extension.uri;
idmap[old_extension.uri] = old_extension.id;
}
for (const auto& extension : extensions) {
if (!urimap[extension.id].empty() &&
urimap[extension.id] != extension.uri) {
RTC_LOG(LS_ERROR) << "Extension negotiation failure: " << extension.id
<< " was mapped to " << urimap[extension.id]
<< " but is proposed changed to " << extension.uri;
return false;
}
const auto& it = idmap.find(extension.uri);
if (it != idmap.end() && it->second != extension.id) {
RTC_LOG(LS_ERROR) << "Extension negotation failure: " << extension.uri
<< " was identified by " << it->second
<< " but is proposed changed to " << extension.id;
return false;
}
}
}
return true;
}
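// Illustrative example (an editorial note, not part of the original file):
// if the already negotiated extensions contain
// {uri: "urn:ietf:params:rtp-hdrext:toffset", id: 2}, then proposing the
// same uri under id 3, or id 2 with a different uri, fails the validation
// above, while re-proposing the identical (uri, id) pair is accepted.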
std::vector<webrtc::RtpExtension> FilterRtpExtensions(
const std::vector<webrtc::RtpExtension>& extensions,
bool (*supported)(absl::string_view),
bool filter_redundant_extensions,
const webrtc::FieldTrialsView& trials) {
// Don't check against old parameters; this should have been done earlier.
RTC_DCHECK(ValidateRtpExtensions(extensions, {}));
RTC_DCHECK(supported);
std::vector<webrtc::RtpExtension> result;
// Ignore any extensions that we don't recognize.
for (const auto& extension : extensions) {
if (supported(extension.uri)) {
result.push_back(extension);
} else {
RTC_LOG(LS_WARNING) << "Unsupported RTP extension: "
<< extension.ToString();
}
}
// Sort by name, ascending (prioritise encryption), so that we don't reset
// extensions if they were specified in a different order (also allows us
// to use std::unique below).
absl::c_sort(result, [](const webrtc::RtpExtension& rhs,
const webrtc::RtpExtension& lhs) {
return rhs.encrypt == lhs.encrypt ? rhs.uri < lhs.uri
: rhs.encrypt > lhs.encrypt;
});
// Remove unnecessary extensions (used on send side).
if (filter_redundant_extensions) {
auto it = std::unique(
result.begin(), result.end(),
[](const webrtc::RtpExtension& rhs, const webrtc::RtpExtension& lhs) {
return rhs.uri == lhs.uri && rhs.encrypt == lhs.encrypt;
});
result.erase(it, result.end());
// Keep just the highest-priority extension of any in the following lists.
if (absl::StartsWith(trials.Lookup("WebRTC-FilterAbsSendTimeExtension"),
"Enabled")) {
static const char* const kBweExtensionPriorities[] = {
webrtc::RtpExtension::kTransportSequenceNumberUri,
webrtc::RtpExtension::kAbsSendTimeUri,
webrtc::RtpExtension::kTimestampOffsetUri};
DiscardRedundantExtensions(&result, kBweExtensionPriorities);
} else {
static const char* const kBweExtensionPriorities[] = {
webrtc::RtpExtension::kAbsSendTimeUri,
webrtc::RtpExtension::kTimestampOffsetUri};
DiscardRedundantExtensions(&result, kBweExtensionPriorities);
}
}
return result;
}
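// Illustrative example (an editorial note, not part of the original file):
// with the "WebRTC-FilterAbsSendTimeExtension" trial enabled and a filtered
// list containing both transport-wide sequence numbers and abs-send-time,
// only the transport-wide sequence number extension is kept; without the
// trial, abs-send-time is kept and only timestamp-offset is discarded.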
webrtc::BitrateConstraints GetBitrateConfigForCodec(const Codec& codec) {
webrtc::BitrateConstraints config;
int bitrate_kbps = 0;
if (codec.GetParam(kCodecParamMinBitrate, &bitrate_kbps) &&
bitrate_kbps > 0) {
config.min_bitrate_bps = bitrate_kbps * 1000;
} else {
config.min_bitrate_bps = 0;
}
if (codec.GetParam(kCodecParamStartBitrate, &bitrate_kbps) &&
bitrate_kbps > 0) {
config.start_bitrate_bps = bitrate_kbps * 1000;
} else {
// Do not reconfigure start bitrate unless it's specified and positive.
config.start_bitrate_bps = -1;
}
if (codec.GetParam(kCodecParamMaxBitrate, &bitrate_kbps) &&
bitrate_kbps > 0) {
config.max_bitrate_bps = bitrate_kbps * 1000;
} else {
config.max_bitrate_bps = -1;
}
return config;
}
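// Illustrative example (an editorial note, not part of the original file):
// a codec carrying "x-google-min-bitrate=30" and "x-google-start-bitrate=300"
// but no max bitrate parameter yields {min_bitrate_bps = 30000,
// start_bitrate_bps = 300000, max_bitrate_bps = -1}, where -1 means the
// call-wide defaults are left untouched.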
} // namespace cricket


@ -0,0 +1,45 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
#define MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
#include <vector>
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/field_trials_view.h"
#include "api/rtp_parameters.h"
#include "api/transport/bitrate_settings.h"
#include "media/base/codec.h"
namespace cricket {
// Verify that extension IDs are within 1-byte extension range and are not
// overlapping, and that they form a legal change from previously registered
// extensions (if any).
bool ValidateRtpExtensions(
rtc::ArrayView<const webrtc::RtpExtension> extensions,
rtc::ArrayView<const webrtc::RtpExtension> old_extensions);
// Discard any extensions not validated by the 'supported' predicate.
// Duplicate extensions are removed if 'filter_redundant_extensions' is set,
// and mutually exclusive extensions (see implementation for details) are
// removed as well.
std::vector<webrtc::RtpExtension> FilterRtpExtensions(
const std::vector<webrtc::RtpExtension>& extensions,
bool (*supported)(absl::string_view),
bool filter_redundant_extensions,
const webrtc::FieldTrialsView& trials);
webrtc::BitrateConstraints GetBitrateConfigForCodec(const Codec& codec);
} // namespace cricket
#endif // MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_

File diff suppressed because it is too large


@ -0,0 +1,901 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
#define MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
#include <stddef.h>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/field_trials_view.h"
#include "api/frame_transformer_interface.h"
#include "api/rtc_error.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/transport/bitrate_settings.h"
#include "api/transport/field_trial_based_config.h"
#include "api/transport/rtp/rtp_source.h"
#include "api/video/recordable_encoded_frame.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "call/call.h"
#include "call/flexfec_receive_stream.h"
#include "call/rtp_config.h"
#include "call/video_receive_stream.h"
#include "call/video_send_stream.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_config.h"
#include "media/base/media_engine.h"
#include "media/base/stream_params.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "video/config/video_encoder_config.h"
namespace webrtc {
class VideoDecoderFactory;
class VideoEncoderFactory;
} // namespace webrtc
namespace cricket {
// Public for testing.
// Inputs StreamStats for all types of substreams (kMedia, kRtx, kFlexfec) and
// merges any non-kMedia substream stats object into its referenced kMedia-type
// substream. The resulting substreams are all kMedia. This means, for example,
// that packet and byte counters of RTX and FlexFEC streams are accounted for in
// the relevant RTP media stream's stats. This makes the resulting StreamStats
// objects ready to be turned into "outbound-rtp" stats objects for GetStats()
// which does not create separate stream stats objects for complementary
// streams.
std::map<uint32_t, webrtc::VideoSendStream::StreamStats>
MergeInfoAboutOutboundRtpSubstreamsForTesting(
const std::map<uint32_t, webrtc::VideoSendStream::StreamStats>& substreams);
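// Illustrative example (an editorial note, not part of the original file):
// given a kMedia substream (ssrc 1, 100 packets) and its kRtx substream
// (ssrc 2, 20 packets), the merged result contains a single kMedia entry
// for ssrc 1 with 120 packets, which is the shape expected by the
// "outbound-rtp" stats objects.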
// WebRtcVideoEngine is used for the new native WebRTC Video API (webrtc:1667).
class WebRtcVideoEngine : public VideoEngineInterface {
public:
// These video codec factories represent all video codecs, i.e. both software
// and external hardware codecs.
WebRtcVideoEngine(
std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory,
std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory,
const webrtc::FieldTrialsView& trials);
~WebRtcVideoEngine() override;
std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
override;
std::unique_ptr<VideoMediaReceiveChannelInterface> CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options) override;
std::vector<VideoCodec> send_codecs() const override {
return send_codecs(true);
}
std::vector<VideoCodec> recv_codecs() const override {
return recv_codecs(true);
}
std::vector<VideoCodec> send_codecs(bool include_rtx) const override;
std::vector<VideoCodec> recv_codecs(bool include_rtx) const override;
std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
const override;
private:
const std::unique_ptr<webrtc::VideoDecoderFactory> decoder_factory_;
const std::unique_ptr<webrtc::VideoEncoderFactory> encoder_factory_;
const std::unique_ptr<webrtc::VideoBitrateAllocatorFactory>
bitrate_allocator_factory_;
const webrtc::FieldTrialsView& trials_;
};
struct VideoCodecSettings {
explicit VideoCodecSettings(const VideoCodec& codec);
// Checks if all members of `*this` are equal to the corresponding members
// of `other`.
bool operator==(const VideoCodecSettings& other) const;
bool operator!=(const VideoCodecSettings& other) const;
// Checks if all members of `a`, except `flexfec_payload_type`, are equal
// to the corresponding members of `b`.
static bool EqualsDisregardingFlexfec(const VideoCodecSettings& a,
const VideoCodecSettings& b);
VideoCodec codec;
webrtc::UlpfecConfig ulpfec;
int flexfec_payload_type; // -1 if absent.
int rtx_payload_type; // -1 if absent.
absl::optional<int> rtx_time;
};
class WebRtcVideoSendChannel : public MediaChannelUtil,
public VideoMediaSendChannelInterface,
public webrtc::EncoderSwitchRequestCallback {
public:
WebRtcVideoSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoEncoderFactory* encoder_factory,
webrtc::VideoDecoderFactory* decoder_factory,
webrtc::VideoBitrateAllocatorFactory* bitrate_allocator_factory);
~WebRtcVideoSendChannel() override;
MediaType media_type() const override { return MEDIA_TYPE_VIDEO; }
// Type manipulations
VideoMediaSendChannelInterface* AsVideoSendChannel() override { return this; }
VoiceMediaSendChannelInterface* AsVoiceSendChannel() override {
RTC_CHECK_NOTREACHED();
return nullptr;
}
// Functions imported from MediaChannelUtil
bool HasNetworkInterface() const override {
return MediaChannelUtil::HasNetworkInterface();
}
void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
}
bool ExtmapAllowMixed() const override {
return MediaChannelUtil::ExtmapAllowMixed();
}
// Common functions between sender and receiver
void SetInterface(MediaChannelNetworkInterface* iface) override;
// VideoMediaSendChannelInterface implementation
bool SetSenderParameters(const VideoSenderParameters& params) override;
webrtc::RTCError SetRtpSendParameters(
uint32_t ssrc,
const webrtc::RtpParameters& parameters,
webrtc::SetParametersCallback callback) override;
webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override;
absl::optional<Codec> GetSendCodec() const override;
bool SetSend(bool send) override;
bool SetVideoSend(
uint32_t ssrc,
const VideoOptions* options,
rtc::VideoSourceInterface<webrtc::VideoFrame>* source) override;
bool AddSendStream(const StreamParams& sp) override;
bool RemoveSendStream(uint32_t ssrc) override;
void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) override;
bool GetStats(VideoMediaSendInfo* info) override;
void OnPacketSent(const rtc::SentPacket& sent_packet) override;
void OnReadyToSend(bool ready) override;
void OnNetworkRouteChanged(absl::string_view transport_name,
const rtc::NetworkRoute& network_route) override;
// Set a frame encryptor to a particular ssrc that will intercept all
// outgoing video frames and attempt to encrypt them and forward the result
// to the packetizer.
void SetFrameEncryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
frame_encryptor) override;
// Note: The encoder_selector object must remain valid for the lifetime of
// the MediaChannel, unless replaced.
void SetEncoderSelector(uint32_t ssrc,
webrtc::VideoEncoderFactory::EncoderSelectorInterface*
encoder_selector) override;
void SetSendCodecChangedCallback(
absl::AnyInvocable<void()> callback) override {
send_codec_changed_callback_ = std::move(callback);
}
void SetSsrcListChangedCallback(
absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {
ssrc_list_changed_callback_ = std::move(callback);
}
// Implemented for VideoMediaChannelTest.
bool sending() const {
RTC_DCHECK_RUN_ON(&thread_checker_);
return sending_;
}
// AdaptReason is used for expressing why a WebRtcVideoSendStream requests
// a lower input frame size than the currently configured camera input frame
// size. There can be more than one reason, OR:ed together.
enum AdaptReason {
ADAPTREASON_NONE = 0,
ADAPTREASON_CPU = 1,
ADAPTREASON_BANDWIDTH = 2,
};
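// Illustrative example (an editorial note, not part of the original file):
// a stream downscaled for both CPU and bandwidth reasons reports
// ADAPTREASON_CPU | ADAPTREASON_BANDWIDTH, i.e. the value 3.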
// Implements webrtc::EncoderSwitchRequestCallback.
void RequestEncoderFallback() override;
void RequestEncoderSwitch(const webrtc::SdpVideoFormat& format,
bool allow_default_fallback) override;
void GenerateSendKeyFrame(uint32_t ssrc,
const std::vector<std::string>& rids) override;
void SetEncoderToPacketizerFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override;
// Information queries to support SetReceiverFeedbackParameters
webrtc::RtcpMode SendCodecRtcpMode() const override {
RTC_DCHECK_RUN_ON(&thread_checker_);
return send_params_.rtcp.reduced_size ? webrtc::RtcpMode::kReducedSize
: webrtc::RtcpMode::kCompound;
}
bool SendCodecHasLntf() const override {
RTC_DCHECK_RUN_ON(&thread_checker_);
if (!send_codec()) {
return false;
}
return HasLntf(send_codec()->codec);
}
bool SendCodecHasNack() const override {
RTC_DCHECK_RUN_ON(&thread_checker_);
if (!send_codec()) {
return false;
}
return HasNack(send_codec()->codec);
}
absl::optional<int> SendCodecRtxTime() const override {
RTC_DCHECK_RUN_ON(&thread_checker_);
if (!send_codec()) {
return absl::nullopt;
}
return send_codec()->rtx_time;
}
private:
struct ChangedSenderParameters {
// These optionals are unset if not changed.
absl::optional<VideoCodecSettings> send_codec;
absl::optional<std::vector<VideoCodecSettings>> negotiated_codecs;
absl::optional<std::vector<webrtc::RtpExtension>> rtp_header_extensions;
absl::optional<std::string> mid;
absl::optional<bool> extmap_allow_mixed;
absl::optional<int> max_bandwidth_bps;
absl::optional<bool> conference_mode;
absl::optional<webrtc::RtcpMode> rtcp_mode;
};
bool GetChangedSenderParameters(const VideoSenderParameters& params,
ChangedSenderParameters* changed_params) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
bool ApplyChangedParams(const ChangedSenderParameters& changed_params);
bool ValidateSendSsrcAvailability(const StreamParams& sp) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
// Populates `rtx_associated_payload_types`, `raw_payload_types` and
// `decoders` based on codec settings provided by `recv_codecs`.
// `recv_codecs` must be non-empty and all other parameters must be empty.
static void ExtractCodecInformation(
rtc::ArrayView<const VideoCodecSettings> recv_codecs,
std::map<int, int>& rtx_associated_payload_types,
std::set<int>& raw_payload_types,
std::vector<webrtc::VideoReceiveStreamInterface::Decoder>& decoders);
// Wrapper for the sender part.
class WebRtcVideoSendStream {
public:
WebRtcVideoSendStream(
webrtc::Call* call,
const StreamParams& sp,
webrtc::VideoSendStream::Config config,
const VideoOptions& options,
bool enable_cpu_overuse_detection,
int max_bitrate_bps,
const absl::optional<VideoCodecSettings>& codec_settings,
const absl::optional<std::vector<webrtc::RtpExtension>>& rtp_extensions,
const VideoSenderParameters& send_params);
~WebRtcVideoSendStream();
void SetSenderParameters(const ChangedSenderParameters& send_params);
webrtc::RTCError SetRtpParameters(const webrtc::RtpParameters& parameters,
webrtc::SetParametersCallback callback);
webrtc::RtpParameters GetRtpParameters() const;
void SetFrameEncryptor(
rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor);
bool SetVideoSend(const VideoOptions* options,
rtc::VideoSourceInterface<webrtc::VideoFrame>* source);
// Note: The encoder_selector object must remain valid for the lifetime of
// the MediaChannel, unless replaced.
void SetEncoderSelector(
webrtc::VideoEncoderFactory::EncoderSelectorInterface*
encoder_selector);
void SetSend(bool send);
const std::vector<uint32_t>& GetSsrcs() const;
// Returns per-ssrc VideoSenderInfos. Useful in simulcast scenarios.
std::vector<VideoSenderInfo> GetPerLayerVideoSenderInfos(bool log_stats);
// Aggregates per-ssrc VideoSenderInfos into a single VideoSenderInfo, for
// legacy reasons. Used in the old GetStats API and in track stats.
VideoSenderInfo GetAggregatedVideoSenderInfo(
const std::vector<VideoSenderInfo>& infos) const;
void FillBitrateInfo(BandwidthEstimationInfo* bwe_info);
void SetEncoderToPacketizerFrameTransformer(
rtc::scoped_refptr<webrtc::FrameTransformerInterface>
frame_transformer);
void GenerateKeyFrame(const std::vector<std::string>& rids);
private:
// Parameters needed to reconstruct the underlying stream.
// webrtc::VideoSendStream doesn't support setting many options on the
// fly, so when those need to be changed we tear down and reconstruct the
// stream with similar parameters, depending on which options changed.
struct VideoSendStreamParameters {
VideoSendStreamParameters(
webrtc::VideoSendStream::Config config,
const VideoOptions& options,
int max_bitrate_bps,
const absl::optional<VideoCodecSettings>& codec_settings);
webrtc::VideoSendStream::Config config;
VideoOptions options;
int max_bitrate_bps;
bool conference_mode;
absl::optional<VideoCodecSettings> codec_settings;
// Sent resolutions + bitrates etc. by the underlying VideoSendStream,
// typically changes when setting a new resolution or reconfiguring
// bitrates.
webrtc::VideoEncoderConfig encoder_config;
};
rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
ConfigureVideoEncoderSettings(const VideoCodec& codec);
void SetCodec(const VideoCodecSettings& codec);
void RecreateWebRtcStream();
webrtc::VideoEncoderConfig CreateVideoEncoderConfig(
const VideoCodec& codec) const;
void ReconfigureEncoder(webrtc::SetParametersCallback callback);
// Calls Start or Stop according to whether or not `sending_` is true.
void UpdateSendState();
webrtc::DegradationPreference GetDegradationPreference() const
RTC_EXCLUSIVE_LOCKS_REQUIRED(&thread_checker_);
RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
webrtc::TaskQueueBase* const worker_thread_;
const std::vector<uint32_t> ssrcs_ RTC_GUARDED_BY(&thread_checker_);
const std::vector<SsrcGroup> ssrc_groups_ RTC_GUARDED_BY(&thread_checker_);
webrtc::Call* const call_;
const bool enable_cpu_overuse_detection_;
rtc::VideoSourceInterface<webrtc::VideoFrame>* source_
RTC_GUARDED_BY(&thread_checker_);
webrtc::VideoSendStream* stream_ RTC_GUARDED_BY(&thread_checker_);
// Contains settings that are the same for all streams in the MediaChannel,
// such as codecs, header extensions, and the global bitrate limit for the
// entire channel.
VideoSendStreamParameters parameters_ RTC_GUARDED_BY(&thread_checker_);
// Contains settings that are unique for each stream, such as max_bitrate.
// Does *not* contain codecs, however.
// TODO(skvlad): Move ssrcs_ and ssrc_groups_ into rtp_parameters_.
// TODO(skvlad): Combine parameters_ and rtp_parameters_ once we have only
// one stream per MediaChannel.
webrtc::RtpParameters rtp_parameters_ RTC_GUARDED_BY(&thread_checker_);
bool sending_ RTC_GUARDED_BY(&thread_checker_);
// TODO(asapersson): investigate why setting
// DegradationPreference::MAINTAIN_RESOLUTION isn't sufficient to disable
// downscaling everywhere in the pipeline.
const bool disable_automatic_resize_;
};
void Construct(webrtc::Call* call, WebRtcVideoEngine* engine);
// Get all codecs that are compatible with the receiver.
std::vector<VideoCodecSettings> SelectSendVideoCodecs(
const std::vector<VideoCodecSettings>& remote_mapped_codecs) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void FillSenderStats(VideoMediaSendInfo* info, bool log_stats)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void FillBandwidthEstimationStats(const webrtc::Call::Stats& stats,
VideoMediaInfo* info)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void FillSendCodecStats(VideoMediaSendInfo* video_media_info)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
// Accessor function for send_codec_. Introduced in order to ensure
// that a receive channel does not touch the send codec directly.
// Can go away once these are different classes.
// TODO(bugs.webrtc.org/13931): Remove this function
absl::optional<VideoCodecSettings>& send_codec() { return send_codec_; }
const absl::optional<VideoCodecSettings>& send_codec() const {
return send_codec_;
}
webrtc::TaskQueueBase* const worker_thread_;
webrtc::ScopedTaskSafety task_safety_;
RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker network_thread_checker_{
webrtc::SequenceChecker::kDetached};
RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
uint32_t rtcp_receiver_report_ssrc_ RTC_GUARDED_BY(thread_checker_);
bool sending_ RTC_GUARDED_BY(thread_checker_);
bool receiving_ RTC_GUARDED_BY(&thread_checker_);
webrtc::Call* const call_;
rtc::VideoSinkInterface<webrtc::VideoFrame>* default_sink_
RTC_GUARDED_BY(thread_checker_);
// Delay for unsignaled streams, which may be set before the stream exists.
int default_recv_base_minimum_delay_ms_ RTC_GUARDED_BY(thread_checker_) = 0;
const MediaConfig::Video video_config_ RTC_GUARDED_BY(thread_checker_);
// Using primary-ssrc (first ssrc) as key.
std::map<uint32_t, WebRtcVideoSendStream*> send_streams_
RTC_GUARDED_BY(thread_checker_);
// When the channel and demuxer get reconfigured, there is a window of time
// where we have to be prepared for packets arriving based on the old demuxer
// criteria because the streams live on the worker thread and the demuxer
// lives on the network thread. Because packets are posted from the network
// thread to the worker thread, they can still be in-flight when streams are
// reconfigured. This can happen when `demuxer_criteria_id_` and
// `demuxer_criteria_completed_id_` don't match. During this time, we do not
// want to create unsignalled receive streams and should instead drop the
// packets. E.g.:
// * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
// in-flight for that ssrc. This happens when a receiver becomes inactive.
// * If we go from one to many m= sections, the demuxer may change from
// forwarding all packets to only forwarding the configured ssrcs, so there
// is a risk of receiving ssrcs for other, recently added m= sections.
uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0;
uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0;
absl::optional<int64_t> last_unsignalled_ssrc_creation_time_ms_
RTC_GUARDED_BY(thread_checker_);
std::set<uint32_t> send_ssrcs_ RTC_GUARDED_BY(thread_checker_);
std::set<uint32_t> receive_ssrcs_ RTC_GUARDED_BY(thread_checker_);
absl::optional<VideoCodecSettings> send_codec_
RTC_GUARDED_BY(thread_checker_);
std::vector<VideoCodecSettings> negotiated_codecs_
RTC_GUARDED_BY(thread_checker_);
std::vector<webrtc::RtpExtension> send_rtp_extensions_
RTC_GUARDED_BY(thread_checker_);
webrtc::VideoEncoderFactory* const encoder_factory_
RTC_GUARDED_BY(thread_checker_);
webrtc::VideoDecoderFactory* const decoder_factory_
RTC_GUARDED_BY(thread_checker_);
webrtc::VideoBitrateAllocatorFactory* const bitrate_allocator_factory_
RTC_GUARDED_BY(thread_checker_);
std::vector<VideoCodecSettings> recv_codecs_ RTC_GUARDED_BY(thread_checker_);
webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_
RTC_GUARDED_BY(thread_checker_);
std::vector<webrtc::RtpExtension> recv_rtp_extensions_
RTC_GUARDED_BY(thread_checker_);
// See the comment on WebRtcVideoReceiveChannel::ChangedReceiverParameters
// for why the FlexFEC payload type is tracked separately.
int recv_flexfec_payload_type_ RTC_GUARDED_BY(thread_checker_);
webrtc::BitrateConstraints bitrate_config_ RTC_GUARDED_BY(thread_checker_);
// TODO(deadbeef): Don't duplicate information between
// send_params/recv_params, rtp_extensions, options, etc.
VideoSenderParameters send_params_ RTC_GUARDED_BY(thread_checker_);
VideoOptions default_send_options_ RTC_GUARDED_BY(thread_checker_);
VideoReceiverParameters recv_params_ RTC_GUARDED_BY(thread_checker_);
int64_t last_send_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
int64_t last_receive_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
const bool discard_unknown_ssrc_packets_ RTC_GUARDED_BY(thread_checker_);
// This is a stream param that comes from the remote description, but wasn't
// signaled with any a=ssrc lines. It holds information that was signaled
// before the unsignaled receive stream is created when the first packet is
// received.
StreamParams unsignaled_stream_params_ RTC_GUARDED_BY(thread_checker_);
// Per peer connection crypto options that last for the lifetime of the peer
// connection.
const webrtc::CryptoOptions crypto_options_ RTC_GUARDED_BY(thread_checker_);
// Optional frame transformer set on unsignaled streams.
rtc::scoped_refptr<webrtc::FrameTransformerInterface>
unsignaled_frame_transformer_ RTC_GUARDED_BY(thread_checker_);
// RTP parameters that need to be set when creating a video receive stream.
// Only used in Receiver mode; in Both mode, these things are read from the
// codec.
webrtc::VideoReceiveStreamInterface::Config::Rtp rtp_config_;
// Callback invoked whenever the send codec changes.
// TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
absl::AnyInvocable<void()> send_codec_changed_callback_;
// Callback invoked whenever the list of SSRCs changes.
absl::AnyInvocable<void(const std::set<uint32_t>&)>
ssrc_list_changed_callback_;
};
class WebRtcVideoReceiveChannel : public MediaChannelUtil,
public VideoMediaReceiveChannelInterface {
public:
WebRtcVideoReceiveChannel(webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoDecoderFactory* decoder_factory);
~WebRtcVideoReceiveChannel() override;
MediaType media_type() const override { return MEDIA_TYPE_VIDEO; }
VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
return this;
}
VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
RTC_CHECK_NOTREACHED();
return nullptr;
}
// Common functions between sender and receiver
void SetInterface(MediaChannelNetworkInterface* iface) override;
// VideoMediaReceiveChannelInterface implementation
bool SetReceiverParameters(const VideoReceiverParameters& params) override;
webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override;
webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override;
void SetReceive(bool receive) override;
bool AddRecvStream(const StreamParams& sp) override;
bool AddDefaultRecvStreamForTesting(const StreamParams& sp) override {
// Invokes the private AddRecvStream() variant.
return AddRecvStream(sp, true);
}
bool RemoveRecvStream(uint32_t ssrc) override;
void ResetUnsignaledRecvStream() override;
absl::optional<uint32_t> GetUnsignaledSsrc() const override;
void OnDemuxerCriteriaUpdatePending() override;
void OnDemuxerCriteriaUpdateComplete() override;
bool SetSink(uint32_t ssrc,
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
void SetDefaultSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
bool GetStats(VideoMediaReceiveInfo* info) override;
void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override;
bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
absl::optional<int> GetBaseMinimumPlayoutDelayMs(
uint32_t ssrc) const override;
// Choose one of the available SSRCs (or default if none) as the current
// receiver report SSRC.
void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override;
// E2E Encrypted Video Frame API
// Set a frame decryptor to a particular ssrc that will intercept all
// incoming video frames and attempt to decrypt them before forwarding the
// result.
void SetFrameDecryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
frame_decryptor) override;
void SetRecordableEncodedFrameCallback(
uint32_t ssrc,
std::function<void(const webrtc::RecordableEncodedFrame&)> callback)
override;
void ClearRecordableEncodedFrameCallback(uint32_t ssrc) override;
void RequestRecvKeyFrame(uint32_t ssrc) override;
void SetDepacketizerToDecoderFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override;
std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
void SetReceiverFeedbackParameters(bool lntf_enabled,
bool nack_enabled,
webrtc::RtcpMode rtcp_mode,
absl::optional<int> rtx_time) override;
private:
class WebRtcVideoReceiveStream;
struct ChangedReceiverParameters {
// These optionals are unset if not changed.
absl::optional<std::vector<VideoCodecSettings>> codec_settings;
absl::optional<std::vector<webrtc::RtpExtension>> rtp_header_extensions;
// Keep track of the FlexFEC payload type separately from `codec_settings`.
// This allows us to recreate the FlexfecReceiveStream separately from the
// VideoReceiveStreamInterface when the FlexFEC payload type is changed.
absl::optional<int> flexfec_payload_type;
};
// Finds VideoReceiveStreamInterface corresponding to ssrc. Aware of
// unsignalled ssrc handling.
WebRtcVideoReceiveStream* FindReceiveStream(uint32_t ssrc)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void ProcessReceivedPacket(webrtc::RtpPacketReceived packet)
RTC_RUN_ON(thread_checker_);
// Expected to be invoked once per packet belonging to this channel that
// cannot be demuxed.
// Returns true if a new default stream has been created.
bool MaybeCreateDefaultReceiveStream(
const webrtc::RtpPacketReceived& parsed_packet)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void ReCreateDefaultReceiveStream(uint32_t ssrc,
absl::optional<uint32_t> rtx_ssrc);
// Add a receive stream. Used for testing.
bool AddRecvStream(const StreamParams& sp, bool default_stream);
void ConfigureReceiverRtp(
webrtc::VideoReceiveStreamInterface::Config* config,
webrtc::FlexfecReceiveStream::Config* flexfec_config,
const StreamParams& sp) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
bool ValidateReceiveSsrcAvailability(const StreamParams& sp) const
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void DeleteReceiveStream(WebRtcVideoReceiveStream* stream)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
// Called when the local ssrc changes. Sets `rtcp_receiver_report_ssrc_` and
// updates the receive streams.
void SetReceiverReportSsrc(uint32_t ssrc) RTC_RUN_ON(&thread_checker_);
// Wrapper for the receiver part, contains configs etc. that are needed to
// reconstruct the underlying VideoReceiveStreamInterface.
class WebRtcVideoReceiveStream
: public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
WebRtcVideoReceiveStream(
webrtc::Call* call,
const StreamParams& sp,
webrtc::VideoReceiveStreamInterface::Config config,
bool default_stream,
const std::vector<VideoCodecSettings>& recv_codecs,
const webrtc::FlexfecReceiveStream::Config& flexfec_config);
~WebRtcVideoReceiveStream();
webrtc::VideoReceiveStreamInterface& stream();
// Return value may be nullptr.
webrtc::FlexfecReceiveStream* flexfec_stream();
const std::vector<uint32_t>& GetSsrcs() const;
std::vector<webrtc::RtpSource> GetSources();
// Does not return codecs, nor header extensions, they are filled by the
// owning WebRtcVideoChannel.
webrtc::RtpParameters GetRtpParameters() const;
// TODO(deadbeef): Move these feedback parameters into the recv parameters.
void SetFeedbackParameters(bool lntf_enabled,
bool nack_enabled,
webrtc::RtcpMode rtcp_mode,
absl::optional<int> rtx_time);
void SetReceiverParameters(const ChangedReceiverParameters& recv_params);
void OnFrame(const webrtc::VideoFrame& frame) override;
bool IsDefaultStream() const;
void SetFrameDecryptor(
rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor);
bool SetBaseMinimumPlayoutDelayMs(int delay_ms);
int GetBaseMinimumPlayoutDelayMs() const;
void SetSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink);
VideoReceiverInfo GetVideoReceiverInfo(bool log_stats);
void SetRecordableEncodedFrameCallback(
std::function<void(const webrtc::RecordableEncodedFrame&)> callback);
void ClearRecordableEncodedFrameCallback();
void GenerateKeyFrame();
void SetDepacketizerToDecoderFrameTransformer(
rtc::scoped_refptr<webrtc::FrameTransformerInterface>
frame_transformer);
void SetLocalSsrc(uint32_t local_ssrc);
void UpdateRtxSsrc(uint32_t ssrc);
void StartReceiveStream();
void StopReceiveStream();
private:
// Attempts to reconfigure an already existing `flexfec_stream_`, to create
// one if the configuration is now complete, or to remove the flexfec stream
// when it is disabled.
void SetFlexFecPayload(int payload_type);
void RecreateReceiveStream();
void CreateReceiveStream();
// Applies a new receive codec configuration to `config_`. Returns true
// if the internal stream needs to be reconstructed, or false if no changes
// were applied.
bool ReconfigureCodecs(const std::vector<VideoCodecSettings>& recv_codecs);
webrtc::Call* const call_;
const StreamParams stream_params_;
// Both `stream_` and `flexfec_stream_` are managed by `this`. They are
// destroyed by calling call_->DestroyVideoReceiveStream and
// call_->DestroyFlexfecReceiveStream, respectively.
webrtc::VideoReceiveStreamInterface* stream_;
const bool default_stream_;
webrtc::VideoReceiveStreamInterface::Config config_;
webrtc::FlexfecReceiveStream::Config flexfec_config_;
webrtc::FlexfecReceiveStream* flexfec_stream_;
webrtc::Mutex sink_lock_;
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink_
RTC_GUARDED_BY(sink_lock_);
int64_t first_frame_timestamp_ RTC_GUARDED_BY(sink_lock_);
// Start NTP time is estimated as current remote NTP time (estimated from
// RTCP) minus the elapsed time, as soon as remote NTP time is available.
int64_t estimated_remote_start_ntp_time_ms_ RTC_GUARDED_BY(sink_lock_);
RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
bool receiving_ RTC_GUARDED_BY(&thread_checker_);
};
bool GetChangedReceiverParameters(const VideoReceiverParameters& params,
ChangedReceiverParameters* changed_params)
const RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
std::map<uint32_t, WebRtcVideoReceiveStream*> receive_streams_
RTC_GUARDED_BY(thread_checker_);
void FillReceiverStats(VideoMediaReceiveInfo* info, bool log_stats)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
void FillReceiveCodecStats(VideoMediaReceiveInfo* video_media_info)
RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
StreamParams unsignaled_stream_params() {
RTC_DCHECK_RUN_ON(&thread_checker_);
return unsignaled_stream_params_;
}
// Variables.
webrtc::TaskQueueBase* const worker_thread_;
webrtc::ScopedTaskSafety task_safety_;
RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker network_thread_checker_{
webrtc::SequenceChecker::kDetached};
RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
uint32_t rtcp_receiver_report_ssrc_ RTC_GUARDED_BY(thread_checker_);
bool receiving_ RTC_GUARDED_BY(&thread_checker_);
webrtc::Call* const call_;
rtc::VideoSinkInterface<webrtc::VideoFrame>* default_sink_
RTC_GUARDED_BY(thread_checker_);
// Delay for unsignaled streams, which may be set before the stream exists.
int default_recv_base_minimum_delay_ms_ RTC_GUARDED_BY(thread_checker_) = 0;
const MediaConfig::Video video_config_ RTC_GUARDED_BY(thread_checker_);
// When the channel and demuxer get reconfigured, there is a window of time
// where we have to be prepared for packets arriving based on the old demuxer
// criteria because the streams live on the worker thread and the demuxer
// lives on the network thread. Because packets are posted from the network
// thread to the worker thread, they can still be in-flight when streams are
// reconfigured. This can happen when `demuxer_criteria_id_` and
// `demuxer_criteria_completed_id_` don't match. During this time, we do not
// want to create unsignalled receive streams and should instead drop the
// packets. E.g.:
// * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
// in-flight for that ssrc. This happens when a receiver becomes inactive.
// * If we go from one to many m= sections, the demuxer may change from
// forwarding all packets to only forwarding the configured ssrcs, so there
// is a risk of receiving ssrcs for other, recently added m= sections.
uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0;
uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0;
absl::optional<int64_t> last_unsignalled_ssrc_creation_time_ms_
RTC_GUARDED_BY(thread_checker_);
std::set<uint32_t> send_ssrcs_ RTC_GUARDED_BY(thread_checker_);
std::set<uint32_t> receive_ssrcs_ RTC_GUARDED_BY(thread_checker_);
absl::optional<VideoCodecSettings> send_codec_
RTC_GUARDED_BY(thread_checker_);
std::vector<VideoCodecSettings> negotiated_codecs_
RTC_GUARDED_BY(thread_checker_);
std::vector<webrtc::RtpExtension> send_rtp_extensions_
RTC_GUARDED_BY(thread_checker_);
webrtc::VideoDecoderFactory* const decoder_factory_
RTC_GUARDED_BY(thread_checker_);
std::vector<VideoCodecSettings> recv_codecs_ RTC_GUARDED_BY(thread_checker_);
webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_
RTC_GUARDED_BY(thread_checker_);
std::vector<webrtc::RtpExtension> recv_rtp_extensions_
RTC_GUARDED_BY(thread_checker_);
// See the comment on WebRtcVideoReceiveChannel::ChangedReceiverParameters
// for why the FlexFEC payload type is tracked separately.
int recv_flexfec_payload_type_ RTC_GUARDED_BY(thread_checker_);
webrtc::BitrateConstraints bitrate_config_ RTC_GUARDED_BY(thread_checker_);
// TODO(deadbeef): Don't duplicate information between
// send_params/recv_params, rtp_extensions, options, etc.
VideoSenderParameters send_params_ RTC_GUARDED_BY(thread_checker_);
VideoOptions default_send_options_ RTC_GUARDED_BY(thread_checker_);
VideoReceiverParameters recv_params_ RTC_GUARDED_BY(thread_checker_);
int64_t last_receive_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
const bool discard_unknown_ssrc_packets_ RTC_GUARDED_BY(thread_checker_);
// This is a stream param that comes from the remote description, but wasn't
// signaled with any a=ssrc lines. It holds information that was signaled
// before the unsignaled receive stream is created when the first packet is
// received.
StreamParams unsignaled_stream_params_ RTC_GUARDED_BY(thread_checker_);
// Per peer connection crypto options that last for the lifetime of the peer
// connection.
const webrtc::CryptoOptions crypto_options_ RTC_GUARDED_BY(thread_checker_);
// Optional frame transformer set on unsignaled streams.
rtc::scoped_refptr<webrtc::FrameTransformerInterface>
unsignaled_frame_transformer_ RTC_GUARDED_BY(thread_checker_);
// RTP parameters that need to be set when creating a video receive stream.
// Only used in Receiver mode; in Both mode, these things are read from the
// codec.
webrtc::VideoReceiveStreamInterface::Config::Rtp rtp_config_;
// Callback invoked whenever the send codec changes.
// TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
absl::AnyInvocable<void()> send_codec_changed_callback_;
// Callback invoked whenever the list of SSRCs changes.
absl::AnyInvocable<void(const std::set<uint32_t>&)>
ssrc_list_changed_callback_;
const int receive_buffer_size_;
};
// Keeping the old name "WebRtcVideoChannel" around because some external
// customers are using cricket::WebRtcVideoChannel::AdaptReason
// TODO(bugs.webrtc.org/15216): Move this enum to an interface class and
// delete this workaround.
class WebRtcVideoChannel : public WebRtcVideoSendChannel {
public:
// Make all the values of AdaptReason available as
// WebRtcVideoChannel::ADAPT_xxx.
using WebRtcVideoSendChannel::AdaptReason;
};
} // namespace cricket
#endif // MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_

File diff suppressed because it is too large


@ -0,0 +1,516 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
#define MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/audio/audio_frame_processor.h"
#include "api/audio/audio_mixer.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_options.h"
#include "api/call/audio_sink.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/field_trials_view.h"
#include "api/frame_transformer_interface.h"
#include "api/rtc_error.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/transport/rtp/rtp_source.h"
#include "call/audio_send_stream.h"
#include "call/audio_state.h"
#include "call/call.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_config.h"
#include "media/base/media_engine.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "modules/async_audio_processing/async_audio_processing.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/buffer.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/system/file_wrapper.h"
namespace webrtc {
class AudioFrameProcessor;
} // namespace webrtc
namespace cricket {
class AudioSource;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling.
class WebRtcVoiceEngine final : public VoiceEngineInterface {
friend class WebRtcVoiceSendChannel;
friend class WebRtcVoiceReceiveChannel;
public:
WebRtcVoiceEngine(
webrtc::TaskQueueFactory* task_queue_factory,
webrtc::AudioDeviceModule* adm,
const rtc::scoped_refptr<webrtc::AudioEncoderFactory>& encoder_factory,
const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer,
rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing,
std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor,
const webrtc::FieldTrialsView& trials);
WebRtcVoiceEngine() = delete;
WebRtcVoiceEngine(const WebRtcVoiceEngine&) = delete;
WebRtcVoiceEngine& operator=(const WebRtcVoiceEngine&) = delete;
~WebRtcVoiceEngine() override;
// Does initialization that needs to occur on the worker thread.
void Init() override;
rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const override;
std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) override;
std::unique_ptr<VoiceMediaReceiveChannelInterface> CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) override;
const std::vector<AudioCodec>& send_codecs() const override;
const std::vector<AudioCodec>& recv_codecs() const override;
std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
const override;
// Starts AEC dump using an existing file. A maximum file size in bytes can be
// specified. When the maximum file size is reached, logging is stopped and
// the file is closed. If max_size_bytes is set to <= 0, no limit will be
// used.
bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override;
// Stops AEC dump.
void StopAecDump() override;
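// For example (a sketch; `engine` and `dump_path` are hypothetical):
//
//   engine->StartAecDump(webrtc::FileWrapper::OpenWriteOnly(dump_path),
//                        /*max_size_bytes=*/-1);  // no size limit
//   ...
//   engine->StopAecDump();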
absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
private:
// Every option that is "set" will be applied. Every option not "set" will be
// ignored. This allows us to selectively turn on and off different options
// easily at any time.
void ApplyOptions(const AudioOptions& options);
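// E.g. (sketch): an AudioOptions where only echo_cancellation is set leaves
// every other option at its current value:
//
//   AudioOptions options;
//   options.echo_cancellation = true;  // the only field that gets applied
//   ApplyOptions(options);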
webrtc::TaskQueueFactory* const task_queue_factory_;
std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter>
low_priority_worker_queue_;
webrtc::AudioDeviceModule* adm();
webrtc::AudioProcessing* apm() const;
webrtc::AudioState* audio_state();
std::vector<AudioCodec> CollectCodecs(
const std::vector<webrtc::AudioCodecSpec>& specs) const;
webrtc::SequenceChecker signal_thread_checker_{
webrtc::SequenceChecker::kDetached};
webrtc::SequenceChecker worker_thread_checker_{
webrtc::SequenceChecker::kDetached};
// The audio device module.
rtc::scoped_refptr<webrtc::AudioDeviceModule> adm_;
rtc::scoped_refptr<webrtc::AudioEncoderFactory> encoder_factory_;
rtc::scoped_refptr<webrtc::AudioDecoderFactory> decoder_factory_;
rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer_;
// The audio processing module.
rtc::scoped_refptr<webrtc::AudioProcessing> apm_;
// Asynchronous audio processing.
std::unique_ptr<webrtc::AudioFrameProcessor> audio_frame_processor_;
// The primary instance of WebRtc VoiceEngine.
rtc::scoped_refptr<webrtc::AudioState> audio_state_;
std::vector<AudioCodec> send_codecs_;
std::vector<AudioCodec> recv_codecs_;
bool is_dumping_aec_ = false;
bool initialized_ = false;
// Jitter buffer settings for new streams.
size_t audio_jitter_buffer_max_packets_ = 200;
bool audio_jitter_buffer_fast_accelerate_ = false;
int audio_jitter_buffer_min_delay_ms_ = 0;
const bool minimized_remsampling_on_mobile_trial_enabled_;
};
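// A minimal bring-up sketch (assuming the builtin codec factories, `adm`,
// `task_queue_factory`, and `trials` are provided by the application; the
// mixer and frame processor may be null):
//
//   auto engine = std::make_unique<cricket::WebRtcVoiceEngine>(
//       task_queue_factory, adm,
//       webrtc::CreateBuiltinAudioEncoderFactory(),
//       webrtc::CreateBuiltinAudioDecoderFactory(),
//       /*audio_mixer=*/nullptr, webrtc::AudioProcessingBuilder().Create(),
//       /*owned_audio_frame_processor=*/nullptr, trials);
//   engine->Init();  // must run on the worker thread (see Init() above)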
class WebRtcVoiceSendChannel final : public MediaChannelUtil,
public VoiceMediaSendChannelInterface {
public:
WebRtcVoiceSendChannel(WebRtcVoiceEngine* engine,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::Call* call,
webrtc::AudioCodecPairId codec_pair_id);
WebRtcVoiceSendChannel() = delete;
WebRtcVoiceSendChannel(const WebRtcVoiceSendChannel&) = delete;
WebRtcVoiceSendChannel& operator=(const WebRtcVoiceSendChannel&) = delete;
~WebRtcVoiceSendChannel() override;
MediaType media_type() const override { return MEDIA_TYPE_AUDIO; }
VideoMediaSendChannelInterface* AsVideoSendChannel() override {
  RTC_CHECK_NOTREACHED();
  return nullptr;
}
VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; }
absl::optional<Codec> GetSendCodec() const override;
// Functions imported from MediaChannelUtil
void SetInterface(MediaChannelNetworkInterface* iface) override {
  MediaChannelUtil::SetInterface(iface);
}
bool HasNetworkInterface() const override {
  return MediaChannelUtil::HasNetworkInterface();
}
void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
  MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
}
bool ExtmapAllowMixed() const override {
  return MediaChannelUtil::ExtmapAllowMixed();
}
const AudioOptions& options() const { return options_; }
bool SetSenderParameters(const AudioSenderParameter& params) override;
webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override;
webrtc::RTCError SetRtpSendParameters(
uint32_t ssrc,
const webrtc::RtpParameters& parameters,
webrtc::SetParametersCallback callback) override;
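// These follow the usual read-modify-write pattern, e.g. (a sketch with a
// hypothetical `ssrc` and an illustrative bitrate cap):
//
//   webrtc::RtpParameters p = channel->GetRtpSendParameters(ssrc);
//   p.encodings[0].max_bitrate_bps = 64000;
//   channel->SetRtpSendParameters(ssrc, p, nullptr);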
void SetSend(bool send) override;
bool SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
AudioSource* source) override;
bool AddSendStream(const StreamParams& sp) override;
bool RemoveSendStream(uint32_t ssrc) override;
void SetSsrcListChangedCallback(
absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override;
// E2EE Frame API
// Set a frame encryptor for a particular ssrc; it will intercept all
// outgoing audio payload frames, attempt to encrypt them, and forward the
// result to the packetizer.
void SetFrameEncryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
frame_encryptor) override;
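// Sketch (MyFrameEncryptor is an application-provided implementation of
// webrtc::FrameEncryptorInterface):
//
//   channel->SetFrameEncryptor(ssrc,
//                              rtc::make_ref_counted<MyFrameEncryptor>());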
bool CanInsertDtmf() override;
bool InsertDtmf(uint32_t ssrc, int event, int duration) override;
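// Sketch: sending the DTMF digit "1" for 100 ms on `ssrc` (values are
// illustrative):
//
//   if (channel->CanInsertDtmf()) {
//     channel->InsertDtmf(ssrc, /*event=*/1, /*duration=*/100);
//   }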
void OnPacketSent(const rtc::SentPacket& sent_packet) override;
void OnNetworkRouteChanged(absl::string_view transport_name,
const rtc::NetworkRoute& network_route) override;
void OnReadyToSend(bool ready) override;
bool GetStats(VoiceMediaSendInfo* info) override;
// Sets a frame transformer between encoder and packetizer, to transform
// encoded frames before sending them out the network.
void SetEncoderToPacketizerFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override;
bool SenderNackEnabled() const override {
  if (!send_codec_spec_) {
    return false;
  }
  return send_codec_spec_->nack_enabled;
}
bool SenderNonSenderRttEnabled() const override {
  if (!send_codec_spec_) {
    return false;
  }
  return send_codec_spec_->enable_non_sender_rtt;
}
bool SendCodecHasNack() const override { return SenderNackEnabled(); }
void SetSendCodecChangedCallback(
    absl::AnyInvocable<void()> callback) override {
  send_codec_changed_callback_ = std::move(callback);
}
private:
bool SetOptions(const AudioOptions& options);
bool SetSendCodecs(const std::vector<Codec>& codecs,
absl::optional<Codec> preferred_codec);
bool SetLocalSource(uint32_t ssrc, AudioSource* source);
bool MuteStream(uint32_t ssrc, bool mute);
WebRtcVoiceEngine* engine() { return engine_; }
bool SetMaxSendBitrate(int bps);
void SetupRecording();
webrtc::TaskQueueBase* const worker_thread_;
webrtc::ScopedTaskSafety task_safety_;
webrtc::SequenceChecker network_thread_checker_{
webrtc::SequenceChecker::kDetached};
WebRtcVoiceEngine* const engine_ = nullptr;
std::vector<AudioCodec> send_codecs_;
int max_send_bitrate_bps_ = 0;
AudioOptions options_;
absl::optional<int> dtmf_payload_type_;
int dtmf_payload_freq_ = -1;
bool enable_non_sender_rtt_ = false;
bool send_ = false;
webrtc::Call* const call_ = nullptr;
const MediaConfig::Audio audio_config_;
class WebRtcAudioSendStream;
std::map<uint32_t, WebRtcAudioSendStream*> send_streams_;
std::vector<webrtc::RtpExtension> send_rtp_extensions_;
std::string mid_;
absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
send_codec_spec_;
// TODO(kwiberg): Per-SSRC codec pair IDs?
const webrtc::AudioCodecPairId codec_pair_id_;
// Per peer connection crypto options that last for the lifetime of the peer
// connection.
const webrtc::CryptoOptions crypto_options_;
rtc::scoped_refptr<webrtc::FrameTransformerInterface>
unsignaled_frame_transformer_;
void FillSendCodecStats(VoiceMediaSendInfo* voice_media_info);
// Callback invoked whenever the send codec changes.
// TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
absl::AnyInvocable<void()> send_codec_changed_callback_;
// Callback invoked whenever the list of SSRCs changes.
absl::AnyInvocable<void(const std::set<uint32_t>&)>
ssrc_list_changed_callback_;
};
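// Typical send-side call order after SDP negotiation, as a sketch (`stream`
// and `params` would be derived from the negotiated description):
//
//   channel->AddSendStream(stream);        // register the outgoing SSRC(s)
//   channel->SetSenderParameters(params);  // codecs + RTP header extensions
//   channel->SetSend(true);                // start encoding and sending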
class WebRtcVoiceReceiveChannel final
: public MediaChannelUtil,
public VoiceMediaReceiveChannelInterface {
public:
WebRtcVoiceReceiveChannel(WebRtcVoiceEngine* engine,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::Call* call,
webrtc::AudioCodecPairId codec_pair_id);
WebRtcVoiceReceiveChannel() = delete;
WebRtcVoiceReceiveChannel(const WebRtcVoiceReceiveChannel&) = delete;
WebRtcVoiceReceiveChannel& operator=(const WebRtcVoiceReceiveChannel&) =
delete;
~WebRtcVoiceReceiveChannel() override;
MediaType media_type() const override { return MEDIA_TYPE_AUDIO; }
VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
  RTC_CHECK_NOTREACHED();
  return nullptr;
}
VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
  return this;
}
const AudioOptions& options() const { return options_; }
void SetInterface(MediaChannelNetworkInterface* iface) override {
  MediaChannelUtil::SetInterface(iface);
}
bool SetReceiverParameters(const AudioReceiverParameters& params) override;
webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override;
webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override;
void SetPlayout(bool playout) override;
bool AddRecvStream(const StreamParams& sp) override;
bool RemoveRecvStream(uint32_t ssrc) override;
void ResetUnsignaledRecvStream() override;
absl::optional<uint32_t> GetUnsignaledSsrc() const override;
void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override;
void OnDemuxerCriteriaUpdatePending() override;
void OnDemuxerCriteriaUpdateComplete() override;
// E2EE Frame API
// Set a frame decryptor for a particular ssrc; it will intercept all
// incoming audio payloads and attempt to decrypt them before forwarding
// the result.
void SetFrameDecryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
frame_decryptor) override;
bool SetOutputVolume(uint32_t ssrc, double volume) override;
// Applies the new volume to current and future unsignaled streams.
bool SetDefaultOutputVolume(double volume) override;
bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
absl::optional<int> GetBaseMinimumPlayoutDelayMs(
uint32_t ssrc) const override;
void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override;
bool GetStats(VoiceMediaReceiveInfo* info,
bool get_and_clear_legacy_stats) override;
// Set the audio sink for an existing stream.
void SetRawAudioSink(
uint32_t ssrc,
std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
// Will set the audio sink on the latest unsignaled stream, future or
// current. Only one stream at a time will use the sink.
void SetDefaultRawAudioSink(
std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
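// Sketch (MySink is an application-provided implementation of
// webrtc::AudioSinkInterface):
//
//   channel->SetRawAudioSink(ssrc, std::make_unique<MySink>());
//   channel->SetDefaultRawAudioSink(std::make_unique<MySink>());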
std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
void SetDepacketizerToDecoderFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override;
void SetReceiveNackEnabled(bool enabled) override;
void SetReceiveNonSenderRttEnabled(bool enabled) override;
private:
bool SetOptions(const AudioOptions& options);
bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
bool SetLocalSource(uint32_t ssrc, AudioSource* source);
bool MuteStream(uint32_t ssrc, bool mute);
WebRtcVoiceEngine* engine() { return engine_; }
void SetupRecording();
// Expected to be invoked once per packet that belongs to this channel but
// cannot be demuxed. Returns true if a default receive stream has been
// created.
bool MaybeCreateDefaultReceiveStream(const webrtc::RtpPacketReceived& packet);
// Check if 'ssrc' is an unsignaled stream, and if so mark it as not being
// unsignaled anymore (i.e. it is now removed, or signaled), and return true.
bool MaybeDeregisterUnsignaledRecvStream(uint32_t ssrc);
webrtc::TaskQueueBase* const worker_thread_;
webrtc::ScopedTaskSafety task_safety_;
webrtc::SequenceChecker network_thread_checker_{
webrtc::SequenceChecker::kDetached};
WebRtcVoiceEngine* const engine_ = nullptr;
// TODO(kwiberg): decoder_map_ and recv_codecs_ store the exact same
// information, in slightly different formats. Eliminate recv_codecs_.
std::map<int, webrtc::SdpAudioFormat> decoder_map_;
std::vector<AudioCodec> recv_codecs_;
AudioOptions options_;
bool recv_nack_enabled_ = false;
bool enable_non_sender_rtt_ = false;
bool playout_ = false;
webrtc::Call* const call_ = nullptr;
const MediaConfig::Audio audio_config_;
// Queue of unsignaled SSRCs; oldest at the beginning.
std::vector<uint32_t> unsignaled_recv_ssrcs_;
// Stream params that came from the remote description but were not signaled
// with any a=ssrc lines. They hold the information that was signaled before
// the unsignaled receive stream is created when the first packet arrives.
StreamParams unsignaled_stream_params_;
// Volume for unsignaled streams, which may be set before the stream exists.
double default_recv_volume_ = 1.0;
// Delay for unsignaled streams, which may be set before the stream exists.
int default_recv_base_minimum_delay_ms_ = 0;
// Sink for latest unsignaled stream - may be set before the stream exists.
std::unique_ptr<webrtc::AudioSinkInterface> default_sink_;
// Default SSRC to use for RTCP receiver reports in case of no signaled
// send streams. See: https://code.google.com/p/webrtc/issues/detail?id=4740
// and https://code.google.com/p/chromium/issues/detail?id=547661
uint32_t receiver_reports_ssrc_ = 0xFA17FA17u;
std::string mid_;
class WebRtcAudioReceiveStream;
std::map<uint32_t, WebRtcAudioReceiveStream*> recv_streams_;
std::vector<webrtc::RtpExtension> recv_rtp_extensions_;
webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_;
absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
send_codec_spec_;
// TODO(kwiberg): Per-SSRC codec pair IDs?
const webrtc::AudioCodecPairId codec_pair_id_;
// Per peer connection crypto options that last for the lifetime of the peer
// connection.
const webrtc::CryptoOptions crypto_options_;
// Unsignaled streams have an option to have a frame decryptor set on them.
rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
unsignaled_frame_decryptor_;
rtc::scoped_refptr<webrtc::FrameTransformerInterface>
unsignaled_frame_transformer_;
void FillReceiveCodecStats(VoiceMediaReceiveInfo* voice_media_info);
};
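// Receive-side counterpart to the send sketch above (`remote_stream` comes
// from the remote description; volume 1.0 is unity gain):
//
//   channel->AddRecvStream(remote_stream);
//   channel->SetOutputVolume(remote_stream.first_ssrc(), 1.0);
//   channel->SetPlayout(true);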
} // namespace cricket
#endif // MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_