Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,13 @@
brandtr@webrtc.org
ilnik@webrtc.org
sprang@webrtc.org
magjed@webrtc.org
mflodman@webrtc.org
perkj@webrtc.org
# Audio-related changes:
peah@webrtc.org
saza@webrtc.org
# Datachannel-related changes:
orphis@webrtc.org

View file

@ -0,0 +1,126 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/adapted_video_track_source.h"
#include "api/scoped_refptr.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace rtc {
// Default-constructs the source; the VideoAdapter uses its default
// (no extra) resolution alignment.
AdaptedVideoTrackSource::AdaptedVideoTrackSource() = default;
// Constructs the source with a VideoAdapter configured to produce
// dimensions aligned to `required_alignment`.
AdaptedVideoTrackSource::AdaptedVideoTrackSource(int required_alignment)
: video_adapter_(required_alignment) {}
AdaptedVideoTrackSource::~AdaptedVideoTrackSource() = default;
// Copies the most recent input-frame stats into `stats`. Returns false
// until the first frame has been reported via AdaptFrame().
bool AdaptedVideoTrackSource::GetStats(Stats* stats) {
  webrtc::MutexLock lock(&stats_mutex_);
  if (stats_) {
    *stats = *stats_;
    return true;
  }
  return false;
}
// Delivers `frame` to all registered sinks, rotating it first when some
// sink has requested rotation_applied and the buffer is plain I420.
// Frames with non-I420 (e.g. native) buffers are passed through
// unrotated; subclasses producing such frames must handle rotation
// themselves (see header comment).
void AdaptedVideoTrackSource::OnFrame(const webrtc::VideoFrame& frame) {
rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
frame.video_frame_buffer());
/* Note that this is a "best effort" approach to
wants.rotation_applied; apply_rotation_ can change from false to
true between the check of apply_rotation() and the call to
broadcaster_.OnFrame(), in which case we generate a frame with
pending rotation despite some sink with wants.rotation_applied ==
true was just added. The VideoBroadcaster enforces
synchronization for us in this case, by not passing the frame on
to sinks which don't want it. */
if (apply_rotation() && frame.rotation() != webrtc::kVideoRotation_0 &&
buffer->type() == webrtc::VideoFrameBuffer::Type::kI420) {
/* Apply pending rotation. */
// Rotate the pixel data and clear the rotation flag so sinks do not
// rotate a second time.
webrtc::VideoFrame rotated_frame(frame);
rotated_frame.set_video_frame_buffer(
webrtc::I420Buffer::Rotate(*buffer->GetI420(), frame.rotation()));
rotated_frame.set_rotation(webrtc::kVideoRotation_0);
broadcaster_.OnFrame(rotated_frame);
} else {
broadcaster_.OnFrame(frame);
}
}
// Forwards a source-side frame drop to the broadcaster so sinks are
// notified of the discarded frame.
void AdaptedVideoTrackSource::OnFrameDropped() {
broadcaster_.OnDiscardedFrame();
}
// Registers (or updates the wants of) `sink`, then re-derives the
// aggregate sink wants and pushes them into the video adapter.
void AdaptedVideoTrackSource::AddOrUpdateSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
broadcaster_.AddOrUpdateSink(sink, wants);
OnSinkWantsChanged(broadcaster_.wants());
}
// Unregisters `sink` and re-derives the aggregate sink wants.
void AdaptedVideoTrackSource::RemoveSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
broadcaster_.RemoveSink(sink);
OnSinkWantsChanged(broadcaster_.wants());
}
// True if any registered sink wants rotation applied by the source.
// May become stale when read from another thread (see header).
bool AdaptedVideoTrackSource::apply_rotation() {
return broadcaster_.wants().rotation_applied;
}
// Propagates the aggregated sink wants to the video adapter.
void AdaptedVideoTrackSource::OnSinkWantsChanged(
const rtc::VideoSinkWants& wants) {
video_adapter_.OnSinkWants(wants);
}
// Records input stats, then asks the VideoAdapter for the adapted
// output resolution and centered crop rectangle for a frame of
// `width` x `height` captured at `time_us` (microseconds).
// Returns false (and reports a discarded frame where appropriate)
// if no sink wants a frame or the adapter decides to drop it.
bool AdaptedVideoTrackSource::AdaptFrame(int width,
int height,
int64_t time_us,
int* out_width,
int* out_height,
int* crop_width,
int* crop_height,
int* crop_x,
int* crop_y) {
{
// Stats are recorded even for frames that end up dropped.
webrtc::MutexLock lock(&stats_mutex_);
stats_ = Stats{width, height};
}
if (!broadcaster_.frame_wanted()) {
return false;
}
// The adapter expects the timestamp in nanoseconds, hence the
// microseconds-to-nanoseconds conversion.
if (!video_adapter_.AdaptFrameResolution(
width, height, time_us * rtc::kNumNanosecsPerMicrosec, crop_width,
crop_height, out_width, out_height)) {
broadcaster_.OnDiscardedFrame();
// VideoAdapter dropped the frame.
return false;
}
// Center the crop rectangle within the input frame.
*crop_x = (width - *crop_width) / 2;
*crop_y = (height - *crop_height) / 2;
return true;
}
// Forwards track source constraints (e.g. min/max fps) to the
// broadcaster, which relays them to the sinks.
void AdaptedVideoTrackSource::ProcessConstraints(
const webrtc::VideoTrackSourceConstraints& constraints) {
broadcaster_.ProcessConstraints(constraints);
}
} // namespace rtc

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
#define MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
#include <stdint.h>
#include "absl/types/optional.h"
#include "api/media_stream_interface.h"
#include "api/notifier.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "media/base/video_adapter.h"
#include "media/base/video_broadcaster.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/thread_annotations.h"
namespace rtc {
// Base class for sources which needs video adaptation, e.g., video
// capture sources. Sinks must be added and removed on one and only
// one thread, while AdaptFrame and OnFrame may be called on any
// thread.
class RTC_EXPORT AdaptedVideoTrackSource
: public webrtc::Notifier<webrtc::VideoTrackSourceInterface> {
public:
AdaptedVideoTrackSource();
~AdaptedVideoTrackSource() override;
protected:
// Allows derived classes to initialize `video_adapter_` with a custom
// alignment.
explicit AdaptedVideoTrackSource(int required_alignment);
// Checks the apply_rotation() flag. If the frame needs rotation, and it is a
// plain memory frame, it is rotated. Subclasses producing native frames must
// handle apply_rotation() themselves.
void OnFrame(const webrtc::VideoFrame& frame);
// Indication from source that a frame was dropped.
void OnFrameDropped();
// Reports the appropriate frame size after adaptation. Returns true
// if a frame is wanted. Returns false if there are no interested
// sinks, or if the VideoAdapter decides to drop the frame.
bool AdaptFrame(int width,
int height,
int64_t time_us,
int* out_width,
int* out_height,
int* crop_width,
int* crop_height,
int* crop_x,
int* crop_y);
// Returns the current value of the apply_rotation flag, derived
// from the VideoSinkWants of registered sinks. The value is derived
// from sinks' wants, in AddOrUpdateSink and RemoveSink. Beware that
// when using this method from a different thread, the value may
// become stale before it is used.
bool apply_rotation();
// Exposes the adapter so subclasses can configure output constraints.
cricket::VideoAdapter* video_adapter() { return &video_adapter_; }
private:
// Implements rtc::VideoSourceInterface.
void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
const rtc::VideoSinkWants& wants) override;
void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
// Part of VideoTrackSourceInterface.
bool GetStats(Stats* stats) override;
// Pushes the (re-)aggregated sink wants into the video adapter.
void OnSinkWantsChanged(const rtc::VideoSinkWants& wants);
// Encoded sinks not implemented for AdaptedVideoTrackSource.
bool SupportsEncodedOutput() const override { return false; }
void GenerateKeyFrame() override {}
void AddEncodedSink(
rtc::VideoSinkInterface<webrtc::RecordableEncodedFrame>* sink) override {}
void RemoveEncodedSink(
rtc::VideoSinkInterface<webrtc::RecordableEncodedFrame>* sink) override {}
void ProcessConstraints(
const webrtc::VideoTrackSourceConstraints& constraints) override;
cricket::VideoAdapter video_adapter_;
// Guards `stats_`, which is written by AdaptFrame (any thread) and
// read by GetStats.
webrtc::Mutex stats_mutex_;
absl::optional<Stats> stats_ RTC_GUARDED_BY(stats_mutex_);
VideoBroadcaster broadcaster_;
};
} // namespace rtc
#endif // MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_AUDIO_SOURCE_H_
#define MEDIA_BASE_AUDIO_SOURCE_H_
#include <cstddef>
#include "absl/types/optional.h"
namespace cricket {
// Abstract interface for providing the audio data.
// TODO(deadbeef): Rename this to AudioSourceInterface, and rename
// webrtc::AudioSourceInterface to AudioTrackSourceInterface.
class AudioSource {
public:
// Receiver interface for audio delivered by an AudioSource.
class Sink {
public:
// Callback to receive data from the AudioSource.
// `absolute_capture_timestamp_ms` may be absent if the capture time
// is unknown.
virtual void OnData(
const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
absl::optional<int64_t> absolute_capture_timestamp_ms) = 0;
// Called when the AudioSource is going away.
virtual void OnClose() = 0;
// Returns the number of channels encoded by the sink. This can be less than
// the number_of_channels if down-mixing occur. A value of -1 means an
// unknown number.
virtual int NumPreferredChannels() const = 0;
protected:
// Sinks are not destroyed through this interface.
virtual ~Sink() {}
};
// Sets a sink to the AudioSource. There can be only one sink connected
// to the source at a time.
virtual void SetSink(Sink* sink) = 0;
protected:
// Sources are not destroyed through this interface.
virtual ~AudioSource() {}
};
} // namespace cricket
#endif // MEDIA_BASE_AUDIO_SOURCE_H_

View file

@ -0,0 +1,522 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/codec.h"
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "api/audio_codecs/audio_format.h"
#include "api/video_codecs/av1_profile.h"
#include "api/video_codecs/h264_profile_level_id.h"
#ifdef RTC_ENABLE_H265
#include "api/video_codecs/h265_profile_tier_level.h"
#endif
#include "api/video_codecs/vp9_profile.h"
#include "media/base/media_constants.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/strings/string_builder.h"
namespace cricket {
namespace {
// Returns the H264 packetization-mode fmtp value, or "0" when absent
// (the default per https://tools.ietf.org/html/rfc6184#section-6.2).
std::string GetH264PacketizationModeOrDefault(
    const webrtc::CodecParameterMap& params) {
  auto it = params.find(kH264FmtpPacketizationMode);
  return it == params.end() ? "0" : it->second;
}

// Two H264 codecs only interoperate if they use the same
// packetization mode.
bool IsSameH264PacketizationMode(const webrtc::CodecParameterMap& left,
                                 const webrtc::CodecParameterMap& right) {
  return GetH264PacketizationModeOrDefault(left) ==
         GetH264PacketizationModeOrDefault(right);
}

#ifdef RTC_ENABLE_H265
// Returns the H265 tx-mode fmtp value, or "SRST" when absent (the
// value to infer per https://tools.ietf.org/html/rfc7798#section-7.1).
std::string GetH265TxModeOrDefault(const webrtc::CodecParameterMap& params) {
  auto it = params.find(kH265FmtpTxMode);
  return it == params.end() ? "SRST" : it->second;
}

// Two H265 codecs only interoperate if they use the same tx-mode
// (compared case-insensitively).
bool IsSameH265TxMode(const webrtc::CodecParameterMap& left,
                      const webrtc::CodecParameterMap& right) {
  return absl::EqualsIgnoreCase(GetH265TxModeOrDefault(left),
                                GetH265TxModeOrDefault(right));
}
#endif
// Some (video) codecs are actually families of codecs and rely on parameters
// to distinguish different incompatible family members.
// Returns true when `params1`/`params2` select the same family member for
// the codec named `name1`/`name2`, and true unconditionally for codecs
// that are not parameterized families.
bool IsSameCodecSpecific(const std::string& name1,
                         const webrtc::CodecParameterMap& params1,
                         const std::string& name2,
                         const webrtc::CodecParameterMap& params2) {
  // The names might not necessarily match, so check both.
  // Take the candidate name as a string_view: the original pass-by-value
  // `const std::string` copied the literal into a new string on every call.
  auto either_name_matches = [&](absl::string_view name) {
    return absl::EqualsIgnoreCase(name, name1) ||
           absl::EqualsIgnoreCase(name, name2);
  };
  if (either_name_matches(kH264CodecName))
    return webrtc::H264IsSameProfile(params1, params2) &&
           IsSameH264PacketizationMode(params1, params2);
  if (either_name_matches(kVp9CodecName))
    return webrtc::VP9IsSameProfile(params1, params2);
  if (either_name_matches(kAv1CodecName))
    return webrtc::AV1IsSameProfile(params1, params2);
#ifdef RTC_ENABLE_H265
  if (either_name_matches(kH265CodecName)) {
    return webrtc::H265IsSameProfileTierLevel(params1, params2) &&
           IsSameH265TxMode(params1, params2);
  }
#endif
  return true;
}
} // namespace
FeedbackParams::FeedbackParams() = default;
FeedbackParams::~FeedbackParams() = default;

// Feedback params compare equal when both id and value match,
// case-insensitively.
bool FeedbackParam::operator==(const FeedbackParam& other) const {
  return absl::EqualsIgnoreCase(id(), other.id()) &&
         absl::EqualsIgnoreCase(param(), other.param());
}

bool FeedbackParams::operator==(const FeedbackParams& other) const {
  return params_ == other.params_;
}

// Linear search is fine: feedback param lists are tiny.
bool FeedbackParams::Has(const FeedbackParam& param) const {
  return absl::c_linear_search(params_, param);
}

// Appends `param` unless it is nameless or already present.
void FeedbackParams::Add(const FeedbackParam& param) {
  if (param.id().empty() || Has(param)) {
    return;
  }
  params_.push_back(param);
  RTC_CHECK(!HasDuplicateEntries());
}
// Removes every param that is not also present in `from`.
void FeedbackParams::Intersect(const FeedbackParams& from) {
  for (auto it = params_.begin(); it != params_.end();) {
    if (from.Has(*it)) {
      ++it;
    } else {
      it = params_.erase(it);
    }
  }
}

// Quadratic scan for duplicates; acceptable because the list is tiny.
bool FeedbackParams::HasDuplicateEntries() const {
  for (size_t i = 0; i < params_.size(); ++i) {
    for (size_t j = i + 1; j < params_.size(); ++j) {
      if (params_[i] == params_[j]) {
        return true;
      }
    }
  }
  return false;
}
// Delegates to the full constructor with zero channels (video codecs
// and audio codecs that leave channels unspecified).
Codec::Codec(Type type, int id, const std::string& name, int clockrate)
: Codec(type, id, name, clockrate, 0) {}
// Full constructor; `bitrate` always starts at 0 (unset).
Codec::Codec(Type type,
int id,
const std::string& name,
int clockrate,
size_t channels)
: type(type),
id(id),
name(name),
clockrate(clockrate),
bitrate(0),
channels(channels) {}
// Creates an empty codec of the given media type.
Codec::Codec(Type type) : Codec(type, 0, "", 0) {}
// Builds an audio codec (payload type 0) from an SDP audio format.
Codec::Codec(const webrtc::SdpAudioFormat& c)
: Codec(Type::kAudio, 0, c.name, c.clockrate_hz, c.num_channels) {
params = c.parameters;
}
// Builds a video codec (payload type 0) from an SDP video format; all
// video codecs use the fixed RTP video clockrate.
Codec::Codec(const webrtc::SdpVideoFormat& c)
: Codec(Type::kVideo, 0, c.name, kVideoCodecClockrate) {
params = c.parameters;
scalability_modes = c.scalability_modes;
}
Codec::Codec(const Codec& c) = default;
Codec::Codec(Codec&& c) = default;
Codec::~Codec() = default;
Codec& Codec::operator=(const Codec& c) = default;
Codec& Codec::operator=(Codec&& c) = default;
// Full equality: common fields always compared; bitrate/channels only
// for audio, packetization only for video.
bool Codec::operator==(const Codec& c) const {
return type == c.type && this->id == c.id && // id is reserved in objective-c
name == c.name && clockrate == c.clockrate && params == c.params &&
feedback_params == c.feedback_params &&
(type == Type::kAudio
? (bitrate == c.bitrate && channels == c.channels)
: (packetization == c.packetization));
}
bool Codec::Matches(const Codec& codec) const {
// Match the codec id/name based on the typical static/dynamic name rules.
// Matching is case-insensitive.
// We support the ranges [96, 127] and more recently [35, 65].
// https://www.iana.org/assignments/rtp-parameters/rtp-parameters.xhtml#rtp-parameters-1
// Within those ranges we match by codec name, outside by codec id.
// Since no codecs are assigned an id in the range [66, 95] by us, these will
// never match.
const int kLowerDynamicRangeMin = 35;
const int kLowerDynamicRangeMax = 65;
const int kUpperDynamicRangeMin = 96;
const int kUpperDynamicRangeMax = 127;
const bool is_id_in_dynamic_range =
(id >= kLowerDynamicRangeMin && id <= kLowerDynamicRangeMax) ||
(id >= kUpperDynamicRangeMin && id <= kUpperDynamicRangeMax);
const bool is_codec_id_in_dynamic_range =
(codec.id >= kLowerDynamicRangeMin &&
codec.id <= kLowerDynamicRangeMax) ||
(codec.id >= kUpperDynamicRangeMin && codec.id <= kUpperDynamicRangeMax);
// Name-match only when BOTH ids are dynamic; otherwise require exact id.
bool matches_id = is_id_in_dynamic_range && is_codec_id_in_dynamic_range
? (absl::EqualsIgnoreCase(name, codec.name))
: (id == codec.id);
auto matches_type_specific = [&]() {
switch (type) {
case Type::kAudio:
// If a nonzero clockrate is specified, it must match the actual
// clockrate. If a nonzero bitrate is specified, it must match the
// actual bitrate, unless the codec is VBR (0), where we just force the
// supplied value. The number of channels must match exactly, with the
// exception that channels=0 is treated synonymously as channels=1, per
// RFC 4566 section 6: " [The channels] parameter is OPTIONAL and may be
// omitted if the number of channels is one."
// Preference is ignored.
// TODO(juberti): Treat a zero clockrate as 8000Hz, the RTP default
// clockrate.
return ((codec.clockrate == 0 /*&& clockrate == 8000*/) ||
clockrate == codec.clockrate) &&
(codec.bitrate == 0 || bitrate <= 0 ||
bitrate == codec.bitrate) &&
((codec.channels < 2 && channels < 2) ||
channels == codec.channels);
case Type::kVideo:
// Video match is delegated to the per-family parameter comparison
// (profiles, packetization mode, etc.).
return IsSameCodecSpecific(name, params, codec.name, codec.params);
}
// Both Type enumerators are handled above; control never reaches here.
};
return matches_id && matches_type_specific();
}
// Compares against an RtpCodec capability. RTX codecs match on
// name/kind alone; other codecs also require channels, clock rate and
// fmtp parameters to be identical.
bool Codec::MatchesRtpCodec(const webrtc::RtpCodec& codec_capability) const {
webrtc::RtpCodecParameters codec_parameters = ToCodecParameters();
return codec_parameters.name == codec_capability.name &&
codec_parameters.kind == codec_capability.kind &&
(codec_parameters.name == cricket::kRtxCodecName ||
(codec_parameters.num_channels == codec_capability.num_channels &&
codec_parameters.clock_rate == codec_capability.clock_rate &&
codec_parameters.parameters == codec_capability.parameters));
}
// Looks up `name` in the fmtp params; writes the value to `out` and
// returns true when found.
bool Codec::GetParam(const std::string& name, std::string* out) const {
  auto it = params.find(name);
  if (it == params.end()) {
    return false;
  }
  *out = it->second;
  return true;
}

// Integer variant: additionally requires the value to parse as an int.
bool Codec::GetParam(const std::string& name, int* out) const {
  auto it = params.find(name);
  return it != params.end() && rtc::FromString(it->second, out);
}

void Codec::SetParam(const std::string& name, const std::string& value) {
  params[name] = value;
}

void Codec::SetParam(const std::string& name, int value) {
  params[name] = rtc::ToString(value);
}

// Returns true if the parameter existed; removing an absent parameter
// is safe and returns false.
bool Codec::RemoveParam(const std::string& name) {
  return params.erase(name) == 1;
}

void Codec::AddFeedbackParam(const FeedbackParam& param) {
  feedback_params.Add(param);
}

bool Codec::HasFeedbackParam(const FeedbackParam& param) const {
  return feedback_params.Has(param);
}

// Keeps only the feedback params shared with `other`.
void Codec::IntersectFeedbackParams(const Codec& other) {
  feedback_params.Intersect(other.feedback_params);
}
// Converts this codec into the public RtpCodecParameters form.
// num_channels is only meaningful for audio; video leaves it unset.
webrtc::RtpCodecParameters Codec::ToCodecParameters() const {
webrtc::RtpCodecParameters codec_params;
codec_params.payload_type = id;
codec_params.name = name;
codec_params.clock_rate = clockrate;
codec_params.parameters.insert(params.begin(), params.end());
switch (type) {
case Type::kAudio: {
codec_params.num_channels = static_cast<int>(channels);
codec_params.kind = MEDIA_TYPE_AUDIO;
break;
}
case Type::kVideo: {
codec_params.kind = MEDIA_TYPE_VIDEO;
break;
}
}
return codec_params;
}
// A media codec is anything that is neither a resiliency codec nor
// comfort noise.
bool Codec::IsMediaCodec() const {
  return !IsResiliencyCodec() &&
         !absl::EqualsIgnoreCase(name, kComfortNoiseCodecName);
}

bool Codec::IsResiliencyCodec() const {
  return GetResiliencyType() != ResiliencyType::kNone;
}

// Classifies the codec by its (case-insensitive) name; anything not in
// the table is a plain media codec (kNone).
Codec::ResiliencyType Codec::GetResiliencyType() const {
  const struct {
    const char* codec_name;
    ResiliencyType type;
  } kResiliencyCodecs[] = {
      {kRedCodecName, ResiliencyType::kRed},
      {kUlpfecCodecName, ResiliencyType::kUlpfec},
      {kFlexfecCodecName, ResiliencyType::kFlexfec},
      {kRtxCodecName, ResiliencyType::kRtx},
  };
  for (const auto& entry : kResiliencyCodecs) {
    if (absl::EqualsIgnoreCase(name, entry.codec_name)) {
      return entry.type;
    }
  }
  return ResiliencyType::kNone;
}
// Sanity-checks the codec: payload type must be a valid RTP payload
// type [0, 127], and if both min and max bitrate params are present,
// max must not be below min. Resiliency codecs skip the bitrate check.
bool Codec::ValidateCodecFormat() const {
if (id < 0 || id > 127) {
RTC_LOG(LS_ERROR) << "Codec with invalid payload type: " << ToString();
return false;
}
if (IsResiliencyCodec()) {
return true;
}
// Video-specific validation.
int min_bitrate = -1;
int max_bitrate = -1;
if (GetParam(kCodecParamMinBitrate, &min_bitrate) &&
GetParam(kCodecParamMaxBitrate, &max_bitrate)) {
if (max_bitrate < min_bitrate) {
RTC_LOG(LS_ERROR) << "Codec with max < min bitrate: " << ToString();
return false;
}
}
return true;
}
// Human-readable debug form, e.g. "AudioCodec[111:opus:48000:0:2]" or
// "VideoCodec[96:VP8]". Built in a fixed stack buffer.
std::string Codec::ToString() const {
char buf[256];
rtc::SimpleStringBuilder sb(buf);
switch (type) {
case Type::kAudio: {
sb << "AudioCodec[" << id << ":" << name << ":" << clockrate << ":"
<< bitrate << ":" << channels << "]";
break;
}
case Type::kVideo: {
sb << "VideoCodec[" << id << ":" << name;
if (packetization.has_value()) {
sb << ":" << *packetization;
}
sb << "]";
break;
}
}
return sb.str();
}
// Builds an audio RTX codec bound to `associated_payload_type` via the
// apt fmtp parameter.
Codec CreateAudioRtxCodec(int rtx_payload_type, int associated_payload_type) {
  Codec rtx_codec = CreateAudioCodec(rtx_payload_type, kRtxCodecName, 0, 1);
  rtx_codec.SetParam(kCodecParamAssociatedPayloadType, associated_payload_type);
  return rtx_codec;
}

// Video counterpart of CreateAudioRtxCodec.
Codec CreateVideoRtxCodec(int rtx_payload_type, int associated_payload_type) {
  Codec rtx_codec = CreateVideoCodec(rtx_payload_type, kRtxCodecName);
  rtx_codec.SetParam(kCodecParamAssociatedPayloadType, associated_payload_type);
  return rtx_codec;
}

// Returns the codec with the given payload type, or nullptr if absent.
const Codec* FindCodecById(const std::vector<Codec>& codecs, int payload_type) {
  auto it = absl::c_find_if(
      codecs, [&](const Codec& codec) { return codec.id == payload_type; });
  return it == codecs.end() ? nullptr : &*it;
}
// True if the codec advertises Loss Notification (lntf) feedback.
bool HasLntf(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamLntf, kParamValueEmpty));
}
// True if the codec advertises NACK feedback.
bool HasNack(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
}
// True if the codec advertises REMB bandwidth-estimation feedback.
bool HasRemb(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
}
// True if the codec advertises RRTR (receiver reference time) feedback.
bool HasRrtr(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamRrtr, kParamValueEmpty));
}
// True if the codec advertises transport-wide congestion control feedback.
bool HasTransportCc(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
}
// Returns the first entry of `supported_codecs` that is the same codec
// (name + distinguishing parameters) as `codec`, or nullptr.
const Codec* FindMatchingVideoCodec(const std::vector<Codec>& supported_codecs,
                                    const Codec& codec) {
  webrtc::SdpVideoFormat target{codec.name, codec.params};
  for (const Codec& candidate : supported_codecs) {
    if (target.IsSameCodec({candidate.name, candidate.params})) {
      return &candidate;
    }
  }
  return nullptr;
}

// Returns pointers to every entry of `supported_codecs` that is the
// same codec as `codec`.
std::vector<const Codec*> FindAllMatchingCodecs(
    const std::vector<Codec>& supported_codecs,
    const Codec& codec) {
  std::vector<const Codec*> matches;
  webrtc::SdpVideoFormat target{codec.name, codec.params};
  for (const Codec& candidate : supported_codecs) {
    if (target.IsSameCodec({candidate.name, candidate.params})) {
      matches.push_back(&candidate);
    }
  }
  return matches;
}
// If a decoder supports any H264 profile, it is implicitly assumed to also
// support constrained baseline even though it's not explicitly listed.
void AddH264ConstrainedBaselineProfileToSupportedFormats(
    std::vector<webrtc::SdpVideoFormat>* supported_formats) {
  // For any H264 supported profile, collect the corresponding constrained
  // baseline profile.
  std::vector<webrtc::SdpVideoFormat> cbp_formats;
  for (const webrtc::SdpVideoFormat& format : *supported_formats) {
    if (format.name != cricket::kH264CodecName) {
      continue;
    }
    const absl::optional<webrtc::H264ProfileLevelId> profile_level_id =
        webrtc::ParseSdpForH264ProfileLevelId(format.parameters);
    if (!profile_level_id ||
        profile_level_id->profile ==
            webrtc::H264Profile::kProfileConstrainedBaseline) {
      continue;
    }
    webrtc::SdpVideoFormat cbp_format = format;
    webrtc::H264ProfileLevelId cbp_profile = *profile_level_id;
    cbp_profile.profile = webrtc::H264Profile::kProfileConstrainedBaseline;
    cbp_format.parameters[cricket::kH264FmtpProfileLevelId] =
        *webrtc::H264ProfileLevelIdToString(cbp_profile);
    cbp_formats.push_back(cbp_format);
  }
  const size_t original_size = supported_formats->size();
  // Append each constrained baseline variant that is not already listed.
  std::copy_if(cbp_formats.begin(), cbp_formats.end(),
               std::back_inserter(*supported_formats),
               [supported_formats](const webrtc::SdpVideoFormat& format) {
                 return !format.IsCodecInList(*supported_formats);
               });
  if (supported_formats->size() > original_size) {
    RTC_LOG(LS_WARNING) << "Explicitly added H264 constrained baseline to list "
                           "of supported formats.";
  }
}
// Factory for an audio codec with explicit payload type, clockrate and
// channel count.
Codec CreateAudioCodec(int id,
const std::string& name,
int clockrate,
size_t channels) {
return Codec(Codec::Type::kAudio, id, name, clockrate, channels);
}
// Factory for an audio codec from an SDP audio format (payload type 0).
Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c) {
return Codec(c);
}
// Factory for a video codec with payload type 0.
Codec CreateVideoCodec(const std::string& name) {
return CreateVideoCodec(0, name);
}
Codec CreateVideoCodec(int id, const std::string& name) {
Codec c(Codec::Type::kVideo, id, name, kVideoCodecClockrate);
if (absl::EqualsIgnoreCase(kH264CodecName, name)) {
// This default is set for all H.264 codecs created because
// that was the default before packetization mode support was added.
// TODO(hta): Move this to the places that create VideoCodecs from
// SDP or from knowledge of implementation capabilities.
c.SetParam(kH264FmtpPacketizationMode, "1");
}
return c;
}
// Factory for a video codec from an SDP video format (payload type 0).
Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c) {
return Codec(c);
}
} // namespace cricket

View file

@ -0,0 +1,230 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_CODEC_H_
#define MEDIA_BASE_CODEC_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_format.h"
#include "api/rtp_parameters.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/media_constants.h"
#include "rtc_base/system/rtc_export.h"
namespace cricket {
// A single RTCP feedback capability, e.g. ("nack", "") or ("ccm", "fir").
class FeedbackParam {
public:
FeedbackParam() = default;
FeedbackParam(absl::string_view id, const std::string& param)
: id_(id), param_(param) {}
// Creates a param with an empty value.
explicit FeedbackParam(absl::string_view id)
: id_(id), param_(kParamValueEmpty) {}
// Case-insensitive comparison of both id and value.
bool operator==(const FeedbackParam& other) const;
bool operator!=(const FeedbackParam& c) const { return !(*this == c); }
const std::string& id() const { return id_; }
const std::string& param() const { return param_; }
private:
std::string id_; // e.g. "nack", "ccm"
std::string param_; // e.g. "", "rpsi", "fir"
};
// A small, duplicate-free collection of FeedbackParam entries.
class FeedbackParams {
public:
FeedbackParams();
~FeedbackParams();
bool operator==(const FeedbackParams& other) const;
bool operator!=(const FeedbackParams& c) const { return !(*this == c); }
// Membership test (case-insensitive, via FeedbackParam::operator==).
bool Has(const FeedbackParam& param) const;
// Adds `param` unless it is nameless or already present.
void Add(const FeedbackParam& param);
// Keeps only params also present in `from`.
void Intersect(const FeedbackParams& from);
const std::vector<FeedbackParam>& params() const { return params_; }
private:
bool HasDuplicateEntries() const;
std::vector<FeedbackParam> params_;
};
// A single audio or video codec description: payload type, name,
// clockrate, fmtp parameters and RTCP feedback capabilities.
struct RTC_EXPORT Codec {
enum class Type {
kAudio,
kVideo,
};
enum class ResiliencyType {
kNone,
kRed,
kUlpfec,
kFlexfec,
kRtx,
};
Type type;
// RTP payload type.
int id;
std::string name;
int clockrate;
// Audio only
// Can be used to override the target bitrate in the encoder.
// TODO(orphis): Remove in favor of alternative APIs
int bitrate;
size_t channels;
// Video only
absl::optional<std::string> packetization;
absl::InlinedVector<webrtc::ScalabilityMode, webrtc::kScalabilityModeCount>
scalability_modes;
// H.265 only
absl::optional<std::string> tx_mode;
// Non key-value parameters such as the telephone-event "015" are
// represented using an empty string as key, i.e. {"": "0-15"}.
webrtc::CodecParameterMap params;
FeedbackParams feedback_params;
Codec(const Codec& c);
Codec(Codec&& c);
virtual ~Codec();
// Indicates if this codec is compatible with the specified codec by
// checking the assigned id and profile values for the relevant video codecs.
// For H.264, packetization modes will be compared; If H.265 is enabled,
// TxModes will be compared.
// H.264(and H.265, if enabled) levels are not compared.
bool Matches(const Codec& codec) const;
bool MatchesRtpCodec(const webrtc::RtpCodec& capability) const;
// Find the parameter for `name` and write the value to `out`.
bool GetParam(const std::string& name, std::string* out) const;
bool GetParam(const std::string& name, int* out) const;
void SetParam(const std::string& name, const std::string& value);
void SetParam(const std::string& name, int value);
// It is safe to input a non-existent parameter.
// Returns true if the parameter existed, false if it did not exist.
bool RemoveParam(const std::string& name);
bool HasFeedbackParam(const FeedbackParam& param) const;
void AddFeedbackParam(const FeedbackParam& param);
// Filter `this` feedbacks params such that only those shared by both `this`
// and `other` are kept.
void IntersectFeedbackParams(const Codec& other);
virtual webrtc::RtpCodecParameters ToCodecParameters() const;
// The codec represent an actual media codec, and not a resiliency codec.
bool IsMediaCodec() const;
// The codec represent a resiliency codec such as RED, RTX or FEC variants.
bool IsResiliencyCodec() const;
ResiliencyType GetResiliencyType() const;
// Validates a VideoCodec's payload type, dimensions and bitrates etc. If they
// don't make sense (such as max < min bitrate), and error is logged and
// ValidateCodecFormat returns false.
bool ValidateCodecFormat() const;
std::string ToString() const;
Codec& operator=(const Codec& c);
Codec& operator=(Codec&& c);
bool operator==(const Codec& c) const;
bool operator!=(const Codec& c) const { return !(*this == c); }
protected:
// Constructors are protected: instances are created via the Create*
// friend factories below.
// Creates an empty codec.
explicit Codec(Type type);
// Creates a codec with the given parameters.
Codec(Type type, int id, const std::string& name, int clockrate);
Codec(Type type,
int id,
const std::string& name,
int clockrate,
size_t channels);
explicit Codec(const webrtc::SdpAudioFormat& c);
explicit Codec(const webrtc::SdpVideoFormat& c);
friend Codec CreateAudioCodec(int id,
const std::string& name,
int clockrate,
size_t channels);
friend Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c);
friend Codec CreateAudioRtxCodec(int rtx_payload_type,
int associated_payload_type);
friend Codec CreateVideoCodec(int id, const std::string& name);
friend Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c);
friend Codec CreateVideoRtxCodec(int rtx_payload_type,
int associated_payload_type);
};
// TODO(webrtc:15214): Compatibility names, to be migrated away and removed.
using VideoCodec = Codec;
using AudioCodec = Codec;
using VideoCodecs = std::vector<Codec>;
using AudioCodecs = std::vector<Codec>;
// Factory functions; the Codec constructors are protected so these are
// the only way to create instances.
Codec CreateAudioCodec(int id,
const std::string& name,
int clockrate,
size_t channels);
Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c);
Codec CreateAudioRtxCodec(int rtx_payload_type, int associated_payload_type);
Codec CreateVideoCodec(const std::string& name);
Codec CreateVideoCodec(int id, const std::string& name);
Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c);
Codec CreateVideoRtxCodec(int rtx_payload_type, int associated_payload_type);
// Get the codec setting associated with `payload_type`. If there
// is no codec associated with that payload type it returns nullptr.
const Codec* FindCodecById(const std::vector<Codec>& codecs, int payload_type);
// Predicates testing for specific RTCP feedback capabilities.
bool HasLntf(const Codec& codec);
bool HasNack(const Codec& codec);
bool HasRemb(const Codec& codec);
bool HasRrtr(const Codec& codec);
bool HasTransportCc(const Codec& codec);
// Returns the first codec in `supported_codecs` that matches `codec`, or
// nullptr if no codec matches.
const Codec* FindMatchingVideoCodec(const std::vector<Codec>& supported_codecs,
const Codec& codec);
// Returns all codecs in `supported_codecs` that matches `codec`.
std::vector<const Codec*> FindAllMatchingCodecs(
const std::vector<Codec>& supported_codecs,
const Codec& codec);
RTC_EXPORT void AddH264ConstrainedBaselineProfileToSupportedFormats(
std::vector<webrtc::SdpVideoFormat>* supported_formats);
} // namespace cricket
#endif // MEDIA_BASE_CODEC_H_

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_frame_source.h"
#include "api/scoped_refptr.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace cricket {
// Constructs a source producing frames of a fixed size at a fixed interval,
// with frame timestamps starting at `timestamp_offset_us`.
FakeFrameSource::FakeFrameSource(int width,
                                 int height,
                                 int interval_us,
                                 int64_t timestamp_offset_us)
    : width_(width),
      height_(height),
      interval_us_(interval_us),
      next_timestamp_us_(timestamp_offset_us) {
  // Dimensions and interval must be strictly positive; the starting
  // timestamp may be zero but never negative.
  RTC_CHECK_GT(width_, 0);
  RTC_CHECK_GT(height_, 0);
  RTC_CHECK_GT(interval_us_, 0);
  RTC_CHECK_GE(next_timestamp_us_, 0);
}
// Convenience overload: timestamps start at the current time.
FakeFrameSource::FakeFrameSource(int width, int height, int interval_us)
    : FakeFrameSource(width, height, interval_us, rtc::TimeMicros()) {}
webrtc::VideoRotation FakeFrameSource::GetRotation() const {
  return rotation_;
}
void FakeFrameSource::SetRotation(webrtc::VideoRotation rotation) {
  rotation_ = rotation;
}
// Produces a frame with the configured rotation already baked into the
// buffer dimensions: width/height are swapped for 90/270 degrees and the
// frame's rotation field is reset to 0.
webrtc::VideoFrame FakeFrameSource::GetFrameRotationApplied() {
  switch (rotation_) {
    case webrtc::kVideoRotation_0:
    case webrtc::kVideoRotation_180:
      return GetFrame(width_, height_, webrtc::kVideoRotation_0, interval_us_);
    case webrtc::kVideoRotation_90:
    case webrtc::kVideoRotation_270:
      return GetFrame(height_, width_, webrtc::kVideoRotation_0, interval_us_);
  }
  RTC_DCHECK_NOTREACHED() << "Invalid rotation value: "
                          << static_cast<int>(rotation_);
  // Without this return, the Windows Visual Studio compiler complains
  // "not all control paths return a value".
  return GetFrame();
}
// Produces the next frame using the configured size/rotation/interval.
webrtc::VideoFrame FakeFrameSource::GetFrame() {
  return GetFrame(width_, height_, rotation_, interval_us_);
}
// Produces a frame with explicit parameters, advancing the internal
// timestamp by `interval_us`. The buffer content is initialized but carries
// no meaningful image data.
webrtc::VideoFrame FakeFrameSource::GetFrame(int width,
                                             int height,
                                             webrtc::VideoRotation rotation,
                                             int interval_us) {
  RTC_CHECK_GT(width, 0);
  RTC_CHECK_GT(height, 0);
  RTC_CHECK_GT(interval_us, 0);
  rtc::scoped_refptr<webrtc::I420Buffer> buffer(
      webrtc::I420Buffer::Create(width, height));
  buffer->InitializeData();
  webrtc::VideoFrame frame = webrtc::VideoFrame::Builder()
                                 .set_video_frame_buffer(buffer)
                                 .set_rotation(rotation)
                                 .set_timestamp_us(next_timestamp_us_)
                                 .build();
  next_timestamp_us_ += interval_us;
  return frame;
}
} // namespace cricket

View file

@ -0,0 +1,50 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_FRAME_SOURCE_H_
#define MEDIA_BASE_FAKE_FRAME_SOURCE_H_
#include "api/video/video_frame.h"
#include "rtc_base/time_utils.h"
namespace cricket {
// Test helper that generates I420 video frames of a fixed size at a fixed
// interval, with monotonically increasing timestamps. Frame buffers are
// initialized but contain no meaningful image data.
class FakeFrameSource {
 public:
  // `timestamp_offset_us` is the timestamp of the first generated frame.
  FakeFrameSource(int width,
                  int height,
                  int interval_us,
                  int64_t timestamp_offset_us);
  // Starts timestamps at the current time.
  FakeFrameSource(int width, int height, int interval_us);
  webrtc::VideoRotation GetRotation() const;
  void SetRotation(webrtc::VideoRotation rotation);
  // Next frame with the configured size and rotation metadata.
  webrtc::VideoFrame GetFrame();
  // Next frame with rotation applied to the buffer itself (dimensions
  // swapped for 90/270 degrees, rotation metadata reset to 0).
  webrtc::VideoFrame GetFrameRotationApplied();
  // Override configuration.
  webrtc::VideoFrame GetFrame(int width,
                              int height,
                              webrtc::VideoRotation rotation,
                              int interval_us);

 private:
  const int width_;
  const int height_;
  const int interval_us_;
  webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
  // Timestamp assigned to the next generated frame.
  int64_t next_timestamp_us_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_FRAME_SOURCE_H_

View file

@ -0,0 +1,705 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_media_engine.h"
#include <memory>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "media/base/media_channel.h"
#include "rtc_base/checks.h"
namespace cricket {
using webrtc::TaskQueueBase;
// Records a single DTMF event for later inspection by tests.
FakeVoiceMediaReceiveChannel::DtmfInfo::DtmfInfo(uint32_t ssrc,
                                                 int event_code,
                                                 int duration)
    : ssrc(ssrc), event_code(event_code), duration(duration) {}
// Sink adapter that attaches itself to an AudioSource on construction and
// detaches on destruction (unless the source closed first).
FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::VoiceChannelAudioSink(
    AudioSource* source)
    : source_(source) {
  source_->SetSink(this);
}
FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
  if (source_) {
    source_->SetSink(nullptr);
  }
}
// Audio data is discarded; this fake only tracks source/sink wiring.
void FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::OnData(
    const void* audio_data,
    int bits_per_sample,
    int sample_rate,
    size_t number_of_channels,
    size_t number_of_frames,
    absl::optional<int64_t> absolute_capture_timestamp_ms) {}
// Source is going away: drop the pointer so the destructor does not call
// back into a dead source.
void FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::OnClose() {
  source_ = nullptr;
}
AudioSource* FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::source()
    const {
  return source_;
}
FakeVoiceMediaReceiveChannel::FakeVoiceMediaReceiveChannel(
    const AudioOptions& options,
    TaskQueueBase* network_thread)
    : RtpReceiveChannelHelper<VoiceMediaReceiveChannelInterface>(
          network_thread),
      max_bps_(-1) {
  output_scalings_[0] = 1.0;  // For default channel.
  SetOptions(options);
}
FakeVoiceMediaReceiveChannel::~FakeVoiceMediaReceiveChannel() = default;
// Test accessors.
const std::vector<AudioCodec>& FakeVoiceMediaReceiveChannel::recv_codecs()
    const {
  return recv_codecs_;
}
const std::vector<FakeVoiceMediaReceiveChannel::DtmfInfo>&
FakeVoiceMediaReceiveChannel::dtmf_info_queue() const {
  return dtmf_info_queue_;
}
const AudioOptions& FakeVoiceMediaReceiveChannel::options() const {
  return options_;
}
int FakeVoiceMediaReceiveChannel::max_bps() const {
  return max_bps_;
}
// Applies RTCP parameters, codecs and header extensions; false if the codec
// or extension update is rejected.
bool FakeVoiceMediaReceiveChannel::SetReceiverParameters(
    const AudioReceiverParameters& params) {
  set_recv_rtcp_parameters(params.rtcp);
  return (SetRecvCodecs(params.codecs) &&
          SetRecvRtpHeaderExtensions(params.extensions));
}
void FakeVoiceMediaReceiveChannel::SetPlayout(bool playout) {
  set_playout(playout);
}
bool FakeVoiceMediaReceiveChannel::HasSource(uint32_t ssrc) const {
  return local_sinks_.find(ssrc) != local_sinks_.end();
}
// Registers a receive stream and initializes its per-ssrc volume and delay
// bookkeeping.
bool FakeVoiceMediaReceiveChannel::AddRecvStream(const StreamParams& sp) {
  if (!RtpReceiveChannelHelper<
          VoiceMediaReceiveChannelInterface>::AddRecvStream(sp))
    return false;
  output_scalings_[sp.first_ssrc()] = 1.0;
  output_delays_[sp.first_ssrc()] = 0;
  return true;
}
// Unregisters the stream and drops its per-ssrc bookkeeping.
bool FakeVoiceMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
  if (!RtpReceiveChannelHelper<
          VoiceMediaReceiveChannelInterface>::RemoveRecvStream(ssrc))
    return false;
  output_scalings_.erase(ssrc);
  output_delays_.erase(ssrc);
  return true;
}
// Sets the playout volume for a known ssrc; false if the ssrc is unknown.
bool FakeVoiceMediaReceiveChannel::SetOutputVolume(uint32_t ssrc,
                                                   double volume) {
  if (output_scalings_.find(ssrc) != output_scalings_.end()) {
    output_scalings_[ssrc] = volume;
    return true;
  }
  return false;
}
// Applies `volume` to every known stream.
bool FakeVoiceMediaReceiveChannel::SetDefaultOutputVolume(double volume) {
  for (auto& entry : output_scalings_) {
    entry.second = volume;
  }
  return true;
}
bool FakeVoiceMediaReceiveChannel::GetOutputVolume(uint32_t ssrc,
                                                   double* volume) {
  if (output_scalings_.find(ssrc) == output_scalings_.end())
    return false;
  *volume = output_scalings_[ssrc];
  return true;
}
// Stores the minimum playout delay for a known ssrc; false if unknown.
bool FakeVoiceMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
                                                                int delay_ms) {
  if (output_delays_.find(ssrc) == output_delays_.end()) {
    return false;
  } else {
    output_delays_[ssrc] = delay_ms;
    return true;
  }
}
absl::optional<int> FakeVoiceMediaReceiveChannel::GetBaseMinimumPlayoutDelayMs(
    uint32_t ssrc) const {
  const auto it = output_delays_.find(ssrc);
  if (it != output_delays_.end()) {
    return it->second;
  }
  return absl::nullopt;
}
// Stats are not implemented by this fake.
bool FakeVoiceMediaReceiveChannel::GetStats(VoiceMediaReceiveInfo* info,
                                            bool get_and_clear_legacy_stats) {
  return false;
}
// Note: the per-ssrc and default raw-audio-sink setters share one slot.
void FakeVoiceMediaReceiveChannel::SetRawAudioSink(
    uint32_t ssrc,
    std::unique_ptr<webrtc::AudioSinkInterface> sink) {
  sink_ = std::move(sink);
}
void FakeVoiceMediaReceiveChannel::SetDefaultRawAudioSink(
    std::unique_ptr<webrtc::AudioSinkInterface> sink) {
  sink_ = std::move(sink);
}
std::vector<webrtc::RtpSource> FakeVoiceMediaReceiveChannel::GetSources(
    uint32_t ssrc) const {
  return std::vector<webrtc::RtpSource>();
}
// Accepts the codec list unless the test asked this fake to fail.
bool FakeVoiceMediaReceiveChannel::SetRecvCodecs(
    const std::vector<AudioCodec>& codecs) {
  if (fail_set_recv_codecs()) {
    // Fake the failure in SetRecvCodecs.
    return false;
  }
  recv_codecs_ = codecs;
  return true;
}
bool FakeVoiceMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
  max_bps_ = bps;
  return true;
}
bool FakeVoiceMediaReceiveChannel::SetOptions(const AudioOptions& options) {
  // Does a "merge" of current options and set options.
  options_.SetAll(options);
  return true;
}
// Records a single InsertDtmf() call for later inspection by tests.
FakeVoiceMediaSendChannel::DtmfInfo::DtmfInfo(uint32_t ssrc,
                                              int event_code,
                                              int duration)
    : ssrc(ssrc), event_code(event_code), duration(duration) {}
// Sink adapter that attaches itself to an AudioSource on construction and
// detaches on destruction (unless the source closed first).
FakeVoiceMediaSendChannel::VoiceChannelAudioSink::VoiceChannelAudioSink(
    AudioSource* source)
    : source_(source) {
  source_->SetSink(this);
}
FakeVoiceMediaSendChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
  if (source_) {
    source_->SetSink(nullptr);
  }
}
// Audio data is discarded; this fake only tracks source/sink wiring.
void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnData(
    const void* audio_data,
    int bits_per_sample,
    int sample_rate,
    size_t number_of_channels,
    size_t number_of_frames,
    absl::optional<int64_t> absolute_capture_timestamp_ms) {}
// Source is going away: drop the pointer so the destructor does not call
// back into a dead source.
void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnClose() {
  source_ = nullptr;
}
AudioSource* FakeVoiceMediaSendChannel::VoiceChannelAudioSink::source() const {
  return source_;
}
FakeVoiceMediaSendChannel::FakeVoiceMediaSendChannel(
    const AudioOptions& options,
    TaskQueueBase* network_thread)
    : RtpSendChannelHelper<VoiceMediaSendChannelInterface>(network_thread),
      max_bps_(-1) {
  output_scalings_[0] = 1.0;  // For default channel.
  SetOptions(options);
}
FakeVoiceMediaSendChannel::~FakeVoiceMediaSendChannel() = default;
const std::vector<AudioCodec>& FakeVoiceMediaSendChannel::send_codecs() const {
  return send_codecs_;
}
// The first configured codec is reported as the active send codec.
absl::optional<Codec> FakeVoiceMediaSendChannel::GetSendCodec() const {
  if (!send_codecs_.empty()) {
    return send_codecs_.front();
  }
  return absl::nullopt;
}
const std::vector<FakeVoiceMediaSendChannel::DtmfInfo>&
FakeVoiceMediaSendChannel::dtmf_info_queue() const {
  return dtmf_info_queue_;
}
const AudioOptions& FakeVoiceMediaSendChannel::options() const {
  return options_;
}
int FakeVoiceMediaSendChannel::max_bps() const {
  return max_bps_;
}
// Applies RTCP/extmap settings, codecs, header extensions, max bitrate and
// audio options; false if any sub-step is rejected.
bool FakeVoiceMediaSendChannel::SetSenderParameters(
    const AudioSenderParameter& params) {
  set_send_rtcp_parameters(params.rtcp);
  SetExtmapAllowMixed(params.extmap_allow_mixed);
  return (SetSendCodecs(params.codecs) &&
          SetSendRtpHeaderExtensions(params.extensions) &&
          SetMaxSendBandwidth(params.max_bandwidth_bps) &&
          SetOptions(params.options));
}
void FakeVoiceMediaSendChannel::SetSend(bool send) {
  set_sending(send);
}
// Attaches/detaches the audio source for `ssrc`, updates the mute state,
// and merges per-stream options when enabling.
bool FakeVoiceMediaSendChannel::SetAudioSend(uint32_t ssrc,
                                             bool enable,
                                             const AudioOptions* options,
                                             AudioSource* source) {
  if (!SetLocalSource(ssrc, source)) {
    return false;
  }
  if (!RtpSendChannelHelper<VoiceMediaSendChannelInterface>::MuteStream(
          ssrc, !enable)) {
    return false;
  }
  if (enable && options) {
    return SetOptions(*options);
  }
  return true;
}
bool FakeVoiceMediaSendChannel::HasSource(uint32_t ssrc) const {
  return local_sinks_.find(ssrc) != local_sinks_.end();
}
bool FakeVoiceMediaSendChannel::CanInsertDtmf() {
for (std::vector<AudioCodec>::const_iterator it = send_codecs_.begin();
it != send_codecs_.end(); ++it) {
// Find the DTMF telephone event "codec".
if (absl::EqualsIgnoreCase(it->name, "telephone-event")) {
return true;
}
}
return false;
}
// Records the DTMF event so tests can verify it via dtmf_info_queue().
bool FakeVoiceMediaSendChannel::InsertDtmf(uint32_t ssrc,
                                           int event_code,
                                           int duration) {
  dtmf_info_queue_.push_back(DtmfInfo(ssrc, event_code, duration));
  return true;
}
bool FakeVoiceMediaSendChannel::GetOutputVolume(uint32_t ssrc, double* volume) {
  if (output_scalings_.find(ssrc) == output_scalings_.end())
    return false;
  *volume = output_scalings_[ssrc];
  return true;
}
// Stats are not implemented by this fake.
bool FakeVoiceMediaSendChannel::GetStats(VoiceMediaSendInfo* info) {
  return false;
}
// Accepts the codec list unless the test asked this fake to fail.
bool FakeVoiceMediaSendChannel::SetSendCodecs(
    const std::vector<AudioCodec>& codecs) {
  if (fail_set_send_codecs()) {
    // Fake the failure in SetSendCodecs.
    return false;
  }
  send_codecs_ = codecs;
  return true;
}
bool FakeVoiceMediaSendChannel::SetMaxSendBandwidth(int bps) {
  max_bps_ = bps;
  return true;
}
bool FakeVoiceMediaSendChannel::SetOptions(const AudioOptions& options) {
  // Does a "merge" of current options and set options.
  options_.SetAll(options);
  return true;
}
// Creates or removes the sink wrapper for `ssrc`. Supplying a source for an
// ssrc that already has one requires it to be the same source (RTC_CHECK).
bool FakeVoiceMediaSendChannel::SetLocalSource(uint32_t ssrc,
                                               AudioSource* source) {
  auto it = local_sinks_.find(ssrc);
  if (source) {
    if (it != local_sinks_.end()) {
      RTC_CHECK(it->second->source() == source);
    } else {
      local_sinks_.insert(std::make_pair(
          ssrc, std::make_unique<VoiceChannelAudioSink>(source)));
    }
  } else {
    if (it != local_sinks_.end()) {
      local_sinks_.erase(it);
    }
  }
  return true;
}
// Returns true iff `info` matches the expected ssrc, event code and
// duration exactly.
bool CompareDtmfInfo(const FakeVoiceMediaSendChannel::DtmfInfo& info,
                     uint32_t ssrc,
                     int event_code,
                     int duration) {
  if (info.ssrc != ssrc) {
    return false;
  }
  if (info.event_code != event_code) {
    return false;
  }
  return info.duration == duration;
}
FakeVideoMediaSendChannel::FakeVideoMediaSendChannel(
    const VideoOptions& options,
    TaskQueueBase* network_thread)
    : RtpSendChannelHelper<VideoMediaSendChannelInterface>(network_thread),
      max_bps_(-1) {
  SetOptions(options);
}
FakeVideoMediaSendChannel::~FakeVideoMediaSendChannel() = default;
const std::vector<VideoCodec>& FakeVideoMediaSendChannel::send_codecs() const {
  return send_codecs_;
}
// Alias for send_codecs().
const std::vector<VideoCodec>& FakeVideoMediaSendChannel::codecs() const {
  return send_codecs();
}
const VideoOptions& FakeVideoMediaSendChannel::options() const {
  return options_;
}
int FakeVideoMediaSendChannel::max_bps() const {
  return max_bps_;
}
// Applies RTCP/extmap settings, codecs, header extensions and max bitrate;
// false if any sub-step is rejected.
bool FakeVideoMediaSendChannel::SetSenderParameters(
    const VideoSenderParameters& params) {
  set_send_rtcp_parameters(params.rtcp);
  SetExtmapAllowMixed(params.extmap_allow_mixed);
  return (SetSendCodecs(params.codecs) &&
          SetSendRtpHeaderExtensions(params.extensions) &&
          SetMaxSendBandwidth(params.max_bandwidth_bps));
}
// The first configured codec is reported as the active send codec.
absl::optional<Codec> FakeVideoMediaSendChannel::GetSendCodec() const {
  if (send_codecs_.empty()) {
    return absl::nullopt;
  }
  return send_codecs_[0];
}
bool FakeVideoMediaSendChannel::SetSend(bool send) {
  return set_sending(send);
}
// Associates (or clears, when `source` is null) the video source for
// `ssrc`, optionally applying new options first.
bool FakeVideoMediaSendChannel::SetVideoSend(
    uint32_t ssrc,
    const VideoOptions* options,
    rtc::VideoSourceInterface<webrtc::VideoFrame>* source) {
  if (options) {
    if (!SetOptions(*options)) {
      return false;
    }
  }
  sources_[ssrc] = source;
  return true;
}
bool FakeVideoMediaSendChannel::HasSource(uint32_t ssrc) const {
  return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
}
// Bandwidth estimation is not tracked by this fake.
void FakeVideoMediaSendChannel::FillBitrateInfo(
    BandwidthEstimationInfo* bwe_info) {}
// Stats are not implemented by this fake.
bool FakeVideoMediaSendChannel::GetStats(VideoMediaSendInfo* info) {
  return false;
}
// Accepts the codec list unless the test asked this fake to fail.
bool FakeVideoMediaSendChannel::SetSendCodecs(
    const std::vector<VideoCodec>& codecs) {
  if (fail_set_send_codecs()) {
    // Fake the failure in SetSendCodecs.
    return false;
  }
  send_codecs_ = codecs;
  return true;
}
// Unlike the audio channels, video options are replaced, not merged.
bool FakeVideoMediaSendChannel::SetOptions(const VideoOptions& options) {
  options_ = options;
  return true;
}
bool FakeVideoMediaSendChannel::SetMaxSendBandwidth(int bps) {
  max_bps_ = bps;
  return true;
}
// Key frame generation is a no-op in this fake.
void FakeVideoMediaSendChannel::GenerateSendKeyFrame(
    uint32_t ssrc,
    const std::vector<std::string>& rids) {}
FakeVideoMediaReceiveChannel::FakeVideoMediaReceiveChannel(
    const VideoOptions& options,
    TaskQueueBase* network_thread)
    : RtpReceiveChannelHelper<VideoMediaReceiveChannelInterface>(
          network_thread),
      max_bps_(-1) {
  SetOptions(options);
}
FakeVideoMediaReceiveChannel::~FakeVideoMediaReceiveChannel() = default;
const std::vector<VideoCodec>& FakeVideoMediaReceiveChannel::recv_codecs()
    const {
  return recv_codecs_;
}
// "Rendering" simply mirrors whether playout is enabled.
bool FakeVideoMediaReceiveChannel::rendering() const {
  return playout();
}
const VideoOptions& FakeVideoMediaReceiveChannel::options() const {
  return options_;
}
const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
FakeVideoMediaReceiveChannel::sinks() const {
  return sinks_;
}
int FakeVideoMediaReceiveChannel::max_bps() const {
  return max_bps_;
}
// Applies RTCP parameters, codecs and header extensions; false if the codec
// or extension update is rejected.
bool FakeVideoMediaReceiveChannel::SetReceiverParameters(
    const VideoReceiverParameters& params) {
  set_recv_rtcp_parameters(params.rtcp);
  return (SetRecvCodecs(params.codecs) &&
          SetRecvRtpHeaderExtensions(params.extensions));
}
// Attaches a sink for a known ssrc; false if AddRecvStream was never called
// for that ssrc.
bool FakeVideoMediaReceiveChannel::SetSink(
    uint32_t ssrc,
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
  auto it = sinks_.find(ssrc);
  if (it == sinks_.end()) {
    return false;
  }
  it->second = sink;
  return true;
}
// The default sink is ignored by this fake.
void FakeVideoMediaReceiveChannel::SetDefaultSink(
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {}
bool FakeVideoMediaReceiveChannel::HasSink(uint32_t ssrc) const {
  return sinks_.find(ssrc) != sinks_.end() && sinks_.at(ssrc) != nullptr;
}
bool FakeVideoMediaReceiveChannel::HasSource(uint32_t ssrc) const {
  return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
}
// Registers a receive stream with the helper base class (which rejects
// duplicates) and initializes its per-ssrc sink/delay bookkeeping.
bool FakeVideoMediaReceiveChannel::AddRecvStream(const StreamParams& sp) {
  if (!RtpReceiveChannelHelper<
          VideoMediaReceiveChannelInterface>::AddRecvStream(sp))
    return false;
  // No sink attached yet; use nullptr (not the C macro NULL) as the empty
  // sentinel, matching modern C++ usage elsewhere in this file.
  sinks_[sp.first_ssrc()] = nullptr;
  output_delays_[sp.first_ssrc()] = 0;
  return true;
}
// Unregisters the stream and drops its per-ssrc sink/delay bookkeeping.
bool FakeVideoMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
  if (!RtpReceiveChannelHelper<
          VideoMediaReceiveChannelInterface>::RemoveRecvStream(ssrc))
    return false;
  sinks_.erase(ssrc);
  output_delays_.erase(ssrc);
  return true;
}
std::vector<webrtc::RtpSource> FakeVideoMediaReceiveChannel::GetSources(
    uint32_t ssrc) const {
  return {};
}
// Stores the minimum playout delay for a known ssrc; false if unknown.
bool FakeVideoMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
                                                                int delay_ms) {
  if (output_delays_.find(ssrc) == output_delays_.end()) {
    return false;
  } else {
    output_delays_[ssrc] = delay_ms;
    return true;
  }
}
absl::optional<int> FakeVideoMediaReceiveChannel::GetBaseMinimumPlayoutDelayMs(
    uint32_t ssrc) const {
  const auto it = output_delays_.find(ssrc);
  if (it != output_delays_.end()) {
    return it->second;
  }
  return absl::nullopt;
}
// Accepts the codec list unless the test asked this fake to fail.
bool FakeVideoMediaReceiveChannel::SetRecvCodecs(
    const std::vector<VideoCodec>& codecs) {
  if (fail_set_recv_codecs()) {
    // Fake the failure in SetRecvCodecs.
    return false;
  }
  recv_codecs_ = codecs;
  return true;
}
// Unlike the audio channels, video options are replaced, not merged.
bool FakeVideoMediaReceiveChannel::SetOptions(const VideoOptions& options) {
  options_ = options;
  return true;
}
bool FakeVideoMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
  max_bps_ = bps;
  return true;
}
// Encoded-frame callbacks and key-frame requests are no-ops in this fake.
void FakeVideoMediaReceiveChannel::SetRecordableEncodedFrameCallback(
    uint32_t ssrc,
    std::function<void(const webrtc::RecordableEncodedFrame&)> callback) {}
void FakeVideoMediaReceiveChannel::ClearRecordableEncodedFrameCallback(
    uint32_t ssrc) {}
void FakeVideoMediaReceiveChannel::RequestRecvKeyFrame(uint32_t ssrc) {}
// Stats are not implemented by this fake.
bool FakeVideoMediaReceiveChannel::GetStats(VideoMediaReceiveInfo* info) {
  return false;
}
FakeVoiceEngine::FakeVoiceEngine() : fail_create_channel_(false) {
  // Add a fake audio codec. Note that the name must not be "" as there are
  // sanity checks against that.
  SetCodecs({cricket::CreateAudioCodec(101, "fake_audio_codec", 8000, 1)});
}
void FakeVoiceEngine::Init() {}
rtc::scoped_refptr<webrtc::AudioState> FakeVoiceEngine::GetAudioState() const {
  return rtc::scoped_refptr<webrtc::AudioState>();
}
// Creates a fake send channel; config/crypto/codec-pair arguments are
// ignored.
// NOTE(review): `fail_create_channel_` is set via
// FakeMediaEngine::set_fail_create_channel() but is not honored here
// (unlike FakeVideoEngine::CreateSendChannel) — confirm whether voice
// channel creation should also fail.
std::unique_ptr<VoiceMediaSendChannelInterface>
FakeVoiceEngine::CreateSendChannel(webrtc::Call* call,
                                   const MediaConfig& config,
                                   const AudioOptions& options,
                                   const webrtc::CryptoOptions& crypto_options,
                                   webrtc::AudioCodecPairId codec_pair_id) {
  std::unique_ptr<FakeVoiceMediaSendChannel> ch =
      std::make_unique<FakeVoiceMediaSendChannel>(options,
                                                  call->network_thread());
  return ch;
}
// Creates a fake receive channel; config/crypto/codec-pair arguments are
// ignored.
std::unique_ptr<VoiceMediaReceiveChannelInterface>
FakeVoiceEngine::CreateReceiveChannel(
    webrtc::Call* call,
    const MediaConfig& config,
    const AudioOptions& options,
    const webrtc::CryptoOptions& crypto_options,
    webrtc::AudioCodecPairId codec_pair_id) {
  std::unique_ptr<FakeVoiceMediaReceiveChannel> ch =
      std::make_unique<FakeVoiceMediaReceiveChannel>(options,
                                                     call->network_thread());
  return ch;
}
const std::vector<AudioCodec>& FakeVoiceEngine::send_codecs() const {
  return send_codecs_;
}
const std::vector<AudioCodec>& FakeVoiceEngine::recv_codecs() const {
  return recv_codecs_;
}
// Sets the same codec list for both directions.
void FakeVoiceEngine::SetCodecs(const std::vector<AudioCodec>& codecs) {
  send_codecs_ = codecs;
  recv_codecs_ = codecs;
}
void FakeVoiceEngine::SetRecvCodecs(const std::vector<AudioCodec>& codecs) {
  recv_codecs_ = codecs;
}
void FakeVoiceEngine::SetSendCodecs(const std::vector<AudioCodec>& codecs) {
  send_codecs_ = codecs;
}
int FakeVoiceEngine::GetInputLevel() {
  return 0;
}
// AEC dumps and audio-device stats are not supported by this fake.
bool FakeVoiceEngine::StartAecDump(webrtc::FileWrapper file,
                                   int64_t max_size_bytes) {
  return false;
}
absl::optional<webrtc::AudioDeviceModule::Stats>
FakeVoiceEngine::GetAudioDeviceStats() {
  return absl::nullopt;
}
void FakeVoiceEngine::StopAecDump() {}
std::vector<webrtc::RtpHeaderExtensionCapability>
FakeVoiceEngine::GetRtpHeaderExtensions() const {
  return header_extensions_;
}
void FakeVoiceEngine::SetRtpHeaderExtensions(
    std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions) {
  header_extensions_ = std::move(header_extensions);
}
FakeVideoEngine::FakeVideoEngine()
    : capture_(false), fail_create_channel_(false) {
  // Add a fake video codec. Note that the name must not be "" as there are
  // sanity checks against that.
  send_codecs_.push_back(cricket::CreateVideoCodec(111, "fake_video_codec"));
  recv_codecs_.push_back(cricket::CreateVideoCodec(111, "fake_video_codec"));
}
bool FakeVideoEngine::SetOptions(const VideoOptions& options) {
  options_ = options;
  return true;
}
// Creates a fake send channel, or nullptr when the test requested channel
// creation to fail. Config, crypto and allocator-factory arguments are
// ignored.
std::unique_ptr<VideoMediaSendChannelInterface>
FakeVideoEngine::CreateSendChannel(
    webrtc::Call* call,
    const MediaConfig& config,
    const VideoOptions& options,
    const webrtc::CryptoOptions& crypto_options,
    webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
  if (fail_create_channel_) {
    return nullptr;
  }
  std::unique_ptr<FakeVideoMediaSendChannel> ch =
      std::make_unique<FakeVideoMediaSendChannel>(options,
                                                  call->network_thread());
  return ch;
}
// Creates a fake receive channel, or nullptr when the test requested
// channel creation to fail.
std::unique_ptr<VideoMediaReceiveChannelInterface>
FakeVideoEngine::CreateReceiveChannel(
    webrtc::Call* call,
    const MediaConfig& config,
    const VideoOptions& options,
    const webrtc::CryptoOptions& crypto_options) {
  if (fail_create_channel_) {
    return nullptr;
  }
  std::unique_ptr<FakeVideoMediaReceiveChannel> ch =
      std::make_unique<FakeVideoMediaReceiveChannel>(options,
                                                     call->network_thread());
  return ch;
}
// `use_rtx` is ignored; the full configured lists are returned.
std::vector<VideoCodec> FakeVideoEngine::send_codecs(bool use_rtx) const {
  return send_codecs_;
}
std::vector<VideoCodec> FakeVideoEngine::recv_codecs(bool use_rtx) const {
  return recv_codecs_;
}
void FakeVideoEngine::SetSendCodecs(const std::vector<VideoCodec>& codecs) {
  send_codecs_ = codecs;
}
void FakeVideoEngine::SetRecvCodecs(const std::vector<VideoCodec>& codecs) {
  recv_codecs_ = codecs;
}
bool FakeVideoEngine::SetCapture(bool capture) {
  capture_ = capture;
  return true;
}
std::vector<webrtc::RtpHeaderExtensionCapability>
FakeVideoEngine::GetRtpHeaderExtensions() const {
  return header_extensions_;
}
void FakeVideoEngine::SetRtpHeaderExtensions(
    std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions) {
  header_extensions_ = std::move(header_extensions);
}
// Composite engine wiring the fake voice and video engines together; typed
// pointers to the sub-engines are kept for test configuration.
FakeMediaEngine::FakeMediaEngine()
    : CompositeMediaEngine(std::make_unique<FakeVoiceEngine>(),
                           std::make_unique<FakeVideoEngine>()),
      voice_(static_cast<FakeVoiceEngine*>(&voice())),
      video_(static_cast<FakeVideoEngine*>(&video())) {}
FakeMediaEngine::~FakeMediaEngine() {}
// Sets the audio codec list for both directions.
void FakeMediaEngine::SetAudioCodecs(const std::vector<AudioCodec>& codecs) {
  voice_->SetCodecs(codecs);
}
void FakeMediaEngine::SetAudioRecvCodecs(
    const std::vector<AudioCodec>& codecs) {
  voice_->SetRecvCodecs(codecs);
}
void FakeMediaEngine::SetAudioSendCodecs(
    const std::vector<AudioCodec>& codecs) {
  voice_->SetSendCodecs(codecs);
}
// Sets the video codec list for both directions.
void FakeMediaEngine::SetVideoCodecs(const std::vector<VideoCodec>& codecs) {
  video_->SetSendCodecs(codecs);
  video_->SetRecvCodecs(codecs);
}
// Makes subsequent channel creation fail on both sub-engines.
void FakeMediaEngine::set_fail_create_channel(bool fail) {
  voice_->fail_create_channel_ = fail;
  video_->fail_create_channel_ = fail;
}
} // namespace cricket

View file

@ -0,0 +1,876 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
#define MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/any_invocable.h"
#include "api/call/audio_sink.h"
#include "api/media_types.h"
#include "media/base/audio_source.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_engine.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "media/engine/webrtc_video_engine.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/network_route.h"
#include "rtc_base/thread.h"
using webrtc::RtpExtension;
namespace cricket {
class FakeMediaEngine;
class FakeVideoEngine;
class FakeVoiceEngine;
// A common helper class that handles sending and receiving RTP/RTCP packets.
template <class Base>
class RtpReceiveChannelHelper : public Base, public MediaChannelUtil {
 public:
  explicit RtpReceiveChannelHelper(webrtc::TaskQueueBase* network_thread)
      : MediaChannelUtil(network_thread),
        playout_(false),
        fail_set_recv_codecs_(false),
        transport_overhead_per_packet_(0),
        num_network_route_changes_(0) {}
  virtual ~RtpReceiveChannelHelper() = default;
  // Header extensions last set via SetRecvRtpHeaderExtensions().
  const std::vector<RtpExtension>& recv_extensions() {
    return recv_extensions_;
  }
  bool playout() const { return playout_; }
  // Raw packet payloads captured by OnPacketReceived()/
  // OnRtcpPacketReceived(), oldest first.
  const std::list<std::string>& rtp_packets() const { return rtp_packets_; }
  const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }
  // Sends an RTCP packet of `len` bytes.
  // NOTE(review): this forwards to Base::SendRtcp, while the send-side
  // helper forwards to MediaChannelUtil::SendRtcp — confirm this asymmetry
  // is intentional.
  bool SendRtcp(const void* data, size_t len) {
    rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
                                  kMaxRtpPacketLen);
    return Base::SendRtcp(&packet, rtc::PacketOptions());
  }
  // Pops the oldest captured RTP packet and compares it byte-for-byte with
  // `data`/`len`; false if nothing was captured or the bytes differ.
  bool CheckRtp(const void* data, size_t len) {
    bool success = !rtp_packets_.empty();
    if (success) {
      std::string packet = rtp_packets_.front();
      rtp_packets_.pop_front();
      success = (packet == std::string(static_cast<const char*>(data), len));
    }
    return success;
  }
  // Same as CheckRtp() but for captured RTCP packets.
  bool CheckRtcp(const void* data, size_t len) {
    bool success = !rtcp_packets_.empty();
    if (success) {
      std::string packet = rtcp_packets_.front();
      rtcp_packets_.pop_front();
      success = (packet == std::string(static_cast<const char*>(data), len));
    }
    return success;
  }
  bool CheckNoRtp() { return rtp_packets_.empty(); }
  bool CheckNoRtcp() { return rtcp_packets_.empty(); }
  // Makes subsequent SetRecvCodecs() calls on the derived fake fail.
  void set_fail_set_recv_codecs(bool fail) { fail_set_recv_codecs_ = fail; }
  // Unsignaled-stream and demuxer notifications are no-ops in this fake.
  void ResetUnsignaledRecvStream() override {}
  absl::optional<uint32_t> GetUnsignaledSsrc() const override {
    return absl::nullopt;
  }
  void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override {}
  virtual bool SetLocalSsrc(const StreamParams& sp) { return true; }
  void OnDemuxerCriteriaUpdatePending() override {}
  void OnDemuxerCriteriaUpdateComplete() override {}
  // Registers a receive stream (rejecting exact duplicates) and derives its
  // RTP parameters from the stream's encodings.
  bool AddRecvStream(const StreamParams& sp) override {
    if (absl::c_linear_search(receive_streams_, sp)) {
      return false;
    }
    receive_streams_.push_back(sp);
    rtp_receive_parameters_[sp.first_ssrc()] =
        CreateRtpParametersWithEncodings(sp);
    return true;
  }
  bool RemoveRecvStream(uint32_t ssrc) override {
    auto parameters_iterator = rtp_receive_parameters_.find(ssrc);
    if (parameters_iterator != rtp_receive_parameters_.end()) {
      rtp_receive_parameters_.erase(parameters_iterator);
    }
    return RemoveStreamBySsrc(&receive_streams_, ssrc);
  }
  // Returns the stored parameters for `ssrc`, or a default-constructed
  // RtpParameters if the ssrc is unknown.
  webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override {
    auto parameters_iterator = rtp_receive_parameters_.find(ssrc);
    if (parameters_iterator != rtp_receive_parameters_.end()) {
      return parameters_iterator->second;
    }
    return webrtc::RtpParameters();
  }
  webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override {
    return webrtc::RtpParameters();
  }
  const std::vector<StreamParams>& recv_streams() const {
    return receive_streams_;
  }
  bool HasRecvStream(uint32_t ssrc) const {
    return GetStreamBySsrc(receive_streams_, ssrc) != nullptr;
  }
  const RtcpParameters& recv_rtcp_parameters() { return recv_rtcp_parameters_; }
  int transport_overhead_per_packet() const {
    return transport_overhead_per_packet_;
  }
  rtc::NetworkRoute last_network_route() const { return last_network_route_; }
  int num_network_route_changes() const { return num_network_route_changes_; }
  void set_num_network_route_changes(int changes) {
    num_network_route_changes_ = changes;
  }
  // Captures an incoming RTCP packet for later CheckRtcp() inspection.
  void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
                            int64_t packet_time_us) {
    rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
  }
  // Frame decryption/transformation hooks are ignored by this fake.
  void SetFrameDecryptor(uint32_t ssrc,
                         rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
                             frame_decryptor) override {}
  void SetDepacketizerToDecoderFrameTransformer(
      uint32_t ssrc,
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override {}
  // Remembers the interface locally in addition to the base-class handling.
  void SetInterface(MediaChannelNetworkInterface* iface) override {
    network_interface_ = iface;
    MediaChannelUtil::SetInterface(iface);
  }

 protected:
  void set_playout(bool playout) { playout_ = playout; }
  bool SetRecvRtpHeaderExtensions(const std::vector<RtpExtension>& extensions) {
    recv_extensions_ = extensions;
    return true;
  }
  void set_recv_rtcp_parameters(const RtcpParameters& params) {
    recv_rtcp_parameters_ = params;
  }
  // Captures an incoming RTP packet for later CheckRtp() inspection.
  void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override {
    rtp_packets_.push_back(
        std::string(packet.Buffer().cdata<char>(), packet.size()));
  }
  bool fail_set_recv_codecs() const { return fail_set_recv_codecs_; }

 private:
  bool playout_;
  std::vector<RtpExtension> recv_extensions_;
  std::list<std::string> rtp_packets_;   // Captured incoming RTP payloads.
  std::list<std::string> rtcp_packets_;  // Captured incoming RTCP payloads.
  std::vector<StreamParams> receive_streams_;
  RtcpParameters recv_rtcp_parameters_;
  std::map<uint32_t, webrtc::RtpParameters> rtp_receive_parameters_;
  bool fail_set_recv_codecs_;
  std::string rtcp_cname_;  // NOTE(review): appears unused in this class.
  int transport_overhead_per_packet_;
  rtc::NetworkRoute last_network_route_;
  int num_network_route_changes_;
  MediaChannelNetworkInterface* network_interface_ = nullptr;
};
// A common helper class that handles sending and receiving RTP/RTCP packets.
// Fake implementation of the send side of a media channel. Records every
// sent packet, stream and parameter change so tests can assert against them.
template <class Base>
class RtpSendChannelHelper : public Base, public MediaChannelUtil {
public:
explicit RtpSendChannelHelper(webrtc::TaskQueueBase* network_thread)
: MediaChannelUtil(network_thread),
sending_(false),
fail_set_send_codecs_(false),
send_ssrc_(0),
ready_to_send_(false),
transport_overhead_per_packet_(0),
num_network_route_changes_(0) {}
virtual ~RtpSendChannelHelper() = default;
const std::vector<RtpExtension>& send_extensions() {
return send_extensions_;
}
bool sending() const { return sending_; }
// Raw byte strings of everything sent through this channel.
const std::list<std::string>& rtp_packets() const { return rtp_packets_; }
const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }
// Sends an RTP packet through MediaChannelUtil. Fails if the channel is
// not currently in the sending state.
bool SendPacket(const void* data,
size_t len,
const rtc::PacketOptions& options) {
if (!sending_) {
return false;
}
rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
kMaxRtpPacketLen);
return MediaChannelUtil::SendPacket(&packet, options);
}
// Sends an RTCP packet; unlike SendPacket this does not require sending_.
bool SendRtcp(const void* data, size_t len) {
rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
kMaxRtpPacketLen);
return MediaChannelUtil::SendRtcp(&packet, rtc::PacketOptions());
}
// Pops the oldest recorded RTP packet and compares it byte-for-byte with
// `data`/`len`. Returns false if none was recorded or the bytes differ.
bool CheckRtp(const void* data, size_t len) {
bool success = !rtp_packets_.empty();
if (success) {
std::string packet = rtp_packets_.front();
rtp_packets_.pop_front();
success = (packet == std::string(static_cast<const char*>(data), len));
}
return success;
}
// Same as CheckRtp, but for recorded RTCP packets.
bool CheckRtcp(const void* data, size_t len) {
bool success = !rtcp_packets_.empty();
if (success) {
std::string packet = rtcp_packets_.front();
rtcp_packets_.pop_front();
success = (packet == std::string(static_cast<const char*>(data), len));
}
return success;
}
bool CheckNoRtp() { return rtp_packets_.empty(); }
bool CheckNoRtcp() { return rtcp_packets_.empty(); }
void set_fail_set_send_codecs(bool fail) { fail_set_send_codecs_ = fail; }
// Adds a send stream, creates default RtpParameters for it, and notifies
// the SSRC-list-changed callback (if registered) with the full SSRC set.
// Returns false if an identical stream was already added.
bool AddSendStream(const StreamParams& sp) override {
if (absl::c_linear_search(send_streams_, sp)) {
return false;
}
send_streams_.push_back(sp);
rtp_send_parameters_[sp.first_ssrc()] =
CreateRtpParametersWithEncodings(sp);
if (ssrc_list_changed_callback_) {
std::set<uint32_t> ssrcs_in_use;
for (const auto& send_stream : send_streams_) {
ssrcs_in_use.insert(send_stream.first_ssrc());
}
ssrc_list_changed_callback_(ssrcs_in_use);
}
return true;
}
// Removes the stream and its stored RtpParameters. Returns false if no
// stream with `ssrc` exists.
bool RemoveSendStream(uint32_t ssrc) override {
auto parameters_iterator = rtp_send_parameters_.find(ssrc);
if (parameters_iterator != rtp_send_parameters_.end()) {
rtp_send_parameters_.erase(parameters_iterator);
}
return RemoveStreamBySsrc(&send_streams_, ssrc);
}
void SetSsrcListChangedCallback(
absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {
ssrc_list_changed_callback_ = std::move(callback);
}
void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
return MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
}
bool ExtmapAllowMixed() const override {
return MediaChannelUtil::ExtmapAllowMixed();
}
// Returns the stored parameters for `ssrc`, or a default-constructed
// RtpParameters if the SSRC is unknown.
webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override {
auto parameters_iterator = rtp_send_parameters_.find(ssrc);
if (parameters_iterator != rtp_send_parameters_.end()) {
return parameters_iterator->second;
}
return webrtc::RtpParameters();
}
// Validates and stores new parameters for `ssrc`, reporting the result
// through `callback` like the real media channel does.
webrtc::RTCError SetRtpSendParameters(
uint32_t ssrc,
const webrtc::RtpParameters& parameters,
webrtc::SetParametersCallback callback) override {
auto parameters_iterator = rtp_send_parameters_.find(ssrc);
if (parameters_iterator != rtp_send_parameters_.end()) {
auto result = CheckRtpParametersInvalidModificationAndValues(
parameters_iterator->second, parameters);
if (!result.ok()) {
return webrtc::InvokeSetParametersCallback(callback, result);
}
parameters_iterator->second = parameters;
return webrtc::InvokeSetParametersCallback(callback,
webrtc::RTCError::OK());
}
// Replicate the behavior of the real media channel: fail
// when setting parameters for unknown SSRCs.
return InvokeSetParametersCallback(
callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
}
bool IsStreamMuted(uint32_t ssrc) const {
bool ret = muted_streams_.find(ssrc) != muted_streams_.end();
// If |ssrc = 0| check if the first send stream is muted.
if (!ret && ssrc == 0 && !send_streams_.empty()) {
return muted_streams_.find(send_streams_[0].first_ssrc()) !=
muted_streams_.end();
}
return ret;
}
const std::vector<StreamParams>& send_streams() const {
return send_streams_;
}
bool HasSendStream(uint32_t ssrc) const {
return GetStreamBySsrc(send_streams_, ssrc) != nullptr;
}
// TODO(perkj): This is to support legacy unit test that only check one
// sending stream.
uint32_t send_ssrc() const {
if (send_streams_.empty())
return 0;
return send_streams_[0].first_ssrc();
}
const RtcpParameters& send_rtcp_parameters() { return send_rtcp_parameters_; }
bool ready_to_send() const { return ready_to_send_; }
int transport_overhead_per_packet() const {
return transport_overhead_per_packet_;
}
rtc::NetworkRoute last_network_route() const { return last_network_route_; }
int num_network_route_changes() const { return num_network_route_changes_; }
void set_num_network_route_changes(int changes) {
num_network_route_changes_ = changes;
}
// Records a received RTCP packet as a raw byte string.
void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
int64_t packet_time_us) {
rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
}
// Stuff that deals with encryptors, transformers and the like
// (intentionally no-ops in this fake).
void SetFrameEncryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
frame_encryptor) override {}
void SetEncoderToPacketizerFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override {}
void SetInterface(MediaChannelNetworkInterface* iface) override {
network_interface_ = iface;
MediaChannelUtil::SetInterface(iface);
}
bool HasNetworkInterface() const override {
return network_interface_ != nullptr;
}
protected:
// Mutes/unmutes a stream. `ssrc == 0` is accepted even without a matching
// stream (see IsStreamMuted); otherwise the stream must exist.
bool MuteStream(uint32_t ssrc, bool mute) {
if (!HasSendStream(ssrc) && ssrc != 0) {
return false;
}
if (mute) {
muted_streams_.insert(ssrc);
} else {
muted_streams_.erase(ssrc);
}
return true;
}
bool set_sending(bool send) {
sending_ = send;
return true;
}
bool SetSendRtpHeaderExtensions(const std::vector<RtpExtension>& extensions) {
send_extensions_ = extensions;
return true;
}
void set_send_rtcp_parameters(const RtcpParameters& params) {
send_rtcp_parameters_ = params;
}
void OnPacketSent(const rtc::SentPacket& sent_packet) override {}
void OnReadyToSend(bool ready) override { ready_to_send_ = ready; }
// Records the latest route and mirrors its packet overhead so tests can
// verify transport-overhead propagation.
void OnNetworkRouteChanged(absl::string_view transport_name,
const rtc::NetworkRoute& network_route) override {
last_network_route_ = network_route;
++num_network_route_changes_;
transport_overhead_per_packet_ = network_route.packet_overhead;
}
bool fail_set_send_codecs() const { return fail_set_send_codecs_; }
private:
// TODO(bugs.webrtc.org/12783): This flag is used from more than one thread.
// As a workaround for tsan, it's currently std::atomic but that might not
// be the appropriate fix.
std::atomic<bool> sending_;
std::vector<RtpExtension> send_extensions_;
std::list<std::string> rtp_packets_;
std::list<std::string> rtcp_packets_;
std::vector<StreamParams> send_streams_;
RtcpParameters send_rtcp_parameters_;
std::set<uint32_t> muted_streams_;
std::map<uint32_t, webrtc::RtpParameters> rtp_send_parameters_;
bool fail_set_send_codecs_;
uint32_t send_ssrc_;
std::string rtcp_cname_;
bool ready_to_send_;
int transport_overhead_per_packet_;
rtc::NetworkRoute last_network_route_;
int num_network_route_changes_;
// Non-owning; set via SetInterface().
MediaChannelNetworkInterface* network_interface_ = nullptr;
absl::AnyInvocable<void(const std::set<uint32_t>&)>
ssrc_list_changed_callback_ = nullptr;
};
// Fake voice receive channel. Records codecs, streams, output volumes and
// playout delays set on it so tests can assert against them.
class FakeVoiceMediaReceiveChannel
: public RtpReceiveChannelHelper<VoiceMediaReceiveChannelInterface> {
public:
// Record of a single inserted DTMF event.
struct DtmfInfo {
DtmfInfo(uint32_t ssrc, int event_code, int duration);
uint32_t ssrc;
int event_code;
int duration;
};
FakeVoiceMediaReceiveChannel(const AudioOptions& options,
webrtc::TaskQueueBase* network_thread);
virtual ~FakeVoiceMediaReceiveChannel();
// Test methods
const std::vector<AudioCodec>& recv_codecs() const;
const std::vector<DtmfInfo>& dtmf_info_queue() const;
const AudioOptions& options() const;
int max_bps() const;
bool HasSource(uint32_t ssrc) const;
// Overrides
VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
return nullptr;
}
VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
return this;
}
cricket::MediaType media_type() const override {
return cricket::MEDIA_TYPE_AUDIO;
}
bool SetReceiverParameters(const AudioReceiverParameters& params) override;
void SetPlayout(bool playout) override;
bool AddRecvStream(const StreamParams& sp) override;
bool RemoveRecvStream(uint32_t ssrc) override;
bool SetOutputVolume(uint32_t ssrc, double volume) override;
bool SetDefaultOutputVolume(double volume) override;
bool GetOutputVolume(uint32_t ssrc, double* volume);
bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
absl::optional<int> GetBaseMinimumPlayoutDelayMs(
uint32_t ssrc) const override;
bool GetStats(VoiceMediaReceiveInfo* info,
bool get_and_clear_legacy_stats) override;
void SetRawAudioSink(
uint32_t ssrc,
std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
void SetDefaultRawAudioSink(
std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
// Intentional no-ops in this fake.
void SetReceiveNackEnabled(bool enabled) override {}
void SetReceiveNonSenderRttEnabled(bool enabled) override {}
private:
// Adapter that forwards audio data from an AudioSource into this channel.
class VoiceChannelAudioSink : public AudioSource::Sink {
public:
explicit VoiceChannelAudioSink(AudioSource* source);
~VoiceChannelAudioSink() override;
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
absl::optional<int64_t> absolute_capture_timestamp_ms) override;
void OnClose() override;
int NumPreferredChannels() const override { return -1; }
AudioSource* source() const;
private:
// Non-owning pointer to the attached source.
AudioSource* source_;
};
bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
bool SetMaxSendBandwidth(int bps);
bool SetOptions(const AudioOptions& options);
std::vector<AudioCodec> recv_codecs_;
std::map<uint32_t, double> output_scalings_;
std::map<uint32_t, int> output_delays_;
std::vector<DtmfInfo> dtmf_info_queue_;
AudioOptions options_;
std::map<uint32_t, std::unique_ptr<VoiceChannelAudioSink>> local_sinks_;
std::unique_ptr<webrtc::AudioSinkInterface> sink_;
int max_bps_;
};
// Fake voice send channel. Records codecs, options, attached sources and
// inserted DTMF events so tests can assert against them.
class FakeVoiceMediaSendChannel
: public RtpSendChannelHelper<VoiceMediaSendChannelInterface> {
public:
// Record of a single inserted DTMF event.
struct DtmfInfo {
DtmfInfo(uint32_t ssrc, int event_code, int duration);
uint32_t ssrc;
int event_code;
int duration;
};
FakeVoiceMediaSendChannel(const AudioOptions& options,
webrtc::TaskQueueBase* network_thread);
~FakeVoiceMediaSendChannel() override;
// Test accessors.
const std::vector<AudioCodec>& send_codecs() const;
const std::vector<DtmfInfo>& dtmf_info_queue() const;
const AudioOptions& options() const;
int max_bps() const;
bool HasSource(uint32_t ssrc) const;
bool GetOutputVolume(uint32_t ssrc, double* volume);
// Overrides
VideoMediaSendChannelInterface* AsVideoSendChannel() override {
return nullptr;
}
VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; }
cricket::MediaType media_type() const override {
return cricket::MEDIA_TYPE_AUDIO;
}
bool SetSenderParameters(const AudioSenderParameter& params) override;
void SetSend(bool send) override;
bool SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
AudioSource* source) override;
bool CanInsertDtmf() override;
bool InsertDtmf(uint32_t ssrc, int event_code, int duration) override;
// Hard-coded capability answers for this fake.
bool SenderNackEnabled() const override { return false; }
bool SenderNonSenderRttEnabled() const override { return false; }
void SetReceiveNackEnabled(bool enabled) {}
void SetReceiveNonSenderRttEnabled(bool enabled) {}
bool SendCodecHasNack() const override { return false; }
void SetSendCodecChangedCallback(
absl::AnyInvocable<void()> callback) override {}
absl::optional<Codec> GetSendCodec() const override;
bool GetStats(VoiceMediaSendInfo* stats) override;
private:
// Adapter that forwards audio data from an AudioSource into this channel.
class VoiceChannelAudioSink : public AudioSource::Sink {
public:
explicit VoiceChannelAudioSink(AudioSource* source);
~VoiceChannelAudioSink() override;
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
absl::optional<int64_t> absolute_capture_timestamp_ms) override;
void OnClose() override;
int NumPreferredChannels() const override { return -1; }
AudioSource* source() const;
private:
// Non-owning pointer to the attached source.
AudioSource* source_;
};
bool SetSendCodecs(const std::vector<AudioCodec>& codecs);
bool SetMaxSendBandwidth(int bps);
bool SetOptions(const AudioOptions& options);
bool SetLocalSource(uint32_t ssrc, AudioSource* source);
std::vector<AudioCodec> send_codecs_;
std::map<uint32_t, double> output_scalings_;
std::map<uint32_t, int> output_delays_;
std::vector<DtmfInfo> dtmf_info_queue_;
AudioOptions options_;
std::map<uint32_t, std::unique_ptr<VoiceChannelAudioSink>> local_sinks_;
int max_bps_;
};
// A helper function to compare the FakeVoiceMediaChannel::DtmfInfo.
// Declaration only; the implementation lives in the corresponding .cc file.
bool CompareDtmfInfo(const FakeVoiceMediaSendChannel::DtmfInfo& info,
uint32_t ssrc,
int event_code,
int duration);
// Fake video receive channel. Records codecs, sinks, sources and playout
// delays set on it so tests can assert against them.
class FakeVideoMediaReceiveChannel
: public RtpReceiveChannelHelper<VideoMediaReceiveChannelInterface> {
public:
FakeVideoMediaReceiveChannel(const VideoOptions& options,
webrtc::TaskQueueBase* network_thread);
virtual ~FakeVideoMediaReceiveChannel();
VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
return this;
}
VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
return nullptr;
}
cricket::MediaType media_type() const override {
return cricket::MEDIA_TYPE_VIDEO;
}
// Test accessors.
const std::vector<VideoCodec>& recv_codecs() const;
const std::vector<VideoCodec>& send_codecs() const;
bool rendering() const;
const VideoOptions& options() const;
const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
sinks() const;
int max_bps() const;
bool SetReceiverParameters(const VideoReceiverParameters& params) override;
bool SetSink(uint32_t ssrc,
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
void SetDefaultSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
bool HasSink(uint32_t ssrc) const;
// Intentional no-op in this fake.
void SetReceive(bool receive) override {}
bool HasSource(uint32_t ssrc) const;
bool AddRecvStream(const StreamParams& sp) override;
bool RemoveRecvStream(uint32_t ssrc) override;
std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
absl::optional<int> GetBaseMinimumPlayoutDelayMs(
uint32_t ssrc) const override;
void SetRecordableEncodedFrameCallback(
uint32_t ssrc,
std::function<void(const webrtc::RecordableEncodedFrame&)> callback)
override;
void ClearRecordableEncodedFrameCallback(uint32_t ssrc) override;
void RequestRecvKeyFrame(uint32_t ssrc) override;
void SetReceiverFeedbackParameters(bool lntf_enabled,
bool nack_enabled,
webrtc::RtcpMode rtcp_mode,
absl::optional<int> rtx_time) override {}
bool GetStats(VideoMediaReceiveInfo* info) override;
// Not supported by this fake; crashes if called.
bool AddDefaultRecvStreamForTesting(const StreamParams& sp) override {
RTC_CHECK_NOTREACHED();
return false;
}
private:
bool SetRecvCodecs(const std::vector<VideoCodec>& codecs);
bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
bool SetOptions(const VideoOptions& options);
bool SetMaxSendBandwidth(int bps);
std::vector<VideoCodec> recv_codecs_;
std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*> sinks_;
std::map<uint32_t, rtc::VideoSourceInterface<webrtc::VideoFrame>*> sources_;
std::map<uint32_t, int> output_delays_;
VideoOptions options_;
int max_bps_;
};
// Fake video send channel. Records codecs, options and attached sources so
// tests can assert against them.
class FakeVideoMediaSendChannel
: public RtpSendChannelHelper<VideoMediaSendChannelInterface> {
public:
FakeVideoMediaSendChannel(const VideoOptions& options,
webrtc::TaskQueueBase* network_thread);
virtual ~FakeVideoMediaSendChannel();
VideoMediaSendChannelInterface* AsVideoSendChannel() override { return this; }
VoiceMediaSendChannelInterface* AsVoiceSendChannel() override {
return nullptr;
}
cricket::MediaType media_type() const override {
return cricket::MEDIA_TYPE_VIDEO;
}
// Test accessors.
const std::vector<VideoCodec>& send_codecs() const;
const std::vector<VideoCodec>& codecs() const;
const VideoOptions& options() const;
const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
sinks() const;
int max_bps() const;
bool SetSenderParameters(const VideoSenderParameters& params) override;
absl::optional<Codec> GetSendCodec() const override;
bool SetSend(bool send) override;
bool SetVideoSend(
uint32_t ssrc,
const VideoOptions* options,
rtc::VideoSourceInterface<webrtc::VideoFrame>* source) override;
bool HasSource(uint32_t ssrc) const;
void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) override;
void GenerateSendKeyFrame(uint32_t ssrc,
const std::vector<std::string>& rids) override;
// Hard-coded capability answers for this fake.
webrtc::RtcpMode SendCodecRtcpMode() const override {
return webrtc::RtcpMode::kCompound;
}
void SetSendCodecChangedCallback(
absl::AnyInvocable<void()> callback) override {}
void SetSsrcListChangedCallback(
absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {}
bool SendCodecHasLntf() const override { return false; }
bool SendCodecHasNack() const override { return false; }
absl::optional<int> SendCodecRtxTime() const override {
return absl::nullopt;
}
bool GetStats(VideoMediaSendInfo* info) override;
private:
bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
bool SetOptions(const VideoOptions& options);
bool SetMaxSendBandwidth(int bps);
std::vector<VideoCodec> send_codecs_;
std::map<uint32_t, rtc::VideoSourceInterface<webrtc::VideoFrame>*> sources_;
VideoOptions options_;
int max_bps_;
};
// Fake voice engine: creates fake voice send/receive channels and lets tests
// control the advertised codec lists and RTP header extensions.
class FakeVoiceEngine : public VoiceEngineInterface {
public:
FakeVoiceEngine();
void Init() override;
rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const override;
std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) override;
std::unique_ptr<VoiceMediaReceiveChannelInterface> CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) override;
// TODO(ossu): For proper testing, these should either be individually
// settable or the voice engine should reference mockable factories.
const std::vector<AudioCodec>& send_codecs() const override;
const std::vector<AudioCodec>& recv_codecs() const override;
// Test setters for the codec lists returned by the accessors above.
void SetCodecs(const std::vector<AudioCodec>& codecs);
void SetRecvCodecs(const std::vector<AudioCodec>& codecs);
void SetSendCodecs(const std::vector<AudioCodec>& codecs);
int GetInputLevel();
bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override;
void StopAecDump() override;
absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
const override;
void SetRtpHeaderExtensions(
std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions);
private:
std::vector<AudioCodec> recv_codecs_;
std::vector<AudioCodec> send_codecs_;
bool fail_create_channel_;
std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions_;
// FakeMediaEngine manipulates fail_create_channel_ directly.
friend class FakeMediaEngine;
};
// Fake video engine: creates fake video send/receive channels and lets tests
// control the advertised codec lists and RTP header extensions.
class FakeVideoEngine : public VideoEngineInterface {
public:
FakeVideoEngine();
bool SetOptions(const VideoOptions& options);
std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
override;
std::unique_ptr<VideoMediaReceiveChannelInterface> CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options) override;
// Test accessors for previously created channels, by creation order.
FakeVideoMediaSendChannel* GetSendChannel(size_t index);
FakeVideoMediaReceiveChannel* GetReceiveChannel(size_t index);
std::vector<VideoCodec> send_codecs() const override {
return send_codecs(true);
}
std::vector<VideoCodec> recv_codecs() const override {
return recv_codecs(true);
}
std::vector<VideoCodec> send_codecs(bool include_rtx) const override;
std::vector<VideoCodec> recv_codecs(bool include_rtx) const override;
// Test setters for the codec lists returned by the accessors above.
void SetSendCodecs(const std::vector<VideoCodec>& codecs);
void SetRecvCodecs(const std::vector<VideoCodec>& codecs);
bool SetCapture(bool capture);
std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
const override;
void SetRtpHeaderExtensions(
std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions);
private:
std::vector<VideoCodec> send_codecs_;
std::vector<VideoCodec> recv_codecs_;
bool capture_;
VideoOptions options_;
bool fail_create_channel_;
std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions_;
// FakeMediaEngine manipulates fail_create_channel_ directly.
friend class FakeMediaEngine;
};
// Composite fake engine bundling a FakeVoiceEngine and a FakeVideoEngine,
// with convenience setters that forward to both.
class FakeMediaEngine : public CompositeMediaEngine {
public:
FakeMediaEngine();
~FakeMediaEngine() override;
void SetAudioCodecs(const std::vector<AudioCodec>& codecs);
void SetAudioRecvCodecs(const std::vector<AudioCodec>& codecs);
void SetAudioSendCodecs(const std::vector<AudioCodec>& codecs);
void SetVideoCodecs(const std::vector<VideoCodec>& codecs);
void set_fail_create_channel(bool fail);
// Accessors to the owned fake engines for direct manipulation in tests.
FakeVoiceEngine* fake_voice_engine() { return voice_; }
FakeVideoEngine* fake_video_engine() { return video_; }
private:
FakeVoiceEngine* const voice_;
FakeVideoEngine* const video_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_MEDIA_ENGINE_H_

View file

@ -0,0 +1,232 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
#define MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
#include <map>
#include <set>
#include <utility>
#include <vector>
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "media/base/media_channel.h"
#include "media/base/rtp_utils.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/byte_order.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/dscp.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread.h"
#include "rtc_base/time_utils.h"
namespace cricket {
// Fake NetworkInterface that sends/receives RTP/RTCP packets.
// Fake NetworkInterface that sends/receives RTP/RTCP packets.
class FakeNetworkInterface : public MediaChannelNetworkInterface {
public:
FakeNetworkInterface()
: thread_(rtc::Thread::Current()),
dest_(NULL),
conf_(false),
sendbuf_size_(-1),
recvbuf_size_(-1),
dscp_(rtc::DSCP_NO_CHANGE) {}
// Sets the channel that sent RTP packets are delivered to (non-owning).
void SetDestination(MediaReceiveChannelInterface* dest) { dest_ = dest; }
// Conference mode is a mode where instead of simply forwarding the packets,
// the transport will send multiple copies of the packet with the specified
// SSRCs. This allows us to simulate receiving media from multiple sources.
void SetConferenceMode(bool conf, const std::vector<uint32_t>& ssrcs)
RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
conf_ = conf;
conf_sent_ssrcs_ = ssrcs;
}
// Total byte count of all RTP packets sent through this interface.
int NumRtpBytes() RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
int bytes = 0;
for (size_t i = 0; i < rtp_packets_.size(); ++i) {
bytes += static_cast<int>(rtp_packets_[i].size());
}
return bytes;
}
// Byte count of sent RTP packets carrying the given SSRC.
int NumRtpBytes(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
int bytes = 0;
GetNumRtpBytesAndPackets(ssrc, &bytes, NULL);
return bytes;
}
int NumRtpPackets() RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
return static_cast<int>(rtp_packets_.size());
}
int NumRtpPackets(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
int packets = 0;
GetNumRtpBytesAndPackets(ssrc, NULL, &packets);
return packets;
}
int NumSentSsrcs() RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
return static_cast<int>(sent_ssrcs_.size());
}
// Returns a copy of the index-th sent RTP packet, or an empty buffer if
// the index is out of range.
rtc::CopyOnWriteBuffer GetRtpPacket(int index) RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
if (index >= static_cast<int>(rtp_packets_.size())) {
return {};
}
return rtp_packets_[index];
}
int NumRtcpPackets() RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
return static_cast<int>(rtcp_packets_.size());
}
// Note: callers are responsible for deleting the returned buffer.
// Returns NULL if the index is out of range.
const rtc::CopyOnWriteBuffer* GetRtcpPacket(int index)
RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
if (index >= static_cast<int>(rtcp_packets_.size())) {
return NULL;
}
return new rtc::CopyOnWriteBuffer(rtcp_packets_[index]);
}
// Most recently applied socket options (see SetOption below).
int sendbuf_size() const { return sendbuf_size_; }
int recvbuf_size() const { return recvbuf_size_; }
rtc::DiffServCodePoint dscp() const { return dscp_; }
rtc::PacketOptions options() const { return options_; }
protected:
// Records the RTP packet and loops it back to `dest_` — once normally, or
// once per configured SSRC in conference mode. Rejects non-RTP payloads.
virtual bool SendPacket(rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options)
RTC_LOCKS_EXCLUDED(mutex_) {
if (!webrtc::IsRtpPacket(*packet)) {
return false;
}
webrtc::MutexLock lock(&mutex_);
sent_ssrcs_[webrtc::ParseRtpSsrc(*packet)]++;
options_ = options;
rtp_packets_.push_back(*packet);
if (conf_) {
for (size_t i = 0; i < conf_sent_ssrcs_.size(); ++i) {
SetRtpSsrc(conf_sent_ssrcs_[i], *packet);
PostPacket(*packet);
}
} else {
PostPacket(*packet);
}
return true;
}
// Records the RTCP packet; RTCP is never looped back to the destination.
virtual bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options)
RTC_LOCKS_EXCLUDED(mutex_) {
webrtc::MutexLock lock(&mutex_);
rtcp_packets_.push_back(*packet);
options_ = options;
if (!conf_) {
// don't worry about RTCP in conf mode for now
RTC_LOG(LS_VERBOSE) << "Dropping RTCP packet, they are not handled by "
"MediaChannel anymore.";
}
return true;
}
// Remembers the requested socket option values for later inspection.
virtual int SetOption(SocketType type, rtc::Socket::Option opt, int option) {
if (opt == rtc::Socket::OPT_SNDBUF) {
sendbuf_size_ = option;
} else if (opt == rtc::Socket::OPT_RCVBUF) {
recvbuf_size_ = option;
} else if (opt == rtc::Socket::OPT_DSCP) {
dscp_ = static_cast<rtc::DiffServCodePoint>(option);
}
return 0;
}
// Asynchronously delivers the packet to `dest_` on `thread_`; the task is
// guarded by `safety_` so it is dropped if this object is destroyed first.
void PostPacket(rtc::CopyOnWriteBuffer packet) {
thread_->PostTask(
SafeTask(safety_.flag(), [this, packet = std::move(packet)]() mutable {
if (dest_) {
webrtc::RtpPacketReceived parsed_packet;
if (parsed_packet.Parse(packet)) {
parsed_packet.set_arrival_time(
webrtc::Timestamp::Micros(rtc::TimeMicros()));
dest_->OnPacketReceived(std::move(parsed_packet));
} else {
RTC_DCHECK_NOTREACHED();
}
}
}));
}
private:
// Overwrites the SSRC field (bytes 8-11 of the RTP header) in `buffer`.
void SetRtpSsrc(uint32_t ssrc, rtc::CopyOnWriteBuffer& buffer) {
RTC_CHECK_GE(buffer.size(), 12);
rtc::SetBE32(buffer.MutableData() + 8, ssrc);
}
// Tallies bytes and/or packet count for the given SSRC; either output
// pointer may be NULL when that value is not needed.
void GetNumRtpBytesAndPackets(uint32_t ssrc, int* bytes, int* packets) {
if (bytes) {
*bytes = 0;
}
if (packets) {
*packets = 0;
}
for (size_t i = 0; i < rtp_packets_.size(); ++i) {
if (ssrc == webrtc::ParseRtpSsrc(rtp_packets_[i])) {
if (bytes) {
*bytes += static_cast<int>(rtp_packets_[i].size());
}
if (packets) {
++(*packets);
}
}
}
}
webrtc::TaskQueueBase* thread_;
MediaReceiveChannelInterface* dest_;
bool conf_;
// The ssrcs used in sending out packets in conference mode.
std::vector<uint32_t> conf_sent_ssrcs_;
// Map to track counts of packets that have been sent per ssrc.
// This includes packets that are dropped.
std::map<uint32_t, uint32_t> sent_ssrcs_;
// Map to track packet-number that needs to be dropped per ssrc.
std::map<uint32_t, std::set<uint32_t> > drop_map_;
webrtc::Mutex mutex_;
std::vector<rtc::CopyOnWriteBuffer> rtp_packets_;
std::vector<rtc::CopyOnWriteBuffer> rtcp_packets_;
int sendbuf_size_;
int recvbuf_size_;
rtc::DiffServCodePoint dscp_;
// Options of the most recently sent packet.
rtc::PacketOptions options_;
webrtc::ScopedTaskSafety safety_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_rtp.h"
#include <stdint.h>
#include <string.h>
#include "absl/algorithm/container.h"
#include "rtc_base/checks.h"
#include "test/gtest.h"
// Walks the RFC 5285 one-byte header extensions of two RTP packets and, for
// each extension element, expects the contents to match — unless
// `expect_equal` is false and the extension id is listed in
// `encrypted_headers`, in which case the contents must differ (it was
// encrypted in one packet but not the other).
void CompareHeaderExtensions(const char* packet1,
                             size_t packet1_size,
                             const char* packet2,
                             size_t packet2_size,
                             const std::vector<int>& encrypted_headers,
                             bool expect_equal) {
  // Sanity check: both packets must hold at least the fixed 12-byte RTP
  // header plus the 4-byte extension-block header.
  RTC_CHECK_GE(packet1_size, 12 + 4);
  RTC_CHECK_GE(packet2_size, 12 + 4);
  // The 4-byte extension-block headers themselves must be identical.
  EXPECT_EQ(0, memcmp(packet1 + 12, packet2 + 12, 4));
  // Expect the RFC 5285 one-byte-header magic cookie 0xBEDE.
  EXPECT_EQ('\xBE', packet1[12]);
  EXPECT_EQ('\xDE', packet1[13]);
  // Length of the extension data, in 32-bit words.
  const size_t num_words = packet1[14] << 8 | packet1[15];
  const char* cursor1 = packet1 + 12 + 4;
  const char* const end1 = cursor1 + num_words * 4;
  const char* cursor2 = packet2 + 12 + 4;
  // Sanity check: both packets must be large enough to contain the full
  // extension block.
  RTC_CHECK_GE(packet1_size, 12 + 4 + num_words * 4);
  RTC_CHECK_GE(packet2_size, 12 + 4 + num_words * 4);
  while (cursor1 < end1) {
    const uint8_t id = (*cursor1 & 0xf0) >> 4;
    const uint8_t length = (*cursor1 & 0x0f) + 1;
    ++cursor1;
    ++cursor2;
    EXPECT_LE(cursor1, end1);
    if (id == 15) {
      // Id 15 is reserved and terminates parsing.
      break;
    }
    // An extension is only expected to differ when it was encrypted, i.e.
    // its id appears in `encrypted_headers` and `expect_equal` is false.
    if (expect_equal || !absl::c_linear_search(encrypted_headers, id)) {
      EXPECT_EQ(0, memcmp(cursor1, cursor2, length));
    } else {
      EXPECT_NE(0, memcmp(cursor1, cursor2, length));
    }
    cursor1 += length;
    cursor2 += length;
    // Skip zero padding between elements.
    while (cursor1 < end1 && *cursor1 == 0) {
      ++cursor1;
      ++cursor2;
    }
  }
}

View file

@ -0,0 +1,301 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Fake RTP and RTCP packets to use in unit tests.
#ifndef MEDIA_BASE_FAKE_RTP_H_
#define MEDIA_BASE_FAKE_RTP_H_
#include <cstddef> // size_t
#include <vector>
// A typical PCMU RTP packet.
// PT=0, SN=1, TS=0, SSRC=1
// all data FF
static const unsigned char kPcmuFrame[] = {
// 12-byte RTP header (V=2, PT=0, SN=1, TS=0, SSRC=1).
0x80, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
// 160 bytes of 0xFF payload.
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
};
// Header extension ids present in kPcmuFrameWithExtensions that tests treat
// as "encrypted" extensions.
static const int kHeaderExtensionIDs[] = {1, 4};
// A typical PCMU RTP packet with header extensions.
// PT=0, SN=1, TS=0, SSRC=1
// all data FF
static const unsigned char kPcmuFrameWithExtensions[] = {
    // 12-byte RTP header (V=2, X=1, PT=0, SN=1, TS=0, SSRC=1).
    0x90, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    // RFC 5285, section 4.2. One-Byte Header magic cookie.
    0xBE, 0xDE,
    // Header extension length 6 * 32 bits.
    0x00, 0x06,
    // Id 1, 8 data bytes (9 bytes including the id/length byte).
    0x17, 0x41, 0x42, 0x73, 0xA4, 0x75, 0x26, 0x27, 0x48,
    // Id 2, 3 data bytes.
    0x22, 0x00, 0x00, 0xC8,
    // Id 3, 1 data byte.
    0x30, 0x8E,
    // Id 4, 7 data bytes.
    0x46, 0x55, 0x99, 0x63, 0x86, 0xB3, 0x95, 0xFB,
    // 1 byte of padding to complete the extension block.
    0x00,
    // Payload: 160 bytes of 0xFF.
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF,
};
// A typical Receiver Report RTCP packet.
// PT=RR, LN=1, SSRC=1
// send SSRC=2, all other fields 0
// Second byte 0xc9 = payload type 201 (Receiver Report).
static const unsigned char kRtcpReport[] = {
    0x80, 0xc9, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
    0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
// PT = 97, TS = 0, Seq = 1, SSRC = 2
// H264 - NRI = 1, Type = 1, bit stream = FF
// 12-byte RTP header, then 0x21 = H.264 NAL header (NRI=1, type=1),
// followed by an all-0xFF bitstream.
static const unsigned char kH264Packet[] = {
    0x80, 0x61, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
    0x21, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF,
};
// PT= 101, SN=2, TS=3, SSRC = 4
// 12-byte RTP header followed by a 12-byte payload.
static const unsigned char kDataPacket[] = {
    0x80, 0x65, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04,
    0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
};
// This expects both packets to be based on kPcmuFrameWithExtensions.
// Header extensions with an id in "encrypted_headers" are expected to be
// different in the packets unless "expect_equal" is set to "true".
// `packet1`/`packet1_size` and `packet2`/`packet2_size` are the raw bytes
// (and lengths) of the two RTP packets to compare.
void CompareHeaderExtensions(const char* packet1,
                             size_t packet1_size,
                             const char* packet2,
                             size_t packet2_size,
                             const std::vector<int>& encrypted_headers,
                             bool expect_equal);
#endif // MEDIA_BASE_FAKE_RTP_H_

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_video_renderer.h"
namespace cricket {
namespace {
// Returns true when every pixel of `frame` lies within the "black" YUV
// tolerance: luma in [0, 48], both chroma planes exactly 128.
bool CheckFrameColorYuv(const webrtc::VideoFrame& frame) {
  // TODO(zhurunz) Check with VP8 team to see if we can remove this
  // tolerance on Y values. Some unit tests produce Y values close
  // to 16 rather than close to zero, for supposedly black frames.
  // Largest value observed is 34, e.g., running
  // PeerConnectionIntegrationTest.SendAndReceive16To9AspectRatio.
  static constexpr uint8_t y_min = 0;
  static constexpr uint8_t y_max = 48;
  static constexpr uint8_t u_min = 128;
  static constexpr uint8_t u_max = 128;
  static constexpr uint8_t v_min = 128;
  static constexpr uint8_t v_max = 128;
  if (!frame.video_frame_buffer()) {
    return false;
  }
  rtc::scoped_refptr<const webrtc::I420BufferInterface> buffer =
      frame.video_frame_buffer()->ToI420();
  // Scan the luma plane row by row.
  const uint8_t* y_row = buffer->DataY();
  const int32_t y_stride = buffer->StrideY();
  for (int row = 0; row < frame.height(); ++row, y_row += y_stride) {
    for (int col = 0; col < frame.width(); ++col) {
      const uint8_t luma = y_row[col];
      if (luma < y_min || luma > y_max) {
        return false;
      }
    }
  }
  // Scan both chroma planes in lock step.
  const uint8_t* u_row = buffer->DataU();
  const uint8_t* v_row = buffer->DataV();
  const int32_t u_stride = buffer->StrideU();
  const int32_t v_stride = buffer->StrideV();
  const int chroma_width = buffer->ChromaWidth();
  const int chroma_height = buffer->ChromaHeight();
  for (int row = 0; row < chroma_height;
       ++row, u_row += u_stride, v_row += v_stride) {
    for (int col = 0; col < chroma_width; ++col) {
      const uint8_t u_value = u_row[col];
      if (u_value < u_min || u_value > u_max) {
        return false;
      }
      const uint8_t v_value = v_row[col];
      if (v_value < v_min || v_value > v_max) {
        return false;
      }
    }
  }
  return true;
}
} // namespace
FakeVideoRenderer::FakeVideoRenderer() = default;
// Records the received frame's metadata and whether it decodes to an
// all-black YUV image. All recorded state is guarded by `mutex_` so the
// accessors in the header can be called from other threads.
void FakeVideoRenderer::OnFrame(const webrtc::VideoFrame& frame) {
  webrtc::MutexLock lock(&mutex_);
  // True when every pixel is within the "black" YUV tolerance.
  black_frame_ = CheckFrameColorYuv(frame);
  ++num_rendered_frames_;
  width_ = frame.width();
  height_ = frame.height();
  rotation_ = frame.rotation();
  timestamp_us_ = frame.timestamp_us();
}
} // namespace cricket

View file

@ -0,0 +1,73 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
#define MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
#include <stdint.h>
#include "api/scoped_refptr.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video/video_sink_interface.h"
#include "rtc_base/synchronization/mutex.h"
namespace cricket {
// Faked video renderer that has a callback for actions on rendering.
// Records metadata about the most recently rendered frame; all accessors
// are safe to call from any thread (state is guarded by `mutex_`).
class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  FakeVideoRenderer();
  // Records frame metadata; implemented in fake_video_renderer.cc.
  void OnFrame(const webrtc::VideoFrame& frame) override;
  // Width of the last rendered frame, or 0 before any frame arrived.
  int width() const {
    webrtc::MutexLock lock(&mutex_);
    return width_;
  }
  // Height of the last rendered frame, or 0 before any frame arrived.
  int height() const {
    webrtc::MutexLock lock(&mutex_);
    return height_;
  }
  // Rotation of the last rendered frame.
  webrtc::VideoRotation rotation() const {
    webrtc::MutexLock lock(&mutex_);
    return rotation_;
  }
  // Timestamp (microseconds) of the last rendered frame.
  int64_t timestamp_us() const {
    webrtc::MutexLock lock(&mutex_);
    return timestamp_us_;
  }
  // Total number of frames delivered via OnFrame().
  int num_rendered_frames() const {
    webrtc::MutexLock lock(&mutex_);
    return num_rendered_frames_;
  }
  // True when the last rendered frame was within the "black" tolerance.
  bool black_frame() const {
    webrtc::MutexLock lock(&mutex_);
    return black_frame_;
  }

 private:
  int width_ = 0;
  int height_ = 0;
  webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
  int64_t timestamp_us_ = 0;
  int num_rendered_frames_ = 0;
  bool black_frame_ = false;
  // Guards all members above.
  mutable webrtc::Mutex mutex_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_VIDEO_RENDERER_H_

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,303 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/media_channel_impl.h"
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "api/audio_options.h"
#include "api/media_stream_interface.h"
#include "api/rtc_error.h"
#include "api/rtp_sender_interface.h"
#include "api/units/time_delta.h"
#include "api/video/video_timing.h"
#include "api/video_codecs/scalability_mode.h"
#include "common_video/include/quality_limitation_reason.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "modules/rtp_rtcp/include/report_block_data.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Invokes `callback` with `error` at most once, clearing it afterwards so a
// repeated call on the same callback is a no-op. Returns `error` unchanged.
webrtc::RTCError InvokeSetParametersCallback(SetParametersCallback& callback,
                                             RTCError error) {
  if (!callback) {
    return error;
  }
  std::move(callback)(error);
  callback = nullptr;
  return error;
}
} // namespace webrtc
namespace cricket {
using webrtc::FrameDecryptorInterface;
using webrtc::FrameEncryptorInterface;
using webrtc::FrameTransformerInterface;
using webrtc::PendingTaskSafetyFlag;
using webrtc::SafeTask;
using webrtc::TaskQueueBase;
using webrtc::VideoTrackInterface;
VideoOptions::VideoOptions()
    : content_hint(VideoTrackInterface::ContentHint::kNone) {}
VideoOptions::~VideoOptions() = default;
MediaChannelUtil::MediaChannelUtil(TaskQueueBase* network_thread,
                                   bool enable_dscp)
    : transport_(network_thread, enable_dscp) {}
MediaChannelUtil::~MediaChannelUtil() {}
// Forwards the network interface to the transport helper, which performs
// all use of it on the network thread.
void MediaChannelUtil::SetInterface(MediaChannelNetworkInterface* iface) {
  transport_.SetInterface(iface);
}
// Base implementation: -1 signals that no absolute send-time extension id
// is set (see the declaration in media_channel_impl.h).
int MediaChannelUtil::GetRtpSendTimeExtnId() const {
  return -1;
}
// Sends an RTP packet through the transport helper. Per the header, these
// direct send methods are used by some tests only.
bool MediaChannelUtil::SendPacket(rtc::CopyOnWriteBuffer* packet,
                                  const rtc::PacketOptions& options) {
  return transport_.DoSendPacket(packet, false, options);
}
// Sends an RTCP packet through the transport helper (test-only, as above).
bool MediaChannelUtil::SendRtcp(rtc::CopyOnWriteBuffer* packet,
                                const rtc::PacketOptions& options) {
  return transport_.DoSendPacket(packet, true, options);
}
// Forwards a socket option to the transport helper's network interface.
int MediaChannelUtil::SetOption(MediaChannelNetworkInterface::SocketType type,
                                rtc::Socket::Option opt,
                                int option) {
  return transport_.SetOption(type, opt, option);
}
// Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
// Set to true if it's allowed to mix one- and two-byte RTP header extensions
// in the same stream. The setter and getter must only be called from
// worker_thread.
void MediaChannelUtil::SetExtmapAllowMixed(bool extmap_allow_mixed) {
  extmap_allow_mixed_ = extmap_allow_mixed;
}
bool MediaChannelUtil::ExtmapAllowMixed() const {
  return extmap_allow_mixed_;
}
// Must be called on the network thread (enforced inside the helper).
bool MediaChannelUtil::HasNetworkInterface() const {
  return transport_.HasNetworkInterface();
}
bool MediaChannelUtil::DscpEnabled() const {
  return transport_.DscpEnabled();
}
// Thread-safe: the helper reposts to the network thread if needed.
void MediaChannelUtil::SetPreferredDscp(rtc::DiffServCodePoint new_dscp) {
  transport_.SetPreferredDscp(new_dscp);
}
// Out-of-line defaulted special members for the media info/stats structs
// declared in media_channel.h.
MediaSenderInfo::MediaSenderInfo() = default;
MediaSenderInfo::~MediaSenderInfo() = default;
MediaReceiverInfo::MediaReceiverInfo() = default;
MediaReceiverInfo::~MediaReceiverInfo() = default;
VoiceSenderInfo::VoiceSenderInfo() = default;
VoiceSenderInfo::~VoiceSenderInfo() = default;
VoiceReceiverInfo::VoiceReceiverInfo() = default;
VoiceReceiverInfo::~VoiceReceiverInfo() = default;
VideoSenderInfo::VideoSenderInfo() = default;
VideoSenderInfo::~VideoSenderInfo() = default;
VideoReceiverInfo::VideoReceiverInfo() = default;
VideoReceiverInfo::~VideoReceiverInfo() = default;
VoiceMediaInfo::VoiceMediaInfo() = default;
VoiceMediaInfo::~VoiceMediaInfo() = default;
VideoMediaInfo::VideoMediaInfo() = default;
VideoMediaInfo::~VideoMediaInfo() = default;
VideoMediaSendInfo::VideoMediaSendInfo() = default;
VideoMediaSendInfo::~VideoMediaSendInfo() = default;
VoiceMediaSendInfo::VoiceMediaSendInfo() = default;
VoiceMediaSendInfo::~VoiceMediaSendInfo() = default;
VideoMediaReceiveInfo::VideoMediaReceiveInfo() = default;
VideoMediaReceiveInfo::~VideoMediaReceiveInfo() = default;
VoiceMediaReceiveInfo::VoiceMediaReceiveInfo() = default;
VoiceMediaReceiveInfo::~VoiceMediaReceiveInfo() = default;
AudioSenderParameter::AudioSenderParameter() = default;
AudioSenderParameter::~AudioSenderParameter() = default;
// Extends the base-class string map with the audio options entry.
std::map<std::string, std::string> AudioSenderParameter::ToStringMap() const {
  std::map<std::string, std::string> result = SenderParameters::ToStringMap();
  result.insert_or_assign("options", options.ToString());
  return result;
}
VideoSenderParameters::VideoSenderParameters() = default;
VideoSenderParameters::~VideoSenderParameters() = default;
// Extends the base-class string map with the conference-mode flag.
std::map<std::string, std::string> VideoSenderParameters::ToStringMap() const {
  std::map<std::string, std::string> result = SenderParameters::ToStringMap();
  if (conference_mode) {
    result["conference_mode"] = "yes";
  } else {
    result["conference_mode"] = "no";
  }
  return result;
}
// --------------------- MediaChannelUtil::TransportForMediaChannels -----
MediaChannelUtil::TransportForMediaChannels::TransportForMediaChannels(
    webrtc::TaskQueueBase* network_thread,
    bool enable_dscp)
    // The safety flag starts detached and inactive; SetInterface() flips it
    // alive/not-alive as the network interface is attached/detached.
    : network_safety_(webrtc::PendingTaskSafetyFlag::CreateDetachedInactive()),
      network_thread_(network_thread),
      enable_dscp_(enable_dscp) {}
MediaChannelUtil::TransportForMediaChannels::~TransportForMediaChannels() {
  // The network interface must have been detached (SetInterface(nullptr))
  // before destruction.
  RTC_DCHECK(!network_interface_);
}
// Sends an RTCP packet, hopping to the network thread when called from
// elsewhere. Always reports success since the actual send is asynchronous.
bool MediaChannelUtil::TransportForMediaChannels::SendRtcp(
    rtc::ArrayView<const uint8_t> packet) {
  // Copy the packet (with capacity kMaxRtpPacketLen) into the closure so it
  // stays valid until the task runs on the network thread.
  auto send = [this, packet = rtc::CopyOnWriteBuffer(
                         packet, kMaxRtpPacketLen)]() mutable {
    rtc::PacketOptions rtc_options;
    if (DscpEnabled()) {
      rtc_options.dscp = PreferredDscp();
    }
    DoSendPacket(&packet, true, rtc_options);
  };
  // Run inline if already on the network thread; otherwise post a task that
  // `network_safety_` cancels once the interface has been detached.
  if (network_thread_->IsCurrent()) {
    send();
  } else {
    network_thread_->PostTask(SafeTask(network_safety_, std::move(send)));
  }
  return true;
}
// Sends an RTP packet, hopping to the network thread when called from
// elsewhere. Always reports success since the actual send is asynchronous.
bool MediaChannelUtil::TransportForMediaChannels::SendRtp(
    rtc::ArrayView<const uint8_t> packet,
    const webrtc::PacketOptions& options) {
  // Capture the needed option fields by value (plus a copy of the packet
  // with capacity kMaxRtpPacketLen) so the closure is self-contained.
  auto send =
      [this, packet_id = options.packet_id,
       included_in_feedback = options.included_in_feedback,
       included_in_allocation = options.included_in_allocation,
       batchable = options.batchable,
       last_packet_in_batch = options.last_packet_in_batch,
       packet = rtc::CopyOnWriteBuffer(packet, kMaxRtpPacketLen)]() mutable {
        rtc::PacketOptions rtc_options;
        rtc_options.packet_id = packet_id;
        if (DscpEnabled()) {
          rtc_options.dscp = PreferredDscp();
        }
        rtc_options.info_signaled_after_sent.included_in_feedback =
            included_in_feedback;
        rtc_options.info_signaled_after_sent.included_in_allocation =
            included_in_allocation;
        rtc_options.batchable = batchable;
        rtc_options.last_packet_in_batch = last_packet_in_batch;
        DoSendPacket(&packet, false, rtc_options);
      };
  // TODO(bugs.webrtc.org/11993): ModuleRtpRtcpImpl2 and related classes (e.g.
  // RTCPSender) aren't aware of the network thread and may trigger calls to
  // this function from different threads. Update those classes to keep
  // network traffic on the network thread.
  if (network_thread_->IsCurrent()) {
    send();
  } else {
    network_thread_->PostTask(SafeTask(network_safety_, std::move(send)));
  }
  return true;
}
// Attaches (or, with nullptr, detaches) the network interface. The safety
// flag tracks attachment so pending posted sends are dropped after detach.
void MediaChannelUtil::TransportForMediaChannels::SetInterface(
    MediaChannelNetworkInterface* iface) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (iface) {
    network_safety_->SetAlive();
  } else {
    network_safety_->SetNotAlive();
  }
  network_interface_ = iface;
  UpdateDscp();
}
void MediaChannelUtil::TransportForMediaChannels::UpdateDscp() {
rtc::DiffServCodePoint value =
enable_dscp_ ? preferred_dscp_ : rtc::DSCP_DEFAULT;
int ret = SetOptionLocked(MediaChannelNetworkInterface::ST_RTP,
rtc::Socket::OPT_DSCP, value);
if (ret == 0)
SetOptionLocked(MediaChannelNetworkInterface::ST_RTCP,
rtc::Socket::OPT_DSCP, value);
}
// Hands `packet` to the network interface as RTP or RTCP depending on
// `rtcp`. Returns false when no interface is attached.
bool MediaChannelUtil::TransportForMediaChannels::DoSendPacket(
    rtc::CopyOnWriteBuffer* packet,
    bool rtcp,
    const rtc::PacketOptions& options) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (!network_interface_) {
    return false;
  }
  if (rtcp) {
    return network_interface_->SendRtcp(packet, options);
  }
  return network_interface_->SendPacket(packet, options);
}
int MediaChannelUtil::TransportForMediaChannels::SetOption(
    MediaChannelNetworkInterface::SocketType type,
    rtc::Socket::Option opt,
    int option) {
  RTC_DCHECK_RUN_ON(network_thread_);
  return SetOptionLocked(type, opt, option);
}
// Must run on the network thread (RTC_RUN_ON in the header). Returns -1,
// mirroring a failed socket-option call, when no interface is attached.
int MediaChannelUtil::TransportForMediaChannels::SetOptionLocked(
    MediaChannelNetworkInterface::SocketType type,
    rtc::Socket::Option opt,
    int option) {
  if (!network_interface_)
    return -1;
  return network_interface_->SetOption(type, opt, option);
}
// Records the preferred DSCP value and pushes it to the sockets. Safe to
// call from any thread; reposts itself to the network thread when needed.
void MediaChannelUtil::TransportForMediaChannels::SetPreferredDscp(
    rtc::DiffServCodePoint new_dscp) {
  if (!network_thread_->IsCurrent()) {
    // This is currently the common path as the derived channel classes
    // get called on the worker thread. There are still some tests though
    // that call directly on the network thread.
    network_thread_->PostTask(SafeTask(
        network_safety_, [this, new_dscp]() { SetPreferredDscp(new_dscp); }));
    return;
  }
  RTC_DCHECK_RUN_ON(network_thread_);
  // Skip the socket calls when the value is unchanged.
  if (new_dscp == preferred_dscp_)
    return;
  preferred_dscp_ = new_dscp;
  UpdateDscp();
}
} // namespace cricket

View file

@ -0,0 +1,181 @@
/*
* Copyright 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
#define MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/audio_options.h"
#include "api/call/audio_sink.h"
#include "api/call/transport.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/frame_transformer_interface.h"
#include "api/media_types.h"
#include "api/rtc_error.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/transport/rtp/rtp_source.h"
#include "api/video/recordable_encoded_frame.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/stream_params.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/async_packet_socket.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/dscp.h"
#include "rtc_base/logging.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/socket.h"
#include "rtc_base/thread_annotations.h"
// This file contains the base classes for classes that implement
// the channel interfaces.
// These implementation classes used to be the exposed interface names,
// but this is in the process of being changed.
namespace cricket {
// The `MediaChannelUtil` class provides functionality that is used by
// multiple MediaChannel-like objects, of both sending and receiving
// types.
class MediaChannelUtil {
 public:
  MediaChannelUtil(webrtc::TaskQueueBase* network_thread,
                   bool enable_dscp = false);
  virtual ~MediaChannelUtil();
  // Returns the absolute sendtime extension id value from media channel.
  virtual int GetRtpSendTimeExtnId() const;
  // Non-owning accessor for the internal webrtc::Transport implementation.
  webrtc::Transport* transport() { return &transport_; }
  // Base methods to send packet using MediaChannelNetworkInterface.
  // These methods are used by some tests only.
  bool SendPacket(rtc::CopyOnWriteBuffer* packet,
                  const rtc::PacketOptions& options);
  bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
                const rtc::PacketOptions& options);
  int SetOption(MediaChannelNetworkInterface::SocketType type,
                rtc::Socket::Option opt,
                int option);
  // Functions that form part of one or more interface classes.
  // Not marked override, since this class does not inherit from the
  // interfaces.
  // Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
  // Set to true if it's allowed to mix one- and two-byte RTP header extensions
  // in the same stream. The setter and getter must only be called from
  // worker_thread.
  void SetExtmapAllowMixed(bool extmap_allow_mixed);
  bool ExtmapAllowMixed() const;
  void SetInterface(MediaChannelNetworkInterface* iface);
  // Returns `true` if a non-null MediaChannelNetworkInterface pointer is held.
  // Must be called on the network thread.
  bool HasNetworkInterface() const;

 protected:
  bool DscpEnabled() const;
  void SetPreferredDscp(rtc::DiffServCodePoint new_dscp);

 private:
  // Implementation of the webrtc::Transport interface required
  // by Call().
  class TransportForMediaChannels : public webrtc::Transport {
   public:
    TransportForMediaChannels(webrtc::TaskQueueBase* network_thread,
                              bool enable_dscp);
    virtual ~TransportForMediaChannels();
    // Implementation of webrtc::Transport
    bool SendRtp(rtc::ArrayView<const uint8_t> packet,
                 const webrtc::PacketOptions& options) override;
    bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;
    // Not implementation of webrtc::Transport
    void SetInterface(MediaChannelNetworkInterface* iface);
    int SetOption(MediaChannelNetworkInterface::SocketType type,
                  rtc::Socket::Option opt,
                  int option);
    bool DoSendPacket(rtc::CopyOnWriteBuffer* packet,
                      bool rtcp,
                      const rtc::PacketOptions& options);
    bool HasNetworkInterface() const {
      RTC_DCHECK_RUN_ON(network_thread_);
      return network_interface_ != nullptr;
    }
    bool DscpEnabled() const { return enable_dscp_; }
    void SetPreferredDscp(rtc::DiffServCodePoint new_dscp);

   private:
    // This is the DSCP value used for both RTP and RTCP channels if DSCP is
    // enabled. It can be changed at any time via `SetPreferredDscp`.
    rtc::DiffServCodePoint PreferredDscp() const {
      RTC_DCHECK_RUN_ON(network_thread_);
      return preferred_dscp_;
    }
    // Apply the preferred DSCP setting to the underlying network interface RTP
    // and RTCP channels. If DSCP is disabled, then apply the default DSCP
    // value.
    void UpdateDscp() RTC_RUN_ON(network_thread_);
    int SetOptionLocked(MediaChannelNetworkInterface::SocketType type,
                        rtc::Socket::Option opt,
                        int option) RTC_RUN_ON(network_thread_);
    // Alive while a network interface is attached; cancels posted send
    // tasks once the interface is detached.
    const rtc::scoped_refptr<webrtc::PendingTaskSafetyFlag> network_safety_
        RTC_PT_GUARDED_BY(network_thread_);
    webrtc::TaskQueueBase* const network_thread_;
    const bool enable_dscp_;
    MediaChannelNetworkInterface* network_interface_
        RTC_GUARDED_BY(network_thread_) = nullptr;
    rtc::DiffServCodePoint preferred_dscp_ RTC_GUARDED_BY(network_thread_) =
        rtc::DSCP_DEFAULT;
  };
  // See SetExtmapAllowMixed(); worker-thread only per the comment above.
  bool extmap_allow_mixed_ = false;
  TransportForMediaChannels transport_;
};
} // namespace cricket
#endif // MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_

View file

@ -0,0 +1,98 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_CONFIG_H_
#define MEDIA_BASE_MEDIA_CONFIG_H_
namespace cricket {
// Construction-time settings, passed on when creating
// MediaChannels.
struct MediaConfig {
  // Set DSCP value on packets. This flag comes from the
  // PeerConnection constraint 'googDscp'.
  // TODO(https://crbug.com/1315574): Remove the ability to set it in Chromium
  // and delete this flag.
  bool enable_dscp = true;

  // Video-specific config.
  struct Video {
    // Enable WebRTC CPU Overuse Detection. This flag comes from the
    // PeerConnection constraint 'googCpuOveruseDetection'.
    // TODO(https://crbug.com/1315569): Remove the ability to set it in Chromium
    // and delete this flag.
    bool enable_cpu_adaptation = true;
    // Enable WebRTC suspension of video. No video frames will be sent
    // when the bitrate is below the configured minimum bitrate. This
    // flag comes from the PeerConnection constraint
    // 'googSuspendBelowMinBitrate', and WebRtcVideoChannel copies it
    // to VideoSendStream::Config::suspend_below_min_bitrate.
    // TODO(https://crbug.com/1315564): Remove the ability to set it in Chromium
    // and delete this flag.
    bool suspend_below_min_bitrate = false;
    // Enable buffering and playout timing smoothing of decoded frames.
    // If set to true, then WebRTC will buffer and potentially drop decoded
    // frames in order to keep a smooth rendering.
    // If set to false, then WebRTC will hand over the frame from the decoder
    // to the renderer as soon as possible, meaning that the renderer is
    // responsible for smooth rendering.
    // Note that even if this flag is set to false, dropping of frames can
    // still happen pre-decode, e.g., dropping of higher temporal layers.
    // This flag comes from the PeerConnection RtcConfiguration.
    bool enable_prerenderer_smoothing = true;
    // Enables periodic bandwidth probing in application-limited region.
    bool periodic_alr_bandwidth_probing = false;
    // Enables the new method to estimate the cpu load from encoding, used for
    // cpu adaptation. This flag is intended to be controlled primarily by a
    // Chrome origin-trial.
    // TODO(bugs.webrtc.org/8504): If all goes well, the flag will be removed
    // together with the old method of estimation.
    bool experiment_cpu_load_estimator = false;
    // Time interval between RTCP report for video
    int rtcp_report_interval_ms = 1000;
    // Enables send packet batching from the egress RTP sender.
    bool enable_send_packet_batching = false;
  } video;

  // Audio-specific config.
  struct Audio {
    // Time interval between RTCP report for audio
    int rtcp_report_interval_ms = 5000;
  } audio;

  // NOTE: operator== must compare every member; keep it in sync when adding
  // a new config field.
  bool operator==(const MediaConfig& o) const {
    return enable_dscp == o.enable_dscp &&
           video.enable_cpu_adaptation == o.video.enable_cpu_adaptation &&
           video.suspend_below_min_bitrate ==
               o.video.suspend_below_min_bitrate &&
           video.enable_prerenderer_smoothing ==
               o.video.enable_prerenderer_smoothing &&
           video.periodic_alr_bandwidth_probing ==
               o.video.periodic_alr_bandwidth_probing &&
           video.experiment_cpu_load_estimator ==
               o.video.experiment_cpu_load_estimator &&
           video.rtcp_report_interval_ms == o.video.rtcp_report_interval_ms &&
           video.enable_send_packet_batching ==
               o.video.enable_send_packet_batching &&
           audio.rtcp_report_interval_ms == o.audio.rtcp_report_interval_ms;
  }

  bool operator!=(const MediaConfig& o) const { return !(*this == o); }
};
} // namespace cricket
#endif // MEDIA_BASE_MEDIA_CONFIG_H_

View file

@ -0,0 +1,149 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/media_constants.h"
namespace cricket {
// Video transport parameters.
const int kVideoCodecClockrate = 90000;
const int kVideoMtu = 1200;
const int kVideoRtpSendBufferSize = 262144;
const int kVideoRtpRecvBufferSize = 262144;
// CPU adaptation thresholds.
const float kHighSystemCpuThreshold = 0.85f;
const float kLowSystemCpuThreshold = 0.65f;
const float kProcessCpuThreshold = 0.10f;
// FEC / redundancy codec names.
const char kRedCodecName[] = "red";
const char kUlpfecCodecName[] = "ulpfec";
const char kMultiplexCodecName[] = "multiplex";
// TODO(brandtr): Change this to 'flexfec' when we are confident that the
// header format is not changing anymore.
const char kFlexfecCodecName[] = "flexfec-03";
// draft-ietf-payload-flexible-fec-scheme-02.txt
const char kFlexfecFmtpRepairWindow[] = "repair-window";
// RFC 4588 RTP Retransmission Payload Format
const char kRtxCodecName[] = "rtx";
const char kCodecParamRtxTime[] = "rtx-time";
const char kCodecParamAssociatedPayloadType[] = "apt";
const char kCodecParamAssociatedCodecName[] = "acn";
// Parameters that do not follow the key-value convention
// are treated as having the empty string as key.
const char kCodecParamNotInNameValueFormat[] = "";
// Audio codec names.
const char kOpusCodecName[] = "opus";
const char kL16CodecName[] = "L16";
const char kG722CodecName[] = "G722";
const char kIlbcCodecName[] = "ILBC";
const char kPcmuCodecName[] = "PCMU";
const char kPcmaCodecName[] = "PCMA";
const char kCnCodecName[] = "CN";
const char kDtmfCodecName[] = "telephone-event";
// draft-spittka-payload-rtp-opus-03.txt
const char kCodecParamPTime[] = "ptime";
const char kCodecParamMaxPTime[] = "maxptime";
const char kCodecParamMinPTime[] = "minptime";
const char kCodecParamSPropStereo[] = "sprop-stereo";
const char kCodecParamStereo[] = "stereo";
const char kCodecParamUseInbandFec[] = "useinbandfec";
const char kCodecParamUseDtx[] = "usedtx";
const char kCodecParamMaxAverageBitrate[] = "maxaveragebitrate";
const char kCodecParamMaxPlaybackRate[] = "maxplaybackrate";
const char kParamValueTrue[] = "1";
const char kParamValueEmpty[] = "";
// Opus defaults.
const int kOpusDefaultMaxPTime = 120;
const int kOpusDefaultPTime = 20;
const int kOpusDefaultMinPTime = 3;
const int kOpusDefaultSPropStereo = 0;
const int kOpusDefaultStereo = 0;
const int kOpusDefaultUseInbandFec = 0;
const int kOpusDefaultUseDtx = 0;
const int kOpusDefaultMaxPlaybackRate = 48000;
// Preferred values when negotiating audio parameters.
const int kPreferredMaxPTime = 120;
const int kPreferredMinPTime = 10;
const int kPreferredSPropStereo = 0;
const int kPreferredStereo = 0;
const int kPreferredUseInbandFec = 0;
const char kPacketizationParamRaw[] = "raw";
// RTCP feedback parameter names.
const char kRtcpFbParamLntf[] = "goog-lntf";
const char kRtcpFbParamNack[] = "nack";
const char kRtcpFbNackParamPli[] = "pli";
const char kRtcpFbParamRemb[] = "goog-remb";
const char kRtcpFbParamTransportCc[] = "transport-cc";
const char kRtcpFbParamCcm[] = "ccm";
const char kRtcpFbCcmParamFir[] = "fir";
const char kRtcpFbParamRrtr[] = "rrtr";
// Google-specific bitrate/quantization fmtp parameters.
const char kCodecParamMaxBitrate[] = "x-google-max-bitrate";
const char kCodecParamMinBitrate[] = "x-google-min-bitrate";
const char kCodecParamStartBitrate[] = "x-google-start-bitrate";
const char kCodecParamMaxQuantization[] = "x-google-max-quantization";
const char kComfortNoiseCodecName[] = "CN";
// Video codec names.
const char kVp8CodecName[] = "VP8";
const char kVp9CodecName[] = "VP9";
const char kAv1CodecName[] = "AV1";
const char kH264CodecName[] = "H264";
const char kH265CodecName[] = "H265";
// RFC 6184 RTP Payload Format for H.264 video
const char kH264FmtpProfileLevelId[] = "profile-level-id";
const char kH264FmtpLevelAsymmetryAllowed[] = "level-asymmetry-allowed";
const char kH264FmtpPacketizationMode[] = "packetization-mode";
const char kH264FmtpSpropParameterSets[] = "sprop-parameter-sets";
const char kH264FmtpSpsPpsIdrInKeyframe[] = "sps-pps-idr-in-keyframe";
const char kH264ProfileLevelConstrainedBaseline[] = "42e01f";
const char kH264ProfileLevelConstrainedHigh[] = "640c1f";
// RFC 7798 RTP Payload Format for H.265 video
const char kH265FmtpProfileSpace[] = "profile-space";
const char kH265FmtpTierFlag[] = "tier-flag";
const char kH265FmtpProfileId[] = "profile-id";
const char kH265FmtpLevelId[] = "level-id";
const char kH265FmtpProfileCompatibilityIndicator[] =
    "profile-compatibility-indicator";
const char kH265FmtpInteropConstraints[] = "interop-constraints";
const char kH265FmtpTxMode[] = "tx-mode";
// draft-ietf-payload-vp9
const char kVP9ProfileId[] = "profile-id";
// https://aomediacodec.github.io/av1-rtp-spec/
const char kAv1FmtpProfile[] = "profile";
const char kAv1FmtpLevelIdx[] = "level-idx";
const char kAv1FmtpTier[] = "tier";
const int kDefaultVideoMaxFramerate = 60;
// Max encode quantizer for VP8/9 and AV1 encoders assuming libvpx/libaom API
// range [0, 63]
const int kDefaultVideoMaxQpVpx = 56;
// Max encode quantizer for H264/5 assuming the bitstream range [0, 51].
const int kDefaultVideoMaxQpH26x = 51;
// Simulcast/conference-mode layer limits.
const size_t kConferenceMaxNumSpatialLayers = 3;
const size_t kConferenceMaxNumTemporalLayers = 3;
const size_t kConferenceDefaultNumTemporalLayers = 3;
// RFC 3556 and RFC 3890
const char kApplicationSpecificBandwidth[] = "AS";
const char kTransportSpecificBandwidth[] = "TIAS";
} // namespace cricket

View file

@ -0,0 +1,170 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_CONSTANTS_H_
#define MEDIA_BASE_MEDIA_CONSTANTS_H_

#include <stddef.h>

#include "rtc_base/system/rtc_export.h"

// This file contains constants related to media.

namespace cricket {

// Video transport tuning constants (clock rate, MTU, socket buffer sizes).
extern const int kVideoCodecClockrate;
extern const int kVideoMtu;
extern const int kVideoRtpSendBufferSize;
extern const int kVideoRtpRecvBufferSize;

// Default CPU thresholds.
extern const float kHighSystemCpuThreshold;
extern const float kLowSystemCpuThreshold;
extern const float kProcessCpuThreshold;

// FEC / retransmission codec names and their parameters.
extern const char kRedCodecName[];
extern const char kUlpfecCodecName[];
extern const char kFlexfecCodecName[];
extern const char kMultiplexCodecName[];
extern const char kFlexfecFmtpRepairWindow[];
extern const char kRtxCodecName[];
extern const char kCodecParamRtxTime[];
extern const char kCodecParamAssociatedPayloadType[];
extern const char kCodecParamAssociatedCodecName[];
extern const char kCodecParamNotInNameValueFormat[];

// Audio codec names.
extern const char kOpusCodecName[];
extern const char kL16CodecName[];
extern const char kG722CodecName[];
extern const char kIlbcCodecName[];
extern const char kPcmuCodecName[];
extern const char kPcmaCodecName[];
extern const char kCnCodecName[];
extern const char kDtmfCodecName[];

// Attribute parameters
extern const char kCodecParamPTime[];
extern const char kCodecParamMaxPTime[];
// fmtp parameters
extern const char kCodecParamMinPTime[];
extern const char kCodecParamSPropStereo[];
extern const char kCodecParamStereo[];
extern const char kCodecParamUseInbandFec[];
extern const char kCodecParamUseDtx[];
extern const char kCodecParamMaxAverageBitrate[];
extern const char kCodecParamMaxPlaybackRate[];
extern const char kParamValueTrue[];
// Parameters are stored as parameter/value pairs. For parameters who do not
// have a value, `kParamValueEmpty` should be used as value.
extern const char kParamValueEmpty[];

// opus parameters.
// Default value for maxptime according to
// http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
extern const int kOpusDefaultMaxPTime;
extern const int kOpusDefaultPTime;
extern const int kOpusDefaultMinPTime;
extern const int kOpusDefaultSPropStereo;
extern const int kOpusDefaultStereo;
extern const int kOpusDefaultUseInbandFec;
extern const int kOpusDefaultUseDtx;
extern const int kOpusDefaultMaxPlaybackRate;

// Prefered values in this code base. Note that they may differ from the default
// values in http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
// Only frames larger or equal to 10 ms are currently supported in this code
// base.
extern const int kPreferredMaxPTime;
extern const int kPreferredMinPTime;
extern const int kPreferredSPropStereo;
extern const int kPreferredStereo;
extern const int kPreferredUseInbandFec;

extern const char kPacketizationParamRaw[];

// rtcp-fb message in its first experimental stages. Documentation pending.
extern const char kRtcpFbParamLntf[];
// rtcp-fb messages according to RFC 4585
extern const char kRtcpFbParamNack[];
extern const char kRtcpFbNackParamPli[];
// rtcp-fb messages according to
// http://tools.ietf.org/html/draft-alvestrand-rmcat-remb-00
extern const char kRtcpFbParamRemb[];
// rtcp-fb messages according to
// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
extern const char kRtcpFbParamTransportCc[];
// ccm submessages according to RFC 5104
extern const char kRtcpFbParamCcm[];
extern const char kRtcpFbCcmParamFir[];
// Receiver reference time report
// https://tools.ietf.org/html/rfc3611 section 4.4
extern const char kRtcpFbParamRrtr[];

// Google specific parameters
extern const char kCodecParamMaxBitrate[];
extern const char kCodecParamMinBitrate[];
extern const char kCodecParamStartBitrate[];
extern const char kCodecParamMaxQuantization[];

// Video codec names (exported so embedders can reference them directly).
extern const char kComfortNoiseCodecName[];
RTC_EXPORT extern const char kVp8CodecName[];
RTC_EXPORT extern const char kVp9CodecName[];
RTC_EXPORT extern const char kAv1CodecName[];
RTC_EXPORT extern const char kH264CodecName[];
RTC_EXPORT extern const char kH265CodecName[];

// RFC 6184 RTP Payload Format for H.264 video
RTC_EXPORT extern const char kH264FmtpProfileLevelId[];
RTC_EXPORT extern const char kH264FmtpLevelAsymmetryAllowed[];
RTC_EXPORT extern const char kH264FmtpPacketizationMode[];
extern const char kH264FmtpSpropParameterSets[];
extern const char kH264FmtpSpsPpsIdrInKeyframe[];
extern const char kH264ProfileLevelConstrainedBaseline[];
extern const char kH264ProfileLevelConstrainedHigh[];

// RFC 7798 RTP Payload Format for H.265 video.
// According to RFC 7742, the sprop parameters MUST NOT be included
// in SDP generated by WebRTC, so for H.265 we don't handle them, though
// current H.264 implementation honors them when receiving
// sprop-parameter-sets in SDP.
RTC_EXPORT extern const char kH265FmtpProfileSpace[];
RTC_EXPORT extern const char kH265FmtpTierFlag[];
RTC_EXPORT extern const char kH265FmtpProfileId[];
RTC_EXPORT extern const char kH265FmtpLevelId[];
RTC_EXPORT extern const char kH265FmtpProfileCompatibilityIndicator[];
RTC_EXPORT extern const char kH265FmtpInteropConstraints[];
RTC_EXPORT extern const char kH265FmtpTxMode[];

// draft-ietf-payload-vp9
extern const char kVP9ProfileId[];

// https://aomediacodec.github.io/av1-rtp-spec/
extern const char kAv1FmtpProfile[];
extern const char kAv1FmtpLevelIdx[];
extern const char kAv1FmtpTier[];

// Encoder defaults and conference-mode layer limits; values are defined in
// media_constants.cc.
extern const int kDefaultVideoMaxFramerate;
extern const int kDefaultVideoMaxQpVpx;
extern const int kDefaultVideoMaxQpH26x;
extern const size_t kConferenceMaxNumSpatialLayers;
extern const size_t kConferenceMaxNumTemporalLayers;
extern const size_t kConferenceDefaultNumTemporalLayers;

// SDP bandwidth modifiers, RFC 3556 and RFC 3890.
extern const char kApplicationSpecificBandwidth[];
extern const char kTransportSpecificBandwidth[];

}  // namespace cricket

#endif  // MEDIA_BASE_MEDIA_CONSTANTS_H_

View file

@ -0,0 +1,291 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/media_engine.h"
#include <stddef.h>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "api/video/video_bitrate_allocation.h"
#include "rtc_base/checks.h"
#include "rtc_base/string_encode.h"
namespace cricket {
// Out-of-line defaulted special members for RtpCapabilities (declared in
// media_engine.h).
RtpCapabilities::RtpCapabilities() = default;
RtpCapabilities::~RtpCapabilities() = default;
// Builds an RtpParameters object pre-populated with a single,
// default-constructed encoding.
webrtc::RtpParameters CreateRtpParametersWithOneEncoding() {
  webrtc::RtpParameters result;
  result.encodings.emplace_back();
  return result;
}
// Builds RtpParameters with one encoding per primary SSRC of `sp`.
// If `sp` carries RID descriptions there must be either none or exactly one
// per primary SSRC; each RID string is copied onto the matching encoding.
// The RTCP CNAME is taken from `sp` as well.
webrtc::RtpParameters CreateRtpParametersWithEncodings(StreamParams sp) {
  std::vector<uint32_t> ssrcs;
  sp.GetPrimarySsrcs(&ssrcs);

  webrtc::RtpParameters parameters;
  parameters.encodings.resize(ssrcs.size());
  for (size_t i = 0; i < ssrcs.size(); ++i) {
    parameters.encodings[i].ssrc = ssrcs[i];
  }

  const std::vector<RidDescription>& rids = sp.rids();
  RTC_DCHECK(rids.empty() || rids.size() == ssrcs.size());
  for (size_t i = 0; i < rids.size(); ++i) {
    parameters.encodings[i].rid = rids[i].rid;
  }

  parameters.rtcp.cname = sp.cname;
  return parameters;
}
std::vector<webrtc::RtpExtension> GetDefaultEnabledRtpHeaderExtensions(
const RtpHeaderExtensionQueryInterface& query_interface) {
std::vector<webrtc::RtpExtension> extensions;
for (const auto& entry : query_interface.GetRtpHeaderExtensions()) {
if (entry.direction != webrtc::RtpTransceiverDirection::kStopped)
extensions.emplace_back(entry.uri, *entry.preferred_id);
}
return extensions;
}
// Validates the `codec` and `scalability_mode` fields of each encoding in
// `rtp_parameters` against `codec_preferences`:
// * An encoding that names a codec must match one of the preferences; the
//   matched codec then replaces `send_codec` for the remaining checks.
// * An encoding that sets a scalability mode must use a mode supported by
//   `send_codec` when one is known, or by at least one preference otherwise.
// With an empty `codec_preferences` no check is performed and OK is returned.
webrtc::RTCError CheckScalabilityModeValues(
    const webrtc::RtpParameters& rtp_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec) {
  using webrtc::RTCErrorType;
  if (codec_preferences.empty()) {
    // This is an audio sender or an extra check in the stack where the codec
    // list is not available and we can't check the scalability_mode values.
    return webrtc::RTCError::OK();
  }
  for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
    if (rtp_parameters.encodings[i].codec) {
      bool codecFound = false;
      for (const cricket::VideoCodec& codec : codec_preferences) {
        if (codec.MatchesRtpCodec(*rtp_parameters.encodings[i].codec)) {
          codecFound = true;
          // `send_codec` is a by-value parameter: the match is remembered
          // for the scalability-mode checks of this and later encodings.
          send_codec = codec;
          break;
        }
      }
      if (!codecFound) {
        LOG_AND_RETURN_ERROR(
            RTCErrorType::INVALID_MODIFICATION,
            "Attempted to use an unsupported codec for layer " +
                std::to_string(i));
      }
    }
    if (rtp_parameters.encodings[i].scalability_mode) {
      if (!send_codec) {
        // No reference codec known: accept the mode if any preferred codec
        // supports it.
        bool scalabilityModeFound = false;
        for (const cricket::VideoCodec& codec : codec_preferences) {
          for (const auto& scalability_mode : codec.scalability_modes) {
            if (ScalabilityModeToString(scalability_mode) ==
                *rtp_parameters.encodings[i].scalability_mode) {
              scalabilityModeFound = true;
              break;
            }
          }
          if (scalabilityModeFound)
            break;
        }
        if (!scalabilityModeFound) {
          LOG_AND_RETURN_ERROR(
              RTCErrorType::INVALID_MODIFICATION,
              "Attempted to set RtpParameters scalabilityMode "
              "to an unsupported value for the current codecs.");
        }
      } else {
        // A reference codec is known: the mode must be one it supports.
        bool scalabilityModeFound = false;
        for (const auto& scalability_mode : send_codec->scalability_modes) {
          if (ScalabilityModeToString(scalability_mode) ==
              *rtp_parameters.encodings[i].scalability_mode) {
            scalabilityModeFound = true;
            break;
          }
        }
        if (!scalabilityModeFound) {
          LOG_AND_RETURN_ERROR(
              RTCErrorType::INVALID_MODIFICATION,
              "Attempted to set RtpParameters scalabilityMode "
              "to an unsupported value for the current codecs.");
        }
      }
    }
  }
  return webrtc::RTCError::OK();
}
// Validates the per-encoding value ranges of `rtp_parameters`:
// * bitrate_priority must be > 0;
// * scale_resolution_down_by, when set, must be >= 1.0;
// * max_framerate, when set, must be >= 0.0;
// * min_bitrate_bps must not exceed max_bitrate_bps when both are set;
// * num_temporal_layers, when set, must be in [1, webrtc::kMaxTemporalStreams];
// * requested_resolution and scale_resolution_down_by are mutually exclusive;
// * all encodings must request the same codec.
// On success, delegates the codec/scalability-mode checks to
// CheckScalabilityModeValues(). Returns RTCError::OK() if everything passes.
webrtc::RTCError CheckRtpParametersValues(
    const webrtc::RtpParameters& rtp_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec) {
  using webrtc::RTCErrorType;
  for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
    if (rtp_parameters.encodings[i].bitrate_priority <= 0) {
      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                           "Attempted to set RtpParameters bitrate_priority to "
                           "an invalid number. bitrate_priority must be > 0.");
    }
    if (rtp_parameters.encodings[i].scale_resolution_down_by &&
        *rtp_parameters.encodings[i].scale_resolution_down_by < 1.0) {
      LOG_AND_RETURN_ERROR(
          RTCErrorType::INVALID_RANGE,
          "Attempted to set RtpParameters scale_resolution_down_by to an "
          "invalid value. scale_resolution_down_by must be >= 1.0");
    }
    if (rtp_parameters.encodings[i].max_framerate &&
        *rtp_parameters.encodings[i].max_framerate < 0.0) {
      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                           "Attempted to set RtpParameters max_framerate to an "
                           "invalid value. max_framerate must be >= 0.0");
    }
    if (rtp_parameters.encodings[i].min_bitrate_bps &&
        rtp_parameters.encodings[i].max_bitrate_bps) {
      if (*rtp_parameters.encodings[i].max_bitrate_bps <
          *rtp_parameters.encodings[i].min_bitrate_bps) {
        LOG_AND_RETURN_ERROR(webrtc::RTCErrorType::INVALID_RANGE,
                             "Attempted to set RtpParameters min bitrate "
                             "larger than max bitrate.");
      }
    }
    if (rtp_parameters.encodings[i].num_temporal_layers) {
      if (*rtp_parameters.encodings[i].num_temporal_layers < 1 ||
          *rtp_parameters.encodings[i].num_temporal_layers >
              webrtc::kMaxTemporalStreams) {
        LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                             "Attempted to set RtpParameters "
                             "num_temporal_layers to an invalid number.");
      }
    }
    if (rtp_parameters.encodings[i].requested_resolution &&
        rtp_parameters.encodings[i].scale_resolution_down_by) {
      // Fixed typo in the error message ("simultaniously").
      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                           "Attempted to set scale_resolution_down_by and "
                           "requested_resolution simultaneously.");
    }
    // Mixed per-encoding codecs are not supported; compare each encoding
    // against its predecessor.
    if (i > 0 && rtp_parameters.encodings[i - 1].codec !=
                     rtp_parameters.encodings[i].codec) {
      LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_OPERATION,
                           "Attempted to use different codec values for "
                           "different encodings.");
    }
  }
  return CheckScalabilityModeValues(rtp_parameters, codec_preferences,
                                    send_codec);
}
// Overload without codec information: runs the same immutability and value
// checks but with an empty codec list and no send codec, which skips the
// scalability-mode validation.
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_rtp_parameters,
    const webrtc::RtpParameters& rtp_parameters) {
  return CheckRtpParametersInvalidModificationAndValues(
      old_rtp_parameters, rtp_parameters, {}, absl::nullopt);
}
// Verifies that `rtp_parameters` differs from `old_rtp_parameters` only in
// mutable fields, then validates the new values via
// CheckRtpParametersValues(). Treated as immutable here: the encoding count,
// the RTCP parameters, the header extensions, and the per-encoding RID and
// SSRC values.
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_rtp_parameters,
    const webrtc::RtpParameters& rtp_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec) {
  using webrtc::RTCErrorType;
  if (rtp_parameters.encodings.size() != old_rtp_parameters.encodings.size()) {
    LOG_AND_RETURN_ERROR(
        RTCErrorType::INVALID_MODIFICATION,
        "Attempted to set RtpParameters with different encoding count");
  }
  if (rtp_parameters.rtcp != old_rtp_parameters.rtcp) {
    LOG_AND_RETURN_ERROR(
        RTCErrorType::INVALID_MODIFICATION,
        "Attempted to set RtpParameters with modified RTCP parameters");
  }
  if (rtp_parameters.header_extensions !=
      old_rtp_parameters.header_extensions) {
    LOG_AND_RETURN_ERROR(
        RTCErrorType::INVALID_MODIFICATION,
        "Attempted to set RtpParameters with modified header extensions");
  }
  // Encodings are compared pairwise, so order matters: RIDs must match
  // position by position.
  if (!absl::c_equal(old_rtp_parameters.encodings, rtp_parameters.encodings,
                     [](const webrtc::RtpEncodingParameters& encoding1,
                        const webrtc::RtpEncodingParameters& encoding2) {
                       return encoding1.rid == encoding2.rid;
                     })) {
    LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION,
                         "Attempted to change RID values in the encodings.");
  }
  // Same pairwise comparison for SSRCs.
  if (!absl::c_equal(old_rtp_parameters.encodings, rtp_parameters.encodings,
                     [](const webrtc::RtpEncodingParameters& encoding1,
                        const webrtc::RtpEncodingParameters& encoding2) {
                       return encoding1.ssrc == encoding2.ssrc;
                     })) {
    LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION,
                         "Attempted to set RtpParameters with modified SSRC");
  }
  return CheckRtpParametersValues(rtp_parameters, codec_preferences,
                                  send_codec);
}
// Constructs a composite engine, taking ownership of the field trials
// (may be null), the voice engine and the video engine.
CompositeMediaEngine::CompositeMediaEngine(
    std::unique_ptr<webrtc::FieldTrialsView> trials,
    std::unique_ptr<VoiceEngineInterface> audio_engine,
    std::unique_ptr<VideoEngineInterface> video_engine)
    : trials_(std::move(trials)),
      voice_engine_(std::move(audio_engine)),
      video_engine_(std::move(video_engine)) {}

// Convenience overload without field trials; delegates with a null trials
// pointer.
CompositeMediaEngine::CompositeMediaEngine(
    std::unique_ptr<VoiceEngineInterface> audio_engine,
    std::unique_ptr<VideoEngineInterface> video_engine)
    : CompositeMediaEngine(nullptr,
                           std::move(audio_engine),
                           std::move(video_engine)) {}

CompositeMediaEngine::~CompositeMediaEngine() = default;
// Initializes the voice engine; the video engine requires no explicit
// initialization here. Always returns true.
bool CompositeMediaEngine::Init() {
  voice().Init();
  return true;
}
// Accessors for the wrapped engines. The unique_ptr members are const and
// set at construction, so dereferencing them here is safe.
VoiceEngineInterface& CompositeMediaEngine::voice() {
  return *voice_engine_;
}

VideoEngineInterface& CompositeMediaEngine::video() {
  return *video_engine_;
}

const VoiceEngineInterface& CompositeMediaEngine::voice() const {
  return *voice_engine_;
}

const VideoEngineInterface& CompositeMediaEngine::video() const {
  return *video_engine_;
}
} // namespace cricket

View file

@ -0,0 +1,239 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_ENGINE_H_
#define MEDIA_BASE_MEDIA_ENGINE_H_
#include <memory>
#include <string>
#include <vector>
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/crypto/crypto_options.h"
#include "api/field_trials_view.h"
#include "api/rtp_parameters.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "call/audio_state.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_config.h"
#include "media/base/video_common.h"
#include "rtc_base/system/file_wrapper.h"
namespace webrtc {
class AudioDeviceModule;
class AudioMixer;
class AudioProcessing;
class Call;
} // namespace webrtc
namespace cricket {
// All check functions below return webrtc::RTCError::OK() on success and an
// error with a descriptive message otherwise. Definitions live in
// media_engine.cc.

// Checks that the scalability_mode value of each encoding is supported by at
// least one video codec of the list. If the list is empty, no check is done.
webrtc::RTCError CheckScalabilityModeValues(
    const webrtc::RtpParameters& new_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec);

// Checks the parameters have valid and supported values, and checks parameters
// with CheckScalabilityModeValues().
webrtc::RTCError CheckRtpParametersValues(
    const webrtc::RtpParameters& new_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec);

// Checks that the immutable values have not changed in new_parameters and
// checks all parameters with CheckRtpParametersValues().
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_parameters,
    const webrtc::RtpParameters& new_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec);

// Checks that the immutable values have not changed in new_parameters and
// checks parameters (except SVC) with CheckRtpParametersValues(). It should
// usually be paired with a call to CheckScalabilityModeValues().
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_parameters,
    const webrtc::RtpParameters& new_parameters);
// Container for the RTP header extensions an engine advertises.
struct RtpCapabilities {
  RtpCapabilities();
  ~RtpCapabilities();
  // The supported RTP header extensions.
  std::vector<webrtc::RtpExtension> header_extensions;
};
// Interface for querying the RTP header extensions an engine knows about,
// including their default directions.
class RtpHeaderExtensionQueryInterface {
 public:
  virtual ~RtpHeaderExtensionQueryInterface() = default;

  // Returns a vector of RtpHeaderExtensionCapability, whose direction is
  // kStopped if the extension is stopped (not used) by default.
  virtual std::vector<webrtc::RtpHeaderExtensionCapability>
  GetRtpHeaderExtensions() const = 0;
};
// Abstract interface for the audio half of a media engine: channel creation,
// codec enumeration and AEC dump control. Non-copyable.
class VoiceEngineInterface : public RtpHeaderExtensionQueryInterface {
 public:
  VoiceEngineInterface() = default;
  virtual ~VoiceEngineInterface() = default;

  VoiceEngineInterface(const VoiceEngineInterface&) = delete;
  VoiceEngineInterface& operator=(const VoiceEngineInterface&) = delete;

  // Initialization
  // Starts the engine.
  virtual void Init() = 0;

  // TODO(solenberg): Remove once VoE API refactoring is done.
  virtual rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const = 0;

  // Creates a send channel for `call`. The default implementation aborts;
  // subclasses are expected to override.
  virtual std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const AudioOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::AudioCodecPairId codec_pair_id) {
    // TODO(hta): Make pure virtual when all downstream has updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }

  // Creates a receive channel for `call`. The default implementation aborts;
  // subclasses are expected to override.
  virtual std::unique_ptr<VoiceMediaReceiveChannelInterface>
  CreateReceiveChannel(webrtc::Call* call,
                       const MediaConfig& config,
                       const AudioOptions& options,
                       const webrtc::CryptoOptions& crypto_options,
                       webrtc::AudioCodecPairId codec_pair_id) {
    // TODO(hta): Make pure virtual when all downstream has updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }

  // Codecs this engine can send and receive, respectively.
  virtual const std::vector<AudioCodec>& send_codecs() const = 0;
  virtual const std::vector<AudioCodec>& recv_codecs() const = 0;

  // Starts AEC dump using existing file, a maximum file size in bytes can be
  // specified. Logging is stopped just before the size limit is exceeded.
  // If max_size_bytes is set to a value <= 0, no limit will be used.
  virtual bool StartAecDump(webrtc::FileWrapper file,
                            int64_t max_size_bytes) = 0;

  // Stops recording AEC dump.
  virtual void StopAecDump() = 0;

  // Stats from the audio device module, if available.
  virtual absl::optional<webrtc::AudioDeviceModule::Stats>
  GetAudioDeviceStats() = 0;
};
// Abstract interface for the video half of a media engine: channel creation
// and codec enumeration. Non-copyable.
class VideoEngineInterface : public RtpHeaderExtensionQueryInterface {
 public:
  VideoEngineInterface() = default;
  virtual ~VideoEngineInterface() = default;

  VideoEngineInterface(const VideoEngineInterface&) = delete;
  VideoEngineInterface& operator=(const VideoEngineInterface&) = delete;

  // Creates a video send channel for `call`. The default implementation
  // aborts; subclasses are expected to override.
  virtual std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
    // Default implementation, delete when all is updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }

  // Creates a video receive channel for `call`. The default implementation
  // aborts; subclasses are expected to override.
  virtual std::unique_ptr<VideoMediaReceiveChannelInterface>
  CreateReceiveChannel(webrtc::Call* call,
                       const MediaConfig& config,
                       const VideoOptions& options,
                       const webrtc::CryptoOptions& crypto_options) {
    // Default implementation, delete when all is updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }

  // Retrieve list of supported codecs.
  virtual std::vector<VideoCodec> send_codecs() const = 0;
  virtual std::vector<VideoCodec> recv_codecs() const = 0;

  // As above, but if include_rtx is false, don't include RTX codecs.
  // TODO(bugs.webrtc.org/13931): Remove default implementation once
  // upstream subclasses have converted.
  virtual std::vector<VideoCodec> send_codecs(bool include_rtx) const {
    RTC_DCHECK(include_rtx);
    return send_codecs();
  }
  virtual std::vector<VideoCodec> recv_codecs(bool include_rtx) const {
    RTC_DCHECK(include_rtx);
    return recv_codecs();
  }
};
// MediaEngineInterface is an abstraction of a media engine which can be
// subclassed to support different media componentry backends.
// It supports voice and video operations in the same class to facilitate
// proper synchronization between both media types.
class MediaEngineInterface {
 public:
  virtual ~MediaEngineInterface() {}

  // Initialization. Needs to be called on the worker thread.
  virtual bool Init() = 0;

  // Accessors for the voice and video engine components.
  virtual VoiceEngineInterface& voice() = 0;
  virtual VideoEngineInterface& video() = 0;
  virtual const VoiceEngineInterface& voice() const = 0;
  virtual const VideoEngineInterface& video() const = 0;
};
// CompositeMediaEngine constructs a MediaEngine from separate
// voice and video engine classes.
// Optionally owns a FieldTrialsView trials map.
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  // Takes ownership of `trials` (may be null), `audio_engine` and
  // `video_engine`.
  CompositeMediaEngine(std::unique_ptr<webrtc::FieldTrialsView> trials,
                       std::unique_ptr<VoiceEngineInterface> audio_engine,
                       std::unique_ptr<VideoEngineInterface> video_engine);
  // Convenience constructor without field trials.
  CompositeMediaEngine(std::unique_ptr<VoiceEngineInterface> audio_engine,
                       std::unique_ptr<VideoEngineInterface> video_engine);
  ~CompositeMediaEngine() override;

  // Always succeeds.
  bool Init() override;

  VoiceEngineInterface& voice() override;
  VideoEngineInterface& video() override;
  const VoiceEngineInterface& voice() const override;
  const VideoEngineInterface& video() const override;

 private:
  const std::unique_ptr<webrtc::FieldTrialsView> trials_;
  const std::unique_ptr<VoiceEngineInterface> voice_engine_;
  const std::unique_ptr<VideoEngineInterface> video_engine_;
};
// Returns RtpParameters with a single default-constructed encoding.
webrtc::RtpParameters CreateRtpParametersWithOneEncoding();
// Returns RtpParameters with one encoding per primary SSRC of `sp`,
// carrying the SSRC, RID (when present) and RTCP CNAME from `sp`.
webrtc::RtpParameters CreateRtpParametersWithEncodings(StreamParams sp);

// Returns a vector of RTP extensions as visible from RtpSender/Receiver
// GetCapabilities(). The returned vector only shows what will definitely be
// offered by default, i.e. the list of extensions returned from
// GetRtpHeaderExtensions() that are not kStopped.
std::vector<webrtc::RtpExtension> GetDefaultEnabledRtpHeaderExtensions(
    const RtpHeaderExtensionQueryInterface& query_interface);
} // namespace cricket
#endif // MEDIA_BASE_MEDIA_ENGINE_H_

View file

@ -0,0 +1,28 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/rid_description.h"
namespace cricket {
// Out-of-line defaulted special members; the struct is declared in
// rid_description.h.
RidDescription::RidDescription() = default;
RidDescription::RidDescription(const std::string& rid, RidDirection direction)
    : rid{rid}, direction{direction} {}
RidDescription::RidDescription(const RidDescription& other) = default;
RidDescription::~RidDescription() = default;
RidDescription& RidDescription::operator=(const RidDescription& other) =
    default;
// Two RID descriptions are equal when every field matches.
bool RidDescription::operator==(const RidDescription& other) const {
  if (rid != other.rid)
    return false;
  if (direction != other.direction)
    return false;
  if (payload_types != other.payload_types)
    return false;
  return restrictions == other.restrictions;
}
} // namespace cricket

View file

@ -0,0 +1,93 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_RID_DESCRIPTION_H_
#define MEDIA_BASE_RID_DESCRIPTION_H_
#include <map>
#include <string>
#include <vector>
namespace cricket {
// Direction of an RTP stream a RID restriction applies to: packets we send
// or packets we receive (independent of the transceiver direction).
enum class RidDirection { kSend, kReceive };
// Description of a Restriction Id (RID) according to:
// https://tools.ietf.org/html/draft-ietf-mmusic-rid-15
// A Restriction Identifier serves two purposes:
// 1. Uniquely identifies an RTP stream inside an RTP session.
// When combined with MIDs (https://tools.ietf.org/html/rfc5888),
// RIDs uniquely identify an RTP stream within an RTP session.
// The MID will identify the media section and the RID will identify
// the stream within the section.
// RID identifiers must be unique within the media section.
// 2. Allows indicating further restrictions to the stream.
// These restrictions are added according to the direction specified.
// The direction field identifies the direction of the RTP stream packets
// to which the restrictions apply. The direction is independent of the
// transceiver direction and can be one of {send, recv}.
// The following are some examples of these restrictions:
// a. max-width, max-height, max-fps, max-br, ...
// b. further restricting the codec set (from what m= section specified)
//
// Note: Indicating dependencies between streams (using depend) will not be
// supported, since the WG is adopting a different approach to achieve this.
// As of 2018-12-04, the new SVC (Scalable Video Coder) approach is still not
// mature enough to be implemented as part of this work.
// See: https://w3c.github.io/webrtc-svc/ for more details.
struct RidDescription final {
  RidDescription();
  RidDescription(const std::string& rid, RidDirection direction);
  RidDescription(const RidDescription& other);
  ~RidDescription();
  RidDescription& operator=(const RidDescription& other);

  // This is currently required for unit tests of StreamParams which contains
  // RidDescription objects and checks for equality using operator==.
  bool operator==(const RidDescription& other) const;
  // Negation of operator==.
  bool operator!=(const RidDescription& other) const {
    return !(*this == other);
  }

  // The RID identifier that uniquely identifies the stream within the session.
  std::string rid;

  // Specifies the direction for which the specified restrictions hold.
  // This direction is either send or receive and is independent of the
  // direction of the transceiver.
  // https://tools.ietf.org/html/draft-ietf-mmusic-rid-15#section-4 :
  // The "direction" field identifies the direction of the RTP Stream
  // packets to which the indicated restrictions are applied.  It may be
  // either "send" or "recv".  Note that these restriction directions are
  // expressed independently of any "inactive", "sendonly", "recvonly", or
  // "sendrecv" attributes associated with the media section.  It is, for
  // example, valid to indicate "recv" restrictions on a "sendonly"
  // stream; those restrictions would apply if, at a future point in time,
  // the stream were changed to "sendrecv" or "recvonly".
  RidDirection direction;

  // The list of codec payload types for this stream.
  // It should be a subset of the payloads supported for the media section.
  std::vector<int> payload_types;

  // Contains key-value pairs for restrictions.
  // The keys are not validated against a known set.
  // The meaning to infer for the values depends on each key.
  // Examples:
  // 1. An entry for max-width will have a value that is interpreted as an int.
  // 2. An entry for max-bpp (bits per pixel) will have a float value.
  // Interpretation (and validation of value) is left for the implementation.
  // I.E. the media engines should validate values for parameters they support.
  std::map<std::string, std::string> restrictions;
};
} // namespace cricket
#endif // MEDIA_BASE_RID_DESCRIPTION_H_

View file

@ -0,0 +1,401 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/rtp_utils.h"
#include <string.h>
#include <vector>
// PacketTimeUpdateParams is defined in asyncpacketsocket.h.
// TODO(sergeyu): Find more appropriate place for PacketTimeUpdateParams.
#include "media/base/turn_utils.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/async_packet_socket.h"
#include "rtc_base/byte_order.h"
#include "rtc_base/checks.h"
#include "rtc_base/message_digest.h"
namespace cricket {
// Byte offsets / lengths used when parsing and rewriting RTP/RTCP packets.
static const size_t kRtcpPayloadTypeOffset = 1;
static const size_t kRtpExtensionHeaderLen = 4;
static const size_t kAbsSendTimeExtensionLen = 3;
static const size_t kOneByteExtensionHeaderLen = 1;
static const size_t kTwoByteExtensionHeaderLen = 2;

namespace {

// Fake auth tag written by the sender when external authentication is enabled.
// HMAC in packet will be compared against this value before updating packet
// with actual HMAC value.
static const uint8_t kFakeAuthTag[10] = {0xba, 0xdd, 0xba, 0xdd, 0xba,
                                         0xdd, 0xba, 0xdd, 0xba, 0xdd};
// Writes `time_us` into a 3-byte absolute-send-time RTP header extension.
//
// The payload of this extension (carried via the general RTP header
// extension mechanism, RFC 5285) is a 24-bit unsigned integer holding the
// sender's current time in seconds as a 6.18 fixed point number:
//
//  0                   1                   2                   3
//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |  ID   | len=2 |              absolute send time               |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// `length` must equal kAbsSendTimeExtensionLen; any other value indicates a
// caller bug and the extension is left untouched.
void UpdateAbsSendTimeExtensionValue(uint8_t* extension_data,
                                     size_t length,
                                     uint64_t time_us) {
  if (length != kAbsSendTimeExtensionLen) {
    RTC_DCHECK_NOTREACHED();
    return;
  }
  // Convert microseconds to 6.18 fixed point seconds, keeping 24 bits.
  const uint32_t fixed_point = ((time_us << 18) / 1000000) & 0x00FFFFFF;
  // Serialize big-endian, most significant byte first.
  extension_data[0] = static_cast<uint8_t>(fixed_point >> 16);
  extension_data[1] = static_cast<uint8_t>(fixed_point >> 8);
  extension_data[2] = static_cast<uint8_t>(fixed_point);
}
// Assumes `length` is actual packet length + tag length. Updates HMAC at end of
// the RTP packet.
// The trailing `srtp_auth_tag_len` bytes (currently holding the fake tag)
// are replaced with HMAC-SHA1 computed over the packet bytes followed by the
// 4-byte ROC, per SRTP external authentication.
void UpdateRtpAuthTag(uint8_t* rtp,
                      size_t length,
                      const rtc::PacketTimeUpdateParams& packet_time_params) {
  // If there is no key, return.
  if (packet_time_params.srtp_auth_key.empty()) {
    return;
  }
  size_t tag_length = packet_time_params.srtp_auth_tag_len;
  // ROC (rollover counter) is at the beginning of the auth tag.
  const size_t kRocLength = 4;
  if (tag_length < kRocLength || tag_length > length) {
    RTC_DCHECK_NOTREACHED();
    return;
  }
  uint8_t* auth_tag = rtp + (length - tag_length);
  // We should have a fake HMAC value @ auth_tag.
  // NOTE(review): kFakeAuthTag is 10 bytes; this DCHECK reads `tag_length`
  // bytes from it, so it assumes tag_length <= 10 — confirm the configured
  // SRTP auth tag length.
  RTC_DCHECK_EQ(0, memcmp(auth_tag, kFakeAuthTag, tag_length));
  // Copy ROC after end of rtp packet.
  memcpy(auth_tag, &packet_time_params.srtp_packet_index, kRocLength);
  // Authentication of a RTP packet will have RTP packet + ROC size.
  size_t auth_required_length = length - tag_length + kRocLength;
  uint8_t output[64];
  size_t result =
      rtc::ComputeHmac(rtc::DIGEST_SHA_1, &packet_time_params.srtp_auth_key[0],
                       packet_time_params.srtp_auth_key.size(), rtp,
                       auth_required_length, output, sizeof(output));
  if (result < tag_length) {
    // HMAC computation produced fewer bytes than the tag needs; bail out.
    RTC_DCHECK_NOTREACHED();
    return;
  }
  // Copy HMAC from output to packet. This is required as auth tag length
  // may not be equal to the actual HMAC length.
  memcpy(auth_tag, output, tag_length);
}
// Reads the single byte at `offset` from `data` into `value`.
// Returns false when either pointer is null; the caller is responsible for
// ensuring `offset` lies within the buffer.
bool GetUint8(const void* data, size_t offset, int* value) {
  if (data == nullptr || value == nullptr) {
    return false;
  }
  const uint8_t* bytes = static_cast<const uint8_t*>(data);
  *value = bytes[offset];
  return true;
}
} // namespace
// Extracts the RTCP payload type (the second header byte) into `value`.
// Fails when the buffer is shorter than a minimal RTCP header.
bool GetRtcpType(const void* data, size_t len, int* value) {
  return len >= kMinRtcpPacketLen &&
         GetUint8(data, kRtcpPayloadTypeOffset, value);
}
// This method returns SSRC first of RTCP packet, except if packet is SDES.
// TODO(mallinath) - Fully implement RFC 5506. This standard doesn't restrict
// to send non-compound packets only to feedback messages.
bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value) {
  // At least 8 bytes are needed: the 4-byte RTCP header plus the 4-byte
  // SSRC that follows it.
  if (data == nullptr || value == nullptr || len < kMinRtcpPacketLen + 4) {
    return false;
  }
  int pl_type;
  if (!GetRtcpType(data, len, &pl_type)) {
    return false;
  }
  // SDES packet parsing is not supported.
  if (pl_type == kRtcpTypeSDES) {
    return false;
  }
  *value = rtc::GetBE32(static_cast<const uint8_t*>(data) + 4);
  return true;
}
// An RTP payload type is a 7-bit value; anything outside [0, 127] is invalid.
bool IsValidRtpPayloadType(int payload_type) {
  return 0 <= payload_type && payload_type < 128;
}
// True when `size` lies within the sane bounds for the given packet type:
// at least a minimal RTP/RTCP header, at most kMaxRtpPacketLen bytes.
bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size) {
  RTC_DCHECK_NE(RtpPacketType::kUnknown, packet_type);
  const size_t lower_bound = (packet_type == RtpPacketType::kRtcp)
                                 ? kMinRtcpPacketLen
                                 : kMinRtpPacketLen;
  return size >= lower_bound && size <= kMaxRtpPacketLen;
}
// Maps a packet-type tag to a human-readable label ("RTP"/"RTCP"/"Unknown").
absl::string_view RtpPacketTypeToString(RtpPacketType packet_type) {
  if (packet_type == RtpPacketType::kRtp) {
    return "RTP";
  }
  if (packet_type == RtpPacketType::kRtcp) {
    return "RTCP";
  }
  if (packet_type == RtpPacketType::kUnknown) {
    return "Unknown";
  }
  // All enumerators are handled above; an out-of-range value is a bug.
  RTC_CHECK_NOTREACHED();
}
// Classifies `packet` by inspecting its header bytes. RTCP is tested before
// RTP, matching the original check order.
RtpPacketType InferRtpPacketType(rtc::ArrayView<const char> packet) {
  const auto bytes = rtc::reinterpret_array_view<const uint8_t>(packet);
  if (webrtc::IsRtcpPacket(bytes)) {
    return RtpPacketType::kRtcp;
  }
  return webrtc::IsRtpPacket(bytes) ? RtpPacketType::kRtp
                                    : RtpPacketType::kUnknown;
}
// Verifies that `rtp` holds a structurally valid RTP header within `length`
// bytes. On success, optionally reports the total header size (fixed part +
// CSRCs + extension block) through `header_length`; on failure the reported
// length is 0.
bool ValidateRtpHeader(const uint8_t* rtp,
                       size_t length,
                       size_t* header_length) {
  if (header_length) {
    *header_length = 0;
  }
  if (length < kMinRtpPacketLen) {
    return false;
  }
  // Low nibble of the first byte is the CSRC count; each CSRC is 4 bytes.
  size_t cc_count = rtp[0] & 0x0F;
  size_t header_length_without_extension = kMinRtpPacketLen + 4 * cc_count;
  if (header_length_without_extension > length) {
    return false;
  }
  // If extension bit is not set, we are done with header processing, as input
  // length is verified above.
  if (!(rtp[0] & 0x10)) {
    if (header_length)
      *header_length = header_length_without_extension;
    return true;
  }
  rtp += header_length_without_extension;
  // The 4-byte extension header (profile + length) must also fit.
  if (header_length_without_extension + kRtpExtensionHeaderLen > length) {
    return false;
  }
  // Getting extension profile length.
  // Length is in 32 bit words.
  uint16_t extension_length_in_32bits = rtc::GetBE16(rtp + 2);
  size_t extension_length = extension_length_in_32bits * 4;
  size_t rtp_header_length = extension_length +
                             header_length_without_extension +
                             kRtpExtensionHeaderLen;
  // Verify input length against total header size.
  if (rtp_header_length > length) {
    return false;
  }
  if (header_length) {
    *header_length = rtp_header_length;
  }
  return true;
}
// ValidateRtpHeader() must be called before this method to make sure, we have
// a sane rtp packet.
// Walks the RFC 8285 one-/two-byte header-extension block looking for the
// element with `extension_id` and, when found, rewrites its payload with the
// abs-send-time derived from `time_us`. Returns true only if the element was
// found and updated (or if there is no extension block at all).
bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp,
                                   size_t length,
                                   int extension_id,
                                   uint64_t time_us) {
  //  0                   1                   2                   3
  //  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |V=2|P|X|  CC   |M|     PT      |       sequence number         |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |                           timestamp                           |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |           synchronization source (SSRC) identifier            |
  // +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
  // |            contributing source (CSRC) identifiers             |
  // |                             ....                              |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // Return if extension bit is not set.
  if (!(rtp[0] & 0x10)) {
    return true;
  }
  size_t cc_count = rtp[0] & 0x0F;
  size_t header_length_without_extension = kMinRtpPacketLen + 4 * cc_count;
  rtp += header_length_without_extension;
  // Getting extension profile ID and length.
  uint16_t profile_id = rtc::GetBE16(rtp);
  // Length is in 32 bit words.
  uint16_t extension_length_in_32bits = rtc::GetBE16(rtp + 2);
  size_t extension_length = extension_length_in_32bits * 4;
  rtp += kRtpExtensionHeaderLen;  // Moving past extension header.
  constexpr uint16_t kOneByteExtensionProfileId = 0xBEDE;
  constexpr uint16_t kTwoByteExtensionProfileId = 0x1000;
  bool found = false;
  if (profile_id == kOneByteExtensionProfileId ||
      profile_id == kTwoByteExtensionProfileId) {
    // OneByte extension header
    //   0
    //   0 1 2 3 4 5 6 7
    //  +-+-+-+-+-+-+-+-+
    //  |  ID   |length |
    //  +-+-+-+-+-+-+-+-+
    //   0                   1                   2                   3
    //   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |       0xBE    |    0xDE       |           length=3            |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |  ID   | L=0   |     data      |  ID   |  L=1  |   data...
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //       ...data   |    0 (pad)    |    0 (pad)    |  ID   | L=3   |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |                          data                                 |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // TwoByte extension header
    //   0
    //   0 1 2 3 4 5 6 7
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |      ID       |    length     |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //   0                   1                   2                   3
    //   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |     0x10      |     0x00      |           length=3            |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |      ID       |     L=1       |     data      |      ID       |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |     L=2       |     data      |    0 (pad)    |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |      ID       |     L=2       |     data      |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    size_t extension_header_length = profile_id == kOneByteExtensionProfileId
                                         ? kOneByteExtensionHeaderLen
                                         : kTwoByteExtensionHeaderLen;
    const uint8_t* extension_start = rtp;
    const uint8_t* extension_end = extension_start + extension_length;
    // rtp + 1 since the minimum size per header extension is two bytes for both
    // one- and two-byte header extensions.
    while (rtp + 1 < extension_end) {
      // See RFC8285 Section 4.2-4.3 for more information about one- and
      // two-byte header extensions.
      // One-byte: high nibble is the id, low nibble is length-1.
      // Two-byte: first byte is the id, second byte is the length.
      const int id =
          profile_id == kOneByteExtensionProfileId ? (*rtp & 0xF0) >> 4 : *rtp;
      const size_t length = profile_id == kOneByteExtensionProfileId
                                ? (*rtp & 0x0F) + 1
                                : *(rtp + 1);
      // Reject elements that claim to extend past the extension block.
      if (rtp + extension_header_length + length > extension_end) {
        return false;
      }
      if (id == extension_id) {
        UpdateAbsSendTimeExtensionValue(rtp + extension_header_length, length,
                                        time_us);
        found = true;
        break;
      }
      rtp += extension_header_length + length;
      // Counting padding bytes.
      while ((rtp < extension_end) && (*rtp == 0)) {
        ++rtp;
      }
    }
  }
  return found;
}
// Applies `packet_time_params` to an outgoing packet, which may be a raw RTP
// packet or one wrapped in a TURN ChannelData / Send indication: rewrites the
// abs-send-time extension with `time_us` when an extension id is configured,
// then recomputes the external-auth HMAC when an auth key is set.
bool ApplyPacketOptions(uint8_t* data,
                        size_t length,
                        const rtc::PacketTimeUpdateParams& packet_time_params,
                        uint64_t time_us) {
  RTC_DCHECK(data);
  RTC_DCHECK(length);
  // if there is no valid `rtp_sendtime_extension_id` and `srtp_auth_key` in
  // PacketOptions, nothing to be updated in this packet.
  if (packet_time_params.rtp_sendtime_extension_id == -1 &&
      packet_time_params.srtp_auth_key.empty()) {
    return true;
  }
  // If there is a srtp auth key present then the packet must be an RTP packet.
  // RTP packet may have been wrapped in a TURN Channel Data or TURN send
  // indication.
  size_t rtp_start_pos;
  size_t rtp_length;
  if (!UnwrapTurnPacket(data, length, &rtp_start_pos, &rtp_length)) {
    RTC_DCHECK_NOTREACHED();
    return false;
  }
  // Making sure we have a valid RTP packet at the end.
  auto packet = rtc::MakeArrayView(data + rtp_start_pos, rtp_length);
  if (!webrtc::IsRtpPacket(packet) ||
      !ValidateRtpHeader(data + rtp_start_pos, rtp_length, nullptr)) {
    RTC_DCHECK_NOTREACHED();
    return false;
  }
  uint8_t* start = data + rtp_start_pos;
  // If packet option has non default value (-1) for sendtime extension id,
  // then we should parse the rtp packet to update the timestamp. Otherwise
  // just calculate HMAC and update packet with it.
  if (packet_time_params.rtp_sendtime_extension_id != -1) {
    UpdateRtpAbsSendTimeExtension(start, rtp_length,
                                  packet_time_params.rtp_sendtime_extension_id,
                                  time_us);
  }
  // No-op when srtp_auth_key is empty (checked inside).
  UpdateRtpAuthTag(start, rtp_length, packet_time_params);
  return true;
}
} // namespace cricket

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_RTP_UTILS_H_
#define MEDIA_BASE_RTP_UTILS_H_
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "rtc_base/byte_order.h"
#include "rtc_base/system/rtc_export.h"
namespace rtc {
struct PacketTimeUpdateParams;
} // namespace rtc
namespace cricket {
// Packet-size sanity bounds used by the validators in rtp_utils.cc.
const size_t kMinRtpPacketLen = 12;
const size_t kMaxRtpPacketLen = 2048;
const size_t kMinRtcpPacketLen = 4;
// RTCP packet type values as carried in the payload-type byte.
enum RtcpTypes {
  kRtcpTypeSR = 200,     // Sender report payload type.
  kRtcpTypeRR = 201,     // Receiver report payload type.
  kRtcpTypeSDES = 202,   // SDES payload type.
  kRtcpTypeBye = 203,    // BYE payload type.
  kRtcpTypeApp = 204,    // APP payload type.
  kRtcpTypeRTPFB = 205,  // Transport layer Feedback message payload type.
  kRtcpTypePSFB = 206,   // Payload-specific Feedback message payload type.
};
// Rough classification of a packet as inferred from its header bytes.
enum class RtpPacketType {
  kRtp,
  kRtcp,
  kUnknown,
};
bool GetRtcpType(const void* data, size_t len, int* value);
bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value);
// Checks the packet header to determine if it can be an RTP or RTCP packet.
RtpPacketType InferRtpPacketType(rtc::ArrayView<const char> packet);
// True if `payload_type` is in the valid RTP payload-type range 0-127.
bool IsValidRtpPayloadType(int payload_type);
// True if `size` is appropriate for the indicated packet type.
bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size);
// Returns "RTCP", "RTP" or "Unknown" according to `packet_type`.
absl::string_view RtpPacketTypeToString(RtpPacketType packet_type);
// Verifies that a packet has a valid RTP header.
bool RTC_EXPORT ValidateRtpHeader(const uint8_t* rtp,
size_t length,
size_t* header_length);
// Helper method which updates the absolute send time extension if present.
bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp,
size_t length,
int extension_id,
uint64_t time_us);
// Applies specified `options` to the packet. It updates the absolute send time
// extension header, if present, and then updates the HMAC.
bool RTC_EXPORT
ApplyPacketOptions(uint8_t* data,
size_t length,
const rtc::PacketTimeUpdateParams& packet_time_params,
uint64_t time_us);
} // namespace cricket
#endif // MEDIA_BASE_RTP_UTILS_H_

View file

@ -0,0 +1,180 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/sdp_video_format_utils.h"
#include <cstring>
#include <map>
#include <utility>
#include "api/video_codecs/h264_profile_level_id.h"
#ifdef RTC_ENABLE_H265
#include "api/video_codecs/h265_profile_tier_level.h"
#endif
#include "rtc_base/checks.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace {
// SDP fmtp parameter keys parsed and produced by this file.
const char kProfileLevelId[] = "profile-level-id";
const char kH264LevelAsymmetryAllowed[] = "level-asymmetry-allowed";
// Max frame rate for VP8 and VP9 video.
const char kVPxFmtpMaxFrameRate[] = "max-fr";
// Max frame size for VP8 and VP9 video.
const char kVPxFmtpMaxFrameSize[] = "max-fs";
// "max-fs" is expressed in 256-pixel sub-blocks; used to convert to pixels.
const int kVPxFmtpFrameSizeSubBlockPixels = 256;
#ifdef RTC_ENABLE_H265
constexpr char kH265ProfileId[] = "profile-id";
constexpr char kH265TierFlag[] = "tier-flag";
constexpr char kH265LevelId[] = "level-id";
#endif
// True when the fmtp parameter "level-asymmetry-allowed" is present and set
// to exactly "1".
bool IsH264LevelAsymmetryAllowed(const CodecParameterMap& params) {
  const auto it = params.find(kH264LevelAsymmetryAllowed);
  if (it == params.end()) {
    return false;
  }
  return strcmp(it->second.c_str(), "1") == 0;
}
// Compare H264 levels and handle the level 1b case.
// The enum's numeric order does not place kLevel1_b correctly: by the logic
// below the effective ordering is 1 < 1b < 1.1 < ..., so the 1b cases are
// special-cased before falling back to plain enum comparison.
bool H264LevelIsLess(H264Level a, H264Level b) {
  // 1b is less than everything except level 1 and 1b itself.
  if (a == H264Level::kLevel1_b)
    return b != H264Level::kLevel1 && b != H264Level::kLevel1_b;
  // Only level 1 is less than 1b.
  if (b == H264Level::kLevel1_b)
    return a == H264Level::kLevel1;
  return a < b;
}
// Returns whichever of `a` and `b` is the lower H264 level, using the
// 1b-aware ordering from H264LevelIsLess().
H264Level H264LevelMin(H264Level a, H264Level b) {
  if (H264LevelIsLess(a, b)) {
    return a;
  }
  return b;
}
// Looks up `parameter_name` in `params` and parses its value as a strictly
// positive integer. Returns absl::nullopt when the parameter is absent, not
// parseable as an int, or not > 0.
// (The local was previously named `max_frame_rate_it`, a copy-paste leftover
// from the max-fr caller; this helper is generic.)
absl::optional<int> ParsePositiveNumberFromParams(
    const CodecParameterMap& params,
    const char* parameter_name) {
  const auto it = params.find(parameter_name);
  if (it == params.end())
    return absl::nullopt;
  const absl::optional<int> value = rtc::StringToNumber<int>(it->second);
  if (!value.has_value() || value.value() <= 0)
    return absl::nullopt;
  return value;
}
#ifdef RTC_ENABLE_H265
// Returns the smaller of two H265Level values (enum order is the level
// order for H.265, unlike H.264's 1b special case).
H265Level H265LevelMin(H265Level a, H265Level b) {
  return b < a ? b : a;
}
// Returns true if none of profile-id/tier-flag/level-id is specified
// explicitly in the param map, i.e. the default PTL is in effect.
bool IsDefaultH265PTL(const CodecParameterMap& params) {
  return params.count(kH265ProfileId) == 0 &&
         params.count(kH265TierFlag) == 0 && params.count(kH265LevelId) == 0;
}
#endif
} // namespace
#ifdef RTC_ENABLE_H265
// Set level according to https://tools.ietf.org/html/rfc7798#section-7.1
// Writes the negotiated H.265 level-id into `answer_params`, given the local
// and remote fmtp parameter maps. Preconditions (DCHECKed): both sides parse
// to a valid PTL with identical profile and tier.
void H265GenerateProfileTierLevelForAnswer(
    const CodecParameterMap& local_supported_params,
    const CodecParameterMap& remote_offered_params,
    CodecParameterMap* answer_params) {
  // If local and remote haven't set profile-id/tier-flag/level-id, they
  // are both using the default PTL In this case, don't set PTL in answer
  // either.
  if (IsDefaultH265PTL(local_supported_params) &&
      IsDefaultH265PTL(remote_offered_params)) {
    return;
  }
  // Parse profile-tier-level.
  const absl::optional<H265ProfileTierLevel> local_profile_tier_level =
      ParseSdpForH265ProfileTierLevel(local_supported_params);
  const absl::optional<H265ProfileTierLevel> remote_profile_tier_level =
      ParseSdpForH265ProfileTierLevel(remote_offered_params);
  // Profile and tier for local and remote codec must be valid and equal.
  RTC_DCHECK(local_profile_tier_level);
  RTC_DCHECK(remote_profile_tier_level);
  RTC_DCHECK_EQ(local_profile_tier_level->profile,
                remote_profile_tier_level->profile);
  RTC_DCHECK_EQ(local_profile_tier_level->tier,
                remote_profile_tier_level->tier);
  // Answer with the lower of the two levels.
  const H265Level answer_level = H265LevelMin(local_profile_tier_level->level,
                                              remote_profile_tier_level->level);
  // Level-id in answer is changable as long as the highest level indicated by
  // the answer is not higher than that indicated by the offer. See
  // https://tools.ietf.org/html/rfc7798#section-7.2.2, sub-clause 2.
  (*answer_params)[kH265LevelId] = H265LevelToString(answer_level);
}
#endif
// Set level according to https://tools.ietf.org/html/rfc6184#section-8.2.2.
// Writes the negotiated H.264 profile-level-id into `answer_params`.
// Precondition (DCHECKed): both parameter maps parse to valid ids with the
// same H264 profile; only the level part is negotiated here.
void H264GenerateProfileLevelIdForAnswer(
    const CodecParameterMap& local_supported_params,
    const CodecParameterMap& remote_offered_params,
    CodecParameterMap* answer_params) {
  // If both local and remote haven't set profile-level-id, they are both using
  // the default profile. In this case, don't set profile-level-id in answer
  // either.
  if (!local_supported_params.count(kProfileLevelId) &&
      !remote_offered_params.count(kProfileLevelId)) {
    return;
  }
  // Parse profile-level-ids.
  const absl::optional<H264ProfileLevelId> local_profile_level_id =
      ParseSdpForH264ProfileLevelId(local_supported_params);
  const absl::optional<H264ProfileLevelId> remote_profile_level_id =
      ParseSdpForH264ProfileLevelId(remote_offered_params);
  // The local and remote codec must have valid and equal H264 Profiles.
  RTC_DCHECK(local_profile_level_id);
  RTC_DCHECK(remote_profile_level_id);
  RTC_DCHECK_EQ(local_profile_level_id->profile,
                remote_profile_level_id->profile);
  // Parse level information.
  // Asymmetry is only allowed when both sides announce it.
  const bool level_asymmetry_allowed =
      IsH264LevelAsymmetryAllowed(local_supported_params) &&
      IsH264LevelAsymmetryAllowed(remote_offered_params);
  const H264Level local_level = local_profile_level_id->level;
  const H264Level remote_level = remote_profile_level_id->level;
  const H264Level min_level = H264LevelMin(local_level, remote_level);
  // Determine answer level. When level asymmetry is not allowed, level upgrade
  // is not allowed, i.e., the level in the answer must be equal to or lower
  // than the level in the offer.
  const H264Level answer_level =
      level_asymmetry_allowed ? local_level : min_level;
  // Set the resulting profile-level-id in the answer parameters.
  (*answer_params)[kProfileLevelId] = *H264ProfileLevelIdToString(
      H264ProfileLevelId(local_profile_level_id->profile, answer_level));
}
// Parses the "max-fr" fmtp parameter (max frame rate for VP8/VP9); nullopt
// when missing or not a positive number.
absl::optional<int> ParseSdpForVPxMaxFrameRate(
    const CodecParameterMap& params) {
  return ParsePositiveNumberFromParams(params, kVPxFmtpMaxFrameRate);
}
// Parses the "max-fs" fmtp parameter (max frame size for VP8/VP9). The SDP
// value counts 256-pixel sub-blocks; the returned value is in total pixels.
absl::optional<int> ParseSdpForVPxMaxFrameSize(
    const CodecParameterMap& params) {
  const absl::optional<int> sub_blocks =
      ParsePositiveNumberFromParams(params, kVPxFmtpMaxFrameSize);
  if (!sub_blocks.has_value()) {
    return absl::nullopt;
  }
  return sub_blocks.value() * kVPxFmtpFrameSizeSubBlockPixels;
}
} // namespace webrtc

View file

@ -0,0 +1,62 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
#define MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
namespace webrtc {
// Generate codec parameters that will be used as answer in an SDP negotiation
// based on local supported parameters and remote offered parameters. Both
// `local_supported_params`, `remote_offered_params`, and `answer_params`
// represent sendrecv media descriptions, i.e they are a mix of both encode and
// decode capabilities. In theory, when the profile in `local_supported_params`
// represent a strict superset of the profile in `remote_offered_params`, we
// could limit the profile in `answer_params` to the profile in
// `remote_offered_params`. However, to simplify the code, each supported H264
// profile should be listed explicitly in the list of local supported codecs,
// even if they are redundant. Then each local codec in the list should be
// tested one at a time against the remote codec, and only when the profiles are
// equal should this function be called. Therefore, this function does not need
// to handle profile intersection, and the profile of `local_supported_params`
// and `remote_offered_params` must be equal before calling this function. The
// parameters that are used when negotiating are the level part of
// profile-level-id and level-asymmetry-allowed.
void H264GenerateProfileLevelIdForAnswer(
const CodecParameterMap& local_supported_params,
const CodecParameterMap& remote_offered_params,
CodecParameterMap* answer_params);
#ifdef RTC_ENABLE_H265
// Works similarly as H264GenerateProfileLevelIdForAnswer, but generates codec
// parameters that will be used as answer for H.265.
// Media configuration parameters, except level-id, must be used symmetrically.
// For level-id, the highest level indicated by the answer must not be higher
// than that indicated by the offer.
void H265GenerateProfileTierLevelForAnswer(
const CodecParameterMap& local_supported_params,
const CodecParameterMap& remote_offered_params,
CodecParameterMap* answer_params);
#endif
// Parse max frame rate from SDP FMTP line. absl::nullopt is returned if the
// field is missing or not a number.
absl::optional<int> ParseSdpForVPxMaxFrameRate(const CodecParameterMap& params);
// Parse max frame size from SDP FMTP line. absl::nullopt is returned if the
// field is missing or not a number. Please note that the value is stored in sub
// blocks but the returned value is in total number of pixels.
absl::optional<int> ParseSdpForVPxMaxFrameSize(const CodecParameterMap& params);
} // namespace webrtc
#endif // MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_

View file

@ -0,0 +1,240 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/stream_params.h"
#include <stdint.h>
#include <list>
#include "absl/algorithm/container.h"
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"
namespace cricket {
namespace {
// Writes "ssrcs:[s1,s2,...]" to `sb`.
void AppendSsrcs(rtc::ArrayView<const uint32_t> ssrcs,
                 rtc::SimpleStringBuilder* sb) {
  *sb << "ssrcs:[";
  bool first = true;
  for (uint32_t ssrc : ssrcs) {
    if (!first) {
      *sb << ",";
    }
    *sb << ssrc;
    first = false;
  }
  *sb << "]";
}
// Writes "ssrc_groups:<g1>,<g2>,..." to `sb` (no surrounding brackets, to
// keep the historical output format).
void AppendSsrcGroups(rtc::ArrayView<const SsrcGroup> ssrc_groups,
                      rtc::SimpleStringBuilder* sb) {
  *sb << "ssrc_groups:";
  bool first = true;
  for (const SsrcGroup& group : ssrc_groups) {
    if (!first) {
      *sb << ",";
    }
    *sb << group.ToString();
    first = false;
  }
}
// Writes "stream_ids:id1,id2,..." to `sb` (no surrounding brackets, to keep
// the historical output format).
void AppendStreamIds(rtc::ArrayView<const std::string> stream_ids,
                     rtc::SimpleStringBuilder* sb) {
  *sb << "stream_ids:";
  bool first = true;
  for (const std::string& stream_id : stream_ids) {
    if (!first) {
      *sb << ",";
    }
    *sb << stream_id;
    first = false;
  }
}
// Writes "rids:[r1,r2,...]" to `sb`.
void AppendRids(rtc::ArrayView<const RidDescription> rids,
                rtc::SimpleStringBuilder* sb) {
  *sb << "rids:[";
  bool first = true;
  for (const RidDescription& rid : rids) {
    if (!first) {
      *sb << ",";
    }
    *sb << rid.rid;
    first = false;
  }
  *sb << "]";
}
} // namespace
// SSRC-group semantics labels as used in SDP "a=ssrc-group" lines.
const char kFecSsrcGroupSemantics[] = "FEC";
const char kFecFrSsrcGroupSemantics[] = "FEC-FR";
const char kFidSsrcGroupSemantics[] = "FID";
const char kSimSsrcGroupSemantics[] = "SIM";
bool GetStream(const StreamParamsVec& streams,
const StreamSelector& selector,
StreamParams* stream_out) {
const StreamParams* found = GetStream(streams, selector);
if (found && stream_out)
*stream_out = *found;
return found != nullptr;
}
// SsrcGroup is a plain value type: copyable and movable, with all special
// members defaulted.
SsrcGroup::SsrcGroup(const std::string& usage,
                     const std::vector<uint32_t>& ssrcs)
    : semantics(usage), ssrcs(ssrcs) {}
SsrcGroup::SsrcGroup(const SsrcGroup&) = default;
SsrcGroup::SsrcGroup(SsrcGroup&&) = default;
SsrcGroup::~SsrcGroup() = default;
SsrcGroup& SsrcGroup::operator=(const SsrcGroup&) = default;
SsrcGroup& SsrcGroup::operator=(SsrcGroup&&) = default;
// True when this group carries the requested semantics and is non-empty;
// a group without SSRCs never matches.
bool SsrcGroup::has_semantics(const std::string& semantics_in) const {
  // `!ssrcs.empty()` is the idiomatic form of the previous `size() > 0`.
  return semantics == semantics_in && !ssrcs.empty();
}
std::string SsrcGroup::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder sb(buf);
sb << "{";
sb << "semantics:" << semantics << ";";
AppendSsrcs(ssrcs, &sb);
sb << "}";
return sb.str();
}
// StreamParams is a plain value type: default-constructible, copyable and
// movable, with all special members defaulted.
StreamParams::StreamParams() = default;
StreamParams::StreamParams(const StreamParams&) = default;
StreamParams::StreamParams(StreamParams&&) = default;
StreamParams::~StreamParams() = default;
StreamParams& StreamParams::operator=(const StreamParams&) = default;
StreamParams& StreamParams::operator=(StreamParams&&) = default;
// Field-wise equality, with one deliberate exception: RID order is ignored.
bool StreamParams::operator==(const StreamParams& other) const {
  return (id == other.id && ssrcs == other.ssrcs &&
          ssrc_groups == other.ssrc_groups && cname == other.cname &&
          stream_ids_ == other.stream_ids_ &&
          // RIDs are not required to be in the same order for equality.
          absl::c_is_permutation(rids_, other.rids_));
}
// Renders the stream for logging as
// "{id:..;ssrcs:[..];ssrc_groups:..;cname:..;stream_ids:..;rids:[..];}".
// Empty id/cname and empty rids are omitted entirely.
std::string StreamParams::ToString() const {
  char buf[2 * 1024];
  rtc::SimpleStringBuilder sb(buf);
  sb << "{";
  if (!id.empty()) {
    sb << "id:" << id << ";";
  }
  AppendSsrcs(ssrcs, &sb);
  sb << ";";
  AppendSsrcGroups(ssrc_groups, &sb);
  sb << ";";
  if (!cname.empty()) {
    sb << "cname:" << cname << ";";
  }
  AppendStreamIds(stream_ids_, &sb);
  sb << ";";
  if (!rids_.empty()) {
    AppendRids(rids_, &sb);
    sb << ";";
  }
  sb << "}";
  return sb.str();
}
// Populates this stream with `num_layers` freshly generated primary SSRCs,
// adds a SIM group when there is more than one layer, and pairs every
// primary SSRC with a FID and/or FEC-FR secondary SSRC when requested.
void StreamParams::GenerateSsrcs(int num_layers,
                                 bool generate_fid,
                                 bool generate_fec_fr,
                                 rtc::UniqueRandomIdGenerator* ssrc_generator) {
  RTC_DCHECK_GE(num_layers, 0);
  RTC_DCHECK(ssrc_generator);
  std::vector<uint32_t> primary_ssrcs;
  for (int i = 0; i < num_layers; ++i) {
    uint32_t ssrc = ssrc_generator->GenerateId();
    primary_ssrcs.push_back(ssrc);
    add_ssrc(ssrc);
  }
  // A SIM group is only meaningful with at least two layers.
  if (num_layers > 1) {
    SsrcGroup simulcast(kSimSsrcGroupSemantics, primary_ssrcs);
    ssrc_groups.push_back(simulcast);
  }
  if (generate_fid) {
    for (uint32_t ssrc : primary_ssrcs) {
      AddFidSsrc(ssrc, ssrc_generator->GenerateId());
    }
  }
  if (generate_fec_fr) {
    for (uint32_t ssrc : primary_ssrcs) {
      AddFecFrSsrc(ssrc, ssrc_generator->GenerateId());
    }
  }
}
// Appends this stream's primary SSRCs to `ssrcs`: the members of the SIM
// group when simulcast is used, otherwise the first SSRC.
void StreamParams::GetPrimarySsrcs(std::vector<uint32_t>* ssrcs) const {
  const SsrcGroup* sim_group = get_ssrc_group(kSimSsrcGroupSemantics);
  // `nullptr` replaces the legacy NULL macro (file already uses nullptr
  // elsewhere, e.g. in ApplyPacketOptions-era code).
  if (sim_group == nullptr) {
    // NOTE(review): when the stream has no SSRCs at all, first_ssrc() is 0
    // and a zero SSRC is appended — callers appear to rely on streams always
    // having at least one SSRC; confirm.
    ssrcs->push_back(first_ssrc());
  } else {
    ssrcs->insert(ssrcs->end(), sim_group->ssrcs.begin(),
                  sim_group->ssrcs.end());
  }
}
// For every primary SSRC that is paired under `semantics`, appends the
// paired secondary SSRC to `secondary_ssrcs`; unpaired primaries are skipped.
void StreamParams::GetSecondarySsrcs(
    const std::string& semantics,
    const std::vector<uint32_t>& primary_ssrcs,
    std::vector<uint32_t>* secondary_ssrcs) const {
  for (uint32_t primary : primary_ssrcs) {
    uint32_t secondary = 0;
    if (GetSecondarySsrc(semantics, primary, &secondary)) {
      secondary_ssrcs->push_back(secondary);
    }
  }
}
// Convenience wrapper: collects the FID secondary SSRCs paired with
// `primary_ssrcs` into `fid_ssrcs`.
void StreamParams::GetFidSsrcs(const std::vector<uint32_t>& primary_ssrcs,
                               std::vector<uint32_t>* fid_ssrcs) const {
  // Plain call instead of `return <void expr>;` — clearer for a void
  // function and behaviorally identical.
  GetSecondarySsrcs(kFidSsrcGroupSemantics, primary_ssrcs, fid_ssrcs);
}
bool StreamParams::AddSecondarySsrc(const std::string& semantics,
uint32_t primary_ssrc,
uint32_t secondary_ssrc) {
if (!has_ssrc(primary_ssrc)) {
return false;
}
ssrcs.push_back(secondary_ssrc);
ssrc_groups.push_back(SsrcGroup(semantics, {primary_ssrc, secondary_ssrc}));
return true;
}
// Looks up the secondary SSRC paired with `primary_ssrc` under `semantics`.
// Only groups with at least two SSRCs whose first element is the primary
// qualify; the second element is the pairing.
bool StreamParams::GetSecondarySsrc(const std::string& semantics,
                                    uint32_t primary_ssrc,
                                    uint32_t* secondary_ssrc) const {
  for (const SsrcGroup& group : ssrc_groups) {
    const bool matches = group.has_semantics(semantics) &&
                         group.ssrcs.size() >= 2 &&
                         group.ssrcs[0] == primary_ssrc;
    if (matches) {
      *secondary_ssrc = group.ssrcs[1];
      return true;
    }
  }
  return false;
}
// Returns a copy of the associated stream ids.
std::vector<std::string> StreamParams::stream_ids() const {
  return stream_ids_;
}
// Replaces the associated stream ids wholesale.
void StreamParams::set_stream_ids(const std::vector<std::string>& stream_ids) {
  stream_ids_ = stream_ids;
}
// First stream id, or "" when none are set.
std::string StreamParams::first_stream_id() const {
  return stream_ids_.empty() ? "" : stream_ids_[0];
}
} // namespace cricket

View file

@ -0,0 +1,321 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains structures for describing SSRCs from a media source such
// as a MediaStreamTrack when it is sent across an RTP session. Multiple media
// sources may be sent across the same RTP session, each of them will be
// described by one StreamParams object
// SsrcGroup is used to describe the relationship between the SSRCs that
// are used for this media source.
// E.x: Consider a source that is sent as 3 simulcast streams
// Let the simulcast elements have SSRC 10, 20, 30.
// Let each simulcast element use FEC and let the protection packets have
// SSRC 11,21,31.
// To describe this 4 SsrcGroups are needed,
// StreamParams would then contain ssrc = {10,11,20,21,30,31} and
// ssrc_groups = {{SIM,{10,20,30}}, {FEC,{10,11}}, {FEC,{20,21}}, {FEC,{30,31}}}
// Please see RFC 5576.
// A spec-compliant way to achieve this is to use RIDs and Simulcast attribute
// instead of the ssrc-group. In this method, the StreamParam object will
// have multiple RidDescriptions, each corresponding to a simulcast layer
// and the media section will have a simulcast attribute that indicates
// that these layers are for the same source. This also removes the extra
// lines for redundancy streams, as the same RIDs appear in the redundancy
// packets.
// Note: in the spec compliant simulcast scenario, some of the RIDs might be
// alternatives for one another (such as different encodings for same data).
// In the context of the StreamParams class, the notion of alternatives does
// not exist and all the RIDs will describe different layers of the same source.
// When the StreamParams class is used to configure the media engine, simulcast
// considerations will be used to remove the alternative layers outside of this
// class.
// As an example, let the simulcast layers have RID 10, 20, 30.
// StreamParams would contain rid = { 10, 20, 30 }.
// MediaSection would contain SimulcastDescription specifying these rids.
// a=simulcast:send 10;20;30 (or a=simulcast:send 10,20;30 or similar).
// See https://tools.ietf.org/html/draft-ietf-mmusic-sdp-simulcast-13
// and https://tools.ietf.org/html/draft-ietf-mmusic-rid-15.
#ifndef MEDIA_BASE_STREAM_PARAMS_H_
#define MEDIA_BASE_STREAM_PARAMS_H_
#include <stddef.h>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "media/base/rid_description.h"
#include "rtc_base/unique_id_generator.h"
namespace cricket {
extern const char kFecSsrcGroupSemantics[];
extern const char kFecFrSsrcGroupSemantics[];
extern const char kFidSsrcGroupSemantics[];
extern const char kSimSsrcGroupSemantics[];
// A named group of SSRCs, corresponding to one SDP "a=ssrc-group" line:
// `semantics` describes how the listed SSRCs relate to each other.
struct SsrcGroup {
  SsrcGroup(const std::string& usage, const std::vector<uint32_t>& ssrcs);
  SsrcGroup(const SsrcGroup&);
  SsrcGroup(SsrcGroup&&);
  ~SsrcGroup();
  SsrcGroup& operator=(const SsrcGroup&);
  SsrcGroup& operator=(SsrcGroup&&);
  bool operator==(const SsrcGroup& other) const {
    return (semantics == other.semantics && ssrcs == other.ssrcs);
  }
  bool operator!=(const SsrcGroup& other) const { return !(*this == other); }
  // True when `semantics` matches and the group has at least one SSRC.
  bool has_semantics(const std::string& semantics) const;
  std::string ToString() const;
  std::string semantics;        // e.g. FID, FEC, SIM.
  std::vector<uint32_t> ssrcs;  // SSRCs of this type.
};
// StreamParams is used to represent a sender/track in a SessionDescription.
// In Plan B, this means that multiple StreamParams can exist within one
// MediaContentDescription, while in UnifiedPlan this means that there is one
// StreamParams per MediaContentDescription.
struct StreamParams {
  StreamParams();
  StreamParams(const StreamParams&);
  StreamParams(StreamParams&&);
  ~StreamParams();
  StreamParams& operator=(const StreamParams&);
  StreamParams& operator=(StreamParams&&);

  // Creates a StreamParams that carries a single SSRC and nothing else; this
  // is the shape used by pre-Unified-Plan ("legacy") descriptions.
  static StreamParams CreateLegacy(uint32_t ssrc) {
    StreamParams stream;
    stream.ssrcs.push_back(ssrc);
    return stream;
  }

  bool operator==(const StreamParams& other) const;
  bool operator!=(const StreamParams& other) const { return !(*this == other); }

  // Returns the first signaled SSRC, or 0 when no SSRCs are signaled.
  uint32_t first_ssrc() const {
    if (ssrcs.empty()) {
      return 0;
    }
    return ssrcs[0];
  }
  bool has_ssrcs() const { return !ssrcs.empty(); }
  // Linear search; SSRC lists are small (one per simulcast layer plus
  // redundancy streams).
  bool has_ssrc(uint32_t ssrc) const {
    return absl::c_linear_search(ssrcs, ssrc);
  }
  void add_ssrc(uint32_t ssrc) { ssrcs.push_back(ssrc); }
  bool has_ssrc_groups() const { return !ssrc_groups.empty(); }
  bool has_ssrc_group(const std::string& semantics) const {
    return (get_ssrc_group(semantics) != NULL);
  }
  // Returns the first SSRC group with the given semantics (e.g. "FID", "SIM"),
  // or NULL if no such group exists.
  const SsrcGroup* get_ssrc_group(const std::string& semantics) const {
    for (const SsrcGroup& ssrc_group : ssrc_groups) {
      if (ssrc_group.has_semantics(semantics)) {
        return &ssrc_group;
      }
    }
    return NULL;
  }

  // Convenience function to add an FID ssrc for a primary_ssrc
  // that's already been added.
  bool AddFidSsrc(uint32_t primary_ssrc, uint32_t fid_ssrc) {
    return AddSecondarySsrc(kFidSsrcGroupSemantics, primary_ssrc, fid_ssrc);
  }

  // Convenience function to lookup the FID ssrc for a primary_ssrc.
  // Returns false if primary_ssrc not found or FID not defined for it.
  bool GetFidSsrc(uint32_t primary_ssrc, uint32_t* fid_ssrc) const {
    return GetSecondarySsrc(kFidSsrcGroupSemantics, primary_ssrc, fid_ssrc);
  }

  // Convenience function to add an FEC-FR ssrc for a primary_ssrc
  // that's already been added.
  bool AddFecFrSsrc(uint32_t primary_ssrc, uint32_t fecfr_ssrc) {
    return AddSecondarySsrc(kFecFrSsrcGroupSemantics, primary_ssrc, fecfr_ssrc);
  }

  // Convenience function to lookup the FEC-FR ssrc for a primary_ssrc.
  // Returns false if primary_ssrc not found or FEC-FR not defined for it.
  bool GetFecFrSsrc(uint32_t primary_ssrc, uint32_t* fecfr_ssrc) const {
    return GetSecondarySsrc(kFecFrSsrcGroupSemantics, primary_ssrc, fecfr_ssrc);
  }

  // Convenience function to populate the StreamParams with the requested number
  // of SSRCs along with accompanying FID and FEC-FR ssrcs if requested.
  // SSRCs are generated using the given generator.
  void GenerateSsrcs(int num_layers,
                     bool generate_fid,
                     bool generate_fec_fr,
                     rtc::UniqueRandomIdGenerator* ssrc_generator);

  // Convenience to get all the SIM SSRCs if there are SIM ssrcs, or
  // the first SSRC otherwise.
  void GetPrimarySsrcs(std::vector<uint32_t>* ssrcs) const;

  // Convenience to get all the secondary SSRCs for the given primary ssrcs
  // of a particular semantic.
  // If a given primary SSRC does not have a secondary SSRC, the list of
  // secondary SSRCS will be smaller than the list of primary SSRCs.
  void GetSecondarySsrcs(const std::string& semantic,
                         const std::vector<uint32_t>& primary_ssrcs,
                         std::vector<uint32_t>* fid_ssrcs) const;

  // Convenience to get all the FID SSRCs for the given primary ssrcs.
  // If a given primary SSRC does not have a FID SSRC, the list of FID
  // SSRCS will be smaller than the list of primary SSRCs.
  void GetFidSsrcs(const std::vector<uint32_t>& primary_ssrcs,
                   std::vector<uint32_t>* fid_ssrcs) const;

  // Stream ids serialized to SDP.
  std::vector<std::string> stream_ids() const;
  void set_stream_ids(const std::vector<std::string>& stream_ids);

  // Returns the first stream id or "" if none exist. This method exists only
  // as temporary backwards compatibility with the old sync_label.
  std::string first_stream_id() const;

  std::string ToString() const;

  // A unique identifier of the StreamParams object. When the SDP is created,
  // this comes from the track ID of the sender that the StreamParams object
  // is associated with.
  std::string id;
  // There may be no SSRCs stored in unsignaled case when stream_ids are
  // signaled with a=msid lines.
  std::vector<uint32_t> ssrcs;         // All SSRCs for this source
  std::vector<SsrcGroup> ssrc_groups;  // e.g. FID, FEC, SIM
  std::string cname;                   // RTCP CNAME

  // RID functionality according to
  // https://tools.ietf.org/html/draft-ietf-mmusic-rid-15
  // Each layer can be represented by a RID identifier and can also have
  // restrictions (such as max-width, max-height, etc.)
  // If the track has multiple layers (ex. Simulcast), each layer will be
  // represented by a RID.
  bool has_rids() const { return !rids_.empty(); }
  const std::vector<RidDescription>& rids() const { return rids_; }
  void set_rids(const std::vector<RidDescription>& rids) { rids_ = rids; }

 private:
  // Shared implementation behind AddFidSsrc/AddFecFrSsrc: records
  // `secondary_ssrc` in a two-element ssrc-group with the given semantics.
  bool AddSecondarySsrc(const std::string& semantics,
                        uint32_t primary_ssrc,
                        uint32_t secondary_ssrc);
  // Shared implementation behind GetFidSsrc/GetFecFrSsrc.
  bool GetSecondarySsrc(const std::string& semantics,
                        uint32_t primary_ssrc,
                        uint32_t* secondary_ssrc) const;

  // The stream IDs of the sender that the StreamParams object is associated
  // with. In Plan B this should always be size of 1, while in Unified Plan this
  // could be none or multiple stream IDs.
  std::vector<std::string> stream_ids_;

  std::vector<RidDescription> rids_;
};
// A Stream can be selected by either id or ssrc.
struct StreamSelector {
  explicit StreamSelector(uint32_t ssrc) : ssrc(ssrc) {}

  explicit StreamSelector(const std::string& streamid)
      : ssrc(0), streamid(streamid) {}

  // An ssrc of 0 means "match by stream id"; any other value matches by ssrc.
  bool Matches(const StreamParams& stream) const {
    return ssrc == 0 ? stream.id == streamid : stream.has_ssrc(ssrc);
  }

  uint32_t ssrc;
  std::string streamid;
};
typedef std::vector<StreamParams> StreamParamsVec;
template <class Condition>
const StreamParams* GetStream(const StreamParamsVec& streams,
Condition condition) {
auto found = absl::c_find_if(streams, condition);
return found == streams.end() ? nullptr : &(*found);
}
template <class Condition>
StreamParams* GetStream(StreamParamsVec& streams, Condition condition) {
auto found = absl::c_find_if(streams, condition);
return found == streams.end() ? nullptr : &(*found);
}
// True if any stream in `streams` has no signaled SSRCs (the unsignaled
// a=msid-only case).
inline bool HasStreamWithNoSsrcs(const StreamParamsVec& streams) {
  return GetStream(streams,
                   [](const StreamParams& sp) { return !sp.has_ssrcs(); });
}

// Returns the stream carrying `ssrc`, or nullptr.
inline const StreamParams* GetStreamBySsrc(const StreamParamsVec& streams,
                                           uint32_t ssrc) {
  return GetStream(streams, [ssrc](const StreamParams& sp) {
    return sp.has_ssrc(ssrc);
  });
}

// Returns the stream whose id equals `id`, or nullptr. Const overload.
inline const StreamParams* GetStreamByIds(const StreamParamsVec& streams,
                                          const std::string& id) {
  return GetStream(streams, [&id](const StreamParams& sp) {
    return sp.id == id;
  });
}

// Mutable overload of the above.
inline StreamParams* GetStreamByIds(StreamParamsVec& streams,
                                    const std::string& id) {
  return GetStream(streams, [&id](const StreamParams& sp) {
    return sp.id == id;
  });
}

// Returns the first stream matched by `selector` (id or ssrc), or nullptr.
inline const StreamParams* GetStream(const StreamParamsVec& streams,
                                     const StreamSelector& selector) {
  return GetStream(streams, [&selector](const StreamParams& sp) {
    return selector.Matches(sp);
  });
}
// Removes every stream satisfying `condition` via the erase-remove idiom.
// Returns true if at least one stream was removed.
// Note: std::remove_if lives in <algorithm>; this header must include it
// directly instead of relying on it arriving transitively.
template <class Condition>
bool RemoveStream(StreamParamsVec* streams, Condition condition) {
  auto new_end = std::remove_if(streams->begin(), streams->end(), condition);
  if (new_end == streams->end())
    return false;
  streams->erase(new_end, streams->end());
  return true;
}
// Removes the stream(s) matched by `selector` from `streams`. Returns true if
// a stream is found and removed.
inline bool RemoveStream(StreamParamsVec* streams,
                         const StreamSelector& selector) {
  return RemoveStream(streams, [&selector](const StreamParams& sp) {
    return selector.Matches(sp);
  });
}

// Removes every stream carrying `ssrc`. Returns true if anything was removed.
inline bool RemoveStreamBySsrc(StreamParamsVec* streams, uint32_t ssrc) {
  return RemoveStream(streams, [ssrc](const StreamParams& sp) {
    return sp.has_ssrc(ssrc);
  });
}

// Removes every stream whose id equals `id`. Returns true if anything was
// removed.
inline bool RemoveStreamByIds(StreamParamsVec* streams, const std::string& id) {
  return RemoveStream(streams, [&id](const StreamParams& sp) {
    return sp.id == id;
  });
}
} // namespace cricket
#endif // MEDIA_BASE_STREAM_PARAMS_H_

View file

@ -0,0 +1,55 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/test_utils.h"
#include <cstdint>
#include "api/video/video_frame.h"
#include "api/video/video_source_interface.h"
namespace cricket {
// Builds a simulcast StreamParams: all `ssrcs` are listed on the stream and
// grouped under a single SIM ssrc-group, with `cname` as the RTCP CNAME.
cricket::StreamParams CreateSimStreamParams(
    const std::string& cname,
    const std::vector<uint32_t>& ssrcs) {
  cricket::StreamParams sp;
  sp.ssrcs = ssrcs;
  sp.ssrc_groups.push_back(
      cricket::SsrcGroup(cricket::kSimSsrcGroupSemantics, ssrcs));
  sp.cname = cname;
  return sp;
}
// There should be an rtx_ssrc per ssrc.
// Builds a simulcast StreamParams (see CreateSimStreamParams) and pairs each
// primary SSRC with the RTX SSRC at the same index via a FID ssrc-group.
// Fix: the loop previously iterated over `ssrcs.size()` while indexing
// `rtx_ssrcs`, reading out of bounds whenever fewer RTX SSRCs than primary
// SSRCs were supplied. Only the first min(|ssrcs|, |rtx_ssrcs|) pairs are
// created now; extra entries on either side are ignored.
cricket::StreamParams CreateSimWithRtxStreamParams(
    const std::string& cname,
    const std::vector<uint32_t>& ssrcs,
    const std::vector<uint32_t>& rtx_ssrcs) {
  cricket::StreamParams sp = CreateSimStreamParams(cname, ssrcs);
  for (size_t i = 0; i < ssrcs.size() && i < rtx_ssrcs.size(); ++i) {
    sp.AddFidSsrc(ssrcs[i], rtx_ssrcs[i]);
  }
  return sp;
}
// There should be one fec ssrc per ssrc.
// Builds a StreamParams carrying a single primary SSRC plus a FlexFEC SSRC
// tied to it through a FEC-FR ssrc-group.
cricket::StreamParams CreatePrimaryWithFecFrStreamParams(
    const std::string& cname,
    uint32_t primary_ssrc,
    uint32_t flexfec_ssrc) {
  cricket::StreamParams sp;
  sp.cname = cname;
  sp.ssrcs = {primary_ssrc};
  sp.AddFecFrSsrc(primary_ssrc, flexfec_ssrc);
  return sp;
}
} // namespace cricket

View file

@ -0,0 +1,56 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_TEST_UTILS_H_
#define MEDIA_BASE_TEST_UTILS_H_
#include <string>
#include <vector>
#include "media/base/media_channel.h"
#include "media/base/video_common.h"
#include "rtc_base/arraysize.h"
namespace webrtc {
class VideoFrame;
}
namespace cricket {
// Returns size of 420 image with rounding on chroma for odd sizes.
// Every macro argument is fully parenthesized so that expression arguments
// (e.g. I420_SIZE(w + pad, h)) expand correctly; the previous definition used
// bare `w * h` and mis-evaluated such calls.
#define I420_SIZE(w, h) \
  ((w) * (h) + ((((w) + 1) / 2) * (((h) + 1) / 2)) * 2)
// Returns size of ARGB image.
#define ARGB_SIZE(w, h) ((w) * (h) * 4)
// Copies the first `s` elements of the C array `a` into a std::vector<T>.
template <class T>
inline std::vector<T> MakeVector(const T a[], size_t s) {
  std::vector<T> result(a, a + s);
  return result;
}
#define MAKE_VECTOR(a) cricket::MakeVector(a, arraysize(a))
// Create Simulcast StreamParams with given `ssrcs` and `cname`.
cricket::StreamParams CreateSimStreamParams(const std::string& cname,
const std::vector<uint32_t>& ssrcs);
// Create Simulcast stream with given `ssrcs` and `rtx_ssrcs`.
// The number of `rtx_ssrcs` must match number of `ssrcs`.
cricket::StreamParams CreateSimWithRtxStreamParams(
const std::string& cname,
const std::vector<uint32_t>& ssrcs,
const std::vector<uint32_t>& rtx_ssrcs);
// Create StreamParams with single primary SSRC and corresponding FlexFEC SSRC.
cricket::StreamParams CreatePrimaryWithFecFrStreamParams(
const std::string& cname,
uint32_t primary_ssrc,
uint32_t flexfec_ssrc);
} // namespace cricket
#endif // MEDIA_BASE_TEST_UTILS_H_

View file

@ -0,0 +1,126 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/turn_utils.h"
#include "api/transport/stun.h"
#include "rtc_base/byte_order.h"
namespace cricket {
namespace {

// TURN ChannelData messages carry a fixed 4-byte header (channel number +
// length) ahead of the application payload.
constexpr size_t kTurnChannelHeaderLength = 4;

// ChannelData packets are identified by the two most significant bits of the
// first byte being 0b01.
bool IsTurnChannelData(const uint8_t* data, size_t length) {
  if (length < kTurnChannelHeaderLength) {
    return false;
  }
  return (data[0] & 0xC0) == 0x40;
}

// A Send Indication is a STUN message whose 16-bit type field equals
// TURN_SEND_INDICATION.
bool IsTurnSendIndicationPacket(const uint8_t* data, size_t length) {
  return length >= kStunHeaderSize &&
         rtc::GetBE16(data) == TURN_SEND_INDICATION;
}

}  // namespace
// Locates the application payload inside `packet`:
// - TURN ChannelData message: payload starts right after the 4-byte header.
// - TURN Send Indication: payload is the value of the STUN DATA attribute.
// - Anything else: treated as a plain packet; the whole packet is content.
// On success returns true and fills `content_position`/`content_size`;
// returns false for malformed TURN packets (bad lengths, missing DATA
// attribute). All reads are bounds-checked against `packet_size`.
bool UnwrapTurnPacket(const uint8_t* packet,
                      size_t packet_size,
                      size_t* content_position,
                      size_t* content_size) {
  if (IsTurnChannelData(packet, packet_size)) {
    // Turn Channel Message header format.
    //   0                   1                   2                   3
    //   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |         Channel Number        |            Length             |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |                                                               |
    //  /                       Application Data                        /
    //  /                                                               /
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // The declared payload length must fit inside the packet; trailing
    // padding beyond `length` is permitted and ignored.
    size_t length = rtc::GetBE16(&packet[2]);
    if (length + kTurnChannelHeaderLength > packet_size) {
      return false;
    }
    *content_position = kTurnChannelHeaderLength;
    *content_size = length;
    return true;
  }

  if (IsTurnSendIndicationPacket(packet, packet_size)) {
    // Validate STUN message length. For Send Indications the STUN length
    // field must account exactly for everything after the 20-byte header.
    const size_t stun_message_length = rtc::GetBE16(&packet[2]);
    if (stun_message_length + kStunHeaderSize != packet_size) {
      return false;
    }

    // First skip mandatory stun header which is of 20 bytes.
    size_t pos = kStunHeaderSize;
    // Loop through STUN attributes until we find STUN DATA attribute.
    while (pos < packet_size) {
      // Keep reading STUN attributes until we hit DATA attribute.
      // Attribute will be a TLV structure.
      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
      // |         Type                  |            Length             |
      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
      // |                         Value (variable)                ....
      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
      // The value in the length field MUST contain the length of the Value
      // part of the attribute, prior to padding, measured in bytes. Since
      // STUN aligns attributes on 32-bit boundaries, attributes whose content
      // is not a multiple of 4 bytes are padded with 1, 2, or 3 bytes of
      // padding so that its value contains a multiple of 4 bytes. The
      // padding bits are ignored, and may be any value.
      uint16_t attr_type, attr_length;
      const int kAttrHeaderLength = sizeof(attr_type) + sizeof(attr_length);

      // Bail out if there is not even room for the 4-byte TLV header.
      if (packet_size < pos + kAttrHeaderLength) {
        return false;
      }

      // Getting attribute type and length.
      attr_type = rtc::GetBE16(&packet[pos]);
      attr_length = rtc::GetBE16(&packet[pos + sizeof(attr_type)]);

      pos += kAttrHeaderLength;  // Skip STUN_DATA_ATTR header.

      // Checking for bogus attribute length.
      if (pos + attr_length > packet_size) {
        return false;
      }

      if (attr_type == STUN_ATTR_DATA) {
        *content_position = pos;
        *content_size = attr_length;
        return true;
      }

      // Skip this attribute's value plus the padding that rounds it up to a
      // 32-bit boundary.
      pos += attr_length;
      if ((attr_length % 4) != 0) {
        pos += (4 - (attr_length % 4));
      }
    }

    // There is no data attribute present in the message.
    return false;
  }

  // This is not a TURN packet.
  *content_position = 0;
  *content_size = packet_size;
  return true;
}
} // namespace cricket

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_TURN_UTILS_H_
#define MEDIA_BASE_TURN_UTILS_H_
#include <cstddef>
#include <cstdint>
#include "rtc_base/system/rtc_export.h"
namespace cricket {
// Finds data location within a TURN Channel Message or TURN Send Indication
// message.
bool RTC_EXPORT UnwrapTurnPacket(const uint8_t* packet,
size_t packet_size,
size_t* content_position,
size_t* content_size);
} // namespace cricket
#endif // MEDIA_BASE_TURN_UTILS_H_

View file

@ -0,0 +1,470 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_adapter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <utility>
#include "absl/types/optional.h"
#include "media/base/video_common.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/field_trial.h"
namespace {

// A rational scale factor that is applied symmetrically to both width and
// height.
struct Fraction {
  int numerator;
  int denominator;

  // Reduces the fraction to lowest terms in place.
  void DivideByGcd() {
    int g = cricket::GreatestCommonDivisor(numerator, denominator);
    numerator /= g;
    denominator /= g;
  }

  // Determines number of output pixels if both width and height of an input of
  // `input_pixels` pixels is scaled with the fraction numerator / denominator.
  // The multiplication is done in 64 bits to avoid intermediate overflow.
  int scale_pixel_count(int input_pixels) {
    return (numerator * numerator * static_cast<int64_t>(input_pixels)) /
           (denominator * denominator);
  }
};

// Round `value_to_round` to a multiple of `multiple`. Prefer rounding upwards,
// but never more than `max_value`.
int roundUp(int value_to_round, int multiple, int max_value) {
  const int rounded_value =
      (value_to_round + multiple - 1) / multiple * multiple;
  return rounded_value <= max_value ? rounded_value
                                    : (max_value / multiple * multiple);
}

// Generates a scale factor that makes `input_pixels` close to `target_pixels`,
// but no higher than `max_pixels`.
Fraction FindScale(int input_width,
                   int input_height,
                   int target_pixels,
                   int max_pixels,
                   bool variable_start_scale_factor) {
  // This function only makes sense for a positive target.
  RTC_DCHECK_GT(target_pixels, 0);
  RTC_DCHECK_GT(max_pixels, 0);
  RTC_DCHECK_GE(max_pixels, target_pixels);

  const int input_pixels = input_width * input_height;

  // Don't scale up original.
  if (target_pixels >= input_pixels)
    return Fraction{1, 1};

  Fraction current_scale = Fraction{1, 1};
  Fraction best_scale = Fraction{1, 1};

  if (variable_start_scale_factor) {
    // Start scaling down by 2/3 depending on `input_width` and `input_height`.
    if (input_width % 3 == 0 && input_height % 3 == 0) {
      // 2/3 (then alternates 3/4, 2/3, 3/4,...).
      // 6/6 turns into 2/3 on the first iteration of the loop below
      // (numerator /= 3, denominator /= 2).
      current_scale = Fraction{6, 6};
    }
    if (input_width % 9 == 0 && input_height % 9 == 0) {
      // 2/3, 2/3 (then alternates 3/4, 2/3, 3/4,...).
      // 36/36 yields two consecutive 2/3 steps before the alternation.
      current_scale = Fraction{36, 36};
    }
  }

  // The minimum (absolute) difference between the number of output pixels and
  // the target pixel count.
  int min_pixel_diff = std::numeric_limits<int>::max();
  if (input_pixels <= max_pixels) {
    // Start condition for 1/1 case, if it is less than max.
    min_pixel_diff = std::abs(input_pixels - target_pixels);
  }

  // Alternately scale down by 3/4 and 2/3. This results in fractions which are
  // effectively scalable. For instance, starting at 1280x720 will result in
  // the series (3/4) => 960x540, (1/2) => 640x360, (3/8) => 480x270,
  // (1/4) => 320x180, (3/16) => 240x125, (1/8) => 160x90.
  while (current_scale.scale_pixel_count(input_pixels) > target_pixels) {
    if (current_scale.numerator % 3 == 0 &&
        current_scale.denominator % 2 == 0) {
      // Multiply by 2/3.
      current_scale.numerator /= 3;
      current_scale.denominator /= 2;
    } else {
      // Multiply by 3/4.
      current_scale.numerator *= 3;
      current_scale.denominator *= 4;
    }

    // Track the candidate (within the max_pixels budget) that lands closest
    // to the target pixel count.
    int output_pixels = current_scale.scale_pixel_count(input_pixels);
    if (output_pixels <= max_pixels) {
      int diff = std::abs(target_pixels - output_pixels);
      if (diff < min_pixel_diff) {
        min_pixel_diff = diff;
        best_scale = current_scale;
      }
    }
  }
  best_scale.DivideByGcd();

  return best_scale;
}

// Returns `in` with first/second swapped, or nullopt when `in` is empty.
// Used to compare a landscape aspect ratio with its portrait counterpart.
absl::optional<std::pair<int, int>> Swap(
    const absl::optional<std::pair<int, int>>& in) {
  if (!in) {
    return absl::nullopt;
  }
  return std::make_pair(in->second, in->first);
}

}  // namespace
namespace cricket {
// Constructs an adapter whose output width/height must be divisible by
// `source_resolution_alignment`. All counters start at zero and all
// resolution/framerate requests start unconstrained (int max).
VideoAdapter::VideoAdapter(int source_resolution_alignment)
    : frames_in_(0),
      frames_out_(0),
      frames_scaled_(0),
      adaption_changes_(0),
      previous_width_(0),
      previous_height_(0),
      // Field trial acts as a kill switch for the variable start scale factor
      // behavior in FindScale().
      variable_start_scale_factor_(!webrtc::field_trial::IsDisabled(
          "WebRTC-Video-VariableStartScaleFactor")),
      source_resolution_alignment_(source_resolution_alignment),
      // Effective alignment; later combined with sink wants in OnSinkWants().
      resolution_alignment_(source_resolution_alignment),
      resolution_request_target_pixel_count_(std::numeric_limits<int>::max()),
      resolution_request_max_pixel_count_(std::numeric_limits<int>::max()),
      max_framerate_request_(std::numeric_limits<int>::max()) {}

// Default adapter: no alignment requirement beyond 1.
VideoAdapter::VideoAdapter() : VideoAdapter(1) {}

VideoAdapter::~VideoAdapter() {}
// Returns true if the frame at `in_timestamp_ns` should be dropped to honor
// the current framerate caps. Called from AdaptFrameResolution() while
// `mutex_` is held.
bool VideoAdapter::DropFrame(int64_t in_timestamp_ns) {
  // The effective cap is the stricter of the sink request and the output
  // format request (when the latter specifies a max fps).
  const int max_fps =
      std::min(max_framerate_request_,
               output_format_request_.max_fps.value_or(max_framerate_request_));
  framerate_controller_.SetMaxFramerate(max_fps);
  return framerate_controller_.ShouldDropFrame(in_timestamp_ns);
}
// Computes how an incoming in_width x in_height frame should be cropped
// (`cropped_width`/`cropped_height`, applied first) and then scaled
// (`out_width`/`out_height`) to satisfy the current aspect-ratio, pixel-count,
// alignment and framerate constraints. Returns false when the frame should be
// dropped instead. Thread safe; takes `mutex_`.
bool VideoAdapter::AdaptFrameResolution(int in_width,
                                        int in_height,
                                        int64_t in_timestamp_ns,
                                        int* cropped_width,
                                        int* cropped_height,
                                        int* out_width,
                                        int* out_height) {
  webrtc::MutexLock lock(&mutex_);
  ++frames_in_;

  // The max output pixel count is the minimum of the requests from
  // OnOutputFormatRequest and OnResolutionFramerateRequest.
  int max_pixel_count = resolution_request_max_pixel_count_;

  // Select target aspect ratio and max pixel count depending on input frame
  // orientation.
  absl::optional<std::pair<int, int>> target_aspect_ratio;
  if (in_width > in_height) {
    target_aspect_ratio = output_format_request_.target_landscape_aspect_ratio;
    if (output_format_request_.max_landscape_pixel_count)
      max_pixel_count = std::min(
          max_pixel_count, *output_format_request_.max_landscape_pixel_count);
  } else {
    target_aspect_ratio = output_format_request_.target_portrait_aspect_ratio;
    if (output_format_request_.max_portrait_pixel_count)
      max_pixel_count = std::min(
          max_pixel_count, *output_format_request_.max_portrait_pixel_count);
  }

  // The target may never exceed the max.
  int target_pixel_count =
      std::min(resolution_request_target_pixel_count_, max_pixel_count);

  // Drop the input frame if necessary.
  if (max_pixel_count <= 0 || DropFrame(in_timestamp_ns)) {
    // Show VAdapt log every 90 frames dropped. (3 seconds)
    if ((frames_in_ - frames_out_) % 90 == 0) {
      // TODO(fbarchard): Reduce to LS_VERBOSE when adapter info is not needed
      // in default calls.
      RTC_LOG(LS_INFO) << "VAdapt Drop Frame: scaled " << frames_scaled_
                       << " / out " << frames_out_ << " / in " << frames_in_
                       << " Changes: " << adaption_changes_
                       << " Input: " << in_width << "x" << in_height
                       << " timestamp: " << in_timestamp_ns
                       << " Output fps: " << max_framerate_request_ << "/"
                       << output_format_request_.max_fps.value_or(-1)
                       << " alignment: " << resolution_alignment_;
    }
    // Drop frame.
    return false;
  }

  // Calculate how the input should be cropped. With no (or degenerate) target
  // aspect ratio, the full frame is kept; otherwise crop the dimension that
  // exceeds the requested aspect.
  if (!target_aspect_ratio || target_aspect_ratio->first <= 0 ||
      target_aspect_ratio->second <= 0) {
    *cropped_width = in_width;
    *cropped_height = in_height;
  } else {
    const float requested_aspect =
        target_aspect_ratio->first /
        static_cast<float>(target_aspect_ratio->second);
    *cropped_width =
        std::min(in_width, static_cast<int>(in_height * requested_aspect));
    *cropped_height =
        std::min(in_height, static_cast<int>(in_width / requested_aspect));
  }
  const Fraction scale =
      FindScale(*cropped_width, *cropped_height, target_pixel_count,
                max_pixel_count, variable_start_scale_factor_);
  // Adjust cropping slightly to get correctly aligned output size and a
  // perfect scale factor: the cropped size must be a multiple of
  // scale.denominator * resolution_alignment_ so the scaled output stays
  // aligned.
  *cropped_width = roundUp(*cropped_width,
                           scale.denominator * resolution_alignment_, in_width);
  *cropped_height = roundUp(
      *cropped_height, scale.denominator * resolution_alignment_, in_height);
  RTC_DCHECK_EQ(0, *cropped_width % scale.denominator);
  RTC_DCHECK_EQ(0, *cropped_height % scale.denominator);

  // Calculate final output size.
  *out_width = *cropped_width / scale.denominator * scale.numerator;
  *out_height = *cropped_height / scale.denominator * scale.numerator;
  RTC_DCHECK_EQ(0, *out_width % resolution_alignment_);
  RTC_DCHECK_EQ(0, *out_height % resolution_alignment_);

  ++frames_out_;
  if (scale.numerator != scale.denominator)
    ++frames_scaled_;

  // Log whenever the adapted output resolution changes.
  if (previous_width_ &&
      (previous_width_ != *out_width || previous_height_ != *out_height)) {
    ++adaption_changes_;
    RTC_LOG(LS_INFO) << "Frame size changed: scaled " << frames_scaled_
                     << " / out " << frames_out_ << " / in " << frames_in_
                     << " Changes: " << adaption_changes_
                     << " Input: " << in_width << "x" << in_height
                     << " Scale: " << scale.numerator << "/"
                     << scale.denominator << " Output: " << *out_width << "x"
                     << *out_height << " fps: " << max_framerate_request_ << "/"
                     << output_format_request_.max_fps.value_or(-1)
                     << " alignment: " << resolution_alignment_;
  }

  previous_width_ = *out_width;
  previous_height_ = *out_height;

  return true;
}
// Translates a legacy VideoFormat request into the aspect-ratio /
// max-pixel-count / max-fps form and forwards it to the three-argument
// overload. An empty `format` clears all three constraints.
void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<VideoFormat>& format) {
  if (!format) {
    OnOutputFormatRequest(absl::nullopt, absl::nullopt, absl::nullopt);
    return;
  }
  const absl::optional<std::pair<int, int>> aspect_ratio(
      std::make_pair(format->width, format->height));
  const absl::optional<int> pixel_count(format->width * format->height);
  absl::optional<int> fps;
  if (format->interval > 0) {
    fps = rtc::kNumNanosecsPerSec / format->interval;
  }
  OnOutputFormatRequest(aspect_ratio, pixel_count, fps);
}
// Applies the same `target_aspect_ratio`, `max_pixel_count` and `max_fps` to
// both orientations: a landscape ratio (long side first) and its portrait
// mirror image are derived and forwarded to the five-argument overload.
void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<std::pair<int, int>>& target_aspect_ratio,
    const absl::optional<int>& max_pixel_count,
    const absl::optional<int>& max_fps) {
  absl::optional<std::pair<int, int>> landscape_ratio;
  absl::optional<std::pair<int, int>> portrait_ratio;
  if (target_aspect_ratio && target_aspect_ratio->first > 0 &&
      target_aspect_ratio->second > 0) {
    // Maintain input orientation: express the request once with the longer
    // side first (landscape) and once with it last (portrait).
    const int long_side =
        std::max(target_aspect_ratio->first, target_aspect_ratio->second);
    const int short_side =
        std::min(target_aspect_ratio->first, target_aspect_ratio->second);
    landscape_ratio = std::make_pair(long_side, short_side);
    portrait_ratio = std::make_pair(short_side, long_side);
  }
  OnOutputFormatRequest(landscape_ratio, max_pixel_count, portrait_ratio,
                        max_pixel_count, max_fps);
}
// Records a new output format request (per-orientation aspect ratios, pixel
// caps and fps cap). If a requested_resolution-based request is currently
// active (see OnSinkWants), the new request is stashed instead of applied so
// it can be restored later. Thread safe; takes `mutex_`.
void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<std::pair<int, int>>& target_landscape_aspect_ratio,
    const absl::optional<int>& max_landscape_pixel_count,
    const absl::optional<std::pair<int, int>>& target_portrait_aspect_ratio,
    const absl::optional<int>& max_portrait_pixel_count,
    const absl::optional<int>& max_fps) {
  webrtc::MutexLock lock(&mutex_);

  OutputFormatRequest request = {
      .target_landscape_aspect_ratio = target_landscape_aspect_ratio,
      .max_landscape_pixel_count = max_landscape_pixel_count,
      .target_portrait_aspect_ratio = target_portrait_aspect_ratio,
      .max_portrait_pixel_count = max_portrait_pixel_count,
      .max_fps = max_fps};

  if (stashed_output_format_request_) {
    // Save the output format request for later use in case the encoder making
    // this call would become active, because currently all active encoders use
    // requested_resolution instead.
    stashed_output_format_request_ = request;
    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
                     << stashed_output_format_request_->ToString();
  } else {
    output_format_request_ = request;
    RTC_LOG(LS_INFO) << "Setting output_format_request_: "
                     << output_format_request_.ToString();
  }

  // Any change of constraints restarts framerate accounting.
  framerate_controller_.Reset();
}
// Applies new aggregated sink constraints: pixel-count targets/caps, max
// framerate and resolution alignment. When `requested_resolution` is in use
// by all active encoders, it overrides (and stashes) the current output
// format request; when it stops being used, the stashed request is restored.
// Thread safe; takes `mutex_`.
void VideoAdapter::OnSinkWants(const rtc::VideoSinkWants& sink_wants) {
  webrtc::MutexLock lock(&mutex_);
  resolution_request_max_pixel_count_ = sink_wants.max_pixel_count;
  resolution_request_target_pixel_count_ =
      sink_wants.target_pixel_count.value_or(
          resolution_request_max_pixel_count_);
  max_framerate_request_ = sink_wants.max_framerate_fps;
  // Output must satisfy both the source's and the sink's alignment, hence the
  // least common multiple.
  resolution_alignment_ = cricket::LeastCommonMultiple(
      source_resolution_alignment_, sink_wants.resolution_alignment);

  // `aggregates` is only filled in by VideoBroadcaster; without it the
  // requested_resolution logic below cannot be evaluated.
  if (!sink_wants.aggregates) {
    RTC_LOG(LS_WARNING)
        << "These should always be created by VideoBroadcaster!";
    return;
  }

  // If requested_resolution is used, and there are no active encoders
  // that are NOT using requested_resolution (aka newapi), then override
  // calls to OnOutputFormatRequest and use values from requested_resolution
  // instead (combined with qualityscaling based on pixel counts above).
  if (webrtc::field_trial::IsDisabled(
          "WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest")) {
    // kill-switch...
    return;
  }

  if (!sink_wants.requested_resolution) {
    if (stashed_output_format_request_) {
      // because current active_output_format_request is based on
      // requested_resolution logic, while current encoder(s) doesn't want
      // that, we have to restore the stashed request.
      RTC_LOG(LS_INFO) << "Unstashing OnOutputFormatRequest: "
                       << stashed_output_format_request_->ToString();
      output_format_request_ = *stashed_output_format_request_;
      stashed_output_format_request_.reset();
    }
    return;
  }

  // Some active encoder still relies on OnOutputFormatRequest; don't override.
  if (sink_wants.aggregates->any_active_without_requested_resolution) {
    return;
  }

  if (!stashed_output_format_request_) {
    // The active output format request is about to be rewritten by
    // request_resolution. We need to save it for later use in case the
    // encoder which doesn't use request_resolution logic become active in the
    // future.
    stashed_output_format_request_ = output_format_request_;
    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
                     << stashed_output_format_request_->ToString();
  }

  // Rewrite the active request from the requested resolution: same pixel cap
  // for both orientations, aspect ratios mirrored, fps from the sink request.
  auto res = *sink_wants.requested_resolution;
  auto pixel_count = res.width * res.height;
  output_format_request_.target_landscape_aspect_ratio =
      std::make_pair(res.width, res.height);
  output_format_request_.max_landscape_pixel_count = pixel_count;
  output_format_request_.target_portrait_aspect_ratio =
      std::make_pair(res.height, res.width);
  output_format_request_.max_portrait_pixel_count = pixel_count;
  output_format_request_.max_fps = max_framerate_request_;
  RTC_LOG(LS_INFO) << "Setting output_format_request_ based on sink_wants: "
                   << output_format_request_.ToString();
}
// Returns the sink-requested target pixel count (int max when unconstrained).
// Thread safe; takes `mutex_`.
int VideoAdapter::GetTargetPixels() const {
  webrtc::MutexLock lock(&mutex_);
  return resolution_request_target_pixel_count_;
}
// Returns the framerate cap currently enforced by DropFrame(): the minimum of
// the sink's `max_framerate_request_` and the output format request's max
// fps. Returns +infinity when no finite cap has been requested. Thread safe;
// takes `mutex_`.
float VideoAdapter::GetMaxFramerate() const {
  webrtc::MutexLock lock(&mutex_);
  // Minimum of `output_format_request_.max_fps` and `max_framerate_request_`
  // is used to throttle frame-rate.
  const int framerate =
      std::min(max_framerate_request_,
               output_format_request_.max_fps.value_or(max_framerate_request_));
  if (framerate == std::numeric_limits<int>::max()) {
    return std::numeric_limits<float>::infinity();
  }
  // Bug fix: this previously returned `max_framerate_request_`, discarding
  // the `output_format_request_.max_fps` component of the computed minimum,
  // so callers saw a looser cap than DropFrame() actually enforces.
  return framerate;
}
// Renders the request for logging. When the portrait constraints are just the
// mirror image of the landscape ones, a single combined form is printed;
// otherwise both orientations are printed separately.
std::string VideoAdapter::OutputFormatRequest::ToString() const {
  rtc::StringBuilder oss;
  oss << "[ ";
  if (target_landscape_aspect_ratio == Swap(target_portrait_aspect_ratio) &&
      max_landscape_pixel_count == max_portrait_pixel_count) {
    // Symmetric case: one orientation fully determines the other.
    if (target_landscape_aspect_ratio) {
      oss << target_landscape_aspect_ratio->first << "x"
          << target_landscape_aspect_ratio->second;
    } else {
      oss << "unset-resolution";
    }
    if (max_landscape_pixel_count) {
      oss << " max_pixel_count: " << *max_landscape_pixel_count;
    }
  } else {
    oss << "[ landscape: ";
    if (target_landscape_aspect_ratio) {
      oss << target_landscape_aspect_ratio->first << "x"
          << target_landscape_aspect_ratio->second;
    } else {
      oss << "unset";
    }
    if (max_landscape_pixel_count) {
      oss << " max_pixel_count: " << *max_landscape_pixel_count;
    }
    oss << " ] [ portrait: ";
    // NOTE(review): unlike the landscape branch above, nothing is printed
    // here when the portrait aspect ratio is unset — confirm whether the
    // asymmetry is intentional before "fixing" the log format.
    if (target_portrait_aspect_ratio) {
      oss << target_portrait_aspect_ratio->first << "x"
          << target_portrait_aspect_ratio->second;
    }
    if (max_portrait_pixel_count) {
      oss << " max_pixel_count: " << *max_portrait_pixel_count;
    }
    oss << " ]";
  }
  oss << " max_fps: ";
  if (max_fps) {
    oss << *max_fps;
  } else {
    oss << "unset";
  }
  oss << " ]";
  return oss.Release();
}
} // namespace cricket

View file

@ -0,0 +1,172 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_VIDEO_ADAPTER_H_
#define MEDIA_BASE_VIDEO_ADAPTER_H_
#include <stdint.h>
#include <string>
#include <utility>
#include "absl/types/optional.h"
#include "api/video/video_source_interface.h"
#include "common_video/framerate_controller.h"
#include "media/base/video_common.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/thread_annotations.h"
namespace cricket {
// VideoAdapter adapts an input video frame to an output frame based on the
// specified input and output formats. The adaptation includes dropping frames
// to reduce frame rate and scaling frames.
// VideoAdapter is thread safe.
class RTC_EXPORT VideoAdapter {
 public:
  VideoAdapter();
  // The source requests output frames whose width and height are divisible
  // by `source_resolution_alignment`.
  explicit VideoAdapter(int source_resolution_alignment);
  virtual ~VideoAdapter();
  VideoAdapter(const VideoAdapter&) = delete;
  VideoAdapter& operator=(const VideoAdapter&) = delete;
  // Return the adapted resolution and cropping parameters given the
  // input resolution. The input frame should first be cropped, then
  // scaled to the final output resolution. Returns true if the frame
  // should be adapted, and false if it should be dropped.
  bool AdaptFrameResolution(int in_width,
                            int in_height,
                            int64_t in_timestamp_ns,
                            int* cropped_width,
                            int* cropped_height,
                            int* out_width,
                            int* out_height) RTC_LOCKS_EXCLUDED(mutex_);
  // DEPRECATED. Please use OnOutputFormatRequest below.
  // TODO(asapersson): Remove this once it is no longer used.
  // Requests the output frame size and frame interval from
  // `AdaptFrameResolution` to not be larger than `format`. Also, the input
  // frame size will be cropped to match the requested aspect ratio. The
  // requested aspect ratio is orientation agnostic and will be adjusted to
  // maintain the input orientation, so it doesn't matter if e.g. 1280x720 or
  // 720x1280 is requested.
  // Note: Should be called from the source only.
  void OnOutputFormatRequest(const absl::optional<VideoFormat>& format)
      RTC_LOCKS_EXCLUDED(mutex_);
  // Requests output frame size and frame interval from `AdaptFrameResolution`.
  // `target_aspect_ratio`: The input frame size will be cropped to match the
  // requested aspect ratio. The aspect ratio is orientation agnostic and will
  // be adjusted to maintain the input orientation (i.e. it doesn't matter if
  // e.g. <1280,720> or <720,1280> is requested).
  // `max_pixel_count`: The maximum output frame size.
  // `max_fps`: The maximum output framerate.
  // Note: Should be called from the source only.
  void OnOutputFormatRequest(
      const absl::optional<std::pair<int, int>>& target_aspect_ratio,
      const absl::optional<int>& max_pixel_count,
      const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
  // Same as above, but allows setting two different target aspect ratios
  // depending on incoming frame orientation. This gives more fine-grained
  // control and can e.g. be used to force landscape video to be cropped to
  // portrait video.
  void OnOutputFormatRequest(
      const absl::optional<std::pair<int, int>>& target_landscape_aspect_ratio,
      const absl::optional<int>& max_landscape_pixel_count,
      const absl::optional<std::pair<int, int>>& target_portrait_aspect_ratio,
      const absl::optional<int>& max_portrait_pixel_count,
      const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
  // Requests the output frame size from `AdaptFrameResolution` to have as close
  // as possible to `sink_wants.target_pixel_count` pixels (if set)
  // but no more than `sink_wants.max_pixel_count`.
  // `sink_wants.max_framerate_fps` is essentially analogous to
  // `sink_wants.max_pixel_count`, but for framerate rather than resolution.
  // Set `sink_wants.max_pixel_count` and/or `sink_wants.max_framerate_fps` to
  // std::numeric_limit<int>::max() if no upper limit is desired.
  // The sink resolution alignment requirement is given by
  // `sink_wants.resolution_alignment`.
  // Note: Should be called from the sink only.
  void OnSinkWants(const rtc::VideoSinkWants& sink_wants)
      RTC_LOCKS_EXCLUDED(mutex_);
  // Returns maximum image area, which shouldn't impose any adaptations.
  // Can return `numeric_limits<int>::max()` if no limit is set.
  int GetTargetPixels() const;
  // Returns current frame-rate limit.
  // Can return `numeric_limits<float>::infinity()` if no limit is set.
  float GetMaxFramerate() const;
 private:
  // Determine if frame should be dropped based on input fps and requested fps.
  bool DropFrame(int64_t in_timestamp_ns) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int frames_in_ RTC_GUARDED_BY(mutex_);      // Number of input frames.
  int frames_out_ RTC_GUARDED_BY(mutex_);     // Number of output frames.
  int frames_scaled_ RTC_GUARDED_BY(mutex_);  // Number of frames scaled.
  int adaption_changes_
      RTC_GUARDED_BY(mutex_);  // Number of changes in scale factor.
  int previous_width_ RTC_GUARDED_BY(mutex_);  // Previous adapter output width.
  int previous_height_
      RTC_GUARDED_BY(mutex_);  // Previous adapter output height.
  // NOTE(review): set at construction; its effect on the chosen start scale
  // factor lives in video_adapter.cc and is not visible here — confirm there.
  const bool variable_start_scale_factor_;
  // The fixed source resolution alignment requirement.
  const int source_resolution_alignment_;
  // The currently applied resolution alignment, as given by the requirements:
  //  - the fixed `source_resolution_alignment_`; and
  //  - the latest `sink_wants.resolution_alignment`.
  int resolution_alignment_ RTC_GUARDED_BY(mutex_);
  // Max number of pixels/fps requested via calls to OnOutputFormatRequest,
  // OnResolutionFramerateRequest respectively.
  // The adapted output format is the minimum of these.
  struct OutputFormatRequest {
    absl::optional<std::pair<int, int>> target_landscape_aspect_ratio;
    absl::optional<int> max_landscape_pixel_count;
    absl::optional<std::pair<int, int>> target_portrait_aspect_ratio;
    absl::optional<int> max_portrait_pixel_count;
    absl::optional<int> max_fps;
    // For logging.
    std::string ToString() const;
  };
  OutputFormatRequest output_format_request_ RTC_GUARDED_BY(mutex_);
  int resolution_request_target_pixel_count_ RTC_GUARDED_BY(mutex_);
  int resolution_request_max_pixel_count_ RTC_GUARDED_BY(mutex_);
  int max_framerate_request_ RTC_GUARDED_BY(mutex_);
  // Stashed OutputFormatRequest that is used to save value of
  // OnOutputFormatRequest in case all active encoders are using
  // requested_resolution. I.e when all active encoders are using
  // requested_resolution, the call to OnOutputFormatRequest is ignored
  // and the value from requested_resolution is used instead (to scale/crop
  // frame). This allows for an application to only use
  // RtpEncodingParameters::request_resolution and get the same behavior as if
  // it had used VideoAdapter::OnOutputFormatRequest.
  absl::optional<OutputFormatRequest> stashed_output_format_request_
      RTC_GUARDED_BY(mutex_);
  webrtc::FramerateController framerate_controller_ RTC_GUARDED_BY(mutex_);
  // The critical section to protect the above variables.
  mutable webrtc::Mutex mutex_;
};
} // namespace cricket
#endif // MEDIA_BASE_VIDEO_ADAPTER_H_

View file

@ -0,0 +1,214 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_broadcaster.h"
#include <algorithm>
#include <vector>
#include "absl/types/optional.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_rotation.h"
#include "media/base/video_common.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace rtc {
// Construction and destruction need no work beyond the defaulted members.
VideoBroadcaster::VideoBroadcaster() = default;
VideoBroadcaster::~VideoBroadcaster() = default;
// Adds `sink` or refreshes its wants, then re-aggregates the combined wants.
// A brand-new sink additionally receives the most recently stored
// constraints, and forces the update-rect bookkeeping to reset since it has
// not seen the previous frame.
void VideoBroadcaster::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  RTC_DCHECK(sink != nullptr);
  webrtc::MutexLock guard(&sinks_and_wants_lock_);
  const bool is_new_sink = (FindSinkPair(sink) == nullptr);
  if (is_new_sink) {
    // `sink` did not receive the previous frame.
    previous_frame_sent_to_all_sinks_ = false;
    if (last_constraints_.has_value()) {
      RTC_LOG(LS_INFO) << __func__ << " forwarding stored constraints min_fps "
                       << last_constraints_->min_fps.value_or(-1) << " max_fps "
                       << last_constraints_->max_fps.value_or(-1);
      sink->OnConstraintsChanged(*last_constraints_);
    }
  }
  VideoSourceBase::AddOrUpdateSink(sink, wants);
  UpdateWants();
}
// Unregisters `sink` and re-aggregates the wants of the remaining sinks.
void VideoBroadcaster::RemoveSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK(sink != nullptr);
  webrtc::MutexLock guard(&sinks_and_wants_lock_);
  VideoSourceBase::RemoveSink(sink);
  UpdateWants();
}
// True when at least one sink is registered, i.e. the next frame will be
// delivered to someone.
bool VideoBroadcaster::frame_wanted() const {
  webrtc::MutexLock guard(&sinks_and_wants_lock_);
  const bool has_sinks = !sink_pairs().empty();
  return has_sinks;
}
// Returns a copy of the wants aggregated over all sinks by UpdateWants().
VideoSinkWants VideoBroadcaster::wants() const {
  webrtc::MutexLock guard(&sinks_and_wants_lock_);
  VideoSinkWants aggregated = current_wants_;
  return aggregated;
}
// Delivers `frame` to every registered sink. Per-sink handling:
//  - sinks with rotation_applied that receive a frame with pending rotation
//    get OnDiscardedFrame() instead (see inline comment);
//  - sinks that asked for black_frames get a blacked-out buffer with the same
//    geometry, rotation, timestamp and id;
//  - if the previous frame was not delivered to every sink, the update rect
//    is cleared before forwarding, since it is no longer reliable.
void VideoBroadcaster::OnFrame(const webrtc::VideoFrame& frame) {
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  bool current_frame_was_discarded = false;
  for (auto& sink_pair : sink_pairs()) {
    if (sink_pair.wants.rotation_applied &&
        frame.rotation() != webrtc::kVideoRotation_0) {
      // Calls to OnFrame are not synchronized with changes to the sink wants.
      // When rotation_applied is set to true, one or a few frames may get here
      // with rotation still pending. Protect sinks that don't expect any
      // pending rotation.
      RTC_LOG(LS_VERBOSE) << "Discarding frame with unexpected rotation.";
      sink_pair.sink->OnDiscardedFrame();
      current_frame_was_discarded = true;
      continue;
    }
    if (sink_pair.wants.black_frames) {
      // Substitute a black frame that preserves the original's metadata.
      webrtc::VideoFrame black_frame =
          webrtc::VideoFrame::Builder()
              .set_video_frame_buffer(
                  GetBlackFrameBuffer(frame.width(), frame.height()))
              .set_rotation(frame.rotation())
              .set_timestamp_us(frame.timestamp_us())
              .set_id(frame.id())
              .build();
      sink_pair.sink->OnFrame(black_frame);
    } else if (!previous_frame_sent_to_all_sinks_ && frame.has_update_rect()) {
      // Since last frame was not sent to some sinks, no reliable update
      // information is available, so we need to clear the update rect.
      webrtc::VideoFrame copy = frame;
      copy.clear_update_rect();
      sink_pair.sink->OnFrame(copy);
    } else {
      sink_pair.sink->OnFrame(frame);
    }
  }
  // Remember whether every sink saw this frame, for the update-rect logic
  // above on the next call.
  previous_frame_sent_to_all_sinks_ = !current_frame_was_discarded;
}
void VideoBroadcaster::OnDiscardedFrame() {
webrtc::MutexLock lock(&sinks_and_wants_lock_);
for (auto& sink_pair : sink_pairs()) {
sink_pair.sink->OnDiscardedFrame();
}
}
// Stores `constraints` for sinks added later and forwards them to every sink
// currently registered.
void VideoBroadcaster::ProcessConstraints(
    const webrtc::VideoTrackSourceConstraints& constraints) {
  webrtc::MutexLock guard(&sinks_and_wants_lock_);
  RTC_LOG(LS_INFO) << __func__ << " min_fps "
                   << constraints.min_fps.value_or(-1) << " max_fps "
                   << constraints.max_fps.value_or(-1) << " broadcasting to "
                   << sink_pairs().size() << " sinks.";
  last_constraints_ = constraints;
  for (auto& pair : sink_pairs()) {
    pair.sink->OnConstraintsChanged(constraints);
  }
}
// Recomputes `current_wants_` by aggregating the wants of every sink:
// rotation_applied is OR-ed, max_pixel_count / target_pixel_count /
// max_framerate_fps take the minimum, resolution_alignment takes the least
// common multiple, and requested_resolution takes the per-dimension maximum.
// Inactive sinks may be skipped entirely (see the TODO below).
void VideoBroadcaster::UpdateWants() {
  VideoSinkWants wants;
  wants.rotation_applied = false;
  wants.resolution_alignment = 1;
  wants.aggregates.emplace(VideoSinkWants::Aggregates());
  wants.is_active = false;
  // TODO(webrtc:14451) : I think it makes sense to always
  // "ignore" encoders that are not active. But that would
  // probably require a controlled roll out with a field trials?
  // To play it safe, only ignore inactive encoders is there is an
  // active encoder using the new api (requested_resolution),
  // this means that there is only a behavioural change when using new
  // api.
  bool ignore_inactive_encoders_old_api = false;
  for (auto& sink : sink_pairs()) {
    if (sink.wants.is_active && sink.wants.requested_resolution.has_value()) {
      ignore_inactive_encoders_old_api = true;
      break;
    }
  }
  for (auto& sink : sink_pairs()) {
    // Skip inactive sinks when allowed (see TODO above).
    if (!sink.wants.is_active &&
        (sink.wants.requested_resolution || ignore_inactive_encoders_old_api)) {
      continue;
    }
    // wants.rotation_applied == ANY(sink.wants.rotation_applied)
    if (sink.wants.rotation_applied) {
      wants.rotation_applied = true;
    }
    // wants.max_pixel_count == MIN(sink.wants.max_pixel_count)
    if (sink.wants.max_pixel_count < wants.max_pixel_count) {
      wants.max_pixel_count = sink.wants.max_pixel_count;
    }
    // Select the minimum requested target_pixel_count, if any, of all sinks so
    // that we don't over utilize the resources for any one.
    // TODO(sprang): Consider using the median instead, since the limit can be
    // expressed by max_pixel_count.
    if (sink.wants.target_pixel_count &&
        (!wants.target_pixel_count ||
         (*sink.wants.target_pixel_count < *wants.target_pixel_count))) {
      wants.target_pixel_count = sink.wants.target_pixel_count;
    }
    // Select the minimum for the requested max framerates.
    if (sink.wants.max_framerate_fps < wants.max_framerate_fps) {
      wants.max_framerate_fps = sink.wants.max_framerate_fps;
    }
    // The combined alignment must satisfy every sink's alignment.
    wants.resolution_alignment = cricket::LeastCommonMultiple(
        wants.resolution_alignment, sink.wants.resolution_alignment);
    // Pick MAX(requested_resolution) since the actual can be downscaled
    // in encoder instead.
    if (sink.wants.requested_resolution) {
      if (!wants.requested_resolution) {
        wants.requested_resolution = sink.wants.requested_resolution;
      } else {
        wants.requested_resolution->width =
            std::max(wants.requested_resolution->width,
                     sink.wants.requested_resolution->width);
        wants.requested_resolution->height =
            std::max(wants.requested_resolution->height,
                     sink.wants.requested_resolution->height);
      }
    } else if (sink.wants.is_active) {
      wants.aggregates->any_active_without_requested_resolution = true;
    }
    wants.is_active |= sink.wants.is_active;
  }
  // Keep the target at or below the hard maximum.
  if (wants.target_pixel_count &&
      *wants.target_pixel_count >= wants.max_pixel_count) {
    wants.target_pixel_count.emplace(wants.max_pixel_count);
  }
  current_wants_ = wants;
}
// Returns a cached all-black I420 buffer of the given size, recreating the
// cache only when the requested dimensions change.
const rtc::scoped_refptr<webrtc::VideoFrameBuffer>&
VideoBroadcaster::GetBlackFrameBuffer(int width, int height) {
  const bool cache_matches = black_frame_buffer_ &&
                             black_frame_buffer_->width() == width &&
                             black_frame_buffer_->height() == height;
  if (!cache_matches) {
    rtc::scoped_refptr<webrtc::I420Buffer> fresh =
        webrtc::I420Buffer::Create(width, height);
    webrtc::I420Buffer::SetBlack(fresh.get());
    black_frame_buffer_ = fresh;
  }
  return black_frame_buffer_;
}
} // namespace rtc

View file

@ -0,0 +1,82 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_VIDEO_BROADCASTER_H_
#define MEDIA_BASE_VIDEO_BROADCASTER_H_
#include "api/media_stream_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_source_interface.h"
#include "media/base/video_source_base.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace rtc {
// VideoBroadcaster broadcast video frames to sinks and combines VideoSinkWants
// from its sinks. It does that by implementing rtc::VideoSourceInterface and
// rtc::VideoSinkInterface. The class is threadsafe; methods may be called on
// any thread. This is needed because VideoStreamEncoder calls AddOrUpdateSink
// both on the worker thread and on the encoder task queue.
class VideoBroadcaster : public VideoSourceBase,
                         public VideoSinkInterface<webrtc::VideoFrame> {
 public:
  VideoBroadcaster();
  ~VideoBroadcaster() override;
  // Adds a new, or updates an already existing sink. If the sink is new and
  // ProcessConstraints has been called previously, the new sink's
  // OnConstraintsCalled method will be invoked with the most recent
  // constraints.
  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const VideoSinkWants& wants) override;
  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
  // Returns true if the next frame will be delivered to at least one sink.
  bool frame_wanted() const;
  // Returns VideoSinkWants a source is requested to fulfill. They are
  // aggregated by all VideoSinkWants from all sinks.
  VideoSinkWants wants() const;
  // This method ensures that if a sink sets rotation_applied == true,
  // it will never receive a frame with pending rotation. Our caller
  // may pass in frames without precise synchronization with changes
  // to the VideoSinkWants.
  void OnFrame(const webrtc::VideoFrame& frame) override;
  void OnDiscardedFrame() override;
  // Called on the network thread when constraints change. Forwards the
  // constraints to sinks added with AddOrUpdateSink via OnConstraintsChanged.
  void ProcessConstraints(
      const webrtc::VideoTrackSourceConstraints& constraints);
 protected:
  // Recomputes current_wants_ from all sinks' wants; must hold the lock.
  void UpdateWants() RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_);
  const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& GetBlackFrameBuffer(
      int width,
      int height) RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_);
  mutable webrtc::Mutex sinks_and_wants_lock_;
  VideoSinkWants current_wants_ RTC_GUARDED_BY(sinks_and_wants_lock_);
  // NOTE(review): accessed only under sinks_and_wants_lock_ (via
  // GetBlackFrameBuffer), but lacks an RTC_GUARDED_BY annotation — confirm.
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> black_frame_buffer_;
  // True iff the previous frame was delivered to every sink; used to decide
  // whether a frame's update rect is still trustworthy.
  bool previous_frame_sent_to_all_sinks_ RTC_GUARDED_BY(sinks_and_wants_lock_) =
      true;
  // Most recent constraints, replayed to sinks added later.
  absl::optional<webrtc::VideoTrackSourceConstraints> last_constraints_
      RTC_GUARDED_BY(sinks_and_wants_lock_);
};
} // namespace rtc
#endif // MEDIA_BASE_VIDEO_BROADCASTER_H_

View file

@ -0,0 +1,97 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_common.h"
#include "api/array_view.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace cricket {
// One alias -> canonical FourCC mapping; the table below is scanned linearly
// by CanonicalFourCC().
struct FourCCAliasEntry {
  uint32_t alias;
  uint32_t canonical;
};
static const FourCCAliasEntry kFourCCAliases[] = {
    {FOURCC_IYUV, FOURCC_I420},
    {FOURCC_YU16, FOURCC_I422},
    {FOURCC_YU24, FOURCC_I444},
    {FOURCC_YUYV, FOURCC_YUY2},
    {FOURCC_YUVS, FOURCC_YUY2},
    {FOURCC_HDYC, FOURCC_UYVY},
    {FOURCC_2VUY, FOURCC_UYVY},
    {FOURCC_JPEG, FOURCC_MJPG},  // Note: JPEG has DHT while MJPG does not.
    {FOURCC_DMB1, FOURCC_MJPG},
    {FOURCC_BA81, FOURCC_BGGR},
    {FOURCC_RGB3, FOURCC_RAW},
    {FOURCC_BGR3, FOURCC_24BG},
    {FOURCC_CM32, FOURCC_BGRA},
    {FOURCC_CM24, FOURCC_RAW},
};
uint32_t CanonicalFourCC(uint32_t fourcc) {
for (uint32_t i = 0; i < arraysize(kFourCCAliases); ++i) {
if (kFourCCAliases[i].alias == fourcc) {
return kFourCCAliases[i].canonical;
}
}
// Not an alias, so return it as-is.
return fourcc;
}
// The C++ standard requires a namespace-scope definition of static const
// integral types even when they are initialized in the declaration (see
// [class.static.data]/4), but MSVC with /Ze is non-conforming and treats that
// as a multiply defined symbol error. See Also:
// http://msdn.microsoft.com/en-us/library/34h23df8.aspx
// Hence the definition is skipped when MSVC language extensions are enabled.
#ifndef _MSC_EXTENSIONS
const int64_t VideoFormat::kMinimumInterval;  // Initialized in header.
#endif
// Renders the format as "fourcc widthxheightxfps". The fourcc prefix is
// dropped entirely if any of its characters is not printable ASCII.
std::string VideoFormat::ToString() const {
  std::string fourcc_name = GetFourccName(fourcc) + " ";
  for (char ch : fourcc_name) {
    // Test character is printable; Avoid isprint() which asserts on negatives.
    if (ch < 32 || ch >= 127) {
      fourcc_name = "";
      break;
    }
  }
  char buf[256];
  rtc::SimpleStringBuilder sb(buf);
  sb << fourcc_name << width << "x" << height << "x"
     << IntervalToFpsFloat(interval);
  return sb.str();
}
// Euclid's algorithm. Requires a >= 0 and b > 0; the result is always > 0.
int GreatestCommonDivisor(int a, int b) {
  RTC_DCHECK_GE(a, 0);
  RTC_DCHECK_GT(b, 0);
  while (b != 0) {
    const int remainder = a % b;
    a = b;
    b = remainder;
  }
  return a;
}
// LCM via GCD. Dividing `b` by the GCD before multiplying keeps the
// intermediate product small. Requires a > 0 and b > 0.
int LeastCommonMultiple(int a, int b) {
  RTC_DCHECK_GT(a, 0);
  RTC_DCHECK_GT(b, 0);
  const int divisor = GreatestCommonDivisor(a, b);
  return a * (b / divisor);
}
} // namespace cricket

View file

@ -0,0 +1,224 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Common definition for video, including fourcc and VideoFormat.
#ifndef MEDIA_BASE_VIDEO_COMMON_H_
#define MEDIA_BASE_VIDEO_COMMON_H_
#include <stdint.h>
#include <string>
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/time_utils.h"
namespace cricket {
//////////////////////////////////////////////////////////////////////////////
// Definition of FourCC codes
//////////////////////////////////////////////////////////////////////////////
// Convert four characters to a FourCC code.
// Needs to be a macro otherwise the OS X compiler complains when the kFormat*
// constants are used in a switch.
// Packs four bytes little-endian: `a` is the least significant byte.
#define CRICKET_FOURCC(a, b, c, d)                                    \
  ((static_cast<uint32_t>(a)) | (static_cast<uint32_t>(b) << 8) |     \
   (static_cast<uint32_t>(c) << 16) | (static_cast<uint32_t>(d) << 24))
// Some pages discussing FourCC codes:
//   http://www.fourcc.org/yuv.php
//   http://v4l2spec.bytesex.org/spec/book1.htm
//   http://developer.apple.com/quicktime/icefloe/dispatch020.html
//   http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12
//   http://people.xiph.org/~xiphmont/containers/nut/nut4cc.txt
// FourCC codes grouped according to implementation efficiency.
// Primary formats should convert in 1 efficient step.
// Secondary formats are converted in 2 steps.
// Auxilliary formats call primary converters.
enum FourCC {
  // 9 Primary YUV formats: 5 planar, 2 biplanar, 2 packed.
  FOURCC_I420 = CRICKET_FOURCC('I', '4', '2', '0'),
  FOURCC_I422 = CRICKET_FOURCC('I', '4', '2', '2'),
  FOURCC_I444 = CRICKET_FOURCC('I', '4', '4', '4'),
  FOURCC_I411 = CRICKET_FOURCC('I', '4', '1', '1'),
  FOURCC_I400 = CRICKET_FOURCC('I', '4', '0', '0'),
  FOURCC_NV21 = CRICKET_FOURCC('N', 'V', '2', '1'),
  FOURCC_NV12 = CRICKET_FOURCC('N', 'V', '1', '2'),
  FOURCC_YUY2 = CRICKET_FOURCC('Y', 'U', 'Y', '2'),
  FOURCC_UYVY = CRICKET_FOURCC('U', 'Y', 'V', 'Y'),
  // 2 Secondary YUV formats: row biplanar.
  FOURCC_M420 = CRICKET_FOURCC('M', '4', '2', '0'),
  // 9 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp.
  FOURCC_ARGB = CRICKET_FOURCC('A', 'R', 'G', 'B'),
  FOURCC_BGRA = CRICKET_FOURCC('B', 'G', 'R', 'A'),
  FOURCC_ABGR = CRICKET_FOURCC('A', 'B', 'G', 'R'),
  FOURCC_24BG = CRICKET_FOURCC('2', '4', 'B', 'G'),
  FOURCC_RAW = CRICKET_FOURCC('r', 'a', 'w', ' '),
  FOURCC_RGBA = CRICKET_FOURCC('R', 'G', 'B', 'A'),
  FOURCC_RGBP = CRICKET_FOURCC('R', 'G', 'B', 'P'),  // bgr565.
  FOURCC_RGBO = CRICKET_FOURCC('R', 'G', 'B', 'O'),  // abgr1555.
  FOURCC_R444 = CRICKET_FOURCC('R', '4', '4', '4'),  // argb4444.
  // 4 Secondary RGB formats: 4 Bayer Patterns.
  FOURCC_RGGB = CRICKET_FOURCC('R', 'G', 'G', 'B'),
  FOURCC_BGGR = CRICKET_FOURCC('B', 'G', 'G', 'R'),
  FOURCC_GRBG = CRICKET_FOURCC('G', 'R', 'B', 'G'),
  FOURCC_GBRG = CRICKET_FOURCC('G', 'B', 'R', 'G'),
  // 1 Primary Compressed YUV format.
  FOURCC_MJPG = CRICKET_FOURCC('M', 'J', 'P', 'G'),
  // 5 Auxiliary YUV variations: 3 with U and V planes are swapped, 1 Alias.
  FOURCC_YV12 = CRICKET_FOURCC('Y', 'V', '1', '2'),
  FOURCC_YV16 = CRICKET_FOURCC('Y', 'V', '1', '6'),
  FOURCC_YV24 = CRICKET_FOURCC('Y', 'V', '2', '4'),
  FOURCC_YU12 = CRICKET_FOURCC('Y', 'U', '1', '2'),  // Linux version of I420.
  FOURCC_J420 = CRICKET_FOURCC('J', '4', '2', '0'),
  FOURCC_J400 = CRICKET_FOURCC('J', '4', '0', '0'),
  // 14 Auxiliary aliases.  CanonicalFourCC() maps these to canonical FOURCC.
  FOURCC_IYUV = CRICKET_FOURCC('I', 'Y', 'U', 'V'),  // Alias for I420.
  FOURCC_YU16 = CRICKET_FOURCC('Y', 'U', '1', '6'),  // Alias for I422.
  FOURCC_YU24 = CRICKET_FOURCC('Y', 'U', '2', '4'),  // Alias for I444.
  FOURCC_YUYV = CRICKET_FOURCC('Y', 'U', 'Y', 'V'),  // Alias for YUY2.
  FOURCC_YUVS = CRICKET_FOURCC('y', 'u', 'v', 's'),  // Alias for YUY2 on Mac.
  FOURCC_HDYC = CRICKET_FOURCC('H', 'D', 'Y', 'C'),  // Alias for UYVY.
  FOURCC_2VUY = CRICKET_FOURCC('2', 'v', 'u', 'y'),  // Alias for UYVY on Mac.
  FOURCC_JPEG = CRICKET_FOURCC('J', 'P', 'E', 'G'),  // Alias for MJPG.
  FOURCC_DMB1 = CRICKET_FOURCC('d', 'm', 'b', '1'),  // Alias for MJPG on Mac.
  FOURCC_BA81 = CRICKET_FOURCC('B', 'A', '8', '1'),  // Alias for BGGR.
  FOURCC_RGB3 = CRICKET_FOURCC('R', 'G', 'B', '3'),  // Alias for RAW.
  FOURCC_BGR3 = CRICKET_FOURCC('B', 'G', 'R', '3'),  // Alias for 24BG.
  FOURCC_CM32 = CRICKET_FOURCC(0, 0, 0, 32),  // BGRA kCMPixelFormat_32ARGB
  FOURCC_CM24 = CRICKET_FOURCC(0, 0, 0, 24),  // RAW kCMPixelFormat_24RGB
  // 1 Auxiliary compressed YUV format set aside for capturer.
  FOURCC_H264 = CRICKET_FOURCC('H', '2', '6', '4'),
};
// The macro is only needed to build the enum above.
#undef CRICKET_FOURCC
// Match any fourcc.
// We move this out of the enum because using it in many places caused
// the compiler to get grumpy, presumably since the above enum is
// backed by an int.
static const uint32_t FOURCC_ANY = 0xFFFFFFFF;
// Converts fourcc aliases into canonical ones.
uint32_t CanonicalFourCC(uint32_t fourcc);
// Get FourCC code as a string.
// Decodes a FourCC into its 4-character string, least-significant byte first
// (the inverse of how CRICKET_FOURCC packs characters).
inline std::string GetFourccName(uint32_t fourcc) {
  std::string name;
  for (int shift = 0; shift <= 24; shift += 8) {
    name.push_back(static_cast<char>((fourcc >> shift) & 0xFF));
  }
  return name;
}
//////////////////////////////////////////////////////////////////////////////
// Definition of VideoFormat.
//////////////////////////////////////////////////////////////////////////////
// VideoFormat with Plain Old Data for global variables.
// Plain aggregate so instances can be statically initialized as globals;
// VideoFormat below adds the behavior.
struct VideoFormatPod {
  int width;         // Number of pixels.
  int height;        // Number of pixels.
  int64_t interval;  // Nanoseconds.
  uint32_t fourcc;   // Color space. FOURCC_ANY means that any color space is OK.
};
struct RTC_EXPORT VideoFormat : VideoFormatPod {
  static const int64_t kMinimumInterval =
      rtc::kNumNanosecsPerSec / 10000;  // 10k fps.
  VideoFormat() { Construct(0, 0, 0, 0); }
  VideoFormat(int w, int h, int64_t interval_ns, uint32_t cc) {
    Construct(w, h, interval_ns, cc);
  }
  explicit VideoFormat(const VideoFormatPod& format) {
    Construct(format.width, format.height, format.interval, format.fourcc);
  }
  // Shared field initializer used by every constructor.
  void Construct(int w, int h, int64_t interval_ns, uint32_t cc) {
    width = w;
    height = h;
    interval = interval_ns;
    fourcc = cc;
  }
  // fps == 0 maps to kMinimumInterval rather than a division by zero.
  static int64_t FpsToInterval(int fps) {
    return fps ? rtc::kNumNanosecsPerSec / fps : kMinimumInterval;
  }
  static int IntervalToFps(int64_t interval) {
    if (!interval) {
      return 0;
    }
    return static_cast<int>(rtc::kNumNanosecsPerSec / interval);
  }
  static float IntervalToFpsFloat(int64_t interval) {
    if (!interval) {
      return 0.f;
    }
    return static_cast<float>(rtc::kNumNanosecsPerSec) /
           static_cast<float>(interval);
  }
  bool operator==(const VideoFormat& format) const {
    return width == format.width && height == format.height &&
           interval == format.interval && fourcc == format.fourcc;
  }
  bool operator!=(const VideoFormat& format) const {
    return !(*this == format);
  }
  // Orders by fourcc, then width, then height, then interval. Note the
  // interval comparison is inverted (`>`): a larger interval (lower fps)
  // sorts first for equal resolutions.
  bool operator<(const VideoFormat& format) const {
    return (fourcc < format.fourcc) ||
           (fourcc == format.fourcc && width < format.width) ||
           (fourcc == format.fourcc && width == format.width &&
            height < format.height) ||
           (fourcc == format.fourcc && width == format.width &&
            height == format.height && interval > format.interval);
  }
  int framerate() const { return IntervalToFps(interval); }
  // Check if both width and height are 0.
  bool IsSize0x0() const { return 0 == width && 0 == height; }
  // Check if this format is less than another one by comparing the resolution
  // and frame rate.
  bool IsPixelRateLess(const VideoFormat& format) const {
    return width * height * framerate() <
           format.width * format.height * format.framerate();
  }
  // Get a string presentation in the form of "fourcc width x height x fps"
  std::string ToString() const;
};
// Returns the largest positive integer that divides both `a` and `b`.
int GreatestCommonDivisor(int a, int b);
// Returns the smallest positive integer that is divisible by both `a` and `b`.
int LeastCommonMultiple(int a, int b);
} // namespace cricket
#endif // MEDIA_BASE_VIDEO_COMMON_H_

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_source_base.h"
#include <algorithm>
#include "absl/algorithm/container.h"
#include "rtc_base/checks.h"
namespace rtc {
// Nothing to do beyond default member construction/destruction.
VideoSourceBase::VideoSourceBase() = default;
VideoSourceBase::~VideoSourceBase() = default;
// Registers `sink`, or refreshes its wants if it is already registered.
void VideoSourceBase::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  RTC_DCHECK(sink != nullptr);
  SinkPair* existing = FindSinkPair(sink);
  if (existing != nullptr) {
    existing->wants = wants;
    return;
  }
  sinks_.push_back(SinkPair(sink, wants));
}
// Unregisters `sink`; it must currently be registered.
void VideoSourceBase::RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK(sink != nullptr);
  RTC_DCHECK(FindSinkPair(sink));
  auto matches_sink = [sink](const SinkPair& pair) {
    return pair.sink == sink;
  };
  sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(), matches_sink),
               sinks_.end());
}
// Returns the registration entry for `sink`, or nullptr if not registered.
VideoSourceBase::SinkPair* VideoSourceBase::FindSinkPair(
    const VideoSinkInterface<webrtc::VideoFrame>* sink) {
  for (SinkPair& pair : sinks_) {
    if (pair.sink == sink) {
      return &pair;
    }
  }
  return nullptr;
}
// Nothing to do beyond default member construction/destruction.
VideoSourceBaseGuarded::VideoSourceBaseGuarded() = default;
VideoSourceBaseGuarded::~VideoSourceBaseGuarded() = default;
// Registers `sink`, or refreshes its wants if already registered. Must run on
// the sequence the object was constructed on.
void VideoSourceBaseGuarded::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  RTC_DCHECK(sink != nullptr);
  SinkPair* existing = FindSinkPair(sink);
  if (existing != nullptr) {
    existing->wants = wants;
    return;
  }
  sinks_.push_back(SinkPair(sink, wants));
}
// Unregisters `sink`; it must currently be registered. Must run on the
// construction sequence.
void VideoSourceBaseGuarded::RemoveSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  RTC_DCHECK(sink != nullptr);
  RTC_DCHECK(FindSinkPair(sink));
  auto matches_sink = [sink](const SinkPair& pair) {
    return pair.sink == sink;
  };
  sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(), matches_sink),
               sinks_.end());
}
// Returns the registration entry for `sink`, or nullptr if not registered.
// Must run on the construction sequence.
VideoSourceBaseGuarded::SinkPair* VideoSourceBaseGuarded::FindSinkPair(
    const VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  for (SinkPair& pair : sinks_) {
    if (pair.sink == sink) {
      return &pair;
    }
  }
  return nullptr;
}
// Sequence-checked accessor for the registered sink/wants list.
const std::vector<VideoSourceBaseGuarded::SinkPair>&
VideoSourceBaseGuarded::sink_pairs() const {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  return sinks_;
}
} // namespace rtc

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_VIDEO_SOURCE_BASE_H_
#define MEDIA_BASE_VIDEO_SOURCE_BASE_H_
#include <vector>
#include "api/sequence_checker.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "rtc_base/system/no_unique_address.h"
namespace rtc {
// VideoSourceBase is not thread safe. Before using this class, consider using
// VideoSourceBaseGuarded below instead, which is an identical implementation
// but applies a sequence checker to help protect internal state.
// TODO(bugs.webrtc.org/12780): Delete this class.
class VideoSourceBase : public VideoSourceInterface<webrtc::VideoFrame> {
 public:
  VideoSourceBase();
  ~VideoSourceBase() override;
  // Registers `sink` or updates its wants if already registered.
  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const VideoSinkWants& wants) override;
  // Unregisters `sink`; it must currently be registered.
  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
 protected:
  // A registered sink together with the wants it last supplied.
  struct SinkPair {
    SinkPair(VideoSinkInterface<webrtc::VideoFrame>* sink, VideoSinkWants wants)
        : sink(sink), wants(wants) {}
    VideoSinkInterface<webrtc::VideoFrame>* sink;
    VideoSinkWants wants;
  };
  // Returns the entry for `sink`, or nullptr if not registered.
  SinkPair* FindSinkPair(const VideoSinkInterface<webrtc::VideoFrame>* sink);
  const std::vector<SinkPair>& sink_pairs() const { return sinks_; }
 private:
  std::vector<SinkPair> sinks_;
};
// VideoSourceBaseGuarded assumes that operations related to sinks, occur on the
// same TQ/thread that the object was constructed on.
class VideoSourceBaseGuarded : public VideoSourceInterface<webrtc::VideoFrame> {
 public:
  VideoSourceBaseGuarded();
  ~VideoSourceBaseGuarded() override;
  // Registers `sink` or updates its wants if already registered.
  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const VideoSinkWants& wants) override;
  // Unregisters `sink`; it must currently be registered.
  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
 protected:
  // A registered sink together with the wants it last supplied.
  struct SinkPair {
    SinkPair(VideoSinkInterface<webrtc::VideoFrame>* sink, VideoSinkWants wants)
        : sink(sink), wants(wants) {}
    VideoSinkInterface<webrtc::VideoFrame>* sink;
    VideoSinkWants wants;
  };
  // Returns the entry for `sink`, or nullptr if not registered.
  SinkPair* FindSinkPair(const VideoSinkInterface<webrtc::VideoFrame>* sink);
  const std::vector<SinkPair>& sink_pairs() const;
  // Keep the `source_sequence_` checker protected to allow sub classes the
  // ability to call Detach() if/when appropriate.
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker source_sequence_;
 private:
  std::vector<SinkPair> sinks_ RTC_GUARDED_BY(&source_sequence_);
};
} // namespace rtc
#endif // MEDIA_BASE_VIDEO_SOURCE_BASE_H_

View file

@ -0,0 +1,82 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/adm_helpers.h"
#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace adm_helpers {
// On Windows Vista and newer, Microsoft introduced the concept of "Default
// Communications Device". This means that there are two types of default
// devices (old Wave Audio style default and Default Communications Device).
//
// On Windows systems that only support the Wave Audio style default, either
// -1 or 0 selects the default device.
//
// Using a #define for AUDIO_DEVICE since we will call *different* versions of
// the ADM functions, depending on the ID type.
#if defined(WEBRTC_WIN)
#define AUDIO_DEVICE_ID \
(AudioDeviceModule::WindowsDeviceType::kDefaultCommunicationDevice)
#else
#define AUDIO_DEVICE_ID (0u)
#endif // defined(WEBRTC_WIN)
// Initializes `adm` (CHECK-fails if core initialization fails) and then
// selects and configures the default playout and recording devices, enabling
// stereo on each side when the device reports it as available. Configuration
// failures after Init() are logged but non-fatal.
void Init(AudioDeviceModule* adm) {
  RTC_DCHECK(adm);
  RTC_CHECK_EQ(0, adm->Init()) << "Failed to initialize the ADM.";
  // Playout device.
  {
    if (adm->SetPlayoutDevice(AUDIO_DEVICE_ID) != 0) {
      RTC_LOG(LS_ERROR) << "Unable to set playout device.";
      // NOTE(review): this early return also skips the recording-device setup
      // below — confirm that is the intended behavior on playout failure.
      return;
    }
    if (adm->InitSpeaker() != 0) {
      RTC_LOG(LS_ERROR) << "Unable to access speaker.";
    }
    // Set number of channels
    bool available = false;
    if (adm->StereoPlayoutIsAvailable(&available) != 0) {
      RTC_LOG(LS_ERROR) << "Failed to query stereo playout.";
    }
    // `available` stays false if the query failed, so playout falls back to
    // mono in that case.
    if (adm->SetStereoPlayout(available) != 0) {
      RTC_LOG(LS_ERROR) << "Failed to set stereo playout mode.";
    }
  }
  // Recording device.
  {
    if (adm->SetRecordingDevice(AUDIO_DEVICE_ID) != 0) {
      RTC_LOG(LS_ERROR) << "Unable to set recording device.";
      return;
    }
    if (adm->InitMicrophone() != 0) {
      RTC_LOG(LS_ERROR) << "Unable to access microphone.";
    }
    // Set number of channels
    bool available = false;
    if (adm->StereoRecordingIsAvailable(&available) != 0) {
      RTC_LOG(LS_ERROR) << "Failed to query stereo recording.";
    }
    // As above: a failed query leaves `available` false (mono recording).
    if (adm->SetStereoRecording(available) != 0) {
      RTC_LOG(LS_ERROR) << "Failed to set stereo recording mode.";
    }
  }
}
} // namespace adm_helpers
} // namespace webrtc

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_ADM_HELPERS_H_
#define MEDIA_ENGINE_ADM_HELPERS_H_
namespace webrtc {
class AudioDeviceModule;
namespace adm_helpers {
// Initializes `adm` and configures default playout/recording devices
// (stereo where available). Defined in adm_helpers.cc.
void Init(AudioDeviceModule* adm);
}  // namespace adm_helpers
}  // namespace webrtc
#endif // MEDIA_ENGINE_ADM_HELPERS_H_

View file

@ -0,0 +1,69 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/fake_video_codec_factory.h"
#include <memory>
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "test/fake_decoder.h"
#include "test/fake_encoder.h"
namespace {
static const char kFakeCodecFactoryCodecName[] = "FakeCodec";
} // anonymous namespace
namespace webrtc {
FakeVideoEncoderFactory::FakeVideoEncoderFactory() = default;
// static
// Convenience creator used without a factory instance; returns a fake
// encoder driven by the real-time clock.
std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder() {
  return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
}
// Advertises exactly one format, the synthetic "FakeCodec".
std::vector<SdpVideoFormat> FakeVideoEncoderFactory::GetSupportedFormats()
    const {
  return std::vector<SdpVideoFormat>(
      1, SdpVideoFormat(kFakeCodecFactoryCodecName));
}
// The requested `format` is ignored: every request yields the same fake
// encoder.
std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder(
    const SdpVideoFormat& format) {
  return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
}
FakeVideoDecoderFactory::FakeVideoDecoderFactory() = default;
// static
// Convenience creator used without a factory instance.
std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::CreateVideoDecoder() {
  return std::make_unique<test::FakeDecoder>();
}
// Advertises exactly one format, the synthetic "FakeCodec".
std::vector<SdpVideoFormat> FakeVideoDecoderFactory::GetSupportedFormats()
    const {
  return std::vector<SdpVideoFormat>(
      1, SdpVideoFormat(kFakeCodecFactoryCodecName));
}
// The requested `format` is ignored: every request yields the same fake
// decoder.
std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::CreateVideoDecoder(
    const SdpVideoFormat& format) {
  return std::make_unique<test::FakeDecoder>();
}
} // namespace webrtc

View file

@ -0,0 +1,53 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
#define MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
#include <memory>
#include <vector>
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Provides a fake video encoder instance that produces frames large enough for
// the given bitrate constraints.
class RTC_EXPORT FakeVideoEncoderFactory : public VideoEncoderFactory {
 public:
  FakeVideoEncoderFactory();
  // Creates a fake encoder without needing a factory instance.
  static std::unique_ptr<VideoEncoder> CreateVideoEncoder();
  // VideoEncoderFactory implementation
  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      const SdpVideoFormat& format) override;
};
// Provides a fake video decoder instance that ignores the given bitstream and
// produces frames.
class RTC_EXPORT FakeVideoDecoderFactory : public VideoDecoderFactory {
 public:
  FakeVideoDecoderFactory();
  // Creates a fake decoder without needing a factory instance.
  static std::unique_ptr<VideoDecoder> CreateVideoDecoder();
  // VideoDecoderFactory implementation
  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  std::unique_ptr<VideoDecoder> CreateVideoDecoder(
      const SdpVideoFormat& format) override;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_

View file

@ -0,0 +1,763 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/fake_webrtc_call.h"
#include <cstdint>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "api/call/audio_sink.h"
#include "api/units/timestamp.h"
#include "call/packet_receiver.h"
#include "media/base/media_channel.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/checks.h"
#include "rtc_base/gunit.h"
#include "rtc_base/thread.h"
#include "video/config/encoder_stream_factory.h"
namespace cricket {
using ::webrtc::ParseRtpSsrc;
FakeAudioSendStream::FakeAudioSendStream(
    int id,
    const webrtc::AudioSendStream::Config& config)
    : id_(id), config_(config) {}
// Replaces the stored config and immediately reports success via `callback`.
void FakeAudioSendStream::Reconfigure(
    const webrtc::AudioSendStream::Config& config,
    webrtc::SetParametersCallback callback) {
  config_ = config;
  webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
}
const webrtc::AudioSendStream::Config& FakeAudioSendStream::GetConfig() const {
  return config_;
}
// Test hook: stores the stats that subsequent GetStats() calls will return.
void FakeAudioSendStream::SetStats(
    const webrtc::AudioSendStream::Stats& stats) {
  stats_ = stats;
}
FakeAudioSendStream::TelephoneEvent
FakeAudioSendStream::GetLatestTelephoneEvent() const {
  return latest_telephone_event_;
}
// Records the event parameters for later inspection and always reports
// success.
bool FakeAudioSendStream::SendTelephoneEvent(int payload_type,
                                             int payload_frequency,
                                             int event,
                                             int duration_ms) {
  latest_telephone_event_.payload_type = payload_type;
  latest_telephone_event_.payload_frequency = payload_frequency;
  latest_telephone_event_.event_code = event;
  latest_telephone_event_.duration_ms = duration_ms;
  return true;
}
void FakeAudioSendStream::SetMuted(bool muted) {
  muted_ = muted;
}
webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats() const {
  return stats_;
}
// The `has_remote_tracks` flag is ignored by this fake.
webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats(
    bool /*has_remote_tracks*/) const {
  return stats_;
}
FakeAudioReceiveStream::FakeAudioReceiveStream(
    int id,
    const webrtc::AudioReceiveStreamInterface::Config& config)
    : id_(id), config_(config) {}
const webrtc::AudioReceiveStreamInterface::Config&
FakeAudioReceiveStream::GetConfig() const {
  return config_;
}
// Test hook: stores the stats that subsequent GetStats() calls will return.
void FakeAudioReceiveStream::SetStats(
    const webrtc::AudioReceiveStreamInterface::Stats& stats) {
  stats_ = stats;
}
// True iff the given bytes are identical to the most recently delivered
// packet.
bool FakeAudioReceiveStream::VerifyLastPacket(const uint8_t* data,
                                              size_t length) const {
  return last_packet_ == rtc::Buffer(data, length);
}
// Counts and stores the packet; always reports success. The arrival time is
// ignored by this fake.
bool FakeAudioReceiveStream::DeliverRtp(const uint8_t* packet,
                                        size_t length,
                                        int64_t /* packet_time_us */) {
  ++received_packets_;
  last_packet_.SetData(packet, length);
  return true;
}
void FakeAudioReceiveStream::SetDepacketizerToDecoderFrameTransformer(
    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
  config_.frame_transformer = std::move(frame_transformer);
}
void FakeAudioReceiveStream::SetDecoderMap(
    std::map<int, webrtc::SdpAudioFormat> decoder_map) {
  config_.decoder_map = std::move(decoder_map);
}
void FakeAudioReceiveStream::SetNackHistory(int history_ms) {
  config_.rtp.nack.rtp_history_ms = history_ms;
}
void FakeAudioReceiveStream::SetNonSenderRttMeasurement(bool enabled) {
  config_.enable_non_sender_rtt = enabled;
}
void FakeAudioReceiveStream::SetFrameDecryptor(
    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
  config_.frame_decryptor = std::move(frame_decryptor);
}
// Returns whatever SetStats() stored; `get_and_clear_legacy_stats` is
// ignored.
webrtc::AudioReceiveStreamInterface::Stats FakeAudioReceiveStream::GetStats(
    bool get_and_clear_legacy_stats) const {
  return stats_;
}
void FakeAudioReceiveStream::SetSink(webrtc::AudioSinkInterface* sink) {
  sink_ = sink;
}
void FakeAudioReceiveStream::SetGain(float gain) {
  gain_ = gain;
}
// Constructs the fake send stream, validates the encoder settings, and runs
// an initial encoder (re)configuration with `encoder_config`.
FakeVideoSendStream::FakeVideoSendStream(
    webrtc::VideoSendStream::Config config,
    webrtc::VideoEncoderConfig encoder_config)
    : sending_(false),
      config_(std::move(config)),
      codec_settings_set_(false),
      resolution_scaling_enabled_(false),
      framerate_scaling_enabled_(false),
      source_(nullptr),
      num_swapped_frames_(0) {
  // `config` was moved into `config_` by the member initializer list above,
  // so validate the stored copy: reading the moved-from parameter here was a
  // use-after-move.
  RTC_DCHECK(config_.encoder_settings.encoder_factory != nullptr);
  RTC_DCHECK(config_.encoder_settings.bitrate_allocator_factory != nullptr);
  ReconfigureVideoEncoder(std::move(encoder_config));
}
FakeVideoSendStream::~FakeVideoSendStream() {
  // Detach from the frame source so it stops delivering frames to us.
  if (source_)
    source_->RemoveSink(this);
}
const webrtc::VideoSendStream::Config& FakeVideoSendStream::GetConfig() const {
  return config_;
}
const webrtc::VideoEncoderConfig& FakeVideoSendStream::GetEncoderConfig()
    const {
  return encoder_config_;
}
// Streams produced by the most recent encoder (re)configuration or frame
// size change.
const std::vector<webrtc::VideoStream>& FakeVideoSendStream::GetVideoStreams()
    const {
  return video_streams_;
}
bool FakeVideoSendStream::IsSending() const {
  return sending_;
}
// The Get*Settings() accessors below return false until an encoder
// configuration with encoder_specific_settings has been applied
// (codec_settings_set_); on success they copy out the stored settings.
bool FakeVideoSendStream::GetVp8Settings(
    webrtc::VideoCodecVP8* settings) const {
  if (!codec_settings_set_) {
    return false;
  }
  *settings = codec_specific_settings_.vp8;
  return true;
}
bool FakeVideoSendStream::GetVp9Settings(
    webrtc::VideoCodecVP9* settings) const {
  if (!codec_settings_set_) {
    return false;
  }
  *settings = codec_specific_settings_.vp9;
  return true;
}
bool FakeVideoSendStream::GetH264Settings(
    webrtc::VideoCodecH264* settings) const {
  if (!codec_settings_set_) {
    return false;
  }
  *settings = codec_specific_settings_.h264;
  return true;
}
bool FakeVideoSendStream::GetAv1Settings(
    webrtc::VideoCodecAV1* settings) const {
  if (!codec_settings_set_) {
    return false;
  }
  *settings = codec_specific_settings_.av1;
  return true;
}
int FakeVideoSendStream::GetNumberOfSwappedFrames() const {
  return num_swapped_frames_;
}
// NOTE(review): the three accessors below dereference `last_frame_` without
// checking it holds a value — callers are expected to deliver a frame first;
// confirm before calling on a fresh stream.
int FakeVideoSendStream::GetLastWidth() const {
  return last_frame_->width();
}
int FakeVideoSendStream::GetLastHeight() const {
  return last_frame_->height();
}
int64_t FakeVideoSendStream::GetLastTimestamp() const {
  RTC_DCHECK(last_frame_->ntp_time_ms() == 0);
  return last_frame_->render_time_ms();
}
// Sink callback: counts the frame and, when the frame geometry (size or
// rotation) differs from the previous frame, regenerates the encoder streams
// for the new dimensions before storing the frame as `last_frame_`.
void FakeVideoSendStream::OnFrame(const webrtc::VideoFrame& frame) {
  ++num_swapped_frames_;
  if (!last_frame_ || frame.width() != last_frame_->width() ||
      frame.height() != last_frame_->height() ||
      frame.rotation() != last_frame_->rotation()) {
    if (encoder_config_.video_stream_factory) {
      // Note: only tests set their own EncoderStreamFactory...
      video_streams_ =
          encoder_config_.video_stream_factory->CreateEncoderStreams(
              frame.width(), frame.height(), encoder_config_);
    } else {
      // Fall back to the production EncoderStreamFactory with the stored
      // encoder config.
      webrtc::VideoEncoder::EncoderInfo encoder_info;
      rtc::scoped_refptr<
          webrtc::VideoEncoderConfig::VideoStreamFactoryInterface>
          factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
              encoder_config_.video_format.name, encoder_config_.max_qp,
              encoder_config_.content_type ==
                  webrtc::VideoEncoderConfig::ContentType::kScreen,
              encoder_config_.legacy_conference_mode, encoder_info);
      video_streams_ = factory->CreateEncoderStreams(
          frame.width(), frame.height(), encoder_config_);
    }
  }
  last_frame_ = frame;
}
// Test hook: stores the stats that subsequent GetStats() calls will return.
void FakeVideoSendStream::SetStats(
    const webrtc::VideoSendStream::Stats& stats) {
  stats_ = stats;
}
webrtc::VideoSendStream::Stats FakeVideoSendStream::GetStats() {
  return stats_;
}
// Convenience overload: reconfigure without a completion callback.
void FakeVideoSendStream::ReconfigureVideoEncoder(
    webrtc::VideoEncoderConfig config) {
  ReconfigureVideoEncoder(std::move(config), nullptr);
}
// Applies a new encoder config: rebuilds the encoder streams for the last
// observed frame size (0x0 before any frame), copies any codec-specific
// settings into `codec_specific_settings_`, and reports success via
// `callback`.
void FakeVideoSendStream::ReconfigureVideoEncoder(
    webrtc::VideoEncoderConfig config,
    webrtc::SetParametersCallback callback) {
  int width, height;
  if (last_frame_) {
    width = last_frame_->width();
    height = last_frame_->height();
  } else {
    width = height = 0;
  }
  if (config.video_stream_factory) {
    // Note: only tests set their own EncoderStreamFactory...
    video_streams_ = config.video_stream_factory->CreateEncoderStreams(
        width, height, config);
  } else {
    webrtc::VideoEncoder::EncoderInfo encoder_info;
    rtc::scoped_refptr<webrtc::VideoEncoderConfig::VideoStreamFactoryInterface>
        factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
            config.video_format.name, config.max_qp,
            config.content_type ==
                webrtc::VideoEncoderConfig::ContentType::kScreen,
            config.legacy_conference_mode, encoder_info);
    video_streams_ = factory->CreateEncoderStreams(width, height, config);
  }
  if (config.encoder_specific_settings != nullptr) {
    // The stream factory may produce no streams (e.g. for a 0x0 size before
    // the first frame); guard the back() access and fall back to one
    // temporal layer instead of reading past the end of an empty vector.
    const unsigned char num_temporal_layers =
        video_streams_.empty()
            ? 1
            : static_cast<unsigned char>(
                  video_streams_.back().num_temporal_layers.value_or(1));
    if (config_.rtp.payload_name == "VP8") {
      config.encoder_specific_settings->FillVideoCodecVp8(
          &codec_specific_settings_.vp8);
      if (!video_streams_.empty()) {
        codec_specific_settings_.vp8.numberOfTemporalLayers =
            num_temporal_layers;
      }
    } else if (config_.rtp.payload_name == "VP9") {
      config.encoder_specific_settings->FillVideoCodecVp9(
          &codec_specific_settings_.vp9);
      if (!video_streams_.empty()) {
        codec_specific_settings_.vp9.numberOfTemporalLayers =
            num_temporal_layers;
      }
    } else if (config_.rtp.payload_name == "H264") {
      codec_specific_settings_.h264.numberOfTemporalLayers =
          num_temporal_layers;
    } else if (config_.rtp.payload_name == "AV1") {
      config.encoder_specific_settings->FillVideoCodecAv1(
          &codec_specific_settings_.av1);
    } else {
      ADD_FAILURE() << "Unsupported encoder payload: "
                    << config_.rtp.payload_name;
    }
  }
  codec_settings_set_ = config.encoder_specific_settings != nullptr;
  encoder_config_ = std::move(config);
  ++num_encoder_reconfigurations_;
  webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
}
void FakeVideoSendStream::Start() {
  sending_ = true;
}
void FakeVideoSendStream::Stop() {
  sending_ = false;
}
// Adaptation resources are not modeled by this fake: adding is a no-op and
// the returned list is always empty.
void FakeVideoSendStream::AddAdaptationResource(
    rtc::scoped_refptr<webrtc::Resource> resource) {}
std::vector<rtc::scoped_refptr<webrtc::Resource>>
FakeVideoSendStream::GetAdaptationResources() {
  return {};
}
// Switches the frame source, translating the degradation preference into the
// resolution/framerate scaling flags, and registers this stream as a sink on
// the new source (passing the injected sink wants only when resolution
// scaling is enabled).
void FakeVideoSendStream::SetSource(
    rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
    const webrtc::DegradationPreference& degradation_preference) {
  // Detach from the previous source before attaching to the new one.
  if (source_)
    source_->RemoveSink(this);
  source_ = source;
  switch (degradation_preference) {
    case webrtc::DegradationPreference::MAINTAIN_FRAMERATE:
      resolution_scaling_enabled_ = true;
      framerate_scaling_enabled_ = false;
      break;
    case webrtc::DegradationPreference::MAINTAIN_RESOLUTION:
      resolution_scaling_enabled_ = false;
      framerate_scaling_enabled_ = true;
      break;
    case webrtc::DegradationPreference::BALANCED:
      resolution_scaling_enabled_ = true;
      framerate_scaling_enabled_ = true;
      break;
    case webrtc::DegradationPreference::DISABLED:
      resolution_scaling_enabled_ = false;
      framerate_scaling_enabled_ = false;
      break;
  }
  if (source)
    source->AddOrUpdateSink(this, resolution_scaling_enabled_
                                      ? sink_wants_
                                      : rtc::VideoSinkWants());
}
// Records the rids for which keyframes were requested, for test inspection.
void FakeVideoSendStream::GenerateKeyFrame(
    const std::vector<std::string>& rids) {
  keyframes_requested_by_rid_ = rids;
}
// Test hook: overrides the sink wants pushed to the source.
// NOTE(review): dereferences `source_` unconditionally — callers must have
// attached a source via SetSource() first.
void FakeVideoSendStream::InjectVideoSinkWants(
    const rtc::VideoSinkWants& wants) {
  sink_wants_ = wants;
  source_->AddOrUpdateSink(this, wants);
}
FakeVideoReceiveStream::FakeVideoReceiveStream(
    webrtc::VideoReceiveStreamInterface::Config config)
    : config_(std::move(config)), receiving_(false) {}
const webrtc::VideoReceiveStreamInterface::Config&
FakeVideoReceiveStream::GetConfig() const {
  return config_;
}
bool FakeVideoReceiveStream::IsReceiving() const {
  return receiving_;
}
// Test hook: pushes a frame directly to the configured renderer.
void FakeVideoReceiveStream::InjectFrame(const webrtc::VideoFrame& frame) {
  config_.renderer->OnFrame(frame);
}
webrtc::VideoReceiveStreamInterface::Stats FakeVideoReceiveStream::GetStats()
    const {
  return stats_;
}
void FakeVideoReceiveStream::Start() {
  receiving_ = true;
}
void FakeVideoReceiveStream::Stop() {
  receiving_ = false;
}
// Test hook: stores the stats that subsequent GetStats() calls will return.
void FakeVideoReceiveStream::SetStats(
    const webrtc::VideoReceiveStreamInterface::Stats& stats) {
  stats_ = stats;
}
FakeFlexfecReceiveStream::FakeFlexfecReceiveStream(
    const webrtc::FlexfecReceiveStream::Config config)
    : config_(std::move(config)) {}
const webrtc::FlexfecReceiveStream::Config&
FakeFlexfecReceiveStream::GetConfig() const {
  return config_;
}
// Packet delivery is intentionally unimplemented for this fake.
void FakeFlexfecReceiveStream::OnRtpPacket(const webrtc::RtpPacketReceived&) {
  RTC_DCHECK_NOTREACHED() << "Not implemented.";
}
// Single-thread convenience constructor: uses the current thread as both
// worker and network thread.
FakeCall::FakeCall(webrtc::test::ScopedKeyValueConfig* field_trials)
    : FakeCall(rtc::Thread::Current(), rtc::Thread::Current(), field_trials) {}
FakeCall::FakeCall(webrtc::TaskQueueBase* worker_thread,
                   webrtc::TaskQueueBase* network_thread,
                   webrtc::test::ScopedKeyValueConfig* field_trials)
    : network_thread_(network_thread),
      worker_thread_(worker_thread),
      audio_network_state_(webrtc::kNetworkUp),
      video_network_state_(webrtc::kNetworkUp),
      num_created_send_streams_(0),
      num_created_receive_streams_(0),
      // Fall back to an internally owned trials object when none is supplied.
      trials_(field_trials ? field_trials : &fallback_trials_) {}
FakeCall::~FakeCall() {
  // Tests must destroy every stream they created before the call goes away.
  EXPECT_EQ(0u, video_send_streams_.size());
  EXPECT_EQ(0u, audio_send_streams_.size());
  EXPECT_EQ(0u, video_receive_streams_.size());
  EXPECT_EQ(0u, audio_receive_streams_.size());
}
const std::vector<FakeVideoSendStream*>& FakeCall::GetVideoSendStreams() {
  return video_send_streams_;
}
const std::vector<FakeVideoReceiveStream*>& FakeCall::GetVideoReceiveStreams() {
  return video_receive_streams_;
}
// Returns the video receive stream with the given remote ssrc, or nullptr.
const FakeVideoReceiveStream* FakeCall::GetVideoReceiveStream(uint32_t ssrc) {
  for (const auto* p : GetVideoReceiveStreams()) {
    if (p->GetConfig().rtp.remote_ssrc == ssrc) {
      return p;
    }
  }
  return nullptr;
}
const std::vector<FakeAudioSendStream*>& FakeCall::GetAudioSendStreams() {
  return audio_send_streams_;
}
// Returns the audio send stream with the given local ssrc, or nullptr.
const FakeAudioSendStream* FakeCall::GetAudioSendStream(uint32_t ssrc) {
  for (const auto* p : GetAudioSendStreams()) {
    if (p->GetConfig().rtp.ssrc == ssrc) {
      return p;
    }
  }
  return nullptr;
}
const std::vector<FakeAudioReceiveStream*>& FakeCall::GetAudioReceiveStreams() {
  return audio_receive_streams_;
}
// Returns the audio receive stream with the given remote ssrc, or nullptr.
const FakeAudioReceiveStream* FakeCall::GetAudioReceiveStream(uint32_t ssrc) {
  for (const auto* p : GetAudioReceiveStreams()) {
    if (p->GetConfig().rtp.remote_ssrc == ssrc) {
      return p;
    }
  }
  return nullptr;
}
const std::vector<FakeFlexfecReceiveStream*>&
FakeCall::GetFlexfecReceiveStreams() {
  return flexfec_receive_streams_;
}
// Returns the tracked network state for AUDIO or VIDEO; any other media type
// is a test failure and reports kNetworkDown.
webrtc::NetworkState FakeCall::GetNetworkState(webrtc::MediaType media) const {
  switch (media) {
    case webrtc::MediaType::AUDIO:
      return audio_network_state_;
    case webrtc::MediaType::VIDEO:
      return video_network_state_;
    case webrtc::MediaType::DATA:
    case webrtc::MediaType::ANY:
      ADD_FAILURE() << "GetNetworkState called with unknown parameter.";
      return webrtc::kNetworkDown;
  }
  // Even though all the values for the enum class are listed above, the
  // compiler will emit a warning as the method may be called with a value
  // outside of the valid enum range, unless this case is also handled.
  ADD_FAILURE() << "GetNetworkState called with unknown parameter.";
  return webrtc::kNetworkDown;
}
// The Create*/Destroy* pairs below own the fake streams via raw pointers in
// the corresponding vectors; Destroy* reports a test failure when handed a
// stream this call did not create. Creation counters feed
// GetNumCreatedSendStreams()/GetNumCreatedReceiveStreams().
webrtc::AudioSendStream* FakeCall::CreateAudioSendStream(
    const webrtc::AudioSendStream::Config& config) {
  FakeAudioSendStream* fake_stream =
      new FakeAudioSendStream(next_stream_id_++, config);
  audio_send_streams_.push_back(fake_stream);
  ++num_created_send_streams_;
  return fake_stream;
}
void FakeCall::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) {
  auto it = absl::c_find(audio_send_streams_,
                         static_cast<FakeAudioSendStream*>(send_stream));
  if (it == audio_send_streams_.end()) {
    ADD_FAILURE() << "DestroyAudioSendStream called with unknown parameter.";
  } else {
    delete *it;
    audio_send_streams_.erase(it);
  }
}
webrtc::AudioReceiveStreamInterface* FakeCall::CreateAudioReceiveStream(
    const webrtc::AudioReceiveStreamInterface::Config& config) {
  audio_receive_streams_.push_back(
      new FakeAudioReceiveStream(next_stream_id_++, config));
  ++num_created_receive_streams_;
  return audio_receive_streams_.back();
}
void FakeCall::DestroyAudioReceiveStream(
    webrtc::AudioReceiveStreamInterface* receive_stream) {
  auto it = absl::c_find(audio_receive_streams_,
                         static_cast<FakeAudioReceiveStream*>(receive_stream));
  if (it == audio_receive_streams_.end()) {
    ADD_FAILURE() << "DestroyAudioReceiveStream called with unknown parameter.";
  } else {
    delete *it;
    audio_receive_streams_.erase(it);
  }
}
webrtc::VideoSendStream* FakeCall::CreateVideoSendStream(
    webrtc::VideoSendStream::Config config,
    webrtc::VideoEncoderConfig encoder_config) {
  FakeVideoSendStream* fake_stream =
      new FakeVideoSendStream(std::move(config), std::move(encoder_config));
  video_send_streams_.push_back(fake_stream);
  ++num_created_send_streams_;
  return fake_stream;
}
void FakeCall::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) {
  auto it = absl::c_find(video_send_streams_,
                         static_cast<FakeVideoSendStream*>(send_stream));
  if (it == video_send_streams_.end()) {
    ADD_FAILURE() << "DestroyVideoSendStream called with unknown parameter.";
  } else {
    delete *it;
    video_send_streams_.erase(it);
  }
}
webrtc::VideoReceiveStreamInterface* FakeCall::CreateVideoReceiveStream(
    webrtc::VideoReceiveStreamInterface::Config config) {
  video_receive_streams_.push_back(
      new FakeVideoReceiveStream(std::move(config)));
  ++num_created_receive_streams_;
  return video_receive_streams_.back();
}
void FakeCall::DestroyVideoReceiveStream(
    webrtc::VideoReceiveStreamInterface* receive_stream) {
  auto it = absl::c_find(video_receive_streams_,
                         static_cast<FakeVideoReceiveStream*>(receive_stream));
  if (it == video_receive_streams_.end()) {
    ADD_FAILURE() << "DestroyVideoReceiveStream called with unknown parameter.";
  } else {
    delete *it;
    video_receive_streams_.erase(it);
  }
}
webrtc::FlexfecReceiveStream* FakeCall::CreateFlexfecReceiveStream(
    const webrtc::FlexfecReceiveStream::Config config) {
  FakeFlexfecReceiveStream* fake_stream =
      new FakeFlexfecReceiveStream(std::move(config));
  flexfec_receive_streams_.push_back(fake_stream);
  ++num_created_receive_streams_;
  return fake_stream;
}
void FakeCall::DestroyFlexfecReceiveStream(
    webrtc::FlexfecReceiveStream* receive_stream) {
  auto it =
      absl::c_find(flexfec_receive_streams_,
                   static_cast<FakeFlexfecReceiveStream*>(receive_stream));
  if (it == flexfec_receive_streams_.end()) {
    ADD_FAILURE()
        << "DestroyFlexfecReceiveStream called with unknown parameter.";
  } else {
    delete *it;
    flexfec_receive_streams_.erase(it);
  }
}
// Adaptation resources are not modeled by this fake.
void FakeCall::AddAdaptationResource(
    rtc::scoped_refptr<webrtc::Resource> resource) {}
// The call itself acts as the packet receiver.
webrtc::PacketReceiver* FakeCall::Receiver() {
  return this;
}
// Routes an incoming RTP packet to a matching receive stream. If no stream
// matches, the undemuxable-packet handler gets a chance to resolve the ssrc
// (e.g. by creating a stream), after which delivery is retried once. The
// packet is recorded for test inspection either way.
void FakeCall::DeliverRtpPacket(
    webrtc::MediaType media_type,
    webrtc::RtpPacketReceived packet,
    OnUndemuxablePacketHandler undemuxable_packet_handler) {
  if (!DeliverPacketInternal(media_type, packet.Ssrc(), packet.Buffer(),
                             packet.arrival_time())) {
    if (undemuxable_packet_handler(packet)) {
      DeliverPacketInternal(media_type, packet.Ssrc(), packet.Buffer(),
                            packet.arrival_time());
    }
  }
  last_received_rtp_packet_ = packet;
}
// Attempts delivery to a receive stream matching `ssrc` (media or, for
// video, rtx). Returns true when a stream accepted the packet; audio packets
// are also forwarded to the stream's DeliverRtp(). Per-ssrc delivery counts
// are tracked for test assertions.
bool FakeCall::DeliverPacketInternal(webrtc::MediaType media_type,
                                     uint32_t ssrc,
                                     const rtc::CopyOnWriteBuffer& packet,
                                     webrtc::Timestamp arrival_time) {
  // 12 bytes is the minimum RTP header size.
  EXPECT_GE(packet.size(), 12u);
  RTC_DCHECK(arrival_time.IsFinite());
  RTC_DCHECK(media_type == webrtc::MediaType::AUDIO ||
             media_type == webrtc::MediaType::VIDEO);
  if (media_type == webrtc::MediaType::VIDEO) {
    for (auto receiver : video_receive_streams_) {
      if (receiver->GetConfig().rtp.remote_ssrc == ssrc ||
          receiver->GetConfig().rtp.rtx_ssrc == ssrc) {
        ++delivered_packets_by_ssrc_[ssrc];
        return true;
      }
    }
  }
  if (media_type == webrtc::MediaType::AUDIO) {
    for (auto receiver : audio_receive_streams_) {
      if (receiver->GetConfig().rtp.remote_ssrc == ssrc) {
        receiver->DeliverRtp(packet.cdata(), packet.size(), arrival_time.us());
        ++delivered_packets_by_ssrc_[ssrc];
        return true;
      }
    }
  }
  return false;
}
// Test hook: stores the stats that subsequent GetStats() calls will return.
void FakeCall::SetStats(const webrtc::Call::Stats& stats) {
  stats_ = stats;
}
int FakeCall::GetNumCreatedSendStreams() const {
  return num_created_send_streams_;
}
int FakeCall::GetNumCreatedReceiveStreams() const {
  return num_created_receive_streams_;
}
webrtc::Call::Stats FakeCall::GetStats() const {
  return stats_;
}
webrtc::TaskQueueBase* FakeCall::network_thread() const {
  return network_thread_;
}
webrtc::TaskQueueBase* FakeCall::worker_thread() const {
  return worker_thread_;
}
// Records the network state for AUDIO or VIDEO; any other media type is a
// test failure.
void FakeCall::SignalChannelNetworkState(webrtc::MediaType media,
                                         webrtc::NetworkState state) {
  switch (media) {
    case webrtc::MediaType::AUDIO:
      audio_network_state_ = state;
      break;
    case webrtc::MediaType::VIDEO:
      video_network_state_ = state;
      break;
    case webrtc::MediaType::DATA:
    case webrtc::MediaType::ANY:
      ADD_FAILURE()
          << "SignalChannelNetworkState called with unknown parameter.";
  }
}
// Transport overhead changes are ignored by this fake.
void FakeCall::OnAudioTransportOverheadChanged(
    int transport_overhead_per_packet) {}
// The OnLocalSsrcUpdated overloads forward the new local ssrc to the fake
// stream implementation; the casts are safe because this call only ever
// hands out its own fake stream types.
void FakeCall::OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
                                  uint32_t local_ssrc) {
  auto& fake_stream = static_cast<FakeAudioReceiveStream&>(stream);
  fake_stream.SetLocalSsrc(local_ssrc);
}
void FakeCall::OnLocalSsrcUpdated(webrtc::VideoReceiveStreamInterface& stream,
                                  uint32_t local_ssrc) {
  auto& fake_stream = static_cast<FakeVideoReceiveStream&>(stream);
  fake_stream.SetLocalSsrc(local_ssrc);
}
void FakeCall::OnLocalSsrcUpdated(webrtc::FlexfecReceiveStream& stream,
                                  uint32_t local_ssrc) {
  auto& fake_stream = static_cast<FakeFlexfecReceiveStream&>(stream);
  fake_stream.SetLocalSsrc(local_ssrc);
}
// Forwards the new sync group to the fake audio receive stream.
void FakeCall::OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
                                 absl::string_view sync_group) {
  auto& fake_stream = static_cast<FakeAudioReceiveStream&>(stream);
  fake_stream.SetSyncGroup(sync_group);
}
// Records every sent packet, and separately remembers the most recent packet
// id that was actually assigned (a negative id means "no id").
void FakeCall::OnSentPacket(const rtc::SentPacket& sent_packet) {
  last_sent_packet_ = sent_packet;
  const auto packet_id = sent_packet.packet_id;
  if (packet_id >= 0) {
    last_sent_nonnegative_packet_id_ = packet_id;
  }
}
} // namespace cricket

View file

@ -0,0 +1,518 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains fake implementations, for use in unit tests, of the
// following classes:
//
// webrtc::Call
// webrtc::AudioSendStream
// webrtc::AudioReceiveStreamInterface
// webrtc::VideoSendStream
// webrtc::VideoReceiveStreamInterface
#ifndef MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
#define MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/video_frame.h"
#include "call/audio_receive_stream.h"
#include "call/audio_send_stream.h"
#include "call/call.h"
#include "call/flexfec_receive_stream.h"
#include "call/test/mock_rtp_transport_controller_send.h"
#include "call/video_receive_stream.h"
#include "call/video_send_stream.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/buffer.h"
#include "test/scoped_key_value_config.h"
namespace cricket {
// Fake AudioSendStream that records the configuration, stats and the most
// recent telephone event it was given, for inspection by tests.
class FakeAudioSendStream final : public webrtc::AudioSendStream {
 public:
  // Parameters of the most recent SendTelephoneEvent() call.
  struct TelephoneEvent {
    int payload_type = -1;
    int payload_frequency = -1;
    int event_code = 0;
    int duration_ms = 0;
  };

  explicit FakeAudioSendStream(int id,
                               const webrtc::AudioSendStream::Config& config);

  // Test-assigned identifier (not part of the webrtc interface).
  int id() const { return id_; }
  const webrtc::AudioSendStream::Config& GetConfig() const override;
  // Sets the canned stats that GetStats() returns.
  void SetStats(const webrtc::AudioSendStream::Stats& stats);
  TelephoneEvent GetLatestTelephoneEvent() const;
  bool IsSending() const { return sending_; }
  bool muted() const { return muted_; }

 private:
  // webrtc::AudioSendStream implementation.
  void Reconfigure(const webrtc::AudioSendStream::Config& config,
                   webrtc::SetParametersCallback callback) override;
  void Start() override { sending_ = true; }
  void Stop() override { sending_ = false; }
  // Audio data is discarded by this fake.
  void SendAudioData(std::unique_ptr<webrtc::AudioFrame> audio_frame) override {
  }
  bool SendTelephoneEvent(int payload_type,
                          int payload_frequency,
                          int event,
                          int duration_ms) override;
  void SetMuted(bool muted) override;
  webrtc::AudioSendStream::Stats GetStats() const override;
  webrtc::AudioSendStream::Stats GetStats(
      bool has_remote_tracks) const override;

  int id_ = -1;
  TelephoneEvent latest_telephone_event_;
  webrtc::AudioSendStream::Config config_;
  webrtc::AudioSendStream::Stats stats_;
  bool sending_ = false;
  bool muted_ = false;
};
// Fake AudioReceiveStreamInterface that records configuration mutations and
// delivered packets so tests can verify channel plumbing.
class FakeAudioReceiveStream final
    : public webrtc::AudioReceiveStreamInterface {
 public:
  explicit FakeAudioReceiveStream(
      int id,
      const webrtc::AudioReceiveStreamInterface::Config& config);

  // Test-assigned identifier (not part of the webrtc interface).
  int id() const { return id_; }
  const webrtc::AudioReceiveStreamInterface::Config& GetConfig() const;
  // Sets the canned stats that GetStats() returns.
  void SetStats(const webrtc::AudioReceiveStreamInterface::Stats& stats);
  int received_packets() const { return received_packets_; }
  // Returns true if the most recently delivered packet equals data/length.
  bool VerifyLastPacket(const uint8_t* data, size_t length) const;
  const webrtc::AudioSinkInterface* sink() const { return sink_; }
  float gain() const { return gain_; }
  bool DeliverRtp(const uint8_t* packet, size_t length, int64_t packet_time_us);
  bool started() const { return started_; }
  // NOTE: "mininum" (sic) is an established typo; the name is part of the
  // test API, so it is kept for compatibility with existing callers.
  int base_mininum_playout_delay_ms() const {
    return base_mininum_playout_delay_ms_;
  }

  // Called by FakeCall::OnLocalSsrcUpdated().
  void SetLocalSsrc(uint32_t local_ssrc) {
    config_.rtp.local_ssrc = local_ssrc;
  }
  // Called by FakeCall::OnUpdateSyncGroup().
  void SetSyncGroup(absl::string_view sync_group) {
    config_.sync_group = std::string(sync_group);
  }

  // webrtc::AudioReceiveStreamInterface implementation.
  uint32_t remote_ssrc() const override { return config_.rtp.remote_ssrc; }
  void Start() override { started_ = true; }
  void Stop() override { started_ = false; }
  bool IsRunning() const override { return started_; }
  void SetDepacketizerToDecoderFrameTransformer(
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override;
  void SetDecoderMap(
      std::map<int, webrtc::SdpAudioFormat> decoder_map) override;
  void SetNackHistory(int history_ms) override;
  void SetNonSenderRttMeasurement(bool enabled) override;
  void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
                             frame_decryptor) override;
  webrtc::AudioReceiveStreamInterface::Stats GetStats(
      bool get_and_clear_legacy_stats) const override;
  void SetSink(webrtc::AudioSinkInterface* sink) override;
  void SetGain(float gain) override;
  bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override {
    base_mininum_playout_delay_ms_ = delay_ms;
    return true;
  }
  int GetBaseMinimumPlayoutDelayMs() const override {
    return base_mininum_playout_delay_ms_;
  }
  // RTP source tracking is not implemented by this fake.
  std::vector<webrtc::RtpSource> GetSources() const override {
    return std::vector<webrtc::RtpSource>();
  }

 private:
  int id_ = -1;
  webrtc::AudioReceiveStreamInterface::Config config_;
  webrtc::AudioReceiveStreamInterface::Stats stats_;
  int received_packets_ = 0;
  webrtc::AudioSinkInterface* sink_ = nullptr;
  float gain_ = 1.0f;
  // Copy of the most recently delivered RTP packet (for VerifyLastPacket).
  rtc::Buffer last_packet_;
  bool started_ = false;
  int base_mininum_playout_delay_ms_ = 0;
};
// Fake VideoSendStream that captures the send/encoder configs and the frames
// delivered through rtc::VideoSinkInterface so tests can inspect them.
class FakeVideoSendStream final
    : public webrtc::VideoSendStream,
      public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  FakeVideoSendStream(webrtc::VideoSendStream::Config config,
                      webrtc::VideoEncoderConfig encoder_config);
  ~FakeVideoSendStream() override;

  const webrtc::VideoSendStream::Config& GetConfig() const;
  const webrtc::VideoEncoderConfig& GetEncoderConfig() const;
  const std::vector<webrtc::VideoStream>& GetVideoStreams() const;
  bool IsSending() const;
  // Codec-specific settings captured from the encoder config; presumably
  // each getter reports failure when the stored settings belong to a
  // different codec — confirm against the .cc.
  bool GetVp8Settings(webrtc::VideoCodecVP8* settings) const;
  bool GetVp9Settings(webrtc::VideoCodecVP9* settings) const;
  bool GetH264Settings(webrtc::VideoCodecH264* settings) const;
  bool GetAv1Settings(webrtc::VideoCodecAV1* settings) const;

  // Properties of the frames received through OnFrame().
  int GetNumberOfSwappedFrames() const;
  int GetLastWidth() const;
  int GetLastHeight() const;
  int64_t GetLastTimestamp() const;
  // Sets the canned stats that GetStats() returns.
  void SetStats(const webrtc::VideoSendStream::Stats& stats);
  int num_encoder_reconfigurations() const {
    return num_encoder_reconfigurations_;
  }

  bool resolution_scaling_enabled() const {
    return resolution_scaling_enabled_;
  }
  bool framerate_scaling_enabled() const { return framerate_scaling_enabled_; }
  void InjectVideoSinkWants(const rtc::VideoSinkWants& wants);

  rtc::VideoSourceInterface<webrtc::VideoFrame>* source() const {
    return source_;
  }
  void GenerateKeyFrame(const std::vector<std::string>& rids);
  // Rids passed to the most recent GenerateKeyFrame() call.
  const std::vector<std::string>& GetKeyFramesRequested() const {
    return keyframes_requested_by_rid_;
  }

 private:
  // rtc::VideoSinkInterface<VideoFrame> implementation.
  void OnFrame(const webrtc::VideoFrame& frame) override;

  // webrtc::VideoSendStream implementation.
  void Start() override;
  void Stop() override;
  bool started() override { return IsSending(); }
  void AddAdaptationResource(
      rtc::scoped_refptr<webrtc::Resource> resource) override;
  std::vector<rtc::scoped_refptr<webrtc::Resource>> GetAdaptationResources()
      override;
  void SetSource(
      rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
      const webrtc::DegradationPreference& degradation_preference) override;
  webrtc::VideoSendStream::Stats GetStats() override;
  void ReconfigureVideoEncoder(webrtc::VideoEncoderConfig config) override;
  void ReconfigureVideoEncoder(webrtc::VideoEncoderConfig config,
                               webrtc::SetParametersCallback callback) override;

  bool sending_;
  webrtc::VideoSendStream::Config config_;
  webrtc::VideoEncoderConfig encoder_config_;
  std::vector<webrtc::VideoStream> video_streams_;
  rtc::VideoSinkWants sink_wants_;

  // The union below only holds meaningful data when codec_settings_set_ is
  // true; which member is active is implied by the encoder config.
  bool codec_settings_set_;
  union CodecSpecificSettings {
    webrtc::VideoCodecVP8 vp8;
    webrtc::VideoCodecVP9 vp9;
    webrtc::VideoCodecH264 h264;
    webrtc::VideoCodecAV1 av1;
  } codec_specific_settings_;
  bool resolution_scaling_enabled_;
  bool framerate_scaling_enabled_;
  rtc::VideoSourceInterface<webrtc::VideoFrame>* source_;
  int num_swapped_frames_;
  absl::optional<webrtc::VideoFrame> last_frame_;
  webrtc::VideoSendStream::Stats stats_;
  int num_encoder_reconfigurations_ = 0;
  std::vector<std::string> keyframes_requested_by_rid_;
};
// Fake VideoReceiveStreamInterface that stores its config, reflects config
// mutations back through GetConfig(), and returns canned stats.
class FakeVideoReceiveStream final
    : public webrtc::VideoReceiveStreamInterface {
 public:
  explicit FakeVideoReceiveStream(
      webrtc::VideoReceiveStreamInterface::Config config);

  const webrtc::VideoReceiveStreamInterface::Config& GetConfig() const;
  bool IsReceiving() const;
  // Test hook: feeds `frame` into the stream.
  void InjectFrame(const webrtc::VideoFrame& frame);
  // Sets the canned stats that GetStats() returns.
  void SetStats(const webrtc::VideoReceiveStreamInterface::Stats& stats);
  // RTP source tracking is not implemented by this fake.
  std::vector<webrtc::RtpSource> GetSources() const override {
    return std::vector<webrtc::RtpSource>();
  }
  // NOTE: "mininum" (sic) is an established typo kept for compatibility.
  int base_mininum_playout_delay_ms() const {
    return base_mininum_playout_delay_ms_;
  }

  // Called by FakeCall::OnLocalSsrcUpdated().
  void SetLocalSsrc(uint32_t local_ssrc) {
    config_.rtp.local_ssrc = local_ssrc;
  }
  void UpdateRtxSsrc(uint32_t ssrc) { config_.rtp.rtx_ssrc = ssrc; }

  // webrtc::VideoReceiveStreamInterface implementation. Most setters simply
  // mirror the new value into config_ so tests can observe it.
  void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
                             frame_decryptor) override {}
  void SetDepacketizerToDecoderFrameTransformer(
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override {}
  RecordingState SetAndGetRecordingState(RecordingState state,
                                         bool generate_key_frame) override {
    return RecordingState();
  }
  void GenerateKeyFrame() override {}
  void SetRtcpMode(webrtc::RtcpMode mode) override {
    config_.rtp.rtcp_mode = mode;
  }
  void SetFlexFecProtection(webrtc::RtpPacketSinkInterface* sink) override {
    config_.rtp.packet_sink_ = sink;
    config_.rtp.protected_by_flexfec = (sink != nullptr);
  }
  void SetLossNotificationEnabled(bool enabled) override {
    config_.rtp.lntf.enabled = enabled;
  }
  void SetNackHistory(webrtc::TimeDelta history) override {
    config_.rtp.nack.rtp_history_ms = history.ms();
  }
  void SetProtectionPayloadTypes(int red_payload_type,
                                 int ulpfec_payload_type) override {
    config_.rtp.red_payload_type = red_payload_type;
    config_.rtp.ulpfec_payload_type = ulpfec_payload_type;
  }
  void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) override {
    config_.rtp.rtcp_xr = rtcp_xr;
  }
  void SetAssociatedPayloadTypes(std::map<int, int> associated_payload_types) {
    config_.rtp.rtx_associated_payload_types =
        std::move(associated_payload_types);
  }
  void Start() override;
  void Stop() override;
  webrtc::VideoReceiveStreamInterface::Stats GetStats() const override;
  bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override {
    base_mininum_playout_delay_ms_ = delay_ms;
    return true;
  }
  int GetBaseMinimumPlayoutDelayMs() const override {
    return base_mininum_playout_delay_ms_;
  }

 private:
  webrtc::VideoReceiveStreamInterface::Config config_;
  bool receiving_;
  webrtc::VideoReceiveStreamInterface::Stats stats_;
  int base_mininum_playout_delay_ms_ = 0;
};
// Fake FlexfecReceiveStream that simply stores its config; delivered packets
// and stats are not tracked.
class FakeFlexfecReceiveStream final : public webrtc::FlexfecReceiveStream {
 public:
  explicit FakeFlexfecReceiveStream(
      const webrtc::FlexfecReceiveStream::Config config);

  // Called by FakeCall::OnLocalSsrcUpdated().
  void SetLocalSsrc(uint32_t local_ssrc) {
    config_.rtp.local_ssrc = local_ssrc;
  }

  void SetRtcpMode(webrtc::RtcpMode mode) override { config_.rtcp_mode = mode; }

  int payload_type() const override { return config_.payload_type; }
  void SetPayloadType(int payload_type) override {
    config_.payload_type = payload_type;
  }

  const webrtc::FlexfecReceiveStream::Config& GetConfig() const;

  uint32_t remote_ssrc() const { return config_.rtp.remote_ssrc; }

  // No receive statistics are collected by this fake.
  const webrtc::ReceiveStatistics* GetStats() const override { return nullptr; }

 private:
  // webrtc::RtpPacketSinkInterface implementation.
  void OnRtpPacket(const webrtc::RtpPacketReceived& packet) override;

  webrtc::FlexfecReceiveStream::Config config_;
};
// Fake webrtc::Call that hands out the fake send/receive streams declared
// above and records delivered packets, network state and field trials so
// tests can assert on them.
class FakeCall final : public webrtc::Call, public webrtc::PacketReceiver {
 public:
  explicit FakeCall(webrtc::test::ScopedKeyValueConfig* field_trials = nullptr);
  FakeCall(webrtc::TaskQueueBase* worker_thread,
           webrtc::TaskQueueBase* network_thread,
           webrtc::test::ScopedKeyValueConfig* field_trials = nullptr);
  ~FakeCall() override;

  webrtc::MockRtpTransportControllerSend* GetMockTransportControllerSend() {
    return &transport_controller_send_;
  }

  // Accessors for the streams created through the webrtc::Call interface.
  const std::vector<FakeVideoSendStream*>& GetVideoSendStreams();
  const std::vector<FakeVideoReceiveStream*>& GetVideoReceiveStreams();
  const std::vector<FakeAudioSendStream*>& GetAudioSendStreams();
  const FakeAudioSendStream* GetAudioSendStream(uint32_t ssrc);
  const std::vector<FakeAudioReceiveStream*>& GetAudioReceiveStreams();
  const FakeAudioReceiveStream* GetAudioReceiveStream(uint32_t ssrc);
  const FakeVideoReceiveStream* GetVideoReceiveStream(uint32_t ssrc);
  const std::vector<FakeFlexfecReceiveStream*>& GetFlexfecReceiveStreams();

  rtc::SentPacket last_sent_packet() const { return last_sent_packet_; }
  const webrtc::RtpPacketReceived& last_received_rtp_packet() const {
    return last_received_rtp_packet_;
  }
  // Number of RTP packets delivered for `ssrc` (0 if none were seen).
  size_t GetDeliveredPacketsForSsrc(uint32_t ssrc) const {
    auto it = delivered_packets_by_ssrc_.find(ssrc);
    return it != delivered_packets_by_ssrc_.end() ? it->second : 0u;
  }

  // This is useful if we care about the last media packet (with id populated)
  // but not the last ICE packet (with -1 ID).
  int last_sent_nonnegative_packet_id() const {
    return last_sent_nonnegative_packet_id_;
  }

  webrtc::NetworkState GetNetworkState(webrtc::MediaType media) const;
  int GetNumCreatedSendStreams() const;
  int GetNumCreatedReceiveStreams() const;
  // Sets the canned stats that GetStats() returns.
  void SetStats(const webrtc::Call::Stats& stats);

  // Bitrate preferences are ignored by this fake.
  void SetClientBitratePreferences(
      const webrtc::BitrateSettings& preferences) override {}

  // Layers `field_trial_string` on top of the currently active field trials.
  void SetFieldTrial(const std::string& field_trial_string) {
    trials_overrides_ = std::make_unique<webrtc::test::ScopedKeyValueConfig>(
        *trials_, field_trial_string);
  }

  const webrtc::FieldTrialsView& trials() const override { return *trials_; }

 private:
  // webrtc::Call implementation. Each Create* registers the stream in the
  // corresponding vector; each Destroy* removes it.
  webrtc::AudioSendStream* CreateAudioSendStream(
      const webrtc::AudioSendStream::Config& config) override;
  void DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) override;
  webrtc::AudioReceiveStreamInterface* CreateAudioReceiveStream(
      const webrtc::AudioReceiveStreamInterface::Config& config) override;
  void DestroyAudioReceiveStream(
      webrtc::AudioReceiveStreamInterface* receive_stream) override;
  webrtc::VideoSendStream* CreateVideoSendStream(
      webrtc::VideoSendStream::Config config,
      webrtc::VideoEncoderConfig encoder_config) override;
  void DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) override;
  webrtc::VideoReceiveStreamInterface* CreateVideoReceiveStream(
      webrtc::VideoReceiveStreamInterface::Config config) override;
  void DestroyVideoReceiveStream(
      webrtc::VideoReceiveStreamInterface* receive_stream) override;
  webrtc::FlexfecReceiveStream* CreateFlexfecReceiveStream(
      const webrtc::FlexfecReceiveStream::Config config) override;
  void DestroyFlexfecReceiveStream(
      webrtc::FlexfecReceiveStream* receive_stream) override;
  void AddAdaptationResource(
      rtc::scoped_refptr<webrtc::Resource> resource) override;
  webrtc::PacketReceiver* Receiver() override;

  // webrtc::PacketReceiver implementation.
  // RTCP is ignored by this fake.
  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override {}
  void DeliverRtpPacket(
      webrtc::MediaType media_type,
      webrtc::RtpPacketReceived packet,
      OnUndemuxablePacketHandler un_demuxable_packet_handler) override;
  bool DeliverPacketInternal(webrtc::MediaType media_type,
                             uint32_t ssrc,
                             const rtc::CopyOnWriteBuffer& packet,
                             webrtc::Timestamp arrival_time);

  webrtc::RtpTransportControllerSendInterface* GetTransportControllerSend()
      override {
    return &transport_controller_send_;
  }

  webrtc::Call::Stats GetStats() const override;

  webrtc::TaskQueueBase* network_thread() const override;
  webrtc::TaskQueueBase* worker_thread() const override;

  void SignalChannelNetworkState(webrtc::MediaType media,
                                 webrtc::NetworkState state) override;
  void OnAudioTransportOverheadChanged(
      int transport_overhead_per_packet) override;
  void OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
                          uint32_t local_ssrc) override;
  void OnLocalSsrcUpdated(webrtc::VideoReceiveStreamInterface& stream,
                          uint32_t local_ssrc) override;
  void OnLocalSsrcUpdated(webrtc::FlexfecReceiveStream& stream,
                          uint32_t local_ssrc) override;
  void OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
                         absl::string_view sync_group) override;
  void OnSentPacket(const rtc::SentPacket& sent_packet) override;

  webrtc::TaskQueueBase* const network_thread_;
  webrtc::TaskQueueBase* const worker_thread_;

  ::testing::NiceMock<webrtc::MockRtpTransportControllerSend>
      transport_controller_send_;

  webrtc::NetworkState audio_network_state_;
  webrtc::NetworkState video_network_state_;
  rtc::SentPacket last_sent_packet_;
  webrtc::RtpPacketReceived last_received_rtp_packet_;
  int last_sent_nonnegative_packet_id_ = -1;
  int next_stream_id_ = 665;
  webrtc::Call::Stats stats_;
  std::vector<FakeVideoSendStream*> video_send_streams_;
  std::vector<FakeAudioSendStream*> audio_send_streams_;
  std::vector<FakeVideoReceiveStream*> video_receive_streams_;
  std::vector<FakeAudioReceiveStream*> audio_receive_streams_;
  std::vector<FakeFlexfecReceiveStream*> flexfec_receive_streams_;
  std::map<uint32_t, size_t> delivered_packets_by_ssrc_;

  int num_created_send_streams_;
  int num_created_receive_streams_;

  // The field trials that are in use, either supplied by caller
  // or pointer to &fallback_trials_.
  webrtc::test::ScopedKeyValueConfig* trials_;
  // fallback_trials_ is used if caller does not provide any field trials.
  webrtc::test::ScopedKeyValueConfig fallback_trials_;
  // An extra field trial that can be set using SetFieldTrial.
  std::unique_ptr<webrtc::test::ScopedKeyValueConfig> trials_overrides_;
};
} // namespace cricket
#endif // MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_

View file

@ -0,0 +1,304 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/fake_webrtc_video_engine.h"
#include <algorithm>
#include <memory>
#include "absl/strings/match.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/simulcast_encoder_adapter.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/time_utils.h"
namespace cricket {
namespace {

// Timeout shared by the Wait*() helpers in this file (WaitForInitEncode,
// WaitForCreatedVideoEncoders).
static constexpr webrtc::TimeDelta kEventTimeout =
    webrtc::TimeDelta::Seconds(10);

// Returns true if `scalability_mode` is unset, or if any format in `formats`
// advertises a scalability mode whose string form matches it.
bool IsScalabilityModeSupported(
    const std::vector<webrtc::SdpVideoFormat>& formats,
    absl::optional<std::string> scalability_mode) {
  if (!scalability_mode.has_value()) {
    return true;
  }
  return std::any_of(
      formats.begin(), formats.end(),
      [&scalability_mode](const webrtc::SdpVideoFormat& format) {
        return std::any_of(format.scalability_modes.begin(),
                           format.scalability_modes.end(),
                           [&scalability_mode](const auto& mode) {
                             return ScalabilityModeToString(mode) ==
                                    scalability_mode;
                           });
      });
}

}  // namespace
// Decoder.
FakeWebRtcVideoDecoder::FakeWebRtcVideoDecoder(
    FakeWebRtcVideoDecoderFactory* factory)
    : num_frames_received_(0), factory_(factory) {}

FakeWebRtcVideoDecoder::~FakeWebRtcVideoDecoder() {
  // Unregister from the creating factory (if any) so it stops tracking us.
  if (factory_ != nullptr) {
    factory_->DecoderDestroyed(this);
  }
}

// The fake accepts any settings.
bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) {
  return true;
}

// Counts the frame and reports success without decoding anything.
int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&, int64_t) {
  ++num_frames_received_;
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t FakeWebRtcVideoDecoder::RegisterDecodeCompleteCallback(
    webrtc::DecodedImageCallback*) {
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t FakeWebRtcVideoDecoder::Release() {
  return WEBRTC_VIDEO_CODEC_OK;
}

int FakeWebRtcVideoDecoder::GetNumFramesReceived() const {
  return num_frames_received_;
}
// Decoder factory.
FakeWebRtcVideoDecoderFactory::FakeWebRtcVideoDecoderFactory()
    : num_created_decoders_(0) {}

std::vector<webrtc::SdpVideoFormat>
FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const {
  // Report each codec once even if it was registered multiple times.
  std::vector<webrtc::SdpVideoFormat> unique_formats;
  for (const webrtc::SdpVideoFormat& format : supported_codec_formats_) {
    if (!format.IsCodecInList(unique_formats)) {
      unique_formats.push_back(format);
    }
  }
  return unique_formats;
}

std::unique_ptr<webrtc::VideoDecoder>
FakeWebRtcVideoDecoderFactory::CreateVideoDecoder(
    const webrtc::SdpVideoFormat& format) {
  // Only formats previously registered via AddSupportedVideoCodecType()
  // yield a decoder; anything else yields nullptr.
  if (!format.IsCodecInList(supported_codec_formats_)) {
    return nullptr;
  }
  num_created_decoders_++;
  auto decoder = std::make_unique<FakeWebRtcVideoDecoder>(this);
  decoders_.push_back(decoder.get());
  return decoder;
}

void FakeWebRtcVideoDecoderFactory::DecoderDestroyed(
    FakeWebRtcVideoDecoder* decoder) {
  auto new_end = std::remove(decoders_.begin(), decoders_.end(), decoder);
  decoders_.erase(new_end, decoders_.end());
}

void FakeWebRtcVideoDecoderFactory::AddSupportedVideoCodecType(
    const std::string& name) {
  // This is to match the default H264 params of cricket::VideoCodec.
  cricket::VideoCodec video_codec = cricket::CreateVideoCodec(name);
  supported_codec_formats_.push_back(
      webrtc::SdpVideoFormat(video_codec.name, video_codec.params));
}

int FakeWebRtcVideoDecoderFactory::GetNumCreatedDecoders() {
  return num_created_decoders_;
}

const std::vector<FakeWebRtcVideoDecoder*>&
FakeWebRtcVideoDecoderFactory::decoders() {
  return decoders_;
}
// Encoder.
FakeWebRtcVideoEncoder::FakeWebRtcVideoEncoder(
FakeWebRtcVideoEncoderFactory* factory)
: num_frames_encoded_(0), factory_(factory) {}
FakeWebRtcVideoEncoder::~FakeWebRtcVideoEncoder() {
if (factory_) {
factory_->EncoderDestroyed(this);
}
}
void FakeWebRtcVideoEncoder::SetFecControllerOverride(
webrtc::FecControllerOverride* fec_controller_override) {
// Ignored.
}
int32_t FakeWebRtcVideoEncoder::InitEncode(
const webrtc::VideoCodec* codecSettings,
const VideoEncoder::Settings& settings) {
webrtc::MutexLock lock(&mutex_);
codec_settings_ = *codecSettings;
init_encode_event_.Set();
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoEncoder::Encode(
const webrtc::VideoFrame& inputImage,
const std::vector<webrtc::VideoFrameType>* frame_types) {
webrtc::MutexLock lock(&mutex_);
++num_frames_encoded_;
init_encode_event_.Set();
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoEncoder::RegisterEncodeCompleteCallback(
webrtc::EncodedImageCallback* callback) {
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeWebRtcVideoEncoder::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
void FakeWebRtcVideoEncoder::SetRates(const RateControlParameters& parameters) {
}
webrtc::VideoEncoder::EncoderInfo FakeWebRtcVideoEncoder::GetEncoderInfo()
const {
EncoderInfo info;
info.is_hardware_accelerated = true;
return info;
}
bool FakeWebRtcVideoEncoder::WaitForInitEncode() {
return init_encode_event_.Wait(kEventTimeout);
}
webrtc::VideoCodec FakeWebRtcVideoEncoder::GetCodecSettings() {
webrtc::MutexLock lock(&mutex_);
return codec_settings_;
}
int FakeWebRtcVideoEncoder::GetNumEncodedFrames() {
webrtc::MutexLock lock(&mutex_);
return num_frames_encoded_;
}
// Video encoder factory.
FakeWebRtcVideoEncoderFactory::FakeWebRtcVideoEncoderFactory()
    : num_created_encoders_(0), vp8_factory_mode_(false) {}

std::vector<webrtc::SdpVideoFormat>
FakeWebRtcVideoEncoderFactory::GetSupportedFormats() const {
  // Report each registered codec once even if it was added multiple times.
  std::vector<webrtc::SdpVideoFormat> unique_formats;
  for (const webrtc::SdpVideoFormat& format : formats_) {
    if (!format.IsCodecInList(unique_formats)) {
      unique_formats.push_back(format);
    }
  }
  return unique_formats;
}

webrtc::VideoEncoderFactory::CodecSupport
FakeWebRtcVideoEncoderFactory::QueryCodecSupport(
    const webrtc::SdpVideoFormat& format,
    absl::optional<std::string> scalability_mode) const {
  if (!format.IsCodecInList(formats_)) {
    return {.is_supported = false};
  }
  // Gather every registered format for the same codec and check whether any
  // of them advertises the requested scalability mode.
  std::vector<webrtc::SdpVideoFormat> same_codec_formats;
  for (const auto& candidate : formats_) {
    if (format.IsSameCodec(candidate)) {
      same_codec_formats.push_back(candidate);
    }
  }
  return {.is_supported =
              IsScalabilityModeSupported(same_codec_formats, scalability_mode)};
}
// Creates an encoder for `format` if it was registered. The first VP8
// request produces a SimulcastEncoderAdapter backed by this same factory;
// the adapter's own requests (and all later VP8 requests) then get plain
// fake encoders because vp8_factory_mode_ has been latched to true.
// Returns nullptr for unregistered formats.
std::unique_ptr<webrtc::VideoEncoder>
FakeWebRtcVideoEncoderFactory::CreateVideoEncoder(
    const webrtc::SdpVideoFormat& format) {
  webrtc::MutexLock lock(&mutex_);
  std::unique_ptr<webrtc::VideoEncoder> encoder;
  if (format.IsCodecInList(formats_)) {
    if (absl::EqualsIgnoreCase(format.name, kVp8CodecName) &&
        !vp8_factory_mode_) {
      // The simulcast adapter will ask this factory for multiple VP8
      // encoders. Enter vp8_factory_mode so that we now create these encoders
      // instead of more adapters.
      vp8_factory_mode_ = true;
      encoder = std::make_unique<webrtc::SimulcastEncoderAdapter>(this, format);
    } else {
      // Count the encoder and wake anyone blocked in
      // WaitForCreatedVideoEncoders().
      num_created_encoders_++;
      created_video_encoder_event_.Set();
      encoder = std::make_unique<FakeWebRtcVideoEncoder>(this);
      encoders_.push_back(static_cast<FakeWebRtcVideoEncoder*>(encoder.get()));
    }
  }
  return encoder;
}
// Blocks until at least `num_encoders` encoders have been created, or until
// the overall kEventTimeout deadline (measured from entry) expires.
// Returns true on success, false on timeout.
bool FakeWebRtcVideoEncoderFactory::WaitForCreatedVideoEncoders(
    int num_encoders) {
  int64_t start_offset_ms = rtc::TimeMillis();
  int64_t wait_time = kEventTimeout.ms();
  do {
    if (GetNumCreatedEncoders() >= num_encoders)
      return true;
    // Shrink the remaining budget by the time already spent, so the total
    // wait is bounded by kEventTimeout no matter how often the event fires.
    wait_time = kEventTimeout.ms() - (rtc::TimeMillis() - start_offset_ms);
  } while (wait_time > 0 && created_video_encoder_event_.Wait(
                                webrtc::TimeDelta::Millis(wait_time)));
  return false;
}
void FakeWebRtcVideoEncoderFactory::EncoderDestroyed(
    FakeWebRtcVideoEncoder* encoder) {
  webrtc::MutexLock lock(&mutex_);
  auto new_end = std::remove(encoders_.begin(), encoders_.end(), encoder);
  encoders_.erase(new_end, encoders_.end());
}

void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodec(
    const webrtc::SdpVideoFormat& format) {
  formats_.push_back(format);
}

void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodecType(
    const std::string& name,
    const std::vector<webrtc::ScalabilityMode>& scalability_modes) {
  // This is to match the default H264 params of cricket::VideoCodec.
  cricket::VideoCodec video_codec = cricket::CreateVideoCodec(name);
  formats_.push_back(webrtc::SdpVideoFormat(
      video_codec.name, video_codec.params,
      {scalability_modes.begin(), scalability_modes.end()}));
}

int FakeWebRtcVideoEncoderFactory::GetNumCreatedEncoders() {
  webrtc::MutexLock lock(&mutex_);
  return num_created_encoders_;
}

const std::vector<FakeWebRtcVideoEncoder*>
FakeWebRtcVideoEncoderFactory::encoders() {
  // Returned by value (under the lock) so callers get a stable snapshot.
  webrtc::MutexLock lock(&mutex_);
  return encoders_;
}
} // namespace cricket

View file

@ -0,0 +1,144 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
#define MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "api/fec_controller_override.h"
#include "api/video/encoded_image.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/event.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace cricket {
class FakeWebRtcVideoDecoderFactory;
class FakeWebRtcVideoEncoderFactory;
// Fake class for mocking out webrtc::VideoDecoder.
class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
 public:
  explicit FakeWebRtcVideoDecoder(FakeWebRtcVideoDecoderFactory* factory);
  // Marked `override` for consistency with the other virtuals below;
  // the destructor notifies `factory_` that this decoder is gone.
  ~FakeWebRtcVideoDecoder() override;

  // webrtc::VideoDecoder implementation.
  bool Configure(const Settings& settings) override;
  int32_t Decode(const webrtc::EncodedImage&, int64_t) override;
  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback*) override;
  int32_t Release() override;

  // Number of Decode() calls observed so far.
  int GetNumFramesReceived() const;

 private:
  int num_frames_received_;
  // Factory that created this decoder; notified on destruction. May be null.
  FakeWebRtcVideoDecoderFactory* factory_;
};
// Fake class for mocking out webrtc::VideoDecoderFactory.
class FakeWebRtcVideoDecoderFactory : public webrtc::VideoDecoderFactory {
 public:
  FakeWebRtcVideoDecoderFactory();

  // webrtc::VideoDecoderFactory implementation.
  std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
  std::unique_ptr<webrtc::VideoDecoder> CreateVideoDecoder(
      const webrtc::SdpVideoFormat& format) override;

  // Called by FakeWebRtcVideoDecoder's destructor to unregister itself.
  void DecoderDestroyed(FakeWebRtcVideoDecoder* decoder);
  // Registers `name` as supported for subsequent CreateVideoDecoder() calls.
  void AddSupportedVideoCodecType(const std::string& name);
  int GetNumCreatedDecoders();
  // Decoders created by this factory that are still alive.
  const std::vector<FakeWebRtcVideoDecoder*>& decoders();

 private:
  std::vector<webrtc::SdpVideoFormat> supported_codec_formats_;
  std::vector<FakeWebRtcVideoDecoder*> decoders_;
  int num_created_decoders_;
};
// Fake class for mocking out webrtc::VideoEncoder.
class FakeWebRtcVideoEncoder : public webrtc::VideoEncoder {
 public:
  explicit FakeWebRtcVideoEncoder(FakeWebRtcVideoEncoderFactory* factory);
  // Marked `override` for consistency with the other virtuals below;
  // the destructor notifies `factory_` that this encoder is gone.
  ~FakeWebRtcVideoEncoder() override;

  // webrtc::VideoEncoder implementation.
  void SetFecControllerOverride(
      webrtc::FecControllerOverride* fec_controller_override) override;
  int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
                     const VideoEncoder::Settings& settings) override;
  int32_t Encode(
      const webrtc::VideoFrame& inputImage,
      const std::vector<webrtc::VideoFrameType>* frame_types) override;
  int32_t RegisterEncodeCompleteCallback(
      webrtc::EncodedImageCallback* callback) override;
  int32_t Release() override;
  void SetRates(const RateControlParameters& parameters) override;
  webrtc::VideoEncoder::EncoderInfo GetEncoderInfo() const override;

  // Blocks until InitEncode() (or Encode()) has signaled, or times out.
  bool WaitForInitEncode();
  // Returns a copy of the settings passed to InitEncode().
  webrtc::VideoCodec GetCodecSettings();
  int GetNumEncodedFrames();

 private:
  webrtc::Mutex mutex_;
  rtc::Event init_encode_event_;
  int num_frames_encoded_ RTC_GUARDED_BY(mutex_);
  webrtc::VideoCodec codec_settings_ RTC_GUARDED_BY(mutex_);
  // Factory that created this encoder; notified on destruction. May be null.
  FakeWebRtcVideoEncoderFactory* factory_;
};
// Fake class for mocking out webrtc::VideoEncoderFactory.
class FakeWebRtcVideoEncoderFactory : public webrtc::VideoEncoderFactory {
 public:
  FakeWebRtcVideoEncoderFactory();

  // webrtc::VideoEncoderFactory implementation.
  std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
  webrtc::VideoEncoderFactory::CodecSupport QueryCodecSupport(
      const webrtc::SdpVideoFormat& format,
      absl::optional<std::string> scalability_mode) const override;
  std::unique_ptr<webrtc::VideoEncoder> CreateVideoEncoder(
      const webrtc::SdpVideoFormat& format) override;

  // Blocks until at least `num_encoders` encoders exist or a timeout elapses.
  bool WaitForCreatedVideoEncoders(int num_encoders);
  // Called by FakeWebRtcVideoEncoder's destructor to unregister itself.
  void EncoderDestroyed(FakeWebRtcVideoEncoder* encoder);
  // NOTE(review): no definition of this method is visible in the
  // accompanying .cc — looks like a leftover declaration; confirm.
  void set_encoders_have_internal_sources(bool internal_source);
  void AddSupportedVideoCodec(const webrtc::SdpVideoFormat& format);
  void AddSupportedVideoCodecType(
      const std::string& name,
      const std::vector<webrtc::ScalabilityMode>& scalability_modes = {});
  int GetNumCreatedEncoders();
  // Snapshot (by value) of the live encoders created by this factory.
  const std::vector<FakeWebRtcVideoEncoder*> encoders();

 private:
  webrtc::Mutex mutex_;
  rtc::Event created_video_encoder_event_;
  std::vector<webrtc::SdpVideoFormat> formats_;
  std::vector<FakeWebRtcVideoEncoder*> encoders_ RTC_GUARDED_BY(mutex_);
  int num_created_encoders_ RTC_GUARDED_BY(mutex_);
  // Set once a SimulcastEncoderAdapter has been handed out for VP8 so that
  // subsequent VP8 requests get plain fake encoders (see CreateVideoEncoder).
  bool vp8_factory_mode_;
};
} // namespace cricket
#endif // MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_

View file

@ -0,0 +1,105 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/internal_decoder_factory.h"
#include "absl/strings/match.h"
#include "api/video_codecs/av1_profile.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/h264/include/h264.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"
#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
#include "modules/video_coding/codecs/av1/dav1d_decoder.h" // nogncheck
#endif
namespace webrtc {
namespace {
// True when the dav1d-based AV1 decoder is compiled into this build.
#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
constexpr bool kDav1dIsIncluded = true;
#else
constexpr bool kDav1dIsIncluded = false;
// Stub replacing the real factory function (declared in dav1d_decoder.h)
// when dav1d is compiled out; guarded by kDav1dIsIncluded, so it is never
// reached at runtime.
std::unique_ptr<VideoDecoder> CreateDav1dDecoder() {
  return nullptr;
}
#endif
}  // namespace
std::vector<SdpVideoFormat> InternalDecoderFactory::GetSupportedFormats()
const {
std::vector<SdpVideoFormat> formats;
formats.push_back(SdpVideoFormat(cricket::kVp8CodecName));
for (const SdpVideoFormat& format : SupportedVP9DecoderCodecs())
formats.push_back(format);
for (const SdpVideoFormat& h264_format : SupportedH264DecoderCodecs())
formats.push_back(h264_format);
if (kDav1dIsIncluded) {
formats.push_back(SdpVideoFormat(cricket::kAv1CodecName));
formats.push_back(
SdpVideoFormat(cricket::kAv1CodecName,
{{cricket::kAv1FmtpProfile,
AV1ProfileToString(AV1Profile::kProfile1).data()}}));
}
return formats;
}
// Reports whether `format` is decodable. An invalid combination — asking for
// reference scaling with a codec other than VP9 or AV1 — is reported as
// unsupported regardless of the format itself.
VideoDecoderFactory::CodecSupport InternalDecoderFactory::QueryCodecSupport(
    const SdpVideoFormat& format,
    bool reference_scaling) const {
  if (reference_scaling) {
    const VideoCodecType codec = PayloadStringToCodecType(format.name);
    const bool codec_supports_reference_scaling =
        codec == kVideoCodecVP9 || codec == kVideoCodecAV1;
    if (!codec_supports_reference_scaling) {
      return {/*is_supported=*/false, /*is_power_efficient=*/false};
    }
  }
  CodecSupport support;
  support.is_supported = format.IsCodecInList(GetSupportedFormats());
  return support;
}
// Instantiates a software decoder for `format`, or returns nullptr (with a
// warning) when the format is not in GetSupportedFormats().
std::unique_ptr<VideoDecoder> InternalDecoderFactory::CreateVideoDecoder(
    const SdpVideoFormat& format) {
  if (!format.IsCodecInList(GetSupportedFormats())) {
    RTC_LOG(LS_WARNING) << "Trying to create decoder for unsupported format. "
                        << format.ToString();
    return nullptr;
  }
  if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName)) {
    return VP8Decoder::Create();
  }
  if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName)) {
    return VP9Decoder::Create();
  }
  if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName)) {
    return H264Decoder::Create();
  }
  if (kDav1dIsIncluded &&
      absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName)) {
    return CreateDav1dDecoder();
  }
  // Unreachable: every supported format is handled above.
  RTC_DCHECK_NOTREACHED();
  return nullptr;
}
} // namespace webrtc

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
#define MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
#include <memory>
#include <vector>
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Decoder factory for the software codecs that ship with WebRTC itself
// (VP8, VP9, H264 and AV1 where compiled in).
class RTC_EXPORT InternalDecoderFactory : public VideoDecoderFactory {
 public:
  // Lists every SDP format the built-in decoders can handle.
  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  // Reports whether `format` is decodable; `reference_scaling` is only a
  // valid request for codecs that support it (see .cc for the check).
  CodecSupport QueryCodecSupport(const SdpVideoFormat& format,
                                 bool reference_scaling) const override;
  // Returns a new decoder for `format`, or nullptr when unsupported.
  std::unique_ptr<VideoDecoder> CreateVideoDecoder(
      const SdpVideoFormat& format) override;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/internal_encoder_factory.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_encoder_factory_template.h"
#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h" // nogncheck
#endif
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
#if defined(WEBRTC_USE_H264)
#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h" // nogncheck
#endif
namespace webrtc {
namespace {
// Compile-time assembled encoder factory: the template instantiates one
// adapter per codec that is part of this build (VP8 and VP9 always; H264
// and AV1 behind their respective build flags).
using Factory =
    VideoEncoderFactoryTemplate<webrtc::LibvpxVp8EncoderTemplateAdapter,
#if defined(WEBRTC_USE_H264)
                                webrtc::OpenH264EncoderTemplateAdapter,
#endif
#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
                                webrtc::LibaomAv1EncoderTemplateAdapter,
#endif
                                webrtc::LibvpxVp9EncoderTemplateAdapter>;
}  // namespace
std::vector<SdpVideoFormat> InternalEncoderFactory::GetSupportedFormats()
const {
return Factory().GetSupportedFormats();
}
std::unique_ptr<VideoEncoder> InternalEncoderFactory::CreateVideoEncoder(
const SdpVideoFormat& format) {
auto original_format =
FuzzyMatchSdpVideoFormat(Factory().GetSupportedFormats(), format);
return original_format ? Factory().CreateVideoEncoder(*original_format)
: nullptr;
}
// Queries support for `format` (fuzzily matched against the supported set),
// optionally constrained to a specific `scalability_mode`.
VideoEncoderFactory::CodecSupport InternalEncoderFactory::QueryCodecSupport(
    const SdpVideoFormat& format,
    absl::optional<std::string> scalability_mode) const {
  auto matched =
      FuzzyMatchSdpVideoFormat(Factory().GetSupportedFormats(), format);
  if (!matched) {
    return VideoEncoderFactory::CodecSupport{.is_supported = false};
  }
  return Factory().QueryCodecSupport(*matched, scalability_mode);
}
} // namespace webrtc

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
#define MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
#include <memory>
#include <string>
#include <vector>
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Encoder factory for the software codecs built into WebRTC (VP8, VP9, and
// H264/AV1 where compiled in). Thin wrapper over the codec-adapter template
// factory in the .cc file.
class RTC_EXPORT InternalEncoderFactory : public VideoEncoderFactory {
 public:
  // Lists every SDP format the built-in encoders can produce.
  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  // Reports support for `format`, optionally for a given scalability mode.
  CodecSupport QueryCodecSupport(
      const SdpVideoFormat& format,
      absl::optional<std::string> scalability_mode) const override;
  // Returns a new encoder for `format`, or nullptr when unsupported.
  std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      const SdpVideoFormat& format) override;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_

View file

@ -0,0 +1,117 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/multiplex_codec_factory.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
#include "rtc_base/logging.h"
namespace {
// True when `codec` names the multiplex codec (case-insensitive).
bool IsMultiplexCodec(const cricket::VideoCodec& codec) {
  // Pass the std::string directly: absl::EqualsIgnoreCase takes string_view,
  // so the previous .c_str() round-trip forced an extra strlen for nothing.
  return absl::EqualsIgnoreCase(codec.name, cricket::kMultiplexCodecName);
}
} // anonymous namespace
namespace webrtc {
// The underlying ("associated") codec the multiplex codec wraps.
constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
// Wraps `factory`, which provides the encoder for the associated codec.
// `supports_augmenting_data` enables the augmented-frame-buffer path.
MultiplexEncoderFactory::MultiplexEncoderFactory(
    std::unique_ptr<VideoEncoderFactory> factory,
    bool supports_augmenting_data)
    : factory_(std::move(factory)),
      supports_augmenting_data_(supports_augmenting_data) {}
std::vector<SdpVideoFormat> MultiplexEncoderFactory::GetSupportedFormats()
const {
std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
for (const auto& format : formats) {
if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
SdpVideoFormat multiplex_format = format;
multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
format.name;
multiplex_format.name = cricket::kMultiplexCodecName;
formats.push_back(multiplex_format);
break;
}
}
return formats;
}
std::unique_ptr<VideoEncoder> MultiplexEncoderFactory::CreateVideoEncoder(
const SdpVideoFormat& format) {
if (!IsMultiplexCodec(cricket::CreateVideoCodec(format)))
return factory_->CreateVideoEncoder(format);
const auto& it =
format.parameters.find(cricket::kCodecParamAssociatedCodecName);
if (it == format.parameters.end()) {
RTC_LOG(LS_ERROR) << "No assicated codec for multiplex.";
return nullptr;
}
SdpVideoFormat associated_format = format;
associated_format.name = it->second;
return std::unique_ptr<VideoEncoder>(new MultiplexEncoderAdapter(
factory_.get(), associated_format, supports_augmenting_data_));
}
// Wraps `factory`, which provides the decoder for the associated codec.
// `supports_augmenting_data` enables the augmented-frame-buffer path.
MultiplexDecoderFactory::MultiplexDecoderFactory(
    std::unique_ptr<VideoDecoderFactory> factory,
    bool supports_augmenting_data)
    : factory_(std::move(factory)),
      supports_augmenting_data_(supports_augmenting_data) {}
std::vector<SdpVideoFormat> MultiplexDecoderFactory::GetSupportedFormats()
const {
std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
std::vector<SdpVideoFormat> augmented_formats = formats;
for (const auto& format : formats) {
if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
SdpVideoFormat multiplex_format = format;
multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
format.name;
multiplex_format.name = cricket::kMultiplexCodecName;
augmented_formats.push_back(multiplex_format);
}
}
return augmented_formats;
}
// Creates a decoder for `format`. Non-multiplex formats are delegated
// verbatim to the wrapped factory; the multiplex format requires an
// associated-codec parameter naming the codec to wrap.
std::unique_ptr<VideoDecoder> MultiplexDecoderFactory::Create(
    const Environment& env,
    const SdpVideoFormat& format) {
  if (!IsMultiplexCodec(cricket::CreateVideoCodec(format))) {
    return factory_->Create(env, format);
  }
  auto it = format.parameters.find(cricket::kCodecParamAssociatedCodecName);
  if (it == format.parameters.end()) {
    // Fixed typo in the log message ("assicated" -> "associated").
    RTC_LOG(LS_ERROR) << "No associated codec for multiplex.";
    return nullptr;
  }
  // The adapter internally drives a decoder for the associated codec.
  SdpVideoFormat associated_format = format;
  associated_format.name = it->second;
  return std::make_unique<MultiplexDecoderAdapter>(
      env, factory_.get(), associated_format, supports_augmenting_data_);
}
} // namespace webrtc

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
#define MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
#include <memory>
#include <vector>
#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Multiplex codec is a completely modular/optional codec that allows users to
// send more than a frame's opaque content(RGB/YUV) over video channels.
// - Allows sending Alpha channel over the wire iff input is
// I420ABufferInterface. Users can expect to receive I420ABufferInterface as the
// decoded video frame buffer. I420A data is split into YUV/AXX portions,
// encoded/decoded separately and bitstreams are concatenated.
// - Allows sending augmenting data over the wire attached to the frame. This
// attached data portion is not encoded in any way and sent as it is. Users can
// input AugmentedVideoFrameBuffer and can expect the same interface as the
// decoded video frame buffer.
// - Showcases an example of how to add a custom codec in webrtc video channel.
// How to use it end-to-end:
// - Wrap your existing VideoEncoderFactory implementation with
// MultiplexEncoderFactory and VideoDecoderFactory implementation with
// MultiplexDecoderFactory below. For actual coding, multiplex creates encoder
// and decoder instance(s) using these factories.
// - Use Multiplex*coderFactory classes in CreatePeerConnectionFactory() calls.
// - Select "multiplex" codec in SDP negotiation.
class RTC_EXPORT MultiplexEncoderFactory : public VideoEncoderFactory {
 public:
  // `supports_augmenting_data` defines if the encoder would support augmenting
  // data. If set, the encoder expects to receive video frame buffers of type
  // AugmentedVideoFrameBuffer.
  MultiplexEncoderFactory(std::unique_ptr<VideoEncoderFactory> factory,
                          bool supports_augmenting_data = false);
  // Wrapped factory's formats plus the synthesized "multiplex" format.
  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  // Delegates to the wrapped factory, or builds a MultiplexEncoderAdapter
  // for the multiplex format.
  std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      const SdpVideoFormat& format) override;

 private:
  std::unique_ptr<VideoEncoderFactory> factory_;  // Owned wrapped factory.
  const bool supports_augmenting_data_;
};
class RTC_EXPORT MultiplexDecoderFactory : public VideoDecoderFactory {
 public:
  // `supports_augmenting_data` defines if the decoder would support augmenting
  // data. If set, the decoder is expected to output video frame buffers of type
  // AugmentedVideoFrameBuffer.
  MultiplexDecoderFactory(std::unique_ptr<VideoDecoderFactory> factory,
                          bool supports_augmenting_data = false);
  // Wrapped factory's formats plus the synthesized "multiplex" format(s).
  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  // Delegates to the wrapped factory, or builds a MultiplexDecoderAdapter
  // for the multiplex format.
  std::unique_ptr<VideoDecoder> Create(const Environment& env,
                                       const SdpVideoFormat& format) override;

 private:
  std::unique_ptr<VideoDecoderFactory> factory_;  // Owned wrapped factory.
  const bool supports_augmenting_data_;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
#define MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
#include <vector>
#include "media/base/media_channel.h"
#include "media/base/media_engine.h"
namespace webrtc {
class Call;
} // namespace webrtc
namespace cricket {
// Video engine implementation that does nothing and can be used in
// CompositeMediaEngine.
// Video engine implementation that does nothing and can be used in
// CompositeMediaEngine. Every query returns an empty collection.
class NullWebRtcVideoEngine : public VideoEngineInterface {
 public:
  std::vector<VideoCodec> send_codecs(bool) const override { return {}; }

  std::vector<VideoCodec> recv_codecs(bool) const override { return {}; }

  std::vector<VideoCodec> send_codecs() const override { return {}; }

  std::vector<VideoCodec> recv_codecs() const override { return {}; }

  std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
      const override {
    return {};
  }
};
} // namespace cricket
#endif // MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_

View file

@ -0,0 +1,160 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/payload_type_mapper.h"
#include <utility>
#include "absl/strings/ascii.h"
#include "api/audio_codecs/audio_format.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
namespace cricket {
// Converts a cricket::AudioCodec into the equivalent SdpAudioFormat,
// carrying over name, clock rate, channel count and fmtp parameters.
webrtc::SdpAudioFormat AudioCodecToSdpAudioFormat(const AudioCodec& ac) {
  return webrtc::SdpAudioFormat(ac.name, ac.clockrate, ac.channels, ac.params);
}
// Seeds the mapper with the static RFC 3551 assignments plus the payload
// types WebRTC currently hard-codes, so dynamic assignment avoids them.
PayloadTypeMapper::PayloadTypeMapper()
    // RFC 3551 reserves payload type numbers in the range 96-127 exclusively
    // for dynamic assignment. Once those are used up, it is recommended that
    // payload types unassigned by the RFC are used for dynamic payload type
    // mapping, before any static payload ids. At this point, we only support
    // mapping within the exclusive range.
    : next_unused_payload_type_(96),
      max_payload_type_(127),
      mappings_(
          {// Static payload type assignments according to RFC 3551.
           {{kPcmuCodecName, 8000, 1}, 0},
           {{"GSM", 8000, 1}, 3},
           {{"G723", 8000, 1}, 4},
           {{"DVI4", 8000, 1}, 5},
           {{"DVI4", 16000, 1}, 6},
           {{"LPC", 8000, 1}, 7},
           {{kPcmaCodecName, 8000, 1}, 8},
           {{kG722CodecName, 8000, 1}, 9},
           {{kL16CodecName, 44100, 2}, 10},
           {{kL16CodecName, 44100, 1}, 11},
           {{"QCELP", 8000, 1}, 12},
           {{kCnCodecName, 8000, 1}, 13},
           // RFC 4566 is a bit ambiguous on the contents of the "encoding
           // parameters" field, which, for audio, encodes the number of
           // channels. It is "optional and may be omitted if the number of
           // channels is one". Does that necessarily imply that an omitted
           // encoding parameter means one channel? Since RFC 3551 doesn't
           // specify a value for this parameter for MPA, I've included both 0
           // and 1 here, to increase the chances it will be correctly used if
           // someone implements an MPEG audio encoder/decoder.
           {{"MPA", 90000, 0}, 14},
           {{"MPA", 90000, 1}, 14},
           {{"G728", 8000, 1}, 15},
           {{"DVI4", 11025, 1}, 16},
           {{"DVI4", 22050, 1}, 17},
           {{"G729", 8000, 1}, 18},
           // Payload type assignments currently used by WebRTC.
           // Includes data to reduce collisions (and thus reassignments)
           {{kIlbcCodecName, 8000, 1}, 102},
           {{kCnCodecName, 16000, 1}, 105},
           {{kCnCodecName, 32000, 1}, 106},
           {{kOpusCodecName,
             48000,
             2,
             {{kCodecParamMinPTime, "10"},
              {kCodecParamUseInbandFec, kParamValueTrue}}},
            111},
           // RED for opus is assigned in the lower range, starting at the top.
           // Note that the FMTP refers to the opus payload type.
           {{kRedCodecName,
             48000,
             2,
             {{kCodecParamNotInNameValueFormat, "111/111"}}},
            63},
           // TODO(solenberg): Remove the hard coded 16k,32k,48k DTMF once we
           // assign payload types dynamically for send side as well.
           {{kDtmfCodecName, 48000, 1}, 110},
           {{kDtmfCodecName, 32000, 1}, 112},
           {{kDtmfCodecName, 16000, 1}, 113},
           {{kDtmfCodecName, 8000, 1}, 126}}) {
  // TODO(ossu): Try to keep this as change-proof as possible until we're able
  // to remove the payload type constants from everywhere in the code.
  // Mark every statically mapped id as taken so GetMappingFor() skips them.
  for (const auto& mapping : mappings_) {
    used_payload_types_.insert(mapping.second);
  }
}

PayloadTypeMapper::~PayloadTypeMapper() = default;
// Returns the payload type already mapped to `format`, or assigns the next
// free id in the dynamic range [96, 127]. Empty when the range is exhausted.
absl::optional<int> PayloadTypeMapper::GetMappingFor(
    const webrtc::SdpAudioFormat& format) {
  const auto existing = mappings_.find(format);
  if (existing != mappings_.end()) {
    return existing->second;
  }
  // Scan forward from the cursor for the first id not claimed by a static
  // or previously assigned mapping.
  while (next_unused_payload_type_ <= max_payload_type_) {
    const int candidate = next_unused_payload_type_++;
    if (used_payload_types_.count(candidate) == 0) {
      used_payload_types_.insert(candidate);
      mappings_[format] = candidate;
      return candidate;
    }
  }
  return absl::nullopt;
}
// Looks up the payload type for `format` without ever assigning a new one.
absl::optional<int> PayloadTypeMapper::FindMappingFor(
    const webrtc::SdpAudioFormat& format) const {
  if (const auto it = mappings_.find(format); it != mappings_.end()) {
    return it->second;
  }
  return absl::nullopt;
}
// Like GetMappingFor(), but returns a fully populated AudioCodec instead of
// just the payload type. Empty when no payload type could be assigned.
absl::optional<AudioCodec> PayloadTypeMapper::ToAudioCodec(
    const webrtc::SdpAudioFormat& format) {
  // TODO(ossu): We can safely set bitrate to zero here, since that field is
  // not presented in the SDP. It is used to ferry around some target bitrate
  // values for certain codecs (ISAC and Opus) and in ways it really
  // shouldn't. It should be removed once we no longer use CodecInsts in the
  // ACM or NetEq.
  auto opt_payload_type = GetMappingFor(format);
  if (!opt_payload_type) {
    return absl::nullopt;
  }
  AudioCodec codec =
      cricket::CreateAudioCodec(*opt_payload_type, format.name,
                                format.clockrate_hz, format.num_channels);
  codec.params = format.parameters;
  // Return the local by value: the implicit move into the optional applies
  // here, and the previous explicit std::move was a clang-tidy
  // performance-no-automatic-move pessimization.
  return codec;
}
// Strict weak ordering for SdpAudioFormat map keys: clock rate, then channel
// count, then case-insensitive name, then fmtp parameters.
bool PayloadTypeMapper::SdpAudioFormatOrdering::operator()(
    const webrtc::SdpAudioFormat& a,
    const webrtc::SdpAudioFormat& b) const {
  if (a.clockrate_hz != b.clockrate_hz) {
    return a.clockrate_hz < b.clockrate_hz;
  }
  if (a.num_channels != b.num_channels) {
    return a.num_channels < b.num_channels;
  }
  const int name_cmp =
      absl::AsciiStrToLower(a.name).compare(absl::AsciiStrToLower(b.name));
  if (name_cmp != 0) {
    return name_cmp < 0;
  }
  return a.parameters < b.parameters;
}
} // namespace cricket

View file

@ -0,0 +1,57 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
#define MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
#include <map>
#include <set>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_format.h"
#include "media/base/codec.h"
namespace cricket {
webrtc::SdpAudioFormat AudioCodecToSdpAudioFormat(const AudioCodec& ac);
// Assigns and remembers RTP payload type numbers for audio formats, seeded
// with the static RFC 3551 assignments (see the .cc for the table).
class PayloadTypeMapper {
 public:
  PayloadTypeMapper();
  ~PayloadTypeMapper();

  // Finds the current payload type for `format` or assigns a new one, if no
  // current mapping exists. Will return an empty value if it was unable to
  // create a mapping, i.e. if all dynamic payload type ids have been used up.
  absl::optional<int> GetMappingFor(const webrtc::SdpAudioFormat& format);

  // Finds the current payload type for `format`, if any. Returns an empty value
  // if no payload type mapping exists for the format. Never assigns.
  absl::optional<int> FindMappingFor(
      const webrtc::SdpAudioFormat& format) const;

  // Like GetMappingFor, but fills in an AudioCodec structure with the necessary
  // information instead.
  absl::optional<AudioCodec> ToAudioCodec(const webrtc::SdpAudioFormat& format);

 private:
  // Orders formats by clock rate, channels, lowercased name, parameters.
  struct SdpAudioFormatOrdering {
    bool operator()(const webrtc::SdpAudioFormat& a,
                    const webrtc::SdpAudioFormat& b) const;
  };

  int next_unused_payload_type_;  // Cursor into the dynamic id range.
  int max_payload_type_;          // Upper bound (inclusive) of that range.
  std::map<webrtc::SdpAudioFormat, int, SdpAudioFormatOrdering> mappings_;
  std::set<int> used_payload_types_;  // All ids handed out or reserved.
};
} // namespace cricket
#endif // MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_

View file

@ -0,0 +1,995 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/simulcast_encoder_adapter.h"
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "api/field_trials_view.h"
#include "api/scoped_refptr.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_codec_constants.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_encoder_software_fallback_wrapper.h"
#include "media/base/media_constants.h"
#include "media/base/video_common.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/include/video_error_codes_utils.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/logging.h"
namespace {
// Max qp for lowest spatial resolution when doing simulcast.
const unsigned int kLowestResMaxQp = 45;

// Parses the "WebRTC-BoostedScreenshareQp" field trial. Returns the
// configured QP clamped to [1, 63], or empty when the trial is unset or
// unparsable.
absl::optional<unsigned int> GetScreenshareBoostedQpValue(
    const webrtc::FieldTrialsView& field_trials) {
  std::string experiment_group =
      field_trials.Lookup("WebRTC-BoostedScreenshareQp");
  unsigned int qp;
  if (sscanf(experiment_group.c_str(), "%u", &qp) != 1)
    return absl::nullopt;
  // std::clamp replaces the previous min/max pair; same [1, 63] range.
  return std::clamp(qp, 1u, 63u);
}
// Sums maxBitrate over the first `streams` simulcast stream configs.
uint32_t SumStreamMaxBitrate(int streams, const webrtc::VideoCodec& codec) {
  uint32_t total = 0;
  for (int stream_idx = 0; stream_idx < streams; ++stream_idx) {
    total += codec.simulcastStream[stream_idx].maxBitrate;
  }
  return total;
}

// Number of configured streams; collapses to 1 when simulcast is not
// configured or when no stream declares a max bitrate (legacy singlecast).
int CountAllStreams(const webrtc::VideoCodec& codec) {
  const int stream_count = std::max<int>(1, codec.numberOfSimulcastStreams);
  return SumStreamMaxBitrate(stream_count, codec) == 0 ? 1 : stream_count;
}
// Number of streams flagged active; 1 when simulcast is not configured.
int CountActiveStreams(const webrtc::VideoCodec& codec) {
  if (codec.numberOfSimulcastStreams < 1) {
    return 1;
  }
  const int total = CountAllStreams(codec);
  int active = 0;
  for (int idx = 0; idx < total; ++idx) {
    active += codec.simulcastStream[idx].active ? 1 : 0;
  }
  return active;
}
// Validates `codec_settings` ahead of encoder initialization. Returns
// WEBRTC_VIDEO_CODEC_OK on success, WEBRTC_VIDEO_CODEC_ERR_PARAMETER on any
// failed check.
int VerifyCodec(const webrtc::VideoCodec* codec_settings) {
  if (codec_settings == nullptr) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (codec_settings->maxFramerate < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // allow zero to represent an unspecified maxBitRate
  if (codec_settings->maxBitrate > 0 &&
      codec_settings->startBitrate > codec_settings->maxBitrate) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (codec_settings->width <= 1 || codec_settings->height <= 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // VP8 automatic resize is only accepted with at most one active stream.
  if (codec_settings->codecType == webrtc::kVideoCodecVP8 &&
      codec_settings->VP8().automaticResizeOn &&
      CountActiveStreams(*codec_settings) > 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

// Orders streams by "quality": resolution (height, then width) first, then
// max bitrate, then max framerate.
bool StreamQualityCompare(const webrtc::SimulcastStream& a,
                          const webrtc::SimulcastStream& b) {
  return std::tie(a.height, a.width, a.maxBitrate, a.maxFramerate) <
         std::tie(b.height, b.width, b.maxBitrate, b.maxFramerate);
}
// Writes the indexes of the lowest- and highest-quality streams (per
// StreamQualityCompare) into the two out-params. `streams` must be
// non-empty. (The misspelled "Indixes" is kept: callers use this name.)
void GetLowestAndHighestQualityStreamIndixes(
    rtc::ArrayView<webrtc::SimulcastStream> streams,
    int* lowest_quality_stream_idx,
    int* highest_quality_stream_idx) {
  const auto lowest_highest_quality_streams =
      absl::c_minmax_element(streams, StreamQualityCompare);
  *lowest_quality_stream_idx =
      std::distance(streams.begin(), lowest_highest_quality_streams.first);
  *highest_quality_stream_idx =
      std::distance(streams.begin(), lowest_highest_quality_streams.second);
}
// Distributes codec.startBitrate across all configured streams using the
// simulcast rate allocator; returns one kbps value per stream.
std::vector<uint32_t> GetStreamStartBitratesKbps(
    const webrtc::VideoCodec& codec) {
  webrtc::SimulcastRateAllocator rate_allocator(codec);
  const webrtc::VideoBitrateAllocation allocation =
      rate_allocator.Allocate(webrtc::VideoBitrateAllocationParameters(
          codec.startBitrate * 1000, codec.maxFramerate));

  const int stream_count = CountAllStreams(codec);
  std::vector<uint32_t> start_bitrates_kbps;
  start_bitrates_kbps.reserve(stream_count);
  for (int i = 0; i < stream_count; ++i) {
    start_bitrates_kbps.push_back(allocation.GetSpatialLayerSum(i) / 1000);
  }
  return start_bitrates_kbps;
}
} // namespace
namespace webrtc {
// Bundles one encoder instance with the metadata recorded at creation time
// (temporal-support preference and primary/fallback EncoderInfo).
SimulcastEncoderAdapter::EncoderContext::EncoderContext(
    std::unique_ptr<VideoEncoder> encoder,
    bool prefer_temporal_support,
    VideoEncoder::EncoderInfo primary_info,
    VideoEncoder::EncoderInfo fallback_info)
    : encoder_(std::move(encoder)),
      prefer_temporal_support_(prefer_temporal_support),
      primary_info_(std::move(primary_info)),
      fallback_info_(std::move(fallback_info)) {}

// Releases the wrapped encoder and detaches its callback, so the context can
// be cached without pointing at a destroyed StreamContext.
void SimulcastEncoderAdapter::EncoderContext::Release() {
  if (encoder_) {
    encoder_->Release();
    encoder_->RegisterEncodeCompleteCallback(nullptr);
  }
}
// Per-stream state: the encoder context plus stream geometry and pause/
// keyframe bookkeeping. A null `parent` means bypass mode: encode-complete
// callbacks are then not intercepted by this StreamContext.
SimulcastEncoderAdapter::StreamContext::StreamContext(
    SimulcastEncoderAdapter* parent,
    std::unique_ptr<EncoderContext> encoder_context,
    std::unique_ptr<FramerateController> framerate_controller,
    int stream_idx,
    uint16_t width,
    uint16_t height,
    bool is_paused)
    : parent_(parent),
      encoder_context_(std::move(encoder_context)),
      framerate_controller_(std::move(framerate_controller)),
      stream_idx_(stream_idx),
      width_(width),
      height_(height),
      is_keyframe_needed_(false),
      is_paused_(is_paused) {
  if (parent_) {
    encoder_context_->encoder().RegisterEncodeCompleteCallback(this);
  }
}

SimulcastEncoderAdapter::StreamContext::StreamContext(StreamContext&& rhs)
    : parent_(rhs.parent_),
      encoder_context_(std::move(rhs.encoder_context_)),
      framerate_controller_(std::move(rhs.framerate_controller_)),
      stream_idx_(rhs.stream_idx_),
      width_(rhs.width_),
      height_(rhs.height_),
      is_keyframe_needed_(rhs.is_keyframe_needed_),
      is_paused_(rhs.is_paused_) {
  if (parent_) {
    // Re-register so the encoder calls back into this (moved-to) object
    // rather than into `rhs`, which is about to be destroyed.
    encoder_context_->encoder().RegisterEncodeCompleteCallback(this);
  }
}

SimulcastEncoderAdapter::StreamContext::~StreamContext() {
  // After a move or ReleaseEncoderContext() the context is null and there is
  // nothing left to release.
  if (encoder_context_) {
    encoder_context_->Release();
  }
}

// Releases the encoder and transfers ownership of the context to the caller
// (used to stash encoders for later reuse).
std::unique_ptr<SimulcastEncoderAdapter::EncoderContext>
SimulcastEncoderAdapter::StreamContext::ReleaseEncoderContext() && {
  encoder_context_->Release();
  return std::move(encoder_context_);
}
// Marks the pending keyframe request as satisfied and tells the framerate
// controller to keep this frame.
void SimulcastEncoderAdapter::StreamContext::OnKeyframe(Timestamp timestamp) {
  is_keyframe_needed_ = false;
  if (framerate_controller_) {
    // NOTE(review): microseconds * 1000 — KeepFrame appears to take
    // nanoseconds; confirm against FramerateController's contract.
    framerate_controller_->KeepFrame(timestamp.us() * 1000);
  }
}

// True when the framerate controller decides this frame should be dropped to
// honor the stream's target framerate; always false without a controller.
bool SimulcastEncoderAdapter::StreamContext::ShouldDropFrame(
    Timestamp timestamp) {
  if (!framerate_controller_) {
    return false;
  }
  return framerate_controller_->ShouldDropFrame(timestamp.us() * 1000);
}

// Forwards the encoded image to the owning adapter, tagged with this
// stream's index. Only wired up when parent_ is non-null (non-bypass mode).
EncodedImageCallback::Result
SimulcastEncoderAdapter::StreamContext::OnEncodedImage(
    const EncodedImage& encoded_image,
    const CodecSpecificInfo* codec_specific_info) {
  RTC_CHECK(parent_);  // If null, this method should never be called.
  return parent_->OnEncodedImage(stream_idx_, encoded_image,
                                 codec_specific_info);
}

// Forwards drop notifications to the owning adapter, tagged with this
// stream's index.
void SimulcastEncoderAdapter::StreamContext::OnDroppedFrame(
    DropReason /*reason*/) {
  RTC_CHECK(parent_);  // If null, this method should never be called.
  parent_->OnDroppedFrame(stream_idx_);
}
// Convenience constructor: no fallback factory, default field trials.
SimulcastEncoderAdapter::SimulcastEncoderAdapter(VideoEncoderFactory* factory,
                                                 const SdpVideoFormat& format)
    : SimulcastEncoderAdapter(factory,
                              nullptr,
                              format,
                              FieldTrialBasedConfig()) {}

// `primary_factory` is required; `fallback_factory` may be null. Field
// trial settings (screenshare QP boost, base-layer quality boost, temporal
// support preference) are snapshotted here.
SimulcastEncoderAdapter::SimulcastEncoderAdapter(
    VideoEncoderFactory* primary_factory,
    VideoEncoderFactory* fallback_factory,
    const SdpVideoFormat& format,
    const FieldTrialsView& field_trials)
    : inited_(0),
      primary_encoder_factory_(primary_factory),
      fallback_encoder_factory_(fallback_factory),
      video_format_(format),
      total_streams_count_(0),
      bypass_mode_(false),
      encoded_complete_callback_(nullptr),
      experimental_boosted_screenshare_qp_(
          GetScreenshareBoostedQpValue(field_trials)),
      boost_base_layer_quality_(
          RateControlSettings::ParseFromKeyValueConfig(&field_trials)
              .Vp8BoostBaseLayerQuality()),
      prefer_temporal_support_on_base_layer_(field_trials.IsEnabled(
          "WebRTC-Video-PreferTemporalSupportOnBaseLayer")) {
  RTC_DCHECK(primary_factory);

  // The adapter is typically created on the worker thread, but operated on
  // the encoder task queue.
  encoder_queue_.Detach();
}

// Release() must have been called before destruction (checked below).
SimulcastEncoderAdapter::~SimulcastEncoderAdapter() {
  RTC_DCHECK(!Initialized());
  DestroyStoredEncoders();
}

// FEC controller overrides are intentionally not supported by the adapter.
void SimulcastEncoderAdapter::SetFecControllerOverride(
    FecControllerOverride* /*fec_controller_override*/) {
  // Ignored.
}
// Tears down all stream contexts, caching their encoder instances for
// possible reuse by a later InitEncode(). Leaves the adapter uninitialized.
int SimulcastEncoderAdapter::Release() {
  RTC_DCHECK_RUN_ON(&encoder_queue_);

  while (!stream_contexts_.empty()) {
    // Move the encoder instances and put it on the `cached_encoder_contexts_`
    // where it may possibly be reused from (ordering does not matter).
    cached_encoder_contexts_.push_front(
        std::move(stream_contexts_.back()).ReleaseEncoderContext());
    stream_contexts_.pop_back();
  }
  bypass_mode_ = false;

  // It's legal to move the encoder to another queue now.
  encoder_queue_.Detach();

  inited_.store(0);

  return WEBRTC_VIDEO_CODEC_OK;
}
// Initializes the adapter for `codec_settings`. Depending on the number of
// active streams and the capabilities of the underlying encoder this either
// configures one simulcast-capable encoder in "bypass" mode, or one encoder
// per active stream. Returns a WEBRTC_VIDEO_CODEC_* status code.
int SimulcastEncoderAdapter::InitEncode(
    const VideoCodec* codec_settings,
    const VideoEncoder::Settings& settings) {
  RTC_DCHECK_RUN_ON(&encoder_queue_);
  if (settings.number_of_cores < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  int ret = VerifyCodec(codec_settings);
  if (ret < 0) {
    return ret;
  }
  // Drop any previous configuration; existing encoders move to the cache and
  // may be reused below.
  Release();
  codec_ = *codec_settings;
  total_streams_count_ = CountAllStreams(*codec_settings);
  // Legacy singlecast: no simulcast streams configured at all.
  bool is_legacy_singlecast = codec_.numberOfSimulcastStreams == 0;
  int lowest_quality_stream_idx = 0;
  int highest_quality_stream_idx = 0;
  if (!is_legacy_singlecast) {
    GetLowestAndHighestQualityStreamIndixes(
        rtc::ArrayView<SimulcastStream>(codec_.simulcastStream,
                                        total_streams_count_),
        &lowest_quality_stream_idx, &highest_quality_stream_idx);
  }
  std::unique_ptr<EncoderContext> encoder_context = FetchOrCreateEncoderContext(
      /*is_lowest_quality_stream=*/(
          is_legacy_singlecast ||
          codec_.simulcastStream[lowest_quality_stream_idx].active));
  if (encoder_context == nullptr) {
    return WEBRTC_VIDEO_CODEC_MEMORY;
  }
  // Two distinct scenarios:
  // * Singlecast (total_streams_count == 1) or simulcast with simulcast-capable
  //   underlaying encoder implementation if active_streams_count > 1. SEA
  //   operates in bypass mode: original settings are passed to the underlaying
  //   encoder, frame encode complete callback is not intercepted.
  // * Multi-encoder simulcast or singlecast if layers are deactivated
  //   (active_streams_count >= 1). SEA creates N=active_streams_count encoders
  //   and configures each to produce a single stream.
  int active_streams_count = CountActiveStreams(*codec_settings);
  // If we only have a single active layer it is better to create an encoder
  // with only one configured layer than creating it with all-but-one disabled
  // layers because that way we control scaling.
  bool separate_encoders_needed =
      !encoder_context->encoder().GetEncoderInfo().supports_simulcast ||
      active_streams_count == 1;
  RTC_LOG(LS_INFO) << "[SEA] InitEncode: total_streams_count: "
                   << total_streams_count_
                   << ", active_streams_count: " << active_streams_count
                   << ", separate_encoders_needed: "
                   << (separate_encoders_needed ? "true" : "false");
  // Singlecast or simulcast with simulcast-capable underlaying encoder.
  if (total_streams_count_ == 1 || !separate_encoders_needed) {
    RTC_LOG(LS_INFO) << "[SEA] InitEncode: Single-encoder mode";
    int ret = encoder_context->encoder().InitEncode(&codec_, settings);
    if (ret >= 0) {
      stream_contexts_.emplace_back(
          /*parent=*/nullptr, std::move(encoder_context),
          /*framerate_controller=*/nullptr, /*stream_idx=*/0, codec_.width,
          codec_.height, /*is_paused=*/active_streams_count == 0);
      bypass_mode_ = true;
      DestroyStoredEncoders();
      inited_.store(1);
      return WEBRTC_VIDEO_CODEC_OK;
    }
    // Bypass init failed. Fatal for true singlecast; for simulcast, fall
    // through and retry with one encoder per stream, reusing this (released)
    // encoder context in the loop below.
    encoder_context->Release();
    if (total_streams_count_ == 1) {
      RTC_LOG(LS_ERROR) << "[SEA] InitEncode: failed with error code: "
                        << WebRtcVideoCodecErrorToString(ret);
      return ret;
    }
  }
  // Multi-encoder simulcast or singlecast (deactivated layers).
  std::vector<uint32_t> stream_start_bitrate_kbps =
      GetStreamStartBitratesKbps(codec_);
  for (int stream_idx = 0; stream_idx < total_streams_count_; ++stream_idx) {
    // Inactive streams get no encoder (except in legacy singlecast).
    if (!is_legacy_singlecast && !codec_.simulcastStream[stream_idx].active) {
      continue;
    }
    // The context fetched above is consumed by the first stream; subsequent
    // iterations fetch (or create) fresh ones.
    if (encoder_context == nullptr) {
      encoder_context = FetchOrCreateEncoderContext(
          /*is_lowest_quality_stream=*/stream_idx == lowest_quality_stream_idx);
    }
    if (encoder_context == nullptr) {
      Release();
      return WEBRTC_VIDEO_CODEC_MEMORY;
    }
    VideoCodec stream_codec = MakeStreamCodec(
        codec_, stream_idx, stream_start_bitrate_kbps[stream_idx],
        /*is_lowest_quality_stream=*/stream_idx == lowest_quality_stream_idx,
        /*is_highest_quality_stream=*/stream_idx == highest_quality_stream_idx);
    RTC_LOG(LS_INFO) << "[SEA] Multi-encoder mode: initializing stream: "
                     << stream_idx << ", active: "
                     << (codec_.simulcastStream[stream_idx].active ? "true"
                                                                   : "false");
    int ret = encoder_context->encoder().InitEncode(&stream_codec, settings);
    if (ret < 0) {
      encoder_context.reset();
      Release();
      RTC_LOG(LS_ERROR) << "[SEA] InitEncode: failed with error code: "
                        << WebRtcVideoCodecErrorToString(ret);
      return ret;
    }
    // Intercept frame encode complete callback only for upper streams, where
    // we need to set a correct stream index. Set `parent` to nullptr for the
    // lowest stream to bypass the callback.
    SimulcastEncoderAdapter* parent = stream_idx > 0 ? this : nullptr;
    bool is_paused = stream_start_bitrate_kbps[stream_idx] == 0;
    stream_contexts_.emplace_back(
        parent, std::move(encoder_context),
        std::make_unique<FramerateController>(stream_codec.maxFramerate),
        stream_idx, stream_codec.width, stream_codec.height, is_paused);
  }
  // To save memory, don't store encoders that we don't use.
  DestroyStoredEncoders();
  inited_.store(1);
  return WEBRTC_VIDEO_CODEC_OK;
}
// Encodes `input_image` on every active stream. In bypass mode the frame and
// frame types are forwarded unchanged to the single simulcast-capable
// encoder; otherwise each per-stream encoder gets the frame (downscaled when
// resolutions differ) and a single per-encoder frame type.
// Returns a WEBRTC_VIDEO_CODEC_* status code.
int SimulcastEncoderAdapter::Encode(
    const VideoFrame& input_image,
    const std::vector<VideoFrameType>* frame_types) {
  RTC_DCHECK_RUN_ON(&encoder_queue_);
  if (!Initialized()) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (encoded_complete_callback_ == nullptr) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  // Reject frames whose dimensions violate the alignment requested via field
  // trial; encoders could otherwise fail in less obvious ways.
  if (encoder_info_override_.requested_resolution_alignment()) {
    const int alignment =
        *encoder_info_override_.requested_resolution_alignment();
    if (input_image.width() % alignment != 0 ||
        input_image.height() % alignment != 0) {
      RTC_LOG(LS_WARNING) << "Frame " << input_image.width() << "x"
                          << input_image.height() << " not divisible by "
                          << alignment;
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    if (encoder_info_override_.apply_alignment_to_all_simulcast_layers()) {
      for (const auto& layer : stream_contexts_) {
        if (layer.width() % alignment != 0 || layer.height() % alignment != 0) {
          RTC_LOG(LS_WARNING)
              << "Codec " << layer.width() << "x" << layer.height()
              << " not divisible by " << alignment;
          return WEBRTC_VIDEO_CODEC_ERROR;
        }
      }
    }
  }
  bool is_keyframe_needed = false;
  for (const auto& layer : stream_contexts_) {
    if (layer.is_keyframe_needed()) {
      // This is legacy behavior, generating a keyframe on all layers
      // when generating one for a layer that became active for the first time
      // or after being disabled.
      is_keyframe_needed = true;
      break;
    }
  }
  // Temporary that may hold the result of texture to i420 buffer conversion.
  rtc::scoped_refptr<VideoFrameBuffer> src_buffer;
  int src_width = input_image.width();
  int src_height = input_image.height();
  for (auto& layer : stream_contexts_) {
    // Don't encode frames in resolutions that we don't intend to send.
    if (layer.is_paused()) {
      continue;
    }
    // Convert timestamp from RTP 90kHz clock.
    const Timestamp frame_timestamp =
        Timestamp::Micros((1000 * input_image.timestamp()) / 90);
    // If adapter is passed through and only one sw encoder does simulcast,
    // frame types for all streams should be passed to the encoder unchanged.
    // Otherwise a single per-encoder frame type is passed.
    std::vector<VideoFrameType> stream_frame_types(
        bypass_mode_
            ? std::max<unsigned char>(codec_.numberOfSimulcastStreams, 1)
            : 1,
        VideoFrameType::kVideoFrameDelta);
    bool keyframe_requested = false;
    if (is_keyframe_needed) {
      std::fill(stream_frame_types.begin(), stream_frame_types.end(),
                VideoFrameType::kVideoFrameKey);
      keyframe_requested = true;
    } else if (frame_types) {
      if (bypass_mode_) {
        // In bypass mode, we effectively pass on frame_types.
        RTC_DCHECK_EQ(frame_types->size(), stream_frame_types.size());
        stream_frame_types = *frame_types;
        keyframe_requested =
            absl::c_any_of(*frame_types, [](const VideoFrameType frame_type) {
              return frame_type == VideoFrameType::kVideoFrameKey;
            });
      } else {
        size_t stream_idx = static_cast<size_t>(layer.stream_idx());
        // Bounds check must be strict: indexing `stream_idx` is only valid
        // when size() > stream_idx (the previous `>=` allowed an
        // out-of-bounds read when size() == stream_idx).
        if (frame_types->size() > stream_idx &&
            (*frame_types)[stream_idx] == VideoFrameType::kVideoFrameKey) {
          stream_frame_types[0] = VideoFrameType::kVideoFrameKey;
          keyframe_requested = true;
        }
      }
    }
    if (keyframe_requested) {
      layer.OnKeyframe(frame_timestamp);
    } else if (layer.ShouldDropFrame(frame_timestamp)) {
      continue;
    }
    // If scaling isn't required, because the input resolution
    // matches the destination or the input image is empty (e.g.
    // a keyframe request for encoders with internal camera
    // sources) or the source image has a native handle, pass the image on
    // directly. Otherwise, we'll scale it to match what the encoder expects
    // (below).
    // For texture frames, the underlying encoder is expected to be able to
    // correctly sample/scale the source texture.
    // TODO(perkj): ensure that works going forward, and figure out how this
    // affects webrtc:5683.
    if ((layer.width() == src_width && layer.height() == src_height) ||
        (input_image.video_frame_buffer()->type() ==
             VideoFrameBuffer::Type::kNative &&
         layer.encoder().GetEncoderInfo().supports_native_handle)) {
      int ret = layer.encoder().Encode(input_image, &stream_frame_types);
      if (ret != WEBRTC_VIDEO_CODEC_OK) {
        return ret;
      }
    } else {
      if (src_buffer == nullptr) {
        src_buffer = input_image.video_frame_buffer();
      }
      rtc::scoped_refptr<VideoFrameBuffer> dst_buffer =
          src_buffer->Scale(layer.width(), layer.height());
      if (!dst_buffer) {
        RTC_LOG(LS_ERROR) << "Failed to scale video frame";
        return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
      }
      // UpdateRect is not propagated to lower simulcast layers currently.
      // TODO(ilnik): Consider scaling UpdateRect together with the buffer.
      VideoFrame frame(input_image);
      frame.set_video_frame_buffer(dst_buffer);
      frame.set_rotation(webrtc::kVideoRotation_0);
      frame.set_update_rect(
          VideoFrame::UpdateRect{0, 0, frame.width(), frame.height()});
      int ret = layer.encoder().Encode(frame, &stream_frame_types);
      if (ret != WEBRTC_VIDEO_CODEC_OK) {
        return ret;
      }
    }
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
// Stores `callback` as the destination for encoded images. The lowest layer
// (stream index 0) never needs its spatial index rewritten, so it delivers
// straight to `callback` instead of going through the adapter's interception.
int SimulcastEncoderAdapter::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  RTC_DCHECK_RUN_ON(&encoder_queue_);
  encoded_complete_callback_ = callback;
  if (!stream_contexts_.empty()) {
    StreamContext& lowest_layer = stream_contexts_.front();
    if (lowest_layer.stream_idx() == 0) {
      lowest_layer.encoder().RegisterEncodeCompleteCallback(callback);
    }
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
// Distributes the aggregate rate allocation to the per-stream encoders. In
// bypass mode the parameters are forwarded unchanged to the single underlying
// encoder; otherwise each stream receives its slice of the spatial
// allocation, a proportional share of the link allocation, and a framerate
// capped by its per-stream target.
void SimulcastEncoderAdapter::SetRates(
    const RateControlParameters& parameters) {
  RTC_DCHECK_RUN_ON(&encoder_queue_);
  if (!Initialized()) {
    RTC_LOG(LS_WARNING) << "SetRates while not initialized";
    return;
  }
  if (parameters.framerate_fps < 1.0) {
    RTC_LOG(LS_WARNING) << "Invalid framerate: " << parameters.framerate_fps;
    return;
  }
  // Round to nearest integer fps.
  codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
  if (bypass_mode_) {
    stream_contexts_.front().encoder().SetRates(parameters);
    return;
  }
  for (StreamContext& layer_context : stream_contexts_) {
    int stream_idx = layer_context.stream_idx();
    uint32_t stream_bitrate_kbps =
        parameters.bitrate.GetSpatialLayerSum(stream_idx) / 1000;
    // Need a key frame if we have not sent this stream before.
    if (stream_bitrate_kbps > 0 && layer_context.is_paused()) {
      layer_context.set_is_keyframe_needed();
    }
    // A zero allocation pauses the stream.
    layer_context.set_is_paused(stream_bitrate_kbps == 0);
    // Slice the temporal layers out of the full allocation and pass it on to
    // the encoder handling the current simulcast stream.
    RateControlParameters stream_parameters = parameters;
    stream_parameters.bitrate = VideoBitrateAllocation();
    for (int i = 0; i < kMaxTemporalStreams; ++i) {
      if (parameters.bitrate.HasBitrate(stream_idx, i)) {
        stream_parameters.bitrate.SetBitrate(
            0, i, parameters.bitrate.GetBitrate(stream_idx, i));
      }
    }
    // Assign link allocation proportionally to spatial layer allocation.
    if (!parameters.bandwidth_allocation.IsZero() &&
        parameters.bitrate.get_sum_bps() > 0) {
      stream_parameters.bandwidth_allocation =
          DataRate::BitsPerSec((parameters.bandwidth_allocation.bps() *
                                stream_parameters.bitrate.get_sum_bps()) /
                               parameters.bitrate.get_sum_bps());
      // Make sure we don't allocate bandwidth lower than target bitrate.
      if (stream_parameters.bandwidth_allocation.bps() <
          stream_parameters.bitrate.get_sum_bps()) {
        stream_parameters.bandwidth_allocation =
            DataRate::BitsPerSec(stream_parameters.bitrate.get_sum_bps());
      }
    }
    // Never exceed the overall framerate; respect the stream's own target
    // fps when a framerate controller exists for it.
    stream_parameters.framerate_fps = std::min<double>(
        parameters.framerate_fps,
        layer_context.target_fps().value_or(parameters.framerate_fps));
    layer_context.encoder().SetRates(stream_parameters);
  }
}
// Forwards the updated packet loss estimate to every stream encoder.
void SimulcastEncoderAdapter::OnPacketLossRateUpdate(float packet_loss_rate) {
  for (auto it = stream_contexts_.begin(); it != stream_contexts_.end(); ++it) {
    it->encoder().OnPacketLossRateUpdate(packet_loss_rate);
  }
}
// Forwards the updated round-trip time to every stream encoder.
void SimulcastEncoderAdapter::OnRttUpdate(int64_t rtt_ms) {
  for (auto it = stream_contexts_.begin(); it != stream_contexts_.end(); ++it) {
    it->encoder().OnRttUpdate(rtt_ms);
  }
}
// Forwards a dependency-loss notification to every stream encoder.
void SimulcastEncoderAdapter::OnLossNotification(
    const LossNotification& loss_notification) {
  for (auto it = stream_contexts_.begin(); it != stream_contexts_.end(); ++it) {
    it->encoder().OnLossNotification(loss_notification);
  }
}
// TODO(brandtr): Add task checker to this member function, when all encoder
// callbacks are coming in on the encoder queue.
// Forwards an encoded image from a per-stream encoder to the registered
// callback, with the originating simulcast index stamped onto a copy of the
// image header.
EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
    size_t stream_idx,
    const EncodedImage& encodedImage,
    const CodecSpecificInfo* codecSpecificInfo) {
  EncodedImage image_copy(encodedImage);
  image_copy.SetSimulcastIndex(stream_idx);
  // Copied because the downstream callback takes a mutable pointer.
  CodecSpecificInfo codec_specific_copy = *codecSpecificInfo;
  return encoded_complete_callback_->OnEncodedImage(image_copy,
                                                    &codec_specific_copy);
}
// Per-stream frame drops are currently not propagated to the registered
// callback.
void SimulcastEncoderAdapter::OnDroppedFrame(size_t stream_idx) {
  // Not yet implemented.
}
// True between a successful InitEncode() and the next Release().
bool SimulcastEncoderAdapter::Initialized() const {
  return inited_.load() == 1;
}
// Destroys all cached (currently unused) encoder instances, newest first.
void SimulcastEncoderAdapter::DestroyStoredEncoders() {
  while (!cached_encoder_contexts_.empty()) {
    cached_encoder_contexts_.pop_back();
  }
}
// Returns an EncoderContext, reusing a cached one with a matching
// `prefer_temporal_support` flag when available, otherwise creating a new
// encoder (wrapped with software fallback when a fallback factory exists).
// Returns nullptr when neither primary nor fallback encoder could be created.
// Const because it is also called from const GetEncoderInfo(); the cache is
// `mutable` for the same reason.
std::unique_ptr<SimulcastEncoderAdapter::EncoderContext>
SimulcastEncoderAdapter::FetchOrCreateEncoderContext(
    bool is_lowest_quality_stream) const {
  // Temporal-layer support is only requested for the base layer, and only
  // when the field trial enables it and a fallback factory is present.
  bool prefer_temporal_support = fallback_encoder_factory_ != nullptr &&
                                 is_lowest_quality_stream &&
                                 prefer_temporal_support_on_base_layer_;
  // Toggling of `prefer_temporal_support` requires encoder recreation. Find
  // and reuse encoder with desired `prefer_temporal_support`. Otherwise, if
  // there is no such encoder in the cache, create a new instance.
  auto encoder_context_iter =
      std::find_if(cached_encoder_contexts_.begin(),
                   cached_encoder_contexts_.end(), [&](auto& encoder_context) {
                     return encoder_context->prefer_temporal_support() ==
                            prefer_temporal_support;
                   });
  std::unique_ptr<SimulcastEncoderAdapter::EncoderContext> encoder_context;
  if (encoder_context_iter != cached_encoder_contexts_.end()) {
    encoder_context = std::move(*encoder_context_iter);
    cached_encoder_contexts_.erase(encoder_context_iter);
  } else {
    std::unique_ptr<VideoEncoder> primary_encoder =
        primary_encoder_factory_->CreateVideoEncoder(video_format_);
    std::unique_ptr<VideoEncoder> fallback_encoder;
    if (fallback_encoder_factory_ != nullptr) {
      fallback_encoder =
          fallback_encoder_factory_->CreateVideoEncoder(video_format_);
    }
    std::unique_ptr<VideoEncoder> encoder;
    VideoEncoder::EncoderInfo primary_info;
    VideoEncoder::EncoderInfo fallback_info;
    if (primary_encoder != nullptr) {
      primary_info = primary_encoder->GetEncoderInfo();
      fallback_info = primary_info;
      if (fallback_encoder == nullptr) {
        encoder = std::move(primary_encoder);
      } else {
        // Both available: wrap so the fallback takes over if the primary
        // fails at runtime.
        encoder = CreateVideoEncoderSoftwareFallbackWrapper(
            std::move(fallback_encoder), std::move(primary_encoder),
            prefer_temporal_support);
      }
    } else if (fallback_encoder != nullptr) {
      RTC_LOG(LS_WARNING) << "Failed to create primary " << video_format_.name
                          << " encoder. Use fallback encoder.";
      fallback_info = fallback_encoder->GetEncoderInfo();
      primary_info = fallback_info;
      encoder = std::move(fallback_encoder);
    } else {
      RTC_LOG(LS_ERROR) << "Failed to create primary and fallback "
                        << video_format_.name << " encoders.";
      return nullptr;
    }
    encoder_context = std::make_unique<SimulcastEncoderAdapter::EncoderContext>(
        std::move(encoder), prefer_temporal_support, primary_info,
        fallback_info);
  }
  encoder_context->encoder().RegisterEncodeCompleteCallback(
      encoded_complete_callback_);
  return encoder_context;
}
// Builds the single-stream VideoCodec configuration for `stream_idx` from the
// full simulcast configuration `codec`: resolution, bitrates, framerate, QP,
// scalability mode, and codec-specific tweaks for the lowest/highest quality
// streams. `start_bitrate_kbps` is the stream's initial allocation.
webrtc::VideoCodec SimulcastEncoderAdapter::MakeStreamCodec(
    const webrtc::VideoCodec& codec,
    int stream_idx,
    uint32_t start_bitrate_kbps,
    bool is_lowest_quality_stream,
    bool is_highest_quality_stream) {
  webrtc::VideoCodec codec_params = codec;
  const SimulcastStream& stream_params = codec.simulcastStream[stream_idx];
  // The per-stream encoder produces exactly one stream.
  codec_params.numberOfSimulcastStreams = 0;
  codec_params.width = stream_params.width;
  codec_params.height = stream_params.height;
  codec_params.maxBitrate = stream_params.maxBitrate;
  codec_params.minBitrate = stream_params.minBitrate;
  codec_params.maxFramerate = stream_params.maxFramerate;
  codec_params.qpMax = stream_params.qpMax;
  codec_params.active = stream_params.active;
  // By default, `scalability_mode` comes from SimulcastStream when
  // SimulcastEncoderAdapter is used. This allows multiple encodings of L1Tx,
  // but SimulcastStream currently does not support multiple spatial layers.
  ScalabilityMode scalability_mode = stream_params.GetScalabilityMode();
  // To support the full set of scalability modes in the event that this is the
  // only active encoding, prefer VideoCodec::GetScalabilityMode() if all other
  // encodings are inactive.
  if (codec.GetScalabilityMode().has_value()) {
    bool only_active_stream = true;
    for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
      if (i != stream_idx && codec.simulcastStream[i].active) {
        only_active_stream = false;
        break;
      }
    }
    if (only_active_stream) {
      scalability_mode = codec.GetScalabilityMode().value();
    }
  }
  codec_params.SetScalabilityMode(scalability_mode);
  // Settings that are based on stream/resolution.
  if (is_lowest_quality_stream) {
    // Settings for lowest spatial resolutions.
    if (codec.mode == VideoCodecMode::kScreensharing) {
      if (experimental_boosted_screenshare_qp_) {
        codec_params.qpMax = *experimental_boosted_screenshare_qp_;
      }
    } else if (boost_base_layer_quality_) {
      codec_params.qpMax = kLowestResMaxQp;
    }
  }
  if (codec.codecType == webrtc::kVideoCodecVP8) {
    codec_params.VP8()->numberOfTemporalLayers =
        stream_params.numberOfTemporalLayers;
    if (!is_highest_quality_stream) {
      // For resolutions below CIF, set the codec `complexity` parameter to
      // kComplexityHigher, which maps to cpu_used = -4.
      int pixels_per_frame = codec_params.width * codec_params.height;
      if (pixels_per_frame < 352 * 288) {
        codec_params.SetVideoEncoderComplexity(
            webrtc::VideoCodecComplexity::kComplexityHigher);
      }
      // Turn off denoising for all streams but the highest resolution.
      codec_params.VP8()->denoisingOn = false;
    }
  } else if (codec.codecType == webrtc::kVideoCodecH264) {
    codec_params.H264()->numberOfTemporalLayers =
        stream_params.numberOfTemporalLayers;
  }
  // Cap start bitrate to the min bitrate in order to avoid strange codec
  // behavior.
  codec_params.startBitrate =
      std::max(stream_params.minBitrate, start_bitrate_kbps);
  // Legacy screenshare mode is only enabled for the first simulcast layer
  codec_params.legacy_conference_mode =
      codec.legacy_conference_mode && stream_idx == 0;
  return codec_params;
}
// Applies field-trial overrides to `info`: merges any requested resolution
// alignment (keeping the strictest via least common multiple) and fills in
// resolution bitrate limits when the encoder itself provides none.
void SimulcastEncoderAdapter::OverrideFromFieldTrial(
    VideoEncoder::EncoderInfo* info) const {
  const auto& alignment_override =
      encoder_info_override_.requested_resolution_alignment();
  if (alignment_override) {
    info->requested_resolution_alignment = cricket::LeastCommonMultiple(
        info->requested_resolution_alignment, *alignment_override);
    if (encoder_info_override_.apply_alignment_to_all_simulcast_layers()) {
      info->apply_alignment_to_all_simulcast_layers = true;
    }
  }
  // Override resolution bitrate limits unless they're set already.
  if (info->resolution_bitrate_limits.empty() &&
      !encoder_info_override_.resolution_bitrate_limits().empty()) {
    info->resolution_bitrate_limits =
        encoder_info_override_.resolution_bitrate_limits();
  }
}
// Aggregates EncoderInfo across the active per-stream encoders. With exactly
// one stream context the underlying encoder's info is passed through; before
// InitEncode (no contexts) a temporary encoder is created just to query (and
// then cached) alignment requirements; otherwise the per-encoder infos are
// merged field by field. Field-trial overrides are applied in all paths.
VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const {
  if (stream_contexts_.size() == 1) {
    // Not using simulcast adapting functionality, just pass through.
    VideoEncoder::EncoderInfo info =
        stream_contexts_.front().encoder().GetEncoderInfo();
    OverrideFromFieldTrial(&info);
    return info;
  }
  VideoEncoder::EncoderInfo encoder_info;
  encoder_info.implementation_name = "SimulcastEncoderAdapter";
  encoder_info.requested_resolution_alignment = 1;
  encoder_info.apply_alignment_to_all_simulcast_layers = false;
  encoder_info.supports_native_handle = true;
  encoder_info.scaling_settings.thresholds = absl::nullopt;
  if (stream_contexts_.empty()) {
    // GetEncoderInfo queried before InitEncode. Only alignment info is needed
    // to be filled.
    // Create one encoder and query it.
    std::unique_ptr<SimulcastEncoderAdapter::EncoderContext> encoder_context =
        FetchOrCreateEncoderContext(/*is_lowest_quality_stream=*/true);
    if (encoder_context == nullptr) {
      return encoder_info;
    }
    const VideoEncoder::EncoderInfo& primary_info =
        encoder_context->PrimaryInfo();
    const VideoEncoder::EncoderInfo& fallback_info =
        encoder_context->FallbackInfo();
    encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
        primary_info.requested_resolution_alignment,
        fallback_info.requested_resolution_alignment);
    encoder_info.apply_alignment_to_all_simulcast_layers =
        primary_info.apply_alignment_to_all_simulcast_layers ||
        fallback_info.apply_alignment_to_all_simulcast_layers;
    // Multi-encoder mode is possible if either encoder lacks simulcast
    // support, so all layers must then honor the alignment.
    if (!primary_info.supports_simulcast || !fallback_info.supports_simulcast) {
      encoder_info.apply_alignment_to_all_simulcast_layers = true;
    }
    // Keep the probe encoder for later reuse instead of discarding it.
    cached_encoder_contexts_.emplace_back(std::move(encoder_context));
    OverrideFromFieldTrial(&encoder_info);
    return encoder_info;
  }
  encoder_info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
  for (size_t i = 0; i < stream_contexts_.size(); ++i) {
    VideoEncoder::EncoderInfo encoder_impl_info =
        stream_contexts_[i].encoder().GetEncoderInfo();
    if (i == 0) {
      // Encoder name indicates names of all sub-encoders.
      encoder_info.implementation_name += " (";
      encoder_info.implementation_name += encoder_impl_info.implementation_name;
      encoder_info.supports_native_handle =
          encoder_impl_info.supports_native_handle;
      encoder_info.has_trusted_rate_controller =
          encoder_impl_info.has_trusted_rate_controller;
      encoder_info.is_hardware_accelerated =
          encoder_impl_info.is_hardware_accelerated;
      encoder_info.is_qp_trusted = encoder_impl_info.is_qp_trusted;
    } else {
      encoder_info.implementation_name += ", ";
      encoder_info.implementation_name += encoder_impl_info.implementation_name;
      // Native handle supported if any encoder supports it.
      encoder_info.supports_native_handle |=
          encoder_impl_info.supports_native_handle;
      // Trusted rate controller only if all encoders have it.
      encoder_info.has_trusted_rate_controller &=
          encoder_impl_info.has_trusted_rate_controller;
      // Uses hardware support if any of the encoders uses it.
      // For example, if we are having issues with down-scaling due to
      // pipelining delay in HW encoders we need higher encoder usage
      // thresholds in CPU adaptation.
      encoder_info.is_hardware_accelerated |=
          encoder_impl_info.is_hardware_accelerated;
      // Treat QP from frame/slice/tile header as average QP only if all
      // encoders report it as average QP.
      encoder_info.is_qp_trusted =
          encoder_info.is_qp_trusted.value_or(true) &&
          encoder_impl_info.is_qp_trusted.value_or(true);
    }
    encoder_info.fps_allocation[i] = encoder_impl_info.fps_allocation[0];
    encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
        encoder_info.requested_resolution_alignment,
        encoder_impl_info.requested_resolution_alignment);
    // request alignment on all layers if any of the encoders may need it, or
    // if any non-top layer encoder requests a non-trivial alignment.
    if (encoder_impl_info.apply_alignment_to_all_simulcast_layers ||
        (encoder_impl_info.requested_resolution_alignment > 1 &&
         (codec_.simulcastStream[i].height < codec_.height ||
          codec_.simulcastStream[i].width < codec_.width))) {
      encoder_info.apply_alignment_to_all_simulcast_layers = true;
    }
  }
  encoder_info.implementation_name += ")";
  OverrideFromFieldTrial(&encoder_info);
  return encoder_info;
}
} // namespace webrtc

View file

@ -0,0 +1,200 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
#define MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
#include <atomic>
#include <list>
#include <memory>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/field_trials_view.h"
#include "api/sequence_checker.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "common_video/framerate_controller.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/experiments/encoder_info_settings.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// SimulcastEncoderAdapter implements simulcast support by creating multiple
// webrtc::VideoEncoder instances with the given VideoEncoderFactory.
// The object is created and destroyed on the worker thread, but all public
// interfaces should be called from the encoder task queue.
class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder {
 public:
  // TODO(bugs.webrtc.org/11000): Remove when downstream usage is gone.
  // Note: parameter name fixed from the misspelled `primarty_factory`.
  SimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
                          const SdpVideoFormat& format);
  // `primary_factory` produces the first-choice encoders to use.
  // `fallback_factory`, if non-null, is used to create fallback encoder that
  // will be used if InitEncode() fails for the primary encoder.
  SimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
                          VideoEncoderFactory* fallback_factory,
                          const SdpVideoFormat& format,
                          const FieldTrialsView& field_trials);
  ~SimulcastEncoderAdapter() override;
  // Implements VideoEncoder.
  void SetFecControllerOverride(
      FecControllerOverride* fec_controller_override) override;
  int Release() override;
  int InitEncode(const VideoCodec* codec_settings,
                 const VideoEncoder::Settings& settings) override;
  int Encode(const VideoFrame& input_image,
             const std::vector<VideoFrameType>* frame_types) override;
  int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
  void SetRates(const RateControlParameters& parameters) override;
  void OnPacketLossRateUpdate(float packet_loss_rate) override;
  void OnRttUpdate(int64_t rtt_ms) override;
  void OnLossNotification(const LossNotification& loss_notification) override;
  EncoderInfo GetEncoderInfo() const override;
 private:
  // Owns one (possibly fallback-wrapped) encoder instance together with the
  // EncoderInfo snapshots of its primary and fallback encoders taken at
  // creation time.
  class EncoderContext {
   public:
    EncoderContext(std::unique_ptr<VideoEncoder> encoder,
                   bool prefer_temporal_support,
                   VideoEncoder::EncoderInfo primary_info,
                   VideoEncoder::EncoderInfo fallback_info);
    EncoderContext& operator=(EncoderContext&&) = delete;
    VideoEncoder& encoder() { return *encoder_; }
    bool prefer_temporal_support() { return prefer_temporal_support_; }
    void Release();
    const VideoEncoder::EncoderInfo& PrimaryInfo() { return primary_info_; }
    const VideoEncoder::EncoderInfo& FallbackInfo() { return fallback_info_; }
   private:
    std::unique_ptr<VideoEncoder> encoder_;
    bool prefer_temporal_support_;
    const VideoEncoder::EncoderInfo primary_info_;
    const VideoEncoder::EncoderInfo fallback_info_;
  };
  // Per-simulcast-stream state: the encoder context, the configured
  // resolution, optional framerate control, and pause/keyframe bookkeeping.
  // Acts as the EncodedImageCallback for upper streams so the stream index
  // can be stamped onto encoded images.
  class StreamContext : public EncodedImageCallback {
   public:
    StreamContext(SimulcastEncoderAdapter* parent,
                  std::unique_ptr<EncoderContext> encoder_context,
                  std::unique_ptr<FramerateController> framerate_controller,
                  int stream_idx,
                  uint16_t width,
                  uint16_t height,
                  bool send_stream);
    StreamContext(StreamContext&& rhs);
    StreamContext& operator=(StreamContext&&) = delete;
    ~StreamContext() override;
    Result OnEncodedImage(
        const EncodedImage& encoded_image,
        const CodecSpecificInfo* codec_specific_info) override;
    void OnDroppedFrame(DropReason reason) override;
    VideoEncoder& encoder() { return encoder_context_->encoder(); }
    const VideoEncoder& encoder() const { return encoder_context_->encoder(); }
    int stream_idx() const { return stream_idx_; }
    uint16_t width() const { return width_; }
    uint16_t height() const { return height_; }
    // A paused stream never reports a pending keyframe request.
    bool is_keyframe_needed() const {
      return !is_paused_ && is_keyframe_needed_;
    }
    void set_is_keyframe_needed() { is_keyframe_needed_ = true; }
    bool is_paused() const { return is_paused_; }
    void set_is_paused(bool is_paused) { is_paused_ = is_paused; }
    // Target framerate, or nullopt when no framerate controller exists.
    absl::optional<double> target_fps() const {
      return framerate_controller_ == nullptr
                 ? absl::nullopt
                 : absl::optional<double>(
                       framerate_controller_->GetMaxFramerate());
    }
    std::unique_ptr<EncoderContext> ReleaseEncoderContext() &&;
    void OnKeyframe(Timestamp timestamp);
    bool ShouldDropFrame(Timestamp timestamp);
   private:
    SimulcastEncoderAdapter* const parent_;
    std::unique_ptr<EncoderContext> encoder_context_;
    std::unique_ptr<FramerateController> framerate_controller_;
    const int stream_idx_;
    const uint16_t width_;
    const uint16_t height_;
    bool is_keyframe_needed_;
    bool is_paused_;
  };
  bool Initialized() const;
  void DestroyStoredEncoders();
  // This method creates encoder. May reuse previously created encoders from
  // `cached_encoder_contexts_`. It's const because it's used from
  // const GetEncoderInfo().
  std::unique_ptr<EncoderContext> FetchOrCreateEncoderContext(
      bool is_lowest_quality_stream) const;
  webrtc::VideoCodec MakeStreamCodec(const webrtc::VideoCodec& codec,
                                     int stream_idx,
                                     uint32_t start_bitrate_kbps,
                                     bool is_lowest_quality_stream,
                                     bool is_highest_quality_stream);
  EncodedImageCallback::Result OnEncodedImage(
      size_t stream_idx,
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info);
  void OnDroppedFrame(size_t stream_idx);
  void OverrideFromFieldTrial(VideoEncoder::EncoderInfo* info) const;
  std::atomic<int> inited_;
  VideoEncoderFactory* const primary_encoder_factory_;
  VideoEncoderFactory* const fallback_encoder_factory_;
  const SdpVideoFormat video_format_;
  VideoCodec codec_;
  int total_streams_count_;
  bool bypass_mode_;
  std::vector<StreamContext> stream_contexts_;
  EncodedImageCallback* encoded_complete_callback_;
  // Used for checking the single-threaded access of the encoder interface.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker encoder_queue_;
  // Store previously created and released encoders, so they don't have to be
  // recreated. Remaining encoders are destroyed by the destructor.
  // Marked as `mutable` because we may need to temporarily create encoder in
  // GetEncoderInfo(), which is const.
  mutable std::list<std::unique_ptr<EncoderContext>> cached_encoder_contexts_;
  const absl::optional<unsigned int> experimental_boosted_screenshare_qp_;
  const bool boost_base_layer_quality_;
  const bool prefer_temporal_support_on_base_layer_;
  const SimulcastEncoderAdapterEncoderInfoSettings encoder_info_override_;
};
} // namespace webrtc
#endif // MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_

View file

@ -0,0 +1,185 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/engine/webrtc_media_engine.h"
#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "media/base/media_constants.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace cricket {
namespace {
// Remove mutually exclusive extensions with lower priority.
void DiscardRedundantExtensions(
    std::vector<webrtc::RtpExtension>* extensions,
    rtc::ArrayView<const char* const> extensions_decreasing_prio) {
  RTC_DCHECK(extensions);
  // Walk the URIs from highest to lowest priority. The first URI that is
  // present in `extensions` is kept; every lower-priority match is erased.
  bool keeper_found = false;
  for (const char* uri : extensions_decreasing_prio) {
    auto match = absl::c_find_if(
        *extensions,
        [uri](const webrtc::RtpExtension& ext) { return ext.uri == uri; });
    if (match == extensions->end()) {
      continue;
    }
    if (keeper_found) {
      extensions->erase(match);
    } else {
      keeper_found = true;
    }
  }
}
} // namespace
// Validates `extensions`: every ID must be within [kMinId, kMaxId] and
// unique, and no URI<->ID pairing already negotiated in `old_extensions` may
// be remapped. Returns true when the list is acceptable.
bool ValidateRtpExtensions(
    rtc::ArrayView<const webrtc::RtpExtension> extensions,
    rtc::ArrayView<const webrtc::RtpExtension> old_extensions) {
  bool id_used[1 + webrtc::RtpExtension::kMaxId] = {false};
  for (const auto& extension : extensions) {
    if (extension.id < webrtc::RtpExtension::kMinId ||
        extension.id > webrtc::RtpExtension::kMaxId) {
      RTC_LOG(LS_ERROR) << "Bad RTP extension ID: " << extension.ToString();
      return false;
    }
    if (id_used[extension.id]) {
      RTC_LOG(LS_ERROR) << "Duplicate RTP extension ID: "
                        << extension.ToString();
      return false;
    }
    id_used[extension.id] = true;
  }
  // Validate the extension list against the already negotiated extensions.
  // Re-registering is OK, re-mapping (either same URL at new ID or same
  // ID used with new URL) is an illegal remap.
  // This is required in order to avoid a crash when registering an
  // extension. A better structure would use the registered extensions
  // in the RTPSender. This requires spinning through:
  //
  // WebRtcVoiceMediaChannel::::WebRtcAudioSendStream::stream_ (pointer)
  // AudioSendStream::rtp_rtcp_module_ (pointer)
  // ModuleRtpRtcpImpl2::rtp_sender_ (pointer)
  // RtpSenderContext::packet_generator (struct member)
  // RTPSender::rtp_header_extension_map_ (class member)
  //
  // Getting at this seems like a hard slog.
  if (!old_extensions.empty()) {
    absl::string_view urimap[1 + webrtc::RtpExtension::kMaxId];
    std::map<absl::string_view, int> idmap;
    for (const auto& old_extension : old_extensions) {
      urimap[old_extension.id] = old_extension.uri;
      idmap[old_extension.uri] = old_extension.id;
    }
    for (const auto& extension : extensions) {
      if (!urimap[extension.id].empty() &&
          urimap[extension.id] != extension.uri) {
        RTC_LOG(LS_ERROR) << "Extension negotiation failure: " << extension.id
                          << " was mapped to " << urimap[extension.id]
                          << " but is proposed changed to " << extension.uri;
        return false;
      }
      const auto& it = idmap.find(extension.uri);
      if (it != idmap.end() && it->second != extension.id) {
        // Log message typo fixed: "negotation" -> "negotiation", matching the
        // message in the ID-remap branch above.
        RTC_LOG(LS_ERROR) << "Extension negotiation failure: " << extension.uri
                          << " was identified by " << it->second
                          << " but is proposed changed to " << extension.id;
        return false;
      }
    }
  }
  return true;
}
// Keeps only the extensions accepted by `supported`, canonicalizes their
// order, and (optionally) strips duplicates and mutually exclusive
// bandwidth-estimation extensions.
std::vector<webrtc::RtpExtension> FilterRtpExtensions(
    const std::vector<webrtc::RtpExtension>& extensions,
    bool (*supported)(absl::string_view),
    bool filter_redundant_extensions,
    const webrtc::FieldTrialsView& trials) {
  // Don't check against old parameters; this should have been done earlier.
  RTC_DCHECK(ValidateRtpExtensions(extensions, {}));
  RTC_DCHECK(supported);
  // Ignore any extensions that we don't recognize.
  std::vector<webrtc::RtpExtension> result;
  for (const webrtc::RtpExtension& extension : extensions) {
    if (!supported(extension.uri)) {
      RTC_LOG(LS_WARNING) << "Unsupported RTP extension: "
                          << extension.ToString();
      continue;
    }
    result.push_back(extension);
  }
  // Sort by name, ascending (prioritise encryption), so that we don't reset
  // extensions if they were specified in a different order (also allows us
  // to use std::unique below).
  absl::c_sort(result, [](const webrtc::RtpExtension& a,
                          const webrtc::RtpExtension& b) {
    if (a.encrypt != b.encrypt) {
      return a.encrypt > b.encrypt;
    }
    return a.uri < b.uri;
  });
  if (filter_redundant_extensions) {
    // Remove exact duplicates (used on send side).
    result.erase(std::unique(result.begin(), result.end(),
                             [](const webrtc::RtpExtension& a,
                                const webrtc::RtpExtension& b) {
                               return a.uri == b.uri && a.encrypt == b.encrypt;
                             }),
                 result.end());
    // Keep just the highest priority extension of any in the following lists.
    if (absl::StartsWith(trials.Lookup("WebRTC-FilterAbsSendTimeExtension"),
                         "Enabled")) {
      static const char* const kBweExtensionPriorities[] = {
          webrtc::RtpExtension::kTransportSequenceNumberUri,
          webrtc::RtpExtension::kAbsSendTimeUri,
          webrtc::RtpExtension::kTimestampOffsetUri};
      DiscardRedundantExtensions(&result, kBweExtensionPriorities);
    } else {
      static const char* const kBweExtensionPriorities[] = {
          webrtc::RtpExtension::kAbsSendTimeUri,
          webrtc::RtpExtension::kTimestampOffsetUri};
      DiscardRedundantExtensions(&result, kBweExtensionPriorities);
    }
  }
  return result;
}
// Builds min/start/max bitrate constraints from `codec`'s SDP parameters.
// Parameters are given in kbps; the returned constraints are in bps.
webrtc::BitrateConstraints GetBitrateConfigForCodec(const Codec& codec) {
  // Returns the codec parameter converted to bps, or `unset_value` when the
  // parameter is missing or non-positive.
  auto bps_from_param = [&codec](const char* param, int unset_value) {
    int kbps = 0;
    if (codec.GetParam(param, &kbps) && kbps > 0) {
      return kbps * 1000;
    }
    return unset_value;
  };
  webrtc::BitrateConstraints config;
  config.min_bitrate_bps = bps_from_param(kCodecParamMinBitrate, 0);
  // Do not reconfigure start bitrate unless it's specified and positive.
  config.start_bitrate_bps = bps_from_param(kCodecParamStartBitrate, -1);
  config.max_bitrate_bps = bps_from_param(kCodecParamMaxBitrate, -1);
  return config;
}
} // namespace cricket

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
#define MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
#include <vector>
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/field_trials_view.h"
#include "api/rtp_parameters.h"
#include "api/transport/bitrate_settings.h"
#include "media/base/codec.h"
namespace cricket {
// Verify that extension IDs are within 1-byte extension range and are not
// overlapping, and that they form a legal change from previously registered
// extensions (if any).
// (Typos fixed: "registerd" -> "registered"; parameter "extennsions" ->
// "extensions" to match the definition.)
bool ValidateRtpExtensions(
    rtc::ArrayView<const webrtc::RtpExtension> extensions,
    rtc::ArrayView<const webrtc::RtpExtension> old_extensions);
// Discard any extensions not validated by the 'supported' predicate. Duplicate
// extensions are removed if 'filter_redundant_extensions' is set, and also any
// mutually exclusive extensions (see implementation for details) are removed.
std::vector<webrtc::RtpExtension> FilterRtpExtensions(
    const std::vector<webrtc::RtpExtension>& extensions,
    bool (*supported)(absl::string_view),
    bool filter_redundant_extensions,
    const webrtc::FieldTrialsView& trials);
// Returns min/start/max bitrate constraints parsed from `codec`'s parameters
// (kbps in SDP, bps in the returned struct).
webrtc::BitrateConstraints GetBitrateConfigForCodec(const Codec& codec);
}  // namespace cricket
#endif  // MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,901 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
#define MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
#include <stddef.h>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/field_trials_view.h"
#include "api/frame_transformer_interface.h"
#include "api/rtc_error.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/transport/bitrate_settings.h"
#include "api/transport/field_trial_based_config.h"
#include "api/transport/rtp/rtp_source.h"
#include "api/video/recordable_encoded_frame.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "call/call.h"
#include "call/flexfec_receive_stream.h"
#include "call/rtp_config.h"
#include "call/video_receive_stream.h"
#include "call/video_send_stream.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_config.h"
#include "media/base/media_engine.h"
#include "media/base/stream_params.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "video/config/video_encoder_config.h"
namespace webrtc {
class VideoDecoderFactory;
class VideoEncoderFactory;
} // namespace webrtc
namespace cricket {
// Public for testing.
// Inputs StreamStats for all types of substreams (kMedia, kRtx, kFlexfec) and
// merges any non-kMedia substream stats object into its referenced kMedia-type
// substream. The resulting substreams are all kMedia. This means, for example,
// that packet and byte counters of RTX and FlexFEC streams are accounted for in
// the relevant RTP media stream's stats. This makes the resulting StreamStats
// objects ready to be turned into "outbound-rtp" stats objects for GetStats()
// which does not create separate stream stats objects for complementary
// streams.
// NOTE(review): the uint32_t map keys are presumably the substreams' RTP
// SSRCs - confirm against the implementation and call sites.
std::map<uint32_t, webrtc::VideoSendStream::StreamStats>
MergeInfoAboutOutboundRtpSubstreamsForTesting(
    const std::map<uint32_t, webrtc::VideoSendStream::StreamStats>& substreams);
// WebRtcVideoEngine is used for the new native WebRTC Video API (webrtc:1667).
// Factory for per-call video send/receive channels; also reports the codecs
// and RTP header extensions this build supports.
class WebRtcVideoEngine : public VideoEngineInterface {
 public:
  // These video codec factories represents all video codecs, i.e. both software
  // and external hardware codecs.
  WebRtcVideoEngine(
      std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory,
      std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory,
      const webrtc::FieldTrialsView& trials);
  ~WebRtcVideoEngine() override;
  std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
      override;
  std::unique_ptr<VideoMediaReceiveChannelInterface> CreateReceiveChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options) override;
  // The no-argument overloads include RTX codecs.
  std::vector<VideoCodec> send_codecs() const override {
    return send_codecs(true);
  }
  std::vector<VideoCodec> recv_codecs() const override {
    return recv_codecs(true);
  }
  std::vector<VideoCodec> send_codecs(bool include_rtx) const override;
  std::vector<VideoCodec> recv_codecs(bool include_rtx) const override;
  std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
      const override;
 private:
  const std::unique_ptr<webrtc::VideoDecoderFactory> decoder_factory_;
  const std::unique_ptr<webrtc::VideoEncoderFactory> encoder_factory_;
  const std::unique_ptr<webrtc::VideoBitrateAllocatorFactory>
      bitrate_allocator_factory_;
  // Reference member: the FieldTrialsView passed to the constructor must
  // outlive this engine.
  const webrtc::FieldTrialsView& trials_;
};
// Bundles a video codec together with the auxiliary FEC/RTX configuration
// (ULPFEC config, FlexFEC and RTX payload types) negotiated alongside it.
struct VideoCodecSettings {
  explicit VideoCodecSettings(const VideoCodec& codec);
  // Checks if all members of |*this| are equal to the corresponding members
  // of `other`.
  bool operator==(const VideoCodecSettings& other) const;
  bool operator!=(const VideoCodecSettings& other) const;
  // Checks if all members of `a`, except `flexfec_payload_type`, are equal
  // to the corresponding members of `b`.
  static bool EqualsDisregardingFlexfec(const VideoCodecSettings& a,
                                        const VideoCodecSettings& b);
  VideoCodec codec;
  webrtc::UlpfecConfig ulpfec;
  int flexfec_payload_type;  // -1 if absent.
  int rtx_payload_type;      // -1 if absent.
  // Unset if not negotiated; units presumably milliseconds (SDP "rtx-time")
  // - TODO confirm against consumers.
  absl::optional<int> rtx_time;
};
// Send half of the WebRTC video channel. Owns the per-stream
// WebRtcVideoSendStream wrappers, applies sender/RTP parameters and codec
// changes, and reacts to encoder switch requests via
// webrtc::EncoderSwitchRequestCallback.
class WebRtcVideoSendChannel : public MediaChannelUtil,
                               public VideoMediaSendChannelInterface,
                               public webrtc::EncoderSwitchRequestCallback {
 public:
  WebRtcVideoSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::VideoEncoderFactory* encoder_factory,
      webrtc::VideoDecoderFactory* decoder_factory,
      webrtc::VideoBitrateAllocatorFactory* bitrate_allocator_factory);
  ~WebRtcVideoSendChannel() override;
  MediaType media_type() const override { return MEDIA_TYPE_VIDEO; }
  // Type manipulations
  VideoMediaSendChannelInterface* AsVideoSendChannel() override { return this; }
  VoiceMediaSendChannelInterface* AsVoiceSendChannel() override {
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  // Functions imported from MediaChannelUtil
  bool HasNetworkInterface() const override {
    return MediaChannelUtil::HasNetworkInterface();
  }
  void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
    MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
  }
  bool ExtmapAllowMixed() const override {
    return MediaChannelUtil::ExtmapAllowMixed();
  }
  // Common functions between sender and receiver
  void SetInterface(MediaChannelNetworkInterface* iface) override;
  // VideoMediaSendChannelInterface implementation
  bool SetSenderParameters(const VideoSenderParameters& params) override;
  webrtc::RTCError SetRtpSendParameters(
      uint32_t ssrc,
      const webrtc::RtpParameters& parameters,
      webrtc::SetParametersCallback callback) override;
  webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override;
  absl::optional<Codec> GetSendCodec() const override;
  bool SetSend(bool send) override;
  bool SetVideoSend(
      uint32_t ssrc,
      const VideoOptions* options,
      rtc::VideoSourceInterface<webrtc::VideoFrame>* source) override;
  bool AddSendStream(const StreamParams& sp) override;
  bool RemoveSendStream(uint32_t ssrc) override;
  void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) override;
  bool GetStats(VideoMediaSendInfo* info) override;
  void OnPacketSent(const rtc::SentPacket& sent_packet) override;
  void OnReadyToSend(bool ready) override;
  void OnNetworkRouteChanged(absl::string_view transport_name,
                             const rtc::NetworkRoute& network_route) override;
  // Set a frame encryptor to a particular ssrc that will intercept all
  // outgoing video frames and attempt to encrypt them and forward the result
  // to the packetizer.
  void SetFrameEncryptor(uint32_t ssrc,
                         rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
                             frame_encryptor) override;
  // note: The encoder_selector object must remain valid for the lifetime of the
  // MediaChannel, unless replaced.
  void SetEncoderSelector(uint32_t ssrc,
                          webrtc::VideoEncoderFactory::EncoderSelectorInterface*
                              encoder_selector) override;
  void SetSendCodecChangedCallback(
      absl::AnyInvocable<void()> callback) override {
    send_codec_changed_callback_ = std::move(callback);
  }
  void SetSsrcListChangedCallback(
      absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {
    ssrc_list_changed_callback_ = std::move(callback);
  }
  // Implemented for VideoMediaChannelTest.
  bool sending() const {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    return sending_;
  }
  // AdaptReason is used for expressing why a WebRtcVideoSendStream requests
  // a lower input frame size than the currently configured camera input frame
  // size. There can be more than one reason OR:ed together.
  enum AdaptReason {
    ADAPTREASON_NONE = 0,
    ADAPTREASON_CPU = 1,
    ADAPTREASON_BANDWIDTH = 2,
  };
  // Implements webrtc::EncoderSwitchRequestCallback.
  void RequestEncoderFallback() override;
  void RequestEncoderSwitch(const webrtc::SdpVideoFormat& format,
                            bool allow_default_fallback) override;
  void GenerateSendKeyFrame(uint32_t ssrc,
                            const std::vector<std::string>& rids) override;
  void SetEncoderToPacketizerFrameTransformer(
      uint32_t ssrc,
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override;
  // Information queries to support SetReceiverFeedbackParameters
  webrtc::RtcpMode SendCodecRtcpMode() const override {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    return send_params_.rtcp.reduced_size ? webrtc::RtcpMode::kReducedSize
                                          : webrtc::RtcpMode::kCompound;
  }
  bool SendCodecHasLntf() const override {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    if (!send_codec()) {
      return false;
    }
    return HasLntf(send_codec()->codec);
  }
  bool SendCodecHasNack() const override {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    if (!send_codec()) {
      return false;
    }
    return HasNack(send_codec()->codec);
  }
  absl::optional<int> SendCodecRtxTime() const override {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    if (!send_codec()) {
      return absl::nullopt;
    }
    return send_codec()->rtx_time;
  }
 private:
  struct ChangedSenderParameters {
    // These optionals are unset if not changed.
    absl::optional<VideoCodecSettings> send_codec;
    absl::optional<std::vector<VideoCodecSettings>> negotiated_codecs;
    absl::optional<std::vector<webrtc::RtpExtension>> rtp_header_extensions;
    absl::optional<std::string> mid;
    absl::optional<bool> extmap_allow_mixed;
    absl::optional<int> max_bandwidth_bps;
    absl::optional<bool> conference_mode;
    absl::optional<webrtc::RtcpMode> rtcp_mode;
  };
  bool GetChangedSenderParameters(const VideoSenderParameters& params,
                                  ChangedSenderParameters* changed_params) const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  bool ApplyChangedParams(const ChangedSenderParameters& changed_params);
  bool ValidateSendSsrcAvailability(const StreamParams& sp) const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  // Populates `rtx_associated_payload_types`, `raw_payload_types` and
  // `decoders` based on codec settings provided by `recv_codecs`.
  // `recv_codecs` must be non-empty and all other parameters must be empty.
  static void ExtractCodecInformation(
      rtc::ArrayView<const VideoCodecSettings> recv_codecs,
      std::map<int, int>& rtx_associated_payload_types,
      std::set<int>& raw_payload_types,
      std::vector<webrtc::VideoReceiveStreamInterface::Decoder>& decoders);
  // Wrapper for the sender part.
  class WebRtcVideoSendStream {
   public:
    WebRtcVideoSendStream(
        webrtc::Call* call,
        const StreamParams& sp,
        webrtc::VideoSendStream::Config config,
        const VideoOptions& options,
        bool enable_cpu_overuse_detection,
        int max_bitrate_bps,
        const absl::optional<VideoCodecSettings>& codec_settings,
        const absl::optional<std::vector<webrtc::RtpExtension>>& rtp_extensions,
        const VideoSenderParameters& send_params);
    ~WebRtcVideoSendStream();
    void SetSenderParameters(const ChangedSenderParameters& send_params);
    webrtc::RTCError SetRtpParameters(const webrtc::RtpParameters& parameters,
                                      webrtc::SetParametersCallback callback);
    webrtc::RtpParameters GetRtpParameters() const;
    void SetFrameEncryptor(
        rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor);
    bool SetVideoSend(const VideoOptions* options,
                      rtc::VideoSourceInterface<webrtc::VideoFrame>* source);
    // note: The encoder_selector object must remain valid for the lifetime of
    // the MediaChannel, unless replaced.
    void SetEncoderSelector(
        webrtc::VideoEncoderFactory::EncoderSelectorInterface*
            encoder_selector);
    void SetSend(bool send);
    const std::vector<uint32_t>& GetSsrcs() const;
    // Returns per ssrc VideoSenderInfos. Useful for simulcast scenario.
    std::vector<VideoSenderInfo> GetPerLayerVideoSenderInfos(bool log_stats);
    // Aggregates per ssrc VideoSenderInfos to single VideoSenderInfo for
    // legacy reasons. Used in old GetStats API and track stats.
    VideoSenderInfo GetAggregatedVideoSenderInfo(
        const std::vector<VideoSenderInfo>& infos) const;
    void FillBitrateInfo(BandwidthEstimationInfo* bwe_info);
    void SetEncoderToPacketizerFrameTransformer(
        rtc::scoped_refptr<webrtc::FrameTransformerInterface>
            frame_transformer);
    void GenerateKeyFrame(const std::vector<std::string>& rids);
   private:
    // Parameters needed to reconstruct the underlying stream.
    // webrtc::VideoSendStream doesn't support setting a lot of options on the
    // fly, so when those need to be changed we tear down and reconstruct with
    // similar parameters depending on which options changed etc.
    struct VideoSendStreamParameters {
      VideoSendStreamParameters(
          webrtc::VideoSendStream::Config config,
          const VideoOptions& options,
          int max_bitrate_bps,
          const absl::optional<VideoCodecSettings>& codec_settings);
      webrtc::VideoSendStream::Config config;
      VideoOptions options;
      int max_bitrate_bps;
      bool conference_mode;
      absl::optional<VideoCodecSettings> codec_settings;
      // Sent resolutions + bitrates etc. by the underlying VideoSendStream,
      // typically changes when setting a new resolution or reconfiguring
      // bitrates.
      webrtc::VideoEncoderConfig encoder_config;
    };
    rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
    ConfigureVideoEncoderSettings(const VideoCodec& codec);
    void SetCodec(const VideoCodecSettings& codec);
    void RecreateWebRtcStream();
    webrtc::VideoEncoderConfig CreateVideoEncoderConfig(
        const VideoCodec& codec) const;
    void ReconfigureEncoder(webrtc::SetParametersCallback callback);
    // Calls Start or Stop according to whether or not `sending_` is true.
    void UpdateSendState();
    webrtc::DegradationPreference GetDegradationPreference() const
        RTC_EXCLUSIVE_LOCKS_REQUIRED(&thread_checker_);
    RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
    webrtc::TaskQueueBase* const worker_thread_;
    const std::vector<uint32_t> ssrcs_ RTC_GUARDED_BY(&thread_checker_);
    const std::vector<SsrcGroup> ssrc_groups_ RTC_GUARDED_BY(&thread_checker_);
    webrtc::Call* const call_;
    const bool enable_cpu_overuse_detection_;
    rtc::VideoSourceInterface<webrtc::VideoFrame>* source_
        RTC_GUARDED_BY(&thread_checker_);
    webrtc::VideoSendStream* stream_ RTC_GUARDED_BY(&thread_checker_);
    // Contains settings that are the same for all streams in the MediaChannel,
    // such as codecs, header extensions, and the global bitrate limit for the
    // entire channel.
    VideoSendStreamParameters parameters_ RTC_GUARDED_BY(&thread_checker_);
    // Contains settings that are unique for each stream, such as max_bitrate.
    // Does *not* contain codecs, however.
    // TODO(skvlad): Move ssrcs_ and ssrc_groups_ into rtp_parameters_.
    // TODO(skvlad): Combine parameters_ and rtp_parameters_ once we have only
    // one stream per MediaChannel.
    webrtc::RtpParameters rtp_parameters_ RTC_GUARDED_BY(&thread_checker_);
    bool sending_ RTC_GUARDED_BY(&thread_checker_);
    // TODO(asapersson): investigate why setting
    // DegradationPreference::MAINTAIN_RESOLUTION isn't sufficient to disable
    // downscaling everywhere in the pipeline.
    const bool disable_automatic_resize_;
  };
  void Construct(webrtc::Call* call, WebRtcVideoEngine* engine);
  // Get all codecs that are compatible with the receiver.
  std::vector<VideoCodecSettings> SelectSendVideoCodecs(
      const std::vector<VideoCodecSettings>& remote_mapped_codecs) const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  void FillSenderStats(VideoMediaSendInfo* info, bool log_stats)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  void FillBandwidthEstimationStats(const webrtc::Call::Stats& stats,
                                    VideoMediaInfo* info)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  void FillSendCodecStats(VideoMediaSendInfo* video_media_info)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  // Accessor function for send_codec_. Introduced in order to ensure
  // that a receive channel does not touch the send codec directly.
  // Can go away once these are different classes.
  // TODO(bugs.webrtc.org/13931): Remove this function
  absl::optional<VideoCodecSettings>& send_codec() { return send_codec_; }
  const absl::optional<VideoCodecSettings>& send_codec() const {
    return send_codec_;
  }
  webrtc::TaskQueueBase* const worker_thread_;
  webrtc::ScopedTaskSafety task_safety_;
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker network_thread_checker_{
      webrtc::SequenceChecker::kDetached};
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
  // NOTE(review): several receive-side members below (receiving_,
  // default_sink_, recv_codecs_, recv_params_, ...) look like leftovers from
  // the combined send/receive channel (see the TODO(bugs.webrtc.org/13931)
  // comments in this class) - confirm which are still read by this class.
  uint32_t rtcp_receiver_report_ssrc_ RTC_GUARDED_BY(thread_checker_);
  bool sending_ RTC_GUARDED_BY(thread_checker_);
  bool receiving_ RTC_GUARDED_BY(&thread_checker_);
  webrtc::Call* const call_;
  rtc::VideoSinkInterface<webrtc::VideoFrame>* default_sink_
      RTC_GUARDED_BY(thread_checker_);
  // Delay for unsignaled streams, which may be set before the stream exists.
  int default_recv_base_minimum_delay_ms_ RTC_GUARDED_BY(thread_checker_) = 0;
  const MediaConfig::Video video_config_ RTC_GUARDED_BY(thread_checker_);
  // Using primary-ssrc (first ssrc) as key.
  std::map<uint32_t, WebRtcVideoSendStream*> send_streams_
      RTC_GUARDED_BY(thread_checker_);
  // When the channel and demuxer get reconfigured, there is a window of time
  // where we have to be prepared for packets arriving based on the old demuxer
  // criteria because the streams live on the worker thread and the demuxer
  // lives on the network thread. Because packets are posted from the network
  // thread to the worker thread, they can still be in-flight when streams are
  // reconfigured. This can happen when `demuxer_criteria_id_` and
  // `demuxer_criteria_completed_id_` don't match. During this time, we do not
  // want to create unsignalled receive streams and should instead drop the
  // packets. E.g:
  // * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
  //   in-flight for that ssrc. This happens when a receiver becomes inactive.
  // * If we go from one to many m= sections, the demuxer may change from
  //   forwarding all packets to only forwarding the configured ssrcs, so there
  //   is a risk of receiving ssrcs for other, recently added m= sections.
  uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0;
  uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0;
  absl::optional<int64_t> last_unsignalled_ssrc_creation_time_ms_
      RTC_GUARDED_BY(thread_checker_);
  std::set<uint32_t> send_ssrcs_ RTC_GUARDED_BY(thread_checker_);
  std::set<uint32_t> receive_ssrcs_ RTC_GUARDED_BY(thread_checker_);
  absl::optional<VideoCodecSettings> send_codec_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<VideoCodecSettings> negotiated_codecs_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<webrtc::RtpExtension> send_rtp_extensions_
      RTC_GUARDED_BY(thread_checker_);
  webrtc::VideoEncoderFactory* const encoder_factory_
      RTC_GUARDED_BY(thread_checker_);
  webrtc::VideoDecoderFactory* const decoder_factory_
      RTC_GUARDED_BY(thread_checker_);
  webrtc::VideoBitrateAllocatorFactory* const bitrate_allocator_factory_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<VideoCodecSettings> recv_codecs_ RTC_GUARDED_BY(thread_checker_);
  webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<webrtc::RtpExtension> recv_rtp_extensions_
      RTC_GUARDED_BY(thread_checker_);
  // See reason for keeping track of the FlexFEC payload type separately in
  // comment in WebRtcVideoChannel::ChangedReceiverParameters.
  int recv_flexfec_payload_type_ RTC_GUARDED_BY(thread_checker_);
  webrtc::BitrateConstraints bitrate_config_ RTC_GUARDED_BY(thread_checker_);
  // TODO(deadbeef): Don't duplicate information between
  // send_params/recv_params, rtp_extensions, options, etc.
  VideoSenderParameters send_params_ RTC_GUARDED_BY(thread_checker_);
  VideoOptions default_send_options_ RTC_GUARDED_BY(thread_checker_);
  VideoReceiverParameters recv_params_ RTC_GUARDED_BY(thread_checker_);
  int64_t last_send_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
  int64_t last_receive_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
  const bool discard_unknown_ssrc_packets_ RTC_GUARDED_BY(thread_checker_);
  // This is a stream param that comes from the remote description, but wasn't
  // signaled with any a=ssrc lines. It holds information that was signaled
  // before the unsignaled receive stream is created when the first packet is
  // received.
  StreamParams unsignaled_stream_params_ RTC_GUARDED_BY(thread_checker_);
  // Per peer connection crypto options that last for the lifetime of the peer
  // connection.
  const webrtc::CryptoOptions crypto_options_ RTC_GUARDED_BY(thread_checker_);
  // Optional frame transformer set on unsignaled streams.
  rtc::scoped_refptr<webrtc::FrameTransformerInterface>
      unsignaled_frame_transformer_ RTC_GUARDED_BY(thread_checker_);
  // RTP parameters that need to be set when creating a video receive stream.
  // Only used in Receiver mode - in Both mode, it reads those things from the
  // codec.
  webrtc::VideoReceiveStreamInterface::Config::Rtp rtp_config_;
  // Callback invoked whenever the send codec changes.
  // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
  absl::AnyInvocable<void()> send_codec_changed_callback_;
  // Callback invoked whenever the list of SSRCs changes.
  absl::AnyInvocable<void(const std::set<uint32_t>&)>
      ssrc_list_changed_callback_;
};
class WebRtcVideoReceiveChannel : public MediaChannelUtil,
public VideoMediaReceiveChannelInterface {
public:
WebRtcVideoReceiveChannel(webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoDecoderFactory* decoder_factory);
~WebRtcVideoReceiveChannel() override;
public:
MediaType media_type() const override { return MEDIA_TYPE_VIDEO; }
VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
return this;
}
VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
RTC_CHECK_NOTREACHED();
return nullptr;
}
// Common functions between sender and receiver
void SetInterface(MediaChannelNetworkInterface* iface) override;
// VideoMediaReceiveChannelInterface implementation
bool SetReceiverParameters(const VideoReceiverParameters& params) override;
webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override;
webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override;
void SetReceive(bool receive) override;
bool AddRecvStream(const StreamParams& sp) override;
bool AddDefaultRecvStreamForTesting(const StreamParams& sp) override {
// Invokes private AddRecvStream variant function
return AddRecvStream(sp, true);
}
bool RemoveRecvStream(uint32_t ssrc) override;
void ResetUnsignaledRecvStream() override;
absl::optional<uint32_t> GetUnsignaledSsrc() const override;
void OnDemuxerCriteriaUpdatePending() override;
void OnDemuxerCriteriaUpdateComplete() override;
bool SetSink(uint32_t ssrc,
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
void SetDefaultSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
bool GetStats(VideoMediaReceiveInfo* info) override;
void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override;
bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
absl::optional<int> GetBaseMinimumPlayoutDelayMs(
uint32_t ssrc) const override;
// Choose one of the available SSRCs (or default if none) as the current
// receiver report SSRC.
void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override;
// E2E Encrypted Video Frame API
// Set a frame decryptor to a particular ssrc that will intercept all
// incoming video frames and attempt to decrypt them before forwarding the
// result.
void SetFrameDecryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
frame_decryptor) override;
void SetRecordableEncodedFrameCallback(
uint32_t ssrc,
std::function<void(const webrtc::RecordableEncodedFrame&)> callback)
override;
void ClearRecordableEncodedFrameCallback(uint32_t ssrc) override;
void RequestRecvKeyFrame(uint32_t ssrc) override;
void SetDepacketizerToDecoderFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override;
std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
void SetReceiverFeedbackParameters(bool lntf_enabled,
bool nack_enabled,
webrtc::RtcpMode rtcp_mode,
absl::optional<int> rtx_time) override;
 private:
  class WebRtcVideoReceiveStream;

  // Records which receiver parameters actually changed; unset optionals mean
  // "leave as-is".
  struct ChangedReceiverParameters {
    // These optionals are unset if not changed.
    absl::optional<std::vector<VideoCodecSettings>> codec_settings;
    absl::optional<std::vector<webrtc::RtpExtension>> rtp_header_extensions;
    // Keep track of the FlexFEC payload type separately from `codec_settings`.
    // This allows us to recreate the FlexfecReceiveStream separately from the
    // VideoReceiveStreamInterface when the FlexFEC payload type is changed.
    absl::optional<int> flexfec_payload_type;
  };

  // Finds VideoReceiveStreamInterface corresponding to ssrc. Aware of
  // unsignalled ssrc handling.
  WebRtcVideoReceiveStream* FindReceiveStream(uint32_t ssrc)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  // Demuxes/dispatches a single received RTP packet on the worker thread.
  void ProcessReceivedPacket(webrtc::RtpPacketReceived packet)
      RTC_RUN_ON(thread_checker_);
  // Expected to be invoked once per packet that belongs to this channel that
  // can not be demuxed.
  // Returns true if a new default stream has been created.
  bool MaybeCreateDefaultReceiveStream(
      const webrtc::RtpPacketReceived& parsed_packet)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  void ReCreateDefaultReceiveStream(uint32_t ssrc,
                                    absl::optional<uint32_t> rtx_ssrc);
  // Add a receive stream. Used for testing.
  bool AddRecvStream(const StreamParams& sp, bool default_stream);
  void ConfigureReceiverRtp(
      webrtc::VideoReceiveStreamInterface::Config* config,
      webrtc::FlexfecReceiveStream::Config* flexfec_config,
      const StreamParams& sp) const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  bool ValidateReceiveSsrcAvailability(const StreamParams& sp) const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  void DeleteReceiveStream(WebRtcVideoReceiveStream* stream)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  // Called when the local ssrc changes. Sets `rtcp_receiver_report_ssrc_` and
  // updates the receive streams.
  void SetReceiverReportSsrc(uint32_t ssrc) RTC_RUN_ON(&thread_checker_);
  // Wrapper for the receiver part, contains configs etc. that are needed to
  // reconstruct the underlying VideoReceiveStreamInterface.
  class WebRtcVideoReceiveStream
      : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
   public:
    WebRtcVideoReceiveStream(
        webrtc::Call* call,
        const StreamParams& sp,
        webrtc::VideoReceiveStreamInterface::Config config,
        bool default_stream,
        const std::vector<VideoCodecSettings>& recv_codecs,
        const webrtc::FlexfecReceiveStream::Config& flexfec_config);
    ~WebRtcVideoReceiveStream();

    webrtc::VideoReceiveStreamInterface& stream();
    // Return value may be nullptr.
    webrtc::FlexfecReceiveStream* flexfec_stream();

    const std::vector<uint32_t>& GetSsrcs() const;

    std::vector<webrtc::RtpSource> GetSources();

    // Does not return codecs, nor header extensions,  they are filled by the
    // owning WebRtcVideoChannel.
    webrtc::RtpParameters GetRtpParameters() const;

    // TODO(deadbeef): Move these feedback parameters into the recv parameters.
    void SetFeedbackParameters(bool lntf_enabled,
                               bool nack_enabled,
                               webrtc::RtcpMode rtcp_mode,
                               absl::optional<int> rtx_time);
    void SetReceiverParameters(const ChangedReceiverParameters& recv_params);

    // rtc::VideoSinkInterface implementation; forwards decoded frames.
    void OnFrame(const webrtc::VideoFrame& frame) override;
    bool IsDefaultStream() const;

    void SetFrameDecryptor(
        rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor);

    bool SetBaseMinimumPlayoutDelayMs(int delay_ms);
    int GetBaseMinimumPlayoutDelayMs() const;

    void SetSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink);

    VideoReceiverInfo GetVideoReceiverInfo(bool log_stats);

    void SetRecordableEncodedFrameCallback(
        std::function<void(const webrtc::RecordableEncodedFrame&)> callback);
    void ClearRecordableEncodedFrameCallback();
    void GenerateKeyFrame();

    void SetDepacketizerToDecoderFrameTransformer(
        rtc::scoped_refptr<webrtc::FrameTransformerInterface>
            frame_transformer);

    void SetLocalSsrc(uint32_t local_ssrc);
    void UpdateRtxSsrc(uint32_t ssrc);
    void StartReceiveStream();
    void StopReceiveStream();

   private:
    // Attempts to reconfigure an already existing `flexfec_stream_`, create
    // one if the configuration is now complete or remove a flexfec stream
    // when disabled.
    void SetFlexFecPayload(int payload_type);

    void RecreateReceiveStream();
    void CreateReceiveStream();

    // Applies a new receive codecs configration to `config_`. Returns true
    // if the internal stream needs to be reconstructed, or false if no changes
    // were applied.
    bool ReconfigureCodecs(const std::vector<VideoCodecSettings>& recv_codecs);

    webrtc::Call* const call_;
    const StreamParams stream_params_;

    // Both `stream_` and `flexfec_stream_` are managed by `this`. They are
    // destroyed by calling call_->DestroyVideoReceiveStream and
    // call_->DestroyFlexfecReceiveStream, respectively.
    webrtc::VideoReceiveStreamInterface* stream_;
    const bool default_stream_;
    webrtc::VideoReceiveStreamInterface::Config config_;
    webrtc::FlexfecReceiveStream::Config flexfec_config_;
    webrtc::FlexfecReceiveStream* flexfec_stream_;

    // Guards the sink and the frame-timing bookkeeping below, which are
    // touched from the decoder's frame-delivery path.
    webrtc::Mutex sink_lock_;
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink_
        RTC_GUARDED_BY(sink_lock_);
    int64_t first_frame_timestamp_ RTC_GUARDED_BY(sink_lock_);
    // Start NTP time is estimated as current remote NTP time (estimated from
    // RTCP) minus the elapsed time, as soon as remote NTP time is available.
    int64_t estimated_remote_start_ntp_time_ms_ RTC_GUARDED_BY(sink_lock_);

    RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
    bool receiving_ RTC_GUARDED_BY(&thread_checker_);
  };
  // Computes which of `params` differ from the current state; returns false
  // on invalid input (see implementation), true otherwise.
  bool GetChangedReceiverParameters(const VideoReceiverParameters& params,
                                    ChangedReceiverParameters* changed_params)
      const RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);

  // Receive streams keyed by their primary SSRC; values owned by this class.
  std::map<uint32_t, WebRtcVideoReceiveStream*> receive_streams_
      RTC_GUARDED_BY(thread_checker_);
  void FillReceiverStats(VideoMediaReceiveInfo* info, bool log_stats)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
  void FillReceiveCodecStats(VideoMediaReceiveInfo* video_media_info)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);

  // Thread-checked accessor for `unsignaled_stream_params_` (returns a copy).
  StreamParams unsignaled_stream_params() {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    return unsignaled_stream_params_;
  }
  // Variables.
  webrtc::TaskQueueBase* const worker_thread_;
  webrtc::ScopedTaskSafety task_safety_;
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker network_thread_checker_{
      webrtc::SequenceChecker::kDetached};
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;

  uint32_t rtcp_receiver_report_ssrc_ RTC_GUARDED_BY(thread_checker_);
  bool receiving_ RTC_GUARDED_BY(&thread_checker_);
  webrtc::Call* const call_;

  // Sink used for packets arriving on an as-yet unsignaled (default) stream.
  rtc::VideoSinkInterface<webrtc::VideoFrame>* default_sink_
      RTC_GUARDED_BY(thread_checker_);

  // Delay for unsignaled streams, which may be set before the stream exists.
  int default_recv_base_minimum_delay_ms_ RTC_GUARDED_BY(thread_checker_) = 0;

  const MediaConfig::Video video_config_ RTC_GUARDED_BY(thread_checker_);

  // When the channel and demuxer get reconfigured, there is a window of time
  // where we have to be prepared for packets arriving based on the old demuxer
  // criteria because the streams live on the worker thread and the demuxer
  // lives on the network thread. Because packets are posted from the network
  // thread to the worker thread, they can still be in-flight when streams are
  // reconfgured. This can happen when `demuxer_criteria_id_` and
  // `demuxer_criteria_completed_id_` don't match. During this time, we do not
  // want to create unsignalled receive streams and should instead drop the
  // packets. E.g:
  // * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
  //   in-flight for that ssrc. This happens when a receiver becomes inactive.
  // * If we go from one to many m= sections, the demuxer may change from
  //   forwarding all packets to only forwarding the configured ssrcs, so there
  //   is a risk of receiving ssrcs for other, recently added m= sections.
  uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0;
  uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0;
  absl::optional<int64_t> last_unsignalled_ssrc_creation_time_ms_
      RTC_GUARDED_BY(thread_checker_);
  std::set<uint32_t> send_ssrcs_ RTC_GUARDED_BY(thread_checker_);
  std::set<uint32_t> receive_ssrcs_ RTC_GUARDED_BY(thread_checker_);

  absl::optional<VideoCodecSettings> send_codec_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<VideoCodecSettings> negotiated_codecs_
      RTC_GUARDED_BY(thread_checker_);

  std::vector<webrtc::RtpExtension> send_rtp_extensions_
      RTC_GUARDED_BY(thread_checker_);

  webrtc::VideoDecoderFactory* const decoder_factory_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<VideoCodecSettings> recv_codecs_ RTC_GUARDED_BY(thread_checker_);
  webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_
      RTC_GUARDED_BY(thread_checker_);
  std::vector<webrtc::RtpExtension> recv_rtp_extensions_
      RTC_GUARDED_BY(thread_checker_);
  // See reason for keeping track of the FlexFEC payload type separately in
  // comment in WebRtcVideoChannel::ChangedReceiverParameters.
  int recv_flexfec_payload_type_ RTC_GUARDED_BY(thread_checker_);
  webrtc::BitrateConstraints bitrate_config_ RTC_GUARDED_BY(thread_checker_);
  // TODO(deadbeef): Don't duplicate information between
  // send_params/recv_params, rtp_extensions, options, etc.
  VideoSenderParameters send_params_ RTC_GUARDED_BY(thread_checker_);
  VideoOptions default_send_options_ RTC_GUARDED_BY(thread_checker_);
  VideoReceiverParameters recv_params_ RTC_GUARDED_BY(thread_checker_);
  int64_t last_receive_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
  const bool discard_unknown_ssrc_packets_ RTC_GUARDED_BY(thread_checker_);
  // This is a stream param that comes from the remote description, but wasn't
  // signaled with any a=ssrc lines. It holds information that was signaled
  // before the unsignaled receive stream is created when the first packet is
  // received.
  StreamParams unsignaled_stream_params_ RTC_GUARDED_BY(thread_checker_);
  // Per peer connection crypto options that last for the lifetime of the peer
  // connection.
  const webrtc::CryptoOptions crypto_options_ RTC_GUARDED_BY(thread_checker_);

  // Optional frame transformer set on unsignaled streams.
  rtc::scoped_refptr<webrtc::FrameTransformerInterface>
      unsignaled_frame_transformer_ RTC_GUARDED_BY(thread_checker_);

  // RTP parameters that need to be set when creating a video receive stream.
  // Only used in Receiver mode - in Both mode, it reads those things from the
  // codec.
  webrtc::VideoReceiveStreamInterface::Config::Rtp rtp_config_;

  // Callback invoked whenever the send codec changes.
  // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
  absl::AnyInvocable<void()> send_codec_changed_callback_;
  // Callback invoked whenever the list of SSRCs changes.
  absl::AnyInvocable<void(const std::set<uint32_t>&)>
      ssrc_list_changed_callback_;

  const int receive_buffer_size_;
};
// Keeping the old name "WebRtcVideoChannel" around because some external
// customers are using cricket::WebRtcVideoChannel::AdaptReason
// TODO(bugs.webrtc.org/15216): Move this enum to an interface class and
// delete this workaround.
class WebRtcVideoChannel : public WebRtcVideoSendChannel {
 public:
  // Make all the values of AdaptReason available as
  // WebRtcVideoChannel::ADAPT_xxx.
  using WebRtcVideoSendChannel::AdaptReason;
};
} // namespace cricket
#endif // MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,516 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
#define MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/audio/audio_frame_processor.h"
#include "api/audio/audio_mixer.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_options.h"
#include "api/call/audio_sink.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/field_trials_view.h"
#include "api/frame_transformer_interface.h"
#include "api/rtc_error.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/transport/rtp/rtp_source.h"
#include "call/audio_send_stream.h"
#include "call/audio_state.h"
#include "call/call.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_config.h"
#include "media/base/media_engine.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "modules/async_audio_processing/async_audio_processing.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/buffer.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/system/file_wrapper.h"
namespace webrtc {
class AudioFrameProcessor;
}
namespace cricket {
class AudioSource;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling.
class WebRtcVoiceEngine final : public VoiceEngineInterface {
  friend class WebRtcVoiceSendChannel;
  friend class WebRtcVoiceReceiveChannel;

 public:
  WebRtcVoiceEngine(
      webrtc::TaskQueueFactory* task_queue_factory,
      webrtc::AudioDeviceModule* adm,
      const rtc::scoped_refptr<webrtc::AudioEncoderFactory>& encoder_factory,
      const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
      rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer,
      rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing,
      std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor,
      const webrtc::FieldTrialsView& trials);

  WebRtcVoiceEngine() = delete;
  WebRtcVoiceEngine(const WebRtcVoiceEngine&) = delete;
  WebRtcVoiceEngine& operator=(const WebRtcVoiceEngine&) = delete;

  ~WebRtcVoiceEngine() override;

  // Does initialization that needs to occur on the worker thread.
  void Init() override;

  rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const override;

  // Channel factories; the returned channels are owned by the caller.
  std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const AudioOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::AudioCodecPairId codec_pair_id) override;
  std::unique_ptr<VoiceMediaReceiveChannelInterface> CreateReceiveChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const AudioOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::AudioCodecPairId codec_pair_id) override;

  const std::vector<AudioCodec>& send_codecs() const override;
  const std::vector<AudioCodec>& recv_codecs() const override;
  std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
      const override;

  // Starts AEC dump using an existing file. A maximum file size in bytes can be
  // specified. When the maximum file size is reached, logging is stopped and
  // the file is closed. If max_size_bytes is set to <= 0, no limit will be
  // used.
  bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override;
  // Stops AEC dump.
  void StopAecDump() override;

  absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
      override;

 private:
  // Every option that is "set" will be applied. Every option not "set" will be
  // ignored. This allows us to selectively turn on and off different options
  // easily at any time.
  void ApplyOptions(const AudioOptions& options);

  webrtc::TaskQueueFactory* const task_queue_factory_;
  std::unique_ptr<webrtc::TaskQueueBase, webrtc::TaskQueueDeleter>
      low_priority_worker_queue_;

  // Accessors for the injected components below.
  webrtc::AudioDeviceModule* adm();
  webrtc::AudioProcessing* apm() const;
  webrtc::AudioState* audio_state();

  std::vector<AudioCodec> CollectCodecs(
      const std::vector<webrtc::AudioCodecSpec>& specs) const;

  webrtc::SequenceChecker signal_thread_checker_{
      webrtc::SequenceChecker::kDetached};
  webrtc::SequenceChecker worker_thread_checker_{
      webrtc::SequenceChecker::kDetached};

  // The audio device module.
  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm_;
  rtc::scoped_refptr<webrtc::AudioEncoderFactory> encoder_factory_;
  rtc::scoped_refptr<webrtc::AudioDecoderFactory> decoder_factory_;
  rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer_;
  // The audio processing module.
  rtc::scoped_refptr<webrtc::AudioProcessing> apm_;
  // Asynchronous audio processing.
  std::unique_ptr<webrtc::AudioFrameProcessor> audio_frame_processor_;
  // The primary instance of WebRtc VoiceEngine.
  rtc::scoped_refptr<webrtc::AudioState> audio_state_;
  std::vector<AudioCodec> send_codecs_;
  std::vector<AudioCodec> recv_codecs_;
  bool is_dumping_aec_ = false;
  bool initialized_ = false;

  // Jitter buffer settings for new streams.
  size_t audio_jitter_buffer_max_packets_ = 200;
  bool audio_jitter_buffer_fast_accelerate_ = false;
  int audio_jitter_buffer_min_delay_ms_ = 0;

  const bool minimized_remsampling_on_mobile_trial_enabled_;
};
// Send half of an audio channel: owns the send streams, the negotiated send
// codec configuration and the per-stream audio sources.
class WebRtcVoiceSendChannel final : public MediaChannelUtil,
                                     public VoiceMediaSendChannelInterface {
 public:
  WebRtcVoiceSendChannel(WebRtcVoiceEngine* engine,
                         const MediaConfig& config,
                         const AudioOptions& options,
                         const webrtc::CryptoOptions& crypto_options,
                         webrtc::Call* call,
                         webrtc::AudioCodecPairId codec_pair_id);

  WebRtcVoiceSendChannel() = delete;
  WebRtcVoiceSendChannel(const WebRtcVoiceSendChannel&) = delete;
  WebRtcVoiceSendChannel& operator=(const WebRtcVoiceSendChannel&) = delete;

  ~WebRtcVoiceSendChannel() override;

  MediaType media_type() const override { return MEDIA_TYPE_AUDIO; }
  VideoMediaSendChannelInterface* AsVideoSendChannel() override {
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; }

  absl::optional<Codec> GetSendCodec() const override;

  // Functions imported from MediaChannelUtil
  void SetInterface(MediaChannelNetworkInterface* iface) override {
    MediaChannelUtil::SetInterface(iface);
  }

  bool HasNetworkInterface() const override {
    return MediaChannelUtil::HasNetworkInterface();
  }
  void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
    MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
  }
  bool ExtmapAllowMixed() const override {
    return MediaChannelUtil::ExtmapAllowMixed();
  }

  const AudioOptions& options() const { return options_; }

  bool SetSenderParameters(const AudioSenderParameter& params) override;
  webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override;
  webrtc::RTCError SetRtpSendParameters(
      uint32_t ssrc,
      const webrtc::RtpParameters& parameters,
      webrtc::SetParametersCallback callback) override;

  void SetSend(bool send) override;
  bool SetAudioSend(uint32_t ssrc,
                    bool enable,
                    const AudioOptions* options,
                    AudioSource* source) override;
  bool AddSendStream(const StreamParams& sp) override;
  bool RemoveSendStream(uint32_t ssrc) override;

  void SetSsrcListChangedCallback(
      absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override;

  // E2EE Frame API
  // Set a frame encryptor to a particular ssrc that will intercept all
  // outgoing audio payloads frames and attempt to encrypt them and forward the
  // result to the packetizer.
  void SetFrameEncryptor(uint32_t ssrc,
                         rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
                             frame_encryptor) override;

  bool CanInsertDtmf() override;
  bool InsertDtmf(uint32_t ssrc, int event, int duration) override;

  void OnPacketSent(const rtc::SentPacket& sent_packet) override;
  void OnNetworkRouteChanged(absl::string_view transport_name,
                             const rtc::NetworkRoute& network_route) override;
  void OnReadyToSend(bool ready) override;
  bool GetStats(VoiceMediaSendInfo* info) override;

  // Sets a frame transformer between encoder and packetizer, to transform
  // encoded frames before sending them out the network.
  void SetEncoderToPacketizerFrameTransformer(
      uint32_t ssrc,
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override;

  // NACK is only reported enabled once a send codec has been negotiated.
  bool SenderNackEnabled() const override {
    if (!send_codec_spec_) {
      return false;
    }
    return send_codec_spec_->nack_enabled;
  }
  bool SenderNonSenderRttEnabled() const override {
    if (!send_codec_spec_) {
      return false;
    }
    return send_codec_spec_->enable_non_sender_rtt;
  }
  bool SendCodecHasNack() const override { return SenderNackEnabled(); }

  void SetSendCodecChangedCallback(
      absl::AnyInvocable<void()> callback) override {
    send_codec_changed_callback_ = std::move(callback);
  }

 private:
  bool SetOptions(const AudioOptions& options);
  bool SetSendCodecs(const std::vector<Codec>& codecs,
                     absl::optional<Codec> preferred_codec);
  bool SetLocalSource(uint32_t ssrc, AudioSource* source);
  bool MuteStream(uint32_t ssrc, bool mute);

  WebRtcVoiceEngine* engine() { return engine_; }
  bool SetMaxSendBitrate(int bps);
  void SetupRecording();

  webrtc::TaskQueueBase* const worker_thread_;
  webrtc::ScopedTaskSafety task_safety_;
  webrtc::SequenceChecker network_thread_checker_{
      webrtc::SequenceChecker::kDetached};

  WebRtcVoiceEngine* const engine_ = nullptr;
  std::vector<AudioCodec> send_codecs_;

  int max_send_bitrate_bps_ = 0;
  AudioOptions options_;
  absl::optional<int> dtmf_payload_type_;
  int dtmf_payload_freq_ = -1;
  bool enable_non_sender_rtt_ = false;
  bool send_ = false;
  webrtc::Call* const call_ = nullptr;
  const MediaConfig::Audio audio_config_;

  // Send streams keyed by SSRC; values owned by this class.
  class WebRtcAudioSendStream;
  std::map<uint32_t, WebRtcAudioSendStream*> send_streams_;
  std::vector<webrtc::RtpExtension> send_rtp_extensions_;
  std::string mid_;

  absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
      send_codec_spec_;

  // TODO(kwiberg): Per-SSRC codec pair IDs?
  const webrtc::AudioCodecPairId codec_pair_id_;

  // Per peer connection crypto options that last for the lifetime of the peer
  // connection.
  const webrtc::CryptoOptions crypto_options_;
  rtc::scoped_refptr<webrtc::FrameTransformerInterface>
      unsignaled_frame_transformer_;

  void FillSendCodecStats(VoiceMediaSendInfo* voice_media_info);

  // Callback invoked whenever the send codec changes.
  // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
  absl::AnyInvocable<void()> send_codec_changed_callback_;
  // Callback invoked whenever the list of SSRCs changes.
  absl::AnyInvocable<void(const std::set<uint32_t>&)>
      ssrc_list_changed_callback_;
};
// Receive half of an audio channel: owns the receive streams, decoder map and
// unsignaled-stream bookkeeping.
class WebRtcVoiceReceiveChannel final
    : public MediaChannelUtil,
      public VoiceMediaReceiveChannelInterface {
 public:
  WebRtcVoiceReceiveChannel(WebRtcVoiceEngine* engine,
                            const MediaConfig& config,
                            const AudioOptions& options,
                            const webrtc::CryptoOptions& crypto_options,
                            webrtc::Call* call,
                            webrtc::AudioCodecPairId codec_pair_id);

  WebRtcVoiceReceiveChannel() = delete;
  WebRtcVoiceReceiveChannel(const WebRtcVoiceReceiveChannel&) = delete;
  WebRtcVoiceReceiveChannel& operator=(const WebRtcVoiceReceiveChannel&) =
      delete;

  ~WebRtcVoiceReceiveChannel() override;

  MediaType media_type() const override { return MEDIA_TYPE_AUDIO; }

  VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
    return this;
  }

  const AudioOptions& options() const { return options_; }

  void SetInterface(MediaChannelNetworkInterface* iface) override {
    MediaChannelUtil::SetInterface(iface);
  }

  bool SetReceiverParameters(const AudioReceiverParameters& params) override;
  webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override;
  webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override;

  void SetPlayout(bool playout) override;
  bool AddRecvStream(const StreamParams& sp) override;
  bool RemoveRecvStream(uint32_t ssrc) override;
  void ResetUnsignaledRecvStream() override;
  absl::optional<uint32_t> GetUnsignaledSsrc() const override;

  void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override;

  void OnDemuxerCriteriaUpdatePending() override;
  void OnDemuxerCriteriaUpdateComplete() override;

  // E2EE Frame API
  // Set a frame decryptor to a particular ssrc that will intercept all
  // incoming audio payloads and attempt to decrypt them before forwarding the
  // result.
  void SetFrameDecryptor(uint32_t ssrc,
                         rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
                             frame_decryptor) override;

  bool SetOutputVolume(uint32_t ssrc, double volume) override;
  // Applies the new volume to current and future unsignaled streams.
  bool SetDefaultOutputVolume(double volume) override;

  bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
  absl::optional<int> GetBaseMinimumPlayoutDelayMs(
      uint32_t ssrc) const override;

  void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override;
  bool GetStats(VoiceMediaReceiveInfo* info,
                bool get_and_clear_legacy_stats) override;

  // Set the audio sink for an existing stream.
  void SetRawAudioSink(
      uint32_t ssrc,
      std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
  // Will set the audio sink on the latest unsignaled stream, future or
  // current. Only one stream at a time will use the sink.
  void SetDefaultRawAudioSink(
      std::unique_ptr<webrtc::AudioSinkInterface> sink) override;

  std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;

  void SetDepacketizerToDecoderFrameTransformer(
      uint32_t ssrc,
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override;

  void SetReceiveNackEnabled(bool enabled) override;
  void SetReceiveNonSenderRttEnabled(bool enabled) override;

 private:
  bool SetOptions(const AudioOptions& options);
  bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
  bool SetLocalSource(uint32_t ssrc, AudioSource* source);
  bool MuteStream(uint32_t ssrc, bool mute);

  WebRtcVoiceEngine* engine() { return engine_; }
  void SetupRecording();

  // Expected to be invoked once per packet that belongs to this channel that
  // can not be demuxed. Returns true if a default receive stream has been
  // created.
  bool MaybeCreateDefaultReceiveStream(const webrtc::RtpPacketReceived& packet);
  // Check if 'ssrc' is an unsignaled stream, and if so mark it as not being
  // unsignaled anymore (i.e. it is now removed, or signaled), and return true.
  bool MaybeDeregisterUnsignaledRecvStream(uint32_t ssrc);

  webrtc::TaskQueueBase* const worker_thread_;
  webrtc::ScopedTaskSafety task_safety_;
  webrtc::SequenceChecker network_thread_checker_{
      webrtc::SequenceChecker::kDetached};

  WebRtcVoiceEngine* const engine_ = nullptr;

  // TODO(kwiberg): decoder_map_ and recv_codecs_ store the exact same
  // information, in slightly different formats. Eliminate recv_codecs_.
  std::map<int, webrtc::SdpAudioFormat> decoder_map_;
  std::vector<AudioCodec> recv_codecs_;

  AudioOptions options_;
  bool recv_nack_enabled_ = false;
  bool enable_non_sender_rtt_ = false;
  bool playout_ = false;
  webrtc::Call* const call_ = nullptr;
  const MediaConfig::Audio audio_config_;

  // Queue of unsignaled SSRCs; oldest at the beginning.
  std::vector<uint32_t> unsignaled_recv_ssrcs_;

  // This is a stream param that comes from the remote description, but wasn't
  // signaled with any a=ssrc lines. It holds the information that was signaled
  // before the unsignaled receive stream is created when the first packet is
  // received.
  StreamParams unsignaled_stream_params_;

  // Volume for unsignaled streams, which may be set before the stream exists.
  double default_recv_volume_ = 1.0;

  // Delay for unsignaled streams, which may be set before the stream exists.
  int default_recv_base_minimum_delay_ms_ = 0;

  // Sink for latest unsignaled stream - may be set before the stream exists.
  std::unique_ptr<webrtc::AudioSinkInterface> default_sink_;

  // Default SSRC to use for RTCP receiver reports in case of no signaled
  // send streams. See: https://code.google.com/p/webrtc/issues/detail?id=4740
  // and https://code.google.com/p/chromium/issues/detail?id=547661
  uint32_t receiver_reports_ssrc_ = 0xFA17FA17u;

  std::string mid_;

  // Receive streams keyed by SSRC; values owned by this class.
  class WebRtcAudioReceiveStream;
  std::map<uint32_t, WebRtcAudioReceiveStream*> recv_streams_;
  std::vector<webrtc::RtpExtension> recv_rtp_extensions_;
  webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_;

  absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
      send_codec_spec_;

  // TODO(kwiberg): Per-SSRC codec pair IDs?
  const webrtc::AudioCodecPairId codec_pair_id_;

  // Per peer connection crypto options that last for the lifetime of the peer
  // connection.
  const webrtc::CryptoOptions crypto_options_;
  // Unsignaled streams have an option to have a frame decryptor set on them.
  rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
      unsignaled_frame_decryptor_;
  rtc::scoped_refptr<webrtc::FrameTransformerInterface>
      unsignaled_frame_transformer_;

  void FillReceiveCodecStats(VoiceMediaReceiveInfo* voice_media_info);
};
} // namespace cricket
#endif // MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_

View file

@ -0,0 +1,3 @@
boivie@webrtc.org
deadbeef@webrtc.org
orphis@webrtc.org

View file

@ -0,0 +1,668 @@
/*
* Copyright 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/sctp/dcsctp_transport.h"
#include <atomic>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "media/base/media_channel.h"
#include "net/dcsctp/public/dcsctp_socket_factory.h"
#include "net/dcsctp/public/packet_observer.h"
#include "net/dcsctp/public/text_pcap_packet_observer.h"
#include "net/dcsctp/public/types.h"
#include "p2p/base/packet_transport_internal.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/socket.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/thread.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
namespace {
using ::dcsctp::SendPacketStatus;
// When there is packet loss for a long time, the SCTP retry timers will use
// exponential backoff, which can grow to very long durations and when the
// connection recovers, it may take a long time to reach the new backoff
// duration. By limiting it to a reasonable limit, the time to recover reduces.
// Cap: 3 seconds.
constexpr dcsctp::DurationMs kMaxTimerBackoffDuration =
    dcsctp::DurationMs(3000);
// SCTP Payload Protocol Identifiers used by WebRTC data channels, as
// registered in the RFCs referenced below.
enum class WebrtcPPID : dcsctp::PPID::UnderlyingType {
  // https://www.rfc-editor.org/rfc/rfc8832.html#section-8.1
  kDCEP = 50,
  // https://www.rfc-editor.org/rfc/rfc8831.html#section-8
  kString = 51,
  kBinaryPartial = 52,  // Deprecated
  kBinary = 53,
  kStringPartial = 54,  // Deprecated
  kStringEmpty = 56,
  kBinaryEmpty = 57,
};
// Maps an outgoing message type (plus its payload size) to the PPID that is
// put on the wire. Zero-length text/binary messages get the dedicated
// "empty" PPIDs so the receiver can reconstruct an empty payload.
WebrtcPPID ToPPID(DataMessageType message_type, size_t size) {
  const bool is_empty = (size == 0);
  if (message_type == DataMessageType::kControl) {
    return WebrtcPPID::kDCEP;
  }
  if (message_type == DataMessageType::kText) {
    return is_empty ? WebrtcPPID::kStringEmpty : WebrtcPPID::kString;
  }
  return is_empty ? WebrtcPPID::kBinaryEmpty : WebrtcPPID::kBinary;
}
// Maps an incoming PPID back to the message type it encodes, or nullopt for
// PPIDs that are not part of the WebRTC data channel mapping.
absl::optional<DataMessageType> ToDataMessageType(dcsctp::PPID ppid) {
  const WebrtcPPID webrtc_ppid = static_cast<WebrtcPPID>(ppid.value());
  if (webrtc_ppid == WebrtcPPID::kDCEP) {
    return DataMessageType::kControl;
  }
  if (webrtc_ppid == WebrtcPPID::kString ||
      webrtc_ppid == WebrtcPPID::kStringPartial ||
      webrtc_ppid == WebrtcPPID::kStringEmpty) {
    return DataMessageType::kText;
  }
  if (webrtc_ppid == WebrtcPPID::kBinary ||
      webrtc_ppid == WebrtcPPID::kBinaryPartial ||
      webrtc_ppid == WebrtcPPID::kBinaryEmpty) {
    return DataMessageType::kBinary;
  }
  return absl::nullopt;
}
// Translates a dcsctp error kind to the closest SCTP error cause code, or
// nullopt when no cause code applies.
absl::optional<cricket::SctpErrorCauseCode> ToErrorCauseCode(
    dcsctp::ErrorKind error) {
  using dcsctp::ErrorKind;
  if (error == ErrorKind::kParseFailed) {
    return cricket::SctpErrorCauseCode::kUnrecognizedParameters;
  }
  if (error == ErrorKind::kPeerReported) {
    return cricket::SctpErrorCauseCode::kUserInitiatedAbort;
  }
  if (error == ErrorKind::kWrongSequence ||
      error == ErrorKind::kProtocolViolation) {
    return cricket::SctpErrorCauseCode::kProtocolViolation;
  }
  if (error == ErrorKind::kResourceExhaustion) {
    return cricket::SctpErrorCauseCode::kOutOfResource;
  }
  // kTooManyRetries, kUnsupportedOperation, kNoError and kNotConnected have
  // no matching SCTP error cause code.
  return absl::nullopt;
}
// True when the PPID marks a zero-length message, whose one-byte placeholder
// payload must not be surfaced to the application.
bool IsEmptyPPID(dcsctp::PPID ppid) {
  switch (static_cast<WebrtcPPID>(ppid.value())) {
    case WebrtcPPID::kStringEmpty:
    case WebrtcPPID::kBinaryEmpty:
      return true;
    default:
      return false;
  }
}
} // namespace
// Public constructor: delegates to the injectable-factory constructor with
// the default dcsctp socket factory.
DcSctpTransport::DcSctpTransport(rtc::Thread* network_thread,
                                 rtc::PacketTransportInternal* transport,
                                 Clock* clock)
    : DcSctpTransport(network_thread,
                      transport,
                      clock,
                      std::make_unique<dcsctp::DcSctpSocketFactory>()) {}
// Main constructor. Wires the dcsctp socket machinery to the given packet
// transport and clock. Socket timeouts are scheduled on `network_thread`
// via the task-queue timeout factory.
DcSctpTransport::DcSctpTransport(
    rtc::Thread* network_thread,
    rtc::PacketTransportInternal* transport,
    Clock* clock,
    std::unique_ptr<dcsctp::DcSctpSocketFactory> socket_factory)
    : network_thread_(network_thread),
      transport_(transport),
      clock_(clock),
      // Seed the PRNG from the current time.
      random_(clock_->TimeInMicroseconds()),
      socket_factory_(std::move(socket_factory)),
      task_queue_timeout_factory_(
          *network_thread,
          [this]() { return TimeMillis(); },
          [this](dcsctp::TimeoutID timeout_id) {
            socket_->HandleTimeout(timeout_id);
          }) {
  RTC_DCHECK_RUN_ON(network_thread_);
  // Give each instance a unique debug name for logging, e.g.
  // "DcSctpTransport0", "DcSctpTransport1", ...
  static std::atomic<int> instance_count = 0;
  rtc::StringBuilder sb;
  sb << debug_name_ << instance_count++;
  debug_name_ = sb.Release();
  ConnectTransportSignals();
}
// Closes the SCTP socket, if one was ever created by Start().
DcSctpTransport::~DcSctpTransport() {
  if (socket_) {
    socket_->Close();
  }
}
// Registers the callback that OnConnected() invokes once the SCTP
// association is established.
void DcSctpTransport::SetOnConnectedCallback(std::function<void()> callback) {
  RTC_DCHECK_RUN_ON(network_thread_);
  on_connected_callback_ = std::move(callback);
}
// Registers the sink that receives data channel events. If the transport is
// already ready to send, replay OnReadyToSend() immediately so a
// late-attached sink doesn't miss that state.
void DcSctpTransport::SetDataChannelSink(DataChannelSink* sink) {
  RTC_DCHECK_RUN_ON(network_thread_);
  data_channel_sink_ = sink;
  if (data_channel_sink_ && ready_to_send_data_) {
    data_channel_sink_->OnReadyToSend();
  }
}
// Swaps the underlying DTLS packet transport (used when switching bundled
// transports), re-wiring the sigslot signals and attempting to connect the
// socket if the new transport is already writable.
void DcSctpTransport::SetDtlsTransport(
    rtc::PacketTransportInternal* transport) {
  RTC_DCHECK_RUN_ON(network_thread_);
  DisconnectTransportSignals();
  transport_ = transport;
  ConnectTransportSignals();
  MaybeConnectSocket();
}
// Starts (or re-configures) the SCTP association. On first call this creates
// the dcsctp socket with the given ports and max message size; subsequent
// calls may only change `max_message_size` — changing a port fails and
// returns false.
bool DcSctpTransport::Start(int local_sctp_port,
                            int remote_sctp_port,
                            int max_message_size) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DCHECK(max_message_size > 0);

  RTC_DLOG(LS_INFO) << debug_name_ << "->Start(local=" << local_sctp_port
                    << ", remote=" << remote_sctp_port
                    << ", max_message_size=" << max_message_size << ")";

  if (!socket_) {
    dcsctp::DcSctpOptions options;
    options.local_port = local_sctp_port;
    options.remote_port = remote_sctp_port;
    options.max_message_size = max_message_size;
    options.max_timer_backoff_duration = kMaxTimerBackoffDuration;
    // Don't close the connection automatically on too many retransmissions.
    options.max_retransmissions = absl::nullopt;
    options.max_init_retransmits = absl::nullopt;

    // With verbose logging enabled, record packets as text pcap logs.
    std::unique_ptr<dcsctp::PacketObserver> packet_observer;
    if (RTC_LOG_CHECK_LEVEL(LS_VERBOSE)) {
      packet_observer =
          std::make_unique<dcsctp::TextPcapPacketObserver>(debug_name_);
    }

    socket_ = socket_factory_->Create(debug_name_, *this,
                                      std::move(packet_observer), options);
  } else {
    if (local_sctp_port != socket_->options().local_port ||
        remote_sctp_port != socket_->options().remote_port) {
      RTC_LOG(LS_ERROR)
          << debug_name_ << "->Start(local=" << local_sctp_port
          << ", remote=" << remote_sctp_port
          << "): Can't change ports on already started transport.";
      return false;
    }
    socket_->SetMaxMessageSize(max_message_size);
  }

  MaybeConnectSocket();
  return true;
}
bool DcSctpTransport::OpenStream(int sid) {
RTC_DCHECK_RUN_ON(network_thread_);
RTC_DLOG(LS_INFO) << debug_name_ << "->OpenStream(" << sid << ").";
StreamState stream_state;
stream_states_.insert_or_assign(dcsctp::StreamID(static_cast<uint16_t>(sid)),
stream_state);
return true;
}
// Initiates the local half of the closing procedure for stream `sid` by
// requesting an outgoing stream reset. Returns false if the transport is not
// started, the stream is not open, or a closing procedure is already in
// progress (locally or remotely initiated).
bool DcSctpTransport::ResetStream(int sid) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DLOG(LS_INFO) << debug_name_ << "->ResetStream(" << sid << ").";
  if (!socket_) {
    RTC_LOG(LS_ERROR) << debug_name_ << "->ResetStream(sid=" << sid
                      << "): Transport is not started.";
    return false;
  }

  dcsctp::StreamID streams[1] = {dcsctp::StreamID(static_cast<uint16_t>(sid))};
  auto it = stream_states_.find(streams[0]);
  if (it == stream_states_.end()) {
    RTC_LOG(LS_ERROR) << debug_name_ << "->ResetStream(sid=" << sid
                      << "): Stream is not open.";
    return false;
  }

  StreamState& stream_state = it->second;
  if (stream_state.closure_initiated || stream_state.incoming_reset_done ||
      stream_state.outgoing_reset_done) {
    // The closing procedure was already initiated by the remote, don't do
    // anything.
    return false;
  }
  stream_state.closure_initiated = true;
  socket_->ResetStreams(streams);
  return true;
}
// Sends one data channel message on stream `sid`.
// Returns:
//  - INVALID_STATE if the transport isn't started or the stream isn't open,
//  - INVALID_RANGE if the payload exceeds the max message size,
//  - RESOURCE_EXHAUSTED if the socket's send buffer is full (the caller
//    should retry after OnReadyToSend fires),
//  - NETWORK_ERROR for any other socket send failure.
RTCError DcSctpTransport::SendData(int sid,
                                   const SendDataParams& params,
                                   const rtc::CopyOnWriteBuffer& payload) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DLOG(LS_VERBOSE) << debug_name_ << "->SendData(sid=" << sid
                       << ", type=" << static_cast<int>(params.type)
                       << ", length=" << payload.size() << ").";

  if (!socket_) {
    RTC_LOG(LS_ERROR) << debug_name_
                      << "->SendData(...): Transport is not started.";
    return RTCError(RTCErrorType::INVALID_STATE);
  }

  // It is possible for a message to be sent from the signaling thread at the
  // same time a data-channel is closing, but before the signaling thread is
  // aware of it. So we need to keep track of currently active data channels and
  // skip sending messages for the ones that are not open or closing.
  // The sending errors are not impacting the data channel API contract as
  // it is allowed to discard queued messages when the channel is closing.
  auto stream_state =
      stream_states_.find(dcsctp::StreamID(static_cast<uint16_t>(sid)));
  if (stream_state == stream_states_.end()) {
    RTC_LOG(LS_VERBOSE) << "Skipping message on non-open stream with sid: "
                        << sid;
    return RTCError(RTCErrorType::INVALID_STATE);
  }
  if (stream_state->second.closure_initiated ||
      stream_state->second.incoming_reset_done ||
      stream_state->second.outgoing_reset_done) {
    RTC_LOG(LS_VERBOSE) << "Skipping message on closing stream with sid: "
                        << sid;
    return RTCError(RTCErrorType::INVALID_STATE);
  }

  auto max_message_size = socket_->options().max_message_size;
  if (max_message_size > 0 && payload.size() > max_message_size) {
    RTC_LOG(LS_WARNING) << debug_name_
                        << "->SendData(...): "
                           "Trying to send packet bigger "
                           "than the max message size: "
                        << payload.size() << " vs max of " << max_message_size;
    return RTCError(RTCErrorType::INVALID_RANGE);
  }

  std::vector<uint8_t> message_payload(payload.cdata(),
                                       payload.cdata() + payload.size());
  if (message_payload.empty()) {
    // https://www.rfc-editor.org/rfc/rfc8831.html#section-6.6
    // SCTP does not support the sending of empty user messages. Therefore, if
    // an empty message has to be sent, the appropriate PPID (WebRTC String
    // Empty or WebRTC Binary Empty) is used, and the SCTP user message of one
    // zero byte is sent.
    message_payload.push_back('\0');
  }

  // Note: the PPID is derived from the *original* payload size so that empty
  // messages get the "empty" PPID, even though one placeholder byte is sent.
  dcsctp::DcSctpMessage message(
      dcsctp::StreamID(static_cast<uint16_t>(sid)),
      dcsctp::PPID(static_cast<uint16_t>(ToPPID(params.type, payload.size()))),
      std::move(message_payload));

  dcsctp::SendOptions send_options;
  send_options.unordered = dcsctp::IsUnordered(!params.ordered);
  if (params.max_rtx_ms.has_value()) {
    RTC_DCHECK(*params.max_rtx_ms >= 0 &&
               *params.max_rtx_ms <= std::numeric_limits<uint16_t>::max());
    send_options.lifetime = dcsctp::DurationMs(*params.max_rtx_ms);
  }
  if (params.max_rtx_count.has_value()) {
    RTC_DCHECK(*params.max_rtx_count >= 0 &&
               *params.max_rtx_count <= std::numeric_limits<uint16_t>::max());
    send_options.max_retransmissions = *params.max_rtx_count;
  }

  dcsctp::SendStatus error = socket_->Send(std::move(message), send_options);
  switch (error) {
    case dcsctp::SendStatus::kSuccess:
      return RTCError::OK();
    case dcsctp::SendStatus::kErrorResourceExhaustion:
      // Send buffer is full; pause sending until OnTotalBufferedAmountLow.
      ready_to_send_data_ = false;
      return RTCError(RTCErrorType::RESOURCE_EXHAUSTED);
    default:
      absl::string_view message = dcsctp::ToString(error);
      RTC_LOG(LS_ERROR) << debug_name_
                        << "->SendData(...): send() failed with error "
                        << message << ".";
      return RTCError(RTCErrorType::NETWORK_ERROR, message);
  }
}
// True once the socket is connected and its send buffer is not full; cleared
// when Send() reports resource exhaustion, and on close/abort.
bool DcSctpTransport::ReadyToSendData() {
  RTC_DCHECK_RUN_ON(network_thread_);
  return ready_to_send_data_;
}
// Returns the max message size configured via Start(), or 0 (with an error
// log) if the transport hasn't been started yet.
int DcSctpTransport::max_message_size() const {
  if (!socket_) {
    RTC_LOG(LS_ERROR) << debug_name_
                      << "->max_message_size(...): Transport is not started.";
    return 0;
  }
  return socket_->options().max_message_size;
}
// Maximum number of outgoing streams announced by this side, or nullopt if
// the socket hasn't been created yet.
absl::optional<int> DcSctpTransport::max_outbound_streams() const {
  if (socket_ == nullptr) {
    return absl::nullopt;
  }
  return socket_->options().announced_maximum_outgoing_streams;
}
// Maximum number of incoming streams announced by this side, or nullopt if
// the socket hasn't been created yet.
absl::optional<int> DcSctpTransport::max_inbound_streams() const {
  if (socket_ == nullptr) {
    return absl::nullopt;
  }
  return socket_->options().announced_maximum_incoming_streams;
}
// Test-only hook: overrides the auto-generated per-instance debug name.
void DcSctpTransport::set_debug_name_for_testing(const char* debug_name) {
  debug_name_ = debug_name;
}
// dcsctp callback: hands a fully-formed SCTP packet to the DTLS transport.
// Returns kTemporaryFailure on a would-block error (so dcsctp can retry),
// kError on oversized packets or unwritable/failed transport.
SendPacketStatus DcSctpTransport::SendPacketWithStatus(
    rtc::ArrayView<const uint8_t> data) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DCHECK(socket_);

  if (data.size() > (socket_->options().mtu)) {
    RTC_LOG(LS_ERROR) << debug_name_
                      << "->SendPacket(...): "
                         "SCTP seems to have made a packet that is bigger "
                         "than its official MTU: "
                      << data.size() << " vs max of " << socket_->options().mtu;
    return SendPacketStatus::kError;
  }
  TRACE_EVENT0("webrtc", "DcSctpTransport::SendPacket");

  if (!transport_ || !transport_->writable())
    return SendPacketStatus::kError;

  RTC_DLOG(LS_VERBOSE) << debug_name_ << "->SendPacket(length=" << data.size()
                       << ")";

  auto result =
      transport_->SendPacket(reinterpret_cast<const char*>(data.data()),
                             data.size(), rtc::PacketOptions(), 0);

  if (result < 0) {
    RTC_LOG(LS_WARNING) << debug_name_ << "->SendPacket(length=" << data.size()
                        << ") failed with error: " << transport_->GetError()
                        << ".";

    // A blocking (e.g. EWOULDBLOCK-style) error is transient; tell dcsctp it
    // may retry later instead of treating the packet as lost for good.
    if (rtc::IsBlockingError(transport_->GetError())) {
      return SendPacketStatus::kTemporaryFailure;
    }
    return SendPacketStatus::kError;
  }
  return SendPacketStatus::kSuccess;
}
// dcsctp callback: creates a timeout that fires on the network thread's task
// queue with the requested delay precision.
std::unique_ptr<dcsctp::Timeout> DcSctpTransport::CreateTimeout(
    TaskQueueBase::DelayPrecision precision) {
  return task_queue_timeout_factory_.CreateTimeout(precision);
}
// dcsctp callback: current time in milliseconds from the injected clock.
dcsctp::TimeMs DcSctpTransport::TimeMillis() {
  return dcsctp::TimeMs(clock_->TimeInMilliseconds());
}
// dcsctp callback: pseudo-random integer between `low` and `high`, drawn
// from the clock-seeded PRNG created in the constructor.
uint32_t DcSctpTransport::GetRandomInt(uint32_t low, uint32_t high) {
  return random_.Rand(low, high);
}
// dcsctp callback: the send buffer has drained enough to accept more data.
// Re-enables sending and notifies the sink, but only on a false->true edge.
void DcSctpTransport::OnTotalBufferedAmountLow() {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (!ready_to_send_data_) {
    ready_to_send_data_ = true;
    if (data_channel_sink_) {
      data_channel_sink_->OnReadyToSend();
    }
  }
}
// dcsctp callback: a complete message has been received. Maps the PPID to a
// message type (dropping unknown PPIDs), strips the placeholder byte of
// empty-message PPIDs, and forwards the payload to the sink.
void DcSctpTransport::OnMessageReceived(dcsctp::DcSctpMessage message) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DLOG(LS_VERBOSE) << debug_name_ << "->OnMessageReceived(sid="
                       << message.stream_id().value()
                       << ", ppid=" << message.ppid().value()
                       << ", length=" << message.payload().size() << ").";

  auto type = ToDataMessageType(message.ppid());
  if (!type.has_value()) {
    RTC_LOG(LS_VERBOSE) << debug_name_
                        << "->OnMessageReceived(): Received an unknown PPID "
                        << message.ppid().value()
                        << " on an SCTP packet. Dropping.";
    return;
  }

  // `receive_buffer_` is a member so its allocation can be reused across
  // messages; "empty" PPIDs deliver an empty buffer regardless of payload.
  receive_buffer_.Clear();
  if (!IsEmptyPPID(message.ppid()))
    receive_buffer_.AppendData(message.payload().data(),
                               message.payload().size());

  if (data_channel_sink_) {
    data_channel_sink_->OnDataReceived(message.stream_id().value(), *type,
                                       receive_buffer_);
  }
}
// dcsctp callback for non-fatal errors; only logs. Resource exhaustion is
// demoted to verbose since it's an expected state under high send load.
void DcSctpTransport::OnError(dcsctp::ErrorKind error,
                              absl::string_view message) {
  if (error == dcsctp::ErrorKind::kResourceExhaustion) {
    // Indicates that a message failed to be enqueued, because the send buffer
    // is full, which is a very common (and wanted) state for high throughput
    // sending/benchmarks.
    RTC_LOG(LS_VERBOSE) << debug_name_
                        << "->OnError(error=" << dcsctp::ToString(error)
                        << ", message=" << message << ").";
  } else {
    RTC_LOG(LS_ERROR) << debug_name_
                      << "->OnError(error=" << dcsctp::ToString(error)
                      << ", message=" << message << ").";
  }
}
// dcsctp callback: the association was aborted. Translates the error into an
// RTCError (with an SCTP cause code where one maps) and notifies the sink
// that the transport is closed.
void DcSctpTransport::OnAborted(dcsctp::ErrorKind error,
                                absl::string_view message) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_LOG(LS_ERROR) << debug_name_
                    << "->OnAborted(error=" << dcsctp::ToString(error)
                    << ", message=" << message << ").";
  ready_to_send_data_ = false;
  RTCError rtc_error(RTCErrorType::OPERATION_ERROR_WITH_DATA,
                     std::string(message));
  rtc_error.set_error_detail(RTCErrorDetailType::SCTP_FAILURE);
  auto code = ToErrorCauseCode(error);
  if (code.has_value()) {
    rtc_error.set_sctp_cause_code(static_cast<uint16_t>(*code));
  }
  if (data_channel_sink_) {
    data_channel_sink_->OnTransportClosed(rtc_error);
  }
}
// dcsctp callback: the association is established. Marks the transport ready
// to send and notifies both the sink and the registered connected-callback.
void DcSctpTransport::OnConnected() {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DLOG(LS_INFO) << debug_name_ << "->OnConnected().";
  ready_to_send_data_ = true;
  if (data_channel_sink_) {
    data_channel_sink_->OnReadyToSend();
  }
  if (on_connected_callback_) {
    on_connected_callback_();
  }
}
// dcsctp callback: the association closed; stop accepting sends.
void DcSctpTransport::OnClosed() {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DLOG(LS_INFO) << debug_name_ << "->OnClosed().";
  ready_to_send_data_ = false;
}
// dcsctp callback: the peer restarted the connection. Only logged; no local
// state is updated here.
void DcSctpTransport::OnConnectionRestarted() {
  RTC_DLOG(LS_INFO) << debug_name_ << "->OnConnectionRestarted().";
}
// dcsctp callback: one or more outgoing stream resets were rejected by the
// peer. Currently only logged; stream state is left untouched.
void DcSctpTransport::OnStreamsResetFailed(
    rtc::ArrayView<const dcsctp::StreamID> outgoing_streams,
    absl::string_view reason) {
  // TODO(orphis): Need a test to check for correct behavior
  for (auto& stream_id : outgoing_streams) {
    RTC_LOG(LS_WARNING)
        << debug_name_
        << "->OnStreamsResetFailed(...): Outgoing stream reset failed"
        << ", sid=" << stream_id.value() << ", reason: " << reason << ".";
  }
}
// dcsctp callback: the peer has acknowledged our outgoing stream resets.
// Marks each stream's outgoing half as reset; if the incoming half is also
// done, the close procedure is complete: the sink is notified and the state
// is dropped.
void DcSctpTransport::OnStreamsResetPerformed(
    rtc::ArrayView<const dcsctp::StreamID> outgoing_streams) {
  RTC_DCHECK_RUN_ON(network_thread_);
  for (auto& stream_id : outgoing_streams) {
    RTC_LOG(LS_INFO) << debug_name_
                     << "->OnStreamsResetPerformed(...): Outgoing stream reset"
                     << ", sid=" << stream_id.value();
    auto it = stream_states_.find(stream_id);
    if (it == stream_states_.end()) {
      // Ignoring an outgoing stream reset for a closed stream. Use
      // `continue` (not `return`) so the remaining streams in this
      // notification are still processed.
      continue;
    }
    StreamState& stream_state = it->second;
    stream_state.outgoing_reset_done = true;
    if (stream_state.incoming_reset_done) {
      // When the close was not initiated locally, we can signal the end of the
      // data channel close procedure when the remote ACKs the reset.
      if (data_channel_sink_) {
        data_channel_sink_->OnChannelClosed(stream_id.value());
      }
      stream_states_.erase(stream_id);
    }
  }
}
// dcsctp callback: the peer has reset its outgoing streams (our incoming
// halves). For remotely-initiated closes this also resets our outgoing half
// and signals OnChannelClosing; when both halves are reset the close
// procedure completes with OnChannelClosed.
void DcSctpTransport::OnIncomingStreamsReset(
    rtc::ArrayView<const dcsctp::StreamID> incoming_streams) {
  RTC_DCHECK_RUN_ON(network_thread_);
  for (auto& stream_id : incoming_streams) {
    RTC_LOG(LS_INFO) << debug_name_
                     << "->OnIncomingStreamsReset(...): Incoming stream reset"
                     << ", sid=" << stream_id.value();
    auto it = stream_states_.find(stream_id);
    if (it == stream_states_.end()) {
      // Unknown stream: skip it, but keep processing the remaining streams
      // in this notification (`continue`, not `return`).
      continue;
    }
    StreamState& stream_state = it->second;
    stream_state.incoming_reset_done = true;

    if (!stream_state.closure_initiated) {
      // When receiving an incoming stream reset event for a non local close
      // procedure, the transport needs to reset the stream in the other
      // direction too.
      dcsctp::StreamID streams[1] = {stream_id};
      socket_->ResetStreams(streams);
      if (data_channel_sink_) {
        data_channel_sink_->OnChannelClosing(stream_id.value());
      }
    }

    if (stream_state.outgoing_reset_done) {
      // The close procedure that was initiated locally is complete when we
      // receive an incoming reset event.
      if (data_channel_sink_) {
        data_channel_sink_->OnChannelClosed(stream_id.value());
      }
      stream_states_.erase(stream_id);
    }
  }
}
// Subscribes to the DTLS transport's sigslot signals (writable state,
// incoming packets, closure). No-op when no transport is attached.
void DcSctpTransport::ConnectTransportSignals() {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (!transport_) {
    return;
  }
  transport_->SignalWritableState.connect(
      this, &DcSctpTransport::OnTransportWritableState);
  transport_->SignalReadPacket.connect(this,
                                       &DcSctpTransport::OnTransportReadPacket);
  transport_->SignalClosed.connect(this, &DcSctpTransport::OnTransportClosed);
}
// Unsubscribes from the current DTLS transport's signals; the inverse of
// ConnectTransportSignals(). No-op when no transport is attached.
void DcSctpTransport::DisconnectTransportSignals() {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (!transport_) {
    return;
  }
  transport_->SignalWritableState.disconnect(this);
  transport_->SignalReadPacket.disconnect(this);
  transport_->SignalClosed.disconnect(this);
}
// Signal handler: the DTLS transport's writable state changed. If it just
// became writable, this may kick off the initial SCTP connect.
void DcSctpTransport::OnTransportWritableState(
    rtc::PacketTransportInternal* transport) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DCHECK_EQ(transport_, transport);

  RTC_DLOG(LS_VERBOSE) << debug_name_
                       << "->OnTransportWritableState(), writable="
                       << transport->writable();

  MaybeConnectSocket();
}
// Signal handler: a packet arrived on the DTLS transport. Packets with any
// flags set are skipped (only plain SCTP packets are of interest); the rest
// are fed into the dcsctp socket.
void DcSctpTransport::OnTransportReadPacket(
    rtc::PacketTransportInternal* transport,
    const char* data,
    size_t length,
    const int64_t& /* packet_time_us */,
    int flags) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (flags) {
    // We are only interested in SCTP packets.
    return;
  }

  RTC_DLOG(LS_VERBOSE) << debug_name_
                       << "->OnTransportReadPacket(), length=" << length;
  if (socket_) {
    socket_->ReceivePacket(rtc::ArrayView<const uint8_t>(
        reinterpret_cast<const uint8_t*>(data), length));
  }
}
// Signal handler: the DTLS transport closed; propagate closure (with an
// empty, non-error RTCError) to the data channel sink.
void DcSctpTransport::OnTransportClosed(
    rtc::PacketTransportInternal* transport) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DLOG(LS_VERBOSE) << debug_name_ << "->OnTransportClosed().";
  if (data_channel_sink_) {
    data_channel_sink_->OnTransportClosed({});
  }
}
// Starts the SCTP handshake once all preconditions hold: a writable DTLS
// transport, a created socket, and the socket still in the closed state.
// Safe to call repeatedly; does nothing once connecting/connected.
void DcSctpTransport::MaybeConnectSocket() {
  if (transport_ && transport_->writable() && socket_ &&
      socket_->state() == dcsctp::SocketState::kClosed) {
    socket_->Connect();
  }
}
} // namespace webrtc

View file

@ -0,0 +1,141 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_SCTP_DCSCTP_TRANSPORT_H_
#define MEDIA_SCTP_DCSCTP_TRANSPORT_H_
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/task_queue/task_queue_base.h"
#include "media/sctp/sctp_transport_internal.h"
#include "net/dcsctp/public/dcsctp_options.h"
#include "net/dcsctp/public/dcsctp_socket.h"
#include "net/dcsctp/public/dcsctp_socket_factory.h"
#include "net/dcsctp/public/types.h"
#include "net/dcsctp/timer/task_queue_timeout.h"
#include "p2p/base/packet_transport_internal.h"
#include "rtc_base/containers/flat_map.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/random.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "rtc_base/thread.h"
#include "rtc_base/thread_annotations.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
// An SctpTransportInternal implementation backed by the dcsctp library. It
// bridges a DTLS packet transport (via sigslot callbacks) to a dcsctp
// socket and forwards socket events to the registered DataChannelSink.
// Methods are expected to run on the network thread (enforced with
// RTC_DCHECK_RUN_ON / RTC_GUARDED_BY).
class DcSctpTransport : public cricket::SctpTransportInternal,
                        public dcsctp::DcSctpSocketCallbacks,
                        public sigslot::has_slots<> {
 public:
  DcSctpTransport(rtc::Thread* network_thread,
                  rtc::PacketTransportInternal* transport,
                  Clock* clock);
  // Variant that allows injecting the dcsctp socket factory.
  DcSctpTransport(rtc::Thread* network_thread,
                  rtc::PacketTransportInternal* transport,
                  Clock* clock,
                  std::unique_ptr<dcsctp::DcSctpSocketFactory> socket_factory);
  ~DcSctpTransport() override;

  // cricket::SctpTransportInternal
  void SetOnConnectedCallback(std::function<void()> callback) override;
  void SetDataChannelSink(DataChannelSink* sink) override;
  void SetDtlsTransport(rtc::PacketTransportInternal* transport) override;
  bool Start(int local_sctp_port,
             int remote_sctp_port,
             int max_message_size) override;
  bool OpenStream(int sid) override;
  bool ResetStream(int sid) override;
  RTCError SendData(int sid,
                    const SendDataParams& params,
                    const rtc::CopyOnWriteBuffer& payload) override;
  bool ReadyToSendData() override;
  int max_message_size() const override;
  absl::optional<int> max_outbound_streams() const override;
  absl::optional<int> max_inbound_streams() const override;
  void set_debug_name_for_testing(const char* debug_name) override;

 private:
  // dcsctp::DcSctpSocketCallbacks
  dcsctp::SendPacketStatus SendPacketWithStatus(
      rtc::ArrayView<const uint8_t> data) override;
  std::unique_ptr<dcsctp::Timeout> CreateTimeout(
      TaskQueueBase::DelayPrecision precision) override;
  dcsctp::TimeMs TimeMillis() override;
  uint32_t GetRandomInt(uint32_t low, uint32_t high) override;
  void OnTotalBufferedAmountLow() override;
  void OnMessageReceived(dcsctp::DcSctpMessage message) override;
  void OnError(dcsctp::ErrorKind error, absl::string_view message) override;
  void OnAborted(dcsctp::ErrorKind error, absl::string_view message) override;
  void OnConnected() override;
  void OnClosed() override;
  void OnConnectionRestarted() override;
  void OnStreamsResetFailed(
      rtc::ArrayView<const dcsctp::StreamID> outgoing_streams,
      absl::string_view reason) override;
  void OnStreamsResetPerformed(
      rtc::ArrayView<const dcsctp::StreamID> outgoing_streams) override;
  void OnIncomingStreamsReset(
      rtc::ArrayView<const dcsctp::StreamID> incoming_streams) override;

  // Transport callbacks (sigslot handlers for the DTLS transport).
  void ConnectTransportSignals();
  void DisconnectTransportSignals();
  void OnTransportWritableState(rtc::PacketTransportInternal* transport);
  void OnTransportReadPacket(rtc::PacketTransportInternal* transport,
                             const char* data,
                             size_t length,
                             const int64_t& /* packet_time_us */,
                             int flags);
  void OnTransportClosed(rtc::PacketTransportInternal* transport);

  // Calls socket_->Connect() once the transport is writable and the socket
  // exists and is still closed.
  void MaybeConnectSocket();

  rtc::Thread* network_thread_;
  rtc::PacketTransportInternal* transport_;
  Clock* clock_;
  Random random_;

  std::unique_ptr<dcsctp::DcSctpSocketFactory> socket_factory_;
  dcsctp::TaskQueueTimeoutFactory task_queue_timeout_factory_;
  std::unique_ptr<dcsctp::DcSctpSocketInterface> socket_;
  // Per-instance name used to tag log output, e.g. "DcSctpTransport0".
  std::string debug_name_ = "DcSctpTransport";
  // Scratch buffer reused for each received message.
  rtc::CopyOnWriteBuffer receive_buffer_;

  // Used to keep track of the state of data channels.
  // Reset needs to happen both ways before signaling the transport
  // is closed.
  struct StreamState {
    // True when the local connection has initiated the reset.
    // If a connection receives a reset for a stream that isn't
    // already being reset locally, it needs to fire the signal
    // SignalClosingProcedureStartedRemotely.
    bool closure_initiated = false;
    // True when the local connection received OnIncomingStreamsReset
    bool incoming_reset_done = false;
    // True when the local connection received OnStreamsResetPerformed
    bool outgoing_reset_done = false;
  };

  // Map of all currently open or closing data channels
  flat_map<dcsctp::StreamID, StreamState> stream_states_
      RTC_GUARDED_BY(network_thread_);
  bool ready_to_send_data_ RTC_GUARDED_BY(network_thread_) = false;
  std::function<void()> on_connected_callback_ RTC_GUARDED_BY(network_thread_);
  DataChannelSink* data_channel_sink_ RTC_GUARDED_BY(network_thread_) = nullptr;
};
} // namespace webrtc
#endif // MEDIA_SCTP_DCSCTP_TRANSPORT_H_

View file

@ -0,0 +1,38 @@
/*
* Copyright 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/sctp/sctp_transport_factory.h"
#include "rtc_base/system/unused.h"
#ifdef WEBRTC_HAVE_DCSCTP
#include "media/sctp/dcsctp_transport.h" // nogncheck
#include "system_wrappers/include/clock.h" // nogncheck
#endif
namespace cricket {
// Stores the network thread used to construct transports. RTC_UNUSED
// silences the unused-member warning in builds without WEBRTC_HAVE_DCSCTP,
// where CreateSctpTransport never reads it.
SctpTransportFactory::SctpTransportFactory(rtc::Thread* network_thread)
    : network_thread_(network_thread) {
  RTC_UNUSED(network_thread_);
}
// Creates a DcSctpTransport bound to the real-time clock, or returns nullptr
// when WebRTC was built without dcSCTP support (WEBRTC_HAVE_DCSCTP not
// defined).
std::unique_ptr<SctpTransportInternal>
SctpTransportFactory::CreateSctpTransport(
    rtc::PacketTransportInternal* transport) {
  std::unique_ptr<SctpTransportInternal> result;
#ifdef WEBRTC_HAVE_DCSCTP
  result = std::unique_ptr<SctpTransportInternal>(new webrtc::DcSctpTransport(
      network_thread_, transport, webrtc::Clock::GetRealTimeClock()));
#endif
  return result;
}
} // namespace cricket

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_SCTP_SCTP_TRANSPORT_FACTORY_H_
#define MEDIA_SCTP_SCTP_TRANSPORT_FACTORY_H_
#include <memory>
#include "api/transport/sctp_transport_factory_interface.h"
#include "media/sctp/sctp_transport_internal.h"
#include "rtc_base/thread.h"
namespace cricket {
// Default factory producing SctpTransportInternal instances for a given
// network thread (one transport per call to CreateSctpTransport).
class SctpTransportFactory : public webrtc::SctpTransportFactoryInterface {
 public:
  explicit SctpTransportFactory(rtc::Thread* network_thread);

  std::unique_ptr<SctpTransportInternal> CreateSctpTransport(
      rtc::PacketTransportInternal* transport) override;

 private:
  rtc::Thread* network_thread_;
};
} // namespace cricket
#endif  // MEDIA_SCTP_SCTP_TRANSPORT_FACTORY_H_

View file

@ -0,0 +1,150 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_SCTP_SCTP_TRANSPORT_INTERNAL_H_
#define MEDIA_SCTP_SCTP_TRANSPORT_INTERNAL_H_
// TODO(deadbeef): Move SCTP code out of media/, and make it not depend on
// anything in media/.
#include <memory>
#include <string>
#include <vector>
#include "api/rtc_error.h"
#include "api/transport/data_channel_transport_interface.h"
#include "media/base/media_channel.h"
#include "p2p/base/packet_transport_internal.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/thread.h"
namespace cricket {
// Constants that are important to API users
// The size of the SCTP association send buffer. 256kB, the usrsctp default.
constexpr int kSctpSendBufferSize = 256 * 1024;

// The number of outgoing streams that we'll negotiate. Since stream IDs (SIDs)
// are 0-based, the highest usable SID is 1023.
//
// It's recommended to use the maximum of 65535 in:
// https://tools.ietf.org/html/draft-ietf-rtcweb-data-channel-13#section-6.2
// However, we use 1024 in order to save memory. usrsctp allocates 104 bytes
// for each pair of incoming/outgoing streams (on a 64-bit system), so 65535
// streams would waste ~6MB.
//
// Note: "max" and "min" here are inclusive.
constexpr uint16_t kMaxSctpStreams = 1024;
constexpr uint16_t kMaxSctpSid = kMaxSctpStreams - 1;
constexpr uint16_t kMinSctpSid = 0;
// The maximum number of streams that can be negotiated according to spec.
constexpr uint16_t kSpecMaxSctpSid = 65535;

// This is the default SCTP port to use. It is passed along the wire and the
// connectee and connector must be using the same port. It is not related to the
// ports at the IP level. (Corresponds to: sockaddr_conn.sconn_port in
// usrsctp.h)
const int kSctpDefaultPort = 5000;

// Error cause codes defined at
// https://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml#sctp-parameters-24
enum class SctpErrorCauseCode : uint16_t {
  kInvalidStreamIdentifier = 1,
  kMissingMandatoryParameter = 2,
  kStaleCookieError = 3,
  kOutOfResource = 4,
  kUnresolvableAddress = 5,
  kUnrecognizedChunkType = 6,
  kInvalidMandatoryParameter = 7,
  kUnrecognizedParameters = 8,
  kNoUserData = 9,
  kCookieReceivedWhileShuttingDown = 10,
  kRestartWithNewAddresses = 11,
  kUserInitiatedAbort = 12,
  kProtocolViolation = 13,
};
// Abstract SctpTransport interface for use internally (by PeerConnection etc.).
// Exists to allow mock/fake SctpTransports to be created.
class SctpTransportInternal {
 public:
  virtual ~SctpTransportInternal() {}

  // Registers the callback invoked when the SCTP association is established.
  virtual void SetOnConnectedCallback(std::function<void()> callback) = 0;
  // Registers the sink that receives data channel events (data, closure,
  // ready-to-send notifications).
  virtual void SetDataChannelSink(webrtc::DataChannelSink* sink) = 0;

  // Changes what underlying DTLS transport is uses. Used when switching which
  // bundled transport the SctpTransport uses.
  virtual void SetDtlsTransport(rtc::PacketTransportInternal* transport) = 0;

  // When Start is called, connects as soon as possible; this can be called
  // before DTLS completes, in which case the connection will begin when DTLS
  // completes. This method can be called multiple times, though not if either
  // of the ports are changed.
  //
  // `local_sctp_port` and `remote_sctp_port` are passed along the wire and the
  // listener and connector must be using the same port. They are not related
  // to the ports at the IP level. If set to -1, we default to
  // kSctpDefaultPort.
  // `max_message_size_` sets the max message size on the connection.
  // It must be smaller than or equal to kSctpSendBufferSize.
  // It can be changed by a second Start() call.
  //
  // TODO(deadbeef): Support calling Start with different local/remote ports
  // and create a new association? Not clear if this is something we need to
  // support though. See: https://github.com/w3c/webrtc-pc/issues/979
  virtual bool Start(int local_sctp_port,
                     int remote_sctp_port,
                     int max_message_size) = 0;

  // NOTE: Initially there was a "Stop" method here, but it was never used, so
  // it was removed.

  // Informs SctpTransport that `sid` will start being used. Returns false if
  // it is impossible to use `sid`, or if it's already in use.
  // Until calling this, can't send data using `sid`.
  // TODO(deadbeef): Actually implement the "returns false if `sid` can't be
  // used" part. See:
  // https://bugs.chromium.org/p/chromium/issues/detail?id=619849
  virtual bool OpenStream(int sid) = 0;
  // The inverse of OpenStream. Begins the closing procedure, which will
  // eventually result in SignalClosingProcedureComplete on the side that
  // initiates it, and both SignalClosingProcedureStartedRemotely and
  // SignalClosingProcedureComplete on the other side.
  virtual bool ResetStream(int sid) = 0;
  // Send data down this channel.
  // Returns RTCError::OK() if successful, an error otherwise. Notably
  // RTCErrorType::RESOURCE_EXHAUSTED for blocked operations.
  virtual webrtc::RTCError SendData(int sid,
                                    const webrtc::SendDataParams& params,
                                    const rtc::CopyOnWriteBuffer& payload) = 0;

  // Indicates when the SCTP socket is created and not blocked by congestion
  // control. This changes to false when SDR_BLOCK is returned from SendData,
  // and changes to true when SignalReadyToSendData is fired. The underlying
  // DTLS/ICE channels may be unwritable while ReadyToSendData is true, because
  // data can still be queued in usrsctp.
  virtual bool ReadyToSendData() = 0;
  // Returns the current max message size, set with Start().
  virtual int max_message_size() const = 0;
  // Returns the current negotiated max # of outbound streams.
  // Will return absl::nullopt if negotiation is incomplete.
  virtual absl::optional<int> max_outbound_streams() const = 0;
  // Returns the current negotiated max # of inbound streams.
  virtual absl::optional<int> max_inbound_streams() const = 0;

  // Helper for debugging.
  virtual void set_debug_name_for_testing(const char* debug_name) = 0;
};
} // namespace cricket
#endif // MEDIA_SCTP_SCTP_TRANSPORT_INTERNAL_H_