Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,126 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/adapted_video_track_source.h"
#include "api/scoped_refptr.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace rtc {
// Default constructor: the VideoAdapter is created with its default
// (no extra) resolution alignment.
AdaptedVideoTrackSource::AdaptedVideoTrackSource() = default;
// Lets subclasses request that adapted output resolutions are a multiple
// of `required_alignment` (e.g. encoders that need even dimensions).
AdaptedVideoTrackSource::AdaptedVideoTrackSource(int required_alignment)
    : video_adapter_(required_alignment) {}
AdaptedVideoTrackSource::~AdaptedVideoTrackSource() = default;
// Copies the most recently recorded input-frame stats (set by AdaptFrame)
// into `stats`. Returns false if no frame has been delivered yet.
bool AdaptedVideoTrackSource::GetStats(Stats* stats) {
  webrtc::MutexLock lock(&stats_mutex_);
  if (stats_.has_value()) {
    *stats = *stats_;
    return true;
  }
  return false;
}
// Forwards a captured frame to all registered sinks via the broadcaster,
// first applying any pending rotation when the buffer is plain I420.
// Subclasses producing native (non-I420) buffers must handle rotation
// themselves (see apply_rotation()).
void AdaptedVideoTrackSource::OnFrame(const webrtc::VideoFrame& frame) {
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
      frame.video_frame_buffer());
  /* Note that this is a "best effort" approach to
     wants.rotation_applied; apply_rotation_ can change from false to
     true between the check of apply_rotation() and the call to
     broadcaster_.OnFrame(), in which case we generate a frame with
     pending rotation despite some sink with wants.rotation_applied ==
     true was just added. The VideoBroadcaster enforces
     synchronization for us in this case, by not passing the frame on
     to sinks which don't want it. */
  if (apply_rotation() && frame.rotation() != webrtc::kVideoRotation_0 &&
      buffer->type() == webrtc::VideoFrameBuffer::Type::kI420) {
    /* Apply pending rotation. */
    webrtc::VideoFrame rotated_frame(frame);
    rotated_frame.set_video_frame_buffer(
        webrtc::I420Buffer::Rotate(*buffer->GetI420(), frame.rotation()));
    rotated_frame.set_rotation(webrtc::kVideoRotation_0);
    broadcaster_.OnFrame(rotated_frame);
  } else {
    broadcaster_.OnFrame(frame);
  }
}
// Propagates a source-side frame drop to all sinks as a discarded frame.
void AdaptedVideoTrackSource::OnFrameDropped() {
  broadcaster_.OnDiscardedFrame();
}
// rtc::VideoSourceInterface: registers (or updates the wants of) a sink,
// then re-derives the aggregate wants and pushes them into the adapter.
void AdaptedVideoTrackSource::AddOrUpdateSink(
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
    const rtc::VideoSinkWants& wants) {
  broadcaster_.AddOrUpdateSink(sink, wants);
  OnSinkWantsChanged(broadcaster_.wants());
}
// rtc::VideoSourceInterface: unregisters a sink and re-derives the
// aggregate wants from the remaining sinks.
void AdaptedVideoTrackSource::RemoveSink(
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
  broadcaster_.RemoveSink(sink);
  OnSinkWantsChanged(broadcaster_.wants());
}
// Returns whether any registered sink wants rotation applied by the
// source. May become stale when read from another thread (see header).
bool AdaptedVideoTrackSource::apply_rotation() {
  return broadcaster_.wants().rotation_applied;
}
// Feeds the (aggregated) sink wants into the VideoAdapter, which uses
// them to pick output resolutions/frame rates.
void AdaptedVideoTrackSource::OnSinkWantsChanged(
    const rtc::VideoSinkWants& wants) {
  video_adapter_.OnSinkWants(wants);
}
// Records the input resolution for GetStats() and asks the VideoAdapter
// for the output and crop resolution for a frame captured at `time_us`.
// Returns false when no sink wants a frame or the adapter drops it; on
// success the crop region is centered within the input frame.
bool AdaptedVideoTrackSource::AdaptFrame(int width,
                                         int height,
                                         int64_t time_us,
                                         int* out_width,
                                         int* out_height,
                                         int* crop_width,
                                         int* crop_height,
                                         int* crop_x,
                                         int* crop_y) {
  {
    // Scoped so the stats lock is not held across the adapter call below.
    webrtc::MutexLock lock(&stats_mutex_);
    stats_ = Stats{width, height};
  }
  if (!broadcaster_.frame_wanted()) {
    return false;
  }
  if (!video_adapter_.AdaptFrameResolution(
          width, height, time_us * rtc::kNumNanosecsPerMicrosec, crop_width,
          crop_height, out_width, out_height)) {
    broadcaster_.OnDiscardedFrame();
    // VideoAdapter dropped the frame.
    return false;
  }
  // Center the crop region within the source frame.
  *crop_x = (width - *crop_width) / 2;
  *crop_y = (height - *crop_height) / 2;
  return true;
}
// VideoTrackSourceInterface: forwards min/max-fps constraints to the
// broadcaster, which relays them to interested sinks.
void AdaptedVideoTrackSource::ProcessConstraints(
    const webrtc::VideoTrackSourceConstraints& constraints) {
  broadcaster_.ProcessConstraints(constraints);
}
} // namespace rtc

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
#define MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
#include <stdint.h>
#include "absl/types/optional.h"
#include "api/media_stream_interface.h"
#include "api/notifier.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "media/base/video_adapter.h"
#include "media/base/video_broadcaster.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/thread_annotations.h"
namespace rtc {
// Base class for sources which needs video adaptation, e.g., video
// capture sources. Sinks must be added and removed on one and only
// one thread, while AdaptFrame and OnFrame may be called on any
// thread.
class RTC_EXPORT AdaptedVideoTrackSource
    : public webrtc::Notifier<webrtc::VideoTrackSourceInterface> {
 public:
  AdaptedVideoTrackSource();
  ~AdaptedVideoTrackSource() override;

 protected:
  // Allows derived classes to initialize `video_adapter_` with a custom
  // alignment.
  explicit AdaptedVideoTrackSource(int required_alignment);
  // Checks the apply_rotation() flag. If the frame needs rotation, and it is a
  // plain memory frame, it is rotated. Subclasses producing native frames must
  // handle apply_rotation() themselves.
  void OnFrame(const webrtc::VideoFrame& frame);
  // Indication from source that a frame was dropped.
  void OnFrameDropped();
  // Reports the appropriate frame size after adaptation. Returns true
  // if a frame is wanted. Returns false if there are no interested
  // sinks, or if the VideoAdapter decides to drop the frame.
  bool AdaptFrame(int width,
                  int height,
                  int64_t time_us,
                  int* out_width,
                  int* out_height,
                  int* crop_width,
                  int* crop_height,
                  int* crop_x,
                  int* crop_y);
  // Returns the current value of the apply_rotation flag, derived
  // from the VideoSinkWants of registered sinks. The value is derived
  // from sinks' wants, in AddOrUpdateSink and RemoveSink. Beware that
  // when using this method from a different thread, the value may
  // become stale before it is used.
  bool apply_rotation();
  // Non-owning accessor for subclasses that need to tweak the adapter.
  cricket::VideoAdapter* video_adapter() { return &video_adapter_; }

 private:
  // Implements rtc::VideoSourceInterface.
  void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const rtc::VideoSinkWants& wants) override;
  void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
  // Part of VideoTrackSourceInterface.
  bool GetStats(Stats* stats) override;
  void OnSinkWantsChanged(const rtc::VideoSinkWants& wants);
  // Encoded sinks not implemented for AdaptedVideoTrackSource.
  bool SupportsEncodedOutput() const override { return false; }
  void GenerateKeyFrame() override {}
  void AddEncodedSink(
      rtc::VideoSinkInterface<webrtc::RecordableEncodedFrame>* sink) override {}
  void RemoveEncodedSink(
      rtc::VideoSinkInterface<webrtc::RecordableEncodedFrame>* sink) override {}
  void ProcessConstraints(
      const webrtc::VideoTrackSourceConstraints& constraints) override;

  cricket::VideoAdapter video_adapter_;
  // Guards stats_, which is written on the capture path (AdaptFrame) and
  // read via GetStats(), potentially from another thread.
  webrtc::Mutex stats_mutex_;
  absl::optional<Stats> stats_ RTC_GUARDED_BY(stats_mutex_);
  VideoBroadcaster broadcaster_;
};
} // namespace rtc
#endif // MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_AUDIO_SOURCE_H_
#define MEDIA_BASE_AUDIO_SOURCE_H_
#include <cstddef>
#include "absl/types/optional.h"
namespace cricket {
// Abstract interface for providing the audio data.
// TODO(deadbeef): Rename this to AudioSourceInterface, and rename
// webrtc::AudioSourceInterface to AudioTrackSourceInterface.
class AudioSource {
 public:
  // Receiver interface for audio produced by an AudioSource.
  class Sink {
   public:
    // Callback to receive data from the AudioSource.
    virtual void OnData(
        const void* audio_data,
        int bits_per_sample,
        int sample_rate,
        size_t number_of_channels,
        size_t number_of_frames,
        absl::optional<int64_t> absolute_capture_timestamp_ms) = 0;
    // Called when the AudioSource is going away.
    virtual void OnClose() = 0;
    // Returns the number of channels encoded by the sink. This can be less than
    // the number_of_channels if down-mixing occur. A value of -1 means an
    // unknown number.
    virtual int NumPreferredChannels() const = 0;

   protected:
    // Sinks are not destroyed through this interface.
    virtual ~Sink() {}
  };
  // Sets a sink to the AudioSource. There can be only one sink connected
  // to the source at a time.
  virtual void SetSink(Sink* sink) = 0;

 protected:
  // Sources are not destroyed through this interface.
  virtual ~AudioSource() {}
};
} // namespace cricket
#endif // MEDIA_BASE_AUDIO_SOURCE_H_

View file

@ -0,0 +1,522 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/codec.h"
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "api/audio_codecs/audio_format.h"
#include "api/video_codecs/av1_profile.h"
#include "api/video_codecs/h264_profile_level_id.h"
#ifdef RTC_ENABLE_H265
#include "api/video_codecs/h265_profile_tier_level.h"
#endif
#include "api/video_codecs/vp9_profile.h"
#include "media/base/media_constants.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/strings/string_builder.h"
namespace cricket {
namespace {
// Returns the H.264 "packetization-mode" fmtp value from `params`, or
// "0" when absent — the default mandated by RFC 6184 section 6.2.
std::string GetH264PacketizationModeOrDefault(
    const webrtc::CodecParameterMap& params) {
  auto found = params.find(kH264FmtpPacketizationMode);
  return found == params.end() ? "0" : found->second;
}
// Two H.264 codecs are only compatible when their (defaulted)
// packetization modes agree.
bool IsSameH264PacketizationMode(const webrtc::CodecParameterMap& left,
                                 const webrtc::CodecParameterMap& right) {
  return GetH264PacketizationModeOrDefault(left) ==
         GetH264PacketizationModeOrDefault(right);
}
#ifdef RTC_ENABLE_H265
// Returns the H.265 "tx-mode" fmtp value from `params`, or the RFC
// default when absent.
std::string GetH265TxModeOrDefault(const webrtc::CodecParameterMap& params) {
  auto it = params.find(kH265FmtpTxMode);
  if (it != params.end()) {
    return it->second;
  }
  // If TxMode is not present, a value of "SRST" must be inferred.
  // https://tools.ietf.org/html/rfc7798#section-7.1
  return "SRST";
}
// Two H.265 codecs are only compatible when their (defaulted) tx-modes
// agree; the comparison is case-insensitive per SDP conventions.
bool IsSameH265TxMode(const webrtc::CodecParameterMap& left,
                      const webrtc::CodecParameterMap& right) {
  return absl::EqualsIgnoreCase(GetH265TxModeOrDefault(left),
                                GetH265TxModeOrDefault(right));
}
#endif
// Some (video) codecs are actually families of codecs and rely on parameters
// to distinguish different incompatible family members.
// Some (video) codecs are actually families of codecs and rely on parameters
// to distinguish different incompatible family members.
// Returns true when `params1` and `params2` describe the same family member
// for the codec named by `name1`/`name2`; codecs with no family parameters
// always compare equal here.
bool IsSameCodecSpecific(const std::string& name1,
                         const webrtc::CodecParameterMap& params1,
                         const std::string& name2,
                         const webrtc::CodecParameterMap& params2) {
  // The names might not necessarily match, so check both.
  // Take the candidate name by const reference: the original signature took
  // it by value, copying the string on every call.
  auto either_name_matches = [&](const std::string& name) {
    return absl::EqualsIgnoreCase(name, name1) ||
           absl::EqualsIgnoreCase(name, name2);
  };
  if (either_name_matches(kH264CodecName))
    return webrtc::H264IsSameProfile(params1, params2) &&
           IsSameH264PacketizationMode(params1, params2);
  if (either_name_matches(kVp9CodecName))
    return webrtc::VP9IsSameProfile(params1, params2);
  if (either_name_matches(kAv1CodecName))
    return webrtc::AV1IsSameProfile(params1, params2);
#ifdef RTC_ENABLE_H265
  if (either_name_matches(kH265CodecName)) {
    return webrtc::H265IsSameProfileTierLevel(params1, params2) &&
           IsSameH265TxMode(params1, params2);
  }
#endif
  // No family parameters for this codec: treat as the same codec.
  return true;
}
} // namespace
FeedbackParams::FeedbackParams() = default;
FeedbackParams::~FeedbackParams() = default;
// Feedback params compare case-insensitively on both id and value, per
// SDP token conventions.
bool FeedbackParam::operator==(const FeedbackParam& other) const {
  return absl::EqualsIgnoreCase(other.id(), id()) &&
         absl::EqualsIgnoreCase(other.param(), param());
}
// Order matters: two FeedbackParams lists with the same entries in a
// different order compare unequal.
bool FeedbackParams::operator==(const FeedbackParams& other) const {
  return params_ == other.params_;
}
// Linear membership test; feedback param lists are always small.
bool FeedbackParams::Has(const FeedbackParam& param) const {
  return absl::c_linear_search(params_, param);
}
// Appends `param` unless its id is empty or an equal param is already
// present, keeping the list duplicate-free.
void FeedbackParams::Add(const FeedbackParam& param) {
  if (param.id().empty() || Has(param)) {
    // Nothing to do: invalid id, or the param is already in `this`.
    return;
  }
  params_.push_back(param);
  RTC_CHECK(!HasDuplicateEntries());
}
// Removes from `this` every param that `from` does not also contain,
// leaving the intersection of the two lists (order of `this` preserved).
void FeedbackParams::Intersect(const FeedbackParams& from) {
  for (auto it = params_.begin(); it != params_.end();) {
    if (from.Has(*it)) {
      ++it;
    } else {
      it = params_.erase(it);
    }
  }
}
// Pairwise O(n^2) scan for equal entries; acceptable because feedback
// param lists are always tiny.
bool FeedbackParams::HasDuplicateEntries() const {
  for (size_t i = 0; i < params_.size(); ++i) {
    for (size_t j = i + 1; j < params_.size(); ++j) {
      if (params_[i] == params_[j]) {
        return true;
      }
    }
  }
  return false;
}
// Audio-style constructor without a channel count (channels = 0).
Codec::Codec(Type type, int id, const std::string& name, int clockrate)
    : Codec(type, id, name, clockrate, 0) {}
// Designated constructor; all other constructors delegate here.
Codec::Codec(Type type,
             int id,
             const std::string& name,
             int clockrate,
             size_t channels)
    : type(type),
      id(id),
      name(name),
      clockrate(clockrate),
      bitrate(0),
      channels(channels) {}
// Creates an empty codec of the given type.
Codec::Codec(Type type) : Codec(type, 0, "", 0) {}
// Builds an audio codec (payload type 0) from an SDP audio format.
Codec::Codec(const webrtc::SdpAudioFormat& c)
    : Codec(Type::kAudio, 0, c.name, c.clockrate_hz, c.num_channels) {
  params = c.parameters;
}
// Builds a video codec (payload type 0) from an SDP video format; video
// always uses the fixed 90 kHz RTP clock rate.
Codec::Codec(const webrtc::SdpVideoFormat& c)
    : Codec(Type::kVideo, 0, c.name, kVideoCodecClockrate) {
  params = c.parameters;
  scalability_modes = c.scalability_modes;
}
Codec::Codec(const Codec& c) = default;
Codec::Codec(Codec&& c) = default;
Codec::~Codec() = default;
Codec& Codec::operator=(const Codec& c) = default;
Codec& Codec::operator=(Codec&& c) = default;
// Exact equality: common fields must match, plus bitrate/channels for
// audio or packetization for video. Note `scalability_modes` and
// `tx_mode` are intentionally not compared here.
bool Codec::operator==(const Codec& c) const {
  return type == c.type && this->id == c.id &&  // id is reserved in objective-c
         name == c.name && clockrate == c.clockrate && params == c.params &&
         feedback_params == c.feedback_params &&
         (type == Type::kAudio
              ? (bitrate == c.bitrate && channels == c.channels)
              : (packetization == c.packetization));
}
// Fuzzy compatibility check used for codec negotiation (as opposed to
// the exact operator==). See the inline comments for the id/name rules.
bool Codec::Matches(const Codec& codec) const {
  // Match the codec id/name based on the typical static/dynamic name rules.
  // Matching is case-insensitive.
  // We support the ranges [96, 127] and more recently [35, 65].
  // https://www.iana.org/assignments/rtp-parameters/rtp-parameters.xhtml#rtp-parameters-1
  // Within those ranges we match by codec name, outside by codec id.
  // Since no codecs are assigned an id in the range [66, 95] by us, these will
  // never match.
  const int kLowerDynamicRangeMin = 35;
  const int kLowerDynamicRangeMax = 65;
  const int kUpperDynamicRangeMin = 96;
  const int kUpperDynamicRangeMax = 127;
  const bool is_id_in_dynamic_range =
      (id >= kLowerDynamicRangeMin && id <= kLowerDynamicRangeMax) ||
      (id >= kUpperDynamicRangeMin && id <= kUpperDynamicRangeMax);
  const bool is_codec_id_in_dynamic_range =
      (codec.id >= kLowerDynamicRangeMin &&
       codec.id <= kLowerDynamicRangeMax) ||
      (codec.id >= kUpperDynamicRangeMin && codec.id <= kUpperDynamicRangeMax);
  // Name-based match only when BOTH ids are dynamic; otherwise the
  // payload type numbers themselves must agree.
  bool matches_id = is_id_in_dynamic_range && is_codec_id_in_dynamic_range
                        ? (absl::EqualsIgnoreCase(name, codec.name))
                        : (id == codec.id);
  auto matches_type_specific = [&]() {
    switch (type) {
      case Type::kAudio:
        // If a nonzero clockrate is specified, it must match the actual
        // clockrate. If a nonzero bitrate is specified, it must match the
        // actual bitrate, unless the codec is VBR (0), where we just force the
        // supplied value. The number of channels must match exactly, with the
        // exception that channels=0 is treated synonymously as channels=1, per
        // RFC 4566 section 6: " [The channels] parameter is OPTIONAL and may be
        // omitted if the number of channels is one."
        // Preference is ignored.
        // TODO(juberti): Treat a zero clockrate as 8000Hz, the RTP default
        // clockrate.
        return ((codec.clockrate == 0 /*&& clockrate == 8000*/) ||
                clockrate == codec.clockrate) &&
               (codec.bitrate == 0 || bitrate <= 0 ||
                bitrate == codec.bitrate) &&
               ((codec.channels < 2 && channels < 2) ||
                channels == codec.channels);
      case Type::kVideo:
        // Video compatibility is delegated to per-family profile checks.
        return IsSameCodecSpecific(name, params, codec.name, codec.params);
    }
  };
  return matches_id && matches_type_specific();
}
// Compares this codec against an RtpCodec capability. RTX codecs match
// on name/kind alone, since their clock rate and parameters (the
// associated payload type) are stream-specific rather than intrinsic.
bool Codec::MatchesRtpCodec(const webrtc::RtpCodec& codec_capability) const {
  webrtc::RtpCodecParameters codec_parameters = ToCodecParameters();
  return codec_parameters.name == codec_capability.name &&
         codec_parameters.kind == codec_capability.kind &&
         (codec_parameters.name == cricket::kRtxCodecName ||
          (codec_parameters.num_channels == codec_capability.num_channels &&
           codec_parameters.clock_rate == codec_capability.clock_rate &&
           codec_parameters.parameters == codec_capability.parameters));
}
// Find the parameter `name`; on success copy its string value into
// `out` and return true, otherwise leave `out` untouched.
bool Codec::GetParam(const std::string& name, std::string* out) const {
  auto it = params.find(name);
  if (it == params.end()) {
    return false;
  }
  *out = it->second;
  return true;
}
// As above, but parses the value as an int. Returns false when the
// parameter is missing or does not parse.
bool Codec::GetParam(const std::string& name, int* out) const {
  auto it = params.find(name);
  return it != params.end() && rtc::FromString(it->second, out);
}
// Inserts or overwrites the string-valued parameter `name`.
void Codec::SetParam(const std::string& name, const std::string& value) {
  params[name] = value;
}
// Inserts or overwrites `name` with the decimal rendering of `value`.
void Codec::SetParam(const std::string& name, int value) {
  params[name] = rtc::ToString(value);
}
// Returns true if the parameter existed and was removed.
bool Codec::RemoveParam(const std::string& name) {
  return params.erase(name) == 1;
}
// Thin wrappers over FeedbackParams; see that class for dedup/compare
// semantics.
void Codec::AddFeedbackParam(const FeedbackParam& param) {
  feedback_params.Add(param);
}
bool Codec::HasFeedbackParam(const FeedbackParam& param) const {
  return feedback_params.Has(param);
}
// Keeps only the feedback params that `other` also advertises.
void Codec::IntersectFeedbackParams(const Codec& other) {
  feedback_params.Intersect(other.feedback_params);
}
// Converts this codec to the api-layer RtpCodecParameters representation.
// Note: feedback params are not converted here.
webrtc::RtpCodecParameters Codec::ToCodecParameters() const {
  webrtc::RtpCodecParameters codec_params;
  codec_params.payload_type = id;
  codec_params.name = name;
  codec_params.clock_rate = clockrate;
  codec_params.parameters.insert(params.begin(), params.end());
  switch (type) {
    case Type::kAudio: {
      // num_channels is only meaningful for audio.
      codec_params.num_channels = static_cast<int>(channels);
      codec_params.kind = MEDIA_TYPE_AUDIO;
      break;
    }
    case Type::kVideo: {
      codec_params.kind = MEDIA_TYPE_VIDEO;
      break;
    }
  }
  return codec_params;
}
// A "media" codec carries actual audio/video payload: neither a
// resiliency codec (RED/FEC/RTX) nor comfort noise.
bool Codec::IsMediaCodec() const {
  return !IsResiliencyCodec() &&
         !absl::EqualsIgnoreCase(name, kComfortNoiseCodecName);
}
bool Codec::IsResiliencyCodec() const {
  return GetResiliencyType() != ResiliencyType::kNone;
}
// Maps the well-known resiliency codec names (case-insensitively) to
// their enum values; any other name is a plain media codec (kNone).
Codec::ResiliencyType Codec::GetResiliencyType() const {
  struct Mapping {
    const char* codec_name;
    ResiliencyType type;
  };
  const Mapping kMappings[] = {
      {kRedCodecName, ResiliencyType::kRed},
      {kUlpfecCodecName, ResiliencyType::kUlpfec},
      {kFlexfecCodecName, ResiliencyType::kFlexfec},
      {kRtxCodecName, ResiliencyType::kRtx},
  };
  for (const Mapping& mapping : kMappings) {
    if (absl::EqualsIgnoreCase(name, mapping.codec_name)) {
      return mapping.type;
    }
  }
  return ResiliencyType::kNone;
}
// Sanity-checks the payload type and, for media codecs, the min/max
// bitrate parameters. Logs and returns false on failure.
bool Codec::ValidateCodecFormat() const {
  if (id < 0 || id > 127) {
    RTC_LOG(LS_ERROR) << "Codec with invalid payload type: " << ToString();
    return false;
  }
  // Resiliency codecs (RED/FEC/RTX) carry no bitrate params to validate.
  if (IsResiliencyCodec()) {
    return true;
  }
  int min_bitrate = -1;
  int max_bitrate = -1;
  // Only validated when BOTH bounds are present.
  if (GetParam(kCodecParamMinBitrate, &min_bitrate) &&
      GetParam(kCodecParamMaxBitrate, &max_bitrate)) {
    if (max_bitrate < min_bitrate) {
      RTC_LOG(LS_ERROR) << "Codec with max < min bitrate: " << ToString();
      return false;
    }
  }
  return true;
}
// Human-readable summary for logging, e.g. "AudioCodec[111:opus:48000:0:2]"
// or "VideoCodec[96:VP8]". Truncated if it exceeds the fixed buffer.
std::string Codec::ToString() const {
  char buf[256];
  rtc::SimpleStringBuilder sb(buf);
  switch (type) {
    case Type::kAudio: {
      sb << "AudioCodec[" << id << ":" << name << ":" << clockrate << ":"
         << bitrate << ":" << channels << "]";
      break;
    }
    case Type::kVideo: {
      sb << "VideoCodec[" << id << ":" << name;
      if (packetization.has_value()) {
        sb << ":" << *packetization;
      }
      sb << "]";
      break;
    }
  }
  return sb.str();
}
// Builds an audio RTX codec (0 Hz clockrate, mono) whose "apt" fmtp
// parameter points at the protected payload type.
Codec CreateAudioRtxCodec(int rtx_payload_type, int associated_payload_type) {
  Codec rtx_codec = CreateAudioCodec(rtx_payload_type, kRtxCodecName, 0, 1);
  rtx_codec.SetParam(kCodecParamAssociatedPayloadType, associated_payload_type);
  return rtx_codec;
}
// Builds a video RTX codec whose "apt" fmtp parameter points at the
// protected payload type.
Codec CreateVideoRtxCodec(int rtx_payload_type, int associated_payload_type) {
  Codec rtx_codec = CreateVideoCodec(rtx_payload_type, kRtxCodecName);
  rtx_codec.SetParam(kCodecParamAssociatedPayloadType, associated_payload_type);
  return rtx_codec;
}
// Returns a pointer into `codecs` for the first codec whose payload
// type equals `payload_type`, or nullptr if none matches. The pointer
// is only valid while `codecs` is alive and unmodified.
const Codec* FindCodecById(const std::vector<Codec>& codecs, int payload_type) {
  for (size_t i = 0; i < codecs.size(); ++i) {
    if (codecs[i].id == payload_type) {
      return &codecs[i];
    }
  }
  return nullptr;
}
// Convenience predicates for the standard RTCP feedback capabilities.
bool HasLntf(const Codec& codec) {
  return codec.HasFeedbackParam(
      FeedbackParam(kRtcpFbParamLntf, kParamValueEmpty));
}
bool HasNack(const Codec& codec) {
  return codec.HasFeedbackParam(
      FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
}
bool HasRemb(const Codec& codec) {
  return codec.HasFeedbackParam(
      FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
}
bool HasRrtr(const Codec& codec) {
  return codec.HasFeedbackParam(
      FeedbackParam(kRtcpFbParamRrtr, kParamValueEmpty));
}
bool HasTransportCc(const Codec& codec) {
  return codec.HasFeedbackParam(
      FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
}
// Returns the first entry of `supported_codecs` that is the same codec
// as `codec` per SdpVideoFormat::IsSameCodec (name + profile-level
// params), or nullptr. The pointer is only valid while the vector lives.
const Codec* FindMatchingVideoCodec(const std::vector<Codec>& supported_codecs,
                                    const Codec& codec) {
  webrtc::SdpVideoFormat sdp_video_format{codec.name, codec.params};
  for (const Codec& supported_codec : supported_codecs) {
    if (sdp_video_format.IsSameCodec(
            {supported_codec.name, supported_codec.params})) {
      return &supported_codec;
    }
  }
  return nullptr;
}
// As FindMatchingVideoCodec, but collects pointers to ALL entries of
// `supported_codecs` that are the same codec as `codec`.
std::vector<const Codec*> FindAllMatchingCodecs(
    const std::vector<Codec>& supported_codecs,
    const Codec& codec) {
  std::vector<const Codec*> result;
  webrtc::SdpVideoFormat sdp(codec.name, codec.params);
  for (const Codec& supported_codec : supported_codecs) {
    if (sdp.IsSameCodec({supported_codec.name, supported_codec.params})) {
      result.push_back(&supported_codec);
    }
  }
  return result;
}
// If a decoder supports any H264 profile, it is implicitly assumed to also
// support constrained base line even though it's not explicitly listed.
// For every H.264 entry in `supported_formats` with a parseable,
// non-CBP profile-level-id, appends a copy rewritten to constrained
// baseline — unless an equivalent format is already present.
void AddH264ConstrainedBaselineProfileToSupportedFormats(
    std::vector<webrtc::SdpVideoFormat>* supported_formats) {
  std::vector<webrtc::SdpVideoFormat> cbr_supported_formats;
  // For any H264 supported profile, add the corresponding constrained baseline
  // profile.
  for (auto it = supported_formats->cbegin(); it != supported_formats->cend();
       ++it) {
    if (it->name == cricket::kH264CodecName) {
      const absl::optional<webrtc::H264ProfileLevelId> profile_level_id =
          webrtc::ParseSdpForH264ProfileLevelId(it->parameters);
      if (profile_level_id &&
          profile_level_id->profile !=
              webrtc::H264Profile::kProfileConstrainedBaseline) {
        // Copy the format, swap only the profile to CBP, keep the level.
        webrtc::SdpVideoFormat cbp_format = *it;
        webrtc::H264ProfileLevelId cbp_profile = *profile_level_id;
        cbp_profile.profile = webrtc::H264Profile::kProfileConstrainedBaseline;
        cbp_format.parameters[cricket::kH264FmtpProfileLevelId] =
            *webrtc::H264ProfileLevelIdToString(cbp_profile);
        cbr_supported_formats.push_back(cbp_format);
      }
    }
  }
  size_t original_size = supported_formats->size();
  // ...if it's not already in the list.
  std::copy_if(cbr_supported_formats.begin(), cbr_supported_formats.end(),
               std::back_inserter(*supported_formats),
               [supported_formats](const webrtc::SdpVideoFormat& format) {
                 return !format.IsCodecInList(*supported_formats);
               });
  if (supported_formats->size() > original_size) {
    RTC_LOG(LS_WARNING) << "Explicitly added H264 constrained baseline to list "
                           "of supported formats.";
  }
}
// Public factory for audio codecs (the Codec constructors are protected).
Codec CreateAudioCodec(int id,
                       const std::string& name,
                       int clockrate,
                       size_t channels) {
  return Codec(Codec::Type::kAudio, id, name, clockrate, channels);
}
// Audio factory from an SDP audio format; payload type defaults to 0.
Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c) {
  return Codec(c);
}
// Video factory with the default payload type 0.
Codec CreateVideoCodec(const std::string& name) {
  return CreateVideoCodec(0, name);
}
// Public factory for video codecs. H.264 codecs get packetization-mode=1
// by default (see TODO below).
Codec CreateVideoCodec(int id, const std::string& name) {
  Codec c(Codec::Type::kVideo, id, name, kVideoCodecClockrate);
  if (absl::EqualsIgnoreCase(kH264CodecName, name)) {
    // This default is set for all H.264 codecs created because
    // that was the default before packetization mode support was added.
    // TODO(hta): Move this to the places that create VideoCodecs from
    // SDP or from knowledge of implementation capabilities.
    c.SetParam(kH264FmtpPacketizationMode, "1");
  }
  return c;
}
// Video factory from an SDP video format; payload type defaults to 0.
Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c) {
  return Codec(c);
}
} // namespace cricket

View file

@ -0,0 +1,230 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_CODEC_H_
#define MEDIA_BASE_CODEC_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_format.h"
#include "api/rtp_parameters.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/media_constants.h"
#include "rtc_base/system/rtc_export.h"
namespace cricket {
// A single RTCP feedback capability, e.g. ("nack", "pli") for an
// "a=rtcp-fb:... nack pli" SDP line. Comparison is case-insensitive.
class FeedbackParam {
 public:
  FeedbackParam() = default;
  FeedbackParam(absl::string_view id, const std::string& param)
      : id_(id), param_(param) {}
  // Param-less feedback capability, e.g. plain "nack".
  explicit FeedbackParam(absl::string_view id)
      : id_(id), param_(kParamValueEmpty) {}
  bool operator==(const FeedbackParam& other) const;
  bool operator!=(const FeedbackParam& c) const { return !(*this == c); }
  const std::string& id() const { return id_; }
  const std::string& param() const { return param_; }

 private:
  std::string id_;     // e.g. "nack", "ccm"
  std::string param_;  // e.g. "", "rpsi", "fir"
};
// An ordered, duplicate-free collection of FeedbackParam entries.
// Equality is order-sensitive.
class FeedbackParams {
 public:
  FeedbackParams();
  ~FeedbackParams();
  bool operator==(const FeedbackParams& other) const;
  bool operator!=(const FeedbackParams& c) const { return !(*this == c); }
  bool Has(const FeedbackParam& param) const;
  // No-op for empty ids and duplicates.
  void Add(const FeedbackParam& param);
  // Keeps only the params also present in `from`.
  void Intersect(const FeedbackParams& from);
  const std::vector<FeedbackParam>& params() const { return params_; }

 private:
  bool HasDuplicateEntries() const;
  std::vector<FeedbackParam> params_;
};
// Unified audio/video codec description used during negotiation. Create
// instances through the CreateAudioCodec/CreateVideoCodec factories;
// the constructors are protected.
struct RTC_EXPORT Codec {
  enum class Type {
    kAudio,
    kVideo,
  };
  enum class ResiliencyType {
    kNone,
    kRed,
    kUlpfec,
    kFlexfec,
    kRtx,
  };
  Type type;
  // RTP payload type.
  int id;
  std::string name;
  int clockrate;
  // Audio only
  // Can be used to override the target bitrate in the encoder.
  // TODO(orphis): Remove in favor of alternative APIs
  int bitrate;
  size_t channels;
  // Video only
  absl::optional<std::string> packetization;
  absl::InlinedVector<webrtc::ScalabilityMode, webrtc::kScalabilityModeCount>
      scalability_modes;
  // H.265 only
  absl::optional<std::string> tx_mode;
  // Non key-value parameters such as the telephone-event "015" are
  // represented using an empty string as key, i.e. {"": "0-15"}.
  webrtc::CodecParameterMap params;
  FeedbackParams feedback_params;

  Codec(const Codec& c);
  Codec(Codec&& c);
  virtual ~Codec();

  // Indicates if this codec is compatible with the specified codec by
  // checking the assigned id and profile values for the relevant video codecs.
  // For H.264, packetization modes will be compared; If H.265 is enabled,
  // TxModes will be compared.
  // H.264(and H.265, if enabled) levels are not compared.
  bool Matches(const Codec& codec) const;
  bool MatchesRtpCodec(const webrtc::RtpCodec& capability) const;
  // Find the parameter for `name` and write the value to `out`.
  bool GetParam(const std::string& name, std::string* out) const;
  bool GetParam(const std::string& name, int* out) const;
  void SetParam(const std::string& name, const std::string& value);
  void SetParam(const std::string& name, int value);
  // It is safe to input a non-existent parameter.
  // Returns true if the parameter existed, false if it did not exist.
  bool RemoveParam(const std::string& name);
  bool HasFeedbackParam(const FeedbackParam& param) const;
  void AddFeedbackParam(const FeedbackParam& param);
  // Filter `this` feedbacks params such that only those shared by both `this`
  // and `other` are kept.
  void IntersectFeedbackParams(const Codec& other);
  virtual webrtc::RtpCodecParameters ToCodecParameters() const;
  // The codec represent an actual media codec, and not a resiliency codec.
  bool IsMediaCodec() const;
  // The codec represent a resiliency codec such as RED, RTX or FEC variants.
  bool IsResiliencyCodec() const;
  ResiliencyType GetResiliencyType() const;
  // Validates a VideoCodec's payload type, dimensions and bitrates etc. If they
  // don't make sense (such as max < min bitrate), and error is logged and
  // ValidateCodecFormat returns false.
  bool ValidateCodecFormat() const;
  std::string ToString() const;
  Codec& operator=(const Codec& c);
  Codec& operator=(Codec&& c);
  bool operator==(const Codec& c) const;
  bool operator!=(const Codec& c) const { return !(*this == c); }

 protected:
  // Creates an empty codec.
  explicit Codec(Type type);
  // Creates a codec with the given parameters.
  Codec(Type type, int id, const std::string& name, int clockrate);
  Codec(Type type,
        int id,
        const std::string& name,
        int clockrate,
        size_t channels);
  explicit Codec(const webrtc::SdpAudioFormat& c);
  explicit Codec(const webrtc::SdpVideoFormat& c);
  // Factories are friends so they can reach the protected constructors.
  friend Codec CreateAudioCodec(int id,
                                const std::string& name,
                                int clockrate,
                                size_t channels);
  friend Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c);
  friend Codec CreateAudioRtxCodec(int rtx_payload_type,
                                   int associated_payload_type);
  friend Codec CreateVideoCodec(int id, const std::string& name);
  friend Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c);
  friend Codec CreateVideoRtxCodec(int rtx_payload_type,
                                   int associated_payload_type);
};
// TODO(webrtc:15214): Compatibility names, to be migrated away and removed.
using VideoCodec = Codec;
using AudioCodec = Codec;
using VideoCodecs = std::vector<Codec>;
using AudioCodecs = std::vector<Codec>;

// Factory functions; the only way to construct Codec instances outside
// this translation unit (see the protected constructors above).
Codec CreateAudioCodec(int id,
                       const std::string& name,
                       int clockrate,
                       size_t channels);
Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c);
Codec CreateAudioRtxCodec(int rtx_payload_type, int associated_payload_type);
Codec CreateVideoCodec(const std::string& name);
Codec CreateVideoCodec(int id, const std::string& name);
Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c);
Codec CreateVideoRtxCodec(int rtx_payload_type, int associated_payload_type);
// Get the codec setting associated with `payload_type`. If there
// is no codec associated with that payload type it returns nullptr.
const Codec* FindCodecById(const std::vector<Codec>& codecs, int payload_type);
// Predicates for the standard RTCP feedback capabilities.
bool HasLntf(const Codec& codec);
bool HasNack(const Codec& codec);
bool HasRemb(const Codec& codec);
bool HasRrtr(const Codec& codec);
bool HasTransportCc(const Codec& codec);
// Returns the first codec in `supported_codecs` that matches `codec`, or
// nullptr if no codec matches.
const Codec* FindMatchingVideoCodec(const std::vector<Codec>& supported_codecs,
                                    const Codec& codec);
// Returns all codecs in `supported_codecs` that matches `codec`.
std::vector<const Codec*> FindAllMatchingCodecs(
    const std::vector<Codec>& supported_codecs,
    const Codec& codec);
RTC_EXPORT void AddH264ConstrainedBaselineProfileToSupportedFormats(
    std::vector<webrtc::SdpVideoFormat>* supported_formats);
} // namespace cricket
#endif // MEDIA_BASE_CODEC_H_

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_frame_source.h"
#include "api/scoped_refptr.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace cricket {
// Constructs a source producing `width`x`height` frames whose timestamps
// start at `timestamp_offset_us` and advance by `interval_us` per frame.
FakeFrameSource::FakeFrameSource(int width,
                                 int height,
                                 int interval_us,
                                 int64_t timestamp_offset_us)
    : width_(width),
      height_(height),
      interval_us_(interval_us),
      next_timestamp_us_(timestamp_offset_us) {
  // Reject degenerate configurations up front.
  RTC_CHECK_GT(width_, 0);
  RTC_CHECK_GT(height_, 0);
  RTC_CHECK_GT(interval_us_, 0);
  RTC_CHECK_GE(next_timestamp_us_, 0);
}
// Convenience overload: timestamps start at the current rtc::TimeMicros().
FakeFrameSource::FakeFrameSource(int width, int height, int interval_us)
    : FakeFrameSource(width, height, interval_us, rtc::TimeMicros()) {}
// Returns the rotation currently attached to frames produced by GetFrame().
webrtc::VideoRotation FakeFrameSource::GetRotation() const {
  return rotation_;
}
// Sets the rotation used for subsequently produced frames.
void FakeFrameSource::SetRotation(webrtc::VideoRotation rotation) {
  rotation_ = rotation;
}
// Produces a frame as if the configured rotation had already been applied:
// the returned frame carries kVideoRotation_0, and for the 90/270 degree
// cases the output dimensions are swapped.
webrtc::VideoFrame FakeFrameSource::GetFrameRotationApplied() {
  const bool portrait = (rotation_ == webrtc::kVideoRotation_90 ||
                         rotation_ == webrtc::kVideoRotation_270);
  if (portrait) {
    return GetFrame(height_, width_, webrtc::kVideoRotation_0, interval_us_);
  }
  if (rotation_ == webrtc::kVideoRotation_0 ||
      rotation_ == webrtc::kVideoRotation_180) {
    return GetFrame(width_, height_, webrtc::kVideoRotation_0, interval_us_);
  }
  RTC_DCHECK_NOTREACHED() << "Invalid rotation value: "
                          << static_cast<int>(rotation_);
  // Without this return, the Windows Visual Studio compiler complains
  // "not all control paths return a value".
  return GetFrame();
}
// Produces a frame using the currently configured size, rotation and interval.
webrtc::VideoFrame FakeFrameSource::GetFrame() {
  return GetFrame(width_, height_, rotation_, interval_us_);
}
// Core frame producer: builds an I420 frame of the given size with
// initialized pixel data, stamps it with the pending timestamp, and then
// advances the internal timestamp by `interval_us`.
webrtc::VideoFrame FakeFrameSource::GetFrame(int width,
                                             int height,
                                             webrtc::VideoRotation rotation,
                                             int interval_us) {
  RTC_CHECK_GT(width, 0);
  RTC_CHECK_GT(height, 0);
  RTC_CHECK_GT(interval_us, 0);
  rtc::scoped_refptr<webrtc::I420Buffer> buffer(
      webrtc::I420Buffer::Create(width, height));
  buffer->InitializeData();
  webrtc::VideoFrame frame = webrtc::VideoFrame::Builder()
                                 .set_video_frame_buffer(buffer)
                                 .set_rotation(rotation)
                                 .set_timestamp_us(next_timestamp_us_)
                                 .build();
  // Side effect: the next frame's timestamp moves forward by one interval.
  next_timestamp_us_ += interval_us;
  return frame;
}
} // namespace cricket

View file

@ -0,0 +1,50 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_FRAME_SOURCE_H_
#define MEDIA_BASE_FAKE_FRAME_SOURCE_H_
#include "api/video/video_frame.h"
#include "rtc_base/time_utils.h"
namespace cricket {
// Test helper that synthesizes webrtc::VideoFrame objects of a fixed size
// with monotonically increasing timestamps. Not thread-safe; intended for
// single-threaded use in tests.
class FakeFrameSource {
 public:
  FakeFrameSource(int width,
                  int height,
                  int interval_us,
                  int64_t timestamp_offset_us);
  // Timestamps start at rtc::TimeMicros().
  FakeFrameSource(int width, int height, int interval_us);
  webrtc::VideoRotation GetRotation() const;
  void SetRotation(webrtc::VideoRotation rotation);
  // Next frame with the configured rotation attached (pending rotation).
  webrtc::VideoFrame GetFrame();
  // Next frame with the rotation already baked in (dimensions swapped for
  // 90/270 degrees, frame rotation reported as 0).
  webrtc::VideoFrame GetFrameRotationApplied();
  // Override configuration.
  webrtc::VideoFrame GetFrame(int width,
                              int height,
                              webrtc::VideoRotation rotation,
                              int interval_us);
 private:
  const int width_;
  const int height_;
  const int interval_us_;
  webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
  // Timestamp to stamp on the next produced frame.
  int64_t next_timestamp_us_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_FRAME_SOURCE_H_

View file

@ -0,0 +1,705 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_media_engine.h"
#include <memory>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "media/base/media_channel.h"
#include "rtc_base/checks.h"
namespace cricket {
using webrtc::TaskQueueBase;
FakeVoiceMediaReceiveChannel::DtmfInfo::DtmfInfo(uint32_t ssrc,
int event_code,
int duration)
: ssrc(ssrc), event_code(event_code), duration(duration) {}
FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::VoiceChannelAudioSink(
AudioSource* source)
: source_(source) {
source_->SetSink(this);
}
FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
if (source_) {
source_->SetSink(nullptr);
}
}
void FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::OnData(
const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
absl::optional<int64_t> absolute_capture_timestamp_ms) {}
void FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::OnClose() {
source_ = nullptr;
}
AudioSource* FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::source()
const {
return source_;
}
FakeVoiceMediaReceiveChannel::FakeVoiceMediaReceiveChannel(
const AudioOptions& options,
TaskQueueBase* network_thread)
: RtpReceiveChannelHelper<VoiceMediaReceiveChannelInterface>(
network_thread),
max_bps_(-1) {
output_scalings_[0] = 1.0; // For default channel.
SetOptions(options);
}
FakeVoiceMediaReceiveChannel::~FakeVoiceMediaReceiveChannel() = default;
const std::vector<AudioCodec>& FakeVoiceMediaReceiveChannel::recv_codecs()
const {
return recv_codecs_;
}
const std::vector<FakeVoiceMediaReceiveChannel::DtmfInfo>&
FakeVoiceMediaReceiveChannel::dtmf_info_queue() const {
return dtmf_info_queue_;
}
const AudioOptions& FakeVoiceMediaReceiveChannel::options() const {
return options_;
}
int FakeVoiceMediaReceiveChannel::max_bps() const {
return max_bps_;
}
bool FakeVoiceMediaReceiveChannel::SetReceiverParameters(
const AudioReceiverParameters& params) {
set_recv_rtcp_parameters(params.rtcp);
return (SetRecvCodecs(params.codecs) &&
SetRecvRtpHeaderExtensions(params.extensions));
}
void FakeVoiceMediaReceiveChannel::SetPlayout(bool playout) {
set_playout(playout);
}
bool FakeVoiceMediaReceiveChannel::HasSource(uint32_t ssrc) const {
return local_sinks_.find(ssrc) != local_sinks_.end();
}
bool FakeVoiceMediaReceiveChannel::AddRecvStream(const StreamParams& sp) {
if (!RtpReceiveChannelHelper<
VoiceMediaReceiveChannelInterface>::AddRecvStream(sp))
return false;
output_scalings_[sp.first_ssrc()] = 1.0;
output_delays_[sp.first_ssrc()] = 0;
return true;
}
bool FakeVoiceMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
if (!RtpReceiveChannelHelper<
VoiceMediaReceiveChannelInterface>::RemoveRecvStream(ssrc))
return false;
output_scalings_.erase(ssrc);
output_delays_.erase(ssrc);
return true;
}
// Sets the output scaling for `ssrc`. Returns false when no receive stream
// with that ssrc is known. Uses a single map lookup instead of
// find() followed by operator[].
bool FakeVoiceMediaReceiveChannel::SetOutputVolume(uint32_t ssrc,
                                                   double volume) {
  auto it = output_scalings_.find(ssrc);
  if (it == output_scalings_.end()) {
    return false;
  }
  it->second = volume;
  return true;
}
// Applies `volume` to every known receive stream, including the default
// channel (ssrc 0). Always succeeds.
bool FakeVoiceMediaReceiveChannel::SetDefaultOutputVolume(double volume) {
  for (auto& ssrc_and_scaling : output_scalings_) {
    ssrc_and_scaling.second = volume;
  }
  return true;
}
// Reads the output scaling for `ssrc` into `*volume`. Returns false when the
// ssrc is unknown. Single lookup; avoids the original find()-then-operator[]
// double lookup (operator[] would also default-insert on a missing key).
bool FakeVoiceMediaReceiveChannel::GetOutputVolume(uint32_t ssrc,
                                                   double* volume) {
  const auto it = output_scalings_.find(ssrc);
  if (it == output_scalings_.end())
    return false;
  *volume = it->second;
  return true;
}
// Records the base minimum playout delay for `ssrc`. Returns false when the
// ssrc has no receive stream. One lookup (iterator reused for the write)
// instead of find() followed by operator[]; the redundant `else` is gone.
bool FakeVoiceMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
                                                                int delay_ms) {
  auto it = output_delays_.find(ssrc);
  if (it == output_delays_.end()) {
    return false;
  }
  it->second = delay_ms;
  return true;
}
// Returns the delay previously stored for `ssrc`, or nullopt when the ssrc
// is unknown.
absl::optional<int> FakeVoiceMediaReceiveChannel::GetBaseMinimumPlayoutDelayMs(
    uint32_t ssrc) const {
  const auto delay_it = output_delays_.find(ssrc);
  if (delay_it == output_delays_.end()) {
    return absl::nullopt;
  }
  return delay_it->second;
}
bool FakeVoiceMediaReceiveChannel::GetStats(VoiceMediaReceiveInfo* info,
bool get_and_clear_legacy_stats) {
return false;
}
void FakeVoiceMediaReceiveChannel::SetRawAudioSink(
uint32_t ssrc,
std::unique_ptr<webrtc::AudioSinkInterface> sink) {
sink_ = std::move(sink);
}
void FakeVoiceMediaReceiveChannel::SetDefaultRawAudioSink(
std::unique_ptr<webrtc::AudioSinkInterface> sink) {
sink_ = std::move(sink);
}
std::vector<webrtc::RtpSource> FakeVoiceMediaReceiveChannel::GetSources(
uint32_t ssrc) const {
return std::vector<webrtc::RtpSource>();
}
bool FakeVoiceMediaReceiveChannel::SetRecvCodecs(
const std::vector<AudioCodec>& codecs) {
if (fail_set_recv_codecs()) {
// Fake the failure in SetRecvCodecs.
return false;
}
recv_codecs_ = codecs;
return true;
}
bool FakeVoiceMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
max_bps_ = bps;
return true;
}
bool FakeVoiceMediaReceiveChannel::SetOptions(const AudioOptions& options) {
// Does a "merge" of current options and set options.
options_.SetAll(options);
return true;
}
FakeVoiceMediaSendChannel::DtmfInfo::DtmfInfo(uint32_t ssrc,
int event_code,
int duration)
: ssrc(ssrc), event_code(event_code), duration(duration) {}
FakeVoiceMediaSendChannel::VoiceChannelAudioSink::VoiceChannelAudioSink(
AudioSource* source)
: source_(source) {
source_->SetSink(this);
}
FakeVoiceMediaSendChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
if (source_) {
source_->SetSink(nullptr);
}
}
void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnData(
const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
absl::optional<int64_t> absolute_capture_timestamp_ms) {}
void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnClose() {
source_ = nullptr;
}
AudioSource* FakeVoiceMediaSendChannel::VoiceChannelAudioSink::source() const {
return source_;
}
FakeVoiceMediaSendChannel::FakeVoiceMediaSendChannel(
const AudioOptions& options,
TaskQueueBase* network_thread)
: RtpSendChannelHelper<VoiceMediaSendChannelInterface>(network_thread),
max_bps_(-1) {
output_scalings_[0] = 1.0; // For default channel.
SetOptions(options);
}
FakeVoiceMediaSendChannel::~FakeVoiceMediaSendChannel() = default;
const std::vector<AudioCodec>& FakeVoiceMediaSendChannel::send_codecs() const {
return send_codecs_;
}
absl::optional<Codec> FakeVoiceMediaSendChannel::GetSendCodec() const {
if (!send_codecs_.empty()) {
return send_codecs_.front();
}
return absl::nullopt;
}
const std::vector<FakeVoiceMediaSendChannel::DtmfInfo>&
FakeVoiceMediaSendChannel::dtmf_info_queue() const {
return dtmf_info_queue_;
}
const AudioOptions& FakeVoiceMediaSendChannel::options() const {
return options_;
}
int FakeVoiceMediaSendChannel::max_bps() const {
return max_bps_;
}
bool FakeVoiceMediaSendChannel::SetSenderParameters(
const AudioSenderParameter& params) {
set_send_rtcp_parameters(params.rtcp);
SetExtmapAllowMixed(params.extmap_allow_mixed);
return (SetSendCodecs(params.codecs) &&
SetSendRtpHeaderExtensions(params.extensions) &&
SetMaxSendBandwidth(params.max_bandwidth_bps) &&
SetOptions(params.options));
}
void FakeVoiceMediaSendChannel::SetSend(bool send) {
set_sending(send);
}
bool FakeVoiceMediaSendChannel::SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
AudioSource* source) {
if (!SetLocalSource(ssrc, source)) {
return false;
}
if (!RtpSendChannelHelper<VoiceMediaSendChannelInterface>::MuteStream(
ssrc, !enable)) {
return false;
}
if (enable && options) {
return SetOptions(*options);
}
return true;
}
bool FakeVoiceMediaSendChannel::HasSource(uint32_t ssrc) const {
return local_sinks_.find(ssrc) != local_sinks_.end();
}
bool FakeVoiceMediaSendChannel::CanInsertDtmf() {
for (std::vector<AudioCodec>::const_iterator it = send_codecs_.begin();
it != send_codecs_.end(); ++it) {
// Find the DTMF telephone event "codec".
if (absl::EqualsIgnoreCase(it->name, "telephone-event")) {
return true;
}
}
return false;
}
// Records the DTMF event so tests can inspect it via dtmf_info_queue().
// Always reports success.
bool FakeVoiceMediaSendChannel::InsertDtmf(uint32_t ssrc,
                                           int event_code,
                                           int duration) {
  dtmf_info_queue_.emplace_back(ssrc, event_code, duration);
  return true;
}
// Reads the output scaling for `ssrc` into `*volume`; false when unknown.
// Single lookup instead of find() followed by operator[] (which would also
// default-insert on a missing key).
bool FakeVoiceMediaSendChannel::GetOutputVolume(uint32_t ssrc, double* volume) {
  const auto it = output_scalings_.find(ssrc);
  if (it == output_scalings_.end())
    return false;
  *volume = it->second;
  return true;
}
bool FakeVoiceMediaSendChannel::GetStats(VoiceMediaSendInfo* info) {
return false;
}
bool FakeVoiceMediaSendChannel::SetSendCodecs(
const std::vector<AudioCodec>& codecs) {
if (fail_set_send_codecs()) {
// Fake the failure in SetSendCodecs.
return false;
}
send_codecs_ = codecs;
return true;
}
bool FakeVoiceMediaSendChannel::SetMaxSendBandwidth(int bps) {
max_bps_ = bps;
return true;
}
bool FakeVoiceMediaSendChannel::SetOptions(const AudioOptions& options) {
// Does a "merge" of current options and set options.
options_.SetAll(options);
return true;
}
// Attaches (source != nullptr) or detaches (source == nullptr) the audio
// source feeding `ssrc`. Attaching wraps the source in a
// VoiceChannelAudioSink, whose constructor registers itself as the source's
// sink; erasing the entry tears that registration down via the destructor.
bool FakeVoiceMediaSendChannel::SetLocalSource(uint32_t ssrc,
                                               AudioSource* source) {
  auto it = local_sinks_.find(ssrc);
  if (source) {
    if (it != local_sinks_.end()) {
      // Re-attaching is only legal with the same source.
      RTC_CHECK(it->second->source() == source);
    } else {
      local_sinks_.insert(std::make_pair(
          ssrc, std::make_unique<VoiceChannelAudioSink>(source)));
    }
  } else {
    if (it != local_sinks_.end()) {
      local_sinks_.erase(it);
    }
  }
  return true;
}
bool CompareDtmfInfo(const FakeVoiceMediaSendChannel::DtmfInfo& info,
uint32_t ssrc,
int event_code,
int duration) {
return (info.duration == duration && info.event_code == event_code &&
info.ssrc == ssrc);
}
FakeVideoMediaSendChannel::FakeVideoMediaSendChannel(
const VideoOptions& options,
TaskQueueBase* network_thread)
: RtpSendChannelHelper<VideoMediaSendChannelInterface>(network_thread),
max_bps_(-1) {
SetOptions(options);
}
FakeVideoMediaSendChannel::~FakeVideoMediaSendChannel() = default;
const std::vector<VideoCodec>& FakeVideoMediaSendChannel::send_codecs() const {
return send_codecs_;
}
const std::vector<VideoCodec>& FakeVideoMediaSendChannel::codecs() const {
return send_codecs();
}
const VideoOptions& FakeVideoMediaSendChannel::options() const {
return options_;
}
int FakeVideoMediaSendChannel::max_bps() const {
return max_bps_;
}
bool FakeVideoMediaSendChannel::SetSenderParameters(
const VideoSenderParameters& params) {
set_send_rtcp_parameters(params.rtcp);
SetExtmapAllowMixed(params.extmap_allow_mixed);
return (SetSendCodecs(params.codecs) &&
SetSendRtpHeaderExtensions(params.extensions) &&
SetMaxSendBandwidth(params.max_bandwidth_bps));
}
absl::optional<Codec> FakeVideoMediaSendChannel::GetSendCodec() const {
if (send_codecs_.empty()) {
return absl::nullopt;
}
return send_codecs_[0];
}
bool FakeVideoMediaSendChannel::SetSend(bool send) {
return set_sending(send);
}
bool FakeVideoMediaSendChannel::SetVideoSend(
uint32_t ssrc,
const VideoOptions* options,
rtc::VideoSourceInterface<webrtc::VideoFrame>* source) {
if (options) {
if (!SetOptions(*options)) {
return false;
}
}
sources_[ssrc] = source;
return true;
}
bool FakeVideoMediaSendChannel::HasSource(uint32_t ssrc) const {
return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
}
void FakeVideoMediaSendChannel::FillBitrateInfo(
BandwidthEstimationInfo* bwe_info) {}
bool FakeVideoMediaSendChannel::GetStats(VideoMediaSendInfo* info) {
return false;
}
bool FakeVideoMediaSendChannel::SetSendCodecs(
const std::vector<VideoCodec>& codecs) {
if (fail_set_send_codecs()) {
// Fake the failure in SetSendCodecs.
return false;
}
send_codecs_ = codecs;
return true;
}
bool FakeVideoMediaSendChannel::SetOptions(const VideoOptions& options) {
options_ = options;
return true;
}
bool FakeVideoMediaSendChannel::SetMaxSendBandwidth(int bps) {
max_bps_ = bps;
return true;
}
void FakeVideoMediaSendChannel::GenerateSendKeyFrame(
uint32_t ssrc,
const std::vector<std::string>& rids) {}
FakeVideoMediaReceiveChannel::FakeVideoMediaReceiveChannel(
const VideoOptions& options,
TaskQueueBase* network_thread)
: RtpReceiveChannelHelper<VideoMediaReceiveChannelInterface>(
network_thread),
max_bps_(-1) {
SetOptions(options);
}
FakeVideoMediaReceiveChannel::~FakeVideoMediaReceiveChannel() = default;
const std::vector<VideoCodec>& FakeVideoMediaReceiveChannel::recv_codecs()
const {
return recv_codecs_;
}
bool FakeVideoMediaReceiveChannel::rendering() const {
return playout();
}
const VideoOptions& FakeVideoMediaReceiveChannel::options() const {
return options_;
}
const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
FakeVideoMediaReceiveChannel::sinks() const {
return sinks_;
}
int FakeVideoMediaReceiveChannel::max_bps() const {
return max_bps_;
}
bool FakeVideoMediaReceiveChannel::SetReceiverParameters(
const VideoReceiverParameters& params) {
set_recv_rtcp_parameters(params.rtcp);
return (SetRecvCodecs(params.codecs) &&
SetRecvRtpHeaderExtensions(params.extensions));
}
bool FakeVideoMediaReceiveChannel::SetSink(
uint32_t ssrc,
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
auto it = sinks_.find(ssrc);
if (it == sinks_.end()) {
return false;
}
it->second = sink;
return true;
}
void FakeVideoMediaReceiveChannel::SetDefaultSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {}
bool FakeVideoMediaReceiveChannel::HasSink(uint32_t ssrc) const {
return sinks_.find(ssrc) != sinks_.end() && sinks_.at(ssrc) != nullptr;
}
bool FakeVideoMediaReceiveChannel::HasSource(uint32_t ssrc) const {
return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
}
// Registers a receive stream. The base helper rejects duplicates; on success
// a sink slot (initially empty) and a zero playout delay are created for the
// stream's primary ssrc. `nullptr` replaces the C macro NULL.
bool FakeVideoMediaReceiveChannel::AddRecvStream(const StreamParams& sp) {
  if (!RtpReceiveChannelHelper<
          VideoMediaReceiveChannelInterface>::AddRecvStream(sp))
    return false;
  sinks_[sp.first_ssrc()] = nullptr;
  output_delays_[sp.first_ssrc()] = 0;
  return true;
}
bool FakeVideoMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
if (!RtpReceiveChannelHelper<
VideoMediaReceiveChannelInterface>::RemoveRecvStream(ssrc))
return false;
sinks_.erase(ssrc);
output_delays_.erase(ssrc);
return true;
}
std::vector<webrtc::RtpSource> FakeVideoMediaReceiveChannel::GetSources(
uint32_t ssrc) const {
return {};
}
// Records the base minimum playout delay for `ssrc`. Returns false when the
// ssrc has no receive stream. One lookup (iterator reused for the write)
// instead of find() followed by operator[]; the redundant `else` is gone.
bool FakeVideoMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
                                                                int delay_ms) {
  auto it = output_delays_.find(ssrc);
  if (it == output_delays_.end()) {
    return false;
  }
  it->second = delay_ms;
  return true;
}
// Returns the delay previously stored for `ssrc`, or nullopt when the ssrc
// is unknown.
absl::optional<int> FakeVideoMediaReceiveChannel::GetBaseMinimumPlayoutDelayMs(
    uint32_t ssrc) const {
  const auto delay_it = output_delays_.find(ssrc);
  if (delay_it == output_delays_.end()) {
    return absl::nullopt;
  }
  return delay_it->second;
}
bool FakeVideoMediaReceiveChannel::SetRecvCodecs(
const std::vector<VideoCodec>& codecs) {
if (fail_set_recv_codecs()) {
// Fake the failure in SetRecvCodecs.
return false;
}
recv_codecs_ = codecs;
return true;
}
bool FakeVideoMediaReceiveChannel::SetOptions(const VideoOptions& options) {
options_ = options;
return true;
}
bool FakeVideoMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
max_bps_ = bps;
return true;
}
void FakeVideoMediaReceiveChannel::SetRecordableEncodedFrameCallback(
uint32_t ssrc,
std::function<void(const webrtc::RecordableEncodedFrame&)> callback) {}
void FakeVideoMediaReceiveChannel::ClearRecordableEncodedFrameCallback(
uint32_t ssrc) {}
void FakeVideoMediaReceiveChannel::RequestRecvKeyFrame(uint32_t ssrc) {}
bool FakeVideoMediaReceiveChannel::GetStats(VideoMediaReceiveInfo* info) {
return false;
}
FakeVoiceEngine::FakeVoiceEngine() : fail_create_channel_(false) {
// Add a fake audio codec. Note that the name must not be "" as there are
// sanity checks against that.
SetCodecs({cricket::CreateAudioCodec(101, "fake_audio_codec", 8000, 1)});
}
void FakeVoiceEngine::Init() {}
rtc::scoped_refptr<webrtc::AudioState> FakeVoiceEngine::GetAudioState() const {
return rtc::scoped_refptr<webrtc::AudioState>();
}
std::unique_ptr<VoiceMediaSendChannelInterface>
FakeVoiceEngine::CreateSendChannel(webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) {
std::unique_ptr<FakeVoiceMediaSendChannel> ch =
std::make_unique<FakeVoiceMediaSendChannel>(options,
call->network_thread());
return ch;
}
std::unique_ptr<VoiceMediaReceiveChannelInterface>
FakeVoiceEngine::CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const AudioOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::AudioCodecPairId codec_pair_id) {
std::unique_ptr<FakeVoiceMediaReceiveChannel> ch =
std::make_unique<FakeVoiceMediaReceiveChannel>(options,
call->network_thread());
return ch;
}
const std::vector<AudioCodec>& FakeVoiceEngine::send_codecs() const {
return send_codecs_;
}
const std::vector<AudioCodec>& FakeVoiceEngine::recv_codecs() const {
return recv_codecs_;
}
void FakeVoiceEngine::SetCodecs(const std::vector<AudioCodec>& codecs) {
send_codecs_ = codecs;
recv_codecs_ = codecs;
}
void FakeVoiceEngine::SetRecvCodecs(const std::vector<AudioCodec>& codecs) {
recv_codecs_ = codecs;
}
void FakeVoiceEngine::SetSendCodecs(const std::vector<AudioCodec>& codecs) {
send_codecs_ = codecs;
}
int FakeVoiceEngine::GetInputLevel() {
return 0;
}
bool FakeVoiceEngine::StartAecDump(webrtc::FileWrapper file,
int64_t max_size_bytes) {
return false;
}
absl::optional<webrtc::AudioDeviceModule::Stats>
FakeVoiceEngine::GetAudioDeviceStats() {
return absl::nullopt;
}
void FakeVoiceEngine::StopAecDump() {}
std::vector<webrtc::RtpHeaderExtensionCapability>
FakeVoiceEngine::GetRtpHeaderExtensions() const {
return header_extensions_;
}
void FakeVoiceEngine::SetRtpHeaderExtensions(
std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions) {
header_extensions_ = std::move(header_extensions);
}
FakeVideoEngine::FakeVideoEngine()
: capture_(false), fail_create_channel_(false) {
// Add a fake video codec. Note that the name must not be "" as there are
// sanity checks against that.
send_codecs_.push_back(cricket::CreateVideoCodec(111, "fake_video_codec"));
recv_codecs_.push_back(cricket::CreateVideoCodec(111, "fake_video_codec"));
}
bool FakeVideoEngine::SetOptions(const VideoOptions& options) {
options_ = options;
return true;
}
std::unique_ptr<VideoMediaSendChannelInterface>
FakeVideoEngine::CreateSendChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
if (fail_create_channel_) {
return nullptr;
}
std::unique_ptr<FakeVideoMediaSendChannel> ch =
std::make_unique<FakeVideoMediaSendChannel>(options,
call->network_thread());
return ch;
}
std::unique_ptr<VideoMediaReceiveChannelInterface>
FakeVideoEngine::CreateReceiveChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options) {
if (fail_create_channel_) {
return nullptr;
}
std::unique_ptr<FakeVideoMediaReceiveChannel> ch =
std::make_unique<FakeVideoMediaReceiveChannel>(options,
call->network_thread());
return ch;
}
std::vector<VideoCodec> FakeVideoEngine::send_codecs(bool use_rtx) const {
return send_codecs_;
}
std::vector<VideoCodec> FakeVideoEngine::recv_codecs(bool use_rtx) const {
return recv_codecs_;
}
void FakeVideoEngine::SetSendCodecs(const std::vector<VideoCodec>& codecs) {
send_codecs_ = codecs;
}
void FakeVideoEngine::SetRecvCodecs(const std::vector<VideoCodec>& codecs) {
recv_codecs_ = codecs;
}
bool FakeVideoEngine::SetCapture(bool capture) {
capture_ = capture;
return true;
}
std::vector<webrtc::RtpHeaderExtensionCapability>
FakeVideoEngine::GetRtpHeaderExtensions() const {
return header_extensions_;
}
void FakeVideoEngine::SetRtpHeaderExtensions(
std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions) {
header_extensions_ = std::move(header_extensions);
}
FakeMediaEngine::FakeMediaEngine()
: CompositeMediaEngine(std::make_unique<FakeVoiceEngine>(),
std::make_unique<FakeVideoEngine>()),
voice_(static_cast<FakeVoiceEngine*>(&voice())),
video_(static_cast<FakeVideoEngine*>(&video())) {}
FakeMediaEngine::~FakeMediaEngine() {}
void FakeMediaEngine::SetAudioCodecs(const std::vector<AudioCodec>& codecs) {
voice_->SetCodecs(codecs);
}
void FakeMediaEngine::SetAudioRecvCodecs(
const std::vector<AudioCodec>& codecs) {
voice_->SetRecvCodecs(codecs);
}
void FakeMediaEngine::SetAudioSendCodecs(
const std::vector<AudioCodec>& codecs) {
voice_->SetSendCodecs(codecs);
}
void FakeMediaEngine::SetVideoCodecs(const std::vector<VideoCodec>& codecs) {
video_->SetSendCodecs(codecs);
video_->SetRecvCodecs(codecs);
}
void FakeMediaEngine::set_fail_create_channel(bool fail) {
voice_->fail_create_channel_ = fail;
video_->fail_create_channel_ = fail;
}
} // namespace cricket

View file

@ -0,0 +1,876 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
#define MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/any_invocable.h"
#include "api/call/audio_sink.h"
#include "api/media_types.h"
#include "media/base/audio_source.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_engine.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "media/engine/webrtc_video_engine.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/network_route.h"
#include "rtc_base/thread.h"
using webrtc::RtpExtension;
namespace cricket {
class FakeMediaEngine;
class FakeVideoEngine;
class FakeVoiceEngine;
// A common helper class that handles sending and receiving RTP/RTCP packets.
template <class Base>
class RtpReceiveChannelHelper : public Base, public MediaChannelUtil {
public:
explicit RtpReceiveChannelHelper(webrtc::TaskQueueBase* network_thread)
: MediaChannelUtil(network_thread),
playout_(false),
fail_set_recv_codecs_(false),
transport_overhead_per_packet_(0),
num_network_route_changes_(0) {}
virtual ~RtpReceiveChannelHelper() = default;
const std::vector<RtpExtension>& recv_extensions() {
return recv_extensions_;
}
bool playout() const { return playout_; }
const std::list<std::string>& rtp_packets() const { return rtp_packets_; }
const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }
bool SendRtcp(const void* data, size_t len) {
rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
kMaxRtpPacketLen);
return Base::SendRtcp(&packet, rtc::PacketOptions());
}
bool CheckRtp(const void* data, size_t len) {
bool success = !rtp_packets_.empty();
if (success) {
std::string packet = rtp_packets_.front();
rtp_packets_.pop_front();
success = (packet == std::string(static_cast<const char*>(data), len));
}
return success;
}
bool CheckRtcp(const void* data, size_t len) {
bool success = !rtcp_packets_.empty();
if (success) {
std::string packet = rtcp_packets_.front();
rtcp_packets_.pop_front();
success = (packet == std::string(static_cast<const char*>(data), len));
}
return success;
}
bool CheckNoRtp() { return rtp_packets_.empty(); }
bool CheckNoRtcp() { return rtcp_packets_.empty(); }
void set_fail_set_recv_codecs(bool fail) { fail_set_recv_codecs_ = fail; }
void ResetUnsignaledRecvStream() override {}
absl::optional<uint32_t> GetUnsignaledSsrc() const override {
return absl::nullopt;
}
void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override {}
virtual bool SetLocalSsrc(const StreamParams& sp) { return true; }
void OnDemuxerCriteriaUpdatePending() override {}
void OnDemuxerCriteriaUpdateComplete() override {}
bool AddRecvStream(const StreamParams& sp) override {
if (absl::c_linear_search(receive_streams_, sp)) {
return false;
}
receive_streams_.push_back(sp);
rtp_receive_parameters_[sp.first_ssrc()] =
CreateRtpParametersWithEncodings(sp);
return true;
}
bool RemoveRecvStream(uint32_t ssrc) override {
auto parameters_iterator = rtp_receive_parameters_.find(ssrc);
if (parameters_iterator != rtp_receive_parameters_.end()) {
rtp_receive_parameters_.erase(parameters_iterator);
}
return RemoveStreamBySsrc(&receive_streams_, ssrc);
}
webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override {
auto parameters_iterator = rtp_receive_parameters_.find(ssrc);
if (parameters_iterator != rtp_receive_parameters_.end()) {
return parameters_iterator->second;
}
return webrtc::RtpParameters();
}
webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override {
return webrtc::RtpParameters();
}
const std::vector<StreamParams>& recv_streams() const {
return receive_streams_;
}
bool HasRecvStream(uint32_t ssrc) const {
return GetStreamBySsrc(receive_streams_, ssrc) != nullptr;
}
const RtcpParameters& recv_rtcp_parameters() { return recv_rtcp_parameters_; }
int transport_overhead_per_packet() const {
return transport_overhead_per_packet_;
}
rtc::NetworkRoute last_network_route() const { return last_network_route_; }
int num_network_route_changes() const { return num_network_route_changes_; }
void set_num_network_route_changes(int changes) {
num_network_route_changes_ = changes;
}
void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
int64_t packet_time_us) {
rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
}
void SetFrameDecryptor(uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
frame_decryptor) override {}
void SetDepacketizerToDecoderFrameTransformer(
uint32_t ssrc,
rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
override {}
void SetInterface(MediaChannelNetworkInterface* iface) override {
network_interface_ = iface;
MediaChannelUtil::SetInterface(iface);
}
protected:
void set_playout(bool playout) { playout_ = playout; }
bool SetRecvRtpHeaderExtensions(const std::vector<RtpExtension>& extensions) {
recv_extensions_ = extensions;
return true;
}
void set_recv_rtcp_parameters(const RtcpParameters& params) {
recv_rtcp_parameters_ = params;
}
void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override {
rtp_packets_.push_back(
std::string(packet.Buffer().cdata<char>(), packet.size()));
}
bool fail_set_recv_codecs() const { return fail_set_recv_codecs_; }
private:
bool playout_;
std::vector<RtpExtension> recv_extensions_;
std::list<std::string> rtp_packets_;
std::list<std::string> rtcp_packets_;
std::vector<StreamParams> receive_streams_;
RtcpParameters recv_rtcp_parameters_;
std::map<uint32_t, webrtc::RtpParameters> rtp_receive_parameters_;
bool fail_set_recv_codecs_;
std::string rtcp_cname_;
int transport_overhead_per_packet_;
rtc::NetworkRoute last_network_route_;
int num_network_route_changes_;
MediaChannelNetworkInterface* network_interface_ = nullptr;
};
// A common helper class that handles sending and receiving RTP/RTCP packets.
// Implements the send half of a fake media channel on top of `Base`:
// it records every packet "sent", tracks send streams, per-SSRC
// RtpParameters, mute state, and network-route callbacks, and exposes
// accessors so tests can verify what the channel was asked to do.
template <class Base>
class RtpSendChannelHelper : public Base, public MediaChannelUtil {
 public:
  explicit RtpSendChannelHelper(webrtc::TaskQueueBase* network_thread)
      : MediaChannelUtil(network_thread),
        sending_(false),
        fail_set_send_codecs_(false),
        send_ssrc_(0),
        ready_to_send_(false),
        transport_overhead_per_packet_(0),
        num_network_route_changes_(0) {}
  virtual ~RtpSendChannelHelper() = default;

  // Test accessors.
  const std::vector<RtpExtension>& send_extensions() {
    return send_extensions_;
  }
  bool sending() const { return sending_; }
  const std::list<std::string>& rtp_packets() const { return rtp_packets_; }
  const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }

  // Forwards an RTP packet through MediaChannelUtil. Returns false (and sends
  // nothing) while the channel is not in the sending state.
  bool SendPacket(const void* data,
                  size_t len,
                  const rtc::PacketOptions& options) {
    if (!sending_) {
      return false;
    }
    rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
                                  kMaxRtpPacketLen);
    return MediaChannelUtil::SendPacket(&packet, options);
  }
  // Forwards an RTCP packet; unlike RTP, RTCP is sent regardless of the
  // sending state.
  bool SendRtcp(const void* data, size_t len) {
    rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
                                  kMaxRtpPacketLen);
    return MediaChannelUtil::SendRtcp(&packet, rtc::PacketOptions());
  }

  // Pops the oldest recorded RTP packet and compares it byte-for-byte against
  // `data`/`len`. Returns false if no packet is queued or the bytes differ.
  bool CheckRtp(const void* data, size_t len) {
    bool success = !rtp_packets_.empty();
    if (success) {
      std::string packet = rtp_packets_.front();
      rtp_packets_.pop_front();
      success = (packet == std::string(static_cast<const char*>(data), len));
    }
    return success;
  }
  // Same as CheckRtp(), but for the recorded RTCP packet queue.
  bool CheckRtcp(const void* data, size_t len) {
    bool success = !rtcp_packets_.empty();
    if (success) {
      std::string packet = rtcp_packets_.front();
      rtcp_packets_.pop_front();
      success = (packet == std::string(static_cast<const char*>(data), len));
    }
    return success;
  }
  bool CheckNoRtp() { return rtp_packets_.empty(); }
  bool CheckNoRtcp() { return rtcp_packets_.empty(); }
  void set_fail_set_send_codecs(bool fail) { fail_set_send_codecs_ = fail; }

  // Adds a send stream, creates default RtpParameters for its primary SSRC,
  // and notifies the SSRC-list callback (if set) with the full SSRC set.
  // Returns false when the exact same StreamParams was already added.
  bool AddSendStream(const StreamParams& sp) override {
    if (absl::c_linear_search(send_streams_, sp)) {
      return false;
    }
    send_streams_.push_back(sp);
    rtp_send_parameters_[sp.first_ssrc()] =
        CreateRtpParametersWithEncodings(sp);
    if (ssrc_list_changed_callback_) {
      std::set<uint32_t> ssrcs_in_use;
      for (const auto& send_stream : send_streams_) {
        ssrcs_in_use.insert(send_stream.first_ssrc());
      }
      ssrc_list_changed_callback_(ssrcs_in_use);
    }
    return true;
  }
  // Removes the stream and its stored RtpParameters. Returns false when the
  // SSRC does not match any added stream.
  bool RemoveSendStream(uint32_t ssrc) override {
    auto parameters_iterator = rtp_send_parameters_.find(ssrc);
    if (parameters_iterator != rtp_send_parameters_.end()) {
      rtp_send_parameters_.erase(parameters_iterator);
    }
    return RemoveStreamBySsrc(&send_streams_, ssrc);
  }
  void SetSsrcListChangedCallback(
      absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {
    ssrc_list_changed_callback_ = std::move(callback);
  }
  void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
    return MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
  }
  bool ExtmapAllowMixed() const override {
    return MediaChannelUtil::ExtmapAllowMixed();
  }
  // Returns the stored parameters for `ssrc`, or a default-constructed
  // RtpParameters when the SSRC is unknown.
  webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override {
    auto parameters_iterator = rtp_send_parameters_.find(ssrc);
    if (parameters_iterator != rtp_send_parameters_.end()) {
      return parameters_iterator->second;
    }
    return webrtc::RtpParameters();
  }
  // Validates and stores new parameters for `ssrc`, reporting the result via
  // `callback` (mirroring the async contract of the real channel).
  webrtc::RTCError SetRtpSendParameters(
      uint32_t ssrc,
      const webrtc::RtpParameters& parameters,
      webrtc::SetParametersCallback callback) override {
    auto parameters_iterator = rtp_send_parameters_.find(ssrc);
    if (parameters_iterator != rtp_send_parameters_.end()) {
      auto result = CheckRtpParametersInvalidModificationAndValues(
          parameters_iterator->second, parameters);
      if (!result.ok()) {
        return webrtc::InvokeSetParametersCallback(callback, result);
      }
      parameters_iterator->second = parameters;
      return webrtc::InvokeSetParametersCallback(callback,
                                                 webrtc::RTCError::OK());
    }
    // Replicate the behavior of the real media channel: return false
    // when setting parameters for unknown SSRCs.
    return InvokeSetParametersCallback(
        callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
  }
  bool IsStreamMuted(uint32_t ssrc) const {
    bool ret = muted_streams_.find(ssrc) != muted_streams_.end();
    // If |ssrc == 0| check if the first send stream is muted.
    if (!ret && ssrc == 0 && !send_streams_.empty()) {
      return muted_streams_.find(send_streams_[0].first_ssrc()) !=
             muted_streams_.end();
    }
    return ret;
  }
  const std::vector<StreamParams>& send_streams() const {
    return send_streams_;
  }
  bool HasSendStream(uint32_t ssrc) const {
    return GetStreamBySsrc(send_streams_, ssrc) != nullptr;
  }
  // TODO(perkj): This is to support legacy unit test that only check one
  // sending stream.
  uint32_t send_ssrc() const {
    if (send_streams_.empty())
      return 0;
    return send_streams_[0].first_ssrc();
  }
  const RtcpParameters& send_rtcp_parameters() { return send_rtcp_parameters_; }
  bool ready_to_send() const { return ready_to_send_; }
  int transport_overhead_per_packet() const {
    return transport_overhead_per_packet_;
  }
  rtc::NetworkRoute last_network_route() const { return last_network_route_; }
  int num_network_route_changes() const { return num_network_route_changes_; }
  void set_num_network_route_changes(int changes) {
    num_network_route_changes_ = changes;
  }
  // Records an incoming RTCP packet (compound RTCP flows both ways even on a
  // send channel) for later inspection via rtcp_packets().
  void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
                            int64_t packet_time_us) {
    rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
  }
  // Stuff that deals with encryptors, transformers and the like
  // (intentionally no-ops in this fake).
  void SetFrameEncryptor(uint32_t ssrc,
                         rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
                             frame_encryptor) override {}
  void SetEncoderToPacketizerFrameTransformer(
      uint32_t ssrc,
      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
      override {}
  // Stores `iface` (non-owning) and forwards it to MediaChannelUtil.
  void SetInterface(MediaChannelNetworkInterface* iface) override {
    network_interface_ = iface;
    MediaChannelUtil::SetInterface(iface);
  }
  bool HasNetworkInterface() const override {
    return network_interface_ != nullptr;
  }

 protected:
  // Marks `ssrc` as (un)muted. `ssrc == 0` is accepted even without a
  // matching stream; any other unknown SSRC is rejected.
  bool MuteStream(uint32_t ssrc, bool mute) {
    if (!HasSendStream(ssrc) && ssrc != 0) {
      return false;
    }
    if (mute) {
      muted_streams_.insert(ssrc);
    } else {
      muted_streams_.erase(ssrc);
    }
    return true;
  }
  bool set_sending(bool send) {
    sending_ = send;
    return true;
  }
  bool SetSendRtpHeaderExtensions(const std::vector<RtpExtension>& extensions) {
    send_extensions_ = extensions;
    return true;
  }
  void set_send_rtcp_parameters(const RtcpParameters& params) {
    send_rtcp_parameters_ = params;
  }
  void OnPacketSent(const rtc::SentPacket& sent_packet) override {}
  void OnReadyToSend(bool ready) override { ready_to_send_ = ready; }
  // Tracks the latest network route and its packet overhead for test
  // assertions.
  void OnNetworkRouteChanged(absl::string_view transport_name,
                             const rtc::NetworkRoute& network_route) override {
    last_network_route_ = network_route;
    ++num_network_route_changes_;
    transport_overhead_per_packet_ = network_route.packet_overhead;
  }
  bool fail_set_send_codecs() const { return fail_set_send_codecs_; }

 private:
  // TODO(bugs.webrtc.org/12783): This flag is used from more than one thread.
  // As a workaround for tsan, it's currently std::atomic but that might not
  // be the appropriate fix.
  std::atomic<bool> sending_;
  std::vector<RtpExtension> send_extensions_;
  std::list<std::string> rtp_packets_;
  std::list<std::string> rtcp_packets_;
  std::vector<StreamParams> send_streams_;
  RtcpParameters send_rtcp_parameters_;
  std::set<uint32_t> muted_streams_;
  std::map<uint32_t, webrtc::RtpParameters> rtp_send_parameters_;
  bool fail_set_send_codecs_;
  uint32_t send_ssrc_;
  std::string rtcp_cname_;
  bool ready_to_send_;
  int transport_overhead_per_packet_;
  rtc::NetworkRoute last_network_route_;
  int num_network_route_changes_;
  MediaChannelNetworkInterface* network_interface_ = nullptr;
  absl::AnyInvocable<void(const std::set<uint32_t>&)>
      ssrc_list_changed_callback_ = nullptr;
};
// Fake audio receive channel for unit tests. Records received codecs,
// streams, output volumes and playout delays, and exposes them through
// test accessors.
class FakeVoiceMediaReceiveChannel
    : public RtpReceiveChannelHelper<VoiceMediaReceiveChannelInterface> {
 public:
  // A recorded DTMF event: target SSRC, event code and duration.
  struct DtmfInfo {
    DtmfInfo(uint32_t ssrc, int event_code, int duration);
    uint32_t ssrc;
    int event_code;
    int duration;
  };
  FakeVoiceMediaReceiveChannel(const AudioOptions& options,
                               webrtc::TaskQueueBase* network_thread);
  virtual ~FakeVoiceMediaReceiveChannel();
  // Test methods
  const std::vector<AudioCodec>& recv_codecs() const;
  const std::vector<DtmfInfo>& dtmf_info_queue() const;
  const AudioOptions& options() const;
  int max_bps() const;
  bool HasSource(uint32_t ssrc) const;
  // Overrides
  VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
    return nullptr;
  }
  VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
    return this;
  }
  cricket::MediaType media_type() const override {
    return cricket::MEDIA_TYPE_AUDIO;
  }
  bool SetReceiverParameters(const AudioReceiverParameters& params) override;
  void SetPlayout(bool playout) override;
  bool AddRecvStream(const StreamParams& sp) override;
  bool RemoveRecvStream(uint32_t ssrc) override;
  bool SetOutputVolume(uint32_t ssrc, double volume) override;
  bool SetDefaultOutputVolume(double volume) override;
  bool GetOutputVolume(uint32_t ssrc, double* volume);
  bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
  absl::optional<int> GetBaseMinimumPlayoutDelayMs(
      uint32_t ssrc) const override;
  bool GetStats(VoiceMediaReceiveInfo* info,
                bool get_and_clear_legacy_stats) override;
  void SetRawAudioSink(
      uint32_t ssrc,
      std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
  void SetDefaultRawAudioSink(
      std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
  std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
  void SetReceiveNackEnabled(bool enabled) override {}
  void SetReceiveNonSenderRttEnabled(bool enabled) override {}

 private:
  // Adapter that plugs an AudioSource into this channel; forwards audio
  // callbacks and holds a non-owning pointer to the source.
  class VoiceChannelAudioSink : public AudioSource::Sink {
   public:
    explicit VoiceChannelAudioSink(AudioSource* source);
    ~VoiceChannelAudioSink() override;
    void OnData(const void* audio_data,
                int bits_per_sample,
                int sample_rate,
                size_t number_of_channels,
                size_t number_of_frames,
                absl::optional<int64_t> absolute_capture_timestamp_ms) override;
    void OnClose() override;
    int NumPreferredChannels() const override { return -1; }
    AudioSource* source() const;

   private:
    AudioSource* source_;  // Not owned.
  };
  bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
  bool SetMaxSendBandwidth(int bps);
  bool SetOptions(const AudioOptions& options);
  std::vector<AudioCodec> recv_codecs_;
  std::map<uint32_t, double> output_scalings_;  // Per-SSRC output volume.
  std::map<uint32_t, int> output_delays_;       // Per-SSRC playout delay.
  std::vector<DtmfInfo> dtmf_info_queue_;
  AudioOptions options_;
  std::map<uint32_t, std::unique_ptr<VoiceChannelAudioSink>> local_sinks_;
  std::unique_ptr<webrtc::AudioSinkInterface> sink_;
  int max_bps_;
};
// Fake audio send channel for unit tests. Records send codecs, DTMF events
// and attached audio sources, and exposes them through test accessors.
class FakeVoiceMediaSendChannel
    : public RtpSendChannelHelper<VoiceMediaSendChannelInterface> {
 public:
  // A recorded InsertDtmf() call: target SSRC, event code and duration.
  struct DtmfInfo {
    DtmfInfo(uint32_t ssrc, int event_code, int duration);
    uint32_t ssrc;
    int event_code;
    int duration;
  };
  FakeVoiceMediaSendChannel(const AudioOptions& options,
                            webrtc::TaskQueueBase* network_thread);
  ~FakeVoiceMediaSendChannel() override;
  // Test accessors.
  const std::vector<AudioCodec>& send_codecs() const;
  const std::vector<DtmfInfo>& dtmf_info_queue() const;
  const AudioOptions& options() const;
  int max_bps() const;
  bool HasSource(uint32_t ssrc) const;
  bool GetOutputVolume(uint32_t ssrc, double* volume);
  // Overrides
  VideoMediaSendChannelInterface* AsVideoSendChannel() override {
    return nullptr;
  }
  VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; }
  cricket::MediaType media_type() const override {
    return cricket::MEDIA_TYPE_AUDIO;
  }
  bool SetSenderParameters(const AudioSenderParameter& params) override;
  void SetSend(bool send) override;
  bool SetAudioSend(uint32_t ssrc,
                    bool enable,
                    const AudioOptions* options,
                    AudioSource* source) override;
  bool CanInsertDtmf() override;
  bool InsertDtmf(uint32_t ssrc, int event_code, int duration) override;
  // Feature flags are hard-coded off in this fake.
  bool SenderNackEnabled() const override { return false; }
  bool SenderNonSenderRttEnabled() const override { return false; }
  void SetReceiveNackEnabled(bool enabled) {}
  void SetReceiveNonSenderRttEnabled(bool enabled) {}
  bool SendCodecHasNack() const override { return false; }
  void SetSendCodecChangedCallback(
      absl::AnyInvocable<void()> callback) override {}
  absl::optional<Codec> GetSendCodec() const override;
  bool GetStats(VoiceMediaSendInfo* stats) override;

 private:
  // Adapter that plugs an AudioSource into this channel; forwards audio
  // callbacks and holds a non-owning pointer to the source.
  class VoiceChannelAudioSink : public AudioSource::Sink {
   public:
    explicit VoiceChannelAudioSink(AudioSource* source);
    ~VoiceChannelAudioSink() override;
    void OnData(const void* audio_data,
                int bits_per_sample,
                int sample_rate,
                size_t number_of_channels,
                size_t number_of_frames,
                absl::optional<int64_t> absolute_capture_timestamp_ms) override;
    void OnClose() override;
    int NumPreferredChannels() const override { return -1; }
    AudioSource* source() const;

   private:
    AudioSource* source_;  // Not owned.
  };
  bool SetSendCodecs(const std::vector<AudioCodec>& codecs);
  bool SetMaxSendBandwidth(int bps);
  bool SetOptions(const AudioOptions& options);
  bool SetLocalSource(uint32_t ssrc, AudioSource* source);
  std::vector<AudioCodec> send_codecs_;
  std::map<uint32_t, double> output_scalings_;
  std::map<uint32_t, int> output_delays_;
  std::vector<DtmfInfo> dtmf_info_queue_;
  AudioOptions options_;
  std::map<uint32_t, std::unique_ptr<VoiceChannelAudioSink>> local_sinks_;
  int max_bps_;
};
// A helper function to compare the FakeVoiceMediaSendChannel::DtmfInfo.
bool CompareDtmfInfo(const FakeVoiceMediaSendChannel::DtmfInfo& info,
uint32_t ssrc,
int event_code,
int duration);
// Fake video receive channel for unit tests. Records received codecs,
// streams, sinks and playout delays, and exposes them via test accessors.
class FakeVideoMediaReceiveChannel
    : public RtpReceiveChannelHelper<VideoMediaReceiveChannelInterface> {
 public:
  FakeVideoMediaReceiveChannel(const VideoOptions& options,
                               webrtc::TaskQueueBase* network_thread);
  virtual ~FakeVideoMediaReceiveChannel();
  VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
    return this;
  }
  VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
    return nullptr;
  }
  cricket::MediaType media_type() const override {
    return cricket::MEDIA_TYPE_VIDEO;
  }
  // Test accessors.
  const std::vector<VideoCodec>& recv_codecs() const;
  const std::vector<VideoCodec>& send_codecs() const;
  bool rendering() const;
  const VideoOptions& options() const;
  const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
  sinks() const;
  int max_bps() const;
  // Overrides
  bool SetReceiverParameters(const VideoReceiverParameters& params) override;
  bool SetSink(uint32_t ssrc,
               rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
  void SetDefaultSink(
      rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
  bool HasSink(uint32_t ssrc) const;
  void SetReceive(bool receive) override {}
  bool HasSource(uint32_t ssrc) const;
  bool AddRecvStream(const StreamParams& sp) override;
  bool RemoveRecvStream(uint32_t ssrc) override;
  std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
  bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
  absl::optional<int> GetBaseMinimumPlayoutDelayMs(
      uint32_t ssrc) const override;
  void SetRecordableEncodedFrameCallback(
      uint32_t ssrc,
      std::function<void(const webrtc::RecordableEncodedFrame&)> callback)
      override;
  void ClearRecordableEncodedFrameCallback(uint32_t ssrc) override;
  void RequestRecvKeyFrame(uint32_t ssrc) override;
  void SetReceiverFeedbackParameters(bool lntf_enabled,
                                     bool nack_enabled,
                                     webrtc::RtcpMode rtcp_mode,
                                     absl::optional<int> rtx_time) override {}
  bool GetStats(VideoMediaReceiveInfo* info) override;
  // Not supported by this fake; crashes if called.
  bool AddDefaultRecvStreamForTesting(const StreamParams& sp) override {
    RTC_CHECK_NOTREACHED();
    return false;
  }

 private:
  bool SetRecvCodecs(const std::vector<VideoCodec>& codecs);
  bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
  bool SetOptions(const VideoOptions& options);
  bool SetMaxSendBandwidth(int bps);
  std::vector<VideoCodec> recv_codecs_;
  std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*> sinks_;
  std::map<uint32_t, rtc::VideoSourceInterface<webrtc::VideoFrame>*> sources_;
  std::map<uint32_t, int> output_delays_;  // Per-SSRC playout delay.
  VideoOptions options_;
  int max_bps_;
};
// Fake video send channel for unit tests. Records send codecs, attached
// video sources and sender parameters, and exposes them via test accessors.
class FakeVideoMediaSendChannel
    : public RtpSendChannelHelper<VideoMediaSendChannelInterface> {
 public:
  FakeVideoMediaSendChannel(const VideoOptions& options,
                            webrtc::TaskQueueBase* network_thread);
  virtual ~FakeVideoMediaSendChannel();
  VideoMediaSendChannelInterface* AsVideoSendChannel() override { return this; }
  VoiceMediaSendChannelInterface* AsVoiceSendChannel() override {
    return nullptr;
  }
  cricket::MediaType media_type() const override {
    return cricket::MEDIA_TYPE_VIDEO;
  }
  // Test accessors.
  const std::vector<VideoCodec>& send_codecs() const;
  const std::vector<VideoCodec>& codecs() const;
  const VideoOptions& options() const;
  const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
  sinks() const;
  int max_bps() const;
  // Overrides
  bool SetSenderParameters(const VideoSenderParameters& params) override;
  absl::optional<Codec> GetSendCodec() const override;
  bool SetSend(bool send) override;
  bool SetVideoSend(
      uint32_t ssrc,
      const VideoOptions* options,
      rtc::VideoSourceInterface<webrtc::VideoFrame>* source) override;
  bool HasSource(uint32_t ssrc) const;
  void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) override;
  void GenerateSendKeyFrame(uint32_t ssrc,
                            const std::vector<std::string>& rids) override;
  // Feature queries are hard-coded in this fake.
  webrtc::RtcpMode SendCodecRtcpMode() const override {
    return webrtc::RtcpMode::kCompound;
  }
  void SetSendCodecChangedCallback(
      absl::AnyInvocable<void()> callback) override {}
  void SetSsrcListChangedCallback(
      absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {}
  bool SendCodecHasLntf() const override { return false; }
  bool SendCodecHasNack() const override { return false; }
  absl::optional<int> SendCodecRtxTime() const override {
    return absl::nullopt;
  }
  bool GetStats(VideoMediaSendInfo* info) override;

 private:
  bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
  bool SetOptions(const VideoOptions& options);
  bool SetMaxSendBandwidth(int bps);
  std::vector<VideoCodec> send_codecs_;
  std::map<uint32_t, rtc::VideoSourceInterface<webrtc::VideoFrame>*> sources_;
  VideoOptions options_;
  int max_bps_;
};
// Fake voice engine that hands out FakeVoiceMedia{Send,Receive}Channels and
// lets tests configure the advertised codecs and RTP header extensions.
class FakeVoiceEngine : public VoiceEngineInterface {
 public:
  FakeVoiceEngine();
  void Init() override;
  rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const override;
  std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const AudioOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::AudioCodecPairId codec_pair_id) override;
  std::unique_ptr<VoiceMediaReceiveChannelInterface> CreateReceiveChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const AudioOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::AudioCodecPairId codec_pair_id) override;
  // TODO(ossu): For proper testing, these should either be individually
  // settable or the voice engine should reference mockable factories.
  const std::vector<AudioCodec>& send_codecs() const override;
  const std::vector<AudioCodec>& recv_codecs() const override;
  void SetCodecs(const std::vector<AudioCodec>& codecs);
  void SetRecvCodecs(const std::vector<AudioCodec>& codecs);
  void SetSendCodecs(const std::vector<AudioCodec>& codecs);
  int GetInputLevel();
  bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override;
  void StopAecDump() override;
  absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
      override;
  std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
      const override;
  void SetRtpHeaderExtensions(
      std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions);

 private:
  std::vector<AudioCodec> recv_codecs_;
  std::vector<AudioCodec> send_codecs_;
  bool fail_create_channel_;
  std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions_;
  friend class FakeMediaEngine;
};
// Fake video engine that hands out FakeVideoMedia{Send,Receive}Channels and
// lets tests configure the advertised codecs and RTP header extensions.
class FakeVideoEngine : public VideoEngineInterface {
 public:
  FakeVideoEngine();
  bool SetOptions(const VideoOptions& options);
  std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
      override;
  std::unique_ptr<VideoMediaReceiveChannelInterface> CreateReceiveChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options) override;
  // Test accessors for the channels created above, by creation order.
  FakeVideoMediaSendChannel* GetSendChannel(size_t index);
  FakeVideoMediaReceiveChannel* GetReceiveChannel(size_t index);
  // The parameterless overrides include RTX codecs.
  std::vector<VideoCodec> send_codecs() const override {
    return send_codecs(true);
  }
  std::vector<VideoCodec> recv_codecs() const override {
    return recv_codecs(true);
  }
  std::vector<VideoCodec> send_codecs(bool include_rtx) const override;
  std::vector<VideoCodec> recv_codecs(bool include_rtx) const override;
  void SetSendCodecs(const std::vector<VideoCodec>& codecs);
  void SetRecvCodecs(const std::vector<VideoCodec>& codecs);
  bool SetCapture(bool capture);
  std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
      const override;
  void SetRtpHeaderExtensions(
      std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions);

 private:
  std::vector<VideoCodec> send_codecs_;
  std::vector<VideoCodec> recv_codecs_;
  bool capture_;
  VideoOptions options_;
  bool fail_create_channel_;
  std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions_;
  friend class FakeMediaEngine;
};
// Composite fake media engine that bundles a FakeVoiceEngine and a
// FakeVideoEngine and forwards codec configuration to both.
class FakeMediaEngine : public CompositeMediaEngine {
 public:
  FakeMediaEngine();
  ~FakeMediaEngine() override;
  void SetAudioCodecs(const std::vector<AudioCodec>& codecs);
  void SetAudioRecvCodecs(const std::vector<AudioCodec>& codecs);
  void SetAudioSendCodecs(const std::vector<AudioCodec>& codecs);
  void SetVideoCodecs(const std::vector<VideoCodec>& codecs);
  void set_fail_create_channel(bool fail);
  FakeVoiceEngine* fake_voice_engine() { return voice_; }
  FakeVideoEngine* fake_video_engine() { return video_; }

 private:
  // Owned by CompositeMediaEngine; raw pointers kept for test access.
  // NOTE(review): ownership is established outside this view — confirm.
  FakeVoiceEngine* const voice_;
  FakeVideoEngine* const video_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_MEDIA_ENGINE_H_

View file

@ -0,0 +1,232 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
#define MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
#include <map>
#include <set>
#include <utility>
#include <vector>
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "media/base/media_channel.h"
#include "media/base/rtp_utils.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/byte_order.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/dscp.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread.h"
#include "rtc_base/time_utils.h"
namespace cricket {
// Fake NetworkInterface that sends/receives RTP/RTCP packets.
// Records every packet passed to SendPacket()/SendRtcp() so tests can
// inspect them, and can forward RTP packets to a destination receive
// channel (posted as a task on the thread this object was created on).
class FakeNetworkInterface : public MediaChannelNetworkInterface {
 public:
  FakeNetworkInterface()
      : thread_(rtc::Thread::Current()),
        dest_(NULL),
        conf_(false),
        sendbuf_size_(-1),
        recvbuf_size_(-1),
        dscp_(rtc::DSCP_NO_CHANGE) {}
  // Sets the channel that forwarded RTP packets are delivered to. Not owned.
  void SetDestination(MediaReceiveChannelInterface* dest) { dest_ = dest; }
  // Conference mode is a mode where instead of simply forwarding the packets,
  // the transport will send multiple copies of the packet with the specified
  // SSRCs. This allows us to simulate receiving media from multiple sources.
  void SetConferenceMode(bool conf, const std::vector<uint32_t>& ssrcs)
      RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    conf_ = conf;
    conf_sent_ssrcs_ = ssrcs;
  }
  // Total byte count of all recorded RTP packets.
  int NumRtpBytes() RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    int bytes = 0;
    for (size_t i = 0; i < rtp_packets_.size(); ++i) {
      bytes += static_cast<int>(rtp_packets_[i].size());
    }
    return bytes;
  }
  // Byte count of recorded RTP packets carrying `ssrc`.
  int NumRtpBytes(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    int bytes = 0;
    GetNumRtpBytesAndPackets(ssrc, &bytes, NULL);
    return bytes;
  }
  int NumRtpPackets() RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    return static_cast<int>(rtp_packets_.size());
  }
  int NumRtpPackets(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    int packets = 0;
    GetNumRtpBytesAndPackets(ssrc, NULL, &packets);
    return packets;
  }
  // Number of distinct SSRCs seen across all sent RTP packets.
  int NumSentSsrcs() RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    return static_cast<int>(sent_ssrcs_.size());
  }
  // Returns a copy of the index-th recorded RTP packet, or an empty buffer
  // when the index is out of range.
  rtc::CopyOnWriteBuffer GetRtpPacket(int index) RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    if (index >= static_cast<int>(rtp_packets_.size())) {
      return {};
    }
    return rtp_packets_[index];
  }
  int NumRtcpPackets() RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    return static_cast<int>(rtcp_packets_.size());
  }
  // Note: callers are responsible for deleting the returned buffer.
  // Returns NULL when the index is out of range.
  const rtc::CopyOnWriteBuffer* GetRtcpPacket(int index)
      RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    if (index >= static_cast<int>(rtcp_packets_.size())) {
      return NULL;
    }
    return new rtc::CopyOnWriteBuffer(rtcp_packets_[index]);
  }
  // Most recently applied socket options / packet options.
  int sendbuf_size() const { return sendbuf_size_; }
  int recvbuf_size() const { return recvbuf_size_; }
  rtc::DiffServCodePoint dscp() const { return dscp_; }
  rtc::PacketOptions options() const { return options_; }

 protected:
  // Records the RTP packet and (a)synchronously forwards it to `dest_`.
  // In conference mode, one copy is posted per configured SSRC, with the
  // packet's SSRC field rewritten in place before each post.
  virtual bool SendPacket(rtc::CopyOnWriteBuffer* packet,
                          const rtc::PacketOptions& options)
      RTC_LOCKS_EXCLUDED(mutex_) {
    if (!webrtc::IsRtpPacket(*packet)) {
      return false;
    }
    webrtc::MutexLock lock(&mutex_);
    sent_ssrcs_[webrtc::ParseRtpSsrc(*packet)]++;
    options_ = options;
    rtp_packets_.push_back(*packet);
    if (conf_) {
      for (size_t i = 0; i < conf_sent_ssrcs_.size(); ++i) {
        SetRtpSsrc(conf_sent_ssrcs_[i], *packet);
        PostPacket(*packet);
      }
    } else {
      PostPacket(*packet);
    }
    return true;
  }
  // Records the RTCP packet. RTCP is never forwarded to `dest_`.
  virtual bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
                        const rtc::PacketOptions& options)
      RTC_LOCKS_EXCLUDED(mutex_) {
    webrtc::MutexLock lock(&mutex_);
    rtcp_packets_.push_back(*packet);
    options_ = options;
    if (!conf_) {
      // don't worry about RTCP in conf mode for now
      RTC_LOG(LS_VERBOSE) << "Dropping RTCP packet, they are not handled by "
                             "MediaChannel anymore.";
    }
    return true;
  }
  // Remembers buffer-size and DSCP options for test inspection; always
  // reports success.
  virtual int SetOption(SocketType type, rtc::Socket::Option opt, int option) {
    if (opt == rtc::Socket::OPT_SNDBUF) {
      sendbuf_size_ = option;
    } else if (opt == rtc::Socket::OPT_RCVBUF) {
      recvbuf_size_ = option;
    } else if (opt == rtc::Socket::OPT_DSCP) {
      dscp_ = static_cast<rtc::DiffServCodePoint>(option);
    }
    return 0;
  }
  // Posts delivery of `packet` to `dest_` on the creation thread. The task is
  // tied to `safety_`, so it is dropped if this object is destroyed first.
  void PostPacket(rtc::CopyOnWriteBuffer packet) {
    thread_->PostTask(
        SafeTask(safety_.flag(), [this, packet = std::move(packet)]() mutable {
          if (dest_) {
            webrtc::RtpPacketReceived parsed_packet;
            if (parsed_packet.Parse(packet)) {
              parsed_packet.set_arrival_time(
                  webrtc::Timestamp::Micros(rtc::TimeMicros()));
              dest_->OnPacketReceived(std::move(parsed_packet));
            } else {
              RTC_DCHECK_NOTREACHED();
            }
          }
        }));
  }

 private:
  // Overwrites the SSRC field (bytes 8-11 of the RTP header) in `buffer`.
  void SetRtpSsrc(uint32_t ssrc, rtc::CopyOnWriteBuffer& buffer) {
    RTC_CHECK_GE(buffer.size(), 12);
    rtc::SetBE32(buffer.MutableData() + 8, ssrc);
  }
  // Tallies bytes/packets recorded for `ssrc`; either out-param may be NULL.
  // Caller must hold mutex_.
  void GetNumRtpBytesAndPackets(uint32_t ssrc, int* bytes, int* packets) {
    if (bytes) {
      *bytes = 0;
    }
    if (packets) {
      *packets = 0;
    }
    for (size_t i = 0; i < rtp_packets_.size(); ++i) {
      if (ssrc == webrtc::ParseRtpSsrc(rtp_packets_[i])) {
        if (bytes) {
          *bytes += static_cast<int>(rtp_packets_[i].size());
        }
        if (packets) {
          ++(*packets);
        }
      }
    }
  }
  webrtc::TaskQueueBase* thread_;
  MediaReceiveChannelInterface* dest_;  // Not owned.
  bool conf_;
  // The ssrcs used in sending out packets in conference mode.
  std::vector<uint32_t> conf_sent_ssrcs_;
  // Map to track counts of packets that have been sent per ssrc.
  // This includes packets that are dropped.
  std::map<uint32_t, uint32_t> sent_ssrcs_;
  // Map to track packet-number that needs to be dropped per ssrc.
  // NOTE(review): drop_map_ is never referenced in this class — confirm it is
  // still needed.
  std::map<uint32_t, std::set<uint32_t> > drop_map_;
  webrtc::Mutex mutex_;
  std::vector<rtc::CopyOnWriteBuffer> rtp_packets_;
  std::vector<rtc::CopyOnWriteBuffer> rtcp_packets_;
  int sendbuf_size_;
  int recvbuf_size_;
  rtc::DiffServCodePoint dscp_;
  // Options of the most recently sent packet.
  rtc::PacketOptions options_;
  webrtc::ScopedTaskSafety safety_;
};

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_rtp.h"
#include <stdint.h>
#include <string.h>
#include "absl/algorithm/container.h"
#include "rtc_base/checks.h"
#include "test/gtest.h"
// Walks the one-byte RTP header extensions (RFC 5285 "BEDE" block) of two
// packets in lockstep and EXPECTs that each extension element is
// byte-identical between them — except elements whose ID is listed in
// `encrypted_headers`, which are EXPECTed to differ unless `expect_equal`
// is true. Layout (offsets, element sizes) is always assumed identical.
void CompareHeaderExtensions(const char* packet1,
                             size_t packet1_size,
                             const char* packet2,
                             size_t packet2_size,
                             const std::vector<int>& encrypted_headers,
                             bool expect_equal) {
  // Sanity check: packets must be large enough to contain the RTP header and
  // extensions header.
  RTC_CHECK_GE(packet1_size, 12 + 4);
  RTC_CHECK_GE(packet2_size, 12 + 4);
  // RTP extension headers are the same.
  EXPECT_EQ(0, memcmp(packet1 + 12, packet2 + 12, 4));
  // Check for one-byte header extensions.
  EXPECT_EQ('\xBE', packet1[12]);
  EXPECT_EQ('\xDE', packet1[13]);
  // Determine position and size of extension headers.
  // NOTE(review): bytes are read through (possibly signed) char, so a length
  // byte >= 0x80 would sign-extend; assumes small extension blocks — confirm.
  size_t extension_words = packet1[14] << 8 | packet1[15];
  const char* extension_data1 = packet1 + 12 + 4;
  const char* extension_end1 = extension_data1 + extension_words * 4;
  const char* extension_data2 = packet2 + 12 + 4;
  // Sanity check: packets must be large enough to contain the RTP header
  // extensions.
  RTC_CHECK_GE(packet1_size, 12 + 4 + extension_words * 4);
  RTC_CHECK_GE(packet2_size, 12 + 4 + extension_words * 4);
  while (extension_data1 < extension_end1) {
    // One-byte element header: high nibble = ID, low nibble = length - 1.
    uint8_t id = (*extension_data1 & 0xf0) >> 4;
    uint8_t len = (*extension_data1 & 0x0f) + 1;
    extension_data1++;
    extension_data2++;
    EXPECT_LE(extension_data1, extension_end1);
    if (id == 15) {
      // Finished parsing.
      break;
    }
    // The header extension doesn't get encrypted if the id is not in the
    // list of header extensions to encrypt.
    if (expect_equal || !absl::c_linear_search(encrypted_headers, id)) {
      EXPECT_EQ(0, memcmp(extension_data1, extension_data2, len));
    } else {
      EXPECT_NE(0, memcmp(extension_data1, extension_data2, len));
    }
    extension_data1 += len;
    extension_data2 += len;
    // Skip padding.
    while (extension_data1 < extension_end1 && *extension_data1 == 0) {
      extension_data1++;
      extension_data2++;
    }
  }
}

View file

@ -0,0 +1,301 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Fake RTP and RTCP packets to use in unit tests.
#ifndef MEDIA_BASE_FAKE_RTP_H_
#define MEDIA_BASE_FAKE_RTP_H_
#include <cstddef> // size_t
#include <vector>
// A typical PCMU RTP packet.
// PT=0, SN=1, TS=0, SSRC=1
// all data FF
// Layout: 12-byte RTP header (first line) followed by a 160-byte 0xFF
// payload (20 ms of PCMU at 8 kHz).
static const unsigned char kPcmuFrame[] = {
    0x80, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF,
};
// Extension IDs matching the one-byte extension elements in
// kPcmuFrameWithExtensions (ids 1 and 4).
static const int kHeaderExtensionIDs[] = {1, 4};
// A typical PCMU RTP packet carrying a one-byte-header RTP header
// extension block (RFC 5285, section 4.2).
// PT=0, SN=1, TS=0, SSRC=1; the X bit is set (first byte 0x90).
// Payload: 160 bytes of 0xFF.
static const unsigned char kPcmuFrameWithExtensions[] = {
    // 12-byte RTP header with the extension bit set.
    0x90, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    // RFC 5285, section 4.2. One-Byte Header magic cookie, then the
    // extension block length: 6 * 32 bits.
    0xBE, 0xDE, 0x00, 0x06,
    // 8 bytes header id 1.
    0x17, 0x41, 0x42, 0x73, 0xA4, 0x75, 0x26, 0x27, 0x48,
    // 3 bytes header id 2.
    0x22, 0x00, 0x00, 0xC8,
    // 1 byte header id 3.
    0x30, 0x8E,
    // 7 bytes header id 4.
    0x46, 0x55, 0x99, 0x63, 0x86, 0xB3, 0x95, 0xFB,
    // 1 byte header padding.
    0x00,
    // Payload data (160 bytes of 0xFF).
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF,
};
// A typical Receiver Report RTCP packet.
// PT=RR, LN=1, SSRC=1
// send SSRC=2, all other fields 0
// 32 bytes total: first byte 0x80 (V=2), second byte 0xc9 (PT=201, RR).
static const unsigned char kRtcpReport[] = {
    0x80, 0xc9, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
    0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
// A typical H264 RTP packet.
// PT = 97, TS = 0, Seq = 1, SSRC = 2
// H264 - NRI = 1, Type = 1, bit stream = FF
// The 13th byte (0x21) is the NAL header; the remainder is 0xFF payload.
static const unsigned char kH264Packet[] = {
    0x80, 0x61, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
    0x21, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF,
};
// PT= 101, SN=2, TS=3, SSRC = 4
// Payload: four zero bytes followed by bytes 0x01..0x08.
static const unsigned char kDataPacket[] = {
    0x80, 0x65, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04,
    0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
};
// Compares the RTP header extensions of two packets.
// This expects both packets to be based on kPcmuFrameWithExtensions.
// Header extensions with an id in "encrypted_headers" are expected to be
// different in the packets unless "expect_equal" is set to "true".
// `packet1_size`/`packet2_size` are the byte lengths of the two buffers.
void CompareHeaderExtensions(const char* packet1,
                             size_t packet1_size,
                             const char* packet2,
                             size_t packet2_size,
                             const std::vector<int>& encrypted_headers,
                             bool expect_equal);
#endif // MEDIA_BASE_FAKE_RTP_H_

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/fake_video_renderer.h"
namespace cricket {
namespace {
// Returns true if every sample of `plane` (`width` x `height` samples,
// with rows `stride` bytes apart) lies in the inclusive range
// [`min`, `max`]. Shared by the Y/U/V checks below to avoid three copies
// of the same scan loop.
bool PlaneWithinRange(const uint8_t* plane,
                      int width,
                      int height,
                      int32_t stride,
                      uint8_t min,
                      uint8_t max) {
  const uint8_t* row = plane;
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j) {
      const uint8_t value = row[j];
      if (value < min || value > max) {
        return false;
      }
    }
    row += stride;
  }
  return true;
}

// Returns true if `frame` looks like a black frame: Y within a small
// tolerance of black, U and V exactly at the neutral value 128.
// Returns false if the frame has no pixel buffer.
bool CheckFrameColorYuv(const webrtc::VideoFrame& frame) {
  // TODO(zhurunz) Check with VP8 team to see if we can remove this
  // tolerance on Y values. Some unit tests produce Y values close
  // to 16 rather than close to zero, for supposedly black frames.
  // Largest value observed is 34, e.g., running
  // PeerConnectionIntegrationTest.SendAndReceive16To9AspectRatio.
  static constexpr uint8_t y_min = 0;
  static constexpr uint8_t y_max = 48;
  static constexpr uint8_t u_min = 128;
  static constexpr uint8_t u_max = 128;
  static constexpr uint8_t v_min = 128;
  static constexpr uint8_t v_max = 128;
  if (!frame.video_frame_buffer()) {
    return false;
  }
  rtc::scoped_refptr<const webrtc::I420BufferInterface> i420_buffer =
      frame.video_frame_buffer()->ToI420();
  // Y plane, scanned at the frame's own dimensions.
  if (!PlaneWithinRange(i420_buffer->DataY(), frame.width(), frame.height(),
                        i420_buffer->StrideY(), y_min, y_max)) {
    return false;
  }
  // U and V planes share the chroma dimensions.
  const int chroma_width = i420_buffer->ChromaWidth();
  const int chroma_height = i420_buffer->ChromaHeight();
  return PlaneWithinRange(i420_buffer->DataU(), chroma_width, chroma_height,
                          i420_buffer->StrideU(), u_min, u_max) &&
         PlaneWithinRange(i420_buffer->DataV(), chroma_width, chroma_height,
                          i420_buffer->StrideV(), v_min, v_max);
}
} // namespace
FakeVideoRenderer::FakeVideoRenderer() = default;
// Records the metadata of the rendered frame under `mutex_` so the const
// accessors can report the most recently rendered frame from any thread.
void FakeVideoRenderer::OnFrame(const webrtc::VideoFrame& frame) {
  webrtc::MutexLock lock(&mutex_);
  // True when the frame is (close to) uniformly black; see
  // CheckFrameColorYuv above.
  black_frame_ = CheckFrameColorYuv(frame);
  ++num_rendered_frames_;
  width_ = frame.width();
  height_ = frame.height();
  rotation_ = frame.rotation();
  timestamp_us_ = frame.timestamp_us();
}
} // namespace cricket

View file

@ -0,0 +1,73 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
#define MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
#include <stdint.h>
#include "api/scoped_refptr.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video/video_sink_interface.h"
#include "rtc_base/synchronization/mutex.h"
namespace cricket {
// Faked video renderer that has a callback for actions on rendering.
// Records the metadata of every frame delivered via OnFrame and exposes
// it through thread-safe accessors; intended for unit tests.
class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  FakeVideoRenderer();

  // Captures the frame's metadata (dimensions, rotation, timestamp,
  // blackness) under `mutex_`.
  void OnFrame(const webrtc::VideoFrame& frame) override;

  // Width of the last rendered frame; 0 before any frame arrived.
  int width() const {
    webrtc::MutexLock lock(&mutex_);
    return width_;
  }
  // Height of the last rendered frame; 0 before any frame arrived.
  int height() const {
    webrtc::MutexLock lock(&mutex_);
    return height_;
  }

  // Rotation of the last rendered frame.
  webrtc::VideoRotation rotation() const {
    webrtc::MutexLock lock(&mutex_);
    return rotation_;
  }

  // Render-time timestamp (microseconds) of the last rendered frame.
  int64_t timestamp_us() const {
    webrtc::MutexLock lock(&mutex_);
    return timestamp_us_;
  }
  // Total number of frames delivered through OnFrame.
  int num_rendered_frames() const {
    webrtc::MutexLock lock(&mutex_);
    return num_rendered_frames_;
  }
  // Whether the last rendered frame was judged (close to) all black.
  bool black_frame() const {
    webrtc::MutexLock lock(&mutex_);
    return black_frame_;
  }

 private:
  int width_ = 0;
  int height_ = 0;
  webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
  int64_t timestamp_us_ = 0;
  int num_rendered_frames_ = 0;
  bool black_frame_ = false;
  // Guards all members above; OnFrame and the accessors may race.
  mutable webrtc::Mutex mutex_;
};
} // namespace cricket
#endif // MEDIA_BASE_FAKE_VIDEO_RENDERER_H_

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,303 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/media_channel_impl.h"
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "api/audio_options.h"
#include "api/media_stream_interface.h"
#include "api/rtc_error.h"
#include "api/rtp_sender_interface.h"
#include "api/units/time_delta.h"
#include "api/video/video_timing.h"
#include "api/video_codecs/scalability_mode.h"
#include "common_video/include/quality_limitation_reason.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "modules/rtp_rtcp/include/report_block_data.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Invokes `callback` with `error` if the callback is set, then clears it
// so it is invoked at most once. Returns `error` so call sites can
// forward the result.
webrtc::RTCError InvokeSetParametersCallback(SetParametersCallback& callback,
                                             RTCError error) {
  if (callback) {
    std::move(callback)(error);
    // Explicitly reset the moved-from callback so later boolean checks on
    // it are well-defined.
    callback = nullptr;
  }
  return error;
}
} // namespace webrtc
namespace cricket {
using webrtc::FrameDecryptorInterface;
using webrtc::FrameEncryptorInterface;
using webrtc::FrameTransformerInterface;
using webrtc::PendingTaskSafetyFlag;
using webrtc::SafeTask;
using webrtc::TaskQueueBase;
using webrtc::VideoTrackInterface;
// VideoOptions defaults to no content hint.
VideoOptions::VideoOptions()
    : content_hint(VideoTrackInterface::ContentHint::kNone) {}
VideoOptions::~VideoOptions() = default;
// MediaChannelUtil wraps a TransportForMediaChannels helper bound to the
// given network thread; `enable_dscp` controls DSCP marking of packets.
MediaChannelUtil::MediaChannelUtil(TaskQueueBase* network_thread,
                                   bool enable_dscp)
    : transport_(network_thread, enable_dscp) {}

MediaChannelUtil::~MediaChannelUtil() {}
// Forwards the network interface to the transport helper (which enforces
// that this runs on the network thread).
void MediaChannelUtil::SetInterface(MediaChannelNetworkInterface* iface) {
  transport_.SetInterface(iface);
}

// Returns the absolute sendtime extension id value from media channel.
// The base implementation returns -1 (extension not in use).
int MediaChannelUtil::GetRtpSendTimeExtnId() const {
  return -1;
}
// Sends an RTP packet through the transport helper. Used by some tests
// only.
bool MediaChannelUtil::SendPacket(rtc::CopyOnWriteBuffer* packet,
                                  const rtc::PacketOptions& options) {
  return transport_.DoSendPacket(packet, /*rtcp=*/false, options);
}

// Sends an RTCP packet through the transport helper. Used by some tests
// only.
bool MediaChannelUtil::SendRtcp(rtc::CopyOnWriteBuffer* packet,
                                const rtc::PacketOptions& options) {
  return transport_.DoSendPacket(packet, /*rtcp=*/true, options);
}
// Sets a socket option on the RTP or RTCP socket via the transport
// helper; returns the interface's result, or -1 when no interface is set.
int MediaChannelUtil::SetOption(MediaChannelNetworkInterface::SocketType type,
                                rtc::Socket::Option opt,
                                int option) {
  return transport_.SetOption(type, opt, option);
}
// Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
// Set to true if it's allowed to mix one- and two-byte RTP header extensions
// in the same stream. The setter and getter must only be called from
// worker_thread.
void MediaChannelUtil::SetExtmapAllowMixed(bool extmap_allow_mixed) {
  extmap_allow_mixed_ = extmap_allow_mixed;
}

// Returns the last value passed to SetExtmapAllowMixed; defaults to
// false.
bool MediaChannelUtil::ExtmapAllowMixed() const {
  return extmap_allow_mixed_;
}
// Returns true if a non-null network interface is attached. Must be
// called on the network thread (checked inside the transport helper).
bool MediaChannelUtil::HasNetworkInterface() const {
  return transport_.HasNetworkInterface();
}

// True if DSCP marking was enabled at construction time.
bool MediaChannelUtil::DscpEnabled() const {
  return transport_.DscpEnabled();
}

// Sets the DSCP code point applied to both RTP and RTCP sockets.
void MediaChannelUtil::SetPreferredDscp(rtc::DiffServCodePoint new_dscp) {
  transport_.SetPreferredDscp(new_dscp);
}
// Out-of-line defaulted special members for the media info and parameter
// structs.
MediaSenderInfo::MediaSenderInfo() = default;
MediaSenderInfo::~MediaSenderInfo() = default;

MediaReceiverInfo::MediaReceiverInfo() = default;
MediaReceiverInfo::~MediaReceiverInfo() = default;

VoiceSenderInfo::VoiceSenderInfo() = default;
VoiceSenderInfo::~VoiceSenderInfo() = default;

VoiceReceiverInfo::VoiceReceiverInfo() = default;
VoiceReceiverInfo::~VoiceReceiverInfo() = default;

VideoSenderInfo::VideoSenderInfo() = default;
VideoSenderInfo::~VideoSenderInfo() = default;

VideoReceiverInfo::VideoReceiverInfo() = default;
VideoReceiverInfo::~VideoReceiverInfo() = default;

VoiceMediaInfo::VoiceMediaInfo() = default;
VoiceMediaInfo::~VoiceMediaInfo() = default;

VideoMediaInfo::VideoMediaInfo() = default;
VideoMediaInfo::~VideoMediaInfo() = default;

VideoMediaSendInfo::VideoMediaSendInfo() = default;
VideoMediaSendInfo::~VideoMediaSendInfo() = default;

VoiceMediaSendInfo::VoiceMediaSendInfo() = default;
VoiceMediaSendInfo::~VoiceMediaSendInfo() = default;

VideoMediaReceiveInfo::VideoMediaReceiveInfo() = default;
VideoMediaReceiveInfo::~VideoMediaReceiveInfo() = default;

VoiceMediaReceiveInfo::VoiceMediaReceiveInfo() = default;
VoiceMediaReceiveInfo::~VoiceMediaReceiveInfo() = default;

AudioSenderParameter::AudioSenderParameter() = default;
AudioSenderParameter::~AudioSenderParameter() = default;
// Extends the base sender-parameter dump with the audio options.
std::map<std::string, std::string> AudioSenderParameter::ToStringMap() const {
  std::map<std::string, std::string> result = SenderParameters::ToStringMap();
  result.insert_or_assign("options", options.ToString());
  return result;
}
VideoSenderParameters::VideoSenderParameters() = default;
VideoSenderParameters::~VideoSenderParameters() = default;

// Extends the base sender-parameter dump with the conference-mode flag.
std::map<std::string, std::string> VideoSenderParameters::ToStringMap() const {
  std::map<std::string, std::string> result = SenderParameters::ToStringMap();
  result.insert_or_assign("conference_mode",
                          std::string(conference_mode ? "yes" : "no"));
  return result;
}
// --------------------- MediaChannelUtil::TransportForMediaChannels -----

// The task-safety flag starts detached and inactive; it is activated when
// a network interface is attached in SetInterface().
MediaChannelUtil::TransportForMediaChannels::TransportForMediaChannels(
    webrtc::TaskQueueBase* network_thread,
    bool enable_dscp)
    : network_safety_(webrtc::PendingTaskSafetyFlag::CreateDetachedInactive()),
      network_thread_(network_thread),
      enable_dscp_(enable_dscp) {}
// The network interface must already have been detached (via
// SetInterface(nullptr)) before destruction.
MediaChannelUtil::TransportForMediaChannels::~TransportForMediaChannels() {
  RTC_DCHECK(!network_interface_);
}
// Sends an RTCP packet, deferring to the network thread when called from
// elsewhere. Always returns true; the result of a deferred send is not
// reported back.
bool MediaChannelUtil::TransportForMediaChannels::SendRtcp(
    rtc::ArrayView<const uint8_t> packet) {
  // The lambda owns a copy of the packet (kMaxRtpPacketLen is the buffer
  // capacity argument), so the data stays valid if the send is posted.
  auto send = [this, packet = rtc::CopyOnWriteBuffer(
                         packet, kMaxRtpPacketLen)]() mutable {
    rtc::PacketOptions rtc_options;
    if (DscpEnabled()) {
      rtc_options.dscp = PreferredDscp();
    }
    DoSendPacket(&packet, true, rtc_options);
  };

  if (network_thread_->IsCurrent()) {
    send();
  } else {
    network_thread_->PostTask(SafeTask(network_safety_, std::move(send)));
  }
  return true;
}
// Sends an RTP packet, deferring to the network thread when called from
// elsewhere. The relevant `options` fields and a copy of the packet are
// captured by value so the deferred send needs no external state. Always
// returns true; the result of a deferred send is not reported back.
bool MediaChannelUtil::TransportForMediaChannels::SendRtp(
    rtc::ArrayView<const uint8_t> packet,
    const webrtc::PacketOptions& options) {
  auto send =
      [this, packet_id = options.packet_id,
       included_in_feedback = options.included_in_feedback,
       included_in_allocation = options.included_in_allocation,
       batchable = options.batchable,
       last_packet_in_batch = options.last_packet_in_batch,
       packet = rtc::CopyOnWriteBuffer(packet, kMaxRtpPacketLen)]() mutable {
        // Rebuild rtc::PacketOptions from the captured fields; DSCP is read
        // on the network thread where it is guarded.
        rtc::PacketOptions rtc_options;
        rtc_options.packet_id = packet_id;
        if (DscpEnabled()) {
          rtc_options.dscp = PreferredDscp();
        }
        rtc_options.info_signaled_after_sent.included_in_feedback =
            included_in_feedback;
        rtc_options.info_signaled_after_sent.included_in_allocation =
            included_in_allocation;
        rtc_options.batchable = batchable;
        rtc_options.last_packet_in_batch = last_packet_in_batch;
        DoSendPacket(&packet, false, rtc_options);
      };

  // TODO(bugs.webrtc.org/11993): ModuleRtpRtcpImpl2 and related classes (e.g.
  // RTCPSender) aren't aware of the network thread and may trigger calls to
  // this function from different threads. Update those classes to keep
  // network traffic on the network thread.
  if (network_thread_->IsCurrent()) {
    send();
  } else {
    network_thread_->PostTask(SafeTask(network_safety_, std::move(send)));
  }
  return true;
}
// Attaches (or, with a null `iface`, detaches) the network interface.
// Toggles the task-safety flag so pending posted sends are dropped after
// detach, then re-applies the DSCP setting to the new interface.
void MediaChannelUtil::TransportForMediaChannels::SetInterface(
    MediaChannelNetworkInterface* iface) {
  RTC_DCHECK_RUN_ON(network_thread_);
  iface ? network_safety_->SetAlive() : network_safety_->SetNotAlive();
  network_interface_ = iface;
  UpdateDscp();
}
void MediaChannelUtil::TransportForMediaChannels::UpdateDscp() {
rtc::DiffServCodePoint value =
enable_dscp_ ? preferred_dscp_ : rtc::DSCP_DEFAULT;
int ret = SetOptionLocked(MediaChannelNetworkInterface::ST_RTP,
rtc::Socket::OPT_DSCP, value);
if (ret == 0)
SetOptionLocked(MediaChannelNetworkInterface::ST_RTCP,
rtc::Socket::OPT_DSCP, value);
}
// Hands `packet` to the attached network interface as RTP or RTCP.
// Returns false when no interface is attached; otherwise returns the
// interface's send result. Must run on the network thread.
bool MediaChannelUtil::TransportForMediaChannels::DoSendPacket(
    rtc::CopyOnWriteBuffer* packet,
    bool rtcp,
    const rtc::PacketOptions& options) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (!network_interface_) {
    return false;
  }
  if (rtcp) {
    return network_interface_->SendRtcp(packet, options);
  }
  return network_interface_->SendPacket(packet, options);
}
// Sets a socket option on the given socket type. Must be called on the
// network thread; see SetOptionLocked for the return convention.
int MediaChannelUtil::TransportForMediaChannels::SetOption(
    MediaChannelNetworkInterface::SocketType type,
    rtc::Socket::Option opt,
    int option) {
  RTC_DCHECK_RUN_ON(network_thread_);
  return SetOptionLocked(type, opt, option);
}
// Applies the socket option through the attached network interface.
// Returns -1 when no interface is attached.
int MediaChannelUtil::TransportForMediaChannels::SetOptionLocked(
    MediaChannelNetworkInterface::SocketType type,
    rtc::Socket::Option opt,
    int option) {
  return network_interface_ ? network_interface_->SetOption(type, opt, option)
                            : -1;
}
// Updates the preferred DSCP value, hopping to the network thread when
// needed, and re-applies it to the sockets if it actually changed.
void MediaChannelUtil::TransportForMediaChannels::SetPreferredDscp(
    rtc::DiffServCodePoint new_dscp) {
  if (!network_thread_->IsCurrent()) {
    // This is currently the common path as the derived channel classes
    // get called on the worker thread. There are still some tests though
    // that call directly on the network thread.
    network_thread_->PostTask(SafeTask(
        network_safety_, [this, new_dscp]() { SetPreferredDscp(new_dscp); }));
    return;
  }

  RTC_DCHECK_RUN_ON(network_thread_);
  // No-op when the value is unchanged.
  if (new_dscp == preferred_dscp_)
    return;

  preferred_dscp_ = new_dscp;
  UpdateDscp();
}
} // namespace cricket

View file

@ -0,0 +1,181 @@
/*
* Copyright 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
#define MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/audio_options.h"
#include "api/call/audio_sink.h"
#include "api/call/transport.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/frame_transformer_interface.h"
#include "api/media_types.h"
#include "api/rtc_error.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/transport/rtp/rtp_source.h"
#include "api/video/recordable_encoded_frame.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/stream_params.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/async_packet_socket.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/dscp.h"
#include "rtc_base/logging.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/socket.h"
#include "rtc_base/thread_annotations.h"
// This file contains the base classes for classes that implement
// the channel interfaces.
// These implementation classes used to be the exposed interface names,
// but this is in the process of being changed.
namespace cricket {
// The `MediaChannelUtil` class provides functionality that is used by
// multiple MediaChannel-like objects, of both sending and receiving
// types.
class MediaChannelUtil {
 public:
  MediaChannelUtil(webrtc::TaskQueueBase* network_thread,
                   bool enable_dscp = false);
  virtual ~MediaChannelUtil();

  // Returns the absolute sendtime extension id value from media channel.
  // The base implementation returns -1 (extension not in use).
  virtual int GetRtpSendTimeExtnId() const;

  // The webrtc::Transport implementation required by Call(); forwards
  // RTP/RTCP packets to the attached network interface.
  webrtc::Transport* transport() { return &transport_; }

  // Base methods to send packet using MediaChannelNetworkInterface.
  // These methods are used by some tests only.
  bool SendPacket(rtc::CopyOnWriteBuffer* packet,
                  const rtc::PacketOptions& options);
  bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
                const rtc::PacketOptions& options);
  int SetOption(MediaChannelNetworkInterface::SocketType type,
                rtc::Socket::Option opt,
                int option);

  // Functions that form part of one or more interface classes.
  // Not marked override, since this class does not inherit from the
  // interfaces.

  // Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
  // Set to true if it's allowed to mix one- and two-byte RTP header extensions
  // in the same stream. The setter and getter must only be called from
  // worker_thread.
  void SetExtmapAllowMixed(bool extmap_allow_mixed);
  bool ExtmapAllowMixed() const;

  void SetInterface(MediaChannelNetworkInterface* iface);
  // Returns `true` if a non-null MediaChannelNetworkInterface pointer is held.
  // Must be called on the network thread.
  bool HasNetworkInterface() const;

 protected:
  bool DscpEnabled() const;
  void SetPreferredDscp(rtc::DiffServCodePoint new_dscp);

 private:
  // Implementation of the webrtc::Transport interface required
  // by Call().
  class TransportForMediaChannels : public webrtc::Transport {
   public:
    TransportForMediaChannels(webrtc::TaskQueueBase* network_thread,
                              bool enable_dscp);

    virtual ~TransportForMediaChannels();

    // Implementation of webrtc::Transport
    bool SendRtp(rtc::ArrayView<const uint8_t> packet,
                 const webrtc::PacketOptions& options) override;
    bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;

    // Not implementation of webrtc::Transport
    void SetInterface(MediaChannelNetworkInterface* iface);

    int SetOption(MediaChannelNetworkInterface::SocketType type,
                  rtc::Socket::Option opt,
                  int option);

    bool DoSendPacket(rtc::CopyOnWriteBuffer* packet,
                      bool rtcp,
                      const rtc::PacketOptions& options);

    bool HasNetworkInterface() const {
      RTC_DCHECK_RUN_ON(network_thread_);
      return network_interface_ != nullptr;
    }
    bool DscpEnabled() const { return enable_dscp_; }

    void SetPreferredDscp(rtc::DiffServCodePoint new_dscp);

   private:
    // This is the DSCP value used for both RTP and RTCP channels if DSCP is
    // enabled. It can be changed at any time via `SetPreferredDscp`.
    rtc::DiffServCodePoint PreferredDscp() const {
      RTC_DCHECK_RUN_ON(network_thread_);
      return preferred_dscp_;
    }

    // Apply the preferred DSCP setting to the underlying network interface RTP
    // and RTCP channels. If DSCP is disabled, then apply the default DSCP
    // value.
    void UpdateDscp() RTC_RUN_ON(network_thread_);

    int SetOptionLocked(MediaChannelNetworkInterface::SocketType type,
                        rtc::Socket::Option opt,
                        int option) RTC_RUN_ON(network_thread_);

    // Keeps sends posted to the network thread from running after the
    // interface has been detached.
    const rtc::scoped_refptr<webrtc::PendingTaskSafetyFlag> network_safety_
        RTC_PT_GUARDED_BY(network_thread_);
    webrtc::TaskQueueBase* const network_thread_;
    const bool enable_dscp_;
    MediaChannelNetworkInterface* network_interface_
        RTC_GUARDED_BY(network_thread_) = nullptr;
    rtc::DiffServCodePoint preferred_dscp_ RTC_GUARDED_BY(network_thread_) =
        rtc::DSCP_DEFAULT;
  };

  bool extmap_allow_mixed_ = false;
  TransportForMediaChannels transport_;
};
} // namespace cricket
#endif // MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_

View file

@ -0,0 +1,98 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_CONFIG_H_
#define MEDIA_BASE_MEDIA_CONFIG_H_
namespace cricket {
// Construction-time settings, passed on when creating
// MediaChannels.
struct MediaConfig {
  // Set DSCP value on packets. This flag comes from the
  // PeerConnection constraint 'googDscp'.
  // TODO(https://crbug.com/1315574): Remove the ability to set it in Chromium
  // and delete this flag.
  bool enable_dscp = true;

  // Video-specific config.
  struct Video {
    // Enable WebRTC CPU Overuse Detection. This flag comes from the
    // PeerConnection constraint 'googCpuOveruseDetection'.
    // TODO(https://crbug.com/1315569): Remove the ability to set it in Chromium
    // and delete this flag.
    bool enable_cpu_adaptation = true;

    // Enable WebRTC suspension of video. No video frames will be sent
    // when the bitrate is below the configured minimum bitrate. This
    // flag comes from the PeerConnection constraint
    // 'googSuspendBelowMinBitrate', and WebRtcVideoChannel copies it
    // to VideoSendStream::Config::suspend_below_min_bitrate.
    // TODO(https://crbug.com/1315564): Remove the ability to set it in Chromium
    // and delete this flag.
    bool suspend_below_min_bitrate = false;

    // Enable buffering and playout timing smoothing of decoded frames.
    // If set to true, then WebRTC will buffer and potentially drop decoded
    // frames in order to keep a smooth rendering.
    // If set to false, then WebRTC will hand over the frame from the decoder
    // to the renderer as soon as possible, meaning that the renderer is
    // responsible for smooth rendering.
    // Note that even if this flag is set to false, dropping of frames can
    // still happen pre-decode, e.g., dropping of higher temporal layers.
    // This flag comes from the PeerConnection RtcConfiguration.
    bool enable_prerenderer_smoothing = true;

    // Enables periodic bandwidth probing in application-limited region.
    bool periodic_alr_bandwidth_probing = false;

    // Enables the new method to estimate the cpu load from encoding, used for
    // cpu adaptation. This flag is intended to be controlled primarily by a
    // Chrome origin-trial.
    // TODO(bugs.webrtc.org/8504): If all goes well, the flag will be removed
    // together with the old method of estimation.
    bool experiment_cpu_load_estimator = false;

    // Time interval between RTCP report for video.
    int rtcp_report_interval_ms = 1000;

    // Enables send packet batching from the egress RTP sender.
    bool enable_send_packet_batching = false;
  } video;

  // Audio-specific config.
  struct Audio {
    // Time interval between RTCP report for audio.
    int rtcp_report_interval_ms = 5000;
  } audio;

  // Field-wise equality over every setting above, grouped per section for
  // readability.
  bool operator==(const MediaConfig& o) const {
    const bool dscp_equal = enable_dscp == o.enable_dscp;
    const bool video_equal =
        video.enable_cpu_adaptation == o.video.enable_cpu_adaptation &&
        video.suspend_below_min_bitrate == o.video.suspend_below_min_bitrate &&
        video.enable_prerenderer_smoothing ==
            o.video.enable_prerenderer_smoothing &&
        video.periodic_alr_bandwidth_probing ==
            o.video.periodic_alr_bandwidth_probing &&
        video.experiment_cpu_load_estimator ==
            o.video.experiment_cpu_load_estimator &&
        video.rtcp_report_interval_ms == o.video.rtcp_report_interval_ms &&
        video.enable_send_packet_batching ==
            o.video.enable_send_packet_batching;
    const bool audio_equal =
        audio.rtcp_report_interval_ms == o.audio.rtcp_report_interval_ms;
    return dscp_equal && video_equal && audio_equal;
  }

  bool operator!=(const MediaConfig& o) const { return !(*this == o); }
};
} // namespace cricket
#endif // MEDIA_BASE_MEDIA_CONFIG_H_

View file

@ -0,0 +1,149 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/media_constants.h"
namespace cricket {

// 90 kHz RTP clock rate used for video.
const int kVideoCodecClockrate = 90000;

const int kVideoMtu = 1200;
// 256 KiB socket buffers for video RTP.
const int kVideoRtpSendBufferSize = 262144;
const int kVideoRtpRecvBufferSize = 262144;

// Default CPU thresholds (fractions of full load).
const float kHighSystemCpuThreshold = 0.85f;
const float kLowSystemCpuThreshold = 0.65f;
const float kProcessCpuThreshold = 0.10f;

const char kRedCodecName[] = "red";
const char kUlpfecCodecName[] = "ulpfec";
const char kMultiplexCodecName[] = "multiplex";

// TODO(brandtr): Change this to 'flexfec' when we are confident that the
// header format is not changing anymore.
const char kFlexfecCodecName[] = "flexfec-03";

// draft-ietf-payload-flexible-fec-scheme-02.txt
const char kFlexfecFmtpRepairWindow[] = "repair-window";

// RFC 4588 RTP Retransmission Payload Format
const char kRtxCodecName[] = "rtx";
const char kCodecParamRtxTime[] = "rtx-time";
const char kCodecParamAssociatedPayloadType[] = "apt";

const char kCodecParamAssociatedCodecName[] = "acn";

// Parameters that do not follow the key-value convention
// are treated as having the empty string as key.
const char kCodecParamNotInNameValueFormat[] = "";

const char kOpusCodecName[] = "opus";
const char kL16CodecName[] = "L16";
const char kG722CodecName[] = "G722";
const char kIlbcCodecName[] = "ILBC";
const char kPcmuCodecName[] = "PCMU";
const char kPcmaCodecName[] = "PCMA";
const char kCnCodecName[] = "CN";
const char kDtmfCodecName[] = "telephone-event";

// draft-spittka-payload-rtp-opus-03.txt
const char kCodecParamPTime[] = "ptime";
const char kCodecParamMaxPTime[] = "maxptime";
const char kCodecParamMinPTime[] = "minptime";
const char kCodecParamSPropStereo[] = "sprop-stereo";
const char kCodecParamStereo[] = "stereo";
const char kCodecParamUseInbandFec[] = "useinbandfec";
const char kCodecParamUseDtx[] = "usedtx";
const char kCodecParamMaxAverageBitrate[] = "maxaveragebitrate";
const char kCodecParamMaxPlaybackRate[] = "maxplaybackrate";

const char kParamValueTrue[] = "1";
const char kParamValueEmpty[] = "";

// Opus codec parameter defaults.
const int kOpusDefaultMaxPTime = 120;
const int kOpusDefaultPTime = 20;
const int kOpusDefaultMinPTime = 3;
const int kOpusDefaultSPropStereo = 0;
const int kOpusDefaultStereo = 0;
const int kOpusDefaultUseInbandFec = 0;
const int kOpusDefaultUseDtx = 0;
const int kOpusDefaultMaxPlaybackRate = 48000;

// Preferred values when offering audio.
const int kPreferredMaxPTime = 120;
const int kPreferredMinPTime = 10;
const int kPreferredSPropStereo = 0;
const int kPreferredStereo = 0;
const int kPreferredUseInbandFec = 0;

const char kPacketizationParamRaw[] = "raw";

// RTCP feedback parameter names.
const char kRtcpFbParamLntf[] = "goog-lntf";
const char kRtcpFbParamNack[] = "nack";
const char kRtcpFbNackParamPli[] = "pli";
const char kRtcpFbParamRemb[] = "goog-remb";
const char kRtcpFbParamTransportCc[] = "transport-cc";
const char kRtcpFbParamCcm[] = "ccm";
const char kRtcpFbCcmParamFir[] = "fir";
const char kRtcpFbParamRrtr[] = "rrtr";
const char kCodecParamMaxBitrate[] = "x-google-max-bitrate";
const char kCodecParamMinBitrate[] = "x-google-min-bitrate";
const char kCodecParamStartBitrate[] = "x-google-start-bitrate";
const char kCodecParamMaxQuantization[] = "x-google-max-quantization";

const char kComfortNoiseCodecName[] = "CN";

const char kVp8CodecName[] = "VP8";
const char kVp9CodecName[] = "VP9";
const char kAv1CodecName[] = "AV1";
const char kH264CodecName[] = "H264";
const char kH265CodecName[] = "H265";

// RFC 6184 RTP Payload Format for H.264 video
const char kH264FmtpProfileLevelId[] = "profile-level-id";
const char kH264FmtpLevelAsymmetryAllowed[] = "level-asymmetry-allowed";
const char kH264FmtpPacketizationMode[] = "packetization-mode";
const char kH264FmtpSpropParameterSets[] = "sprop-parameter-sets";
const char kH264FmtpSpsPpsIdrInKeyframe[] = "sps-pps-idr-in-keyframe";
const char kH264ProfileLevelConstrainedBaseline[] = "42e01f";
const char kH264ProfileLevelConstrainedHigh[] = "640c1f";

// RFC 7798 RTP Payload Format for H.265 video
const char kH265FmtpProfileSpace[] = "profile-space";
const char kH265FmtpTierFlag[] = "tier-flag";
const char kH265FmtpProfileId[] = "profile-id";
const char kH265FmtpLevelId[] = "level-id";
const char kH265FmtpProfileCompatibilityIndicator[] =
    "profile-compatibility-indicator";
const char kH265FmtpInteropConstraints[] = "interop-constraints";
const char kH265FmtpTxMode[] = "tx-mode";

// draft-ietf-payload-vp9
const char kVP9ProfileId[] = "profile-id";

// https://aomediacodec.github.io/av1-rtp-spec/
const char kAv1FmtpProfile[] = "profile";
const char kAv1FmtpLevelIdx[] = "level-idx";
const char kAv1FmtpTier[] = "tier";

const int kDefaultVideoMaxFramerate = 60;
// Max encode quantizer for VP8/9 and AV1 encoders assuming libvpx/libaom API
// range [0, 63]
const int kDefaultVideoMaxQpVpx = 56;
// Max encode quantizer for H264/5 assuming the bitstream range [0, 51].
const int kDefaultVideoMaxQpH26x = 51;

const size_t kConferenceMaxNumSpatialLayers = 3;
const size_t kConferenceMaxNumTemporalLayers = 3;
const size_t kConferenceDefaultNumTemporalLayers = 3;

// RFC 3556 and RFC 3890
const char kApplicationSpecificBandwidth[] = "AS";
const char kTransportSpecificBandwidth[] = "TIAS";
}  // namespace cricket

View file

@ -0,0 +1,170 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_CONSTANTS_H_
#define MEDIA_BASE_MEDIA_CONSTANTS_H_
#include <stddef.h>
#include "rtc_base/system/rtc_export.h"
// This file contains constants related to media.
namespace cricket {
// Video transport constants (values defined in media_constants.cc).
extern const int kVideoCodecClockrate;
extern const int kVideoMtu;
extern const int kVideoRtpSendBufferSize;
extern const int kVideoRtpRecvBufferSize;
// Default CPU thresholds.
extern const float kHighSystemCpuThreshold;
extern const float kLowSystemCpuThreshold;
extern const float kProcessCpuThreshold;
// Redundancy/FEC and multiplex codec names.
extern const char kRedCodecName[];
extern const char kUlpfecCodecName[];
extern const char kFlexfecCodecName[];
extern const char kMultiplexCodecName[];
extern const char kFlexfecFmtpRepairWindow[];
// RTX (retransmission) codec name and related codec parameters.
extern const char kRtxCodecName[];
extern const char kCodecParamRtxTime[];
extern const char kCodecParamAssociatedPayloadType[];
extern const char kCodecParamAssociatedCodecName[];
extern const char kCodecParamNotInNameValueFormat[];
// Audio codec names.
extern const char kOpusCodecName[];
extern const char kL16CodecName[];
extern const char kG722CodecName[];
extern const char kIlbcCodecName[];
extern const char kPcmuCodecName[];
extern const char kPcmaCodecName[];
extern const char kCnCodecName[];
extern const char kDtmfCodecName[];
// Attribute parameters
extern const char kCodecParamPTime[];
extern const char kCodecParamMaxPTime[];
// fmtp parameters
extern const char kCodecParamMinPTime[];
extern const char kCodecParamSPropStereo[];
extern const char kCodecParamStereo[];
extern const char kCodecParamUseInbandFec[];
extern const char kCodecParamUseDtx[];
extern const char kCodecParamMaxAverageBitrate[];
extern const char kCodecParamMaxPlaybackRate[];
extern const char kParamValueTrue[];
// Parameters are stored as parameter/value pairs. For parameters that do not
// have a value, `kParamValueEmpty` should be used as value.
extern const char kParamValueEmpty[];
// opus parameters.
// Default value for maxptime according to
// http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
extern const int kOpusDefaultMaxPTime;
extern const int kOpusDefaultPTime;
extern const int kOpusDefaultMinPTime;
extern const int kOpusDefaultSPropStereo;
extern const int kOpusDefaultStereo;
extern const int kOpusDefaultUseInbandFec;
extern const int kOpusDefaultUseDtx;
extern const int kOpusDefaultMaxPlaybackRate;
// Preferred values in this code base. Note that they may differ from the
// default values in
// http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
// Only frames larger or equal to 10 ms are currently supported in this code
// base.
extern const int kPreferredMaxPTime;
extern const int kPreferredMinPTime;
extern const int kPreferredSPropStereo;
extern const int kPreferredStereo;
extern const int kPreferredUseInbandFec;
extern const char kPacketizationParamRaw[];
// rtcp-fb message in its first experimental stages. Documentation pending.
extern const char kRtcpFbParamLntf[];
// rtcp-fb messages according to RFC 4585
extern const char kRtcpFbParamNack[];
extern const char kRtcpFbNackParamPli[];
// rtcp-fb messages according to
// http://tools.ietf.org/html/draft-alvestrand-rmcat-remb-00
extern const char kRtcpFbParamRemb[];
// rtcp-fb messages according to
// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
extern const char kRtcpFbParamTransportCc[];
// ccm submessages according to RFC 5104
extern const char kRtcpFbParamCcm[];
extern const char kRtcpFbCcmParamFir[];
// Receiver reference time report
// https://tools.ietf.org/html/rfc3611 section 4.4
extern const char kRtcpFbParamRrtr[];
// Google specific parameters
extern const char kCodecParamMaxBitrate[];
extern const char kCodecParamMinBitrate[];
extern const char kCodecParamStartBitrate[];
extern const char kCodecParamMaxQuantization[];
// Video codec names as they appear in SDP.
extern const char kComfortNoiseCodecName[];
RTC_EXPORT extern const char kVp8CodecName[];
RTC_EXPORT extern const char kVp9CodecName[];
RTC_EXPORT extern const char kAv1CodecName[];
RTC_EXPORT extern const char kH264CodecName[];
RTC_EXPORT extern const char kH265CodecName[];
// RFC 6184 RTP Payload Format for H.264 video
RTC_EXPORT extern const char kH264FmtpProfileLevelId[];
RTC_EXPORT extern const char kH264FmtpLevelAsymmetryAllowed[];
RTC_EXPORT extern const char kH264FmtpPacketizationMode[];
extern const char kH264FmtpSpropParameterSets[];
extern const char kH264FmtpSpsPpsIdrInKeyframe[];
extern const char kH264ProfileLevelConstrainedBaseline[];
extern const char kH264ProfileLevelConstrainedHigh[];
// RFC 7798 RTP Payload Format for H.265 video.
// According to RFC 7742, the sprop parameters MUST NOT be included
// in SDP generated by WebRTC, so for H.265 we don't handle them, though
// current H.264 implementation honors them when receiving
// sprop-parameter-sets in SDP.
RTC_EXPORT extern const char kH265FmtpProfileSpace[];
RTC_EXPORT extern const char kH265FmtpTierFlag[];
RTC_EXPORT extern const char kH265FmtpProfileId[];
RTC_EXPORT extern const char kH265FmtpLevelId[];
RTC_EXPORT extern const char kH265FmtpProfileCompatibilityIndicator[];
RTC_EXPORT extern const char kH265FmtpInteropConstraints[];
RTC_EXPORT extern const char kH265FmtpTxMode[];
// draft-ietf-payload-vp9
extern const char kVP9ProfileId[];
// https://aomediacodec.github.io/av1-rtp-spec/
extern const char kAv1FmtpProfile[];
extern const char kAv1FmtpLevelIdx[];
extern const char kAv1FmtpTier[];
// Default maximum video frame rate.
extern const int kDefaultVideoMaxFramerate;
// Max encoder quantizers; see media_constants.cc for the assumed API ranges.
extern const int kDefaultVideoMaxQpVpx;
extern const int kDefaultVideoMaxQpH26x;
// Spatial/temporal layer limits used for conference-mode video.
extern const size_t kConferenceMaxNumSpatialLayers;
extern const size_t kConferenceMaxNumTemporalLayers;
extern const size_t kConferenceDefaultNumTemporalLayers;
// RFC 3556 and RFC 3890 bandwidth modifiers ("AS" and "TIAS").
extern const char kApplicationSpecificBandwidth[];
extern const char kTransportSpecificBandwidth[];
}  // namespace cricket
#endif  // MEDIA_BASE_MEDIA_CONSTANTS_H_

View file

@ -0,0 +1,291 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/media_engine.h"
#include <stddef.h>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "api/video/video_bitrate_allocation.h"
#include "rtc_base/checks.h"
#include "rtc_base/string_encode.h"
namespace cricket {
// RtpCapabilities is a plain data holder (see media_engine.h); the special
// members are defaulted out of line.
RtpCapabilities::RtpCapabilities() = default;
RtpCapabilities::~RtpCapabilities() = default;
// Returns RtpParameters containing exactly one default-constructed encoding.
webrtc::RtpParameters CreateRtpParametersWithOneEncoding() {
  webrtc::RtpParameters parameters;
  parameters.encodings.emplace_back();
  return parameters;
}
// Builds RtpParameters with one encoding per primary SSRC of `sp`. When `sp`
// carries RIDs, each encoding additionally gets the matching RID; the RTCP
// CNAME is copied from `sp` as well.
webrtc::RtpParameters CreateRtpParametersWithEncodings(StreamParams sp) {
  std::vector<uint32_t> primary_ssrcs;
  sp.GetPrimarySsrcs(&primary_ssrcs);

  webrtc::RtpParameters parameters;
  parameters.encodings.resize(primary_ssrcs.size());
  for (size_t i = 0; i < primary_ssrcs.size(); ++i) {
    parameters.encodings[i].ssrc = primary_ssrcs[i];
  }

  // If RIDs are present there must be exactly one per encoding.
  const std::vector<RidDescription>& rids = sp.rids();
  RTC_DCHECK(rids.empty() || rids.size() == primary_ssrcs.size());
  for (size_t i = 0; i < rids.size(); ++i) {
    parameters.encodings[i].rid = rids[i].rid;
  }

  parameters.rtcp.cname = sp.cname;
  return parameters;
}
// Returns the RTP header extensions reported by `query_interface` that are
// enabled by default, i.e. every capability whose direction is not kStopped.
std::vector<webrtc::RtpExtension> GetDefaultEnabledRtpHeaderExtensions(
    const RtpHeaderExtensionQueryInterface& query_interface) {
  std::vector<webrtc::RtpExtension> enabled;
  for (const auto& capability : query_interface.GetRtpHeaderExtensions()) {
    if (capability.direction == webrtc::RtpTransceiverDirection::kStopped) {
      continue;
    }
    enabled.emplace_back(capability.uri, *capability.preferred_id);
  }
  return enabled;
}
// Validates the per-encoding `codec` and `scalability_mode` fields of
// `rtp_parameters` against `codec_preferences`. Returns RTCError::OK() when
// every encoding's codec is one of the preferences and every scalability mode
// is supported by the relevant codec.
// Note: `send_codec` is taken by value; the assignment inside the loop only
// updates the local copy, which is then used when validating scalability
// modes of this and later encodings.
webrtc::RTCError CheckScalabilityModeValues(
    const webrtc::RtpParameters& rtp_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec) {
  using webrtc::RTCErrorType;
  if (codec_preferences.empty()) {
    // This is an audio sender or an extra check in the stack where the codec
    // list is not available and we can't check the scalability_mode values.
    return webrtc::RTCError::OK();
  }
  for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
    if (rtp_parameters.encodings[i].codec) {
      // A per-encoding codec request must match one of the preferred codecs.
      bool codecFound = false;
      for (const cricket::VideoCodec& codec : codec_preferences) {
        if (codec.MatchesRtpCodec(*rtp_parameters.encodings[i].codec)) {
          codecFound = true;
          send_codec = codec;
          break;
        }
      }
      if (!codecFound) {
        LOG_AND_RETURN_ERROR(
            RTCErrorType::INVALID_MODIFICATION,
            "Attempted to use an unsupported codec for layer " +
                std::to_string(i));
      }
    }
    if (rtp_parameters.encodings[i].scalability_mode) {
      if (!send_codec) {
        // No resolved send codec: accept the mode if any preferred codec
        // supports it.
        bool scalabilityModeFound = false;
        for (const cricket::VideoCodec& codec : codec_preferences) {
          for (const auto& scalability_mode : codec.scalability_modes) {
            if (ScalabilityModeToString(scalability_mode) ==
                *rtp_parameters.encodings[i].scalability_mode) {
              scalabilityModeFound = true;
              break;
            }
          }
          if (scalabilityModeFound)
            break;
        }
        if (!scalabilityModeFound) {
          LOG_AND_RETURN_ERROR(
              RTCErrorType::INVALID_MODIFICATION,
              "Attempted to set RtpParameters scalabilityMode "
              "to an unsupported value for the current codecs.");
        }
      } else {
        // A send codec is known: the mode must be supported by that codec.
        bool scalabilityModeFound = false;
        for (const auto& scalability_mode : send_codec->scalability_modes) {
          if (ScalabilityModeToString(scalability_mode) ==
              *rtp_parameters.encodings[i].scalability_mode) {
            scalabilityModeFound = true;
            break;
          }
        }
        if (!scalabilityModeFound) {
          LOG_AND_RETURN_ERROR(
              RTCErrorType::INVALID_MODIFICATION,
              "Attempted to set RtpParameters scalabilityMode "
              "to an unsupported value for the current codecs.");
        }
      }
    }
  }
  return webrtc::RTCError::OK();
}
// Validates the value ranges of every encoding in `rtp_parameters`
// (bitrate_priority, scale_resolution_down_by, max_framerate, min/max
// bitrate, num_temporal_layers, requested_resolution, per-encoding codec),
// then delegates the scalability-mode checks to CheckScalabilityModeValues().
// Returns RTCError::OK() on success, otherwise an INVALID_RANGE /
// UNSUPPORTED_OPERATION error (logged via LOG_AND_RETURN_ERROR).
// Fix: corrected the misspelled "simultaniously" in the user-visible error
// message below.
webrtc::RTCError CheckRtpParametersValues(
    const webrtc::RtpParameters& rtp_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec) {
  using webrtc::RTCErrorType;
  for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
    if (rtp_parameters.encodings[i].bitrate_priority <= 0) {
      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                           "Attempted to set RtpParameters bitrate_priority to "
                           "an invalid number. bitrate_priority must be > 0.");
    }
    if (rtp_parameters.encodings[i].scale_resolution_down_by &&
        *rtp_parameters.encodings[i].scale_resolution_down_by < 1.0) {
      LOG_AND_RETURN_ERROR(
          RTCErrorType::INVALID_RANGE,
          "Attempted to set RtpParameters scale_resolution_down_by to an "
          "invalid value. scale_resolution_down_by must be >= 1.0");
    }
    if (rtp_parameters.encodings[i].max_framerate &&
        *rtp_parameters.encodings[i].max_framerate < 0.0) {
      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                           "Attempted to set RtpParameters max_framerate to an "
                           "invalid value. max_framerate must be >= 0.0");
    }
    if (rtp_parameters.encodings[i].min_bitrate_bps &&
        rtp_parameters.encodings[i].max_bitrate_bps) {
      if (*rtp_parameters.encodings[i].max_bitrate_bps <
          *rtp_parameters.encodings[i].min_bitrate_bps) {
        LOG_AND_RETURN_ERROR(webrtc::RTCErrorType::INVALID_RANGE,
                             "Attempted to set RtpParameters min bitrate "
                             "larger than max bitrate.");
      }
    }
    if (rtp_parameters.encodings[i].num_temporal_layers) {
      if (*rtp_parameters.encodings[i].num_temporal_layers < 1 ||
          *rtp_parameters.encodings[i].num_temporal_layers >
              webrtc::kMaxTemporalStreams) {
        LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                             "Attempted to set RtpParameters "
                             "num_temporal_layers to an invalid number.");
      }
    }
    if (rtp_parameters.encodings[i].requested_resolution &&
        rtp_parameters.encodings[i].scale_resolution_down_by) {
      // These two ways of expressing the target resolution are mutually
      // exclusive.
      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
                           "Attempted to set scale_resolution_down_by and "
                           "requested_resolution simultaneously.");
    }
    if (i > 0 && rtp_parameters.encodings[i - 1].codec !=
                     rtp_parameters.encodings[i].codec) {
      // Mixed per-encoding codecs are not supported.
      LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_OPERATION,
                           "Attempted to use different codec values for "
                           "different encodings.");
    }
  }
  return CheckScalabilityModeValues(rtp_parameters, codec_preferences,
                                    send_codec);
}
// Convenience overload: performs the modification/value checks without any
// codec information, so the scalability-mode validation becomes a no-op.
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_rtp_parameters,
    const webrtc::RtpParameters& rtp_parameters) {
  return CheckRtpParametersInvalidModificationAndValues(
      old_rtp_parameters, rtp_parameters, /*codec_preferences=*/{},
      /*send_codec=*/absl::nullopt);
}
// Verifies that `rtp_parameters` differs from `old_rtp_parameters` only in
// mutable fields: the encoding count, RTCP parameters, header extensions and
// the per-encoding RIDs and SSRCs must all be unchanged. On success the new
// values are validated with CheckRtpParametersValues().
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_rtp_parameters,
    const webrtc::RtpParameters& rtp_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec) {
  using webrtc::RTCErrorType;
  if (rtp_parameters.encodings.size() != old_rtp_parameters.encodings.size()) {
    LOG_AND_RETURN_ERROR(
        RTCErrorType::INVALID_MODIFICATION,
        "Attempted to set RtpParameters with different encoding count");
  }
  if (rtp_parameters.rtcp != old_rtp_parameters.rtcp) {
    LOG_AND_RETURN_ERROR(
        RTCErrorType::INVALID_MODIFICATION,
        "Attempted to set RtpParameters with modified RTCP parameters");
  }
  if (rtp_parameters.header_extensions !=
      old_rtp_parameters.header_extensions) {
    LOG_AND_RETURN_ERROR(
        RTCErrorType::INVALID_MODIFICATION,
        "Attempted to set RtpParameters with modified header extensions");
  }
  // Encodings must keep their RIDs pairwise...
  if (!absl::c_equal(old_rtp_parameters.encodings, rtp_parameters.encodings,
                     [](const webrtc::RtpEncodingParameters& encoding1,
                        const webrtc::RtpEncodingParameters& encoding2) {
                       return encoding1.rid == encoding2.rid;
                     })) {
    LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION,
                         "Attempted to change RID values in the encodings.");
  }
  // ...and their SSRCs.
  if (!absl::c_equal(old_rtp_parameters.encodings, rtp_parameters.encodings,
                     [](const webrtc::RtpEncodingParameters& encoding1,
                        const webrtc::RtpEncodingParameters& encoding2) {
                       return encoding1.ssrc == encoding2.ssrc;
                     })) {
    LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION,
                         "Attempted to set RtpParameters with modified SSRC");
  }
  return CheckRtpParametersValues(rtp_parameters, codec_preferences,
                                  send_codec);
}
// Constructs a composite engine owning the given voice and video engines and
// an optional field-trials map.
CompositeMediaEngine::CompositeMediaEngine(
    std::unique_ptr<webrtc::FieldTrialsView> trials,
    std::unique_ptr<VoiceEngineInterface> audio_engine,
    std::unique_ptr<VideoEngineInterface> video_engine)
    : trials_(std::move(trials)),
      voice_engine_(std::move(audio_engine)),
      video_engine_(std::move(video_engine)) {}
// Convenience overload without field trials (stores a null trials map).
CompositeMediaEngine::CompositeMediaEngine(
    std::unique_ptr<VoiceEngineInterface> audio_engine,
    std::unique_ptr<VideoEngineInterface> video_engine)
    : CompositeMediaEngine(nullptr,
                           std::move(audio_engine),
                           std::move(video_engine)) {}
CompositeMediaEngine::~CompositeMediaEngine() = default;
bool CompositeMediaEngine::Init() {
voice().Init();
return true;
}
VoiceEngineInterface& CompositeMediaEngine::voice() {
return *voice_engine_.get();
}
VideoEngineInterface& CompositeMediaEngine::video() {
return *video_engine_.get();
}
const VoiceEngineInterface& CompositeMediaEngine::voice() const {
return *voice_engine_.get();
}
const VideoEngineInterface& CompositeMediaEngine::video() const {
return *video_engine_.get();
}
} // namespace cricket

View file

@ -0,0 +1,239 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_MEDIA_ENGINE_H_
#define MEDIA_BASE_MEDIA_ENGINE_H_
#include <memory>
#include <string>
#include <vector>
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/crypto/crypto_options.h"
#include "api/field_trials_view.h"
#include "api/rtp_parameters.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "call/audio_state.h"
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_channel_impl.h"
#include "media/base/media_config.h"
#include "media/base/video_common.h"
#include "rtc_base/system/file_wrapper.h"
namespace webrtc {
// Forward declarations to avoid pulling in heavy headers.
class AudioDeviceModule;
class AudioMixer;
class AudioProcessing;
class Call;
}  // namespace webrtc
namespace cricket {
// Checks that the scalability_mode value of each encoding is supported by at
// least one video codec of the list. If the list is empty, no check is done.
webrtc::RTCError CheckScalabilityModeValues(
    const webrtc::RtpParameters& new_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec);
// Checks the parameters have valid and supported values, and checks parameters
// with CheckScalabilityModeValues().
webrtc::RTCError CheckRtpParametersValues(
    const webrtc::RtpParameters& new_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec);
// Checks that the immutable values have not changed in new_parameters and
// checks all parameters with CheckRtpParametersValues().
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_parameters,
    const webrtc::RtpParameters& new_parameters,
    rtc::ArrayView<cricket::Codec> codec_preferences,
    absl::optional<cricket::Codec> send_codec);
// Checks that the immutable values have not changed in new_parameters and
// checks parameters (except SVC) with CheckRtpParametersValues(). It should
// usually be paired with a call to CheckScalabilityModeValues().
webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
    const webrtc::RtpParameters& old_parameters,
    const webrtc::RtpParameters& new_parameters);
// Aggregates the RTP header extensions supported by an engine.
struct RtpCapabilities {
  RtpCapabilities();
  ~RtpCapabilities();
  std::vector<webrtc::RtpExtension> header_extensions;
};
// Interface for querying the RTP header extensions an engine supports.
class RtpHeaderExtensionQueryInterface {
 public:
  virtual ~RtpHeaderExtensionQueryInterface() = default;
  // Returns a vector of RtpHeaderExtensionCapability, whose direction is
  // kStopped if the extension is stopped (not used) by default.
  virtual std::vector<webrtc::RtpHeaderExtensionCapability>
  GetRtpHeaderExtensions() const = 0;
};
// Interface for the audio half of a media engine: exposes audio state, codec
// lists, AEC-dump control and factories for send/receive channels.
class VoiceEngineInterface : public RtpHeaderExtensionQueryInterface {
 public:
  VoiceEngineInterface() = default;
  virtual ~VoiceEngineInterface() = default;
  // Non-copyable.
  VoiceEngineInterface(const VoiceEngineInterface&) = delete;
  VoiceEngineInterface& operator=(const VoiceEngineInterface&) = delete;
  // Initialization
  // Starts the engine.
  virtual void Init() = 0;
  // TODO(solenberg): Remove once VoE API refactoring is done.
  virtual rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const = 0;
  // Creates the send half of a voice channel. The default implementation
  // crashes; subclasses are expected to override it.
  virtual std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const AudioOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::AudioCodecPairId codec_pair_id) {
    // TODO(hta): Make pure virtual when all downstream has updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  // Creates the receive half of a voice channel; same override expectation as
  // CreateSendChannel().
  virtual std::unique_ptr<VoiceMediaReceiveChannelInterface>
  CreateReceiveChannel(webrtc::Call* call,
                       const MediaConfig& config,
                       const AudioOptions& options,
                       const webrtc::CryptoOptions& crypto_options,
                       webrtc::AudioCodecPairId codec_pair_id) {
    // TODO(hta): Make pure virtual when all downstream has updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  // Supported send/receive audio codecs.
  virtual const std::vector<AudioCodec>& send_codecs() const = 0;
  virtual const std::vector<AudioCodec>& recv_codecs() const = 0;
  // Starts AEC dump using existing file, a maximum file size in bytes can be
  // specified. Logging is stopped just before the size limit is exceeded.
  // If max_size_bytes is set to a value <= 0, no limit will be used.
  virtual bool StartAecDump(webrtc::FileWrapper file,
                            int64_t max_size_bytes) = 0;
  // Stops recording AEC dump.
  virtual void StopAecDump() = 0;
  virtual absl::optional<webrtc::AudioDeviceModule::Stats>
  GetAudioDeviceStats() = 0;
};
// Interface for the video half of a media engine: codec enumeration and
// factories for send/receive channels.
class VideoEngineInterface : public RtpHeaderExtensionQueryInterface {
 public:
  VideoEngineInterface() = default;
  virtual ~VideoEngineInterface() = default;
  // Non-copyable.
  VideoEngineInterface(const VideoEngineInterface&) = delete;
  VideoEngineInterface& operator=(const VideoEngineInterface&) = delete;
  // Creates the send half of a video channel. The default implementation
  // crashes; subclasses are expected to override it.
  virtual std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
      webrtc::Call* call,
      const MediaConfig& config,
      const VideoOptions& options,
      const webrtc::CryptoOptions& crypto_options,
      webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
    // Default implementation, delete when all is updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  // Creates the receive half of a video channel; same override expectation as
  // CreateSendChannel().
  virtual std::unique_ptr<VideoMediaReceiveChannelInterface>
  CreateReceiveChannel(webrtc::Call* call,
                       const MediaConfig& config,
                       const VideoOptions& options,
                       const webrtc::CryptoOptions& crypto_options) {
    // Default implementation, delete when all is updated
    RTC_CHECK_NOTREACHED();
    return nullptr;
  }
  // Retrieve list of supported codecs.
  virtual std::vector<VideoCodec> send_codecs() const = 0;
  virtual std::vector<VideoCodec> recv_codecs() const = 0;
  // As above, but if include_rtx is false, don't include RTX codecs.
  // TODO(bugs.webrtc.org/13931): Remove default implementation once
  // upstream subclasses have converted.
  virtual std::vector<VideoCodec> send_codecs(bool include_rtx) const {
    RTC_DCHECK(include_rtx);
    return send_codecs();
  }
  virtual std::vector<VideoCodec> recv_codecs(bool include_rtx) const {
    RTC_DCHECK(include_rtx);
    return recv_codecs();
  }
};
// MediaEngineInterface is an abstraction of a media engine which can be
// subclassed to support different media componentry backends.
// It supports voice and video operations in the same class to facilitate
// proper synchronization between both media types.
class MediaEngineInterface {
 public:
  virtual ~MediaEngineInterface() {}
  // Initialization. Needs to be called on the worker thread.
  virtual bool Init() = 0;
  // Accessors for the voice and video halves (mutable and const).
  virtual VoiceEngineInterface& voice() = 0;
  virtual VideoEngineInterface& video() = 0;
  virtual const VoiceEngineInterface& voice() const = 0;
  virtual const VideoEngineInterface& video() const = 0;
};
// CompositeMediaEngine constructs a MediaEngine from separate
// voice and video engine classes.
// Optionally owns a FieldTrialsView trials map.
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  CompositeMediaEngine(std::unique_ptr<webrtc::FieldTrialsView> trials,
                       std::unique_ptr<VoiceEngineInterface> audio_engine,
                       std::unique_ptr<VideoEngineInterface> video_engine);
  // Overload without field trials; stores a null trials map.
  CompositeMediaEngine(std::unique_ptr<VoiceEngineInterface> audio_engine,
                       std::unique_ptr<VideoEngineInterface> video_engine);
  ~CompositeMediaEngine() override;
  // Always succeeds.
  bool Init() override;
  VoiceEngineInterface& voice() override;
  VideoEngineInterface& video() override;
  const VoiceEngineInterface& voice() const override;
  const VideoEngineInterface& video() const override;
 private:
  // Owned components; `trials_` may be null (see two-argument constructor).
  const std::unique_ptr<webrtc::FieldTrialsView> trials_;
  const std::unique_ptr<VoiceEngineInterface> voice_engine_;
  const std::unique_ptr<VideoEngineInterface> video_engine_;
};
// Helpers for building RtpParameters; implemented in media_engine.cc.
webrtc::RtpParameters CreateRtpParametersWithOneEncoding();
webrtc::RtpParameters CreateRtpParametersWithEncodings(StreamParams sp);
// Returns a vector of RTP extensions as visible from RtpSender/Receiver
// GetCapabilities(). The returned vector only shows what will definitely be
// offered by default, i.e. the list of extensions returned from
// GetRtpHeaderExtensions() that are not kStopped.
std::vector<webrtc::RtpExtension> GetDefaultEnabledRtpHeaderExtensions(
    const RtpHeaderExtensionQueryInterface& query_interface);
} // namespace cricket
#endif // MEDIA_BASE_MEDIA_ENGINE_H_

View file

@ -0,0 +1,28 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/rid_description.h"
namespace cricket {
// Out-of-line special members of RidDescription; all member-wise defaults
// except the (rid, direction) convenience constructor.
RidDescription::RidDescription() = default;
RidDescription::RidDescription(const std::string& rid, RidDirection direction)
    : rid{rid}, direction{direction} {}
RidDescription::RidDescription(const RidDescription& other) = default;
RidDescription::~RidDescription() = default;
RidDescription& RidDescription::operator=(const RidDescription& other) =
    default;
// Two descriptions are equal when every field matches (RID string, direction,
// payload types and the restriction map).
bool RidDescription::operator==(const RidDescription& other) const {
  if (rid != other.rid)
    return false;
  if (direction != other.direction)
    return false;
  if (payload_types != other.payload_types)
    return false;
  return restrictions == other.restrictions;
}
} // namespace cricket

View file

@ -0,0 +1,93 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_RID_DESCRIPTION_H_
#define MEDIA_BASE_RID_DESCRIPTION_H_
#include <map>
#include <string>
#include <vector>
namespace cricket {
// Direction the restrictions of a RID apply to (see RidDescription below).
enum class RidDirection { kSend, kReceive };
// Description of a Restriction Id (RID) according to:
// https://tools.ietf.org/html/draft-ietf-mmusic-rid-15
// A Restriction Identifier serves two purposes:
//   1. Uniquely identifies an RTP stream inside an RTP session.
//      When combined with MIDs (https://tools.ietf.org/html/rfc5888),
//      RIDs uniquely identify an RTP stream within an RTP session.
//      The MID will identify the media section and the RID will identify
//      the stream within the section.
//      RID identifiers must be unique within the media section.
//   2. Allows indicating further restrictions to the stream.
//      These restrictions are added according to the direction specified.
//      The direction field identifies the direction of the RTP stream packets
//      to which the restrictions apply. The direction is independent of the
//      transceiver direction and can be one of {send, recv}.
//      The following are some examples of these restrictions:
//        a. max-width, max-height, max-fps, max-br, ...
//        b. further restricting the codec set (from what m= section specified)
//
// Note: Indicating dependencies between streams (using depend) will not be
// supported, since the WG is adopting a different approach to achieve this.
// As of 2018-12-04, the new SVC (Scalable Video Coder) approach is still not
// mature enough to be implemented as part of this work.
// See: https://w3c.github.io/webrtc-svc/ for more details.
// Value type describing one RID and its restrictions; see the draft-ietf-
// mmusic-rid reference in this header for background.
struct RidDescription final {
  // Special members are defined out of line in rid_description.cc.
  RidDescription();
  RidDescription(const std::string& rid, RidDirection direction);
  RidDescription(const RidDescription& other);
  ~RidDescription();
  RidDescription& operator=(const RidDescription& other);
  // This is currently required for unit tests of StreamParams which contains
  // RidDescription objects and checks for equality using operator==.
  bool operator==(const RidDescription& other) const;
  bool operator!=(const RidDescription& other) const {
    return !(*this == other);
  }
  // The RID identifier that uniquely identifies the stream within the session.
  std::string rid;
  // Specifies the direction for which the specified restrictions hold.
  // This direction is either send or receive and is independent of the
  // direction of the transceiver.
  // https://tools.ietf.org/html/draft-ietf-mmusic-rid-15#section-4 :
  // The "direction" field identifies the direction of the RTP Stream
  // packets to which the indicated restrictions are applied.  It may be
  // either "send" or "recv".  Note that these restriction directions are
  // expressed independently of any "inactive", "sendonly", "recvonly", or
  // "sendrecv" attributes associated with the media section.  It is, for
  // example, valid to indicate "recv" restrictions on a "sendonly"
  // stream; those restrictions would apply if, at a future point in time,
  // the stream were changed to "sendrecv" or "recvonly".
  RidDirection direction;
  // The list of codec payload types for this stream.
  // It should be a subset of the payloads supported for the media section.
  std::vector<int> payload_types;
  // Contains key-value pairs for restrictions.
  // The keys are not validated against a known set.
  // The meaning to infer for the values depends on each key.
  // Examples:
  // 1. An entry for max-width will have a value that is interpreted as an int.
  // 2. An entry for max-bpp (bits per pixel) will have a float value.
  // Interpretation (and validation of value) is left for the implementation.
  // I.E. the media engines should validate values for parameters they support.
  std::map<std::string, std::string> restrictions;
};
} // namespace cricket
#endif // MEDIA_BASE_RID_DESCRIPTION_H_

View file

@ -0,0 +1,401 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/rtp_utils.h"
#include <string.h>
#include <vector>
// PacketTimeUpdateParams is defined in asyncpacketsocket.h.
// TODO(sergeyu): Find more appropriate place for PacketTimeUpdateParams.
#include "media/base/turn_utils.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/async_packet_socket.h"
#include "rtc_base/byte_order.h"
#include "rtc_base/checks.h"
#include "rtc_base/message_digest.h"
namespace cricket {
// Byte offset of the payload-type field within an RTCP packet.
static const size_t kRtcpPayloadTypeOffset = 1;
// Length in bytes of the fixed RTP header-extension block header.
static const size_t kRtpExtensionHeaderLen = 4;
// Payload length of the absolute-send-time extension (24-bit value; see
// UpdateAbsSendTimeExtensionValue below).
static const size_t kAbsSendTimeExtensionLen = 3;
// Element-header lengths for one-byte and two-byte header extensions
// (RFC 5285).
static const size_t kOneByteExtensionHeaderLen = 1;
static const size_t kTwoByteExtensionHeaderLen = 2;
namespace {
// Fake auth tag written by the sender when external authentication is enabled.
// HMAC in packet will be compared against this value before updating packet
// with actual HMAC value.
static const uint8_t kFakeAuthTag[10] = {0xba, 0xdd, 0xba, 0xdd, 0xba,
                                         0xdd, 0xba, 0xdd, 0xba, 0xdd};
// Writes the 24-bit absolute-send-time value derived from `time_us` into
// `extension_data`, which must be exactly kAbsSendTimeExtensionLen bytes.
//
// The absolute send time is signaled to the receiver in-band using the
// general mechanism for RTP header extensions [RFC5285]. The payload
// of this extension (the transmitted value) is a 24-bit unsigned integer
// containing the sender's current time in seconds as a fixed point number
// with 18 bits fractional part.
//
// The form of the absolute send time extension block:
//
//    0                   1                   2                   3
//    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//   |  ID   | len=2 |              absolute send time               |
//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
void UpdateAbsSendTimeExtensionValue(uint8_t* extension_data,
                                     size_t length,
                                     uint64_t time_us) {
  if (length != kAbsSendTimeExtensionLen) {
    RTC_DCHECK_NOTREACHED();
    return;
  }
  // Convert microseconds to a 6.18 fixed point value in seconds, keeping
  // only the low 24 bits.
  const uint32_t send_time = ((time_us << 18) / 1000000) & 0x00FFFFFF;
  // Store big-endian: most significant byte first.
  for (int i = 0; i < 3; ++i) {
    extension_data[i] = static_cast<uint8_t>(send_time >> (8 * (2 - i)));
  }
}
// Recomputes the SRTP authentication tag at the end of an RTP packet, for the
// "external authentication" mode where the sender first wrote kFakeAuthTag.
// Assumes `length` is actual packet length + tag length. Updates HMAC at end
// of the RTP packet. No-op when no auth key is configured; malformed tag
// lengths are debug-checked and skipped.
void UpdateRtpAuthTag(uint8_t* rtp,
                      size_t length,
                      const rtc::PacketTimeUpdateParams& packet_time_params) {
  // If there is no key, return.
  if (packet_time_params.srtp_auth_key.empty()) {
    return;
  }
  size_t tag_length = packet_time_params.srtp_auth_tag_len;
  // ROC (rollover counter) is at the beginning of the auth tag.
  const size_t kRocLength = 4;
  // The tag must at least hold the ROC and cannot exceed the buffer.
  if (tag_length < kRocLength || tag_length > length) {
    RTC_DCHECK_NOTREACHED();
    return;
  }
  uint8_t* auth_tag = rtp + (length - tag_length);
  // We should have a fake HMAC value @ auth_tag, as written by the sender.
  RTC_DCHECK_EQ(0, memcmp(auth_tag, kFakeAuthTag, tag_length));
  // Copy ROC after end of rtp packet; SRTP authenticates packet || ROC.
  memcpy(auth_tag, &packet_time_params.srtp_packet_index, kRocLength);
  // Authentication of a RTP packet will have RTP packet + ROC size.
  size_t auth_required_length = length - tag_length + kRocLength;
  uint8_t output[64];
  size_t result =
      rtc::ComputeHmac(rtc::DIGEST_SHA_1, &packet_time_params.srtp_auth_key[0],
                       packet_time_params.srtp_auth_key.size(), rtp,
                       auth_required_length, output, sizeof(output));
  if (result < tag_length) {
    RTC_DCHECK_NOTREACHED();
    return;
  }
  // Copy HMAC from output to packet. This is required as auth tag length
  // may not be equal to the actual HMAC length.
  memcpy(auth_tag, output, tag_length);
}
// Reads the byte at `offset` from `data` into `value`.
// Returns false when either pointer is null; `offset` is not bounds-checked.
bool GetUint8(const void* data, size_t offset, int* value) {
  if (data == nullptr || value == nullptr) {
    return false;
  }
  const uint8_t* bytes = static_cast<const uint8_t*>(data);
  *value = bytes[offset];
  return true;
}
} // namespace
// Extracts the RTCP payload type (second byte of the common header) into
// `value`. Fails when the buffer is shorter than the RTCP common header.
bool GetRtcpType(const void* data, size_t len, int* value) {
  return len >= kMinRtcpPacketLen &&
         GetUint8(data, kRtcpPayloadTypeOffset, value);
}
// Returns the first SSRC of an RTCP packet, except for SDES packets (whose
// leading field is not a plain SSRC and is not supported here).
// TODO(mallinath) - Fully implement RFC 5506. This standard doesn't restrict
// to send non-compound packets only to feedback messages.
bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value) {
  // Need the 4-byte common header plus a 4-byte SSRC field.
  if (data == nullptr || value == nullptr || len < kMinRtcpPacketLen + 4) {
    return false;
  }
  int payload_type = 0;
  if (!GetRtcpType(data, len, &payload_type)) {
    return false;
  }
  // SDES packet parsing is not supported.
  if (payload_type == kRtcpTypeSDES) {
    return false;
  }
  *value = rtc::GetBE32(static_cast<const uint8_t*>(data) + 4);
  return true;
}
// True when `payload_type` fits the 7-bit RTP payload type field, i.e. is in
// the inclusive range [0, 127].
bool IsValidRtpPayloadType(int payload_type) {
  return 0 <= payload_type && payload_type <= 127;
}
// True when `size` lies within the legal bounds for the given packet type.
// RTP and RTCP have different minimum sizes but share the same maximum.
bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size) {
  RTC_DCHECK_NE(RtpPacketType::kUnknown, packet_type);
  size_t lower_bound = kMinRtpPacketLen;
  if (packet_type == RtpPacketType::kRtcp) {
    lower_bound = kMinRtcpPacketLen;
  }
  return size >= lower_bound && size <= kMaxRtpPacketLen;
}
// Maps each RtpPacketType enumerator to its display name.
absl::string_view RtpPacketTypeToString(RtpPacketType packet_type) {
  switch (packet_type) {
    case RtpPacketType::kRtcp:
      return "RTCP";
    case RtpPacketType::kRtp:
      return "RTP";
    case RtpPacketType::kUnknown:
      return "Unknown";
  }
  // Every enumerator is handled above; falling through is a logic error.
  RTC_CHECK_NOTREACHED();
}
// Classifies a raw packet buffer as RTCP, RTP, or unknown based on its
// header bytes. RTCP is tested first, then RTP.
RtpPacketType InferRtpPacketType(rtc::ArrayView<const char> packet) {
  const auto bytes = rtc::reinterpret_array_view<const uint8_t>(packet);
  if (webrtc::IsRtcpPacket(bytes)) {
    return RtpPacketType::kRtcp;
  }
  if (webrtc::IsRtpPacket(bytes)) {
    return RtpPacketType::kRtp;
  }
  return RtpPacketType::kUnknown;
}
// Verifies that `rtp` holds a structurally valid RTP header that fits within
// `length` bytes: fixed 12-byte header, CSRC list, and (if the X bit is set)
// a complete extension block. On success, stores the total header size in
// `header_length` when that pointer is non-null. Does not inspect the
// payload.
bool ValidateRtpHeader(const uint8_t* rtp,
                       size_t length,
                       size_t* header_length) {
  if (header_length) {
    *header_length = 0;
  }
  if (length < kMinRtpPacketLen) {
    return false;
  }
  // Low nibble of the first byte is the CSRC count; each CSRC is 4 bytes.
  size_t cc_count = rtp[0] & 0x0F;
  size_t header_length_without_extension = kMinRtpPacketLen + 4 * cc_count;
  if (header_length_without_extension > length) {
    return false;
  }
  // If extension bit is not set, we are done with header processing, as input
  // length is verified above.
  if (!(rtp[0] & 0x10)) {
    if (header_length)
      *header_length = header_length_without_extension;
    return true;
  }
  rtp += header_length_without_extension;
  // The 4-byte extension block header (profile id + length) must also fit.
  if (header_length_without_extension + kRtpExtensionHeaderLen > length) {
    return false;
  }
  // Getting extension profile length.
  // Length is in 32 bit words.
  uint16_t extension_length_in_32bits = rtc::GetBE16(rtp + 2);
  size_t extension_length = extension_length_in_32bits * 4;
  size_t rtp_header_length = extension_length +
                             header_length_without_extension +
                             kRtpExtensionHeaderLen;
  // Verify input length against total header size.
  if (rtp_header_length > length) {
    return false;
  }
  if (header_length) {
    *header_length = rtp_header_length;
  }
  return true;
}
// Rewrites the abs-send-time header extension value in-place with `time_us`.
// Returns true when the packet has no extension block (nothing to do) or when
// the extension with `extension_id` was found and updated; returns false when
// the extension was absent or an element overran the extension block.
// ValidateRtpHeader() must be called before this method to make sure, we have
// a sane rtp packet.
bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp,
                                   size_t length,
                                   int extension_id,
                                   uint64_t time_us) {
  //  0                   1                   2                   3
  //  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |V=2|P|X|  CC   |M|     PT      |       sequence number         |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |                           timestamp                           |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |           synchronization source (SSRC) identifier            |
  // +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
  // |            contributing source (CSRC) identifiers             |
  // |                             ....                              |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // Return if extension bit is not set.
  if (!(rtp[0] & 0x10)) {
    return true;
  }
  size_t cc_count = rtp[0] & 0x0F;
  size_t header_length_without_extension = kMinRtpPacketLen + 4 * cc_count;
  rtp += header_length_without_extension;
  // Getting extension profile ID and length.
  uint16_t profile_id = rtc::GetBE16(rtp);
  // Length is in 32 bit words.
  uint16_t extension_length_in_32bits = rtc::GetBE16(rtp + 2);
  size_t extension_length = extension_length_in_32bits * 4;
  rtp += kRtpExtensionHeaderLen;  // Moving past extension header.
  // Profile ids defined by RFC 8285 for one- and two-byte element headers.
  constexpr uint16_t kOneByteExtensionProfileId = 0xBEDE;
  constexpr uint16_t kTwoByteExtensionProfileId = 0x1000;
  bool found = false;
  if (profile_id == kOneByteExtensionProfileId ||
      profile_id == kTwoByteExtensionProfileId) {
    // OneByte extension header
    //  0
    //  0 1 2 3 4 5 6 7
    // +-+-+-+-+-+-+-+-+
    // |  ID   |length |
    // +-+-+-+-+-+-+-+-+
    //  0                   1                   2                   3
    //  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |      0xBE     |      0xDE     |           length=3            |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |  ID   | L=0   |     data      |  ID   |  L=1  |   data...
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //       ...data   |    0 (pad)    |    0 (pad)    |  ID   | L=3   |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |                          data                                 |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // TwoByte extension header
    //  0
    //  0 1 2 3 4 5 6 7
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |      ID       |    length     |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  0                   1                   2                   3
    //  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |      0x10     |      0x00     |           length=3            |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |      ID       |     L=1       |     data      |      ID       |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |     L=2       |     data      |    0 (pad)    |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |       ID      |      L=2      |     data      |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    size_t extension_header_length = profile_id == kOneByteExtensionProfileId
                                         ? kOneByteExtensionHeaderLen
                                         : kTwoByteExtensionHeaderLen;
    const uint8_t* extension_start = rtp;
    const uint8_t* extension_end = extension_start + extension_length;
    // rtp + 1 since the minimum size per header extension is two bytes for both
    // one- and two-byte header extensions.
    while (rtp + 1 < extension_end) {
      // See RFC8285 Section 4.2-4.3 for more information about one- and
      // two-byte header extensions.
      // One-byte: id in the high nibble, (length - 1) in the low nibble.
      // Two-byte: id in the first byte, length in the second.
      const int id =
          profile_id == kOneByteExtensionProfileId ? (*rtp & 0xF0) >> 4 : *rtp;
      const size_t length = profile_id == kOneByteExtensionProfileId
                                ? (*rtp & 0x0F) + 1
                                : *(rtp + 1);
      // Element must not run past the end of the extension block.
      if (rtp + extension_header_length + length > extension_end) {
        return false;
      }
      if (id == extension_id) {
        UpdateAbsSendTimeExtensionValue(rtp + extension_header_length, length,
                                        time_us);
        found = true;
        break;
      }
      rtp += extension_header_length + length;
      // Counting padding bytes.
      while ((rtp < extension_end) && (*rtp == 0)) {
        ++rtp;
      }
    }
  }
  return found;
}
// Applies time-sensitive packet options right before sending: rewrites the
// abs-send-time extension (when an extension id is configured) and then
// recomputes the external SRTP auth tag (when an auth key is configured).
// Handles RTP packets that are wrapped in TURN ChannelData / Send Indication
// framing. Returns false only when the buffer does not contain a valid
// (possibly TURN-wrapped) RTP packet.
bool ApplyPacketOptions(uint8_t* data,
                        size_t length,
                        const rtc::PacketTimeUpdateParams& packet_time_params,
                        uint64_t time_us) {
  RTC_DCHECK(data);
  RTC_DCHECK(length);
  // if there is no valid `rtp_sendtime_extension_id` and `srtp_auth_key` in
  // PacketOptions, nothing to be updated in this packet.
  if (packet_time_params.rtp_sendtime_extension_id == -1 &&
      packet_time_params.srtp_auth_key.empty()) {
    return true;
  }
  // If there is a srtp auth key present then the packet must be an RTP packet.
  // RTP packet may have been wrapped in a TURN Channel Data or TURN send
  // indication.
  size_t rtp_start_pos;
  size_t rtp_length;
  if (!UnwrapTurnPacket(data, length, &rtp_start_pos, &rtp_length)) {
    RTC_DCHECK_NOTREACHED();
    return false;
  }
  // Making sure we have a valid RTP packet at the end.
  auto packet = rtc::MakeArrayView(data + rtp_start_pos, rtp_length);
  if (!webrtc::IsRtpPacket(packet) ||
      !ValidateRtpHeader(data + rtp_start_pos, rtp_length, nullptr)) {
    RTC_DCHECK_NOTREACHED();
    return false;
  }
  uint8_t* start = data + rtp_start_pos;
  // If packet option has non default value (-1) for sendtime extension id,
  // then we should parse the rtp packet to update the timestamp. Otherwise
  // just calculate HMAC and update packet with it.
  // Note: a failed extension update is deliberately best-effort; the packet
  // is still authenticated and sent.
  if (packet_time_params.rtp_sendtime_extension_id != -1) {
    UpdateRtpAbsSendTimeExtension(start, rtp_length,
                                  packet_time_params.rtp_sendtime_extension_id,
                                  time_us);
  }
  UpdateRtpAuthTag(start, rtp_length, packet_time_params);
  return true;
}
} // namespace cricket

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_RTP_UTILS_H_
#define MEDIA_BASE_RTP_UTILS_H_
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "rtc_base/byte_order.h"
#include "rtc_base/system/rtc_export.h"
namespace rtc {
struct PacketTimeUpdateParams;
}  // namespace rtc
namespace cricket {
// Smallest valid RTP packet: the 12-byte fixed header.
const size_t kMinRtpPacketLen = 12;
// Upper bound used when sanity-checking packet sizes.
const size_t kMaxRtpPacketLen = 2048;
// Smallest valid RTCP packet: the 4-byte common header.
const size_t kMinRtcpPacketLen = 4;
// RTCP packet (payload) types.
enum RtcpTypes {
  kRtcpTypeSR = 200,     // Sender report payload type.
  kRtcpTypeRR = 201,     // Receiver report payload type.
  kRtcpTypeSDES = 202,   // SDES payload type.
  kRtcpTypeBye = 203,    // BYE payload type.
  kRtcpTypeApp = 204,    // APP payload type.
  kRtcpTypeRTPFB = 205,  // Transport layer Feedback message payload type.
  kRtcpTypePSFB = 206,   // Payload-specific Feedback message payload type.
};
// Rough classification of a packet buffer; see InferRtpPacketType().
enum class RtpPacketType {
  kRtp,
  kRtcp,
  kUnknown,
};
// Reads the RTCP payload type from `data` into `value`; false if `len` is
// shorter than the RTCP common header.
bool GetRtcpType(const void* data, size_t len, int* value);
// Reads the leading SSRC of an RTCP packet; false for SDES packets, null
// arguments, or buffers shorter than common header + SSRC.
bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value);
// Checks the packet header to determine if it can be an RTP or RTCP packet.
RtpPacketType InferRtpPacketType(rtc::ArrayView<const char> packet);
// True if `payload_type` is 0-127, i.e. fits the 7-bit RTP PT field.
bool IsValidRtpPayloadType(int payload_type);
// True if `size` is appropriate for the indicated packet type.
bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size);
// Returns "RTCP", "RTP" or "Unknown" according to `packet_type`.
absl::string_view RtpPacketTypeToString(RtpPacketType packet_type);
// Verifies that a packet has a valid RTP header. On success, stores the total
// header size in `header_length` when that pointer is non-null.
bool RTC_EXPORT ValidateRtpHeader(const uint8_t* rtp,
                                  size_t length,
                                  size_t* header_length);
// Helper method which updates the absolute send time extension if present.
bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp,
                                   size_t length,
                                   int extension_id,
                                   uint64_t time_us);
// Applies specified `options` to the packet: updates the absolute send time
// extension header if it is present, and then updates the HMAC.
bool RTC_EXPORT
ApplyPacketOptions(uint8_t* data,
                   size_t length,
                   const rtc::PacketTimeUpdateParams& packet_time_params,
                   uint64_t time_us);
}  // namespace cricket
#endif  // MEDIA_BASE_RTP_UTILS_H_

View file

@ -0,0 +1,180 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/sdp_video_format_utils.h"
#include <cstring>
#include <map>
#include <utility>
#include "api/video_codecs/h264_profile_level_id.h"
#ifdef RTC_ENABLE_H265
#include "api/video_codecs/h265_profile_tier_level.h"
#endif
#include "rtc_base/checks.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace {
// H264 fmtp parameter names used during SDP negotiation.
const char kProfileLevelId[] = "profile-level-id";
const char kH264LevelAsymmetryAllowed[] = "level-asymmetry-allowed";
// Max frame rate for VP8 and VP9 video.
const char kVPxFmtpMaxFrameRate[] = "max-fr";
// Max frame size for VP8 and VP9 video.
const char kVPxFmtpMaxFrameSize[] = "max-fs";
// "max-fs" is expressed in sub-blocks of 256 pixels each.
const int kVPxFmtpFrameSizeSubBlockPixels = 256;
#ifdef RTC_ENABLE_H265
// H265 fmtp parameter names (RFC 7798).
constexpr char kH265ProfileId[] = "profile-id";
constexpr char kH265TierFlag[] = "tier-flag";
constexpr char kH265LevelId[] = "level-id";
#endif
// True when the fmtp map explicitly sets level-asymmetry-allowed to "1".
bool IsH264LevelAsymmetryAllowed(const CodecParameterMap& params) {
  const auto it = params.find(kH264LevelAsymmetryAllowed);
  if (it == params.end()) {
    return false;
  }
  return it->second == "1";
}
// Compare H264 levels and handle the level 1b case.
// Level 1b does not follow the numeric enum ordering: per this comparison it
// is greater than level 1 but less than every other level, so the two
// explicit branches below implement that ordering; all other levels compare
// by enum value.
bool H264LevelIsLess(H264Level a, H264Level b) {
  if (a == H264Level::kLevel1_b)
    return b != H264Level::kLevel1 && b != H264Level::kLevel1_b;
  if (b == H264Level::kLevel1_b)
    return a == H264Level::kLevel1;
  return a < b;
}
// Returns the lower of two H264 levels under the 1b-aware ordering
// implemented by H264LevelIsLess().
H264Level H264LevelMin(H264Level a, H264Level b) {
  if (H264LevelIsLess(a, b)) {
    return a;
  }
  return b;
}
// Looks up `parameter_name` in the fmtp map and parses it as a strictly
// positive integer. Returns nullopt when the parameter is absent, not a
// number, or <= 0.
absl::optional<int> ParsePositiveNumberFromParams(
    const CodecParameterMap& params,
    const char* parameter_name) {
  const auto it = params.find(parameter_name);
  if (it == params.end()) {
    return absl::nullopt;
  }
  const absl::optional<int> parsed = rtc::StringToNumber<int>(it->second);
  if (!parsed.has_value() || parsed.value() <= 0) {
    return absl::nullopt;
  }
  return parsed;
}
#ifdef RTC_ENABLE_H265
// Returns the smaller of two H265 levels (plain enum-value ordering).
H265Level H265LevelMin(H265Level a, H265Level b) {
  if (b < a) {
    return b;
  }
  return a;
}
// Returns true when the fmtp map specifies none of profile-id, tier-flag and
// level-id, i.e. the codec is using the default H265 profile-tier-level.
bool IsDefaultH265PTL(const CodecParameterMap& params) {
  return params.count(kH265ProfileId) == 0 &&
         params.count(kH265TierFlag) == 0 &&
         params.count(kH265LevelId) == 0;
}
#endif
} // namespace
#ifdef RTC_ENABLE_H265
// Set level according to https://tools.ietf.org/html/rfc7798#section-7.1
// Precondition (debug-checked): both sides have valid PTL with equal profile
// and tier; only the level may differ and is negotiated down to the minimum.
void H265GenerateProfileTierLevelForAnswer(
    const CodecParameterMap& local_supported_params,
    const CodecParameterMap& remote_offered_params,
    CodecParameterMap* answer_params) {
  // If local and remote haven't set profile-id/tier-flag/level-id, they
  // are both using the default PTL. In this case, don't set PTL in answer
  // either.
  if (IsDefaultH265PTL(local_supported_params) &&
      IsDefaultH265PTL(remote_offered_params)) {
    return;
  }
  // Parse profile-tier-level.
  const absl::optional<H265ProfileTierLevel> local_profile_tier_level =
      ParseSdpForH265ProfileTierLevel(local_supported_params);
  const absl::optional<H265ProfileTierLevel> remote_profile_tier_level =
      ParseSdpForH265ProfileTierLevel(remote_offered_params);
  // Profile and tier for local and remote codec must be valid and equal.
  RTC_DCHECK(local_profile_tier_level);
  RTC_DCHECK(remote_profile_tier_level);
  RTC_DCHECK_EQ(local_profile_tier_level->profile,
                remote_profile_tier_level->profile);
  RTC_DCHECK_EQ(local_profile_tier_level->tier,
                remote_profile_tier_level->tier);
  const H265Level answer_level = H265LevelMin(local_profile_tier_level->level,
                                              remote_profile_tier_level->level);
  // Level-id in answer is changeable as long as the highest level indicated by
  // the answer is not higher than that indicated by the offer. See
  // https://tools.ietf.org/html/rfc7798#section-7.2.2, sub-clause 2.
  (*answer_params)[kH265LevelId] = H265LevelToString(answer_level);
}
#endif
// Set level according to https://tools.ietf.org/html/rfc6184#section-8.2.2.
// Precondition (debug-checked): both sides have valid profile-level-id with
// equal profiles; only the level is negotiated.
void H264GenerateProfileLevelIdForAnswer(
    const CodecParameterMap& local_supported_params,
    const CodecParameterMap& remote_offered_params,
    CodecParameterMap* answer_params) {
  // If both local and remote haven't set profile-level-id, they are both using
  // the default profile. In this case, don't set profile-level-id in answer
  // either.
  if (!local_supported_params.count(kProfileLevelId) &&
      !remote_offered_params.count(kProfileLevelId)) {
    return;
  }
  // Parse profile-level-ids.
  const absl::optional<H264ProfileLevelId> local_profile_level_id =
      ParseSdpForH264ProfileLevelId(local_supported_params);
  const absl::optional<H264ProfileLevelId> remote_profile_level_id =
      ParseSdpForH264ProfileLevelId(remote_offered_params);
  // The local and remote codec must have valid and equal H264 Profiles.
  RTC_DCHECK(local_profile_level_id);
  RTC_DCHECK(remote_profile_level_id);
  RTC_DCHECK_EQ(local_profile_level_id->profile,
                remote_profile_level_id->profile);
  // Parse level information. Asymmetry is only in effect when both sides
  // allow it.
  const bool level_asymmetry_allowed =
      IsH264LevelAsymmetryAllowed(local_supported_params) &&
      IsH264LevelAsymmetryAllowed(remote_offered_params);
  const H264Level local_level = local_profile_level_id->level;
  const H264Level remote_level = remote_profile_level_id->level;
  const H264Level min_level = H264LevelMin(local_level, remote_level);
  // Determine answer level. When level asymmetry is not allowed, level upgrade
  // is not allowed, i.e., the level in the answer must be equal to or lower
  // than the level in the offer.
  const H264Level answer_level =
      level_asymmetry_allowed ? local_level : min_level;
  // Set the resulting profile-level-id in the answer parameters.
  (*answer_params)[kProfileLevelId] = *H264ProfileLevelIdToString(
      H264ProfileLevelId(local_profile_level_id->profile, answer_level));
}
// Parses the VP8/VP9 "max-fr" fmtp parameter (maximum frame rate).
// Returns nullopt when absent or not a positive integer.
absl::optional<int> ParseSdpForVPxMaxFrameRate(
    const CodecParameterMap& params) {
  return ParsePositiveNumberFromParams(params, kVPxFmtpMaxFrameRate);
}
// Parses the VP8/VP9 "max-fs" fmtp parameter and converts it from sub-blocks
// (256 pixels each) to a total pixel count. Returns nullopt when absent or
// not a positive integer.
absl::optional<int> ParseSdpForVPxMaxFrameSize(
    const CodecParameterMap& params) {
  const absl::optional<int> sub_blocks =
      ParsePositiveNumberFromParams(params, kVPxFmtpMaxFrameSize);
  if (!sub_blocks.has_value()) {
    return absl::nullopt;
  }
  return sub_blocks.value() * kVPxFmtpFrameSizeSubBlockPixels;
}
} // namespace webrtc

View file

@ -0,0 +1,62 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
#define MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
namespace webrtc {
// Generate codec parameters that will be used as answer in an SDP negotiation
// based on local supported parameters and remote offered parameters. Both
// `local_supported_params`, `remote_offered_params`, and `answer_params`
// represent sendrecv media descriptions, i.e. they are a mix of both encode
// and decode capabilities.
//
// In theory, when the profile in `local_supported_params` represents a strict
// superset of the profile in `remote_offered_params`, we could limit the
// profile in `answer_params` to the profile in `remote_offered_params`.
// However, to simplify the code, each supported H264 profile should be listed
// explicitly in the list of local supported codecs, even if they are
// redundant. Then each local codec in the list should be tested one at a time
// against the remote codec, and only when the profiles are equal should this
// function be called. Therefore, this function does not need to handle
// profile intersection, and the profile of `local_supported_params` and
// `remote_offered_params` must be equal before calling this function.
//
// The parameters that are used when negotiating are the level part of
// profile-level-id and level-asymmetry-allowed.
void H264GenerateProfileLevelIdForAnswer(
    const CodecParameterMap& local_supported_params,
    const CodecParameterMap& remote_offered_params,
    CodecParameterMap* answer_params);
#ifdef RTC_ENABLE_H265
// Works similarly as H264GenerateProfileLevelIdForAnswer, but generates codec
// parameters that will be used as answer for H.265.
// Media configuration parameters, except level-id, must be used symmetrically.
// For level-id, the highest level indicated by the answer must not be higher
// than that indicated by the offer.
void H265GenerateProfileTierLevelForAnswer(
    const CodecParameterMap& local_supported_params,
    const CodecParameterMap& remote_offered_params,
    CodecParameterMap* answer_params);
#endif
// Parse max frame rate from SDP FMTP line. absl::nullopt is returned if the
// field is missing or not a number.
absl::optional<int> ParseSdpForVPxMaxFrameRate(const CodecParameterMap& params);
// Parse max frame size from SDP FMTP line. absl::nullopt is returned if the
// field is missing or not a number. Please note that the value is stored in
// sub-blocks in the SDP, but the returned value is in total number of pixels.
absl::optional<int> ParseSdpForVPxMaxFrameSize(const CodecParameterMap& params);
}  // namespace webrtc
#endif  // MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_

View file

@ -0,0 +1,240 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/stream_params.h"
#include <stdint.h>
#include <list>
#include "absl/algorithm/container.h"
#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"
namespace cricket {
namespace {
// Appends "ssrcs:[a,b,c]" to `sb`.
void AppendSsrcs(rtc::ArrayView<const uint32_t> ssrcs,
                 rtc::SimpleStringBuilder* sb) {
  *sb << "ssrcs:[";
  bool first = true;
  for (uint32_t ssrc : ssrcs) {
    if (!first) {
      *sb << ",";
    }
    *sb << ssrc;
    first = false;
  }
  *sb << "]";
}
// Appends "ssrc_groups:{...},{...}" (comma separated, no brackets) to `sb`.
void AppendSsrcGroups(rtc::ArrayView<const SsrcGroup> ssrc_groups,
                      rtc::SimpleStringBuilder* sb) {
  *sb << "ssrc_groups:";
  bool first = true;
  for (const SsrcGroup& group : ssrc_groups) {
    if (!first) {
      *sb << ",";
    }
    *sb << group.ToString();
    first = false;
  }
}
// Appends "stream_ids:a,b,c" (comma separated, no brackets) to `sb`.
void AppendStreamIds(rtc::ArrayView<const std::string> stream_ids,
                     rtc::SimpleStringBuilder* sb) {
  *sb << "stream_ids:";
  bool first = true;
  for (const std::string& stream_id : stream_ids) {
    if (!first) {
      *sb << ",";
    }
    *sb << stream_id;
    first = false;
  }
}
// Appends "rids:[a,b,c]" to `sb`.
void AppendRids(rtc::ArrayView<const RidDescription> rids,
                rtc::SimpleStringBuilder* sb) {
  *sb << "rids:[";
  bool first = true;
  for (const RidDescription& rid_description : rids) {
    if (!first) {
      *sb << ",";
    }
    *sb << rid_description.rid;
    first = false;
  }
  *sb << "]";
}
} // namespace
// Group semantics strings used in the SDP "a=ssrc-group" attribute.
const char kFecSsrcGroupSemantics[] = "FEC";
const char kFecFrSsrcGroupSemantics[] = "FEC-FR";
const char kFidSsrcGroupSemantics[] = "FID";
const char kSimSsrcGroupSemantics[] = "SIM";
bool GetStream(const StreamParamsVec& streams,
const StreamSelector& selector,
StreamParams* stream_out) {
const StreamParams* found = GetStream(streams, selector);
if (found && stream_out)
*stream_out = *found;
return found != nullptr;
}
// SsrcGroup: value constructor plus defaulted copy/move special members,
// defined out of line.
SsrcGroup::SsrcGroup(const std::string& usage,
                     const std::vector<uint32_t>& ssrcs)
    : semantics(usage), ssrcs(ssrcs) {}
SsrcGroup::SsrcGroup(const SsrcGroup&) = default;
SsrcGroup::SsrcGroup(SsrcGroup&&) = default;
SsrcGroup::~SsrcGroup() = default;
SsrcGroup& SsrcGroup::operator=(const SsrcGroup&) = default;
SsrcGroup& SsrcGroup::operator=(SsrcGroup&&) = default;
// True when this group has the requested semantics AND actually contains
// SSRCs; an empty group never matches.
bool SsrcGroup::has_semantics(const std::string& semantics_in) const {
  return semantics == semantics_in && !ssrcs.empty();
}
std::string SsrcGroup::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder sb(buf);
sb << "{";
sb << "semantics:" << semantics << ";";
AppendSsrcs(ssrcs, &sb);
sb << "}";
return sb.str();
}
// StreamParams special members are all defaulted; defined out of line.
StreamParams::StreamParams() = default;
StreamParams::StreamParams(const StreamParams&) = default;
StreamParams::StreamParams(StreamParams&&) = default;
StreamParams::~StreamParams() = default;
StreamParams& StreamParams::operator=(const StreamParams&) = default;
StreamParams& StreamParams::operator=(StreamParams&&) = default;
// Field-wise equality; RIDs are compared as unordered collections.
bool StreamParams::operator==(const StreamParams& other) const {
  if (id != other.id || cname != other.cname) {
    return false;
  }
  if (ssrcs != other.ssrcs || ssrc_groups != other.ssrc_groups) {
    return false;
  }
  if (stream_ids_ != other.stream_ids_) {
    return false;
  }
  // RIDs are not required to be in the same order for equality.
  return absl::c_is_permutation(rids_, other.rids_);
}
std::string StreamParams::ToString() const {
char buf[2 * 1024];
rtc::SimpleStringBuilder sb(buf);
sb << "{";
if (!id.empty()) {
sb << "id:" << id << ";";
}
AppendSsrcs(ssrcs, &sb);
sb << ";";
AppendSsrcGroups(ssrc_groups, &sb);
sb << ";";
if (!cname.empty()) {
sb << "cname:" << cname << ";";
}
AppendStreamIds(stream_ids_, &sb);
sb << ";";
if (!rids_.empty()) {
AppendRids(rids_, &sb);
sb << ";";
}
sb << "}";
return sb.str();
}
// Populates this StreamParams with `num_layers` freshly generated primary
// SSRCs, adds a SIM ssrc-group when there is more than one layer, and
// optionally generates one FID and/or FEC-FR secondary SSRC per primary.
void StreamParams::GenerateSsrcs(int num_layers,
                                 bool generate_fid,
                                 bool generate_fec_fr,
                                 rtc::UniqueRandomIdGenerator* ssrc_generator) {
  RTC_DCHECK_GE(num_layers, 0);
  RTC_DCHECK(ssrc_generator);
  std::vector<uint32_t> primary_ssrcs;
  for (int i = 0; i < num_layers; ++i) {
    uint32_t ssrc = ssrc_generator->GenerateId();
    primary_ssrcs.push_back(ssrc);
    add_ssrc(ssrc);
  }
  // A SIM group is only meaningful with multiple layers.
  if (num_layers > 1) {
    SsrcGroup simulcast(kSimSsrcGroupSemantics, primary_ssrcs);
    ssrc_groups.push_back(simulcast);
  }
  if (generate_fid) {
    for (uint32_t ssrc : primary_ssrcs) {
      AddFidSsrc(ssrc, ssrc_generator->GenerateId());
    }
  }
  if (generate_fec_fr) {
    for (uint32_t ssrc : primary_ssrcs) {
      AddFecFrSsrc(ssrc, ssrc_generator->GenerateId());
    }
  }
}
// Appends this stream's primary SSRCs to `ssrcs`: every member of the SIM
// group when one exists, otherwise just the first SSRC (0 when the stream has
// no SSRCs at all, per first_ssrc()).
void StreamParams::GetPrimarySsrcs(std::vector<uint32_t>* ssrcs) const {
  const SsrcGroup* sim_group = get_ssrc_group(kSimSsrcGroupSemantics);
  // Use nullptr rather than the NULL macro, matching modern C++ style.
  if (sim_group == nullptr) {
    ssrcs->push_back(first_ssrc());
  } else {
    ssrcs->insert(ssrcs->end(), sim_group->ssrcs.begin(),
                  sim_group->ssrcs.end());
  }
}
// For each primary ssrc that has a secondary ssrc under `semantics`, appends
// that secondary to `secondary_ssrcs`; primaries without one are skipped.
void StreamParams::GetSecondarySsrcs(
    const std::string& semantics,
    const std::vector<uint32_t>& primary_ssrcs,
    std::vector<uint32_t>* secondary_ssrcs) const {
  for (uint32_t primary : primary_ssrcs) {
    uint32_t secondary;
    if (GetSecondarySsrc(semantics, primary, &secondary)) {
      secondary_ssrcs->push_back(secondary);
    }
  }
}
// Convenience wrapper: FID ssrcs are the secondary ssrcs under the "FID"
// group semantics.
void StreamParams::GetFidSsrcs(const std::vector<uint32_t>& primary_ssrcs,
                               std::vector<uint32_t>* fid_ssrcs) const {
  GetSecondarySsrcs(kFidSsrcGroupSemantics, primary_ssrcs, fid_ssrcs);
}
bool StreamParams::AddSecondarySsrc(const std::string& semantics,
uint32_t primary_ssrc,
uint32_t secondary_ssrc) {
if (!has_ssrc(primary_ssrc)) {
return false;
}
ssrcs.push_back(secondary_ssrc);
ssrc_groups.push_back(SsrcGroup(semantics, {primary_ssrc, secondary_ssrc}));
return true;
}
// Finds the secondary ssrc paired with `primary_ssrc` under `semantics`:
// looks for a group of that semantics with at least two ssrcs whose first
// entry is the primary; the second entry is the secondary.
bool StreamParams::GetSecondarySsrc(const std::string& semantics,
                                    uint32_t primary_ssrc,
                                    uint32_t* secondary_ssrc) const {
  for (const SsrcGroup& group : ssrc_groups) {
    if (!group.has_semantics(semantics) || group.ssrcs.size() < 2) {
      continue;
    }
    if (group.ssrcs[0] != primary_ssrc) {
      continue;
    }
    *secondary_ssrc = group.ssrcs[1];
    return true;
  }
  return false;
}
// Returns a copy of all associated MediaStream ids.
std::vector<std::string> StreamParams::stream_ids() const {
  return stream_ids_;
}
// Replaces the associated MediaStream ids.
void StreamParams::set_stream_ids(const std::vector<std::string>& stream_ids) {
  stream_ids_ = stream_ids;
}
// Returns the first stream id, or the empty string when none are set.
std::string StreamParams::first_stream_id() const {
  if (stream_ids_.empty()) {
    return "";
  }
  return stream_ids_.front();
}
} // namespace cricket

View file

@ -0,0 +1,321 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains structures for describing SSRCs from a media source such
// as a MediaStreamTrack when it is sent across an RTP session. Multiple media
// sources may be sent across the same RTP session, each of them will be
// described by one StreamParams object
// SsrcGroup is used to describe the relationship between the SSRCs that
// are used for this media source.
// E.g.: Consider a source that is sent as 3 simulcast streams
// Let the simulcast elements have SSRC 10, 20, 30.
// Let each simulcast element use FEC and let the protection packets have
// SSRC 11,21,31.
// To describe this, 4 SsrcGroups are needed,
// StreamParams would then contain ssrc = {10,11,20,21,30,31} and
// ssrc_groups =
//     {{SIM,{10,20,30}}, {FEC,{10,11}}, {FEC,{20,21}}, {FEC,{30,31}}}
// Please see RFC 5576.
// A spec-compliant way to achieve this is to use RIDs and Simulcast attribute
// instead of the ssrc-group. In this method, the StreamParam object will
// have multiple RidDescriptions, each corresponding to a simulcast layer
// and the media section will have a simulcast attribute that indicates
// that these layers are for the same source. This also removes the extra
// lines for redundancy streams, as the same RIDs appear in the redundancy
// packets.
// Note: in the spec compliant simulcast scenario, some of the RIDs might be
// alternatives for one another (such as different encodings for same data).
// In the context of the StreamParams class, the notion of alternatives does
// not exist and all the RIDs will describe different layers of the same source.
// When the StreamParams class is used to configure the media engine, simulcast
// considerations will be used to remove the alternative layers outside of this
// class.
// As an example, let the simulcast layers have RID 10, 20, 30.
// StreamParams would contain rid = { 10, 20, 30 }.
// MediaSection would contain SimulcastDescription specifying these rids.
// a=simulcast:send 10;20;30 (or a=simulcast:send 10,20;30 or similar).
// See https://tools.ietf.org/html/draft-ietf-mmusic-sdp-simulcast-13
// and https://tools.ietf.org/html/draft-ietf-mmusic-rid-15.
#ifndef MEDIA_BASE_STREAM_PARAMS_H_
#define MEDIA_BASE_STREAM_PARAMS_H_
#include <stddef.h>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "media/base/rid_description.h"
#include "rtc_base/unique_id_generator.h"
namespace cricket {
extern const char kFecSsrcGroupSemantics[];
extern const char kFecFrSsrcGroupSemantics[];
extern const char kFidSsrcGroupSemantics[];
extern const char kSimSsrcGroupSemantics[];
// A named group of SSRCs, mirroring the SDP "a=ssrc-group" attribute: the
// `semantics` string states how the grouped `ssrcs` relate (see the constants
// above and RFC 5576).
struct SsrcGroup {
  SsrcGroup(const std::string& usage, const std::vector<uint32_t>& ssrcs);
  SsrcGroup(const SsrcGroup&);
  SsrcGroup(SsrcGroup&&);
  ~SsrcGroup();
  SsrcGroup& operator=(const SsrcGroup&);
  SsrcGroup& operator=(SsrcGroup&&);
  bool operator==(const SsrcGroup& other) const {
    return (semantics == other.semantics && ssrcs == other.ssrcs);
  }
  bool operator!=(const SsrcGroup& other) const { return !(*this == other); }
  // True when this group has the given semantics and is non-empty.
  bool has_semantics(const std::string& semantics) const;
  // Debug representation, e.g. "{semantics:FID;ssrcs:[1,2]}".
  std::string ToString() const;
  std::string semantics;        // e.g. FID, FEC, SIM.
  std::vector<uint32_t> ssrcs;  // SSRCs of this type.
};
// StreamParams is used to represent a sender/track in a SessionDescription.
// In Plan B, this means that multiple StreamParams can exist within one
// MediaContentDescription, while in UnifiedPlan this means that there is one
// StreamParams per MediaContentDescription.
struct StreamParams {
  StreamParams();
  StreamParams(const StreamParams&);
  StreamParams(StreamParams&&);
  ~StreamParams();
  StreamParams& operator=(const StreamParams&);
  StreamParams& operator=(StreamParams&&);
  // Creates a StreamParams carrying only a single SSRC and no other metadata.
  static StreamParams CreateLegacy(uint32_t ssrc) {
    StreamParams stream;
    stream.ssrcs.push_back(ssrc);
    return stream;
  }
  bool operator==(const StreamParams& other) const;
  bool operator!=(const StreamParams& other) const { return !(*this == other); }
  // Returns the first SSRC, or 0 if this stream has none.
  uint32_t first_ssrc() const {
    if (ssrcs.empty()) {
      return 0;
    }
    return ssrcs[0];
  }
  bool has_ssrcs() const { return !ssrcs.empty(); }
  bool has_ssrc(uint32_t ssrc) const {
    return absl::c_linear_search(ssrcs, ssrc);
  }
  void add_ssrc(uint32_t ssrc) { ssrcs.push_back(ssrc); }
  bool has_ssrc_groups() const { return !ssrc_groups.empty(); }
  bool has_ssrc_group(const std::string& semantics) const {
    return (get_ssrc_group(semantics) != nullptr);
  }
  // Returns the first SSRC group with the given semantics (e.g. "FID", "SIM"),
  // or nullptr if no such group exists.
  const SsrcGroup* get_ssrc_group(const std::string& semantics) const {
    for (const SsrcGroup& ssrc_group : ssrc_groups) {
      if (ssrc_group.has_semantics(semantics)) {
        return &ssrc_group;
      }
    }
    return nullptr;
  }
  // Convenience function to add an FID ssrc for a primary_ssrc
  // that's already been added.
  bool AddFidSsrc(uint32_t primary_ssrc, uint32_t fid_ssrc) {
    return AddSecondarySsrc(kFidSsrcGroupSemantics, primary_ssrc, fid_ssrc);
  }
  // Convenience function to lookup the FID ssrc for a primary_ssrc.
  // Returns false if primary_ssrc not found or FID not defined for it.
  bool GetFidSsrc(uint32_t primary_ssrc, uint32_t* fid_ssrc) const {
    return GetSecondarySsrc(kFidSsrcGroupSemantics, primary_ssrc, fid_ssrc);
  }
  // Convenience function to add an FEC-FR ssrc for a primary_ssrc
  // that's already been added.
  bool AddFecFrSsrc(uint32_t primary_ssrc, uint32_t fecfr_ssrc) {
    return AddSecondarySsrc(kFecFrSsrcGroupSemantics, primary_ssrc, fecfr_ssrc);
  }
  // Convenience function to lookup the FEC-FR ssrc for a primary_ssrc.
  // Returns false if primary_ssrc not found or FEC-FR not defined for it.
  bool GetFecFrSsrc(uint32_t primary_ssrc, uint32_t* fecfr_ssrc) const {
    return GetSecondarySsrc(kFecFrSsrcGroupSemantics, primary_ssrc, fecfr_ssrc);
  }
  // Convenience function to populate the StreamParams with the requested number
  // of SSRCs along with accompanying FID and FEC-FR ssrcs if requested.
  // SSRCs are generated using the given generator.
  void GenerateSsrcs(int num_layers,
                     bool generate_fid,
                     bool generate_fec_fr,
                     rtc::UniqueRandomIdGenerator* ssrc_generator);
  // Convenience to get all the SIM SSRCs if there are SIM ssrcs, or
  // the first SSRC otherwise.
  void GetPrimarySsrcs(std::vector<uint32_t>* ssrcs) const;
  // Convenience to get all the secondary SSRCs for the given primary ssrcs
  // of a particular semantic.
  // If a given primary SSRC does not have a secondary SSRC, the list of
  // secondary SSRCS will be smaller than the list of primary SSRCs.
  void GetSecondarySsrcs(const std::string& semantic,
                         const std::vector<uint32_t>& primary_ssrcs,
                         std::vector<uint32_t>* fid_ssrcs) const;
  // Convenience to get all the FID SSRCs for the given primary ssrcs.
  // If a given primary SSRC does not have a FID SSRC, the list of FID
  // SSRCS will be smaller than the list of primary SSRCs.
  void GetFidSsrcs(const std::vector<uint32_t>& primary_ssrcs,
                   std::vector<uint32_t>* fid_ssrcs) const;
  // Stream ids serialized to SDP.
  std::vector<std::string> stream_ids() const;
  void set_stream_ids(const std::vector<std::string>& stream_ids);
  // Returns the first stream id or "" if none exist. This method exists only
  // as temporary backwards compatibility with the old sync_label.
  std::string first_stream_id() const;
  std::string ToString() const;
  // A unique identifier of the StreamParams object. When the SDP is created,
  // this comes from the track ID of the sender that the StreamParams object
  // is associated with.
  std::string id;
  // There may be no SSRCs stored in unsignaled case when stream_ids are
  // signaled with a=msid lines.
  std::vector<uint32_t> ssrcs;         // All SSRCs for this source
  std::vector<SsrcGroup> ssrc_groups;  // e.g. FID, FEC, SIM
  std::string cname;                   // RTCP CNAME
  // RID functionality according to
  // https://tools.ietf.org/html/draft-ietf-mmusic-rid-15
  // Each layer can be represented by a RID identifier and can also have
  // restrictions (such as max-width, max-height, etc.)
  // If the track has multiple layers (ex. Simulcast), each layer will be
  // represented by a RID.
  bool has_rids() const { return !rids_.empty(); }
  const std::vector<RidDescription>& rids() const { return rids_; }
  void set_rids(const std::vector<RidDescription>& rids) { rids_ = rids; }

 private:
  bool AddSecondarySsrc(const std::string& semantics,
                        uint32_t primary_ssrc,
                        uint32_t secondary_ssrc);
  bool GetSecondarySsrc(const std::string& semantics,
                        uint32_t primary_ssrc,
                        uint32_t* secondary_ssrc) const;
  // The stream IDs of the sender that the StreamParams object is associated
  // with. In Plan B this should always be size of 1, while in Unified Plan this
  // could be none or multiple stream IDs.
  std::vector<std::string> stream_ids_;
  std::vector<RidDescription> rids_;
};
// A Stream can be selected by either id or ssrc.
struct StreamSelector {
  explicit StreamSelector(uint32_t ssrc) : ssrc(ssrc) {}
  explicit StreamSelector(const std::string& streamid)
      : ssrc(0), streamid(streamid) {}
  // Matches by stream id when no SSRC was supplied (ssrc == 0), otherwise by
  // SSRC membership.
  bool Matches(const StreamParams& stream) const {
    return ssrc == 0 ? (stream.id == streamid) : stream.has_ssrc(ssrc);
  }
  uint32_t ssrc;
  std::string streamid;
};
typedef std::vector<StreamParams> StreamParamsVec;
// Returns the first stream satisfying `condition`, or nullptr if none does.
template <class Condition>
const StreamParams* GetStream(const StreamParamsVec& streams,
                              Condition condition) {
  auto it = absl::c_find_if(streams, condition);
  if (it == streams.end()) {
    return nullptr;
  }
  return &(*it);
}
// Mutable overload: returns the first stream satisfying `condition`, or
// nullptr if none does.
template <class Condition>
StreamParams* GetStream(StreamParamsVec& streams, Condition condition) {
  auto it = absl::c_find_if(streams, condition);
  if (it == streams.end()) {
    return nullptr;
  }
  return &(*it);
}
inline bool HasStreamWithNoSsrcs(const StreamParamsVec& streams) {
return GetStream(streams,
[](const StreamParams& sp) { return !sp.has_ssrcs(); });
}
// Returns the first stream containing `ssrc`, or nullptr.
inline const StreamParams* GetStreamBySsrc(const StreamParamsVec& streams,
                                           uint32_t ssrc) {
  auto matches_ssrc = [ssrc](const StreamParams& sp) {
    return sp.has_ssrc(ssrc);
  };
  return GetStream(streams, matches_ssrc);
}
// Returns the first stream whose id equals `id`, or nullptr.
inline const StreamParams* GetStreamByIds(const StreamParamsVec& streams,
                                          const std::string& id) {
  auto matches_id = [&id](const StreamParams& sp) { return sp.id == id; };
  return GetStream(streams, matches_id);
}
// Mutable overload: returns the first stream whose id equals `id`, or nullptr.
inline StreamParams* GetStreamByIds(StreamParamsVec& streams,
                                    const std::string& id) {
  auto matches_id = [&id](const StreamParams& sp) { return sp.id == id; };
  return GetStream(streams, matches_id);
}
// Returns the first stream matched by `selector` (by id or by SSRC), or
// nullptr.
inline const StreamParams* GetStream(const StreamParamsVec& streams,
                                     const StreamSelector& selector) {
  auto matches = [&selector](const StreamParams& sp) {
    return selector.Matches(sp);
  };
  return GetStream(streams, matches);
}
// Removes every stream satisfying `condition`. Returns true if at least one
// stream was removed.
template <class Condition>
bool RemoveStream(StreamParamsVec* streams, Condition condition) {
  auto first_removed =
      std::remove_if(streams->begin(), streams->end(), condition);
  const bool removed_any = first_removed != streams->end();
  // erase(end, end) is a no-op, so this is safe when nothing matched.
  streams->erase(first_removed, streams->end());
  return removed_any;
}
// Removes the stream(s) matched by `selector` from `streams`. Returns true if
// a stream is found and removed.
inline bool RemoveStream(StreamParamsVec* streams,
                         const StreamSelector& selector) {
  auto matches = [&selector](const StreamParams& sp) {
    return selector.Matches(sp);
  };
  return RemoveStream(streams, matches);
}
// Removes the stream(s) containing `ssrc`. Returns true if any was removed.
inline bool RemoveStreamBySsrc(StreamParamsVec* streams, uint32_t ssrc) {
  auto matches_ssrc = [ssrc](const StreamParams& sp) {
    return sp.has_ssrc(ssrc);
  };
  return RemoveStream(streams, matches_ssrc);
}
// Removes the stream(s) whose id equals `id`. Returns true if any was removed.
inline bool RemoveStreamByIds(StreamParamsVec* streams, const std::string& id) {
  auto matches_id = [&id](const StreamParams& sp) { return sp.id == id; };
  return RemoveStream(streams, matches_id);
}
} // namespace cricket
#endif // MEDIA_BASE_STREAM_PARAMS_H_

View file

@ -0,0 +1,55 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/test_utils.h"
#include <cstdint>
#include "api/video/video_frame.h"
#include "api/video/video_source_interface.h"
namespace cricket {
// Builds a simulcast StreamParams: all `ssrcs` plus one SIM ssrc-group
// covering them, with the given RTCP `cname`.
cricket::StreamParams CreateSimStreamParams(
    const std::string& cname,
    const std::vector<uint32_t>& ssrcs) {
  cricket::StreamParams stream_params;
  stream_params.ssrcs = ssrcs;
  stream_params.ssrc_groups.push_back(
      cricket::SsrcGroup(cricket::kSimSsrcGroupSemantics, ssrcs));
  stream_params.cname = cname;
  return stream_params;
}
// Builds a simulcast StreamParams and pairs each primary SSRC with its RTX
// SSRC via an FID group. There should be an rtx_ssrc per ssrc; the loop is
// bounded by both vector sizes so a short `rtx_ssrcs` cannot cause an
// out-of-range read (previously `rtx_ssrcs[i]` was indexed by `ssrcs` size
// alone).
cricket::StreamParams CreateSimWithRtxStreamParams(
    const std::string& cname,
    const std::vector<uint32_t>& ssrcs,
    const std::vector<uint32_t>& rtx_ssrcs) {
  cricket::StreamParams sp = CreateSimStreamParams(cname, ssrcs);
  for (size_t i = 0; i < ssrcs.size() && i < rtx_ssrcs.size(); ++i) {
    sp.AddFidSsrc(ssrcs[i], rtx_ssrcs[i]);
  }
  return sp;
}
// Builds a StreamParams with one primary SSRC plus its FlexFEC SSRC, linked
// through an FEC-FR group. There should be one fec ssrc per ssrc.
cricket::StreamParams CreatePrimaryWithFecFrStreamParams(
    const std::string& cname,
    uint32_t primary_ssrc,
    uint32_t flexfec_ssrc) {
  cricket::StreamParams stream_params;
  stream_params.cname = cname;
  stream_params.ssrcs = {primary_ssrc};
  stream_params.AddFecFrSsrc(primary_ssrc, flexfec_ssrc);
  return stream_params;
}
} // namespace cricket

View file

@ -0,0 +1,56 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_TEST_UTILS_H_
#define MEDIA_BASE_TEST_UTILS_H_
#include <string>
#include <vector>
#include "media/base/media_channel.h"
#include "media/base/video_common.h"
#include "rtc_base/arraysize.h"
namespace webrtc {
class VideoFrame;
}
namespace cricket {
// Returns size of 420 image with rounding on chroma for odd sizes.
// Arguments are fully parenthesized so expressions such as
// I420_SIZE(w / 2, h + 1) expand with the intended precedence (the previous
// definition expanded `w * h` unparenthesized).
#define I420_SIZE(w, h) \
  ((w) * (h) + ((((w) + 1) / 2) * (((h) + 1) / 2)) * 2)
// Returns size of ARGB image (4 bytes per pixel).
#define ARGB_SIZE(w, h) ((w) * (h) * 4)
// Copies the first `s` elements of C array `a` into a std::vector.
template <class T>
inline std::vector<T> MakeVector(const T a[], size_t s) {
  std::vector<T> result;
  result.assign(a, a + s);
  return result;
}
#define MAKE_VECTOR(a) cricket::MakeVector(a, arraysize(a))
// Create Simulcast StreamParams with given `ssrcs` and `cname`.
cricket::StreamParams CreateSimStreamParams(const std::string& cname,
const std::vector<uint32_t>& ssrcs);
// Create Simulcast stream with given `ssrcs` and `rtx_ssrcs`.
// The number of `rtx_ssrcs` must match number of `ssrcs`.
cricket::StreamParams CreateSimWithRtxStreamParams(
const std::string& cname,
const std::vector<uint32_t>& ssrcs,
const std::vector<uint32_t>& rtx_ssrcs);
// Create StreamParams with single primary SSRC and corresponding FlexFEC SSRC.
cricket::StreamParams CreatePrimaryWithFecFrStreamParams(
const std::string& cname,
uint32_t primary_ssrc,
uint32_t flexfec_ssrc);
} // namespace cricket
#endif // MEDIA_BASE_TEST_UTILS_H_

View file

@ -0,0 +1,126 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/turn_utils.h"
#include "api/transport/stun.h"
#include "rtc_base/byte_order.h"
namespace cricket {
namespace {
// TURN ChannelData messages carry a 4-byte header (channel number + length).
const size_t kTurnChannelHeaderLength = 4;

// A TURN ChannelData message is identified by its two most significant bits
// being 0b01 (first byte in [0x40, 0x7F]); it must also be large enough to
// hold the channel header.
bool IsTurnChannelData(const uint8_t* data, size_t length) {
  if (length < kTurnChannelHeaderLength) {
    return false;
  }
  return (data[0] & 0xC0) == 0x40;
}
// A packet is a TURN Send Indication when it is at least a STUN header long
// and its 16-bit message-type field equals TURN_SEND_INDICATION.
bool IsTurnSendIndicationPacket(const uint8_t* data, size_t length) {
  if (length < kStunHeaderSize) {
    return false;
  }
  return rtc::GetBE16(data) == TURN_SEND_INDICATION;
}
} // namespace
// Locates the application payload within `packet`:
// - TURN ChannelData message: payload starts after the 4-byte channel header.
// - TURN Send Indication: payload is the value of the STUN DATA attribute.
// - Anything else is treated as a non-TURN packet and returned whole.
// On success, `*content_position` / `*content_size` describe the payload and
// true is returned. Returns false if a TURN-looking packet is malformed
// (inconsistent lengths, or a Send Indication without a complete DATA
// attribute).
bool UnwrapTurnPacket(const uint8_t* packet,
                      size_t packet_size,
                      size_t* content_position,
                      size_t* content_size) {
  if (IsTurnChannelData(packet, packet_size)) {
    // Turn Channel Message header format.
    //   0                   1                   2                   3
    //   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |         Channel Number        |            Length             |
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    //  |                                                               |
    //  /                       Application Data                        /
    //  /                                                               /
    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // The declared payload length must fit inside the packet.
    size_t length = rtc::GetBE16(&packet[2]);
    if (length + kTurnChannelHeaderLength > packet_size) {
      return false;
    }
    *content_position = kTurnChannelHeaderLength;
    *content_size = length;
    return true;
  }
  if (IsTurnSendIndicationPacket(packet, packet_size)) {
    // Validate STUN message length (header length field excludes the 20-byte
    // header itself and must account for exactly the rest of the packet).
    const size_t stun_message_length = rtc::GetBE16(&packet[2]);
    if (stun_message_length + kStunHeaderSize != packet_size) {
      return false;
    }
    // First skip mandatory stun header which is of 20 bytes.
    size_t pos = kStunHeaderSize;
    // Loop through STUN attributes until we find STUN DATA attribute.
    while (pos < packet_size) {
      // Keep reading STUN attributes until we hit DATA attribute.
      // Attribute will be a TLV structure.
      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
      // |         Type                  |            Length             |
      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
      // |                         Value (variable)                ....
      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
      // The value in the length field MUST contain the length of the Value
      // part of the attribute, prior to padding, measured in bytes.  Since
      // STUN aligns attributes on 32-bit boundaries, attributes whose content
      // is not a multiple of 4 bytes are padded with 1, 2, or 3 bytes of
      // padding so that its value contains a multiple of 4 bytes.  The
      // padding bits are ignored, and may be any value.
      uint16_t attr_type, attr_length;
      const int kAttrHeaderLength = sizeof(attr_type) + sizeof(attr_length);
      // The 4-byte TLV header must fit in the remaining bytes.
      if (packet_size < pos + kAttrHeaderLength) {
        return false;
      }
      // Getting attribute type and length.
      attr_type = rtc::GetBE16(&packet[pos]);
      attr_length = rtc::GetBE16(&packet[pos + sizeof(attr_type)]);
      pos += kAttrHeaderLength;  // Skip STUN_DATA_ATTR header.
      // Checking for bogus attribute length.
      if (pos + attr_length > packet_size) {
        return false;
      }
      if (attr_type == STUN_ATTR_DATA) {
        *content_position = pos;
        *content_size = attr_length;
        return true;
      }
      pos += attr_length;
      // Skip the padding that realigns the next attribute to 4 bytes.
      if ((attr_length % 4) != 0) {
        pos += (4 - (attr_length % 4));
      }
    }
    // There is no data attribute present in the message.
    return false;
  }
  // This is not a TURN packet.
  *content_position = 0;
  *content_size = packet_size;
  return true;
}
} // namespace cricket

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_TURN_UTILS_H_
#define MEDIA_BASE_TURN_UTILS_H_
#include <cstddef>
#include <cstdint>
#include "rtc_base/system/rtc_export.h"
namespace cricket {
// Finds the application-data location within a TURN Channel Message or TURN
// Send Indication message. On success, `*content_position` is the byte offset
// of the payload inside `packet` and `*content_size` its length; packets that
// are not TURN messages pass through whole (offset 0, full size). Returns
// false if a TURN-looking packet is malformed.
bool RTC_EXPORT UnwrapTurnPacket(const uint8_t* packet,
                                 size_t packet_size,
                                 size_t* content_position,
                                 size_t* content_size);
} // namespace cricket
#endif // MEDIA_BASE_TURN_UTILS_H_

View file

@ -0,0 +1,470 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_adapter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <utility>
#include "absl/types/optional.h"
#include "media/base/video_common.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/field_trial.h"
namespace {
// A rational scale factor numerator / denominator applied to both dimensions
// of a frame.
struct Fraction {
  int numerator;
  int denominator;

  // Reduces the fraction to lowest terms.
  void DivideByGcd() {
    int g = cricket::GreatestCommonDivisor(numerator, denominator);
    numerator /= g;
    denominator /= g;
  }

  // Determines number of output pixels if both width and height of an input of
  // `input_pixels` pixels is scaled with the fraction numerator / denominator.
  // Intermediate math is done in int64 to avoid overflow before the division.
  int scale_pixel_count(int input_pixels) {
    return (numerator * numerator * static_cast<int64_t>(input_pixels)) /
           (denominator * denominator);
  }
};
// Rounds `value_to_round` up to the nearest multiple of `multiple`. If that
// exceeds `max_value`, returns the largest multiple of `multiple` that does
// not exceed `max_value` instead.
int roundUp(int value_to_round, int multiple, int max_value) {
  const int quotient = (value_to_round + multiple - 1) / multiple;
  const int rounded_value = quotient * multiple;
  if (rounded_value > max_value) {
    return (max_value / multiple) * multiple;
  }
  return rounded_value;
}
// Generates a scale factor that makes `input_pixels` close to `target_pixels`,
// but no higher than `max_pixels`. Candidate scales are produced by repeatedly
// multiplying by 3/4 and 2/3, and the candidate whose output pixel count is
// closest to the target (while within the max) wins.
Fraction FindScale(int input_width,
                   int input_height,
                   int target_pixels,
                   int max_pixels,
                   bool variable_start_scale_factor) {
  // This function only makes sense for a positive target.
  RTC_DCHECK_GT(target_pixels, 0);
  RTC_DCHECK_GT(max_pixels, 0);
  RTC_DCHECK_GE(max_pixels, target_pixels);
  const int input_pixels = input_width * input_height;
  // Don't scale up original.
  if (target_pixels >= input_pixels)
    return Fraction{1, 1};
  Fraction current_scale = Fraction{1, 1};
  Fraction best_scale = Fraction{1, 1};
  if (variable_start_scale_factor) {
    // Start scaling down by 2/3 depending on `input_width` and `input_height`.
    if (input_width % 3 == 0 && input_height % 3 == 0) {
      // 2/3 (then alternates 3/4, 2/3, 3/4,...).
      // {6, 6} equals 1, but its numerator is divisible by 3 and denominator
      // by 2, which makes the loop below take a 2/3 step first.
      current_scale = Fraction{6, 6};
    }
    if (input_width % 9 == 0 && input_height % 9 == 0) {
      // 2/3, 2/3 (then alternates 3/4, 2/3, 3/4,...).
      current_scale = Fraction{36, 36};
    }
  }
  // The minimum (absolute) difference between the number of output pixels and
  // the target pixel count.
  int min_pixel_diff = std::numeric_limits<int>::max();
  if (input_pixels <= max_pixels) {
    // Start condition for 1/1 case, if it is less than max.
    min_pixel_diff = std::abs(input_pixels - target_pixels);
  }
  // Alternately scale down by 3/4 and 2/3. This results in fractions which are
  // effectively scalable. For instance, starting at 1280x720 will result in
  // the series (3/4) => 960x540, (1/2) => 640x360, (3/8) => 480x270,
  // (1/4) => 320x180, (3/16) => 240x125, (1/8) => 160x90.
  while (current_scale.scale_pixel_count(input_pixels) > target_pixels) {
    if (current_scale.numerator % 3 == 0 &&
        current_scale.denominator % 2 == 0) {
      // Multiply by 2/3.
      current_scale.numerator /= 3;
      current_scale.denominator /= 2;
    } else {
      // Multiply by 3/4.
      current_scale.numerator *= 3;
      current_scale.denominator *= 4;
    }
    int output_pixels = current_scale.scale_pixel_count(input_pixels);
    if (output_pixels <= max_pixels) {
      int diff = std::abs(target_pixels - output_pixels);
      if (diff < min_pixel_diff) {
        min_pixel_diff = diff;
        best_scale = current_scale;
      }
    }
  }
  best_scale.DivideByGcd();
  return best_scale;
}
// Returns `in` with its two elements exchanged; propagates nullopt.
absl::optional<std::pair<int, int>> Swap(
    const absl::optional<std::pair<int, int>>& in) {
  if (in.has_value()) {
    return std::make_pair(in->second, in->first);
  }
  return absl::nullopt;
}
} // namespace
namespace cricket {
// `source_resolution_alignment` is the divisibility the source requires of
// output width/height; the effective alignment may later grow via OnSinkWants.
VideoAdapter::VideoAdapter(int source_resolution_alignment)
    : frames_in_(0),
      frames_out_(0),
      frames_scaled_(0),
      adaption_changes_(0),
      previous_width_(0),
      previous_height_(0),
      // Field-trial kill switch: variable start scale is on unless disabled.
      variable_start_scale_factor_(!webrtc::field_trial::IsDisabled(
          "WebRTC-Video-VariableStartScaleFactor")),
      source_resolution_alignment_(source_resolution_alignment),
      resolution_alignment_(source_resolution_alignment),
      // "Unrestricted" defaults until a sink or format request says otherwise.
      resolution_request_target_pixel_count_(std::numeric_limits<int>::max()),
      resolution_request_max_pixel_count_(std::numeric_limits<int>::max()),
      max_framerate_request_(std::numeric_limits<int>::max()) {}

// Default: no alignment requirement beyond 1 pixel.
VideoAdapter::VideoAdapter() : VideoAdapter(1) {}

VideoAdapter::~VideoAdapter() {}
// Decides whether the frame with timestamp `in_timestamp_ns` must be dropped
// to honor the current frame-rate limit, which is the smaller of the sink's
// `max_framerate_request_` and the optional `output_format_request_.max_fps`.
bool VideoAdapter::DropFrame(int64_t in_timestamp_ns) {
  const int effective_max_fps =
      std::min(max_framerate_request_,
               output_format_request_.max_fps.value_or(max_framerate_request_));
  framerate_controller_.SetMaxFramerate(effective_max_fps);
  return framerate_controller_.ShouldDropFrame(in_timestamp_ns);
}
// Computes crop and output dimensions for an incoming `in_width` x `in_height`
// frame, honoring the current aspect-ratio, pixel-count, alignment and
// frame-rate constraints. Returns false when the frame should be dropped
// (frame-rate limiting or a non-positive pixel budget); otherwise fills all
// four out-parameters and returns true. The input should first be cropped to
// `cropped_*` and then scaled to `out_*`.
bool VideoAdapter::AdaptFrameResolution(int in_width,
                                        int in_height,
                                        int64_t in_timestamp_ns,
                                        int* cropped_width,
                                        int* cropped_height,
                                        int* out_width,
                                        int* out_height) {
  webrtc::MutexLock lock(&mutex_);
  ++frames_in_;
  // The max output pixel count is the minimum of the requests from
  // OnOutputFormatRequest and OnResolutionFramerateRequest.
  int max_pixel_count = resolution_request_max_pixel_count_;
  // Select target aspect ratio and max pixel count depending on input frame
  // orientation.
  absl::optional<std::pair<int, int>> target_aspect_ratio;
  if (in_width > in_height) {
    target_aspect_ratio = output_format_request_.target_landscape_aspect_ratio;
    if (output_format_request_.max_landscape_pixel_count)
      max_pixel_count = std::min(
          max_pixel_count, *output_format_request_.max_landscape_pixel_count);
  } else {
    target_aspect_ratio = output_format_request_.target_portrait_aspect_ratio;
    if (output_format_request_.max_portrait_pixel_count)
      max_pixel_count = std::min(
          max_pixel_count, *output_format_request_.max_portrait_pixel_count);
  }
  int target_pixel_count =
      std::min(resolution_request_target_pixel_count_, max_pixel_count);
  // Drop the input frame if necessary.
  if (max_pixel_count <= 0 || DropFrame(in_timestamp_ns)) {
    // Show VAdapt log every 90 frames dropped. (3 seconds)
    if ((frames_in_ - frames_out_) % 90 == 0) {
      // TODO(fbarchard): Reduce to LS_VERBOSE when adapter info is not needed
      // in default calls.
      RTC_LOG(LS_INFO) << "VAdapt Drop Frame: scaled " << frames_scaled_
                       << " / out " << frames_out_ << " / in " << frames_in_
                       << " Changes: " << adaption_changes_
                       << " Input: " << in_width << "x" << in_height
                       << " timestamp: " << in_timestamp_ns
                       << " Output fps: " << max_framerate_request_ << "/"
                       << output_format_request_.max_fps.value_or(-1)
                       << " alignment: " << resolution_alignment_;
    }
    // Drop frame.
    return false;
  }
  // Calculate how the input should be cropped.
  if (!target_aspect_ratio || target_aspect_ratio->first <= 0 ||
      target_aspect_ratio->second <= 0) {
    // No (valid) aspect-ratio request: keep the full frame.
    *cropped_width = in_width;
    *cropped_height = in_height;
  } else {
    const float requested_aspect =
        target_aspect_ratio->first /
        static_cast<float>(target_aspect_ratio->second);
    *cropped_width =
        std::min(in_width, static_cast<int>(in_height * requested_aspect));
    *cropped_height =
        std::min(in_height, static_cast<int>(in_width / requested_aspect));
  }
  const Fraction scale =
      FindScale(*cropped_width, *cropped_height, target_pixel_count,
                max_pixel_count, variable_start_scale_factor_);
  // Adjust cropping slightly to get correctly aligned output size and a perfect
  // scale factor.
  *cropped_width = roundUp(*cropped_width,
                           scale.denominator * resolution_alignment_, in_width);
  *cropped_height = roundUp(
      *cropped_height, scale.denominator * resolution_alignment_, in_height);
  RTC_DCHECK_EQ(0, *cropped_width % scale.denominator);
  RTC_DCHECK_EQ(0, *cropped_height % scale.denominator);
  // Calculate final output size.
  *out_width = *cropped_width / scale.denominator * scale.numerator;
  *out_height = *cropped_height / scale.denominator * scale.numerator;
  RTC_DCHECK_EQ(0, *out_width % resolution_alignment_);
  RTC_DCHECK_EQ(0, *out_height % resolution_alignment_);
  ++frames_out_;
  if (scale.numerator != scale.denominator)
    ++frames_scaled_;
  // Log whenever the adapted output size changes between frames.
  if (previous_width_ &&
      (previous_width_ != *out_width || previous_height_ != *out_height)) {
    ++adaption_changes_;
    RTC_LOG(LS_INFO) << "Frame size changed: scaled " << frames_scaled_
                     << " / out " << frames_out_ << " / in " << frames_in_
                     << " Changes: " << adaption_changes_
                     << " Input: " << in_width << "x" << in_height
                     << " Scale: " << scale.numerator << "/"
                     << scale.denominator << " Output: " << *out_width << "x"
                     << *out_height << " fps: " << max_framerate_request_ << "/"
                     << output_format_request_.max_fps.value_or(-1)
                     << " alignment: " << resolution_alignment_;
  }
  previous_width_ = *out_width;
  previous_height_ = *out_height;
  return true;
}
// Deprecated VideoFormat-based entry point: translates `format` into an
// aspect-ratio / pixel-count / fps triple and forwards to the overload below.
void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<VideoFormat>& format) {
  if (!format) {
    OnOutputFormatRequest(absl::nullopt, absl::nullopt, absl::nullopt);
    return;
  }
  absl::optional<int> max_fps;
  if (format->interval > 0) {
    max_fps = rtc::kNumNanosecsPerSec / format->interval;
  }
  OnOutputFormatRequest(std::make_pair(format->width, format->height),
                        format->width * format->height, max_fps);
}
// Orientation-agnostic entry point: derives a landscape (long side first) and
// a portrait (short side first) variant of `target_aspect_ratio` so the input
// orientation is maintained, then forwards to the full overload.
void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<std::pair<int, int>>& target_aspect_ratio,
    const absl::optional<int>& max_pixel_count,
    const absl::optional<int>& max_fps) {
  absl::optional<std::pair<int, int>> landscape_ratio;
  absl::optional<std::pair<int, int>> portrait_ratio;
  if (target_aspect_ratio && target_aspect_ratio->first > 0 &&
      target_aspect_ratio->second > 0) {
    const int long_side =
        std::max(target_aspect_ratio->first, target_aspect_ratio->second);
    const int short_side =
        std::min(target_aspect_ratio->first, target_aspect_ratio->second);
    landscape_ratio = std::make_pair(long_side, short_side);
    portrait_ratio = std::make_pair(short_side, long_side);
  }
  OnOutputFormatRequest(landscape_ratio, max_pixel_count, portrait_ratio,
                        max_pixel_count, max_fps);
}
// Records a new output format request with separate landscape/portrait
// targets. If a requested_resolution override is currently active (a stash
// exists; see OnSinkWants), the request is written into the stash instead of
// being applied, so it can be restored once the override ends. Resets the
// framerate controller so a new fps limit takes effect immediately.
void VideoAdapter::OnOutputFormatRequest(
    const absl::optional<std::pair<int, int>>& target_landscape_aspect_ratio,
    const absl::optional<int>& max_landscape_pixel_count,
    const absl::optional<std::pair<int, int>>& target_portrait_aspect_ratio,
    const absl::optional<int>& max_portrait_pixel_count,
    const absl::optional<int>& max_fps) {
  webrtc::MutexLock lock(&mutex_);
  OutputFormatRequest request = {
      .target_landscape_aspect_ratio = target_landscape_aspect_ratio,
      .max_landscape_pixel_count = max_landscape_pixel_count,
      .target_portrait_aspect_ratio = target_portrait_aspect_ratio,
      .max_portrait_pixel_count = max_portrait_pixel_count,
      .max_fps = max_fps};
  if (stashed_output_format_request_) {
    // Save the output format request for later use in case the encoder making
    // this call would become active, because currently all active encoders use
    // requested_resolution instead.
    stashed_output_format_request_ = request;
    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
                     << stashed_output_format_request_->ToString();
  } else {
    output_format_request_ = request;
    RTC_LOG(LS_INFO) << "Setting output_format_request_: "
                     << output_format_request_.ToString();
  }
  framerate_controller_.Reset();
}
// Applies aggregated sink constraints: pixel-count limits, max frame rate and
// resolution alignment. Also implements the requested_resolution override:
// when all active encoders use `requested_resolution`, the current
// OnOutputFormatRequest state is stashed and `output_format_request_` is
// rewritten from the requested resolution; the stash is restored when
// requested_resolution stops being used.
void VideoAdapter::OnSinkWants(const rtc::VideoSinkWants& sink_wants) {
  webrtc::MutexLock lock(&mutex_);
  resolution_request_max_pixel_count_ = sink_wants.max_pixel_count;
  resolution_request_target_pixel_count_ =
      sink_wants.target_pixel_count.value_or(
          resolution_request_max_pixel_count_);
  max_framerate_request_ = sink_wants.max_framerate_fps;
  // Output must satisfy both the source's and the sink's alignment.
  resolution_alignment_ = cricket::LeastCommonMultiple(
      source_resolution_alignment_, sink_wants.resolution_alignment);
  if (!sink_wants.aggregates) {
    RTC_LOG(LS_WARNING)
        << "These should always be created by VideoBroadcaster!";
    return;
  }
  // If requested_resolution is used, and there are no active encoders
  // that are NOT using requested_resolution (aka newapi), then override
  // calls to OnOutputFormatRequest and use values from requested_resolution
  // instead (combined with qualityscaling based on pixel counts above).
  if (webrtc::field_trial::IsDisabled(
          "WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest")) {
    // kill-switch...
    return;
  }
  if (!sink_wants.requested_resolution) {
    if (stashed_output_format_request_) {
      // because current active_output_format_request is based on
      // requested_resolution logic, while current encoder(s) doesn't want that,
      // we have to restore the stashed request.
      RTC_LOG(LS_INFO) << "Unstashing OnOutputFormatRequest: "
                       << stashed_output_format_request_->ToString();
      output_format_request_ = *stashed_output_format_request_;
      stashed_output_format_request_.reset();
    }
    return;
  }
  // Some active encoder still ignores requested_resolution; don't override.
  if (sink_wants.aggregates->any_active_without_requested_resolution) {
    return;
  }
  if (!stashed_output_format_request_) {
    // The active output format request is about to be rewritten by
    // request_resolution. We need to save it for later use in case the encoder
    // which doesn't use request_resolution logic become active in the future.
    stashed_output_format_request_ = output_format_request_;
    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
                     << stashed_output_format_request_->ToString();
  }
  // Rewrite the active request from the requested resolution, mirroring it
  // into both orientations.
  auto res = *sink_wants.requested_resolution;
  auto pixel_count = res.width * res.height;
  output_format_request_.target_landscape_aspect_ratio =
      std::make_pair(res.width, res.height);
  output_format_request_.max_landscape_pixel_count = pixel_count;
  output_format_request_.target_portrait_aspect_ratio =
      std::make_pair(res.height, res.width);
  output_format_request_.max_portrait_pixel_count = pixel_count;
  output_format_request_.max_fps = max_framerate_request_;
  RTC_LOG(LS_INFO) << "Setting output_format_request_ based on sink_wants: "
                   << output_format_request_.ToString();
}
// Returns the currently requested target pixel count (set via OnSinkWants).
int VideoAdapter::GetTargetPixels() const {
  webrtc::MutexLock lock(&mutex_);
  return resolution_request_target_pixel_count_;
}
// Returns the effective max frame rate: the minimum of the sink's
// `max_framerate_request_` and the optional `output_format_request_.max_fps`
// (the same combination DropFrame() uses to throttle), or +infinity when
// neither imposes a limit.
float VideoAdapter::GetMaxFramerate() const {
  webrtc::MutexLock lock(&mutex_);
  // Minimum of `output_format_request_.max_fps` and `max_framerate_request_` is
  // used to throttle frame-rate.
  const int framerate =
      std::min(max_framerate_request_,
               output_format_request_.max_fps.value_or(max_framerate_request_));
  if (framerate == std::numeric_limits<int>::max()) {
    return std::numeric_limits<float>::infinity();
  }
  // Bug fix: return the combined limit. Previously `max_framerate_request_`
  // was returned here, silently ignoring a lower `max_fps` from
  // OnOutputFormatRequest even though DropFrame() enforces it.
  return framerate;
}
// Human-readable form of the request, used in logging. Prints a compact
// single-orientation form when landscape and portrait settings mirror each
// other, otherwise both orientations explicitly.
std::string VideoAdapter::OutputFormatRequest::ToString() const {
  rtc::StringBuilder oss;
  oss << "[ ";
  if (target_landscape_aspect_ratio == Swap(target_portrait_aspect_ratio) &&
      max_landscape_pixel_count == max_portrait_pixel_count) {
    if (target_landscape_aspect_ratio) {
      oss << target_landscape_aspect_ratio->first << "x"
          << target_landscape_aspect_ratio->second;
    } else {
      oss << "unset-resolution";
    }
    if (max_landscape_pixel_count) {
      oss << " max_pixel_count: " << *max_landscape_pixel_count;
    }
  } else {
    oss << "[ landscape: ";
    if (target_landscape_aspect_ratio) {
      oss << target_landscape_aspect_ratio->first << "x"
          << target_landscape_aspect_ratio->second;
    } else {
      oss << "unset";
    }
    if (max_landscape_pixel_count) {
      oss << " max_pixel_count: " << *max_landscape_pixel_count;
    }
    oss << " ] [ portrait: ";
    if (target_portrait_aspect_ratio) {
      oss << target_portrait_aspect_ratio->first << "x"
          << target_portrait_aspect_ratio->second;
    } else {
      // Consistency fix: an unset portrait ratio previously printed nothing,
      // unlike the landscape branch above.
      oss << "unset";
    }
    if (max_portrait_pixel_count) {
      oss << " max_pixel_count: " << *max_portrait_pixel_count;
    }
    oss << " ]";
  }
  oss << " max_fps: ";
  if (max_fps) {
    oss << *max_fps;
  } else {
    oss << "unset";
  }
  oss << " ]";
  return oss.Release();
}
} // namespace cricket

View file

@ -0,0 +1,172 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_VIDEO_ADAPTER_H_
#define MEDIA_BASE_VIDEO_ADAPTER_H_
#include <stdint.h>
#include <string>
#include <utility>
#include "absl/types/optional.h"
#include "api/video/video_source_interface.h"
#include "common_video/framerate_controller.h"
#include "media/base/video_common.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/thread_annotations.h"
namespace cricket {
// VideoAdapter adapts an input video frame to an output frame based on the
// specified input and output formats. The adaptation includes dropping frames
// to reduce frame rate and scaling frames.
// VideoAdapter is thread safe.
class RTC_EXPORT VideoAdapter {
 public:
  VideoAdapter();
  // The source requests output frames whose width and height are divisible
  // by `source_resolution_alignment`.
  explicit VideoAdapter(int source_resolution_alignment);
  virtual ~VideoAdapter();
  VideoAdapter(const VideoAdapter&) = delete;
  VideoAdapter& operator=(const VideoAdapter&) = delete;
  // Return the adapted resolution and cropping parameters given the
  // input resolution. The input frame should first be cropped, then
  // scaled to the final output resolution. Returns true if the frame
  // should be adapted, and false if it should be dropped.
  bool AdaptFrameResolution(int in_width,
                            int in_height,
                            int64_t in_timestamp_ns,
                            int* cropped_width,
                            int* cropped_height,
                            int* out_width,
                            int* out_height) RTC_LOCKS_EXCLUDED(mutex_);
  // DEPRECATED. Please use OnOutputFormatRequest below.
  // TODO(asapersson): Remove this once it is no longer used.
  // Requests the output frame size and frame interval from
  // `AdaptFrameResolution` to not be larger than `format`. Also, the input
  // frame size will be cropped to match the requested aspect ratio. The
  // requested aspect ratio is orientation agnostic and will be adjusted to
  // maintain the input orientation, so it doesn't matter if e.g. 1280x720 or
  // 720x1280 is requested.
  // Note: Should be called from the source only.
  void OnOutputFormatRequest(const absl::optional<VideoFormat>& format)
      RTC_LOCKS_EXCLUDED(mutex_);
  // Requests output frame size and frame interval from `AdaptFrameResolution`.
  // `target_aspect_ratio`: The input frame size will be cropped to match the
  // requested aspect ratio. The aspect ratio is orientation agnostic and will
  // be adjusted to maintain the input orientation (i.e. it doesn't matter if
  // e.g. <1280,720> or <720,1280> is requested).
  // `max_pixel_count`: The maximum output frame size.
  // `max_fps`: The maximum output framerate.
  // Note: Should be called from the source only.
  void OnOutputFormatRequest(
      const absl::optional<std::pair<int, int>>& target_aspect_ratio,
      const absl::optional<int>& max_pixel_count,
      const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
  // Same as above, but allows setting two different target aspect ratios
  // depending on incoming frame orientation. This gives more fine-grained
  // control and can e.g. be used to force landscape video to be cropped to
  // portrait video.
  void OnOutputFormatRequest(
      const absl::optional<std::pair<int, int>>& target_landscape_aspect_ratio,
      const absl::optional<int>& max_landscape_pixel_count,
      const absl::optional<std::pair<int, int>>& target_portrait_aspect_ratio,
      const absl::optional<int>& max_portrait_pixel_count,
      const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
  // Requests the output frame size from `AdaptFrameResolution` to have as close
  // as possible to `sink_wants.target_pixel_count` pixels (if set)
  // but no more than `sink_wants.max_pixel_count`.
  // `sink_wants.max_framerate_fps` is essentially analogous to
  // `sink_wants.max_pixel_count`, but for framerate rather than resolution.
  // Set `sink_wants.max_pixel_count` and/or `sink_wants.max_framerate_fps` to
  // std::numeric_limit<int>::max() if no upper limit is desired.
  // The sink resolution alignment requirement is given by
  // `sink_wants.resolution_alignment`.
  // Note: Should be called from the sink only.
  void OnSinkWants(const rtc::VideoSinkWants& sink_wants)
      RTC_LOCKS_EXCLUDED(mutex_);
  // Returns maximum image area, which shouldn't impose any adaptations.
  // Can return `numeric_limits<int>::max()` if no limit is set.
  int GetTargetPixels() const;
  // Returns current frame-rate limit.
  // Can return `numeric_limits<float>::infinity()` if no limit is set.
  float GetMaxFramerate() const;
 private:
  // Determine if frame should be dropped based on input fps and requested fps.
  bool DropFrame(int64_t in_timestamp_ns) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  // Adaptation statistics, used for logging adaptation changes.
  int frames_in_ RTC_GUARDED_BY(mutex_);      // Number of input frames.
  int frames_out_ RTC_GUARDED_BY(mutex_);     // Number of output frames.
  int frames_scaled_ RTC_GUARDED_BY(mutex_);  // Number of frames scaled.
  int adaption_changes_
      RTC_GUARDED_BY(mutex_);  // Number of changes in scale factor.
  int previous_width_ RTC_GUARDED_BY(mutex_);  // Previous adapter output width.
  int previous_height_
      RTC_GUARDED_BY(mutex_);  // Previous adapter output height.
  // NOTE(review): presumably allows starting at a non-unity scale factor;
  // confirm exact semantics in video_adapter.cc.
  const bool variable_start_scale_factor_;
  // The fixed source resolution alignment requirement.
  const int source_resolution_alignment_;
  // The currently applied resolution alignment, as given by the requirements:
  //  - the fixed `source_resolution_alignment_`; and
  //  - the latest `sink_wants.resolution_alignment`.
  int resolution_alignment_ RTC_GUARDED_BY(mutex_);
  // Max number of pixels/fps requested via calls to OnOutputFormatRequest,
  // OnResolutionFramerateRequest respectively.
  // The adapted output format is the minimum of these.
  struct OutputFormatRequest {
    absl::optional<std::pair<int, int>> target_landscape_aspect_ratio;
    absl::optional<int> max_landscape_pixel_count;
    absl::optional<std::pair<int, int>> target_portrait_aspect_ratio;
    absl::optional<int> max_portrait_pixel_count;
    absl::optional<int> max_fps;
    // For logging.
    std::string ToString() const;
  };
  OutputFormatRequest output_format_request_ RTC_GUARDED_BY(mutex_);
  // Latest resolution/framerate restrictions supplied via OnSinkWants().
  int resolution_request_target_pixel_count_ RTC_GUARDED_BY(mutex_);
  int resolution_request_max_pixel_count_ RTC_GUARDED_BY(mutex_);
  int max_framerate_request_ RTC_GUARDED_BY(mutex_);
  // Stashed OutputFormatRequest that is used to save value of
  // OnOutputFormatRequest in case all active encoders are using
  // requested_resolution. I.e when all active encoders are using
  // requested_resolution, the call to OnOutputFormatRequest is ignored
  // and the value from requested_resolution is used instead (to scale/crop
  // frame). This allows for an application to only use
  // RtpEncodingParameters::request_resolution and get the same behavior as if
  // it had used VideoAdapter::OnOutputFormatRequest.
  absl::optional<OutputFormatRequest> stashed_output_format_request_
      RTC_GUARDED_BY(mutex_);
  // Helper deciding frame drops to honor the framerate limit (see DropFrame).
  webrtc::FramerateController framerate_controller_ RTC_GUARDED_BY(mutex_);
  // The critical section to protect the above variables.
  mutable webrtc::Mutex mutex_;
};
} // namespace cricket
#endif // MEDIA_BASE_VIDEO_ADAPTER_H_

View file

@ -0,0 +1,214 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_broadcaster.h"
#include <algorithm>
#include <vector>
#include "absl/types/optional.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_rotation.h"
#include "media/base/video_common.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace rtc {
// Construction/destruction need no special handling: all members are
// default-initialized and release themselves in their own destructors.
VideoBroadcaster::VideoBroadcaster() = default;
VideoBroadcaster::~VideoBroadcaster() = default;
// Registers `sink` (or refreshes its wants if already registered), replays any
// stored constraints to a brand-new sink, then recomputes aggregated wants.
void VideoBroadcaster::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  RTC_DCHECK(sink != nullptr);
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  const bool is_new_sink = (FindSinkPair(sink) == nullptr);
  if (is_new_sink) {
    // A brand-new sink has not seen the previously broadcast frame, so the
    // next frame must not be treated as delivered to everyone.
    previous_frame_sent_to_all_sinks_ = false;
    if (last_constraints_.has_value()) {
      RTC_LOG(LS_INFO) << __func__ << " forwarding stored constraints min_fps "
                       << last_constraints_->min_fps.value_or(-1) << " max_fps "
                       << last_constraints_->max_fps.value_or(-1);
      sink->OnConstraintsChanged(*last_constraints_);
    }
  }
  VideoSourceBase::AddOrUpdateSink(sink, wants);
  UpdateWants();
}
// Unregisters `sink` and recomputes the aggregated wants without it.
void VideoBroadcaster::RemoveSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK(sink != nullptr);
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  VideoSourceBase::RemoveSink(sink);
  UpdateWants();
}
// True when at least one sink is registered, i.e. a frame delivered to
// OnFrame() would actually be forwarded somewhere.
bool VideoBroadcaster::frame_wanted() const {
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  return !sink_pairs().empty();
}
// Returns a copy of the wants aggregated over all sinks, as last computed by
// UpdateWants().
VideoSinkWants VideoBroadcaster::wants() const {
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  return current_wants_;
}
// Fans `frame` out to every sink, honoring per-sink wants:
//  - sinks requiring rotation to be applied get OnDiscardedFrame() for frames
//    that still carry pending rotation;
//  - sinks that asked for black frames get a black frame with the original
//    frame's geometry, rotation, timestamp and id;
//  - when the previous frame was not delivered to every sink, the update rect
//    is cleared, since incremental-update info is then unreliable.
void VideoBroadcaster::OnFrame(const webrtc::VideoFrame& frame) {
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  bool current_frame_was_discarded = false;
  for (auto& sink_pair : sink_pairs()) {
    if (sink_pair.wants.rotation_applied &&
        frame.rotation() != webrtc::kVideoRotation_0) {
      // Calls to OnFrame are not synchronized with changes to the sink wants.
      // When rotation_applied is set to true, one or a few frames may get here
      // with rotation still pending. Protect sinks that don't expect any
      // pending rotation.
      RTC_LOG(LS_VERBOSE) << "Discarding frame with unexpected rotation.";
      sink_pair.sink->OnDiscardedFrame();
      current_frame_was_discarded = true;
      continue;
    }
    if (sink_pair.wants.black_frames) {
      // Substitute black content but keep the original metadata so timing and
      // rotation stay consistent for this sink.
      webrtc::VideoFrame black_frame =
          webrtc::VideoFrame::Builder()
              .set_video_frame_buffer(
                  GetBlackFrameBuffer(frame.width(), frame.height()))
              .set_rotation(frame.rotation())
              .set_timestamp_us(frame.timestamp_us())
              .set_id(frame.id())
              .build();
      sink_pair.sink->OnFrame(black_frame);
    } else if (!previous_frame_sent_to_all_sinks_ && frame.has_update_rect()) {
      // Since last frame was not sent to some sinks, no reliable update
      // information is available, so we need to clear the update rect.
      webrtc::VideoFrame copy = frame;
      copy.clear_update_rect();
      sink_pair.sink->OnFrame(copy);
    } else {
      sink_pair.sink->OnFrame(frame);
    }
  }
  previous_frame_sent_to_all_sinks_ = !current_frame_was_discarded;
}
// Propagates a source-side frame drop to every registered sink.
void VideoBroadcaster::OnDiscardedFrame() {
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  for (auto& sink_pair : sink_pairs()) {
    sink_pair.sink->OnDiscardedFrame();
  }
}
// Stores `constraints` so late-joining sinks can receive them from
// AddOrUpdateSink, and forwards them to all currently registered sinks.
void VideoBroadcaster::ProcessConstraints(
    const webrtc::VideoTrackSourceConstraints& constraints) {
  webrtc::MutexLock lock(&sinks_and_wants_lock_);
  RTC_LOG(LS_INFO) << __func__ << " min_fps "
                   << constraints.min_fps.value_or(-1) << " max_fps "
                   << constraints.max_fps.value_or(-1) << " broadcasting to "
                   << sink_pairs().size() << " sinks.";
  last_constraints_ = constraints;
  for (auto& sink_pair : sink_pairs())
    sink_pair.sink->OnConstraintsChanged(constraints);
}
// Recomputes `current_wants_` by folding the wants of every (considered) sink:
// rotation_applied is OR-ed; max_pixel_count, target_pixel_count and
// max_framerate_fps take the minimum; resolution_alignment takes the least
// common multiple; requested_resolution takes the per-dimension maximum.
// Caller must hold `sinks_and_wants_lock_` (enforced by the header's
// RTC_EXCLUSIVE_LOCKS_REQUIRED annotation).
void VideoBroadcaster::UpdateWants() {
  VideoSinkWants wants;
  wants.rotation_applied = false;
  wants.resolution_alignment = 1;
  wants.aggregates.emplace(VideoSinkWants::Aggregates());
  wants.is_active = false;
  // TODO(webrtc:14451) : I think it makes sense to always
  // "ignore" encoders that are not active. But that would
  // probably require a controlled roll out with a field trials?
  // To play it safe, only ignore inactive encoders is there is an
  // active encoder using the new api (requested_resolution),
  // this means that there is only a behavioural change when using new
  // api.
  bool ignore_inactive_encoders_old_api = false;
  for (auto& sink : sink_pairs()) {
    if (sink.wants.is_active && sink.wants.requested_resolution.has_value()) {
      ignore_inactive_encoders_old_api = true;
      break;
    }
  }
  for (auto& sink : sink_pairs()) {
    if (!sink.wants.is_active &&
        (sink.wants.requested_resolution || ignore_inactive_encoders_old_api)) {
      continue;
    }
    // wants.rotation_applied == ANY(sink.wants.rotation_applied)
    if (sink.wants.rotation_applied) {
      wants.rotation_applied = true;
    }
    // wants.max_pixel_count == MIN(sink.wants.max_pixel_count)
    if (sink.wants.max_pixel_count < wants.max_pixel_count) {
      wants.max_pixel_count = sink.wants.max_pixel_count;
    }
    // Select the minimum requested target_pixel_count, if any, of all sinks so
    // that we don't over utilize the resources for any one.
    // TODO(sprang): Consider using the median instead, since the limit can be
    // expressed by max_pixel_count.
    if (sink.wants.target_pixel_count &&
        (!wants.target_pixel_count ||
         (*sink.wants.target_pixel_count < *wants.target_pixel_count))) {
      wants.target_pixel_count = sink.wants.target_pixel_count;
    }
    // Select the minimum for the requested max framerates.
    if (sink.wants.max_framerate_fps < wants.max_framerate_fps) {
      wants.max_framerate_fps = sink.wants.max_framerate_fps;
    }
    wants.resolution_alignment = cricket::LeastCommonMultiple(
        wants.resolution_alignment, sink.wants.resolution_alignment);
    // Pick MAX(requested_resolution) since the actual can be downscaled
    // in encoder instead.
    if (sink.wants.requested_resolution) {
      if (!wants.requested_resolution) {
        wants.requested_resolution = sink.wants.requested_resolution;
      } else {
        wants.requested_resolution->width =
            std::max(wants.requested_resolution->width,
                     sink.wants.requested_resolution->width);
        wants.requested_resolution->height =
            std::max(wants.requested_resolution->height,
                     sink.wants.requested_resolution->height);
      }
    } else if (sink.wants.is_active) {
      wants.aggregates->any_active_without_requested_resolution = true;
    }
    wants.is_active |= sink.wants.is_active;
  }
  // Keep target_pixel_count consistent with the (possibly tighter) max.
  if (wants.target_pixel_count &&
      *wants.target_pixel_count >= wants.max_pixel_count) {
    wants.target_pixel_count.emplace(wants.max_pixel_count);
  }
  current_wants_ = wants;
}
// Returns a cached all-black I420 buffer of the requested size, recreating it
// only when the cached buffer is missing or has the wrong dimensions.
const rtc::scoped_refptr<webrtc::VideoFrameBuffer>&
VideoBroadcaster::GetBlackFrameBuffer(int width, int height) {
  const bool needs_new_buffer = !black_frame_buffer_ ||
                                black_frame_buffer_->width() != width ||
                                black_frame_buffer_->height() != height;
  if (needs_new_buffer) {
    auto buffer = webrtc::I420Buffer::Create(width, height);
    webrtc::I420Buffer::SetBlack(buffer.get());
    black_frame_buffer_ = buffer;
  }
  return black_frame_buffer_;
}
} // namespace rtc

View file

@ -0,0 +1,82 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_VIDEO_BROADCASTER_H_
#define MEDIA_BASE_VIDEO_BROADCASTER_H_
#include "api/media_stream_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_source_interface.h"
#include "media/base/video_source_base.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace rtc {
// VideoBroadcaster broadcast video frames to sinks and combines VideoSinkWants
// from its sinks. It does that by implementing rtc::VideoSourceInterface and
// rtc::VideoSinkInterface. The class is threadsafe; methods may be called on
// any thread. This is needed because VideoStreamEncoder calls AddOrUpdateSink
// both on the worker thread and on the encoder task queue.
class VideoBroadcaster : public VideoSourceBase,
                         public VideoSinkInterface<webrtc::VideoFrame> {
 public:
  VideoBroadcaster();
  ~VideoBroadcaster() override;
  // Adds a new, or updates an already existing sink. If the sink is new and
  // ProcessConstraints has been called previously, the new sink's
  // OnConstraintsChanged method will be invoked with the most recent
  // constraints.
  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const VideoSinkWants& wants) override;
  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
  // Returns true if the next frame will be delivered to at least one sink.
  bool frame_wanted() const;
  // Returns VideoSinkWants a source is requested to fulfill. They are
  // aggregated by all VideoSinkWants from all sinks.
  VideoSinkWants wants() const;
  // This method ensures that if a sink sets rotation_applied == true,
  // it will never receive a frame with pending rotation. Our caller
  // may pass in frames without precise synchronization with changes
  // to the VideoSinkWants.
  void OnFrame(const webrtc::VideoFrame& frame) override;
  void OnDiscardedFrame() override;
  // Called on the network thread when constraints change. Forwards the
  // constraints to sinks added with AddOrUpdateSink via OnConstraintsChanged.
  void ProcessConstraints(
      const webrtc::VideoTrackSourceConstraints& constraints);
 protected:
  // Recomputes `current_wants_` from all sinks' wants.
  void UpdateWants() RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_);
  const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& GetBlackFrameBuffer(
      int width,
      int height) RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_);
  mutable webrtc::Mutex sinks_and_wants_lock_;
  VideoSinkWants current_wants_ RTC_GUARDED_BY(sinks_and_wants_lock_);
  // NOTE(review): only accessed under `sinks_and_wants_lock_` (via
  // GetBlackFrameBuffer) but lacks an RTC_GUARDED_BY annotation — consider
  // adding one.
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> black_frame_buffer_;
  // False while the most recent frame was withheld from at least one sink.
  bool previous_frame_sent_to_all_sinks_ RTC_GUARDED_BY(sinks_and_wants_lock_) =
      true;
  // Constraints last seen by ProcessConstraints, replayed to new sinks.
  absl::optional<webrtc::VideoTrackSourceConstraints> last_constraints_
      RTC_GUARDED_BY(sinks_and_wants_lock_);
};
} // namespace rtc
#endif // MEDIA_BASE_VIDEO_BROADCASTER_H_

View file

@ -0,0 +1,97 @@
/*
* Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_common.h"
#include "api/array_view.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace cricket {
// One alias → canonical FourCC mapping; see CanonicalFourCC() below.
struct FourCCAliasEntry {
  uint32_t alias;
  uint32_t canonical;
};
// Lookup table of all known FourCC aliases.
static const FourCCAliasEntry kFourCCAliases[] = {
    {FOURCC_IYUV, FOURCC_I420},
    {FOURCC_YU16, FOURCC_I422},
    {FOURCC_YU24, FOURCC_I444},
    {FOURCC_YUYV, FOURCC_YUY2},
    {FOURCC_YUVS, FOURCC_YUY2},
    {FOURCC_HDYC, FOURCC_UYVY},
    {FOURCC_2VUY, FOURCC_UYVY},
    {FOURCC_JPEG, FOURCC_MJPG},  // Note: JPEG has DHT while MJPG does not.
    {FOURCC_DMB1, FOURCC_MJPG},
    {FOURCC_BA81, FOURCC_BGGR},
    {FOURCC_RGB3, FOURCC_RAW},
    {FOURCC_BGR3, FOURCC_24BG},
    {FOURCC_CM32, FOURCC_BGRA},
    {FOURCC_CM24, FOURCC_RAW},
};
uint32_t CanonicalFourCC(uint32_t fourcc) {
for (uint32_t i = 0; i < arraysize(kFourCCAliases); ++i) {
if (kFourCCAliases[i].alias == fourcc) {
return kFourCCAliases[i].canonical;
}
}
// Not an alias, so return it as-is.
return fourcc;
}
// The C++ standard requires a namespace-scope definition of static const
// integral types even when they are initialized in the declaration (see
// [class.static.data]/4), but MSVC with /Ze is non-conforming and treats that
// as a multiply defined symbol error. Hence the definition is skipped when
// Microsoft extensions are enabled. See also:
// http://msdn.microsoft.com/en-us/library/34h23df8.aspx
#ifndef _MSC_EXTENSIONS
const int64_t VideoFormat::kMinimumInterval;  // Initialized in header.
#endif
// Renders the format as "<fourcc> <width>x<height>x<fps>"; the fourcc prefix
// is dropped entirely when any of its characters is non-printable ASCII.
std::string VideoFormat::ToString() const {
  std::string fourcc_name = GetFourccName(fourcc) + " ";
  for (const char c : fourcc_name) {
    // Test character is printable; Avoid isprint() which asserts on negatives.
    if (c < 32 || c >= 127) {
      fourcc_name = "";
      break;
    }
  }
  char buf[256];
  rtc::SimpleStringBuilder sb(buf);
  sb << fourcc_name << width << "x" << height << "x"
     << IntervalToFpsFloat(interval);
  return sb.str();
}
// Euclid's algorithm. Requires a >= 0 and b > 0 (enforced by the dchecks);
// the result is always positive.
int GreatestCommonDivisor(int a, int b) {
  RTC_DCHECK_GE(a, 0);
  RTC_DCHECK_GT(b, 0);
  // Iteratively replace (a, b) with (b, a mod b) until the remainder hits 0.
  while (b != 0) {
    const int remainder = a % b;
    a = b;
    b = remainder;
  }
  return a;
}
// Requires a > 0 and b > 0. Divides before multiplying so the intermediate
// value stays as small as possible.
int LeastCommonMultiple(int a, int b) {
  RTC_DCHECK_GT(a, 0);
  RTC_DCHECK_GT(b, 0);
  return b * (a / GreatestCommonDivisor(a, b));
}
} // namespace cricket

View file

@ -0,0 +1,224 @@
/*
* Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Common definition for video, including fourcc and VideoFormat.
#ifndef MEDIA_BASE_VIDEO_COMMON_H_
#define MEDIA_BASE_VIDEO_COMMON_H_
#include <stdint.h>
#include <string>
#include "rtc_base/system/rtc_export.h"
#include "rtc_base/time_utils.h"
namespace cricket {
//////////////////////////////////////////////////////////////////////////////
// Definition of FourCC codes
//////////////////////////////////////////////////////////////////////////////
// Convert four characters to a FourCC code, packed little-endian: the first
// character lands in the least-significant byte.
// Needs to be a macro otherwise the OS X compiler complains when the kFormat*
// constants are used in a switch.
#define CRICKET_FOURCC(a, b, c, d) \
  ((static_cast<uint32_t>(a)) | (static_cast<uint32_t>(b) << 8) | \
   (static_cast<uint32_t>(c) << 16) | (static_cast<uint32_t>(d) << 24))
// Some pages discussing FourCC codes:
// http://www.fourcc.org/yuv.php
// http://v4l2spec.bytesex.org/spec/book1.htm
// http://developer.apple.com/quicktime/icefloe/dispatch020.html
// http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12
// http://people.xiph.org/~xiphmont/containers/nut/nut4cc.txt
// FourCC codes grouped according to implementation efficiency.
// Primary formats should convert in 1 efficient step.
// Secondary formats are converted in 2 steps.
// Auxiliary formats call primary converters.
enum FourCC {
  // 9 Primary YUV formats: 5 planar, 2 biplanar, 2 packed.
  FOURCC_I420 = CRICKET_FOURCC('I', '4', '2', '0'),
  FOURCC_I422 = CRICKET_FOURCC('I', '4', '2', '2'),
  FOURCC_I444 = CRICKET_FOURCC('I', '4', '4', '4'),
  FOURCC_I411 = CRICKET_FOURCC('I', '4', '1', '1'),
  FOURCC_I400 = CRICKET_FOURCC('I', '4', '0', '0'),
  FOURCC_NV21 = CRICKET_FOURCC('N', 'V', '2', '1'),
  FOURCC_NV12 = CRICKET_FOURCC('N', 'V', '1', '2'),
  FOURCC_YUY2 = CRICKET_FOURCC('Y', 'U', 'Y', '2'),
  FOURCC_UYVY = CRICKET_FOURCC('U', 'Y', 'V', 'Y'),
  // 2 Secondary YUV formats: row biplanar.
  FOURCC_M420 = CRICKET_FOURCC('M', '4', '2', '0'),
  // 9 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp.
  FOURCC_ARGB = CRICKET_FOURCC('A', 'R', 'G', 'B'),
  FOURCC_BGRA = CRICKET_FOURCC('B', 'G', 'R', 'A'),
  FOURCC_ABGR = CRICKET_FOURCC('A', 'B', 'G', 'R'),
  FOURCC_24BG = CRICKET_FOURCC('2', '4', 'B', 'G'),
  FOURCC_RAW = CRICKET_FOURCC('r', 'a', 'w', ' '),
  FOURCC_RGBA = CRICKET_FOURCC('R', 'G', 'B', 'A'),
  FOURCC_RGBP = CRICKET_FOURCC('R', 'G', 'B', 'P'),  // bgr565.
  FOURCC_RGBO = CRICKET_FOURCC('R', 'G', 'B', 'O'),  // abgr1555.
  FOURCC_R444 = CRICKET_FOURCC('R', '4', '4', '4'),  // argb4444.
  // 4 Secondary RGB formats: 4 Bayer Patterns.
  FOURCC_RGGB = CRICKET_FOURCC('R', 'G', 'G', 'B'),
  FOURCC_BGGR = CRICKET_FOURCC('B', 'G', 'G', 'R'),
  FOURCC_GRBG = CRICKET_FOURCC('G', 'R', 'B', 'G'),
  FOURCC_GBRG = CRICKET_FOURCC('G', 'B', 'R', 'G'),
  // 1 Primary Compressed YUV format.
  FOURCC_MJPG = CRICKET_FOURCC('M', 'J', 'P', 'G'),
  // 5 Auxiliary YUV variations: 3 with U and V planes are swapped, 1 Alias.
  FOURCC_YV12 = CRICKET_FOURCC('Y', 'V', '1', '2'),
  FOURCC_YV16 = CRICKET_FOURCC('Y', 'V', '1', '6'),
  FOURCC_YV24 = CRICKET_FOURCC('Y', 'V', '2', '4'),
  FOURCC_YU12 = CRICKET_FOURCC('Y', 'U', '1', '2'),  // Linux version of I420.
  FOURCC_J420 = CRICKET_FOURCC('J', '4', '2', '0'),
  FOURCC_J400 = CRICKET_FOURCC('J', '4', '0', '0'),
  // 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical FOURCC.
  FOURCC_IYUV = CRICKET_FOURCC('I', 'Y', 'U', 'V'),  // Alias for I420.
  FOURCC_YU16 = CRICKET_FOURCC('Y', 'U', '1', '6'),  // Alias for I422.
  FOURCC_YU24 = CRICKET_FOURCC('Y', 'U', '2', '4'),  // Alias for I444.
  FOURCC_YUYV = CRICKET_FOURCC('Y', 'U', 'Y', 'V'),  // Alias for YUY2.
  FOURCC_YUVS = CRICKET_FOURCC('y', 'u', 'v', 's'),  // Alias for YUY2 on Mac.
  FOURCC_HDYC = CRICKET_FOURCC('H', 'D', 'Y', 'C'),  // Alias for UYVY.
  FOURCC_2VUY = CRICKET_FOURCC('2', 'v', 'u', 'y'),  // Alias for UYVY on Mac.
  FOURCC_JPEG = CRICKET_FOURCC('J', 'P', 'E', 'G'),  // Alias for MJPG.
  FOURCC_DMB1 = CRICKET_FOURCC('d', 'm', 'b', '1'),  // Alias for MJPG on Mac.
  FOURCC_BA81 = CRICKET_FOURCC('B', 'A', '8', '1'),  // Alias for BGGR.
  FOURCC_RGB3 = CRICKET_FOURCC('R', 'G', 'B', '3'),  // Alias for RAW.
  FOURCC_BGR3 = CRICKET_FOURCC('B', 'G', 'R', '3'),  // Alias for 24BG.
  FOURCC_CM32 = CRICKET_FOURCC(0, 0, 0, 32),  // BGRA kCMPixelFormat_32ARGB
  FOURCC_CM24 = CRICKET_FOURCC(0, 0, 0, 24),  // RAW kCMPixelFormat_24RGB
  // 1 Auxiliary compressed YUV format set aside for capturer.
  FOURCC_H264 = CRICKET_FOURCC('H', '2', '6', '4'),
};
#undef CRICKET_FOURCC
// Match any fourcc.
// We move this out of the enum because using it in many places caused
// the compiler to get grumpy, presumably since the above enum is
// backed by an int.
// NOTE(review): `static` in a header gives every translation unit its own
// copy; an `inline constexpr` variable would avoid that.
static const uint32_t FOURCC_ANY = 0xFFFFFFFF;
// Converts fourcc aliases into canonical ones (see kFourCCAliases in the
// .cc file); non-alias values are returned unchanged.
uint32_t CanonicalFourCC(uint32_t fourcc);
// Get FourCC code as a 4-character string: the value is decoded
// little-endian, least-significant byte first.
inline std::string GetFourccName(uint32_t fourcc) {
  std::string name;
  for (int shift = 0; shift <= 24; shift += 8) {
    name.push_back(static_cast<char>((fourcc >> shift) & 0xFF));
  }
  return name;
}
//////////////////////////////////////////////////////////////////////////////
// Definition of VideoFormat.
//////////////////////////////////////////////////////////////////////////////
// VideoFormat with Plain Old Data for global variables. Kept POD so instances
// can be statically initialized.
struct VideoFormatPod {
  int width;         // Number of pixels.
  int height;        // Number of pixels.
  int64_t interval;  // Nanoseconds between frames.
  uint32_t fourcc;   // Color space. FOURCC_ANY means that any color space is OK.
};
struct RTC_EXPORT VideoFormat : VideoFormatPod {
  // Shortest representable frame interval, used when fps == 0.
  static const int64_t kMinimumInterval =
      rtc::kNumNanosecsPerSec / 10000;  // 10k fps.
  VideoFormat() { Construct(0, 0, 0, 0); }
  VideoFormat(int w, int h, int64_t interval_ns, uint32_t cc) {
    Construct(w, h, interval_ns, cc);
  }
  explicit VideoFormat(const VideoFormatPod& format) {
    Construct(format.width, format.height, format.interval, format.fourcc);
  }
  // Assigns all four POD fields at once.
  void Construct(int w, int h, int64_t interval_ns, uint32_t cc) {
    width = w;
    height = h;
    interval = interval_ns;
    fourcc = cc;
  }
  static int64_t FpsToInterval(int fps) {
    return fps ? rtc::kNumNanosecsPerSec / fps : kMinimumInterval;
  }
  // Integer fps; truncates, so e.g. a 33ms interval yields 30.
  static int IntervalToFps(int64_t interval) {
    if (!interval) {
      return 0;
    }
    return static_cast<int>(rtc::kNumNanosecsPerSec / interval);
  }
  static float IntervalToFpsFloat(int64_t interval) {
    if (!interval) {
      return 0.f;
    }
    return static_cast<float>(rtc::kNumNanosecsPerSec) /
           static_cast<float>(interval);
  }
  bool operator==(const VideoFormat& format) const {
    return width == format.width && height == format.height &&
           interval == format.interval && fourcc == format.fourcc;
  }
  bool operator!=(const VideoFormat& format) const {
    return !(*this == format);
  }
  // Lexicographic order on (fourcc, width, height, -interval). Note the
  // deliberately inverted interval comparison: among otherwise-equal formats,
  // a longer interval (lower fps) orders first.
  bool operator<(const VideoFormat& format) const {
    return (fourcc < format.fourcc) ||
           (fourcc == format.fourcc && width < format.width) ||
           (fourcc == format.fourcc && width == format.width &&
            height < format.height) ||
           (fourcc == format.fourcc && width == format.width &&
            height == format.height && interval > format.interval);
  }
  int framerate() const { return IntervalToFps(interval); }
  // Check if both width and height are 0.
  bool IsSize0x0() const { return 0 == width && 0 == height; }
  // Check if this format is less than another one by comparing the resolution
  // and frame rate.
  bool IsPixelRateLess(const VideoFormat& format) const {
    return width * height * framerate() <
           format.width * format.height * format.framerate();
  }
  // Get a string presentation in the form of "fourcc width x height x fps"
  std::string ToString() const;
};
// Returns the largest positive integer that divides both `a` and `b`.
// Requires a >= 0 and b > 0 (dcheck-enforced in the implementation).
int GreatestCommonDivisor(int a, int b);
// Returns the smallest positive integer that is divisible by both `a` and `b`.
// Requires a > 0 and b > 0.
int LeastCommonMultiple(int a, int b);
} // namespace cricket
#endif // MEDIA_BASE_VIDEO_COMMON_H_

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "media/base/video_source_base.h"
#include <algorithm>
#include "absl/algorithm/container.h"
#include "rtc_base/checks.h"
namespace rtc {
// Nothing to set up or tear down; `sinks_` manages its own storage.
VideoSourceBase::VideoSourceBase() = default;
VideoSourceBase::~VideoSourceBase() = default;
// Registers `sink` with the given wants, or refreshes the wants of an
// already-registered sink.
void VideoSourceBase::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  RTC_DCHECK(sink != nullptr);
  SinkPair* existing = FindSinkPair(sink);
  if (existing != nullptr) {
    existing->wants = wants;
  } else {
    sinks_.push_back(SinkPair(sink, wants));
  }
}
// Unregisters `sink`; the sink must currently be registered.
void VideoSourceBase::RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK(sink != nullptr);
  RTC_DCHECK(FindSinkPair(sink));
  auto is_target = [sink](const SinkPair& pair) { return pair.sink == sink; };
  // Erase-remove idiom: drop every entry referring to `sink`.
  sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(), is_target),
               sinks_.end());
}
// Returns the SinkPair entry for `sink`, or nullptr when it is not
// registered.
VideoSourceBase::SinkPair* VideoSourceBase::FindSinkPair(
    const VideoSinkInterface<webrtc::VideoFrame>* sink) {
  for (SinkPair& pair : sinks_) {
    if (pair.sink == sink) {
      return &pair;
    }
  }
  return nullptr;
}
// Nothing to set up or tear down; the sequence checker attaches on first use.
VideoSourceBaseGuarded::VideoSourceBaseGuarded() = default;
VideoSourceBaseGuarded::~VideoSourceBaseGuarded() = default;
// Registers `sink` or refreshes its wants. Must run on the construction
// sequence (dcheck-enforced).
void VideoSourceBaseGuarded::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  RTC_DCHECK(sink != nullptr);
  SinkPair* existing = FindSinkPair(sink);
  if (existing != nullptr) {
    existing->wants = wants;
  } else {
    sinks_.push_back(SinkPair(sink, wants));
  }
}
// Unregisters `sink`; must be registered, and must run on the construction
// sequence.
void VideoSourceBaseGuarded::RemoveSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  RTC_DCHECK(sink != nullptr);
  RTC_DCHECK(FindSinkPair(sink));
  auto is_target = [sink](const SinkPair& pair) { return pair.sink == sink; };
  // Erase-remove idiom: drop every entry referring to `sink`.
  sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(), is_target),
               sinks_.end());
}
// Returns the SinkPair entry for `sink`, or nullptr when it is not
// registered. Must run on the construction sequence.
VideoSourceBaseGuarded::SinkPair* VideoSourceBaseGuarded::FindSinkPair(
    const VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  for (SinkPair& pair : sinks_) {
    if (pair.sink == sink) {
      return &pair;
    }
  }
  return nullptr;
}
// Read access to the registered sinks; only valid on the construction
// sequence.
const std::vector<VideoSourceBaseGuarded::SinkPair>&
VideoSourceBaseGuarded::sink_pairs() const {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  return sinks_;
}
} // namespace rtc

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MEDIA_BASE_VIDEO_SOURCE_BASE_H_
#define MEDIA_BASE_VIDEO_SOURCE_BASE_H_
#include <vector>
#include "api/sequence_checker.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "rtc_base/system/no_unique_address.h"
namespace rtc {
// VideoSourceBase is not thread safe. Before using this class, consider using
// VideoSourceBaseGuarded below instead, which is an identical implementation
// but applies a sequence checker to help protect internal state.
// TODO(bugs.webrtc.org/12780): Delete this class.
class VideoSourceBase : public VideoSourceInterface<webrtc::VideoFrame> {
 public:
  VideoSourceBase();
  ~VideoSourceBase() override;
  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const VideoSinkWants& wants) override;
  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
 protected:
  // A registered sink together with the wants it last supplied.
  struct SinkPair {
    SinkPair(VideoSinkInterface<webrtc::VideoFrame>* sink, VideoSinkWants wants)
        : sink(sink), wants(wants) {}
    VideoSinkInterface<webrtc::VideoFrame>* sink;
    VideoSinkWants wants;
  };
  // Returns the entry for `sink`, or nullptr when it is not registered.
  SinkPair* FindSinkPair(const VideoSinkInterface<webrtc::VideoFrame>* sink);
  const std::vector<SinkPair>& sink_pairs() const { return sinks_; }
 private:
  std::vector<SinkPair> sinks_;
};
// VideoSourceBaseGuarded assumes that operations related to sinks, occur on the
// same TQ/thread that the object was constructed on.
class VideoSourceBaseGuarded : public VideoSourceInterface<webrtc::VideoFrame> {
 public:
  VideoSourceBaseGuarded();
  ~VideoSourceBaseGuarded() override;
  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const VideoSinkWants& wants) override;
  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
 protected:
  // A registered sink together with the wants it last supplied.
  struct SinkPair {
    SinkPair(VideoSinkInterface<webrtc::VideoFrame>* sink, VideoSinkWants wants)
        : sink(sink), wants(wants) {}
    VideoSinkInterface<webrtc::VideoFrame>* sink;
    VideoSinkWants wants;
  };
  // Returns the entry for `sink`, or nullptr when it is not registered.
  SinkPair* FindSinkPair(const VideoSinkInterface<webrtc::VideoFrame>* sink);
  const std::vector<SinkPair>& sink_pairs() const;
  // Keep the `source_sequence_` checker protected to allow sub classes the
  // ability to call Detach() if/when appropriate.
  RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker source_sequence_;
 private:
  std::vector<SinkPair> sinks_ RTC_GUARDED_BY(&source_sequence_);
};
} // namespace rtc
#endif // MEDIA_BASE_VIDEO_SOURCE_BASE_H_