Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,8 @@
sprang@webrtc.org
danilchap@webrtc.org
brandtr@webrtc.org
tommi@webrtc.org
mflodman@webrtc.org
stefan@webrtc.org
per-file version.cc=webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com

View file

@ -0,0 +1,3 @@
eshr@webrtc.org
hbos@webrtc.org
ilnik@webrtc.org

View file

@ -0,0 +1,17 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/adaptation_constraint.h"
namespace webrtc {

// Out-of-line definition of the interface's virtual destructor; defaulted,
// as there is nothing to clean up.
AdaptationConstraint::~AdaptationConstraint() = default;

}  // namespace webrtc

View file

@ -0,0 +1,41 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_
#define CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_
#include <string>
#include "api/adaptation/resource.h"
#include "call/adaptation/video_source_restrictions.h"
#include "call/adaptation/video_stream_input_state.h"
namespace webrtc {
// Adaptation constraints have the ability to prevent applying a proposed
// adaptation (expressed as restrictions before/after adaptation).
class AdaptationConstraint {
 public:
  virtual ~AdaptationConstraint();

  // Human-readable identifier of the constraint, used for logging/debugging.
  virtual std::string Name() const = 0;

  // Returns true if the proposed adaptation (from `restrictions_before` to
  // `restrictions_after`, given the current `input_state`) may be applied.
  // TODO(https://crbug.com/webrtc/11172): When we have multi-stream adaptation
  // support, this interface needs to indicate which stream the adaptation
  // applies to.
  virtual bool IsAdaptationUpAllowed(
      const VideoStreamInputState& input_state,
      const VideoSourceRestrictions& restrictions_before,
      const VideoSourceRestrictions& restrictions_after) const = 0;
};
} // namespace webrtc
#endif // CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_

View file

@ -0,0 +1,122 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/broadcast_resource_listener.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "api/make_ref_counted.h"
#include "rtc_base/checks.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
// The AdapterResource redirects resource usage measurements from its parent to
// a single ResourceListener.
// The AdapterResource redirects resource usage measurements from its parent to
// a single ResourceListener.
class BroadcastResourceListener::AdapterResource : public Resource {
 public:
  // `name` is a non-owning view; moving it is just a copy, so it is passed
  // straight into `name_` (previously wrapped in a no-op std::move).
  explicit AdapterResource(absl::string_view name) : name_(name) {}
  ~AdapterResource() override { RTC_DCHECK(!listener_); }

  // The parent is letting us know we have a usage measurement.
  void OnResourceUsageStateMeasured(ResourceUsageState usage_state) {
    MutexLock lock(&lock_);
    if (!listener_)
      return;
    listener_->OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource>(this),
                                            usage_state);
  }

  // Resource implementation.
  std::string Name() const override { return name_; }
  void SetResourceListener(ResourceListener* listener) override {
    MutexLock lock(&lock_);
    // A listener may only be set when none is set, and only cleared when one
    // is set.
    RTC_DCHECK(!listener_ || !listener);
    listener_ = listener;
  }

 private:
  const std::string name_;
  Mutex lock_;
  ResourceListener* listener_ RTC_GUARDED_BY(lock_) = nullptr;
};
// Takes a reference to the resource whose measurements will be broadcast.
// The by-value scoped_refptr parameter is moved into the member to avoid an
// extra atomic ref-count increment/decrement.
BroadcastResourceListener::BroadcastResourceListener(
    rtc::scoped_refptr<Resource> source_resource)
    : source_resource_(std::move(source_resource)), is_listening_(false) {
  RTC_DCHECK(source_resource_);
}
BroadcastResourceListener::~BroadcastResourceListener() {
  // StopListening() must have been called before destruction.
  RTC_DCHECK(!is_listening_);
}

// Returns the resource whose measurements are being broadcast.
rtc::scoped_refptr<Resource> BroadcastResourceListener::SourceResource() const {
  return source_resource_;
}
// Registers `this` as the source resource's listener. Must not already be
// listening.
void BroadcastResourceListener::StartListening() {
  MutexLock lock(&lock_);
  RTC_DCHECK(!is_listening_);
  source_resource_->SetResourceListener(this);
  is_listening_ = true;
}

// Unregisters from the source resource. All adapters must have been removed
// (via RemoveAdapterResource()) before calling this.
void BroadcastResourceListener::StopListening() {
  MutexLock lock(&lock_);
  RTC_DCHECK(is_listening_);
  RTC_DCHECK(adapters_.empty());
  source_resource_->SetResourceListener(nullptr);
  is_listening_ = false;
}
// Spawns a new adapter resource, named "<source name>Adapter", that echoes
// the source resource's usage measurements. Only valid while listening.
rtc::scoped_refptr<Resource>
BroadcastResourceListener::CreateAdapterResource() {
  MutexLock lock(&lock_);
  RTC_DCHECK(is_listening_);
  rtc::scoped_refptr<AdapterResource> adapter =
      rtc::make_ref_counted<AdapterResource>(source_resource_->Name() +
                                             "Adapter");
  adapters_.push_back(adapter);
  return adapter;
}
// Drops `resource` from the broadcast set; it must be an adapter previously
// returned by CreateAdapterResource().
void BroadcastResourceListener::RemoveAdapterResource(
    rtc::scoped_refptr<Resource> resource) {
  MutexLock lock(&lock_);
  auto position = std::find(adapters_.begin(), adapters_.end(), resource);
  RTC_DCHECK(position != adapters_.end());
  adapters_.erase(position);
}
std::vector<rtc::scoped_refptr<Resource>>
BroadcastResourceListener::GetAdapterResources() {
std::vector<rtc::scoped_refptr<Resource>> resources;
MutexLock lock(&lock_);
for (const auto& adapter : adapters_) {
resources.push_back(adapter);
}
return resources;
}
// Fans a measurement from the source resource out to every adapter.
void BroadcastResourceListener::OnResourceUsageStateMeasured(
    rtc::scoped_refptr<Resource> resource,
    ResourceUsageState usage_state) {
  // Only the source resource may signal us.
  RTC_DCHECK_EQ(resource, source_resource_);
  MutexLock lock(&lock_);
  for (const auto& adapter : adapters_) {
    adapter->OnResourceUsageStateMeasured(usage_state);
  }
}
} // namespace webrtc

View file

@ -0,0 +1,75 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_
#define CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_
#include <vector>
#include "api/adaptation/resource.h"
#include "api/scoped_refptr.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
// Responsible for forwarding 1 resource usage measurement to N listeners by
// creating N "adapter" resources.
//
// Example:
// If we have ResourceA, ResourceListenerX and ResourceListenerY we can create a
// BroadcastResourceListener that listens to ResourceA, use CreateAdapter() to
// spawn adapter resources ResourceX and ResourceY and let ResourceListenerX
// listen to ResourceX and ResourceListenerY listen to ResourceY. When ResourceA
// makes a measurement it will be echoed by both ResourceX and ResourceY.
//
// TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor is
// moved to call there will only be one ResourceAdaptationProcessor that needs
// to listen to the injected resources. When this is the case, delete this class
// and DCHECK that a Resource's listener is never overwritten.
class BroadcastResourceListener : public ResourceListener {
 public:
  explicit BroadcastResourceListener(
      rtc::scoped_refptr<Resource> source_resource);
  ~BroadcastResourceListener() override;

  // The resource whose measurements are being broadcast.
  rtc::scoped_refptr<Resource> SourceResource() const;
  // Starts/stops receiving measurements from the source resource. All
  // adapters must be removed before StopListening() is called.
  void StartListening();
  void StopListening();

  // Creates a Resource that redirects any resource usage measurements that
  // BroadcastResourceListener receives to its listener.
  rtc::scoped_refptr<Resource> CreateAdapterResource();

  // Unregister the adapter from the BroadcastResourceListener; it will no
  // longer receive resource usage measurement and will no longer be referenced.
  // Use this to prevent memory leaks of old adapters.
  void RemoveAdapterResource(rtc::scoped_refptr<Resource> resource);
  std::vector<rtc::scoped_refptr<Resource>> GetAdapterResources();

  // ResourceListener implementation.
  void OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
                                    ResourceUsageState usage_state) override;

 private:
  class AdapterResource;
  friend class AdapterResource;

  const rtc::scoped_refptr<Resource> source_resource_;
  // Guards `is_listening_` and `adapters_`.
  Mutex lock_;
  bool is_listening_ RTC_GUARDED_BY(lock_);
  // The AdapterResource unregisters itself prior to destruction, guaranteeing
  // that these pointers are safe to use.
  std::vector<rtc::scoped_refptr<AdapterResource>> adapters_
      RTC_GUARDED_BY(lock_);
};
} // namespace webrtc
#endif // CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_

View file

@ -0,0 +1,14 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/degradation_preference_provider.h"
webrtc::DegradationPreferenceProvider::~DegradationPreferenceProvider() =
default;

View file

@ -0,0 +1,27 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_
#define CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_
#include "api/rtp_parameters.h"
namespace webrtc {
// Interface for querying the currently effective degradation preference.
class DegradationPreferenceProvider {
 public:
  virtual ~DegradationPreferenceProvider();

  // The degradation preference currently in effect.
  virtual DegradationPreference degradation_preference() const = 0;
};
} // namespace webrtc
#endif // CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_

View file

@ -0,0 +1,54 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/encoder_settings.h"
#include <utility>
namespace webrtc {
// Takes ownership of the encoder info, config and codec settings (moved in).
EncoderSettings::EncoderSettings(VideoEncoder::EncoderInfo encoder_info,
                                 VideoEncoderConfig encoder_config,
                                 VideoCodec video_codec)
    : encoder_info_(std::move(encoder_info)),
      encoder_config_(std::move(encoder_config)),
      video_codec_(std::move(video_codec)) {}
// Copy operations are user-defined because VideoEncoderConfig is duplicated
// through its explicit Copy() method (a plain member-wise copy is presumably
// unavailable for it).
EncoderSettings::EncoderSettings(const EncoderSettings& other)
    : encoder_info_(other.encoder_info_),
      encoder_config_(other.encoder_config_.Copy()),
      video_codec_(other.video_codec_) {}

EncoderSettings& EncoderSettings::operator=(const EncoderSettings& other) {
  encoder_info_ = other.encoder_info_;
  encoder_config_ = other.encoder_config_.Copy();
  video_codec_ = other.video_codec_;
  return *this;
}
// Encoder capabilities, implementation info, etc.
const VideoEncoder::EncoderInfo& EncoderSettings::encoder_info() const {
  return encoder_info_;
}

// Configuration parameters, ultimately coming from the API and negotiation.
const VideoEncoderConfig& EncoderSettings::encoder_config() const {
  return encoder_config_;
}

// Lower level config, heavily based on the VideoEncoderConfig.
const VideoCodec& EncoderSettings::video_codec() const {
  return video_codec_;
}
// Returns the configured codec type, or kVideoCodecGeneric when no encoder
// settings are available.
VideoCodecType GetVideoCodecTypeOrGeneric(
    const absl::optional<EncoderSettings>& settings) {
  if (!settings.has_value()) {
    return kVideoCodecGeneric;
  }
  return settings->encoder_config().codec_type;
}
} // namespace webrtc

View file

@ -0,0 +1,48 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_ENCODER_SETTINGS_H_
#define CALL_ADAPTATION_ENCODER_SETTINGS_H_
#include "absl/types/optional.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "video/config/video_encoder_config.h"
namespace webrtc {
// Information about an encoder available when reconfiguring the encoder.
class EncoderSettings {
 public:
  EncoderSettings(VideoEncoder::EncoderInfo encoder_info,
                  VideoEncoderConfig encoder_config,
                  VideoCodec video_codec);
  // User-defined copy operations; the config is duplicated via Copy() in the
  // .cc file.
  EncoderSettings(const EncoderSettings& other);
  EncoderSettings& operator=(const EncoderSettings& other);

  // Encoder capabilities, implementation info, etc.
  const VideoEncoder::EncoderInfo& encoder_info() const;
  // Configuration parameters, ultimately coming from the API and negotiation.
  const VideoEncoderConfig& encoder_config() const;
  // Lower level config, heavily based on the VideoEncoderConfig.
  const VideoCodec& video_codec() const;

 private:
  VideoEncoder::EncoderInfo encoder_info_;
  VideoEncoderConfig encoder_config_;
  VideoCodec video_codec_;
};

// Convenience helper: the configured codec type, or kVideoCodecGeneric when
// `settings` is empty.
VideoCodecType GetVideoCodecTypeOrGeneric(
    const absl::optional<EncoderSettings>& settings);
} // namespace webrtc
#endif // CALL_ADAPTATION_ENCODER_SETTINGS_H_

View file

@ -0,0 +1,378 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/resource_adaptation_processor.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "api/sequence_checker.h"
#include "api/video/video_adaptation_counters.h"
#include "call/adaptation/video_stream_adapter.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
// Captures the current task queue as the adaptation task queue; must be
// constructed on that queue.
ResourceAdaptationProcessor::ResourceListenerDelegate::ResourceListenerDelegate(
    ResourceAdaptationProcessor* processor)
    : task_queue_(TaskQueueBase::Current()), processor_(processor) {
  RTC_DCHECK(task_queue_);
}

// Clears the back-pointer so that any already-posted measurements become
// no-ops instead of dereferencing a destroyed processor.
void ResourceAdaptationProcessor::ResourceListenerDelegate::
    OnProcessorDestroyed() {
  RTC_DCHECK_RUN_ON(task_queue_);
  processor_ = nullptr;
}
// Measurements may arrive on any thread; if not already on the adaptation
// task queue, re-post this call onto it.
void ResourceAdaptationProcessor::ResourceListenerDelegate::
    OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
                                 ResourceUsageState usage_state) {
  if (!task_queue_->IsCurrent()) {
    // The scoped_refptr capture keeps this delegate alive until the posted
    // task has run.
    task_queue_->PostTask(
        [this_ref = rtc::scoped_refptr<ResourceListenerDelegate>(this),
         resource, usage_state] {
          this_ref->OnResourceUsageStateMeasured(resource, usage_state);
        });
    return;
  }
  RTC_DCHECK_RUN_ON(task_queue_);
  // `processor_` is null if the processor was destroyed before this ran.
  if (processor_) {
    processor_->OnResourceUsageStateMeasured(resource, usage_state);
  }
}
// Default: adaptation applied, with an empty log message.
ResourceAdaptationProcessor::MitigationResultAndLogMessage::
    MitigationResultAndLogMessage()
    : result(MitigationResult::kAdaptationApplied), message() {}

ResourceAdaptationProcessor::MitigationResultAndLogMessage::
    MitigationResultAndLogMessage(MitigationResult result,
                                  absl::string_view message)
    : result(result), message(message) {}
// Must be constructed on the adaptation task queue; it is captured via
// TaskQueueBase::Current(). Registers as restrictions listener on the adapter.
ResourceAdaptationProcessor::ResourceAdaptationProcessor(
    VideoStreamAdapter* stream_adapter)
    : task_queue_(TaskQueueBase::Current()),
      resource_listener_delegate_(
          rtc::make_ref_counted<ResourceListenerDelegate>(this)),
      resources_(),
      stream_adapter_(stream_adapter),
      last_reported_source_restrictions_(),
      previous_mitigation_results_() {
  RTC_DCHECK(task_queue_);
  stream_adapter_->AddRestrictionsListener(this);
}
ResourceAdaptationProcessor::~ResourceAdaptationProcessor() {
  RTC_DCHECK_RUN_ON(task_queue_);
  // All resources must have been removed (RemoveResource()) before teardown.
  RTC_DCHECK(resources_.empty())
      << "There are resource(s) attached to a ResourceAdaptationProcessor "
      << "being destroyed.";
  stream_adapter_->RemoveRestrictionsListener(this);
  // Null the delegate's back-pointer so in-flight posted measurements become
  // no-ops rather than use-after-free.
  resource_listener_delegate_->OnProcessorDestroyed();
}
// Registers a listener for limitation changes. Registering the same listener
// twice is a bug. Uses absl::c_find for consistency with the other container
// lookups in this file (AddResource/RemoveResource/...).
void ResourceAdaptationProcessor::AddResourceLimitationsListener(
    ResourceLimitationsListener* limitations_listener) {
  RTC_DCHECK_RUN_ON(task_queue_);
  RTC_DCHECK(absl::c_find(resource_limitations_listeners_,
                          limitations_listener) ==
             resource_limitations_listeners_.end());
  resource_limitations_listeners_.push_back(limitations_listener);
}
// Unregisters a previously added limitations listener; the listener must be
// present. Uses absl::c_find for consistency with the rest of this file.
void ResourceAdaptationProcessor::RemoveResourceLimitationsListener(
    ResourceLimitationsListener* limitations_listener) {
  RTC_DCHECK_RUN_ON(task_queue_);
  auto it =
      absl::c_find(resource_limitations_listeners_, limitations_listener);
  RTC_DCHECK(it != resource_limitations_listeners_.end());
  resource_limitations_listeners_.erase(it);
}
// Registers `resource` and starts receiving its measurements. May be called
// from any thread; `resources_` is guarded by `resources_lock_`.
void ResourceAdaptationProcessor::AddResource(
    rtc::scoped_refptr<Resource> resource) {
  RTC_DCHECK(resource);
  {
    MutexLock crit(&resources_lock_);
    RTC_DCHECK(absl::c_find(resources_, resource) == resources_.end())
        << "Resource \"" << resource->Name() << "\" was already registered.";
    resources_.push_back(resource);
  }
  // The delegate (not `this`) is registered so that measurements made off the
  // adaptation task queue get posted onto it.
  resource->SetResourceListener(resource_listener_delegate_.get());
  RTC_LOG(LS_INFO) << "Registered resource \"" << resource->Name() << "\".";
}
// Returns a copy of the registered resources; callable from any thread.
std::vector<rtc::scoped_refptr<Resource>>
ResourceAdaptationProcessor::GetResources() const {
  MutexLock crit(&resources_lock_);
  return resources_;
}
// Unregisters `resource`; it must have been added previously. Also removes
// any adaptation limitations it imposed.
void ResourceAdaptationProcessor::RemoveResource(
    rtc::scoped_refptr<Resource> resource) {
  RTC_DCHECK(resource);
  RTC_LOG(LS_INFO) << "Removing resource \"" << resource->Name() << "\".";
  // Detach first so no new measurements arrive for this resource.
  resource->SetResourceListener(nullptr);
  {
    MutexLock crit(&resources_lock_);
    auto it = absl::c_find(resources_, resource);
    RTC_DCHECK(it != resources_.end()) << "Resource \"" << resource->Name()
                                       << "\" was not a registered resource.";
    resources_.erase(it);
  }
  // May re-post onto the adaptation task queue if called from another thread.
  RemoveLimitationsImposedByResource(std::move(resource));
}
// Erases `resource`'s recorded adaptation limits. If it was the most limited
// resource, restrictions are rolled back to the next most limited resource
// (or cleared entirely if no adapted resources remain).
void ResourceAdaptationProcessor::RemoveLimitationsImposedByResource(
    rtc::scoped_refptr<Resource> resource) {
  // May be called from any thread; marshal onto the adaptation task queue.
  if (!task_queue_->IsCurrent()) {
    task_queue_->PostTask(
        [this, resource]() { RemoveLimitationsImposedByResource(resource); });
    return;
  }
  RTC_DCHECK_RUN_ON(task_queue_);
  auto resource_adaptation_limits =
      adaptation_limits_by_resources_.find(resource);
  if (resource_adaptation_limits != adaptation_limits_by_resources_.end()) {
    VideoStreamAdapter::RestrictionsWithCounters adaptation_limits =
        resource_adaptation_limits->second;
    adaptation_limits_by_resources_.erase(resource_adaptation_limits);
    if (adaptation_limits_by_resources_.empty()) {
      // Only the resource being removed was adapted so clear restrictions.
      stream_adapter_->ClearRestrictions();
      return;
    }
    VideoStreamAdapter::RestrictionsWithCounters most_limited =
        FindMostLimitedResources().second;
    if (adaptation_limits.counters.Total() <= most_limited.counters.Total()) {
      // The removed limitations were less limited than the most limited
      // resource. Don't change the current restrictions.
      return;
    }
    // Apply the new most limited resource as the next restrictions.
    Adaptation adapt_to = stream_adapter_->GetAdaptationTo(
        most_limited.counters, most_limited.restrictions);
    RTC_DCHECK_EQ(adapt_to.status(), Adaptation::Status::kValid);
    stream_adapter_->ApplyAdaptation(adapt_to, nullptr);
    RTC_LOG(LS_INFO)
        << "Most limited resource removed. Restoring restrictions to "
           "next most limited restrictions: "
        << most_limited.restrictions.ToString() << " with counters "
        << most_limited.counters.ToString();
  }
}
// Entry point for resource signals on the adaptation task queue. Dispatches
// to OnResourceOveruse()/OnResourceUnderuse() and logs the outcome, with
// de-duplication per resource to avoid log spam.
void ResourceAdaptationProcessor::OnResourceUsageStateMeasured(
    rtc::scoped_refptr<Resource> resource,
    ResourceUsageState usage_state) {
  RTC_DCHECK_RUN_ON(task_queue_);
  RTC_DCHECK(resource);
  // `resource` could have been removed after signalling.
  {
    MutexLock crit(&resources_lock_);
    if (absl::c_find(resources_, resource) == resources_.end()) {
      RTC_LOG(LS_INFO) << "Ignoring signal from removed resource \""
                       << resource->Name() << "\".";
      return;
    }
  }
  MitigationResultAndLogMessage result_and_message;
  switch (usage_state) {
    case ResourceUsageState::kOveruse:
      result_and_message = OnResourceOveruse(resource);
      break;
    case ResourceUsageState::kUnderuse:
      result_and_message = OnResourceUnderuse(resource);
      break;
  }
  // Maybe log the result of the operation.
  auto it = previous_mitigation_results_.find(resource.get());
  if (it != previous_mitigation_results_.end() &&
      it->second == result_and_message.result) {
    // This resource has previously reported the same result and we haven't
    // successfully adapted since - don't log to avoid spam.
    return;
  }
  RTC_LOG(LS_INFO) << "Resource \"" << resource->Name() << "\" signalled "
                   << ResourceUsageStateToString(usage_state) << ". "
                   << result_and_message.message;
  if (result_and_message.result == MitigationResult::kAdaptationApplied) {
    // A successful adaptation resets the de-duplication history.
    previous_mitigation_results_.clear();
  } else {
    previous_mitigation_results_.insert(
        std::make_pair(resource.get(), result_and_message.result));
  }
}
// Attempts to adapt the stream up on behalf of `reason_resource`. The
// adaptation is only applied if the adapter allows it AND `reason_resource`
// is the single most limited resource.
ResourceAdaptationProcessor::MitigationResultAndLogMessage
ResourceAdaptationProcessor::OnResourceUnderuse(
    rtc::scoped_refptr<Resource> reason_resource) {
  RTC_DCHECK_RUN_ON(task_queue_);
  // How can this stream be adapted up?
  Adaptation adaptation = stream_adapter_->GetAdaptationUp();
  if (adaptation.status() != Adaptation::Status::kValid) {
    rtc::StringBuilder message;
    message << "Not adapting up because VideoStreamAdapter returned "
            << Adaptation::StatusToString(adaptation.status());
    return MitigationResultAndLogMessage(MitigationResult::kRejectedByAdapter,
                                         message.Release());
  }
  // Check that resource is most limited.
  std::vector<rtc::scoped_refptr<Resource>> most_limited_resources;
  VideoStreamAdapter::RestrictionsWithCounters most_limited_restrictions;
  std::tie(most_limited_resources, most_limited_restrictions) =
      FindMostLimitedResources();
  // If the most restricted resource is less limited than current restrictions
  // then proceed with adapting up.
  if (!most_limited_resources.empty() &&
      most_limited_restrictions.counters.Total() >=
          stream_adapter_->adaptation_counters().Total()) {
    // If `reason_resource` is not one of the most limiting resources then abort
    // adaptation.
    if (absl::c_find(most_limited_resources, reason_resource) ==
        most_limited_resources.end()) {
      rtc::StringBuilder message;
      message << "Resource \"" << reason_resource->Name()
              << "\" was not the most limited resource.";
      return MitigationResultAndLogMessage(
          MitigationResult::kNotMostLimitedResource, message.Release());
    }
    if (most_limited_resources.size() > 1) {
      // If there are multiple most limited resources, all must signal underuse
      // before the adaptation is applied.
      UpdateResourceLimitations(reason_resource, adaptation.restrictions(),
                                adaptation.counters());
      rtc::StringBuilder message;
      message << "Resource \"" << reason_resource->Name()
              << "\" was not the only most limited resource.";
      return MitigationResultAndLogMessage(
          MitigationResult::kSharedMostLimitedResource, message.Release());
    }
  }
  // Apply adaptation.
  stream_adapter_->ApplyAdaptation(adaptation, reason_resource);
  rtc::StringBuilder message;
  message << "Adapted up successfully. Unfiltered adaptations: "
          << stream_adapter_->adaptation_counters().ToString();
  return MitigationResultAndLogMessage(MitigationResult::kAdaptationApplied,
                                       message.Release());
}
// Attempts to adapt the stream down on behalf of `reason_resource`, recording
// the resulting restrictions as that resource's limitation.
ResourceAdaptationProcessor::MitigationResultAndLogMessage
ResourceAdaptationProcessor::OnResourceOveruse(
    rtc::scoped_refptr<Resource> reason_resource) {
  RTC_DCHECK_RUN_ON(task_queue_);
  // How can this stream be adapted down? (Comment fixed: this is the
  // overuse/adapt-down path, not adapt-up.)
  Adaptation adaptation = stream_adapter_->GetAdaptationDown();
  if (adaptation.status() == Adaptation::Status::kLimitReached) {
    // Add resource as most limited.
    VideoStreamAdapter::RestrictionsWithCounters restrictions;
    std::tie(std::ignore, restrictions) = FindMostLimitedResources();
    UpdateResourceLimitations(reason_resource, restrictions.restrictions,
                              restrictions.counters);
  }
  if (adaptation.status() != Adaptation::Status::kValid) {
    rtc::StringBuilder message;
    message << "Not adapting down because VideoStreamAdapter returned "
            << Adaptation::StatusToString(adaptation.status());
    return MitigationResultAndLogMessage(MitigationResult::kRejectedByAdapter,
                                         message.Release());
  }
  // Apply adaptation.
  UpdateResourceLimitations(reason_resource, adaptation.restrictions(),
                            adaptation.counters());
  stream_adapter_->ApplyAdaptation(adaptation, reason_resource);
  rtc::StringBuilder message;
  message << "Adapted down successfully. Unfiltered adaptations: "
          << stream_adapter_->adaptation_counters().ToString();
  return MitigationResultAndLogMessage(MitigationResult::kAdaptationApplied,
                                       message.Release());
}
// Scans `adaptation_limits_by_resources_` for the highest total adaptation
// count, returning every resource tied at that level together with the
// corresponding restrictions.
std::pair<std::vector<rtc::scoped_refptr<Resource>>,
          VideoStreamAdapter::RestrictionsWithCounters>
ResourceAdaptationProcessor::FindMostLimitedResources() const {
  std::vector<rtc::scoped_refptr<Resource>> most_limited_resources;
  VideoStreamAdapter::RestrictionsWithCounters most_limited_restrictions{
      VideoSourceRestrictions(), VideoAdaptationCounters()};

  for (const auto& entry : adaptation_limits_by_resources_) {
    const auto& candidate_limits = entry.second;
    if (candidate_limits.counters.Total() >
        most_limited_restrictions.counters.Total()) {
      // New strict maximum: restart the tie list with this resource.
      most_limited_restrictions = candidate_limits;
      most_limited_resources.clear();
      most_limited_resources.push_back(entry.first);
    } else if (most_limited_restrictions.counters ==
               candidate_limits.counters) {
      // Tied with the current maximum: add to the list.
      most_limited_resources.push_back(entry.first);
    }
  }
  return std::make_pair(std::move(most_limited_resources),
                        most_limited_restrictions);
}
// Records `restrictions`/`counters` as `reason_resource`'s current limitation
// and notifies limitation listeners with a snapshot of all resources'
// counters. No-op if the recorded limits are unchanged.
void ResourceAdaptationProcessor::UpdateResourceLimitations(
    rtc::scoped_refptr<Resource> reason_resource,
    const VideoSourceRestrictions& restrictions,
    const VideoAdaptationCounters& counters) {
  // operator[] inserts a default entry on first use for this resource.
  auto& adaptation_limits = adaptation_limits_by_resources_[reason_resource];
  if (adaptation_limits.restrictions == restrictions &&
      adaptation_limits.counters == counters) {
    return;
  }
  adaptation_limits = {restrictions, counters};

  std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters> limitations;
  for (const auto& p : adaptation_limits_by_resources_) {
    limitations.insert(std::make_pair(p.first, p.second.counters));
  }
  for (auto limitations_listener : resource_limitations_listeners_) {
    limitations_listener->OnResourceLimitationChanged(reason_resource,
                                                      limitations);
  }
}
// Called by the VideoStreamAdapter when restrictions change; keeps the
// per-resource limitation bookkeeping in sync with the adapter.
void ResourceAdaptationProcessor::OnVideoSourceRestrictionsUpdated(
    VideoSourceRestrictions restrictions,
    const VideoAdaptationCounters& adaptation_counters,
    rtc::scoped_refptr<Resource> reason,
    const VideoSourceRestrictions& unfiltered_restrictions) {
  RTC_DCHECK_RUN_ON(task_queue_);
  if (reason) {
    // A specific resource caused this update; record it as its limitation.
    UpdateResourceLimitations(reason, unfiltered_restrictions,
                              adaptation_counters);
  } else if (adaptation_counters.Total() == 0) {
    // Adaptations are cleared.
    adaptation_limits_by_resources_.clear();
    previous_mitigation_results_.clear();
    for (auto limitations_listener : resource_limitations_listeners_) {
      limitations_listener->OnResourceLimitationChanged(nullptr, {});
    }
  }
}
} // namespace webrtc

View file

@ -0,0 +1,167 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
#define CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/rtp_parameters.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/video_adaptation_counters.h"
#include "api/video/video_frame.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "call/adaptation/video_source_restrictions.h"
#include "call/adaptation/video_stream_adapter.h"
#include "call/adaptation/video_stream_input_state.h"
#include "call/adaptation/video_stream_input_state_provider.h"
#include "video/video_stream_encoder_observer.h"
namespace webrtc {
// The Resource Adaptation Processor is responsible for reacting to resource
// usage measurements (e.g. overusing or underusing CPU). When a resource is
// overused the Processor is responsible for performing mitigations in order to
// consume less resources.
//
// Today we have one Processor per VideoStreamEncoder and the Processor is only
// capable of restricting resolution or frame rate of the encoded stream. In the
// future we should have a single Processor responsible for all encoded streams,
// and it should be capable of reconfiguring other things than just
// VideoSourceRestrictions (e.g. reduce render frame rate).
// See Resource-Adaptation hotlist:
// https://bugs.chromium.org/u/590058293/hotlists/Resource-Adaptation
//
// The ResourceAdaptationProcessor is single-threaded. It may be constructed on
// any thread but MUST subsequently be used and destroyed on a single sequence,
// i.e. the "resource adaptation task queue". Resources can be added and removed
// from any thread.
class ResourceAdaptationProcessor : public ResourceAdaptationProcessorInterface,
                                    public VideoSourceRestrictionsListener,
                                    public ResourceListener {
 public:
  explicit ResourceAdaptationProcessor(
      VideoStreamAdapter* video_stream_adapter);
  ~ResourceAdaptationProcessor() override;

  // ResourceAdaptationProcessorInterface implementation.
  void AddResourceLimitationsListener(
      ResourceLimitationsListener* limitations_listener) override;
  void RemoveResourceLimitationsListener(
      ResourceLimitationsListener* limitations_listener) override;
  void AddResource(rtc::scoped_refptr<Resource> resource) override;
  std::vector<rtc::scoped_refptr<Resource>> GetResources() const override;
  void RemoveResource(rtc::scoped_refptr<Resource> resource) override;

  // ResourceListener implementation.
  // Triggers OnResourceUnderuse() or OnResourceOveruse().
  void OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
                                    ResourceUsageState usage_state) override;

  // VideoSourceRestrictionsListener implementation.
  void OnVideoSourceRestrictionsUpdated(
      VideoSourceRestrictions restrictions,
      const VideoAdaptationCounters& adaptation_counters,
      rtc::scoped_refptr<Resource> reason,
      const VideoSourceRestrictions& unfiltered_restrictions) override;

 private:
  // If resource usage measurements happens off the adaptation task queue, this
  // class takes care of posting the measurement for the processor to handle it
  // on the adaptation task queue.
  class ResourceListenerDelegate : public rtc::RefCountInterface,
                                   public ResourceListener {
   public:
    explicit ResourceListenerDelegate(ResourceAdaptationProcessor* processor);

    // Clears `processor_` so queued measurements become no-ops.
    void OnProcessorDestroyed();

    // ResourceListener implementation.
    void OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
                                      ResourceUsageState usage_state) override;

   private:
    TaskQueueBase* task_queue_;
    // Raw back-pointer; nulled by OnProcessorDestroyed() before the processor
    // goes away.
    ResourceAdaptationProcessor* processor_ RTC_GUARDED_BY(task_queue_);
  };

  // Outcome of handling a single overuse/underuse signal; used for logging.
  enum class MitigationResult {
    kNotMostLimitedResource,
    kSharedMostLimitedResource,
    kRejectedByAdapter,
    kAdaptationApplied,
  };

  struct MitigationResultAndLogMessage {
    MitigationResultAndLogMessage();
    MitigationResultAndLogMessage(MitigationResult result,
                                  absl::string_view message);
    MitigationResult result;
    std::string message;
  };

  // Performs the adaptation by getting the next target, applying it and
  // informing listeners of the new VideoSourceRestriction and adaptation
  // counters.
  MitigationResultAndLogMessage OnResourceUnderuse(
      rtc::scoped_refptr<Resource> reason_resource);
  MitigationResultAndLogMessage OnResourceOveruse(
      rtc::scoped_refptr<Resource> reason_resource);

  // Records `restrictions`/`counters` as `reason_resource`'s limitation and
  // notifies limitation listeners.
  void UpdateResourceLimitations(rtc::scoped_refptr<Resource> reason_resource,
                                 const VideoSourceRestrictions& restrictions,
                                 const VideoAdaptationCounters& counters)
      RTC_RUN_ON(task_queue_);

  // Searches `adaptation_limits_by_resources_` for each resource with the
  // highest total adaptation counts. Adaptation up may only occur if the
  // resource performing the adaptation is the only most limited resource. This
  // function returns the list of all most limited resources as well as the
  // corresponding adaptation of that resource.
  std::pair<std::vector<rtc::scoped_refptr<Resource>>,
            VideoStreamAdapter::RestrictionsWithCounters>
  FindMostLimitedResources() const RTC_RUN_ON(task_queue_);

  // Safe to call from any thread; re-posts itself onto `task_queue_`.
  void RemoveLimitationsImposedByResource(
      rtc::scoped_refptr<Resource> resource);

  TaskQueueBase* task_queue_;
  rtc::scoped_refptr<ResourceListenerDelegate> resource_listener_delegate_;
  // Input and output.
  mutable Mutex resources_lock_;
  std::vector<rtc::scoped_refptr<Resource>> resources_
      RTC_GUARDED_BY(resources_lock_);
  std::vector<ResourceLimitationsListener*> resource_limitations_listeners_
      RTC_GUARDED_BY(task_queue_);
  // Purely used for statistics, does not ensure mapped resources stay alive.
  std::map<rtc::scoped_refptr<Resource>,
           VideoStreamAdapter::RestrictionsWithCounters>
      adaptation_limits_by_resources_ RTC_GUARDED_BY(task_queue_);
  // Responsible for generating and applying possible adaptations.
  VideoStreamAdapter* const stream_adapter_ RTC_GUARDED_BY(task_queue_);
  VideoSourceRestrictions last_reported_source_restrictions_
      RTC_GUARDED_BY(task_queue_);
  // Keeps track of previous mitigation results per resource since the last
  // successful adaptation. Used to avoid RTC_LOG spam.
  std::map<Resource*, MitigationResult> previous_mitigation_results_
      RTC_GUARDED_BY(task_queue_);
};
} // namespace webrtc
#endif // CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_

View file

@ -0,0 +1,20 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/resource_adaptation_processor_interface.h"
namespace webrtc {
// Out-of-line defaulted destructors for the pure interfaces declared in the
// header.
ResourceAdaptationProcessorInterface::~ResourceAdaptationProcessorInterface() =
    default;
ResourceLimitationsListener::~ResourceLimitationsListener() = default;
} // namespace webrtc

View file

@ -0,0 +1,67 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_
#define CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_
#include <map>
#include <vector>
#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/rtp_parameters.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/video_adaptation_counters.h"
#include "api/video/video_frame.h"
#include "call/adaptation/adaptation_constraint.h"
#include "call/adaptation/encoder_settings.h"
#include "call/adaptation/video_source_restrictions.h"
namespace webrtc {
// Observes changes to the per-resource adaptation limits tracked by the
// processor.
class ResourceLimitationsListener {
 public:
  virtual ~ResourceLimitationsListener();
  // The limitations on a resource were changed. This does not mean the current
  // video restrictions have changed.
  // `resource` is the resource whose limitation changed; `resource_limitations`
  // presumably maps each known resource to the adaptation counters it imposes
  // — confirm against the processor implementation.
  virtual void OnResourceLimitationChanged(
      rtc::scoped_refptr<Resource> resource,
      const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
          resource_limitations) = 0;
};
// The Resource Adaptation Processor is responsible for reacting to resource
// usage measurements (e.g. overusing or underusing CPU). When a resource is
// overused the Processor is responsible for performing mitigations in order to
// consume less resources.
class ResourceAdaptationProcessorInterface {
 public:
  virtual ~ResourceAdaptationProcessorInterface();
  // Listener registration for limitation-change notifications.
  virtual void AddResourceLimitationsListener(
      ResourceLimitationsListener* limitations_listener) = 0;
  virtual void RemoveResourceLimitationsListener(
      ResourceLimitationsListener* limitations_listener) = 0;
  // Starts or stops listening to resources, effectively enabling or disabling
  // processing. May be called from anywhere.
  // TODO(https://crbug.com/webrtc/11172): Automatically register and unregister
  // with AddResource() and RemoveResource() instead. When the processor is
  // multi-stream aware, stream-specific resources will get added and removed
  // over time.
  virtual void AddResource(rtc::scoped_refptr<Resource> resource) = 0;
  virtual std::vector<rtc::scoped_refptr<Resource>> GetResources() const = 0;
  virtual void RemoveResource(rtc::scoped_refptr<Resource> resource) = 0;
};
} // namespace webrtc
#endif // CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_

View file

@ -0,0 +1,173 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/video_source_restrictions.h"
#include <algorithm>
#include <limits>
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
// Constructs "no restrictions": all three properties unset.
VideoSourceRestrictions::VideoSourceRestrictions()
    : max_pixels_per_frame_(absl::nullopt),
      target_pixels_per_frame_(absl::nullopt),
      max_frame_rate_(absl::nullopt) {}
// Any set value must fit in an int and the frame rate must be positive; these
// invariants are checked in debug builds only (RTC_DCHECK).
VideoSourceRestrictions::VideoSourceRestrictions(
    absl::optional<size_t> max_pixels_per_frame,
    absl::optional<size_t> target_pixels_per_frame,
    absl::optional<double> max_frame_rate)
    : max_pixels_per_frame_(std::move(max_pixels_per_frame)),
      target_pixels_per_frame_(std::move(target_pixels_per_frame)),
      max_frame_rate_(std::move(max_frame_rate)) {
  RTC_DCHECK(!max_pixels_per_frame_.has_value() ||
             max_pixels_per_frame_.value() <
                 static_cast<size_t>(std::numeric_limits<int>::max()));
  RTC_DCHECK(!max_frame_rate_.has_value() ||
             max_frame_rate_.value() < std::numeric_limits<int>::max());
  RTC_DCHECK(!max_frame_rate_.has_value() || max_frame_rate_.value() > 0.0);
}
std::string VideoSourceRestrictions::ToString() const {
rtc::StringBuilder ss;
ss << "{";
if (max_frame_rate_)
ss << " max_fps=" << max_frame_rate_.value();
if (max_pixels_per_frame_)
ss << " max_pixels_per_frame=" << max_pixels_per_frame_.value();
if (target_pixels_per_frame_)
ss << " target_pixels_per_frame=" << target_pixels_per_frame_.value();
ss << " }";
return ss.Release();
}
// Trivial accessors; a returned absl::nullopt means "unrestricted".
const absl::optional<size_t>& VideoSourceRestrictions::max_pixels_per_frame()
    const {
  return max_pixels_per_frame_;
}
const absl::optional<size_t>& VideoSourceRestrictions::target_pixels_per_frame()
    const {
  return target_pixels_per_frame_;
}
const absl::optional<double>& VideoSourceRestrictions::max_frame_rate() const {
  return max_frame_rate_;
}
// Trivial setters; passing absl::nullopt clears the restriction.
void VideoSourceRestrictions::set_max_pixels_per_frame(
    absl::optional<size_t> max_pixels_per_frame) {
  max_pixels_per_frame_ = std::move(max_pixels_per_frame);
}
void VideoSourceRestrictions::set_target_pixels_per_frame(
    absl::optional<size_t> target_pixels_per_frame) {
  target_pixels_per_frame_ = std::move(target_pixels_per_frame);
}
void VideoSourceRestrictions::set_max_frame_rate(
    absl::optional<double> max_frame_rate) {
  max_frame_rate_ = std::move(max_frame_rate);
}
// Tightens `this` to the property-wise minimum of `this` and `other`, where an
// unset optional means "unrestricted" and therefore never beats a set value.
void VideoSourceRestrictions::UpdateMin(const VideoSourceRestrictions& other) {
  // Keep the stricter (smaller) of the two optionals in `ours`.
  auto pick_stricter = [](auto& ours, const auto& theirs) {
    if (!ours.has_value()) {
      ours = theirs;
    } else if (theirs.has_value()) {
      ours = std::min(*ours, *theirs);
    }
  };
  pick_stricter(max_pixels_per_frame_, other.max_pixels_per_frame());
  pick_stricter(target_pixels_per_frame_, other.target_pixels_per_frame());
  pick_stricter(max_frame_rate_, other.max_frame_rate());
}
// Returns true if `after` is strictly tighter than `before`: at least one
// dimension (resolution or frame rate) got stricter while the other did not
// get looser.
bool DidRestrictionsIncrease(VideoSourceRestrictions before,
                             VideoSourceRestrictions after) {
  const bool res_down = DidDecreaseResolution(before, after);
  const bool fps_down = DidDecreaseFrameRate(before, after);
  const bool res_same =
      before.max_pixels_per_frame() == after.max_pixels_per_frame();
  const bool fps_same = before.max_frame_rate() == after.max_frame_rate();
  return (res_down && (fps_down || fps_same)) || (res_same && fps_down);
}
// Returns true if `after` is strictly looser than `before`: at least one
// dimension (resolution or frame rate) got looser while the other did not get
// stricter.
bool DidRestrictionsDecrease(VideoSourceRestrictions before,
                             VideoSourceRestrictions after) {
  const bool res_up = DidIncreaseResolution(before, after);
  const bool fps_up = DidIncreaseFrameRate(before, after);
  const bool res_same =
      before.max_pixels_per_frame() == after.max_pixels_per_frame();
  const bool fps_same = before.max_frame_rate() == after.max_frame_rate();
  return (res_up && (fps_up || fps_same)) || (res_same && fps_up);
}
// True if the resolution cap got looser. nullopt means "unrestricted": being
// unrestricted before can never be an increase; becoming unrestricted always
// is one.
bool DidIncreaseResolution(VideoSourceRestrictions restrictions_before,
                           VideoSourceRestrictions restrictions_after) {
  const absl::optional<size_t>& before =
      restrictions_before.max_pixels_per_frame();
  const absl::optional<size_t>& after =
      restrictions_after.max_pixels_per_frame();
  if (!before.has_value())
    return false;
  return !after.has_value() || *after > *before;
}
// True if the resolution cap got stricter. A decrease requires a concrete
// value afterwards; tightening from unrestricted (nullopt) to any value
// counts as a decrease.
bool DidDecreaseResolution(VideoSourceRestrictions restrictions_before,
                           VideoSourceRestrictions restrictions_after) {
  const absl::optional<size_t>& before =
      restrictions_before.max_pixels_per_frame();
  const absl::optional<size_t>& after =
      restrictions_after.max_pixels_per_frame();
  if (!after.has_value())
    return false;
  return !before.has_value() || *after < *before;
}
// True if the frame-rate cap got looser. nullopt means "unrestricted": being
// unrestricted before can never be an increase; becoming unrestricted always
// is one.
bool DidIncreaseFrameRate(VideoSourceRestrictions restrictions_before,
                          VideoSourceRestrictions restrictions_after) {
  const absl::optional<double>& before = restrictions_before.max_frame_rate();
  const absl::optional<double>& after = restrictions_after.max_frame_rate();
  if (!before.has_value())
    return false;
  return !after.has_value() || *after > *before;
}
// True if the frame-rate cap got stricter. A decrease requires a concrete
// value afterwards; tightening from unrestricted (nullopt) to any value
// counts as a decrease.
bool DidDecreaseFrameRate(VideoSourceRestrictions restrictions_before,
                          VideoSourceRestrictions restrictions_after) {
  const absl::optional<double>& before = restrictions_before.max_frame_rate();
  const absl::optional<double>& after = restrictions_after.max_frame_rate();
  if (!after.has_value())
    return false;
  return !before.has_value() || *after < *before;
}
} // namespace webrtc

View file

@ -0,0 +1,89 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_
#define CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_
#include <string>
#include <utility>
#include "absl/types/optional.h"
namespace webrtc {
// Describes optional restrictions to the resolution and frame rate of a video
// source. An unset (absl::nullopt) property means "unrestricted".
class VideoSourceRestrictions {
 public:
  // Constructs without any restrictions.
  VideoSourceRestrictions();
  // All values must be positive or nullopt.
  // TODO(hbos): Support expressing "disable this stream"?
  VideoSourceRestrictions(absl::optional<size_t> max_pixels_per_frame,
                          absl::optional<size_t> target_pixels_per_frame,
                          absl::optional<double> max_frame_rate);
  // Member-wise equality, including the set/unset state of each property.
  bool operator==(const VideoSourceRestrictions& rhs) const {
    return max_pixels_per_frame_ == rhs.max_pixels_per_frame_ &&
           target_pixels_per_frame_ == rhs.target_pixels_per_frame_ &&
           max_frame_rate_ == rhs.max_frame_rate_;
  }
  bool operator!=(const VideoSourceRestrictions& rhs) const {
    return !(*this == rhs);
  }
  // Human-readable rendering of the set properties, e.g. for logging.
  std::string ToString() const;
  // The source must produce a resolution less than or equal to
  // max_pixels_per_frame().
  const absl::optional<size_t>& max_pixels_per_frame() const;
  // The source should produce a resolution as close to the
  // target_pixels_per_frame() as possible, provided this does not exceed
  // max_pixels_per_frame().
  // The actual pixel count selected depends on the capabilities of the source.
  // TODO(hbos): Clarify how "target" is used. One possible implementation: open
  // the camera in the smallest resolution that is greater than or equal to the
  // target and scale it down to the target if it is greater. Is this an
  // accurate description of what this does today, or do we do something else?
  const absl::optional<size_t>& target_pixels_per_frame() const;
  const absl::optional<double>& max_frame_rate() const;
  void set_max_pixels_per_frame(absl::optional<size_t> max_pixels_per_frame);
  void set_target_pixels_per_frame(
      absl::optional<size_t> target_pixels_per_frame);
  void set_max_frame_rate(absl::optional<double> max_frame_rate);
  // Update `this` with min(`this`, `other`), i.e. the stricter of the two per
  // property, treating nullopt as "unrestricted".
  void UpdateMin(const VideoSourceRestrictions& other);
 private:
  // These map to rtc::VideoSinkWants's `max_pixel_count` and
  // `target_pixel_count`.
  absl::optional<size_t> max_pixels_per_frame_;
  absl::optional<size_t> target_pixels_per_frame_;
  absl::optional<double> max_frame_rate_;
};
// True if `after` is strictly tighter/looser overall than `before` (one
// dimension changed in that direction, the other did not move the other way).
bool DidRestrictionsIncrease(VideoSourceRestrictions before,
                             VideoSourceRestrictions after);
bool DidRestrictionsDecrease(VideoSourceRestrictions before,
                             VideoSourceRestrictions after);
// Per-dimension comparisons; nullopt counts as "unrestricted" (loosest).
bool DidIncreaseResolution(VideoSourceRestrictions restrictions_before,
                           VideoSourceRestrictions restrictions_after);
bool DidDecreaseResolution(VideoSourceRestrictions restrictions_before,
                           VideoSourceRestrictions restrictions_after);
bool DidIncreaseFrameRate(VideoSourceRestrictions restrictions_before,
                          VideoSourceRestrictions restrictions_after);
bool DidDecreaseFrameRate(VideoSourceRestrictions restrictions_before,
                          VideoSourceRestrictions restrictions_after);
} // namespace webrtc
#endif // CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_

View file

@ -0,0 +1,753 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/video_stream_adapter.h"
#include <algorithm>
#include <limits>
#include <utility>
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "api/sequence_checker.h"
#include "api/video/video_adaptation_counters.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video_codecs/video_encoder.h"
#include "call/adaptation/video_source_restrictions.h"
#include "call/adaptation/video_stream_input_state.h"
#include "modules/video_coding/svc/scalability_mode_util.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
// Lowest frame rate the adapter will ever request when adapting down.
const int kMinFrameRateFps = 2;
namespace {
// For frame rate, the steps we take are 2/3 (down) and 3/2 (up).
int GetLowerFrameRateThan(int fps) {
  RTC_DCHECK(fps != std::numeric_limits<int>::max());
  // Integer division truncates, e.g. 30 -> 20, 7 -> 4.
  return (fps * 2) / 3;
}
// TODO(hbos): Use absl::optional<> instead?
// Steps a frame rate up by 3/2 (the inverse of the 2/3 down-step). int max is
// the sentinel for "unlimited" and is preserved as-is.
int GetHigherFrameRateThan(int fps) {
  if (fps == std::numeric_limits<int>::max())
    return std::numeric_limits<int>::max();
  return (fps * 3) / 2;
}
// Returns the max_pixels_per_frame() value to request when stepping the
// resolution up to `target_pixels`.
int GetIncreasedMaxPixelsWanted(int target_pixels) {
  if (target_pixels == std::numeric_limits<int>::max())
    return std::numeric_limits<int>::max();
  // When we decrease resolution, we go down to at most 3/5 of current pixels.
  // Thus to increase resolution, we need 3/5 to get back to where we started.
  // When going up, the desired max_pixels_per_frame() has to be significantly
  // higher than the target because the source's native resolutions might not
  // match the target. We pick 12/5 of the target.
  //
  // (This value was historically 4 times the old target, which is (3/5)*4 of
  // the new target - or 12/5 - assuming the target is adjusted according to
  // the above steps.)
  RTC_DCHECK(target_pixels != std::numeric_limits<int>::max());
  return (target_pixels * 12) / 5;
}
// True if resolution may be lowered to `target_pixels`: the target must be
// stricter than the current restriction, and `target_pixels_min` must not
// fall below the input's minimum pixel count.
bool CanDecreaseResolutionTo(int target_pixels,
                             int target_pixels_min,
                             const VideoStreamInputState& input_state,
                             const VideoSourceRestrictions& restrictions) {
  int max_pixels_per_frame =
      rtc::dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
          std::numeric_limits<int>::max()));
  return target_pixels < max_pixels_per_frame &&
         target_pixels_min >= input_state.min_pixels_per_frame();
}
// True if increasing to `target_pixels` would actually loosen the current
// max_pixels_per_frame() restriction.
bool CanIncreaseResolutionTo(int target_pixels,
                             const VideoSourceRestrictions& restrictions) {
  int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
  int max_pixels_per_frame =
      rtc::dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
          std::numeric_limits<int>::max()));
  return max_pixels_wanted > max_pixels_per_frame;
}
// True if the frame rate may be lowered to `max_frame_rate` (clamped to the
// kMinFrameRateFps floor) without matching/exceeding the current restriction.
bool CanDecreaseFrameRateTo(int max_frame_rate,
                            const VideoSourceRestrictions& restrictions) {
  const int fps_wanted = std::max(kMinFrameRateFps, max_frame_rate);
  return fps_wanted <
         rtc::dchecked_cast<int>(restrictions.max_frame_rate().value_or(
             std::numeric_limits<int>::max()));
}
// True if increasing to `max_frame_rate` would actually loosen the current
// frame-rate restriction.
bool CanIncreaseFrameRateTo(int max_frame_rate,
                            const VideoSourceRestrictions& restrictions) {
  return max_frame_rate >
         rtc::dchecked_cast<int>(restrictions.max_frame_rate().value_or(
             std::numeric_limits<int>::max()));
}
// True if one more resolution down-step would fall below the minimum pixel
// count. Prefers the single active stream's pixel count over the input frame
// size when available.
bool MinPixelLimitReached(const VideoStreamInputState& input_state) {
  if (input_state.single_active_stream_pixels().has_value()) {
    return GetLowerResolutionThan(
               input_state.single_active_stream_pixels().value()) <
           input_state.min_pixels_per_frame();
  }
  return input_state.frame_size_pixels().has_value() &&
         GetLowerResolutionThan(input_state.frame_size_pixels().value()) <
             input_state.min_pixels_per_frame();
}
} // namespace
// Out-of-line defaulted destructor for the listener interface.
VideoSourceRestrictionsListener::~VideoSourceRestrictionsListener() = default;
// Strips the restriction dimensions that `degradation_preference` says may
// not be adapted: MAINTAIN_FRAMERATE drops the fps cap, MAINTAIN_RESOLUTION
// drops the pixel caps, DISABLED drops everything, BALANCED keeps everything.
VideoSourceRestrictions FilterRestrictionsByDegradationPreference(
    VideoSourceRestrictions source_restrictions,
    DegradationPreference degradation_preference) {
  const bool clear_resolution =
      degradation_preference == DegradationPreference::MAINTAIN_RESOLUTION ||
      degradation_preference == DegradationPreference::DISABLED;
  const bool clear_frame_rate =
      degradation_preference == DegradationPreference::MAINTAIN_FRAMERATE ||
      degradation_preference == DegradationPreference::DISABLED;
  if (clear_resolution) {
    source_restrictions.set_max_pixels_per_frame(absl::nullopt);
    source_restrictions.set_target_pixels_per_frame(absl::nullopt);
  }
  if (clear_frame_rate) {
    source_restrictions.set_max_frame_rate(absl::nullopt);
  }
  return source_restrictions;
}
// For resolution, the steps we take are 3/5 (down) and 5/3 (up).
// Notice the asymmetry of which restriction property is set depending on if
// we are adapting up or down:
// - VideoSourceRestrictor::DecreaseResolution() sets the max_pixels_per_frame()
//   to the desired target and target_pixels_per_frame() to null.
// - VideoSourceRestrictor::IncreaseResolutionTo() sets the
//   target_pixels_per_frame() to the desired target, and max_pixels_per_frame()
//   is set according to VideoSourceRestrictor::GetIncreasedMaxPixelsWanted().
int GetLowerResolutionThan(int pixel_count) {
  RTC_DCHECK(pixel_count != std::numeric_limits<int>::max());
  // Integer division truncates, e.g. 1000 -> 600.
  return (pixel_count * 3) / 5;
}
// TODO(hbos): Use absl::optional<> instead?
// Steps a pixel count up by 5/3 (the inverse of the 3/5 down-step). int max
// is the sentinel for "unlimited" and is preserved as-is.
int GetHigherResolutionThan(int pixel_count) {
  if (pixel_count == std::numeric_limits<int>::max())
    return std::numeric_limits<int>::max();
  return (pixel_count * 5) / 3;
}
// static
// Maps an Adaptation::Status to its enumerator name for logging.
// Fix: the original mixed fully-qualified (`Adaptation::Status::`) and
// partially-qualified (`Status::`) enumerators; use one style consistently.
const char* Adaptation::StatusToString(Adaptation::Status status) {
  switch (status) {
    case Adaptation::Status::kValid:
      return "kValid";
    case Adaptation::Status::kLimitReached:
      return "kLimitReached";
    case Adaptation::Status::kAwaitingPreviousAdaptation:
      return "kAwaitingPreviousAdaptation";
    case Adaptation::Status::kInsufficientInput:
      return "kInsufficientInput";
    case Adaptation::Status::kAdaptationDisabled:
      return "kAdaptationDisabled";
    case Adaptation::Status::kRejectedByConstraint:
      return "kRejectedByConstraint";
  }
  RTC_CHECK_NOTREACHED();
}
// Constructs a valid adaptation proposal; `validation_id` ties it to the
// adapter state it was computed from.
Adaptation::Adaptation(int validation_id,
                       VideoSourceRestrictions restrictions,
                       VideoAdaptationCounters counters,
                       VideoStreamInputState input_state)
    : validation_id_(validation_id),
      status_(Status::kValid),
      input_state_(std::move(input_state)),
      restrictions_(std::move(restrictions)),
      counters_(std::move(counters)) {}
// Constructs an invalid adaptation carrying the reason it is not applicable;
// restrictions/counters/input state are left default-constructed.
Adaptation::Adaptation(int validation_id, Status invalid_status)
    : validation_id_(validation_id), status_(invalid_status) {
  RTC_DCHECK_NE(status_, Status::kValid);
}
// Trivial accessors. Note restrictions(), counters() and input_state() are
// only populated by the kValid constructor above.
Adaptation::Status Adaptation::status() const {
  return status_;
}
const VideoStreamInputState& Adaptation::input_state() const {
  return input_state_;
}
const VideoSourceRestrictions& Adaptation::restrictions() const {
  return restrictions_;
}
const VideoAdaptationCounters& Adaptation::counters() const {
  return counters_;
}
VideoStreamAdapter::VideoStreamAdapter(
    VideoStreamInputStateProvider* input_state_provider,
    VideoStreamEncoderObserver* encoder_stats_observer,
    const FieldTrialsView& field_trials)
    : input_state_provider_(input_state_provider),
      encoder_stats_observer_(encoder_stats_observer),
      balanced_settings_(field_trials),
      adaptation_validation_id_(0),
      degradation_preference_(DegradationPreference::DISABLED),
      awaiting_frame_size_change_(absl::nullopt) {
  // Detach so that the first actual use (not construction) binds the
  // sequence checker to its task queue/thread.
  sequence_checker_.Detach();
  RTC_DCHECK(input_state_provider_);
  RTC_DCHECK(encoder_stats_observer_);
}
// All constraints must have been removed (RemoveAdaptationConstraint())
// before destruction; otherwise they would dangle.
// Fix: corrected the misspelled diagnostic message ("constaint(s)").
VideoStreamAdapter::~VideoStreamAdapter() {
  RTC_DCHECK(adaptation_constraints_.empty())
      << "There are constraint(s) attached to a VideoStreamAdapter being "
         "destroyed.";
}
// Snapshot of the currently applied restrictions and adaptation counters.
VideoSourceRestrictions VideoStreamAdapter::source_restrictions() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return current_restrictions_.restrictions;
}
const VideoAdaptationCounters& VideoStreamAdapter::adaptation_counters() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return current_restrictions_.counters;
}
// Resets restrictions and counters to the unrestricted state and notifies
// listeners.
void VideoStreamAdapter::ClearRestrictions() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Invalidate any previously returned Adaptation.
  RTC_LOG(LS_INFO) << "Resetting restrictions";
  ++adaptation_validation_id_;
  current_restrictions_ = {VideoSourceRestrictions(),
                           VideoAdaptationCounters()};
  awaiting_frame_size_change_ = absl::nullopt;
  // A null resource indicates the update was not caused by a specific
  // resource.
  BroadcastVideoRestrictionsUpdate(input_state_provider_->InputState(),
                                   nullptr);
}
// Registers `restrictions_listener`; must not already be registered.
void VideoStreamAdapter::AddRestrictionsListener(
    VideoSourceRestrictionsListener* restrictions_listener) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK(std::find(restrictions_listeners_.begin(),
                       restrictions_listeners_.end(),
                       restrictions_listener) == restrictions_listeners_.end());
  restrictions_listeners_.push_back(restrictions_listener);
}
// Unregisters `restrictions_listener`; must currently be registered.
void VideoStreamAdapter::RemoveRestrictionsListener(
    VideoSourceRestrictionsListener* restrictions_listener) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  auto it = std::find(restrictions_listeners_.begin(),
                      restrictions_listeners_.end(), restrictions_listener);
  RTC_DCHECK(it != restrictions_listeners_.end());
  restrictions_listeners_.erase(it);
}
// Registers `adaptation_constraint`; must not already be registered. All
// constraints must be removed again before the adapter is destroyed.
void VideoStreamAdapter::AddAdaptationConstraint(
    AdaptationConstraint* adaptation_constraint) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK(std::find(adaptation_constraints_.begin(),
                       adaptation_constraints_.end(),
                       adaptation_constraint) == adaptation_constraints_.end());
  adaptation_constraints_.push_back(adaptation_constraint);
}
// Unregisters `adaptation_constraint`; must currently be registered.
void VideoStreamAdapter::RemoveAdaptationConstraint(
    AdaptationConstraint* adaptation_constraint) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  auto it = std::find(adaptation_constraints_.begin(),
                      adaptation_constraints_.end(), adaptation_constraint);
  RTC_DCHECK(it != adaptation_constraints_.end());
  adaptation_constraints_.erase(it);
}
// Changes the degradation preference. Any switch into or out of BALANCED
// resets the current restrictions; other switches only re-broadcast them.
void VideoStreamAdapter::SetDegradationPreference(
    DegradationPreference degradation_preference) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  if (degradation_preference_ == degradation_preference)
    return;
  // Invalidate any previously returned Adaptation.
  ++adaptation_validation_id_;
  bool balanced_switch =
      degradation_preference == DegradationPreference::BALANCED ||
      degradation_preference_ == DegradationPreference::BALANCED;
  degradation_preference_ = degradation_preference;
  if (balanced_switch) {
    // ClearRestrictions() calls BroadcastVideoRestrictionsUpdate(nullptr).
    ClearRestrictions();
  } else {
    BroadcastVideoRestrictionsUpdate(input_state_provider_->InputState(),
                                     nullptr);
  }
}
// absl::visit() visitor that converts either alternative of
// RestrictionsOrState into an Adaptation object.
struct VideoStreamAdapter::RestrictionsOrStateVisitor {
  Adaptation operator()(const RestrictionsWithCounters& r) const {
    return Adaptation(adaptation_validation_id, r.restrictions, r.counters,
                      input_state);
  }
  Adaptation operator()(const Adaptation::Status& status) const {
    // A Status alternative always represents a failed/invalid proposal.
    RTC_DCHECK_NE(status, Adaptation::Status::kValid);
    return Adaptation(adaptation_validation_id, status);
  }
  const int adaptation_validation_id;
  const VideoStreamInputState& input_state;
};
// Wraps `step_or_state` into an Adaptation stamped with the current
// validation id.
Adaptation VideoStreamAdapter::RestrictionsOrStateToAdaptation(
    VideoStreamAdapter::RestrictionsOrState step_or_state,
    const VideoStreamInputState& input_state) const {
  RTC_DCHECK(!step_or_state.valueless_by_exception());
  return absl::visit(
      RestrictionsOrStateVisitor{adaptation_validation_id_, input_state},
      step_or_state);
}
// Proposes an up-adaptation for `input_state`, vetoed by any registered
// constraint. Does not apply the adaptation.
Adaptation VideoStreamAdapter::GetAdaptationUp(
    const VideoStreamInputState& input_state) const {
  RestrictionsOrState step = GetAdaptationUpStep(input_state);
  // If an adaptation proposed, check with the constraints that it is ok.
  if (absl::holds_alternative<RestrictionsWithCounters>(step)) {
    RestrictionsWithCounters restrictions =
        absl::get<RestrictionsWithCounters>(step);
    // Note: every constraint is consulted, even after one has rejected the
    // step.
    for (const auto* constraint : adaptation_constraints_) {
      if (!constraint->IsAdaptationUpAllowed(input_state,
                                             current_restrictions_.restrictions,
                                             restrictions.restrictions)) {
        RTC_LOG(LS_INFO) << "Not adapting up because constraint \""
                         << constraint->Name() << "\" disallowed it";
        step = Adaptation::Status::kRejectedByConstraint;
      }
    }
  }
  return RestrictionsOrStateToAdaptation(step, input_state);
}
// Public entry point: snapshots the current input state, invalidates any
// previously returned Adaptation and proposes one step up.
Adaptation VideoStreamAdapter::GetAdaptationUp() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  const VideoStreamInputState state_snapshot =
      input_state_provider_->InputState();
  // A new validation id invalidates previously returned Adaptation objects.
  ++adaptation_validation_id_;
  return GetAdaptationUp(state_snapshot);
}
// Computes the next up-step (or the Status explaining why none is possible)
// according to the current degradation preference. Does not apply anything.
VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::GetAdaptationUpStep(
    const VideoStreamInputState& input_state) const {
  if (!HasSufficientInputForAdaptation(input_state)) {
    return Adaptation::Status::kInsufficientInput;
  }
  // Don't adapt if we're awaiting a previous adaptation to have an effect.
  if (awaiting_frame_size_change_ &&
      awaiting_frame_size_change_->pixels_increased &&
      degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE &&
      input_state.frame_size_pixels().value() <=
          awaiting_frame_size_change_->frame_size_pixels) {
    return Adaptation::Status::kAwaitingPreviousAdaptation;
  }
  // Maybe propose targets based on degradation preference.
  switch (degradation_preference_) {
    case DegradationPreference::BALANCED: {
      // Attempt to increase target frame rate.
      RestrictionsOrState increase_frame_rate =
          IncreaseFramerate(input_state, current_restrictions_);
      if (absl::holds_alternative<RestrictionsWithCounters>(
              increase_frame_rate)) {
        return increase_frame_rate;
      }
      // else, increase resolution.
      [[fallthrough]];
    }
    case DegradationPreference::MAINTAIN_FRAMERATE: {
      // Attempt to increase pixel count.
      return IncreaseResolution(input_state, current_restrictions_);
    }
    case DegradationPreference::MAINTAIN_RESOLUTION: {
      // Scale up framerate.
      return IncreaseFramerate(input_state, current_restrictions_);
    }
    case DegradationPreference::DISABLED:
      return Adaptation::Status::kAdaptationDisabled;
  }
  RTC_CHECK_NOTREACHED();
}
// Public entry point: proposes one step down from the current restrictions.
// Invalidates any previously returned Adaptation. Does not apply anything.
Adaptation VideoStreamAdapter::GetAdaptationDown() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  VideoStreamInputState input_state = input_state_provider_->InputState();
  ++adaptation_validation_id_;
  RestrictionsOrState restrictions_or_state =
      GetAdaptationDownStep(input_state, current_restrictions_);
  // Report (for stats) when we can no longer lower the resolution.
  if (MinPixelLimitReached(input_state)) {
    encoder_stats_observer_->OnMinPixelLimitReached();
  }
  // Check for min_fps
  if (degradation_preference_ == DegradationPreference::BALANCED &&
      absl::holds_alternative<RestrictionsWithCounters>(
          restrictions_or_state)) {
    restrictions_or_state = AdaptIfFpsDiffInsufficient(
        input_state,
        absl::get<RestrictionsWithCounters>(restrictions_or_state));
  }
  return RestrictionsOrStateToAdaptation(restrictions_or_state, input_state);
}
// BALANCED only: if the proposed step is an fps adaptation whose cap is too
// close to the actual input fps (diff below the per-resolution MinFpsDiff
// threshold), take one further down-step so the adaptation has real effect.
VideoStreamAdapter::RestrictionsOrState
VideoStreamAdapter::AdaptIfFpsDiffInsufficient(
    const VideoStreamInputState& input_state,
    const RestrictionsWithCounters& restrictions) const {
  RTC_DCHECK_EQ(degradation_preference_, DegradationPreference::BALANCED);
  int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
      input_state.frame_size_pixels().value());
  absl::optional<int> min_fps_diff =
      balanced_settings_.MinFpsDiff(frame_size_pixels);
  // Only applies when the proposed step increased the fps adaptation counter.
  if (current_restrictions_.counters.fps_adaptations <
          restrictions.counters.fps_adaptations &&
      min_fps_diff && input_state.frames_per_second() > 0) {
    int fps_diff = input_state.frames_per_second() -
                   restrictions.restrictions.max_frame_rate().value();
    if (fps_diff < min_fps_diff.value()) {
      return GetAdaptationDownStep(input_state, restrictions);
    }
  }
  return restrictions;
}
// Computes the next down-step (or the Status explaining why none is possible)
// relative to `current_restrictions`, according to the degradation
// preference. Does not apply anything.
VideoStreamAdapter::RestrictionsOrState
VideoStreamAdapter::GetAdaptationDownStep(
    const VideoStreamInputState& input_state,
    const RestrictionsWithCounters& current_restrictions) const {
  if (!HasSufficientInputForAdaptation(input_state)) {
    return Adaptation::Status::kInsufficientInput;
  }
  // Don't adapt if we're awaiting a previous adaptation to have an effect or
  // if we switched degradation preference.
  if (awaiting_frame_size_change_ &&
      !awaiting_frame_size_change_->pixels_increased &&
      degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE &&
      input_state.frame_size_pixels().value() >=
          awaiting_frame_size_change_->frame_size_pixels) {
    return Adaptation::Status::kAwaitingPreviousAdaptation;
  }
  // Maybe propose targets based on degradation preference.
  switch (degradation_preference_) {
    case DegradationPreference::BALANCED: {
      // Try scale down framerate, if lower.
      RestrictionsOrState decrease_frame_rate =
          DecreaseFramerate(input_state, current_restrictions);
      if (absl::holds_alternative<RestrictionsWithCounters>(
              decrease_frame_rate)) {
        return decrease_frame_rate;
      }
      // else, decrease resolution.
      [[fallthrough]];
    }
    case DegradationPreference::MAINTAIN_FRAMERATE: {
      return DecreaseResolution(input_state, current_restrictions);
    }
    case DegradationPreference::MAINTAIN_RESOLUTION: {
      return DecreaseFramerate(input_state, current_restrictions);
    }
    case DegradationPreference::DISABLED:
      return Adaptation::Status::kAdaptationDisabled;
  }
  RTC_CHECK_NOTREACHED();
}
// Proposes a 3/5 resolution down-step: sets max_pixels_per_frame() to the new
// target, clears target_pixels_per_frame() and bumps the resolution counter.
// Returns kLimitReached if the step is not allowed.
VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseResolution(
    const VideoStreamInputState& input_state,
    const RestrictionsWithCounters& current_restrictions) {
  int target_pixels =
      GetLowerResolutionThan(input_state.frame_size_pixels().value());
  // Use single active stream if set, this stream could be lower than the input.
  int target_pixels_min =
      GetLowerResolutionThan(input_state.single_active_stream_pixels().value_or(
          input_state.frame_size_pixels().value()));
  if (!CanDecreaseResolutionTo(target_pixels, target_pixels_min, input_state,
                               current_restrictions.restrictions)) {
    return Adaptation::Status::kLimitReached;
  }
  RestrictionsWithCounters new_restrictions = current_restrictions;
  RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: " << target_pixels;
  new_restrictions.restrictions.set_max_pixels_per_frame(
      target_pixels != std::numeric_limits<int>::max()
          ? absl::optional<size_t>(target_pixels)
          : absl::nullopt);
  new_restrictions.restrictions.set_target_pixels_per_frame(absl::nullopt);
  ++new_restrictions.counters.resolution_adaptations;
  return new_restrictions;
}
// Proposes one frame rate step down, or kLimitReached when the frame rate
// cannot be lowered further under `current_restrictions`.
VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseFramerate(
    const VideoStreamInputState& input_state,
    const RestrictionsWithCounters& current_restrictions) const {
  int max_frame_rate;
  if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) {
    max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second());
  } else if (degradation_preference_ == DegradationPreference::BALANCED) {
    // In BALANCED the fps target for the current resolution comes from the
    // balanced degradation settings.
    int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
        input_state.frame_size_pixels().value());
    max_frame_rate = balanced_settings_.MinFps(input_state.video_codec_type(),
                                               frame_size_pixels);
  } else {
    // Only the two preferences above may decrease frame rate.
    RTC_DCHECK_NOTREACHED();
    max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second());
  }
  if (!CanDecreaseFrameRateTo(max_frame_rate,
                              current_restrictions.restrictions)) {
    return Adaptation::Status::kLimitReached;
  }
  RestrictionsWithCounters new_restrictions = current_restrictions;
  // Never restrict below the global minimum frame rate.
  max_frame_rate = std::max(kMinFrameRateFps, max_frame_rate);
  RTC_LOG(LS_INFO) << "Scaling down framerate: " << max_frame_rate;
  new_restrictions.restrictions.set_max_frame_rate(
      max_frame_rate != std::numeric_limits<int>::max()
          ? absl::optional<double>(max_frame_rate)
          : absl::nullopt);
  ++new_restrictions.counters.fps_adaptations;
  return new_restrictions;
}
// Proposes one resolution step up, or kLimitReached when the resolution
// cannot be increased under `current_restrictions`.
VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseResolution(
    const VideoStreamInputState& input_state,
    const RestrictionsWithCounters& current_restrictions) {
  int target_pixels = input_state.frame_size_pixels().value();
  if (current_restrictions.counters.resolution_adaptations == 1) {
    // Last remaining resolution adaptation: lift the restriction entirely.
    RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting.";
    target_pixels = std::numeric_limits<int>::max();
  }
  target_pixels = GetHigherResolutionThan(target_pixels);
  if (!CanIncreaseResolutionTo(target_pixels,
                               current_restrictions.restrictions)) {
    return Adaptation::Status::kLimitReached;
  }
  // Allow some headroom above the target; see GetIncreasedMaxPixelsWanted.
  int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
  RestrictionsWithCounters new_restrictions = current_restrictions;
  RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: "
                   << max_pixels_wanted;
  new_restrictions.restrictions.set_max_pixels_per_frame(
      max_pixels_wanted != std::numeric_limits<int>::max()
          ? absl::optional<size_t>(max_pixels_wanted)
          : absl::nullopt);
  new_restrictions.restrictions.set_target_pixels_per_frame(
      max_pixels_wanted != std::numeric_limits<int>::max()
          ? absl::optional<size_t>(target_pixels)
          : absl::nullopt);
  --new_restrictions.counters.resolution_adaptations;
  RTC_DCHECK_GE(new_restrictions.counters.resolution_adaptations, 0);
  return new_restrictions;
}
// Proposes one frame rate step up, or kLimitReached when the frame rate
// cannot be increased under `current_restrictions`.
VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseFramerate(
    const VideoStreamInputState& input_state,
    const RestrictionsWithCounters& current_restrictions) const {
  int max_frame_rate;
  if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) {
    max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second());
  } else if (degradation_preference_ == DegradationPreference::BALANCED) {
    int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
        input_state.frame_size_pixels().value());
    max_frame_rate = balanced_settings_.MaxFps(input_state.video_codec_type(),
                                               frame_size_pixels);
    // Temporary fix for cases when there are fewer framerate adaptation steps
    // up than down. Make number of down/up steps equal.
    if (max_frame_rate == std::numeric_limits<int>::max() &&
        current_restrictions.counters.fps_adaptations > 1) {
      // Do not unrestrict framerate to allow additional adaptation up steps.
      // (Subtracting the remaining count from INT_MAX keeps the cap finite.)
      RTC_LOG(LS_INFO) << "Modifying framerate due to remaining fps count.";
      max_frame_rate -= current_restrictions.counters.fps_adaptations;
    }
    // In BALANCED, the max_frame_rate must be checked before proceeding. This
    // is because the MaxFps might be the current Fps and so the balanced
    // settings may want to scale up the resolution.
    if (!CanIncreaseFrameRateTo(max_frame_rate,
                                current_restrictions.restrictions)) {
      return Adaptation::Status::kLimitReached;
    }
  } else {
    // Only the two preferences above may increase frame rate.
    RTC_DCHECK_NOTREACHED();
    max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second());
  }
  if (current_restrictions.counters.fps_adaptations == 1) {
    // Last remaining fps adaptation: lift the restriction entirely.
    RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
    max_frame_rate = std::numeric_limits<int>::max();
  }
  if (!CanIncreaseFrameRateTo(max_frame_rate,
                              current_restrictions.restrictions)) {
    return Adaptation::Status::kLimitReached;
  }
  RTC_LOG(LS_INFO) << "Scaling up framerate: " << max_frame_rate;
  RestrictionsWithCounters new_restrictions = current_restrictions;
  new_restrictions.restrictions.set_max_frame_rate(
      max_frame_rate != std::numeric_limits<int>::max()
          ? absl::optional<double>(max_frame_rate)
          : absl::nullopt);
  --new_restrictions.counters.fps_adaptations;
  RTC_DCHECK_GE(new_restrictions.counters.fps_adaptations, 0);
  return new_restrictions;
}
// Tries to adapt the resolution one step down (used for initial frame
// dropping). In BALANCED this may take two internal steps so that resolution
// is guaranteed to decrease; see GetAdaptDownResolutionStepForBalanced().
Adaptation VideoStreamAdapter::GetAdaptDownResolution() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  VideoStreamInputState input_state = input_state_provider_->InputState();
  switch (degradation_preference_) {
    case DegradationPreference::DISABLED:
      return RestrictionsOrStateToAdaptation(
          Adaptation::Status::kAdaptationDisabled, input_state);
    case DegradationPreference::MAINTAIN_RESOLUTION:
      // Resolution may not be touched under this preference.
      return RestrictionsOrStateToAdaptation(Adaptation::Status::kLimitReached,
                                             input_state);
    case DegradationPreference::MAINTAIN_FRAMERATE:
      return GetAdaptationDown();
    case DegradationPreference::BALANCED: {
      return RestrictionsOrStateToAdaptation(
          GetAdaptDownResolutionStepForBalanced(input_state), input_state);
    }
  }
  RTC_CHECK_NOTREACHED();
}
// BALANCED-only helper for GetAdaptDownResolution(): takes a regular down
// step and, if that step did not lower resolution, tries a second, forced
// resolution step on top of it.
VideoStreamAdapter::RestrictionsOrState
VideoStreamAdapter::GetAdaptDownResolutionStepForBalanced(
    const VideoStreamInputState& input_state) const {
  // Adapt twice if the first adaptation did not decrease resolution.
  auto first_step = GetAdaptationDownStep(input_state, current_restrictions_);
  if (!absl::holds_alternative<RestrictionsWithCounters>(first_step)) {
    return first_step;
  }
  auto first_restrictions = absl::get<RestrictionsWithCounters>(first_step);
  if (first_restrictions.counters.resolution_adaptations >
      current_restrictions_.counters.resolution_adaptations) {
    return first_step;
  }
  // We didn't decrease resolution so force it; append a resolution reduction
  // to the existing framerate reduction in `first_restrictions`.
  auto second_step = DecreaseResolution(input_state, first_restrictions);
  if (absl::holds_alternative<RestrictionsWithCounters>(second_step)) {
    return second_step;
  }
  // If the second step was not successful then settle for the first one.
  return first_step;
}
// Makes `adaptation` the current restrictions and notifies listeners.
// No-op unless the adaptation is valid; it must also have been created
// against the current adapter state (matching validation ID).
void VideoStreamAdapter::ApplyAdaptation(
    const Adaptation& adaptation,
    rtc::scoped_refptr<Resource> resource) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK_EQ(adaptation.validation_id_, adaptation_validation_id_);
  if (adaptation.status() != Adaptation::Status::kValid)
    return;
  // Remember the input pixels and fps of this adaptation. Used to avoid
  // adapting again before this adaptation has had an effect.
  if (DidIncreaseResolution(current_restrictions_.restrictions,
                            adaptation.restrictions())) {
    awaiting_frame_size_change_.emplace(
        true, adaptation.input_state().frame_size_pixels().value());
  } else if (DidDecreaseResolution(current_restrictions_.restrictions,
                                   adaptation.restrictions())) {
    awaiting_frame_size_change_.emplace(
        false, adaptation.input_state().frame_size_pixels().value());
  } else {
    // Frame-rate-only change; nothing to wait for.
    awaiting_frame_size_change_ = absl::nullopt;
  }
  current_restrictions_ = {adaptation.restrictions(), adaptation.counters()};
  BroadcastVideoRestrictionsUpdate(adaptation.input_state(), resource);
}
// Builds an Adaptation targeting the given `restrictions`/`counters` pair
// against the current input state. Adapts up/down from the current levels so
// counters are equal.
Adaptation VideoStreamAdapter::GetAdaptationTo(
    const VideoAdaptationCounters& counters,
    const VideoSourceRestrictions& restrictions) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return Adaptation(adaptation_validation_id_, restrictions, counters,
                    input_state_provider_->InputState());
}
// Notifies all restrictions listeners of the current restrictions, filtered
// by the degradation preference. The broadcast is skipped when the filtered
// value is unchanged since the last notification.
void VideoStreamAdapter::BroadcastVideoRestrictionsUpdate(
    const VideoStreamInputState& input_state,
    const rtc::scoped_refptr<Resource>& resource) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  VideoSourceRestrictions filtered = FilterRestrictionsByDegradationPreference(
      source_restrictions(), degradation_preference_);
  if (last_filtered_restrictions_ == filtered) {
    return;
  }
  for (auto* restrictions_listener : restrictions_listeners_) {
    // Listeners receive both the filtered and the unfiltered restrictions.
    restrictions_listener->OnVideoSourceRestrictionsUpdated(
        filtered, current_restrictions_.counters, resource,
        source_restrictions());
  }
  last_video_source_restrictions_ = current_restrictions_.restrictions;
  last_filtered_restrictions_ = filtered;
}
// True when the input has a known frame size and, for MAINTAIN_RESOLUTION,
// a frame rate at or above the adaptable minimum.
bool VideoStreamAdapter::HasSufficientInputForAdaptation(
    const VideoStreamInputState& input_state) const {
  if (!input_state.HasInputFrameSizeAndFramesPerSecond()) {
    return false;
  }
  if (degradation_preference_ != DegradationPreference::MAINTAIN_RESOLUTION) {
    return true;
  }
  return input_state.frames_per_second() >= kMinFrameRateFps;
}
// Records whether the pending adaptation increased (true) or decreased
// (false) resolution, and the input frame size at the time it was applied.
VideoStreamAdapter::AwaitingFrameSizeChange::AwaitingFrameSizeChange(
    bool pixels_increased,
    int frame_size_pixels)
    : pixels_increased(pixels_increased),
      frame_size_pixels(frame_size_pixels) {}
// Returns the pixel count (width * height) of the single active layer of
// `codec`, or nullopt when no layer or more than one layer is active.
absl::optional<uint32_t> VideoStreamAdapter::GetSingleActiveLayerPixels(
    const VideoCodec& codec) {
  int active_count = 0;
  absl::optional<uint32_t> active_pixels;
  // Counts active spatial layers and records the pixels of the last one seen.
  auto scan_spatial_layers = [&](int layer_count) {
    for (int i = 0; i < layer_count; ++i) {
      const auto& layer = codec.spatialLayers[i];
      if (layer.active) {
        ++active_count;
        active_pixels = layer.width * layer.height;
      }
    }
  };
  if (codec.codecType == VideoCodecType::kVideoCodecAV1 &&
      codec.GetScalabilityMode().has_value()) {
    scan_spatial_layers(
        ScalabilityModeToNumSpatialLayers(*codec.GetScalabilityMode()));
  } else if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
    scan_spatial_layers(codec.VP9().numberOfSpatialLayers);
  } else {
    // Non-SVC codecs use the simulcast stream configuration instead.
    for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
      const auto& stream = codec.simulcastStream[i];
      if (stream.active) {
        ++active_count;
        active_pixels = stream.width * stream.height;
      }
    }
  }
  return (active_count > 1) ? absl::nullopt : active_pixels;
}
} // namespace webrtc

View file

@ -0,0 +1,271 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
#define CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "api/adaptation/resource.h"
#include "api/field_trials_view.h"
#include "api/rtp_parameters.h"
#include "api/video/video_adaptation_counters.h"
#include "call/adaptation/adaptation_constraint.h"
#include "call/adaptation/degradation_preference_provider.h"
#include "call/adaptation/video_source_restrictions.h"
#include "call/adaptation/video_stream_input_state.h"
#include "call/adaptation/video_stream_input_state_provider.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "rtc_base/experiments/balanced_degradation_settings.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "video/video_stream_encoder_observer.h"
namespace webrtc {
// The listener is responsible for carrying out the reconfiguration of the video
// source such that the VideoSourceRestrictions are fulfilled.
class VideoSourceRestrictionsListener {
 public:
  virtual ~VideoSourceRestrictionsListener();
  // The `restrictions` are filtered by degradation preference but not the
  // `adaptation_counters`, which are currently only reported for legacy stats
  // calculation purposes. `unfiltered_restrictions` carries the unfiltered
  // value, and `reason` identifies the resource behind the update, if any.
  virtual void OnVideoSourceRestrictionsUpdated(
      VideoSourceRestrictions restrictions,
      const VideoAdaptationCounters& adaptation_counters,
      rtc::scoped_refptr<Resource> reason,
      const VideoSourceRestrictions& unfiltered_restrictions) = 0;
};
class VideoStreamAdapter;
extern const int kMinFrameRateFps;
VideoSourceRestrictions FilterRestrictionsByDegradationPreference(
VideoSourceRestrictions source_restrictions,
DegradationPreference degradation_preference);
int GetLowerResolutionThan(int pixel_count);
int GetHigherResolutionThan(int pixel_count);
// Either represents the next VideoSourceRestrictions the VideoStreamAdapter
// will take, or provides a Status code indicating the reason for not adapting
// if the adaptation is not valid.
class Adaptation final {
 public:
  enum class Status {
    // Applying this adaptation will have an effect. All other Status codes
    // indicate that adaptation is not possible and why.
    kValid,
    // Cannot adapt. The minimum or maximum adaptation has already been reached.
    // There are no more steps to take.
    kLimitReached,
    // Cannot adapt. The resolution or frame rate requested by a recent
    // adaptation has not yet been reflected in the input resolution or frame
    // rate; adaptation is refused to avoid "double-adapting".
    kAwaitingPreviousAdaptation,
    // Not enough input.
    kInsufficientInput,
    // Adaptation disabled via degradation preference.
    kAdaptationDisabled,
    // Adaptation up was rejected by a VideoAdaptationConstraint.
    kRejectedByConstraint,
  };
  // Returns a string representation of `status`, e.g. for logging.
  static const char* StatusToString(Status status);
  Status status() const;
  // The input state the adaptation was computed against.
  const VideoStreamInputState& input_state() const;
  const VideoSourceRestrictions& restrictions() const;
  const VideoAdaptationCounters& counters() const;
 private:
  friend class VideoStreamAdapter;
  // Constructs with a valid adaptation. Status is kValid.
  Adaptation(int validation_id,
             VideoSourceRestrictions restrictions,
             VideoAdaptationCounters counters,
             VideoStreamInputState input_state);
  // Constructor when adaptation is not valid. Status MUST NOT be kValid.
  Adaptation(int validation_id, Status invalid_status);
  // An Adaptation can become invalidated if the state of VideoStreamAdapter is
  // modified before the Adaptation is applied. To guard against this, this ID
  // has to match VideoStreamAdapter::adaptation_validation_id_ when applied.
  // TODO(https://crbug.com/webrtc/11700): Remove the validation_id_.
  const int validation_id_;
  const Status status_;
  // Input state when adaptation was made.
  const VideoStreamInputState input_state_;
  const VideoSourceRestrictions restrictions_;
  const VideoAdaptationCounters counters_;
};
// Owns the VideoSourceRestriction for a single stream and is responsible for
// adapting it up or down when told to do so. This class serves the following
// purposes:
// 1. Keep track of a stream's restrictions.
// 2. Provide valid ways to adapt up or down the stream's restrictions.
// 3. Modify the stream's restrictions in one of the valid ways.
class VideoStreamAdapter {
 public:
  VideoStreamAdapter(VideoStreamInputStateProvider* input_state_provider,
                     VideoStreamEncoderObserver* encoder_stats_observer,
                     const FieldTrialsView& field_trials);
  ~VideoStreamAdapter();
  VideoSourceRestrictions source_restrictions() const;
  const VideoAdaptationCounters& adaptation_counters() const;
  void ClearRestrictions();
  void AddRestrictionsListener(
      VideoSourceRestrictionsListener* restrictions_listener);
  void RemoveRestrictionsListener(
      VideoSourceRestrictionsListener* restrictions_listener);
  void AddAdaptationConstraint(AdaptationConstraint* adaptation_constraint);
  void RemoveAdaptationConstraint(AdaptationConstraint* adaptation_constraint);
  // TODO(hbos): Setting the degradation preference should not clear
  // restrictions! This is not defined in the spec and is unexpected, there is a
  // tiny risk that people would discover and rely on this behavior.
  void SetDegradationPreference(DegradationPreference degradation_preference);
  // Returns an adaptation that we are guaranteed to be able to apply, or a
  // status code indicating the reason why we cannot adapt.
  Adaptation GetAdaptationUp();
  Adaptation GetAdaptationDown();
  Adaptation GetAdaptationTo(const VideoAdaptationCounters& counters,
                             const VideoSourceRestrictions& restrictions);
  // Tries to adapt the resolution one step. This is used for initial frame
  // dropping. Does nothing if the degradation preference is not BALANCED or
  // MAINTAIN_FRAMERATE. In the case of BALANCED, it will try twice to reduce
  // the resolution. If it fails twice it gives up.
  Adaptation GetAdaptDownResolution();
  // Updates source_restrictions() based on the given Adaptation.
  void ApplyAdaptation(const Adaptation& adaptation,
                       rtc::scoped_refptr<Resource> resource);
  // A restrictions value paired with the adaptation counters that produced it.
  struct RestrictionsWithCounters {
    VideoSourceRestrictions restrictions;
    VideoAdaptationCounters counters;
  };
  static absl::optional<uint32_t> GetSingleActiveLayerPixels(
      const VideoCodec& codec);
 private:
  void BroadcastVideoRestrictionsUpdate(
      const VideoStreamInputState& input_state,
      const rtc::scoped_refptr<Resource>& resource);
  bool HasSufficientInputForAdaptation(const VideoStreamInputState& input_state)
      const RTC_RUN_ON(&sequence_checker_);
  // Either the next restrictions to apply or a status explaining why there
  // are none.
  using RestrictionsOrState =
      absl::variant<RestrictionsWithCounters, Adaptation::Status>;
  RestrictionsOrState GetAdaptationUpStep(
      const VideoStreamInputState& input_state) const
      RTC_RUN_ON(&sequence_checker_);
  RestrictionsOrState GetAdaptationDownStep(
      const VideoStreamInputState& input_state,
      const RestrictionsWithCounters& current_restrictions) const
      RTC_RUN_ON(&sequence_checker_);
  RestrictionsOrState GetAdaptDownResolutionStepForBalanced(
      const VideoStreamInputState& input_state) const
      RTC_RUN_ON(&sequence_checker_);
  RestrictionsOrState AdaptIfFpsDiffInsufficient(
      const VideoStreamInputState& input_state,
      const RestrictionsWithCounters& restrictions) const
      RTC_RUN_ON(&sequence_checker_);
  Adaptation GetAdaptationUp(const VideoStreamInputState& input_state) const
      RTC_RUN_ON(&sequence_checker_);
  Adaptation GetAdaptationDown(const VideoStreamInputState& input_state) const
      RTC_RUN_ON(&sequence_checker_);
  static RestrictionsOrState DecreaseResolution(
      const VideoStreamInputState& input_state,
      const RestrictionsWithCounters& current_restrictions);
  static RestrictionsOrState IncreaseResolution(
      const VideoStreamInputState& input_state,
      const RestrictionsWithCounters& current_restrictions);
  // Framerate methods are member functions because they need internal state
  // if the degradation preference is BALANCED.
  RestrictionsOrState DecreaseFramerate(
      const VideoStreamInputState& input_state,
      const RestrictionsWithCounters& current_restrictions) const
      RTC_RUN_ON(&sequence_checker_);
  RestrictionsOrState IncreaseFramerate(
      const VideoStreamInputState& input_state,
      const RestrictionsWithCounters& current_restrictions) const
      RTC_RUN_ON(&sequence_checker_);
  struct RestrictionsOrStateVisitor;
  Adaptation RestrictionsOrStateToAdaptation(
      RestrictionsOrState step_or_state,
      const VideoStreamInputState& input_state) const
      RTC_RUN_ON(&sequence_checker_);
  // NOTE(review): the checker is annotated as guarded by itself — looks odd;
  // confirm this is intentional.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_
      RTC_GUARDED_BY(&sequence_checker_);
  // Gets the input state which is the basis of all adaptations.
  // Thread safe.
  VideoStreamInputStateProvider* input_state_provider_;
  // Used to signal when min pixel limit has been reached.
  VideoStreamEncoderObserver* const encoder_stats_observer_;
  // Decides the next adaptation target in DegradationPreference::BALANCED.
  const BalancedDegradationSettings balanced_settings_;
  // To guard against applying adaptations that have become invalidated, an
  // Adaptation that is applied has to have a matching validation ID.
  int adaptation_validation_id_ RTC_GUARDED_BY(&sequence_checker_);
  // When deciding the next target up or down, different strategies are used
  // depending on the DegradationPreference.
  // https://w3c.github.io/mst-content-hint/#dom-rtcdegradationpreference
  DegradationPreference degradation_preference_
      RTC_GUARDED_BY(&sequence_checker_);
  // Used to avoid adapting twice. Stores the resolution at the time of the last
  // adaptation.
  // TODO(hbos): Can we implement a more general "cooldown" mechanism of
  // resources instead? If we already have adapted it seems like we should wait
  // a while before adapting again, so that we are not acting on usage
  // measurements that are made obsolete/unreliable by an "ongoing" adaptation.
  struct AwaitingFrameSizeChange {
    AwaitingFrameSizeChange(bool pixels_increased, int frame_size);
    const bool pixels_increased;
    const int frame_size_pixels;
  };
  absl::optional<AwaitingFrameSizeChange> awaiting_frame_size_change_
      RTC_GUARDED_BY(&sequence_checker_);
  // The previous restrictions value. Starts as unrestricted.
  VideoSourceRestrictions last_video_source_restrictions_
      RTC_GUARDED_BY(&sequence_checker_);
  VideoSourceRestrictions last_filtered_restrictions_
      RTC_GUARDED_BY(&sequence_checker_);
  std::vector<VideoSourceRestrictionsListener*> restrictions_listeners_
      RTC_GUARDED_BY(&sequence_checker_);
  std::vector<AdaptationConstraint*> adaptation_constraints_
      RTC_GUARDED_BY(&sequence_checker_);
  RestrictionsWithCounters current_restrictions_
      RTC_GUARDED_BY(&sequence_checker_);
};
} // namespace webrtc
#endif // CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_

View file

@ -0,0 +1,80 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/video_stream_input_state.h"
#include "api/video_codecs/video_encoder.h"
namespace webrtc {
// Starts with no input: unknown frame size, 0 fps, generic codec type and the
// default minimum pixel count.
VideoStreamInputState::VideoStreamInputState()
    : has_input_(false),
      frame_size_pixels_(absl::nullopt),
      frames_per_second_(0),
      video_codec_type_(VideoCodecType::kVideoCodecGeneric),
      min_pixels_per_frame_(kDefaultMinPixelsPerFrame),
      single_active_stream_pixels_(absl::nullopt) {}
// Trivial setters: each stores its argument verbatim.
void VideoStreamInputState::set_has_input(bool has_input) {
  has_input_ = has_input;
}
void VideoStreamInputState::set_frame_size_pixels(
    absl::optional<int> frame_size_pixels) {
  frame_size_pixels_ = frame_size_pixels;
}
void VideoStreamInputState::set_frames_per_second(int frames_per_second) {
  frames_per_second_ = frames_per_second;
}
void VideoStreamInputState::set_video_codec_type(
    VideoCodecType video_codec_type) {
  video_codec_type_ = video_codec_type;
}
void VideoStreamInputState::set_min_pixels_per_frame(int min_pixels_per_frame) {
  min_pixels_per_frame_ = min_pixels_per_frame;
}
void VideoStreamInputState::set_single_active_stream_pixels(
    absl::optional<int> single_active_stream_pixels) {
  single_active_stream_pixels_ = single_active_stream_pixels;
}
// Trivial getters: each returns the stored value.
bool VideoStreamInputState::has_input() const {
  return has_input_;
}
absl::optional<int> VideoStreamInputState::frame_size_pixels() const {
  return frame_size_pixels_;
}
int VideoStreamInputState::frames_per_second() const {
  return frames_per_second_;
}
VideoCodecType VideoStreamInputState::video_codec_type() const {
  return video_codec_type_;
}
int VideoStreamInputState::min_pixels_per_frame() const {
  return min_pixels_per_frame_;
}
absl::optional<int> VideoStreamInputState::single_active_stream_pixels() const {
  return single_active_stream_pixels_;
}
// NOTE: despite the name, only has_input_ and the frame size are checked;
// frames_per_second_ is a plain int and therefore always holds a value.
bool VideoStreamInputState::HasInputFrameSizeAndFramesPerSecond() const {
  return has_input_ && frame_size_pixels_.has_value();
}
} // namespace webrtc

View file

@ -0,0 +1,53 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_H_
#define CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_H_
#include "absl/types/optional.h"
#include "api/video/video_codec_type.h"
namespace webrtc {
// The source resolution, frame rate and other properties of a
// VideoStreamEncoder. A plain value type; setters and getters store/return
// the values verbatim.
class VideoStreamInputState {
 public:
  VideoStreamInputState();
  void set_has_input(bool has_input);
  void set_frame_size_pixels(absl::optional<int> frame_size_pixels);
  void set_frames_per_second(int frames_per_second);
  void set_video_codec_type(VideoCodecType video_codec_type);
  void set_min_pixels_per_frame(int min_pixels_per_frame);
  void set_single_active_stream_pixels(
      absl::optional<int> single_active_stream_pixels);
  // Whether any input is currently being reported.
  bool has_input() const;
  // Size of the input frames in pixels, if known.
  absl::optional<int> frame_size_pixels() const;
  int frames_per_second() const;
  VideoCodecType video_codec_type() const;
  // Lower bound for resolution adaptation.
  int min_pixels_per_frame() const;
  // Pixel count of the single active simulcast/spatial layer, or nullopt when
  // zero or multiple layers are active.
  absl::optional<int> single_active_stream_pixels() const;
  // True when has_input() and frame_size_pixels() are both set.
  bool HasInputFrameSizeAndFramesPerSecond() const;
 private:
  bool has_input_;
  absl::optional<int> frame_size_pixels_;
  int frames_per_second_;
  VideoCodecType video_codec_type_;
  int min_pixels_per_frame_;
  absl::optional<int> single_active_stream_pixels_;
};
} // namespace webrtc
#endif // CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_H_

View file

@ -0,0 +1,54 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/adaptation/video_stream_input_state_provider.h"
#include "call/adaptation/video_stream_adapter.h"
namespace webrtc {
// Stores `frame_rate_provider` unowned; it is queried for the current input
// frame rate each time InputState() is called.
VideoStreamInputStateProvider::VideoStreamInputStateProvider(
    VideoStreamEncoderObserver* frame_rate_provider)
    : frame_rate_provider_(frame_rate_provider) {}
VideoStreamInputStateProvider::~VideoStreamInputStateProvider() {}
// Records whether input frames are currently arriving.
void VideoStreamInputStateProvider::OnHasInputChanged(bool has_input) {
  MutexLock lock(&mutex_);
  input_state_.set_has_input(has_input);
}
// Records the pixel count of an observed input frame. Must be positive.
void VideoStreamInputStateProvider::OnFrameSizeObserved(int frame_size_pixels) {
  RTC_DCHECK_GT(frame_size_pixels, 0);
  MutexLock lock(&mutex_);
  input_state_.set_frame_size_pixels(frame_size_pixels);
}
// Mirrors codec type, the minimum scaling resolution, and the
// single-active-layer pixel count from the new encoder settings.
void VideoStreamInputStateProvider::OnEncoderSettingsChanged(
    EncoderSettings encoder_settings) {
  MutexLock lock(&mutex_);
  input_state_.set_video_codec_type(
      encoder_settings.encoder_config().codec_type);
  input_state_.set_min_pixels_per_frame(
      encoder_settings.encoder_info().scaling_settings.min_pixels_per_frame);
  input_state_.set_single_active_stream_pixels(
      VideoStreamAdapter::GetSingleActiveLayerPixels(
          encoder_settings.video_codec()));
}
// Returns a snapshot of the current input state with the frame rate freshly
// read from the frame rate provider.
VideoStreamInputState VideoStreamInputStateProvider::InputState() {
  // GetInputFrameRate() is thread-safe, so it is called outside the lock.
  int input_fps = frame_rate_provider_->GetInputFrameRate();
  MutexLock lock(&mutex_);
  input_state_.set_frames_per_second(input_fps);
  return input_state_;
}
} // namespace webrtc

View file

@ -0,0 +1,41 @@
/*
* Copyright 2020 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
#define CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
#include "call/adaptation/encoder_settings.h"
#include "call/adaptation/video_stream_input_state.h"
#include "rtc_base/synchronization/mutex.h"
#include "video/video_stream_encoder_observer.h"
namespace webrtc {
// Aggregates the encoder's input state (frame size, fps, codec settings)
// behind a mutex so it can be updated and snapshotted from different call
// sites.
class VideoStreamInputStateProvider {
 public:
  VideoStreamInputStateProvider(
      VideoStreamEncoderObserver* frame_rate_provider);
  virtual ~VideoStreamInputStateProvider();
  void OnHasInputChanged(bool has_input);
  void OnFrameSizeObserved(int frame_size_pixels);
  void OnEncoderSettingsChanged(EncoderSettings encoder_settings);
  // Returns a snapshot with the frame rate freshly read from the provider.
  // Virtual so it can be overridden (e.g. by test doubles — confirm intent).
  virtual VideoStreamInputState InputState();
 private:
  Mutex mutex_;
  // Unowned; queried for the current input frame rate.
  VideoStreamEncoderObserver* const frame_rate_provider_;
  VideoStreamInputState input_state_ RTC_GUARDED_BY(mutex_);
};
} // namespace webrtc
#endif // CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_

View file

@ -0,0 +1,24 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/audio_receive_stream.h"
namespace webrtc {
// Out-of-line definitions of the defaulted special members declared in
// call/audio_receive_stream.h for the nested Stats/Config/Rtp types.
AudioReceiveStreamInterface::Stats::Stats() = default;
AudioReceiveStreamInterface::Stats::~Stats() = default;
AudioReceiveStreamInterface::Config::Config() = default;
AudioReceiveStreamInterface::Config::~Config() = default;
AudioReceiveStreamInterface::Config::Rtp::Rtp() = default;
AudioReceiveStreamInterface::Config::Rtp::~Rtp() = default;
} // namespace webrtc

View file

@ -0,0 +1,207 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_AUDIO_RECEIVE_STREAM_H_
#define CALL_AUDIO_RECEIVE_STREAM_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/rtp_parameters.h"
#include "call/receive_stream.h"
#include "call/rtp_config.h"
namespace webrtc {
class AudioSinkInterface;
// Interface to an audio receive stream. The nested Stats struct mirrors many
// fields of the WebRTC stats API (links inline below); the nested Config
// struct carries the RTP/NetEq/decoder settings the stream is created with.
class AudioReceiveStreamInterface : public MediaReceiveStreamInterface {
 public:
  struct Stats {
    Stats();
    ~Stats();

    // SSRC of the remote (sending) endpoint.
    uint32_t remote_ssrc = 0;
    int64_t payload_bytes_received = 0;
    int64_t header_and_padding_bytes_received = 0;
    uint32_t packets_received = 0;
    uint64_t fec_packets_received = 0;
    uint64_t fec_packets_discarded = 0;
    int32_t packets_lost = 0;
    uint64_t packets_discarded = 0;
    uint32_t nacks_sent = 0;
    std::string codec_name;
    absl::optional<int> codec_payload_type;
    uint32_t jitter_ms = 0;
    uint32_t jitter_buffer_ms = 0;
    uint32_t jitter_buffer_preferred_ms = 0;
    uint32_t delay_estimate_ms = 0;
    // Note the -1 default (unlike the zero-initialized fields above);
    // presumably a "no level measured yet" sentinel -- TODO(review): confirm
    // against the implementation.
    int32_t audio_level = -1;
    // Stats below correspond to similarly-named fields in the WebRTC stats
    // spec. https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
    double total_output_energy = 0.0;
    uint64_t total_samples_received = 0;
    double total_output_duration = 0.0;
    uint64_t concealed_samples = 0;
    uint64_t silent_concealed_samples = 0;
    uint64_t concealment_events = 0;
    double jitter_buffer_delay_seconds = 0.0;
    uint64_t jitter_buffer_emitted_count = 0;
    double jitter_buffer_target_delay_seconds = 0.0;
    double jitter_buffer_minimum_delay_seconds = 0.0;
    uint64_t inserted_samples_for_deceleration = 0;
    uint64_t removed_samples_for_acceleration = 0;
    // Stats below DO NOT correspond directly to anything in the WebRTC stats
    // spec.
    float expand_rate = 0.0f;
    float speech_expand_rate = 0.0f;
    float secondary_decoded_rate = 0.0f;
    float secondary_discarded_rate = 0.0f;
    float accelerate_rate = 0.0f;
    float preemptive_expand_rate = 0.0f;
    uint64_t delayed_packet_outage_samples = 0;
    int32_t decoding_calls_to_silence_generator = 0;
    int32_t decoding_calls_to_neteq = 0;
    int32_t decoding_normal = 0;
    // TODO(alexnarest): Consider decoding_neteq_plc for consistency
    int32_t decoding_plc = 0;
    int32_t decoding_codec_plc = 0;
    int32_t decoding_cng = 0;
    int32_t decoding_plc_cng = 0;
    int32_t decoding_muted_output = 0;
    int64_t capture_start_ntp_time_ms = 0;
    // The timestamp at which the last packet was received, i.e. the time of
    // the local clock when it was received - not the RTP timestamp of that
    // packet.
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-lastpacketreceivedtimestamp
    absl::optional<Timestamp> last_packet_received;
    uint64_t jitter_buffer_flushes = 0;
    double relative_packet_arrival_delay_seconds = 0.0;
    int32_t interruption_count = 0;
    int32_t total_interruption_duration_ms = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp
    absl::optional<int64_t> estimated_playout_ntp_timestamp_ms;
    // Remote outbound stats derived by the received RTCP sender reports.
    // https://w3c.github.io/webrtc-stats/#remoteoutboundrtpstats-dict*
    absl::optional<int64_t> last_sender_report_timestamp_ms;
    absl::optional<int64_t> last_sender_report_remote_timestamp_ms;
    uint64_t sender_reports_packets_sent = 0;
    uint64_t sender_reports_bytes_sent = 0;
    uint64_t sender_reports_reports_count = 0;
    // Receive-side round-trip-time stats; presumably only populated when
    // non-sender RTT measurement is enabled (see Config::enable_non_sender_rtt
    // and SetNonSenderRttMeasurement()) -- TODO(review): confirm.
    absl::optional<TimeDelta> round_trip_time;
    TimeDelta total_round_trip_time = TimeDelta::Zero();
    int round_trip_time_measurements = 0;
  };

  struct Config {
    Config();
    ~Config();

    std::string ToString() const;

    // Receive-stream specific RTP settings.
    struct Rtp : public ReceiveStreamRtpConfig {
      Rtp();
      ~Rtp();

      std::string ToString() const;

      // See NackConfig for description.
      NackConfig nack;
    } rtp;

    // Receive-side RTT.
    bool enable_non_sender_rtt = false;

    // Transport for outgoing RTCP. Stored as a raw, non-owning pointer;
    // lifetime is managed by the creator -- TODO(review): confirm.
    Transport* rtcp_send_transport = nullptr;

    // NetEq settings.
    size_t jitter_buffer_max_packets = 200;
    bool jitter_buffer_fast_accelerate = false;
    int jitter_buffer_min_delay_ms = 0;

    // Identifier for an A/V synchronization group. Empty string to disable.
    // TODO(pbos): Synchronize streams in a sync group, not just one video
    // stream to one audio stream. Tracked by issue webrtc:4762.
    std::string sync_group;

    // Decoder specifications for every payload type that we can receive.
    std::map<int, SdpAudioFormat> decoder_map;
    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory;
    absl::optional<AudioCodecPairId> codec_pair_id;

    // Per PeerConnection crypto options.
    webrtc::CryptoOptions crypto_options;

    // An optional custom frame decryptor that allows the entire frame to be
    // decrypted in whatever way the caller choses. This is not required by
    // default.
    // TODO(tommi): Remove this member variable from the struct. It's not
    // a part of the AudioReceiveStreamInterface state but rather a pass through
    // variable.
    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor;

    // An optional frame transformer used by insertable streams to transform
    // encoded frames.
    // TODO(tommi): Remove this member variable from the struct. It's not
    // a part of the AudioReceiveStreamInterface state but rather a pass through
    // variable.
    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
  };

  // Methods that support reconfiguring the stream post initialization.
  virtual void SetDecoderMap(std::map<int, SdpAudioFormat> decoder_map) = 0;
  virtual void SetNackHistory(int history_ms) = 0;
  virtual void SetNonSenderRttMeasurement(bool enabled) = 0;

  // Returns true if the stream has been started.
  virtual bool IsRunning() const = 0;

  virtual Stats GetStats(bool get_and_clear_legacy_stats) const = 0;
  // Convenience overload; also clears the legacy stats.
  Stats GetStats() { return GetStats(/*get_and_clear_legacy_stats=*/true); }

  // Sets an audio sink that receives unmixed audio from the receive stream.
  // Ownership of the sink is managed by the caller.
  // Only one sink can be set and passing a null sink clears an existing one.
  // NOTE: Audio must still somehow be pulled through AudioTransport for audio
  // to stream through this sink. In practice, this happens if mixed audio
  // is being pulled+rendered and/or if audio is being pulled for the purposes
  // of feeding to the AEC.
  virtual void SetSink(AudioSinkInterface* sink) = 0;

  // Sets playback gain of the stream, applied when mixing, and thus after it
  // is potentially forwarded to any attached AudioSinkInterface implementation.
  virtual void SetGain(float gain) = 0;

  // Sets a base minimum for the playout delay. Base minimum delay sets lower
  // bound on minimum delay value determining lower bound on playout delay.
  //
  // Returns true if value was successfully set, false overwise.
  virtual bool SetBaseMinimumPlayoutDelayMs(int delay_ms) = 0;

  // Returns current value of base minimum delay in milliseconds.
  virtual int GetBaseMinimumPlayoutDelayMs() const = 0;

  // Synchronization source (stream identifier) to be received.
  // This member will not change mid-stream and can be assumed to be const
  // post initialization.
  virtual uint32_t remote_ssrc() const = 0;

 protected:
  virtual ~AudioReceiveStreamInterface() {}
};
} // namespace webrtc
#endif // CALL_AUDIO_RECEIVE_STREAM_H_

View file

@ -0,0 +1,108 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/audio_send_stream.h"
#include <stddef.h>
#include "rtc_base/strings/audio_format_to_string.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
// Out-of-line defaulted special members for the nested Stats type.
AudioSendStream::Stats::Stats() = default;
AudioSendStream::Stats::~Stats() = default;
// Config must be constructed with a Transport (the default constructor is
// deleted in the header); the pointer is stored as-is.
AudioSendStream::Config::Config(Transport* send_transport)
    : send_transport(send_transport) {}
AudioSendStream::Config::~Config() = default;
std::string AudioSendStream::Config::ToString() const {
rtc::StringBuilder ss;
ss << "{rtp: " << rtp.ToString();
ss << ", rtcp_report_interval_ms: " << rtcp_report_interval_ms;
ss << ", send_transport: " << (send_transport ? "(Transport)" : "null");
ss << ", min_bitrate_bps: " << min_bitrate_bps;
ss << ", max_bitrate_bps: " << max_bitrate_bps;
ss << ", has audio_network_adaptor_config: "
<< (audio_network_adaptor_config ? "true" : "false");
ss << ", has_dscp: " << (has_dscp ? "true" : "false");
ss << ", send_codec_spec: "
<< (send_codec_spec ? send_codec_spec->ToString() : "<unset>");
ss << "}";
return ss.Release();
}
// Out-of-line defaulted special members for the nested Rtp config.
AudioSendStream::Config::Rtp::Rtp() = default;
AudioSendStream::Config::Rtp::~Rtp() = default;
std::string AudioSendStream::Config::Rtp::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{ssrc: " << ssrc;
if (!rid.empty()) {
ss << ", rid: " << rid;
}
if (!mid.empty()) {
ss << ", mid: " << mid;
}
ss << ", extmap-allow-mixed: " << (extmap_allow_mixed ? "true" : "false");
ss << ", extensions: [";
for (size_t i = 0; i < extensions.size(); ++i) {
ss << extensions[i].ToString();
if (i != extensions.size() - 1) {
ss << ", ";
}
}
ss << ']';
ss << ", c_name: " << c_name;
ss << '}';
return ss.str();
}
// A SendCodecSpec is constructed from the RTP payload type and the SDP audio
// format it maps to; all other members keep their in-class defaults.
AudioSendStream::Config::SendCodecSpec::SendCodecSpec(
    int payload_type,
    const SdpAudioFormat& format)
    : payload_type(payload_type), format(format) {}
AudioSendStream::Config::SendCodecSpec::~SendCodecSpec() = default;
std::string AudioSendStream::Config::SendCodecSpec::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{nack_enabled: " << (nack_enabled ? "true" : "false");
ss << ", transport_cc_enabled: " << (transport_cc_enabled ? "true" : "false");
ss << ", enable_non_sender_rtt: "
<< (enable_non_sender_rtt ? "true" : "false");
ss << ", cng_payload_type: "
<< (cng_payload_type ? rtc::ToString(*cng_payload_type) : "<unset>");
ss << ", red_payload_type: "
<< (red_payload_type ? rtc::ToString(*red_payload_type) : "<unset>");
ss << ", payload_type: " << payload_type;
ss << ", format: " << rtc::ToString(format);
ss << '}';
return ss.str();
}
// Memberwise equality over all configuration fields. Returns the comparison
// directly instead of the redundant `if (...) return true; return false;`
// branching of the previous version (behavior is unchanged).
bool AudioSendStream::Config::SendCodecSpec::operator==(
    const AudioSendStream::Config::SendCodecSpec& rhs) const {
  return nack_enabled == rhs.nack_enabled &&
         transport_cc_enabled == rhs.transport_cc_enabled &&
         enable_non_sender_rtt == rhs.enable_non_sender_rtt &&
         cng_payload_type == rhs.cng_payload_type &&
         red_payload_type == rhs.red_payload_type &&
         payload_type == rhs.payload_type && format == rhs.format &&
         target_bitrate_bps == rhs.target_bitrate_bps;
}
} // namespace webrtc

View file

@ -0,0 +1,201 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_AUDIO_SEND_STREAM_H_
#define CALL_AUDIO_SEND_STREAM_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/audio_codecs/audio_format.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/crypto/frame_encryptor_interface.h"
#include "api/frame_transformer_interface.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "call/audio_sender.h"
#include "call/rtp_config.h"
#include "modules/audio_processing/include/audio_processing_statistics.h"
#include "modules/rtp_rtcp/include/report_block_data.h"
namespace webrtc {
// Interface to an audio send stream. The nested Stats struct mirrors fields
// reported through the WebRTC stats API (links inline below); the nested
// Config struct carries the RTP/codec/transport settings the stream is
// created and reconfigured with.
class AudioSendStream : public AudioSender {
 public:
  struct Stats {
    Stats();
    ~Stats();

    // TODO(solenberg): Harmonize naming and defaults with receive stream stats.
    uint32_t local_ssrc = 0;
    int64_t payload_bytes_sent = 0;
    int64_t header_and_padding_bytes_sent = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedbytessent
    uint64_t retransmitted_bytes_sent = 0;
    int32_t packets_sent = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalpacketsenddelay
    TimeDelta total_packet_send_delay = TimeDelta::Zero();
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedpacketssent
    uint64_t retransmitted_packets_sent = 0;
    // Note the negative defaults below; presumably "not yet reported"
    // sentinels -- TODO(review): confirm against the implementation.
    int32_t packets_lost = -1;
    float fraction_lost = -1.0f;
    std::string codec_name;
    absl::optional<int> codec_payload_type;
    int32_t jitter_ms = -1;
    int64_t rtt_ms = -1;
    int16_t audio_level = 0;
    // See description of "totalAudioEnergy" in the WebRTC stats spec:
    // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
    double total_input_energy = 0.0;
    double total_input_duration = 0.0;

    ANAStats ana_statistics;
    AudioProcessingStats apm_statistics;

    int64_t target_bitrate_bps = 0;
    // A snapshot of Report Blocks with additional data of interest to
    // statistics. Within this list, the sender-source SSRC pair is unique and
    // per-pair the ReportBlockData represents the latest Report Block that was
    // received for that pair.
    std::vector<ReportBlockData> report_block_datas;
    uint32_t nacks_received = 0;
  };

  struct Config {
    Config() = delete;
    explicit Config(Transport* send_transport);
    ~Config();

    std::string ToString() const;

    // Send-stream specific RTP settings.
    struct Rtp {
      Rtp();
      ~Rtp();

      std::string ToString() const;

      // Sender SSRC.
      uint32_t ssrc = 0;

      // The value to send in the RID RTP header extension if the extension is
      // included in the list of extensions.
      std::string rid;

      // The value to send in the MID RTP header extension if the extension is
      // included in the list of extensions.
      std::string mid;

      // Corresponds to the SDP attribute extmap-allow-mixed.
      bool extmap_allow_mixed = false;

      // RTP header extensions used for the sent stream.
      std::vector<RtpExtension> extensions;

      // RTCP CNAME, see RFC 3550.
      std::string c_name;
    } rtp;

    // Time interval between RTCP report for audio
    int rtcp_report_interval_ms = 5000;

    // Transport for outgoing packets. The transport is expected to exist for
    // the entire life of the AudioSendStream and is owned by the API client.
    Transport* send_transport = nullptr;

    // Bitrate limits used for variable audio bitrate streams. Set both to -1 to
    // disable audio bitrate adaptation.
    // Note: This is still an experimental feature and not ready for real usage.
    int min_bitrate_bps = -1;
    int max_bitrate_bps = -1;

    double bitrate_priority = 1.0;
    bool has_dscp = false;

    // Defines whether to turn on audio network adaptor, and defines its config
    // string.
    absl::optional<std::string> audio_network_adaptor_config;

    struct SendCodecSpec {
      SendCodecSpec(int payload_type, const SdpAudioFormat& format);
      ~SendCodecSpec();

      std::string ToString() const;

      bool operator==(const SendCodecSpec& rhs) const;
      bool operator!=(const SendCodecSpec& rhs) const {
        return !(*this == rhs);
      }

      int payload_type;
      SdpAudioFormat format;
      bool nack_enabled = false;
      bool transport_cc_enabled = false;
      bool enable_non_sender_rtt = false;
      absl::optional<int> cng_payload_type;
      absl::optional<int> red_payload_type;
      // If unset, use the encoder's default target bitrate.
      absl::optional<int> target_bitrate_bps;
    };

    absl::optional<SendCodecSpec> send_codec_spec;
    rtc::scoped_refptr<AudioEncoderFactory> encoder_factory;
    absl::optional<AudioCodecPairId> codec_pair_id;

    // Track ID as specified during track creation.
    std::string track_id;

    // Per PeerConnection crypto options.
    webrtc::CryptoOptions crypto_options;

    // An optional custom frame encryptor that allows the entire frame to be
    // encrypted in whatever way the caller chooses. This is not required by
    // default.
    rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor;

    // An optional frame transformer used by insertable streams to transform
    // encoded frames.
    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
  };

  virtual ~AudioSendStream() = default;

  virtual const webrtc::AudioSendStream::Config& GetConfig() const = 0;

  // Reconfigure the stream according to the Configuration.
  virtual void Reconfigure(const Config& config,
                           SetParametersCallback callback) = 0;

  // Starts stream activity.
  // When a stream is active, it can receive, process and deliver packets.
  virtual void Start() = 0;

  // Stops stream activity.
  // When a stream is stopped, it can't receive, process or deliver packets.
  virtual void Stop() = 0;

  // TODO(solenberg): Make payload_type a config property instead.
  virtual bool SendTelephoneEvent(int payload_type,
                                  int payload_frequency,
                                  int event,
                                  int duration_ms) = 0;

  virtual void SetMuted(bool muted) = 0;

  virtual Stats GetStats() const = 0;
  virtual Stats GetStats(bool has_remote_tracks) const = 0;
};
} // namespace webrtc
#endif // CALL_AUDIO_SEND_STREAM_H_

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_AUDIO_SENDER_H_
#define CALL_AUDIO_SENDER_H_
#include <memory>
#include "api/audio/audio_frame.h"
namespace webrtc {
// Minimal interface for feeding audio frames into a stream for encoding and
// sending. AudioSendStream derives from this.
class AudioSender {
 public:
  // Encode and send audio. Ownership of `audio_frame` transfers to the callee
  // (passed by unique_ptr).
  virtual void SendAudioData(std::unique_ptr<AudioFrame> audio_frame) = 0;

  virtual ~AudioSender() = default;
};
} // namespace webrtc
#endif // CALL_AUDIO_SENDER_H_

View file

@ -0,0 +1,18 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/audio_state.h"
namespace webrtc {
// Out-of-line defaulted special members for AudioState::Config.
AudioState::Config::Config() = default;
AudioState::Config::~Config() = default;
} // namespace webrtc

View file

@ -0,0 +1,69 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_AUDIO_STATE_H_
#define CALL_AUDIO_STATE_H_
#include "api/audio/audio_mixer.h"
#include "api/scoped_refptr.h"
#include "modules/async_audio_processing/async_audio_processing.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/ref_count.h"
namespace webrtc {
class AudioTransport;
// AudioState holds the state which must be shared between multiple instances of
// webrtc::Call for audio processing purposes.
// AudioState holds the state which must be shared between multiple instances
// of webrtc::Call for audio processing purposes. It is reference counted and
// created via the static Create() factory below.
class AudioState : public rtc::RefCountInterface {
 public:
  struct Config {
    Config();
    ~Config();

    // The audio mixer connected to active receive streams. One per
    // AudioState.
    rtc::scoped_refptr<AudioMixer> audio_mixer;

    // The audio processing module.
    rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing;

    // TODO(solenberg): Temporary: audio device module.
    rtc::scoped_refptr<webrtc::AudioDeviceModule> audio_device_module;

    // Factory for asynchronous audio processing (see
    // modules/async_audio_processing).
    rtc::scoped_refptr<AsyncAudioProcessing::Factory>
        async_audio_processing_factory;
  };

  virtual AudioProcessing* audio_processing() = 0;
  virtual AudioTransport* audio_transport() = 0;

  // Enable/disable playout of the audio channels. Enabled by default.
  // This will stop playout of the underlying audio device but start a task
  // which will poll for audio data every 10ms to ensure that audio processing
  // happens and the audio stats are updated.
  virtual void SetPlayout(bool enabled) = 0;

  // Enable/disable recording of the audio channels. Enabled by default.
  // This will stop recording of the underlying audio device and no audio
  // packets will be encoded or transmitted.
  virtual void SetRecording(bool enabled) = 0;

  // Presumably swaps the left/right stereo channels when enabled --
  // TODO(review): confirm against the implementation.
  virtual void SetStereoChannelSwapping(bool enable) = 0;

  // Factory for the default implementation.
  static rtc::scoped_refptr<AudioState> Create(
      const AudioState::Config& config);

  ~AudioState() override {}
};
} // namespace webrtc
#endif // CALL_AUDIO_STATE_H_

View file

@ -0,0 +1,593 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "call/bitrate_allocator.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <utility>
#include "absl/algorithm/container.h"
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "system_wrappers/include/clock.h"
#include "system_wrappers/include/metrics.h"
namespace webrtc {
namespace {
using bitrate_allocator_impl::AllocatableTrack;

// Allow packets to be transmitted in up to 2 times max video bitrate if the
// bandwidth estimate allows it.
const uint8_t kTransmissionMaxBitrateMultiplier = 2;
// Initial value for last_non_zero_bitrate_bps_ (see the constructor), used
// before any estimate or start rate has been supplied.
const int kDefaultBitrateBps = 300000;

// Require a bitrate increase of max(10%, 20kbps) to resume paused streams.
const double kToggleFactor = 0.1;
const uint32_t kMinToggleBitrateBps = 20000;

// Minimum interval between the periodic "Current BWE" log lines.
const int64_t kBweLogIntervalMs = 5000;
// Fraction of `allocated_bitrate` that is media rather than protection
// overhead. Returns 1.0 when no protection bitrate is allocated.
double MediaRatio(uint32_t allocated_bitrate, uint32_t protection_bitrate) {
  RTC_DCHECK_GT(allocated_bitrate, 0);
  if (protection_bitrate == 0)
    return 1.0;
  const uint32_t media_bitrate = allocated_bitrate - protection_bitrate;
  return static_cast<double>(media_bitrate) / allocated_bitrate;
}
bool EnoughBitrateForAllObservers(
const std::vector<AllocatableTrack>& allocatable_tracks,
uint32_t bitrate,
uint32_t sum_min_bitrates) {
if (bitrate < sum_min_bitrates)
return false;
uint32_t extra_bitrate_per_observer =
(bitrate - sum_min_bitrates) /
static_cast<uint32_t>(allocatable_tracks.size());
for (const auto& observer_config : allocatable_tracks) {
if (observer_config.config.min_bitrate_bps + extra_bitrate_per_observer <
observer_config.MinBitrateWithHysteresis()) {
return false;
}
}
return true;
}
// Splits `bitrate` evenly to observers already in `allocation`.
// `include_zero_allocations` decides if zero allocations should be part of
// the distribution or not. The allowed max bitrate is `max_multiplier` x
// observer max bitrate.
void DistributeBitrateEvenly(
    const std::vector<AllocatableTrack>& allocatable_tracks,
    uint32_t bitrate,
    bool include_zero_allocations,
    int max_multiplier,
    std::map<BitrateAllocatorObserver*, int>* allocation) {
  RTC_DCHECK_EQ(allocation->size(), allocatable_tracks.size());

  // Multimap keyed on max bitrate: observers are processed in ascending order
  // of their cap, so bitrate that does not fit under a small cap can be
  // carried over to observers with more headroom.
  std::multimap<uint32_t, const AllocatableTrack*> list_max_bitrates;
  for (const auto& observer_config : allocatable_tracks) {
    if (include_zero_allocations ||
        allocation->at(observer_config.observer) != 0) {
      list_max_bitrates.insert(
          {observer_config.config.max_bitrate_bps, &observer_config});
    }
  }
  auto it = list_max_bitrates.begin();
  while (it != list_max_bitrates.end()) {
    RTC_DCHECK_GT(bitrate, 0);
    // Even share of what is left, over the observers not yet processed.
    uint32_t extra_allocation =
        bitrate / static_cast<uint32_t>(list_max_bitrates.size());
    uint32_t total_allocation =
        extra_allocation + allocation->at(it->second->observer);
    bitrate -= extra_allocation;
    if (total_allocation > max_multiplier * it->first) {
      // There is more than we can fit for this observer, carry over to the
      // remaining observers.
      bitrate += total_allocation - max_multiplier * it->first;
      total_allocation = max_multiplier * it->first;
    }
    // Finally, update the allocation for this observer.
    allocation->at(it->second->observer) = total_allocation;
    it = list_max_bitrates.erase(it);
  }
}
// From the available `bitrate`, each observer will be allocated a
// proportional amount based upon its bitrate priority. If that amount is
// more than the observer's capacity, it will be allocated its capacity, and
// the excess bitrate is still allocated proportionally to other observers.
// Allocating the proportional amount means an observer with twice the
// bitrate_priority of another will be allocated twice the bitrate.
void DistributeBitrateRelatively(
    const std::vector<AllocatableTrack>& allocatable_tracks,
    uint32_t remaining_bitrate,
    const std::map<BitrateAllocatorObserver*, int>& observers_capacities,
    std::map<BitrateAllocatorObserver*, int>* allocation) {
  RTC_DCHECK_EQ(allocation->size(), allocatable_tracks.size());
  RTC_DCHECK_EQ(observers_capacities.size(), allocatable_tracks.size());

  // Per-observer snapshot of the data needed for the proportional split.
  struct PriorityRateObserverConfig {
    BitrateAllocatorObserver* allocation_key;
    // The amount of bitrate bps that can be allocated to this observer.
    int capacity_bps;
    double bitrate_priority;
  };

  double bitrate_priority_sum = 0;
  std::vector<PriorityRateObserverConfig> priority_rate_observers;
  for (const auto& observer_config : allocatable_tracks) {
    priority_rate_observers.push_back(PriorityRateObserverConfig{
        observer_config.observer,
        observers_capacities.at(observer_config.observer),
        observer_config.config.bitrate_priority});
    bitrate_priority_sum += observer_config.config.bitrate_priority;
  }

  // Iterate in the order observers can be allocated their full capacity.
  // We want to sort by which observers will be allocated their full capacity
  // first. By dividing each observer's capacity by its bitrate priority we
  // are "normalizing" the capacity of an observer by the rate it will be
  // filled. This is because the amount allocated is based upon bitrate
  // priority. We allocate twice as much bitrate to an observer with twice the
  // bitrate priority of another.
  absl::c_sort(priority_rate_observers, [](const auto& a, const auto& b) {
    return a.capacity_bps / a.bitrate_priority <
           b.capacity_bps / b.bitrate_priority;
  });
  size_t i;
  for (i = 0; i < priority_rate_observers.size(); ++i) {
    const auto& priority_rate_observer = priority_rate_observers[i];
    // We allocate the full capacity to an observer only if its relative
    // portion from the remaining bitrate is sufficient to allocate its full
    // capacity. This means we aren't greedily allocating the full capacity, but
    // that it is only done when there is also enough bitrate to allocate the
    // proportional amounts to all other observers.
    double observer_share =
        priority_rate_observer.bitrate_priority / bitrate_priority_sum;
    double allocation_bps = observer_share * remaining_bitrate;
    bool enough_bitrate = allocation_bps >= priority_rate_observer.capacity_bps;
    if (!enough_bitrate)
      break;
    allocation->at(priority_rate_observer.allocation_key) +=
        priority_rate_observer.capacity_bps;
    remaining_bitrate -= priority_rate_observer.capacity_bps;
    // Saturated observers no longer participate in the proportional split.
    bitrate_priority_sum -= priority_rate_observer.bitrate_priority;
  }
  // From the remaining bitrate, allocate the proportional amounts to the
  // observers that aren't allocated their max capacity.
  for (; i < priority_rate_observers.size(); ++i) {
    const auto& priority_rate_observer = priority_rate_observers[i];
    double fraction_allocated =
        priority_rate_observer.bitrate_priority / bitrate_priority_sum;
    allocation->at(priority_rate_observer.allocation_key) +=
        fraction_allocated * remaining_bitrate;
  }
}
// Allocates bitrate to observers when there isn't enough to allocate the
// minimum to all observers.
std::map<BitrateAllocatorObserver*, int> LowRateAllocation(
    const std::vector<AllocatableTrack>& allocatable_tracks,
    uint32_t bitrate) {
  std::map<BitrateAllocatorObserver*, int> allocation;
  // Start by allocating bitrate to observers enforcing a min bitrate, hence
  // remaining_bitrate might turn negative.
  int64_t remaining_bitrate = bitrate;
  for (const auto& observer_config : allocatable_tracks) {
    int32_t allocated_bitrate = 0;
    if (observer_config.config.enforce_min_bitrate)
      allocated_bitrate = observer_config.config.min_bitrate_bps;
    allocation[observer_config.observer] = allocated_bitrate;
    remaining_bitrate -= allocated_bitrate;
  }

  // Allocate bitrate to all previously active streams.
  if (remaining_bitrate > 0) {
    for (const auto& observer_config : allocatable_tracks) {
      // Skip streams handled in the first pass and streams that were paused
      // (zero allocation) last round.
      if (observer_config.config.enforce_min_bitrate ||
          observer_config.LastAllocatedBitrate() == 0)
        continue;

      uint32_t required_bitrate = observer_config.MinBitrateWithHysteresis();
      if (remaining_bitrate >= required_bitrate) {
        allocation[observer_config.observer] = required_bitrate;
        remaining_bitrate -= required_bitrate;
      }
    }
  }

  // Allocate bitrate to previously paused streams.
  if (remaining_bitrate > 0) {
    for (const auto& observer_config : allocatable_tracks) {
      if (observer_config.LastAllocatedBitrate() != 0)
        continue;

      // Add a hysteresis to avoid toggling.
      uint32_t required_bitrate = observer_config.MinBitrateWithHysteresis();
      if (remaining_bitrate >= required_bitrate) {
        allocation[observer_config.observer] = required_bitrate;
        remaining_bitrate -= required_bitrate;
      }
    }
  }

  // Split a possible remainder evenly on all streams with an allocation.
  if (remaining_bitrate > 0)
    DistributeBitrateEvenly(allocatable_tracks, remaining_bitrate, false, 1,
                            &allocation);

  RTC_DCHECK_EQ(allocation.size(), allocatable_tracks.size());
  return allocation;
}
// Allocates bitrate to all observers when the available bandwidth is enough
// to allocate the minimum to all observers but not enough to allocate the
// max bitrate of each observer.
// Allocates the bitrate based on the bitrate priority of each observer. This
// bitrate priority defines the priority for bitrate to be allocated to that
// observer in relation to other observers. For example with two observers, if
// observer 1 had a bitrate_priority = 1.0, and observer 2 has a
// bitrate_priority = 2.0, the expected behavior is that observer 2 will be
// allocated twice the bitrate as observer 1 above the each observer's
// min_bitrate_bps values, until one of the observers hits its max_bitrate_bps.
std::map<BitrateAllocatorObserver*, int> NormalRateAllocation(
    const std::vector<AllocatableTrack>& allocatable_tracks,
    uint32_t bitrate,
    uint32_t sum_min_bitrates) {
  // Every track starts at its min bitrate; `observers_capacities` tracks the
  // remaining headroom up to each track's max.
  std::map<BitrateAllocatorObserver*, int> allocation;
  std::map<BitrateAllocatorObserver*, int> observers_capacities;
  for (const auto& observer_config : allocatable_tracks) {
    allocation[observer_config.observer] =
        observer_config.config.min_bitrate_bps;
    observers_capacities[observer_config.observer] =
        observer_config.config.max_bitrate_bps -
        observer_config.config.min_bitrate_bps;
  }

  bitrate -= sum_min_bitrates;

  // Serve each track's priority_bitrate_bps (bitrate prioritized ahead of
  // the proportional split below) before the relative distribution.
  // TODO(srte): Implement fair sharing between prioritized streams, currently
  // they are treated on a first come first serve basis.
  for (const auto& observer_config : allocatable_tracks) {
    int64_t priority_margin = observer_config.config.priority_bitrate_bps -
                              allocation[observer_config.observer];
    if (priority_margin > 0 && bitrate > 0) {
      int64_t extra_bitrate = std::min<int64_t>(priority_margin, bitrate);
      allocation[observer_config.observer] +=
          rtc::dchecked_cast<int>(extra_bitrate);
      observers_capacities[observer_config.observer] -= extra_bitrate;
      bitrate -= extra_bitrate;
    }
  }

  // From the remaining bitrate, allocate a proportional amount to each observer
  // above the min bitrate already allocated.
  if (bitrate > 0)
    DistributeBitrateRelatively(allocatable_tracks, bitrate,
                                observers_capacities, &allocation);

  return allocation;
}
// Allocates bitrate to observers when there is enough available bandwidth
// for all observers to be allocated their max bitrate.
std::map<BitrateAllocatorObserver*, int> MaxRateAllocation(
    const std::vector<AllocatableTrack>& allocatable_tracks,
    uint32_t bitrate,
    uint32_t sum_max_bitrates) {
  std::map<BitrateAllocatorObserver*, int> allocation;
  // Give each track its configured maximum; whatever is left over is then
  // spread evenly, capped at kTransmissionMaxBitrateMultiplier x max per
  // track.
  uint32_t leftover_bitrate = bitrate;
  for (const auto& track : allocatable_tracks) {
    allocation[track.observer] = track.config.max_bitrate_bps;
    leftover_bitrate -= track.config.max_bitrate_bps;
  }
  DistributeBitrateEvenly(allocatable_tracks, leftover_bitrate, true,
                          kTransmissionMaxBitrateMultiplier, &allocation);
  return allocation;
}
// Allocates zero bitrate to all observers.
std::map<BitrateAllocatorObserver*, int> ZeroRateAllocation(
const std::vector<AllocatableTrack>& allocatable_tracks) {
std::map<BitrateAllocatorObserver*, int> allocation;
for (const auto& observer_config : allocatable_tracks)
allocation[observer_config.observer] = 0;
return allocation;
}
// Dispatches to the zero/low/normal/max allocation strategy depending on how
// `bitrate` compares to the tracks' summed min and max bitrates.
std::map<BitrateAllocatorObserver*, int> AllocateBitrates(
    const std::vector<AllocatableTrack>& allocatable_tracks,
    uint32_t bitrate) {
  if (allocatable_tracks.empty())
    return {};

  if (bitrate == 0)
    return ZeroRateAllocation(allocatable_tracks);

  uint32_t min_bitrate_sum = 0;
  uint32_t max_bitrate_sum = 0;
  for (const auto& track : allocatable_tracks) {
    min_bitrate_sum += track.config.min_bitrate_bps;
    max_bitrate_sum += track.config.max_bitrate_bps;
  }

  // Not enough for all observers to get an allocation, allocate according to:
  // enforced min bitrate -> allocated bitrate previous round -> restart paused
  // streams.
  if (!EnoughBitrateForAllObservers(allocatable_tracks, bitrate,
                                    min_bitrate_sum)) {
    return LowRateAllocation(allocatable_tracks, bitrate);
  }

  // All observers will get their min bitrate plus a share of the rest. This
  // share is allocated to each observer based on its bitrate_priority.
  if (bitrate <= max_bitrate_sum) {
    return NormalRateAllocation(allocatable_tracks, bitrate, min_bitrate_sum);
  }

  // All observers will get up to transmission_max_bitrate_multiplier_ x max.
  return MaxRateAllocation(allocatable_tracks, bitrate, max_bitrate_sum);
}
} // namespace
// `limit_observer` is stored raw and notified (via UpdateAllocationLimits)
// whenever the aggregate min/max/padding limits change; it must outlive this
// allocator.
BitrateAllocator::BitrateAllocator(LimitObserver* limit_observer)
    : limit_observer_(limit_observer),
      last_target_bps_(0),
      last_stable_target_bps_(0),
      last_non_zero_bitrate_bps_(kDefaultBitrateBps),
      last_fraction_loss_(0),
      last_rtt_(0),
      last_bwe_period_ms_(1000),
      num_pause_events_(0),
      last_bwe_log_time_(0) {
  // Construction may happen on a different sequence than all later calls;
  // re-attach the checker on first use.
  sequenced_checker_.Detach();
}
BitrateAllocator::~BitrateAllocator() {
  // Report the number of pause/resume events (streams toggled due to low
  // BWE) accumulated over the allocator's lifetime.
  RTC_HISTOGRAM_COUNTS_100("WebRTC.Call.NumberOfPauseEvents",
                           num_pause_events_);
}
// Seeds the "last known non-zero bitrate", which GetStartBitrate() uses to
// compute an initial fair share before any network estimate has arrived.
void BitrateAllocator::UpdateStartRate(uint32_t start_rate_bps) {
  RTC_DCHECK_RUN_ON(&sequenced_checker_);
  last_non_zero_bitrate_bps_ = start_rate_bps;
}
// Recomputes the per-observer allocation from a new transfer-rate estimate
// and pushes a BitrateAllocationUpdate to every registered observer. Also
// tracks pause/resume transitions (allocation crossing zero) for metrics and
// logging, and finally republishes the aggregate allocation limits.
void BitrateAllocator::OnNetworkEstimateChanged(TargetTransferRate msg) {
  RTC_DCHECK_RUN_ON(&sequenced_checker_);
  // Cache the latest network state so AddObserver() can replay it to
  // observers added between estimate updates.
  last_target_bps_ = msg.target_rate.bps();
  last_stable_target_bps_ = msg.stable_target_rate.bps();
  last_non_zero_bitrate_bps_ =
      last_target_bps_ > 0 ? last_target_bps_ : last_non_zero_bitrate_bps_;
  // Convert the loss ratio [0.0, 1.0] to the RTP-style 0-255 fraction.
  int loss_ratio_255 = msg.network_estimate.loss_rate_ratio * 255;
  last_fraction_loss_ =
      rtc::dchecked_cast<uint8_t>(rtc::SafeClamp(loss_ratio_255, 0, 255));
  last_rtt_ = msg.network_estimate.round_trip_time.ms();
  last_bwe_period_ms_ = msg.network_estimate.bwe_period.ms();
  // Periodically log the incoming BWE.
  int64_t now = msg.at_time.ms();
  if (now > last_bwe_log_time_ + kBweLogIntervalMs) {
    RTC_LOG(LS_INFO) << "Current BWE " << last_target_bps_;
    last_bwe_log_time_ = now;
  }
  // Two allocations: one for the target rate and one for the (typically more
  // conservative) stable target rate.
  auto allocation = AllocateBitrates(allocatable_tracks_, last_target_bps_);
  auto stable_bitrate_allocation =
      AllocateBitrates(allocatable_tracks_, last_stable_target_bps_);
  for (auto& config : allocatable_tracks_) {
    uint32_t allocated_bitrate = allocation[config.observer];
    uint32_t allocated_stable_target_rate =
        stable_bitrate_allocation[config.observer];
    BitrateAllocationUpdate update;
    update.target_bitrate = DataRate::BitsPerSec(allocated_bitrate);
    update.stable_target_bitrate =
        DataRate::BitsPerSec(allocated_stable_target_rate);
    update.packet_loss_ratio = last_fraction_loss_ / 256.0;
    update.round_trip_time = TimeDelta::Millis(last_rtt_);
    update.bwe_period = TimeDelta::Millis(last_bwe_period_ms_);
    update.cwnd_reduce_ratio = msg.cwnd_reduce_ratio;
    // The observer reports back how much protection (e.g. FEC) bitrate it
    // uses out of its allocation.
    uint32_t protection_bitrate = config.observer->OnBitrateUpdated(update);
    if (allocated_bitrate == 0 && config.allocated_bitrate_bps > 0) {
      // Transition active -> paused.
      if (last_target_bps_ > 0)
        ++num_pause_events_;
      // The protection bitrate is an estimate based on the ratio between media
      // and protection used before this observer was muted.
      uint32_t predicted_protection_bps =
          (1.0 - config.media_ratio) * config.config.min_bitrate_bps;
      RTC_LOG(LS_INFO) << "Pausing observer " << config.observer
                       << " with configured min bitrate "
                       << config.config.min_bitrate_bps
                       << " and current estimate of " << last_target_bps_
                       << " and protection bitrate "
                       << predicted_protection_bps;
    } else if (allocated_bitrate > 0 && config.allocated_bitrate_bps == 0) {
      // Transition paused -> active. Resumes are counted in the same
      // histogram as pauses.
      if (last_target_bps_ > 0)
        ++num_pause_events_;
      RTC_LOG(LS_INFO) << "Resuming observer " << config.observer
                       << ", configured min bitrate "
                       << config.config.min_bitrate_bps
                       << ", current allocation " << allocated_bitrate
                       << " and protection bitrate " << protection_bitrate;
    }
    // Only update the media ratio if the observer got an allocation.
    if (allocated_bitrate > 0)
      config.media_ratio = MediaRatio(allocated_bitrate, protection_bitrate);
    config.allocated_bitrate_bps = allocated_bitrate;
  }
  UpdateAllocationLimits();
}
// Registers `observer` with allocation parameters `config`, or updates the
// parameters if it is already registered. If a network estimate exists, a
// full re-allocation is performed and every observer is notified; otherwise
// only the new observer is told (with zero rates) that it may not send yet.
void BitrateAllocator::AddObserver(BitrateAllocatorObserver* observer,
                                   MediaStreamAllocationConfig config) {
  RTC_DCHECK_RUN_ON(&sequenced_checker_);
  RTC_DCHECK_GT(config.bitrate_priority, 0);
  RTC_DCHECK(std::isnormal(config.bitrate_priority));
  auto it = absl::c_find_if(
      allocatable_tracks_,
      [observer](const auto& config) { return config.observer == observer; });
  // Update settings if the observer already exists, create a new one otherwise.
  if (it != allocatable_tracks_.end()) {
    it->config = config;
  } else {
    allocatable_tracks_.push_back(AllocatableTrack(observer, config));
  }
  if (last_target_bps_ > 0) {
    // Calculate a new allocation and update all observers.
    auto allocation = AllocateBitrates(allocatable_tracks_, last_target_bps_);
    auto stable_bitrate_allocation =
        AllocateBitrates(allocatable_tracks_, last_stable_target_bps_);
    for (auto& config : allocatable_tracks_) {
      uint32_t allocated_bitrate = allocation[config.observer];
      uint32_t allocated_stable_bitrate =
          stable_bitrate_allocation[config.observer];
      BitrateAllocationUpdate update;
      update.target_bitrate = DataRate::BitsPerSec(allocated_bitrate);
      update.stable_target_bitrate =
          DataRate::BitsPerSec(allocated_stable_bitrate);
      update.packet_loss_ratio = last_fraction_loss_ / 256.0;
      update.round_trip_time = TimeDelta::Millis(last_rtt_);
      update.bwe_period = TimeDelta::Millis(last_bwe_period_ms_);
      // Observer reports how much of the allocation goes to protection.
      uint32_t protection_bitrate = config.observer->OnBitrateUpdated(update);
      config.allocated_bitrate_bps = allocated_bitrate;
      if (allocated_bitrate > 0)
        config.media_ratio = MediaRatio(allocated_bitrate, protection_bitrate);
    }
  } else {
    // Currently, an encoder is not allowed to produce frames.
    // But we still have to return the initial config bitrate + let the
    // observer know that it can not produce frames.
    BitrateAllocationUpdate update;
    update.target_bitrate = DataRate::Zero();
    update.stable_target_bitrate = DataRate::Zero();
    update.packet_loss_ratio = last_fraction_loss_ / 256.0;
    update.round_trip_time = TimeDelta::Millis(last_rtt_);
    update.bwe_period = TimeDelta::Millis(last_bwe_period_ms_);
    observer->OnBitrateUpdated(update);
  }
  UpdateAllocationLimits();
}
// Recomputes the aggregate min/padding/max rate limits over all tracks and,
// if anything changed since the last computation, logs the new limits and
// notifies the limit observer.
void BitrateAllocator::UpdateAllocationLimits() {
  BitrateAllocationLimits limits;
  for (const auto& track : allocatable_tracks_) {
    uint32_t stream_padding = track.config.pad_up_bitrate_bps;
    if (track.config.enforce_min_bitrate) {
      limits.min_allocatable_rate +=
          DataRate::BitsPerSec(track.config.min_bitrate_bps);
    } else if (track.allocated_bitrate_bps == 0) {
      // A paused, non-enforced stream needs enough padding to clear the
      // resume hysteresis threshold.
      stream_padding =
          std::max(track.MinBitrateWithHysteresis(), stream_padding);
    }
    limits.max_padding_rate += DataRate::BitsPerSec(stream_padding);
    limits.max_allocatable_rate +=
        DataRate::BitsPerSec(track.config.max_bitrate_bps);
  }

  const bool unchanged =
      limits.min_allocatable_rate == current_limits_.min_allocatable_rate &&
      limits.max_allocatable_rate == current_limits_.max_allocatable_rate &&
      limits.max_padding_rate == current_limits_.max_padding_rate;
  if (unchanged)
    return;

  current_limits_ = limits;
  RTC_LOG(LS_INFO) << "UpdateAllocationLimits : total_requested_min_bitrate: "
                   << ToString(limits.min_allocatable_rate)
                   << ", total_requested_padding_bitrate: "
                   << ToString(limits.max_padding_rate)
                   << ", total_requested_max_bitrate: "
                   << ToString(limits.max_allocatable_rate);
  limit_observer_->OnAllocationLimitsChanged(limits);
}
// Removes a previously added observer. Does not trigger a new bitrate
// allocation for the remaining observers, but does recompute and publish the
// aggregate allocation limits.
void BitrateAllocator::RemoveObserver(BitrateAllocatorObserver* observer) {
  RTC_DCHECK_RUN_ON(&sequenced_checker_);
  // Use the same lookup idiom as AddObserver()/GetStartBitrate() for
  // consistency. At most one entry per observer exists (AddObserver updates
  // in place), so erasing the first match is sufficient.
  auto it = absl::c_find_if(
      allocatable_tracks_,
      [observer](const auto& config) { return config.observer == observer; });
  if (it != allocatable_tracks_.end())
    allocatable_tracks_.erase(it);
  UpdateAllocationLimits();
}
// Returns the bitrate an observer should start sending at. Observers that
// are unknown or not yet allocated get an even share of the last known
// non-zero estimate; otherwise the current allocation is returned.
int BitrateAllocator::GetStartBitrate(
    BitrateAllocatorObserver* observer) const {
  RTC_DCHECK_RUN_ON(&sequenced_checker_);
  const auto it = absl::c_find_if(
      allocatable_tracks_,
      [observer](const auto& track) { return track.observer == observer; });
  if (it == allocatable_tracks_.end()) {
    // Not added yet: pretend it has been, and hand out a fair share among
    // the existing tracks plus this one.
    return last_non_zero_bitrate_bps_ /
           static_cast<int>((allocatable_tracks_.size() + 1));
  }
  if (it->allocated_bitrate_bps == -1) {
    // Added but never allocated: also hand out a fair share.
    return last_non_zero_bitrate_bps_ /
           static_cast<int>(allocatable_tracks_.size());
  }
  // Already has an allocation; reuse it.
  return it->allocated_bitrate_bps;
}
// Returns the bitrate allocated in the previous round. For observers that
// have never been allocated (-1 sentinel), returns the configured minimum so
// that a freshly added observer does not need an extra-high estimate before
// receiving bitrate.
uint32_t bitrate_allocator_impl::AllocatableTrack::LastAllocatedBitrate()
    const {
  if (allocated_bitrate_bps == -1)
    return config.min_bitrate_bps;
  return allocated_bitrate_bps;
}
// Minimum bitrate this track requires before it can be (re)started,
// including hysteresis headroom when the track is currently paused.
uint32_t bitrate_allocator_impl::AllocatableTrack::MinBitrateWithHysteresis()
    const {
  uint32_t min_bitrate = config.min_bitrate_bps;
  // A paused stream must clear a higher bar before being resumed, to avoid
  // rapid on/off toggling around the threshold.
  if (LastAllocatedBitrate() == 0) {
    const uint32_t toggle_headroom =
        std::max(static_cast<uint32_t>(kToggleFactor * min_bitrate),
                 kMinToggleBitrateBps);
    min_bitrate += toggle_headroom;
  }
  // Account for protection bitrate used by this observer in the previous
  // allocation. The ratio is only refreshed while the stream is active, so a
  // paused stream keeps its last known ratio; that may delay resuming a bit
  // when the network improves, but avoids excessive toggling.
  if (media_ratio > 0.0 && media_ratio < 1.0)
    min_bitrate += min_bitrate * (1.0 - media_ratio);
  return min_bitrate;
}
} // namespace webrtc

View file

@ -0,0 +1,170 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_BITRATE_ALLOCATOR_H_
#define CALL_BITRATE_ALLOCATOR_H_
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "api/call/bitrate_allocation.h"
#include "api/sequence_checker.h"
#include "api/transport/network_types.h"
#include "rtc_base/system/no_unique_address.h"
namespace webrtc {
class Clock;
// Used by all send streams with adaptive bitrate, to get the currently
// allocated bitrate for the send stream. The current network properties are
// given at the same time, to let the send stream decide about possible loss
// protection.
class BitrateAllocatorObserver {
 public:
  // Called by the allocator with this observer's share of the estimated
  // bandwidth plus current network properties.
  // Returns the amount of protection used by the BitrateAllocatorObserver
  // implementation, as bitrate in bps.
  virtual uint32_t OnBitrateUpdated(BitrateAllocationUpdate update) = 0;

 protected:
  // Non-virtual-public destructor: observers are never deleted through this
  // interface.
  virtual ~BitrateAllocatorObserver() {}
};
// Struct describing parameters for how a media stream should get bitrate
// allocated to it.
struct MediaStreamAllocationConfig {
  // Minimum bitrate supported by track. 0 equals no min bitrate.
  uint32_t min_bitrate_bps;
  // Maximum bitrate supported by track. 0 equals no max bitrate.
  uint32_t max_bitrate_bps;
  // Padding bitrate requested for this track; contributes to the aggregate
  // max padding rate reported to the LimitObserver.
  uint32_t pad_up_bitrate_bps;
  // NOTE(review): not referenced by the allocator code visible here —
  // presumably a bitrate slice reserved at elevated priority; confirm
  // against the allocation strategy implementations.
  int64_t priority_bitrate_bps;
  // If true, the track may not be paused: at least `min_bitrate_bps` is
  // allocated for this observer even if the BWE is too low. If false, 0 is
  // allocated to the observer when the BWE doesn't cover `min_bitrate_bps`.
  bool enforce_min_bitrate;
  // The amount of bitrate allocated to this observer relative to all other
  // observers. If an observer has twice the bitrate_priority of other
  // observers, it should be allocated twice the bitrate above its min.
  double bitrate_priority;
};
// Interface used for mocking
// Pure-virtual surface of BitrateAllocator, used for mocking in tests.
class BitrateAllocatorInterface {
 public:
  // Registers `observer` (or updates its config if already registered).
  virtual void AddObserver(BitrateAllocatorObserver* observer,
                           MediaStreamAllocationConfig config) = 0;
  // Unregisters a previously added observer.
  virtual void RemoveObserver(BitrateAllocatorObserver* observer) = 0;
  // Initial bitrate for `observer`; a best guess if it was never added.
  virtual int GetStartBitrate(BitrateAllocatorObserver* observer) const = 0;

 protected:
  virtual ~BitrateAllocatorInterface() = default;
};
namespace bitrate_allocator_impl {
// Bookkeeping entry pairing an observer with its allocation config and the
// allocator's per-track state (last allocation and media/protection ratio).
struct AllocatableTrack {
  AllocatableTrack(BitrateAllocatorObserver* observer,
                   MediaStreamAllocationConfig allocation_config)
      : observer(observer),
        config(allocation_config),
        allocated_bitrate_bps(-1),
        media_ratio(1.0) {}
  BitrateAllocatorObserver* observer;
  MediaStreamAllocationConfig config;
  // Last allocated bitrate; -1 means "never allocated".
  int64_t allocated_bitrate_bps;
  double media_ratio;  // Part of the total bitrate used for media [0.0, 1.0].

  // Last allocated bitrate, or the configured min for never-allocated tracks.
  uint32_t LastAllocatedBitrate() const;
  // The minimum bitrate required by this observer, including hysteresis
  // headroom if the observer is in a paused state.
  uint32_t MinBitrateWithHysteresis() const;
};
} // namespace bitrate_allocator_impl
// Usage: this class will register multiple RtcpBitrateObserver's one at each
// RTCP module. It will aggregate the results and run one bandwidth estimation
// and push the result to the encoders via BitrateAllocatorObserver(s).
// Usage: this class will register multiple RtcpBitrateObserver's one at each
// RTCP module. It will aggregate the results and run one bandwidth estimation
// and push the result to the encoders via BitrateAllocatorObserver(s).
// All non-static methods must be called on the same sequence (enforced by
// `sequenced_checker_`).
class BitrateAllocator : public BitrateAllocatorInterface {
 public:
  // Used to get notified when send stream limits such as the minimum send
  // bitrate and max padding bitrate is changed.
  class LimitObserver {
   public:
    virtual void OnAllocationLimitsChanged(BitrateAllocationLimits limits) = 0;

   protected:
    virtual ~LimitObserver() = default;
  };

  explicit BitrateAllocator(LimitObserver* limit_observer);
  ~BitrateAllocator() override;

  // Seeds the initial rate used for start-bitrate guesses.
  void UpdateStartRate(uint32_t start_rate_bps);

  // Allocate target_bitrate across the registered BitrateAllocatorObservers.
  void OnNetworkEstimateChanged(TargetTransferRate msg);

  // Set the configuration used by the bandwidth management.
  // `observer` updates bitrates if already in use.
  // `config` is the configuration to use for allocation.
  // Note that `observer`->OnBitrateUpdated() will be called
  // within the scope of this method with the current rtt, fraction_loss and
  // available bitrate and that the bitrate in OnBitrateUpdated will be zero if
  // the `observer` is currently not allowed to send data.
  void AddObserver(BitrateAllocatorObserver* observer,
                   MediaStreamAllocationConfig config) override;

  // Removes a previously added observer, but will not trigger a new bitrate
  // allocation.
  void RemoveObserver(BitrateAllocatorObserver* observer) override;

  // Returns initial bitrate allocated for `observer`. If `observer` is not in
  // the list of added observers, a best guess is returned.
  int GetStartBitrate(BitrateAllocatorObserver* observer) const override;

 private:
  using AllocatableTrack = bitrate_allocator_impl::AllocatableTrack;

  // Calculates the minimum requested send bitrate and max padding bitrate and
  // calls LimitObserver::OnAllocationLimitsChanged.
  void UpdateAllocationLimits() RTC_RUN_ON(&sequenced_checker_);

  // Allow packets to be transmitted in up to 2 times max video bitrate if the
  // bandwidth estimate allows it.
  // TODO(bugs.webrtc.org/8541): May be worth to refactor to keep this logic in
  // video send stream.
  static uint8_t GetTransmissionMaxBitrateMultiplier();

  RTC_NO_UNIQUE_ADDRESS SequenceChecker sequenced_checker_;
  LimitObserver* const limit_observer_ RTC_GUARDED_BY(&sequenced_checker_);
  // Stored in a list to keep track of the insertion order.
  std::vector<AllocatableTrack> allocatable_tracks_
      RTC_GUARDED_BY(&sequenced_checker_);
  // Cached state from the most recent OnNetworkEstimateChanged() call.
  uint32_t last_target_bps_ RTC_GUARDED_BY(&sequenced_checker_);
  uint32_t last_stable_target_bps_ RTC_GUARDED_BY(&sequenced_checker_);
  uint32_t last_non_zero_bitrate_bps_ RTC_GUARDED_BY(&sequenced_checker_);
  uint8_t last_fraction_loss_ RTC_GUARDED_BY(&sequenced_checker_);
  int64_t last_rtt_ RTC_GUARDED_BY(&sequenced_checker_);
  int64_t last_bwe_period_ms_ RTC_GUARDED_BY(&sequenced_checker_);
  // Number of mute events based on too low BWE, not network up/down.
  int num_pause_events_ RTC_GUARDED_BY(&sequenced_checker_);
  int64_t last_bwe_log_time_ RTC_GUARDED_BY(&sequenced_checker_);
  // Last limits reported to `limit_observer_`; used to suppress no-op
  // notifications.
  BitrateAllocationLimits current_limits_ RTC_GUARDED_BY(&sequenced_checker_);
};
} // namespace webrtc
#endif // CALL_BITRATE_ALLOCATOR_H_

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,149 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_CALL_H_
#define CALL_CALL_H_
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "api/adaptation/resource.h"
#include "api/media_types.h"
#include "api/task_queue/task_queue_base.h"
#include "call/audio_receive_stream.h"
#include "call/audio_send_stream.h"
#include "call/call_config.h"
#include "call/flexfec_receive_stream.h"
#include "call/packet_receiver.h"
#include "call/video_receive_stream.h"
#include "call/video_send_stream.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/network_route.h"
#include "rtc_base/ref_count.h"
namespace webrtc {
// A Call represents a two-way connection carrying zero or more outgoing
// and incoming media streams, transported over one or more RTP transports.
// A Call instance can contain several send and/or receive streams. All streams
// are assumed to have the same remote endpoint and will share bitrate estimates
// etc.
// When using the PeerConnection API, there is an one to one relationship
// between the PeerConnection and the Call.
// A Call represents a two-way connection carrying zero or more outgoing
// and incoming media streams, transported over one or more RTP transports.
// A Call instance can contain several send and/or receive streams. All streams
// are assumed to have the same remote endpoint and will share bitrate
// estimates etc. Streams are created and destroyed exclusively through the
// Create*/Destroy* pairs below; Call retains ownership.
class Call {
 public:
  // Aggregate call statistics snapshot, returned by GetStats().
  struct Stats {
    std::string ToString(int64_t time_ms) const;

    int send_bandwidth_bps = 0;       // Estimated available send bandwidth.
    int max_padding_bitrate_bps = 0;  // Cumulative configured max padding.
    int recv_bandwidth_bps = 0;       // Estimated available receive bandwidth.
    int64_t pacer_delay_ms = 0;
    int64_t rtt_ms = -1;              // -1 until an RTT estimate exists.
  };

  static std::unique_ptr<Call> Create(const CallConfig& config);

  virtual AudioSendStream* CreateAudioSendStream(
      const AudioSendStream::Config& config) = 0;
  virtual void DestroyAudioSendStream(AudioSendStream* send_stream) = 0;

  virtual AudioReceiveStreamInterface* CreateAudioReceiveStream(
      const AudioReceiveStreamInterface::Config& config) = 0;
  virtual void DestroyAudioReceiveStream(
      AudioReceiveStreamInterface* receive_stream) = 0;

  virtual VideoSendStream* CreateVideoSendStream(
      VideoSendStream::Config config,
      VideoEncoderConfig encoder_config) = 0;
  // Overload that lets the caller inject a FecController; non-pure, so
  // implementations may rely on a default definition elsewhere.
  virtual VideoSendStream* CreateVideoSendStream(
      VideoSendStream::Config config,
      VideoEncoderConfig encoder_config,
      std::unique_ptr<FecController> fec_controller);
  virtual void DestroyVideoSendStream(VideoSendStream* send_stream) = 0;

  virtual VideoReceiveStreamInterface* CreateVideoReceiveStream(
      VideoReceiveStreamInterface::Config configuration) = 0;
  virtual void DestroyVideoReceiveStream(
      VideoReceiveStreamInterface* receive_stream) = 0;

  // In order for a created VideoReceiveStreamInterface to be aware that it is
  // protected by a FlexfecReceiveStream, the latter should be created before
  // the former.
  virtual FlexfecReceiveStream* CreateFlexfecReceiveStream(
      const FlexfecReceiveStream::Config config) = 0;
  virtual void DestroyFlexfecReceiveStream(
      FlexfecReceiveStream* receive_stream) = 0;

  // When a resource is overused, the Call will try to reduce the load on the
  // system, for example by reducing the resolution or frame rate of encoded
  // streams.
  virtual void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) = 0;

  // All received RTP and RTCP packets for the call should be inserted to this
  // PacketReceiver. The PacketReceiver pointer is valid as long as the
  // Call instance exists.
  virtual PacketReceiver* Receiver() = 0;

  // This is used to access the transport controller send instance owned by
  // Call. The send transport controller is currently owned by Call for legacy
  // reasons. (for instance variants of call tests are built on this assumtion)
  // TODO(srte): Move ownership of transport controller send out of Call and
  // remove this method interface.
  virtual RtpTransportControllerSendInterface* GetTransportControllerSend() = 0;

  // Returns the call statistics, such as estimated send and receive bandwidth,
  // pacing delay, etc.
  virtual Stats GetStats() const = 0;

  // TODO(skvlad): When the unbundled case with multiple streams for the same
  // media type going over different networks is supported, track the state
  // for each stream separately. Right now it's global per media type.
  virtual void SignalChannelNetworkState(MediaType media,
                                         NetworkState state) = 0;

  virtual void OnAudioTransportOverheadChanged(
      int transport_overhead_per_packet) = 0;

  // Called when a receive stream's local ssrc has changed and association with
  // send streams needs to be updated.
  virtual void OnLocalSsrcUpdated(AudioReceiveStreamInterface& stream,
                                  uint32_t local_ssrc) = 0;
  virtual void OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
                                  uint32_t local_ssrc) = 0;
  virtual void OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
                                  uint32_t local_ssrc) = 0;

  virtual void OnUpdateSyncGroup(AudioReceiveStreamInterface& stream,
                                 absl::string_view sync_group) = 0;

  virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0;

  virtual void SetClientBitratePreferences(
      const BitrateSettings& preferences) = 0;

  virtual const FieldTrialsView& trials() const = 0;

  virtual TaskQueueBase* network_thread() const = 0;
  virtual TaskQueueBase* worker_thread() const = 0;

  virtual ~Call() {}
};
} // namespace webrtc
#endif // CALL_CALL_H_

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/call_config.h"
#include "api/environment/environment.h"
#include "api/task_queue/task_queue_base.h"
namespace webrtc {
// Binds the config to `env`. A null `network_task_queue` means network
// callbacks happen on the TQ the Call instance is constructed on (see the
// comment on the constructor declaration in call_config.h).
CallConfig::CallConfig(const Environment& env,
                       TaskQueueBase* network_task_queue)
    : env(env),
      network_task_queue_(network_task_queue) {}

CallConfig::CallConfig(const CallConfig& config) = default;
// Copies the subset of this CallConfig that the RTP transport layer needs
// into a standalone RtpTransportConfig.
RtpTransportConfig CallConfig::ExtractTransportConfig() const {
  RtpTransportConfig transport_config = {.env = env};
  transport_config.bitrate_config = bitrate_config;
  transport_config.network_state_predictor_factory =
      network_state_predictor_factory;
  transport_config.network_controller_factory = network_controller_factory;
  transport_config.pacer_burst_interval = pacer_burst_interval;
  return transport_config;
}

CallConfig::~CallConfig() = default;
} // namespace webrtc

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_CALL_CONFIG_H_
#define CALL_CALL_CONFIG_H_
#include "api/environment/environment.h"
#include "api/fec_controller.h"
#include "api/metronome/metronome.h"
#include "api/neteq/neteq_factory.h"
#include "api/network_state_predictor.h"
#include "api/transport/bitrate_settings.h"
#include "api/transport/network_control.h"
#include "call/audio_state.h"
#include "call/rtp_transport_config.h"
#include "call/rtp_transport_controller_send_factory_interface.h"
namespace webrtc {
class AudioProcessing;
// Bag of dependencies and settings used to construct a Call. Plain struct:
// fields are filled in by the owner before Call::Create()/CreateCall().
struct CallConfig {
  // If `network_task_queue` is set to nullptr, Call will assume that network
  // related callbacks will be made on the same TQ as the Call instance was
  // constructed on.
  explicit CallConfig(const Environment& env,
                      TaskQueueBase* network_task_queue = nullptr);
  CallConfig(const CallConfig&);
  ~CallConfig();

  // Copies the transport-relevant subset of this config.
  RtpTransportConfig ExtractTransportConfig() const;

  Environment env;

  // Bitrate config used until valid bitrate estimates are calculated. Also
  // used to cap total bitrate used. This comes from the remote connection.
  BitrateConstraints bitrate_config;

  // AudioState which is possibly shared between multiple calls.
  rtc::scoped_refptr<AudioState> audio_state;

  // Audio Processing Module to be used in this call.
  AudioProcessing* audio_processing = nullptr;

  // FecController to use for this call.
  FecControllerFactoryInterface* fec_controller_factory = nullptr;

  // NetworkStatePredictor to use for this call.
  NetworkStatePredictorFactoryInterface* network_state_predictor_factory =
      nullptr;

  // Network controller factory to use for this call.
  NetworkControllerFactoryInterface* network_controller_factory = nullptr;

  // NetEq factory to use for this call.
  NetEqFactory* neteq_factory = nullptr;

  TaskQueueBase* const network_task_queue_ = nullptr;

  // RtpTransportControllerSend to use for this call.
  RtpTransportControllerSendFactoryInterface*
      rtp_transport_controller_send_factory = nullptr;

  Metronome* decode_metronome = nullptr;
  Metronome* encode_metronome = nullptr;

  // The burst interval of the pacer, see TaskQueuePacedSender constructor.
  absl::optional<TimeDelta> pacer_burst_interval;

  // Enables send packet batching from the egress RTP sender.
  bool enable_send_packet_batching = false;
};
} // namespace webrtc
#endif // CALL_CALL_CONFIG_H_

View file

@ -0,0 +1,98 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/create_call.h"
#include <stdio.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/test/simulated_network.h"
#include "api/units/time_delta.h"
#include "call/call.h"
#include "call/degraded_call.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/field_trial_list.h"
#include "rtc_base/experiments/field_trial_parser.h"
namespace webrtc {
namespace {
using TimeScopedNetworkConfig = DegradedCall::TimeScopedNetworkConfig;
// Parses the WebRTC-FakeNetworkSendConfig / WebRTC-FakeNetworkReceiveConfig
// field trials (selected by `send`) into a list of time-scoped simulated
// network configs. Returns an empty list when the trial is not set.
// NOTE: the member-name strings below are the field-trial keys and must not
// be changed.
std::vector<TimeScopedNetworkConfig> GetNetworkConfigs(
    const FieldTrialsView& trials,
    bool send) {
  FieldTrialStructList<TimeScopedNetworkConfig> trials_list(
      {FieldTrialStructMember("queue_length_packets",
                              [](TimeScopedNetworkConfig* p) {
                                // FieldTrialParser does not natively support
                                // size_t type, so use this ugly cast as
                                // workaround.
                                return reinterpret_cast<unsigned*>(
                                    &p->queue_length_packets);
                              }),
       FieldTrialStructMember(
           "queue_delay_ms",
           [](TimeScopedNetworkConfig* p) { return &p->queue_delay_ms; }),
       FieldTrialStructMember("delay_standard_deviation_ms",
                              [](TimeScopedNetworkConfig* p) {
                                return &p->delay_standard_deviation_ms;
                              }),
       FieldTrialStructMember(
           "link_capacity_kbps",
           [](TimeScopedNetworkConfig* p) { return &p->link_capacity_kbps; }),
       FieldTrialStructMember(
           "loss_percent",
           [](TimeScopedNetworkConfig* p) { return &p->loss_percent; }),
       FieldTrialStructMember(
           "allow_reordering",
           [](TimeScopedNetworkConfig* p) { return &p->allow_reordering; }),
       FieldTrialStructMember("avg_burst_loss_length",
                              [](TimeScopedNetworkConfig* p) {
                                return &p->avg_burst_loss_length;
                              }),
       FieldTrialStructMember(
           "packet_overhead",
           [](TimeScopedNetworkConfig* p) { return &p->packet_overhead; }),
       FieldTrialStructMember(
           "duration",
           [](TimeScopedNetworkConfig* p) { return &p->duration; })},
      {});
  ParseFieldTrial({&trials_list},
                  trials.Lookup(send ? "WebRTC-FakeNetworkSendConfig"
                                     : "WebRTC-FakeNetworkReceiveConfig"));
  return trials_list.Get();
}
} // namespace
// Creates a Call. If the fake-network field trials request simulated
// degradation on the send and/or receive path, the Call is wrapped in a
// DegradedCall that applies those configs.
std::unique_ptr<Call> CreateCall(const CallConfig& config) {
  std::vector<DegradedCall::TimeScopedNetworkConfig> send_degradation_configs =
      GetNetworkConfigs(config.env.field_trials(), /*send=*/true);
  std::vector<DegradedCall::TimeScopedNetworkConfig>
      receive_degradation_configs =
          GetNetworkConfigs(config.env.field_trials(), /*send=*/false);

  std::unique_ptr<Call> call = Call::Create(config);

  const bool degradation_requested = !send_degradation_configs.empty() ||
                                     !receive_degradation_configs.empty();
  if (!degradation_requested)
    return call;
  return std::make_unique<DegradedCall>(std::move(call),
                                        send_degradation_configs,
                                        receive_degradation_configs);
}
} // namespace webrtc

View file

@ -0,0 +1,25 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_CREATE_CALL_H_
#define CALL_CREATE_CALL_H_
#include <memory>
#include "call/call.h"
#include "call/call_config.h"
namespace webrtc {
std::unique_ptr<Call> CreateCall(const CallConfig& config);
} // namespace webrtc
#endif // CALL_CREATE_CALL_H_

View file

@ -0,0 +1,380 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/degraded_call.h"
#include <memory>
#include <utility>
#include "absl/strings/string_view.h"
#include "api/sequence_checker.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/thread.h"
namespace webrtc {
// Wraps a FakeNetworkPipe so that its processing is driven by tasks on
// `task_queue`. `call_alive` gates the posted tasks so none run after the
// owning DegradedCall is gone.
DegradedCall::FakeNetworkPipeOnTaskQueue::FakeNetworkPipeOnTaskQueue(
    TaskQueueBase* task_queue,
    rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive,
    Clock* clock,
    std::unique_ptr<NetworkBehaviorInterface> network_behavior)
    : clock_(clock),
      task_queue_(task_queue),
      call_alive_(std::move(call_alive)),
      pipe_(clock, std::move(network_behavior)) {}
// Enqueues an RTP packet into the fake pipe for delayed delivery to
// `transport`, then runs/schedules pipe processing.
void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtp(
    rtc::ArrayView<const uint8_t> packet,
    const PacketOptions& options,
    Transport* transport) {
  pipe_.SendRtp(packet, options, transport);
  Process();
}
// Enqueues an RTCP packet into the fake pipe for delayed delivery to
// `transport`, then runs/schedules pipe processing.
void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtcp(
    rtc::ArrayView<const uint8_t> packet,
    Transport* transport) {
  pipe_.SendRtcp(packet, transport);
  Process();
}
// Registers `transport` with the underlying pipe so queued packets addressed
// to it remain deliverable.
void DegradedCall::FakeNetworkPipeOnTaskQueue::AddActiveTransport(
    Transport* transport) {
  pipe_.AddActiveTransport(transport);
}

// Unregisters `transport` from the underlying pipe; must be called before
// the transport is destroyed.
void DegradedCall::FakeNetworkPipeOnTaskQueue::RemoveActiveTransport(
    Transport* transport) {
  pipe_.RemoveActiveTransport(transport);
}
// Runs the pipe once. If the pipe reports a time until its next required
// processing, schedules a delayed follow-up task (keeping at most one
// pending, tracked via `next_process_ms_`) and returns true; returns false
// when no further processing is currently needed.
bool DegradedCall::FakeNetworkPipeOnTaskQueue::Process() {
  pipe_.Process();
  auto time_to_next = pipe_.TimeUntilNextProcess();
  if (!time_to_next) {
    // Packet was probably sent immediately.
    return false;
  }
  task_queue_->PostTask(SafeTask(call_alive_, [this, time_to_next] {
    RTC_DCHECK_RUN_ON(task_queue_);
    int64_t next_process_time = *time_to_next + clock_->TimeInMilliseconds();
    // Only schedule if nothing is pending or this deadline is earlier than
    // the one already scheduled.
    if (!next_process_ms_ || next_process_time < *next_process_ms_) {
      next_process_ms_ = next_process_time;
      task_queue_->PostDelayedHighPrecisionTask(
          SafeTask(call_alive_,
                   [this] {
                     RTC_DCHECK_RUN_ON(task_queue_);
                     if (!Process()) {
                       // Nothing more to do; clear so a later Send can
                       // schedule again.
                       next_process_ms_.reset();
                     }
                   }),
          TimeDelta::Millis(*time_to_next));
    }
  }));
  return true;
}
// Adapter that sits in front of `real_transport`, diverting outgoing packets
// through `fake_network`. Registers the real transport with the pipe for the
// adapter's lifetime.
DegradedCall::FakeNetworkPipeTransportAdapter::FakeNetworkPipeTransportAdapter(
    FakeNetworkPipeOnTaskQueue* fake_network,
    Call* call,
    Clock* clock,
    Transport* real_transport)
    : network_pipe_(fake_network),
      call_(call),
      clock_(clock),
      real_transport_(real_transport) {
  network_pipe_->AddActiveTransport(real_transport);
}

DegradedCall::FakeNetworkPipeTransportAdapter::
    ~FakeNetworkPipeTransportAdapter() {
  // Deregister so the pipe drops any reference to the soon-dead transport.
  network_pipe_->RemoveActiveTransport(real_transport_);
}
bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtp(
    rtc::ArrayView<const uint8_t> packet,
    const PacketOptions& options) {
  // A call here comes from the RTP stack (probably pacer). We intercept it and
  // put it in the fake network pipe instead, but report to Call that it has
  // been sent, so that the bandwidth estimator sees the delay we add.
  network_pipe_->SendRtp(packet, options, real_transport_);
  if (options.packet_id != -1) {
    // Synthesize the sent-packet notification the real transport would have
    // produced, timestamped now (before the simulated network delay).
    rtc::SentPacket sent_packet;
    sent_packet.packet_id = options.packet_id;
    sent_packet.send_time_ms = clock_->TimeInMilliseconds();
    sent_packet.info.included_in_feedback = options.included_in_feedback;
    sent_packet.info.included_in_allocation = options.included_in_allocation;
    sent_packet.info.packet_size_bytes = packet.size();
    sent_packet.info.packet_type = rtc::PacketType::kData;
    call_->OnSentPacket(sent_packet);
  }
  return true;
}
// RTCP packets take the same detour through the fake pipe, but need no
// OnSentPacket notification.
bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtcp(
    rtc::ArrayView<const uint8_t> packet) {
  network_pipe_->SendRtcp(packet, real_transport_);
  return true;
}
// Wraps `call`, optionally degrading outgoing traffic (send_configs) and/or
// incoming traffic (receive_configs). An empty config vector disables that
// direction entirely; multiple configs rotate after each one's `duration`.
DegradedCall::DegradedCall(
    std::unique_ptr<Call> call,
    const std::vector<TimeScopedNetworkConfig>& send_configs,
    const std::vector<TimeScopedNetworkConfig>& receive_configs)
    : clock_(Clock::GetRealTimeClock()),
      call_(std::move(call)),
      call_alive_(PendingTaskSafetyFlag::CreateDetached()),
      send_config_index_(0),
      send_configs_(send_configs),
      send_simulated_network_(nullptr),
      receive_config_index_(0),
      receive_configs_(receive_configs) {
  if (!receive_configs_.empty()) {
    // Incoming packets are delayed/dropped by a pipe that ultimately hands
    // them to the real call's receiver.
    auto network = std::make_unique<SimulatedNetwork>(receive_configs_[0]);
    receive_simulated_network_ = network.get();
    receive_pipe_ =
        std::make_unique<webrtc::FakeNetworkPipe>(clock_, std::move(network));
    receive_pipe_->SetReceiver(call_->Receiver());
    if (receive_configs_.size() > 1) {
      // Arm rotation to the next receive config.
      call_->network_thread()->PostDelayedTask(
          SafeTask(call_alive_, [this] { UpdateReceiveNetworkConfig(); }),
          receive_configs_[0].duration);
    }
  }
  if (!send_configs_.empty()) {
    // Outgoing packets go through a pipe bound to the network thread; the
    // per-stream FakeNetworkPipeTransportAdapter routes into it.
    auto network = std::make_unique<SimulatedNetwork>(send_configs_[0]);
    send_simulated_network_ = network.get();
    send_pipe_ = std::make_unique<FakeNetworkPipeOnTaskQueue>(
        call_->network_thread(), call_alive_, clock_, std::move(network));
    if (send_configs_.size() > 1) {
      // Arm rotation to the next send config.
      call_->network_thread()->PostDelayedTask(
          SafeTask(call_alive_, [this] { UpdateSendNetworkConfig(); }),
          send_configs_[0].duration);
    }
  }
}
DegradedCall::~DegradedCall() {
  RTC_DCHECK_RUN_ON(call_->worker_thread());
  // Thread synchronization is required to call `SetNotAlive`.
  // Otherwise, when the `DegradedCall` object is destroyed but
  // `SetNotAlive` has not yet been called,
  // another Closure guarded by `call_alive_` may be called.
  // TODO(https://crbug.com/webrtc/12649): Remove this block-invoke.
  static_cast<rtc::Thread*>(call_->network_thread())
      ->BlockingCall(
          [flag = std::move(call_alive_)]() mutable { flag->SetNotAlive(); });
}
// Creates an audio send stream. When send-side degradation is configured,
// the stream's outgoing transport is replaced by an adapter that detours
// packets through the fake send pipe.
AudioSendStream* DegradedCall::CreateAudioSendStream(
    const AudioSendStream::Config& config) {
  if (send_configs_.empty()) {
    // No degradation requested; pass straight through to the wrapped call.
    return call_->CreateAudioSendStream(config);
  }
  auto adapter = std::make_unique<FakeNetworkPipeTransportAdapter>(
      send_pipe_.get(), call_.get(), clock_, config.send_transport);
  AudioSendStream::Config degraded_config = config;
  degraded_config.send_transport = adapter.get();
  AudioSendStream* stream = call_->CreateAudioSendStream(degraded_config);
  if (stream != nullptr) {
    // Keep the adapter alive for as long as the stream exists.
    audio_send_transport_adapters_[stream] = std::move(adapter);
  }
  return stream;
}
void DegradedCall::DestroyAudioSendStream(AudioSendStream* send_stream) {
  // Destroy the stream first; its transport adapter is released afterwards,
  // since the stream may still reference it during teardown.
  call_->DestroyAudioSendStream(send_stream);
  audio_send_transport_adapters_.erase(send_stream);
}
// Audio receive streams are not wrapped; incoming degradation is applied in
// DeliverRtpPacket()/DeliverRtcpPacket() via the receive pipe instead.
AudioReceiveStreamInterface* DegradedCall::CreateAudioReceiveStream(
    const AudioReceiveStreamInterface::Config& config) {
  return call_->CreateAudioReceiveStream(config);
}
void DegradedCall::DestroyAudioReceiveStream(
    AudioReceiveStreamInterface* receive_stream) {
  call_->DestroyAudioReceiveStream(receive_stream);
}
// Creates a video send stream, detouring its outgoing packets through the
// fake send pipe when send-side degradation is configured.
VideoSendStream* DegradedCall::CreateVideoSendStream(
    VideoSendStream::Config config,
    VideoEncoderConfig encoder_config) {
  if (send_configs_.empty()) {
    return call_->CreateVideoSendStream(std::move(config),
                                        std::move(encoder_config));
  }
  auto adapter = std::make_unique<FakeNetworkPipeTransportAdapter>(
      send_pipe_.get(), call_.get(), clock_, config.send_transport);
  config.send_transport = adapter.get();
  VideoSendStream* stream = call_->CreateVideoSendStream(
      std::move(config), std::move(encoder_config));
  if (stream != nullptr) {
    // The adapter must outlive the stream that sends through it.
    video_send_transport_adapters_[stream] = std::move(adapter);
  }
  return stream;
}
// Overload taking an explicit FecController; identical transport-adapter
// handling as the two-argument overload above.
VideoSendStream* DegradedCall::CreateVideoSendStream(
    VideoSendStream::Config config,
    VideoEncoderConfig encoder_config,
    std::unique_ptr<FecController> fec_controller) {
  if (send_configs_.empty()) {
    return call_->CreateVideoSendStream(std::move(config),
                                        std::move(encoder_config),
                                        std::move(fec_controller));
  }
  auto adapter = std::make_unique<FakeNetworkPipeTransportAdapter>(
      send_pipe_.get(), call_.get(), clock_, config.send_transport);
  config.send_transport = adapter.get();
  VideoSendStream* stream = call_->CreateVideoSendStream(
      std::move(config), std::move(encoder_config), std::move(fec_controller));
  if (stream != nullptr) {
    video_send_transport_adapters_[stream] = std::move(adapter);
  }
  return stream;
}
void DegradedCall::DestroyVideoSendStream(VideoSendStream* send_stream) {
  // Destroy the stream before releasing its transport adapter (see
  // DestroyAudioSendStream for rationale).
  call_->DestroyVideoSendStream(send_stream);
  video_send_transport_adapters_.erase(send_stream);
}
// Receive streams and FlexFEC are forwarded untouched; incoming degradation
// happens at the PacketReceiver level (DeliverRtpPacket/DeliverRtcpPacket).
VideoReceiveStreamInterface* DegradedCall::CreateVideoReceiveStream(
    VideoReceiveStreamInterface::Config configuration) {
  return call_->CreateVideoReceiveStream(std::move(configuration));
}
void DegradedCall::DestroyVideoReceiveStream(
    VideoReceiveStreamInterface* receive_stream) {
  call_->DestroyVideoReceiveStream(receive_stream);
}
FlexfecReceiveStream* DegradedCall::CreateFlexfecReceiveStream(
    const FlexfecReceiveStream::Config config) {
  return call_->CreateFlexfecReceiveStream(std::move(config));
}
void DegradedCall::DestroyFlexfecReceiveStream(
    FlexfecReceiveStream* receive_stream) {
  call_->DestroyFlexfecReceiveStream(receive_stream);
}
void DegradedCall::AddAdaptationResource(
    rtc::scoped_refptr<Resource> resource) {
  call_->AddAdaptationResource(std::move(resource));
}
// Interposes this object as the packet receiver only when receive-side
// degradation is configured; otherwise exposes the wrapped call's receiver.
PacketReceiver* DegradedCall::Receiver() {
  return receive_configs_.empty() ? call_->Receiver() : this;
}
// The methods below forward directly to the wrapped Call instance.
RtpTransportControllerSendInterface*
DegradedCall::GetTransportControllerSend() {
  return call_->GetTransportControllerSend();
}
Call::Stats DegradedCall::GetStats() const {
  return call_->GetStats();
}
const FieldTrialsView& DegradedCall::trials() const {
  return call_->trials();
}
TaskQueueBase* DegradedCall::network_thread() const {
  return call_->network_thread();
}
TaskQueueBase* DegradedCall::worker_thread() const {
  return call_->worker_thread();
}
void DegradedCall::SignalChannelNetworkState(MediaType media,
                                             NetworkState state) {
  call_->SignalChannelNetworkState(media, state);
}
void DegradedCall::OnAudioTransportOverheadChanged(
    int transport_overhead_per_packet) {
  call_->OnAudioTransportOverheadChanged(transport_overhead_per_packet);
}
void DegradedCall::OnLocalSsrcUpdated(AudioReceiveStreamInterface& stream,
                                      uint32_t local_ssrc) {
  call_->OnLocalSsrcUpdated(stream, local_ssrc);
}
void DegradedCall::OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
                                      uint32_t local_ssrc) {
  call_->OnLocalSsrcUpdated(stream, local_ssrc);
}
void DegradedCall::OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
                                      uint32_t local_ssrc) {
  call_->OnLocalSsrcUpdated(stream, local_ssrc);
}
void DegradedCall::OnUpdateSyncGroup(AudioReceiveStreamInterface& stream,
                                     absl::string_view sync_group) {
  call_->OnUpdateSyncGroup(stream, sync_group);
}
void DegradedCall::OnSentPacket(const rtc::SentPacket& sent_packet) {
  if (send_configs_.empty()) {
    call_->OnSentPacket(sent_packet);
    return;
  }
  // With a degraded send-transport, the transport adapter has already
  // notified the call of a synthetic network send time. Discard the actual
  // one here so the BWE only sees the delay we add.
}
// PacketReceiver implementation: queue the incoming packet in the receive
// pipe, then immediately process to deliver anything that is already due.
void DegradedCall::DeliverRtpPacket(
    MediaType media_type,
    RtpPacketReceived packet,
    OnUndemuxablePacketHandler undemuxable_packet_handler) {
  RTC_DCHECK_RUN_ON(&received_packet_sequence_checker_);
  receive_pipe_->DeliverRtpPacket(media_type, std::move(packet),
                                  std::move(undemuxable_packet_handler));
  receive_pipe_->Process();
}
void DegradedCall::DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) {
  RTC_DCHECK_RUN_ON(&received_packet_sequence_checker_);
  receive_pipe_->DeliverRtcpPacket(std::move(packet));
  receive_pipe_->Process();
}
void DegradedCall::SetClientBitratePreferences(
    const webrtc::BitrateSettings& preferences) {
  call_->SetClientBitratePreferences(preferences);
}
// Advances to the next send config (wrapping around) and re-arms the delayed
// task for the following rotation. Only scheduled when >1 config exists.
void DegradedCall::UpdateSendNetworkConfig() {
  send_config_index_ = (send_config_index_ + 1) % send_configs_.size();
  send_simulated_network_->SetConfig(send_configs_[send_config_index_]);
  call_->network_thread()->PostDelayedTask(
      SafeTask(call_alive_, [this] { UpdateSendNetworkConfig(); }),
      send_configs_[send_config_index_].duration);
}
// Same rotation scheme for the receive-side configs.
void DegradedCall::UpdateReceiveNetworkConfig() {
  receive_config_index_ = (receive_config_index_ + 1) % receive_configs_.size();
  receive_simulated_network_->SetConfig(
      receive_configs_[receive_config_index_]);
  call_->network_thread()->PostDelayedTask(
      SafeTask(call_alive_, [this] { UpdateReceiveNetworkConfig(); }),
      receive_configs_[receive_config_index_].duration);
}
} // namespace webrtc

View file

@ -0,0 +1,202 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_DEGRADED_CALL_H_
#define CALL_DEGRADED_CALL_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/call/transport.h"
#include "api/fec_controller.h"
#include "api/media_types.h"
#include "api/rtp_headers.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/test/simulated_network.h"
#include "call/audio_receive_stream.h"
#include "call/audio_send_stream.h"
#include "call/call.h"
#include "call/fake_network_pipe.h"
#include "call/flexfec_receive_stream.h"
#include "call/packet_receiver.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "call/simulated_network.h"
#include "call/video_receive_stream.h"
#include "call/video_send_stream.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/network/sent_packet.h"
#include "rtc_base/task_queue.h"
#include "system_wrappers/include/clock.h"
#include "video/config/video_encoder_config.h"
namespace webrtc {
// A Call decorator that simulates a degraded network. Outgoing packets are
// detoured through a fake network pipe via per-stream transport adapters;
// incoming packets are detoured by interposing this object as the
// PacketReceiver. Each direction is enabled by passing a non-empty config
// vector; multiple configs rotate after each one's `duration`.
class DegradedCall : public Call, private PacketReceiver {
 public:
  struct TimeScopedNetworkConfig : public BuiltInNetworkBehaviorConfig {
    // How long this config applies before rotating to the next one.
    TimeDelta duration = TimeDelta::PlusInfinity();
  };
  explicit DegradedCall(
      std::unique_ptr<Call> call,
      const std::vector<TimeScopedNetworkConfig>& send_configs,
      const std::vector<TimeScopedNetworkConfig>& receive_configs);
  ~DegradedCall() override;
  // Implements Call.
  AudioSendStream* CreateAudioSendStream(
      const AudioSendStream::Config& config) override;
  void DestroyAudioSendStream(AudioSendStream* send_stream) override;
  AudioReceiveStreamInterface* CreateAudioReceiveStream(
      const AudioReceiveStreamInterface::Config& config) override;
  void DestroyAudioReceiveStream(
      AudioReceiveStreamInterface* receive_stream) override;
  VideoSendStream* CreateVideoSendStream(
      VideoSendStream::Config config,
      VideoEncoderConfig encoder_config) override;
  VideoSendStream* CreateVideoSendStream(
      VideoSendStream::Config config,
      VideoEncoderConfig encoder_config,
      std::unique_ptr<FecController> fec_controller) override;
  void DestroyVideoSendStream(VideoSendStream* send_stream) override;
  VideoReceiveStreamInterface* CreateVideoReceiveStream(
      VideoReceiveStreamInterface::Config configuration) override;
  void DestroyVideoReceiveStream(
      VideoReceiveStreamInterface* receive_stream) override;
  FlexfecReceiveStream* CreateFlexfecReceiveStream(
      const FlexfecReceiveStream::Config config) override;
  void DestroyFlexfecReceiveStream(
      FlexfecReceiveStream* receive_stream) override;
  void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
  PacketReceiver* Receiver() override;
  RtpTransportControllerSendInterface* GetTransportControllerSend() override;
  Stats GetStats() const override;
  const FieldTrialsView& trials() const override;
  TaskQueueBase* network_thread() const override;
  TaskQueueBase* worker_thread() const override;
  void SignalChannelNetworkState(MediaType media, NetworkState state) override;
  void OnAudioTransportOverheadChanged(
      int transport_overhead_per_packet) override;
  void OnLocalSsrcUpdated(AudioReceiveStreamInterface& stream,
                          uint32_t local_ssrc) override;
  void OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
                          uint32_t local_ssrc) override;
  void OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
                          uint32_t local_ssrc) override;
  void OnUpdateSyncGroup(AudioReceiveStreamInterface& stream,
                         absl::string_view sync_group) override;
  void OnSentPacket(const rtc::SentPacket& sent_packet) override;
 protected:
  // Implements PacketReceiver.
  void DeliverRtpPacket(
      MediaType media_type,
      RtpPacketReceived packet,
      OnUndemuxablePacketHandler undemuxable_packet_handler) override;
  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
 private:
  // Owns a FakeNetworkPipe whose Process() calls are (re)scheduled as delayed
  // tasks on the network task queue.
  class FakeNetworkPipeOnTaskQueue {
   public:
    FakeNetworkPipeOnTaskQueue(
        TaskQueueBase* task_queue,
        rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive,
        Clock* clock,
        std::unique_ptr<NetworkBehaviorInterface> network_behavior);
    void SendRtp(rtc::ArrayView<const uint8_t> packet,
                 const PacketOptions& options,
                 Transport* transport);
    void SendRtcp(rtc::ArrayView<const uint8_t> packet, Transport* transport);
    void AddActiveTransport(Transport* transport);
    void RemoveActiveTransport(Transport* transport);
   private:
    // Try to process packets on the fake network queue.
    // Returns true if call resulted in a delayed process, false if queue empty.
    bool Process();
    Clock* const clock_;
    TaskQueueBase* const task_queue_;
    rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive_;
    FakeNetworkPipe pipe_;
    // Absolute time (ms) of the earliest scheduled Process(), if any.
    absl::optional<int64_t> next_process_ms_ RTC_GUARDED_BY(&task_queue_);
  };
  // For audio/video send stream, a TransportAdapter instance is used to
  // intercept packets to be sent, and put them into a common FakeNetworkPipe
  // in such as way that they will eventually (unless dropped) be forwarded to
  // the correct Transport for that stream.
  class FakeNetworkPipeTransportAdapter : public Transport {
   public:
    FakeNetworkPipeTransportAdapter(FakeNetworkPipeOnTaskQueue* fake_network,
                                    Call* call,
                                    Clock* clock,
                                    Transport* real_transport);
    ~FakeNetworkPipeTransportAdapter();
    bool SendRtp(rtc::ArrayView<const uint8_t> packet,
                 const PacketOptions& options) override;
    bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;
   private:
    FakeNetworkPipeOnTaskQueue* const network_pipe_;
    Call* const call_;
    Clock* const clock_;
    Transport* const real_transport_;
  };
  void SetClientBitratePreferences(
      const webrtc::BitrateSettings& preferences) override;
  void UpdateSendNetworkConfig();
  void UpdateReceiveNetworkConfig();
  Clock* const clock_;
  const std::unique_ptr<Call> call_;
  // For cancelling tasks on the network thread when DegradedCall is destroyed
  rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive_;
  size_t send_config_index_;
  const std::vector<TimeScopedNetworkConfig> send_configs_;
  // Raw pointer into the network behavior owned by `send_pipe_`.
  SimulatedNetwork* send_simulated_network_;
  std::unique_ptr<FakeNetworkPipeOnTaskQueue> send_pipe_;
  // Adapters are keyed by the stream using them; erased on stream destroy.
  std::map<AudioSendStream*, std::unique_ptr<FakeNetworkPipeTransportAdapter>>
      audio_send_transport_adapters_;
  std::map<VideoSendStream*, std::unique_ptr<FakeNetworkPipeTransportAdapter>>
      video_send_transport_adapters_;
  size_t receive_config_index_;
  const std::vector<TimeScopedNetworkConfig> receive_configs_;
  // Raw pointer into the network behavior owned by `receive_pipe_`.
  SimulatedNetwork* receive_simulated_network_;
  SequenceChecker received_packet_sequence_checker_;
  std::unique_ptr<FakeNetworkPipe> receive_pipe_
      RTC_GUARDED_BY(received_packet_sequence_checker_);
};
} // namespace webrtc
#endif // CALL_DEGRADED_CALL_H_

View file

@ -0,0 +1,382 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/fake_network_pipe.h"
#include <string.h>
#include <algorithm>
#include <queue>
#include <utility>
#include <vector>
#include "api/media_types.h"
#include "api/units/timestamp.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
namespace {
// Minimum interval between "Network queue" log lines in Process().
constexpr int64_t kLogIntervalMs = 5000;
}  // namespace
// Generic constructor used for both directions: `transport` non-null for
// outgoing packets, `media_type`/`packet_time_us` for incoming ones.
NetworkPacket::NetworkPacket(rtc::CopyOnWriteBuffer packet,
                             int64_t send_time,
                             int64_t arrival_time,
                             absl::optional<PacketOptions> packet_options,
                             bool is_rtcp,
                             MediaType media_type,
                             absl::optional<int64_t> packet_time_us,
                             Transport* transport)
    : packet_(std::move(packet)),
      send_time_(send_time),
      arrival_time_(arrival_time),
      packet_options_(packet_options),
      is_rtcp_(is_rtcp),
      media_type_(media_type),
      packet_time_us_(packet_time_us),
      transport_(transport) {}
// Constructor for an already-parsed incoming RTP packet; the parsed packet is
// retained so it can be re-delivered with an adjusted arrival time.
NetworkPacket::NetworkPacket(RtpPacketReceived packet_received,
                             MediaType media_type,
                             int64_t send_time,
                             int64_t arrival_time)
    : packet_(packet_received.Buffer()),
      send_time_(send_time),
      arrival_time_(arrival_time),
      is_rtcp_(false),
      media_type_(media_type),
      packet_time_us_(packet_received.arrival_time().us()),
      packet_received_(std::move(packet_received)),
      transport_(nullptr) {}
// Move constructor; NetworkPacket is move-only (payload buffer is not deep
// copied).
NetworkPacket::NetworkPacket(NetworkPacket&& o)
    : packet_(std::move(o.packet_)),
      send_time_(o.send_time_),
      arrival_time_(o.arrival_time_),
      packet_options_(o.packet_options_),
      is_rtcp_(o.is_rtcp_),
      media_type_(o.media_type_),
      packet_time_us_(o.packet_time_us_),
      packet_received_(std::move(o.packet_received_)),
      transport_(o.transport_) {}
NetworkPacket::~NetworkPacket() = default;
// Move assignment. Mirrors the move constructor; in particular
// `packet_received_` is moved rather than copied (the previous code copied
// it, needlessly duplicating the parsed packet on every move-assign).
NetworkPacket& NetworkPacket::operator=(NetworkPacket&& o) {
  packet_ = std::move(o.packet_);
  send_time_ = o.send_time_;
  arrival_time_ = o.arrival_time_;
  packet_options_ = o.packet_options_;
  is_rtcp_ = o.is_rtcp_;
  media_type_ = o.media_type_;
  packet_time_us_ = o.packet_time_us_;
  packet_received_ = std::move(o.packet_received_);
  transport_ = o.transport_;
  return *this;
}
// Convenience constructors delegating to the fully-specified one.
FakeNetworkPipe::FakeNetworkPipe(
    Clock* clock,
    std::unique_ptr<NetworkBehaviorInterface> network_behavior)
    : FakeNetworkPipe(clock, std::move(network_behavior), nullptr, 1) {}
FakeNetworkPipe::FakeNetworkPipe(
    Clock* clock,
    std::unique_ptr<NetworkBehaviorInterface> network_behavior,
    PacketReceiver* receiver)
    : FakeNetworkPipe(clock, std::move(network_behavior), receiver, 1) {}
FakeNetworkPipe::FakeNetworkPipe(
    Clock* clock,
    std::unique_ptr<NetworkBehaviorInterface> network_behavior,
    PacketReceiver* receiver,
    uint64_t seed)
    // NOTE(review): `seed` is accepted but never stored or used here;
    // randomness lives in `network_behavior`.
    : clock_(clock),
      network_behavior_(std::move(network_behavior)),
      receiver_(receiver),
      clock_offset_ms_(0),
      dropped_packets_(0),
      sent_packets_(0),
      total_packet_delay_us_(0),
      last_log_time_us_(clock_->TimeInMicroseconds()) {}
FakeNetworkPipe::~FakeNetworkPipe() {
  // Every AddActiveTransport() must be balanced by RemoveActiveTransport()
  // before the pipe is destroyed.
  RTC_DCHECK(active_transports_.empty());
}
void FakeNetworkPipe::SetReceiver(PacketReceiver* receiver) {
  MutexLock lock(&config_lock_);
  receiver_ = receiver;
}
// Transport registrations are reference-counted: a transport may be added by
// several streams and stays valid until the last removal.
void FakeNetworkPipe::AddActiveTransport(Transport* transport) {
  MutexLock lock(&config_lock_);
  active_transports_[transport]++;
}
void FakeNetworkPipe::RemoveActiveTransport(Transport* transport) {
  MutexLock lock(&config_lock_);
  auto it = active_transports_.find(transport);
  RTC_CHECK(it != active_transports_.end());
  if (--(it->second) == 0) {
    active_transports_.erase(it);
  }
}
// Transport-direction entry points. The packet is queued; when it becomes
// deliverable it is forwarded to `transport` (which must be registered via
// AddActiveTransport). Always returns true, even if the behavior drops it.
bool FakeNetworkPipe::SendRtp(rtc::ArrayView<const uint8_t> packet,
                              const PacketOptions& options,
                              Transport* transport) {
  RTC_DCHECK(transport);
  EnqueuePacket(rtc::CopyOnWriteBuffer(packet), options, false, transport);
  return true;
}
bool FakeNetworkPipe::SendRtcp(rtc::ArrayView<const uint8_t> packet,
                               Transport* transport) {
  RTC_DCHECK(transport);
  EnqueuePacket(rtc::CopyOnWriteBuffer(packet), absl::nullopt, true, transport);
  return true;
}
// PacketReceiver-direction entry points.
void FakeNetworkPipe::DeliverRtpPacket(
    MediaType media_type,
    RtpPacketReceived packet,
    OnUndemuxablePacketHandler undemuxable_packet_handler) {
  // Note: `undemuxable_packet_handler` is intentionally dropped; delivery
  // later substitutes a logging-only handler (see DeliverNetworkPacket).
  MutexLock lock(&process_lock_);
  int64_t time_now_us = clock_->TimeInMicroseconds();
  EnqueuePacket(
      NetworkPacket(std::move(packet), media_type, time_now_us, time_now_us));
}
void FakeNetworkPipe::DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) {
  EnqueuePacket(std::move(packet), absl::nullopt, true, MediaType::ANY,
                absl::nullopt);
}
// Sets an offset (ms) added to the reported packet time on delivery, to
// simulate clock drift between sender and receiver.
void FakeNetworkPipe::SetClockOffset(int64_t offset_ms) {
  MutexLock lock(&config_lock_);
  clock_offset_ms_ = offset_ms;
}
FakeNetworkPipe::StoredPacket::StoredPacket(NetworkPacket&& packet)
    : packet(std::move(packet)) {}
// Buffer-based enqueue used for the PacketReceiver direction. Takes the
// process lock, stamps the current time as both send and arrival time.
bool FakeNetworkPipe::EnqueuePacket(rtc::CopyOnWriteBuffer packet,
                                    absl::optional<PacketOptions> options,
                                    bool is_rtcp,
                                    MediaType media_type,
                                    absl::optional<int64_t> packet_time_us) {
  MutexLock lock(&process_lock_);
  int64_t time_now_us = clock_->TimeInMicroseconds();
  return EnqueuePacket(NetworkPacket(std::move(packet), time_now_us,
                                     time_now_us, options, is_rtcp, media_type,
                                     packet_time_us, nullptr));
}
// Buffer-based enqueue used for the Transport direction.
bool FakeNetworkPipe::EnqueuePacket(rtc::CopyOnWriteBuffer packet,
                                    absl::optional<PacketOptions> options,
                                    bool is_rtcp,
                                    Transport* transport) {
  MutexLock lock(&process_lock_);
  int64_t time_now_us = clock_->TimeInMicroseconds();
  return EnqueuePacket(NetworkPacket(std::move(packet), time_now_us,
                                     time_now_us, options, is_rtcp,
                                     MediaType::ANY, absl::nullopt, transport));
}
// Hands the packet to the network behavior. The behavior identifies packets
// by an opaque id; we use the address of the StoredPacket, which is stable
// because std::deque never relocates existing elements on push/pop at the
// ends. Returns false (and records a drop) if the behavior refuses it.
// Note: the id is kept as uint64_t throughout, matching
// PacketInFlightInfo::packet_id, instead of round-tripping through int64_t.
bool FakeNetworkPipe::EnqueuePacket(NetworkPacket&& net_packet) {
  int64_t send_time_us = net_packet.send_time();
  size_t packet_size = net_packet.data_length();
  packets_in_flight_.emplace_back(StoredPacket(std::move(net_packet)));
  uint64_t packet_id = reinterpret_cast<uint64_t>(&packets_in_flight_.back());
  bool sent = network_behavior_->EnqueuePacket(
      PacketInFlightInfo(packet_size, send_time_us, packet_id));
  if (!sent) {
    packets_in_flight_.pop_back();
    ++dropped_packets_;
  }
  return sent;
}
// Despite the name, returns the dropped fraction in [0, 1] (not scaled to
// 0-100): dropped / (sent + dropped).
float FakeNetworkPipe::PercentageLoss() {
  MutexLock lock(&process_lock_);
  if (sent_packets_ == 0)
    return 0;
  return static_cast<float>(dropped_packets_) /
         (sent_packets_ + dropped_packets_);
}
// Average added delay in milliseconds over all delivered packets.
int FakeNetworkPipe::AverageDelay() {
  MutexLock lock(&process_lock_);
  if (sent_packets_ == 0)
    return 0;
  return static_cast<int>(total_packet_delay_us_ /
                          (1000 * static_cast<int64_t>(sent_packets_)));
}
size_t FakeNetworkPipe::DroppedPackets() {
  MutexLock lock(&process_lock_);
  return dropped_packets_;
}
size_t FakeNetworkPipe::SentPackets() {
  MutexLock lock(&process_lock_);
  return sent_packets_;
}
// Dequeues packets the network behavior considers deliverable now, updates
// stats under `process_lock_`, then delivers them under `config_lock_` (so
// receiver/transport callbacks run without holding the process lock).
void FakeNetworkPipe::Process() {
  int64_t time_now_us;
  std::queue<NetworkPacket> packets_to_deliver;
  {
    MutexLock lock(&process_lock_);
    time_now_us = clock_->TimeInMicroseconds();
    // Periodically log how long the head-of-line packet has been queued.
    if (time_now_us - last_log_time_us_ > kLogIntervalMs * 1000) {
      int64_t queueing_delay_us = 0;
      if (!packets_in_flight_.empty())
        queueing_delay_us =
            time_now_us - packets_in_flight_.front().packet.send_time();
      RTC_LOG(LS_INFO) << "Network queue: " << queueing_delay_us / 1000
                       << " ms.";
      last_log_time_us_ = time_now_us;
    }
    std::vector<PacketDeliveryInfo> delivery_infos =
        network_behavior_->DequeueDeliverablePackets(time_now_us);
    for (auto& delivery_info : delivery_infos) {
      // In the common case where no reordering happens, find will return early
      // as the first packet will be a match.
      auto packet_it =
          std::find_if(packets_in_flight_.begin(), packets_in_flight_.end(),
                       [&delivery_info](StoredPacket& packet_ref) {
                         return reinterpret_cast<uint64_t>(&packet_ref) ==
                                delivery_info.packet_id;
                       });
      // Check that the packet is in the deque of packets in flight.
      RTC_CHECK(packet_it != packets_in_flight_.end());
      // Check that the packet is not already removed.
      RTC_DCHECK(!packet_it->removed);
      // Mark-and-sweep: the entry is flagged removed so deque addresses of
      // later (still in-flight) packets remain valid.
      NetworkPacket packet = std::move(packet_it->packet);
      packet_it->removed = true;
      // Cleanup of removed packets at the beginning of the deque.
      while (!packets_in_flight_.empty() &&
             packets_in_flight_.front().removed) {
        packets_in_flight_.pop_front();
      }
      if (delivery_info.receive_time_us != PacketDeliveryInfo::kNotReceived) {
        int64_t added_delay_us =
            delivery_info.receive_time_us - packet.send_time();
        packet.IncrementArrivalTime(added_delay_us);
        packets_to_deliver.emplace(std::move(packet));
        // `time_now_us` might be later than when the packet should have
        // arrived, due to NetworkProcess being called too late. For stats, use
        // the time it should have been on the link.
        total_packet_delay_us_ += added_delay_us;
        ++sent_packets_;
      } else {
        // The behavior chose to drop this packet (simulated loss).
        ++dropped_packets_;
      }
    }
  }
  MutexLock lock(&config_lock_);
  while (!packets_to_deliver.empty()) {
    NetworkPacket packet = std::move(packets_to_deliver.front());
    packets_to_deliver.pop();
    DeliverNetworkPacket(&packet);
  }
}
// Hands a deliverable packet to its destination: a registered Transport
// (outgoing direction) or the configured PacketReceiver (incoming direction).
// Must be called with `config_lock_` held (see DeliverPacketWithLock).
void FakeNetworkPipe::DeliverNetworkPacket(NetworkPacket* packet) {
  Transport* transport = packet->transport();
  if (transport) {
    // A pipe is used either with transports or with a receiver, not both.
    RTC_DCHECK(!receiver_);
    if (active_transports_.find(transport) == active_transports_.end()) {
      // Transport has been destroyed, ignore this packet.
      return;
    }
    if (packet->is_rtcp()) {
      transport->SendRtcp(
          rtc::MakeArrayView(packet->data(), packet->data_length()));
    } else {
      transport->SendRtp(
          rtc::MakeArrayView(packet->data(), packet->data_length()),
          packet->packet_options());
    }
  } else if (receiver_) {
    // Shift the reported packet time by the simulated queueing delay plus
    // the configured clock offset.
    int64_t packet_time_us = packet->packet_time_us().value_or(-1);
    if (packet_time_us != -1) {
      int64_t queue_time_us = packet->arrival_time() - packet->send_time();
      RTC_CHECK(queue_time_us >= 0);
      packet_time_us += queue_time_us;
      packet_time_us += (clock_offset_ms_ * 1000);
    }
    if (packet->is_rtcp()) {
      receiver_->DeliverRtcpPacket(std::move(*packet->raw_packet()));
    } else if (packet->packet_received()) {
      packet->packet_received()->set_arrival_time(
          Timestamp::Micros(packet_time_us));
      receiver_->DeliverRtpPacket(
          packet->media_type(), *packet->packet_received(),
          [](const RtpPacketReceived& packet) {
            RTC_LOG(LS_WARNING)
                << "Unexpected failed demuxing packet in FakeNetworkPipe, "
                   "Ssrc: "
                << packet.Ssrc() << " seq : " << packet.SequenceNumber();
            return false;
          });
    }
  }
}
// Milliseconds until the next packet is deliverable (rounded to nearest ms,
// clamped at 0), or nullopt if nothing is queued.
absl::optional<int64_t> FakeNetworkPipe::TimeUntilNextProcess() {
  MutexLock lock(&process_lock_);
  absl::optional<int64_t> delivery_us = network_behavior_->NextDeliveryTimeUs();
  if (delivery_us) {
    int64_t delay_us = *delivery_us - clock_->TimeInMicroseconds();
    return std::max<int64_t>((delay_us + 500) / 1000, 0);
  }
  return absl::nullopt;
}
bool FakeNetworkPipe::HasReceiver() const {
  MutexLock lock(&config_lock_);
  return receiver_ != nullptr;
}
// Helper for subclasses: delivers a single packet while holding
// `config_lock_`.
void FakeNetworkPipe::DeliverPacketWithLock(NetworkPacket* packet) {
  MutexLock lock(&config_lock_);
  DeliverNetworkPacket(packet);
}
// Clears drop/sent/delay counters; does not touch queued packets.
void FakeNetworkPipe::ResetStats() {
  MutexLock lock(&process_lock_);
  dropped_packets_ = 0;
  sent_packets_ = 0;
  total_packet_delay_us_ = 0;
}
int64_t FakeNetworkPipe::GetTimeInMicroseconds() const {
  return clock_->TimeInMicroseconds();
}
} // namespace webrtc

View file

@ -0,0 +1,230 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_FAKE_NETWORK_PIPE_H_
#define CALL_FAKE_NETWORK_PIPE_H_
#include <deque>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include <string>
#include <vector>
#include "api/call/transport.h"
#include "api/test/simulated_network.h"
#include "call/simulated_packet_receiver.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
class Clock;
class PacketReceiver;
enum class MediaType;
// A packet traveling through the fake network, carrying either a raw buffer
// headed for a Transport (outgoing) or a parsed RtpPacketReceived headed for
// a PacketReceiver (incoming). Move-only.
class NetworkPacket {
 public:
  NetworkPacket(rtc::CopyOnWriteBuffer packet,
                int64_t send_time,
                int64_t arrival_time,
                absl::optional<PacketOptions> packet_options,
                bool is_rtcp,
                MediaType media_type,
                absl::optional<int64_t> packet_time_us,
                Transport* transport);
  NetworkPacket(RtpPacketReceived packet,
                MediaType media_type,
                int64_t send_time,
                int64_t arrival_time);
  // Disallow copy constructor and copy assignment (no deep copies of `data_`).
  NetworkPacket(const NetworkPacket&) = delete;
  ~NetworkPacket();
  NetworkPacket& operator=(const NetworkPacket&) = delete;
  // Allow move constructor/assignment, so that we can use in stl containers.
  NetworkPacket(NetworkPacket&&);
  NetworkPacket& operator=(NetworkPacket&&);
  const uint8_t* data() const { return packet_.data(); }
  size_t data_length() const { return packet_.size(); }
  rtc::CopyOnWriteBuffer* raw_packet() { return &packet_; }
  int64_t send_time() const { return send_time_; }
  int64_t arrival_time() const { return arrival_time_; }
  // Pushes the scheduled arrival time back by `extra_delay` (microseconds).
  void IncrementArrivalTime(int64_t extra_delay) {
    arrival_time_ += extra_delay;
  }
  PacketOptions packet_options() const {
    return packet_options_.value_or(PacketOptions());
  }
  bool is_rtcp() const { return is_rtcp_; }
  MediaType media_type() const { return media_type_; }
  absl::optional<int64_t> packet_time_us() const { return packet_time_us_; }
  RtpPacketReceived* packet_received() {
    return packet_received_ ? &packet_received_.value() : nullptr;
  }
  // Note: returns by value, i.e. copies the contained packet if present.
  absl::optional<RtpPacketReceived> packet_received() const {
    return packet_received_;
  }
  Transport* transport() const { return transport_; }
 private:
  rtc::CopyOnWriteBuffer packet_;
  // The time the packet was sent out on the network.
  int64_t send_time_;
  // The time the packet should arrive at the receiver.
  int64_t arrival_time_;
  // If using a Transport for outgoing degradation, populate with
  // PacketOptions (transport-wide sequence number) for RTP.
  absl::optional<PacketOptions> packet_options_;
  bool is_rtcp_;
  // If using a PacketReceiver for incoming degradation, populate with
  // appropriate MediaType and packet time. This type/timing will be kept and
  // forwarded. The packet time might be altered to reflect time spent in fake
  // network pipe.
  MediaType media_type_;
  absl::optional<int64_t> packet_time_us_;
  absl::optional<RtpPacketReceived> packet_received_;
  // Non-owning; nullptr for incoming-direction packets.
  Transport* transport_;
};
// Class faking a network link, internally is uses an implementation of a
// SimulatedNetworkInterface to simulate network behavior.
class FakeNetworkPipe : public SimulatedPacketReceiverInterface {
 public:
  // Will keep `network_behavior` alive while pipe is alive itself.
  FakeNetworkPipe(Clock* clock,
                  std::unique_ptr<NetworkBehaviorInterface> network_behavior);
  // As above; delivered packets are additionally forwarded to `receiver`.
  FakeNetworkPipe(Clock* clock,
                  std::unique_ptr<NetworkBehaviorInterface> network_behavior,
                  PacketReceiver* receiver);
  // As above; `seed` is forwarded to the network simulation — presumably to
  // seed its randomness (confirm against the implementation).
  FakeNetworkPipe(Clock* clock,
                  std::unique_ptr<NetworkBehaviorInterface> network_behavior,
                  PacketReceiver* receiver,
                  uint64_t seed);
  ~FakeNetworkPipe() override;
  FakeNetworkPipe(const FakeNetworkPipe&) = delete;
  FakeNetworkPipe& operator=(const FakeNetworkPipe&) = delete;
  // Sets a constant offset (in milliseconds) applied to delivered packets'
  // reported times.
  void SetClockOffset(int64_t offset_ms);
  // Must not be called in parallel with DeliverPacket or Process.
  void SetReceiver(PacketReceiver* receiver) override;
  // Adds/subtracts references to Transport instances. If a Transport is
  // destroyed we cannot use it to forward a potentially delayed packet; these
  // methods are used to maintain a map of which instances are live.
  void AddActiveTransport(Transport* transport);
  void RemoveActiveTransport(Transport* transport);
  // Methods for use with Transport interface. When/if packets are delivered,
  // they will be passed to the instance specified by the `transport` parameter.
  // Note that that instance must be in the map of active transports.
  bool SendRtp(rtc::ArrayView<const uint8_t> packet,
               const PacketOptions& options,
               Transport* transport);
  bool SendRtcp(rtc::ArrayView<const uint8_t> packet, Transport* transport);
  // Implements the PacketReceiver interface. When/if packets are delivered,
  // they will be passed directly to the receiver instance given in
  // SetReceiver(). The receive time will be increased by the amount of time the
  // packet spent in the fake network pipe.
  void DeliverRtpPacket(
      MediaType media_type,
      RtpPacketReceived packet,
      OnUndemuxablePacketHandler undemuxable_packet_handler) override;
  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
  // Processes the network queues and trigger PacketReceiver::IncomingPacket for
  // packets ready to be delivered.
  void Process() override;
  absl::optional<int64_t> TimeUntilNextProcess() override;
  // Get statistics.
  float PercentageLoss();
  int AverageDelay() override;
  size_t DroppedPackets();
  size_t SentPackets();
  void ResetStats();
 protected:
  void DeliverPacketWithLock(NetworkPacket* packet);
  int64_t GetTimeInMicroseconds() const;
  bool ShouldProcess(int64_t time_now_us) const;
  void SetTimeToNextProcess(int64_t skip_us);
 private:
  // Wrapper so queued packets can be marked `removed` in place without
  // disturbing the ordering of the in-flight deque.
  struct StoredPacket {
    NetworkPacket packet;
    bool removed = false;
    explicit StoredPacket(NetworkPacket&& packet);
    StoredPacket(StoredPacket&&) = default;
    StoredPacket(const StoredPacket&) = delete;
    StoredPacket& operator=(const StoredPacket&) = delete;
    StoredPacket() = delete;
  };
  // Returns true if enqueued, or false if packet was dropped. Use this method
  // when enqueueing packets that should be received by PacketReceiver instance.
  bool EnqueuePacket(rtc::CopyOnWriteBuffer packet,
                     absl::optional<PacketOptions> options,
                     bool is_rtcp,
                     MediaType media_type,
                     absl::optional<int64_t> packet_time_us);
  // Returns true if enqueued, or false if packet was dropped. Use this method
  // when enqueueing packets that should be received by Transport instance.
  bool EnqueuePacket(rtc::CopyOnWriteBuffer packet,
                     absl::optional<PacketOptions> options,
                     bool is_rtcp,
                     Transport* transport);
  bool EnqueuePacket(NetworkPacket&& net_packet)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(process_lock_);
  void DeliverNetworkPacket(NetworkPacket* packet)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(config_lock_);
  bool HasReceiver() const;
  Clock* const clock_;
  // `config_lock` guards the mostly constant things like the callbacks.
  mutable Mutex config_lock_;
  const std::unique_ptr<NetworkBehaviorInterface> network_behavior_;
  PacketReceiver* receiver_ RTC_GUARDED_BY(config_lock_);
  // `process_lock` guards the data structures involved in delay and loss
  // processes, such as the packet queues.
  Mutex process_lock_;
  // Packets are added at the back of the deque, this makes the deque ordered
  // by increasing send time. The common case when removing packets from the
  // deque is removing early packets, which will be close to the front of the
  // deque. This makes finding the packets in the deque efficient in the common
  // case.
  std::deque<StoredPacket> packets_in_flight_ RTC_GUARDED_BY(process_lock_);
  int64_t clock_offset_ms_ RTC_GUARDED_BY(config_lock_);
  // Statistics.
  size_t dropped_packets_ RTC_GUARDED_BY(process_lock_);
  size_t sent_packets_ RTC_GUARDED_BY(process_lock_);
  int64_t total_packet_delay_us_ RTC_GUARDED_BY(process_lock_);
  int64_t last_log_time_us_;
  // Reference counts of Transports registered via AddActiveTransport().
  std::map<Transport*, size_t> active_transports_ RTC_GUARDED_BY(config_lock_);
};
} // namespace webrtc
#endif // CALL_FAKE_NETWORK_PIPE_H_

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/flexfec_receive_stream.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Constructs a FlexFEC receive-stream config. `rtcp_send_transport` is the
// transport used for outgoing RTCP packets and must be non-null.
FlexfecReceiveStream::Config::Config(Transport* rtcp_send_transport)
    : rtcp_send_transport(rtcp_send_transport) {
  RTC_DCHECK(rtcp_send_transport);
}
FlexfecReceiveStream::Config::Config(const Config& config) = default;
FlexfecReceiveStream::Config::~Config() = default;
} // namespace webrtc

View file

@ -0,0 +1,79 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_FLEXFEC_RECEIVE_STREAM_H_
#define CALL_FLEXFEC_RECEIVE_STREAM_H_
#include <stdint.h>
#include <string>
#include <vector>
#include "api/call/transport.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "call/receive_stream.h"
#include "call/rtp_packet_sink_interface.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
namespace webrtc {
// Receive stream for FlexFEC forward error correction. The stream receives
// FEC packets on its own SSRC (`rtp.remote_ssrc`) and protects a single media
// stream (see `protected_media_ssrcs`).
class FlexfecReceiveStream : public RtpPacketSinkInterface,
                             public ReceiveStreamInterface {
 public:
  ~FlexfecReceiveStream() override = default;
  struct Config {
    explicit Config(Transport* rtcp_send_transport);
    Config(const Config&);
    ~Config();
    // Human-readable rendering of the config, for logging.
    std::string ToString() const;
    // Returns true if all RTP information is available in order to
    // enable receiving FlexFEC.
    bool IsCompleteAndEnabled() const;
    // Payload type for FlexFEC. -1 means disabled.
    int payload_type = -1;
    ReceiveStreamRtpConfig rtp;
    // Vector containing a single element, corresponding to the SSRC of the
    // media stream being protected by this FlexFEC stream. The vector MUST have
    // size 1.
    //
    // TODO(brandtr): Update comment above when we support multistream
    // protection.
    std::vector<uint32_t> protected_media_ssrcs;
    // What RTCP mode to use in the reports.
    RtcpMode rtcp_mode = RtcpMode::kCompound;
    // Transport for outgoing RTCP packets.
    Transport* rtcp_send_transport = nullptr;
  };
  // TODO(tommi): FlexfecReceiveStream inherits from ReceiveStreamInterface,
  // not VideoReceiveStreamInterface where there's also a SetRtcpMode method.
  // Perhaps this should be in ReceiveStreamInterface and apply to audio streams
  // as well (although there's no logic that would use it at present).
  virtual void SetRtcpMode(RtcpMode mode) = 0;
  // Called to change the payload type after initialization.
  virtual void SetPayloadType(int payload_type) = 0;
  virtual int payload_type() const = 0;
  // Read-only access to the receive statistics kept for this stream.
  virtual const ReceiveStatistics* GetStats() const = 0;
};
} // namespace webrtc
#endif // CALL_FLEXFEC_RECEIVE_STREAM_H_

View file

@ -0,0 +1,201 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/flexfec_receive_stream_impl.h"
#include <stddef.h>
#include <cstdint>
#include <string>
#include <utility>
#include "api/array_view.h"
#include "api/call/transport.h"
#include "api/rtp_parameters.h"
#include "call/rtp_stream_receiver_controller_interface.h"
#include "modules/rtp_rtcp/include/flexfec_receiver.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
// Renders the config as
// "{payload_type: P, remote_ssrc: R, local_ssrc: L,
//   protected_media_ssrcs: [S1, S2]}" for logging.
std::string FlexfecReceiveStream::Config::ToString() const {
  char buf[1024];
  rtc::SimpleStringBuilder ss(buf);
  ss << "{payload_type: " << payload_type;
  ss << ", remote_ssrc: " << rtp.remote_ssrc;
  ss << ", local_ssrc: " << rtp.local_ssrc;
  ss << ", protected_media_ssrcs: [";
  size_t i = 0;
  for (; i + 1 < protected_media_ssrcs.size(); ++i)
    ss << protected_media_ssrcs[i] << ", ";
  if (!protected_media_ssrcs.empty())
    ss << protected_media_ssrcs[i];
  // Close both the SSRC list and the outer brace. (Bug fix: the list's "]"
  // was previously missing, producing unbalanced output like "{...[1, 2}".)
  ss << "]}";
  return ss.str();
}
// FlexFEC is enabled and fully configured iff a valid payload type, a
// non-zero remote SSRC, and exactly one protected media SSRC are set.
// TODO(brandtr): Update this check when we support multistream protection.
bool FlexfecReceiveStream::Config::IsCompleteAndEnabled() const {
  return payload_type >= 0 && rtp.remote_ssrc != 0 &&
         protected_media_ssrcs.size() == 1u;
}
namespace {
// TODO(brandtr): Update this function when we support multistream protection.
// Returns a FlexfecReceiver for `config`, or nullptr (with a warning logged)
// when the config cannot be used: negative payload type, zero remote SSRC, or
// anything other than exactly one protected media SSRC.
std::unique_ptr<FlexfecReceiver> MaybeCreateFlexfecReceiver(
    Clock* clock,
    const FlexfecReceiveStream::Config& config,
    RecoveredPacketReceiver* recovered_packet_receiver) {
  if (config.payload_type < 0) {
    RTC_LOG(LS_WARNING)
        << "Invalid FlexFEC payload type given. "
           "This FlexfecReceiveStream will therefore be useless.";
    return nullptr;
  }
  RTC_DCHECK_GE(config.payload_type, 0);
  RTC_DCHECK_LE(config.payload_type, 127);
  if (config.rtp.remote_ssrc == 0) {
    RTC_LOG(LS_WARNING)
        << "Invalid FlexFEC SSRC given. "
           "This FlexfecReceiveStream will therefore be useless.";
    return nullptr;
  }
  if (config.protected_media_ssrcs.empty()) {
    RTC_LOG(LS_WARNING)
        << "No protected media SSRC supplied. "
           "This FlexfecReceiveStream will therefore be useless.";
    return nullptr;
  }
  if (config.protected_media_ssrcs.size() > 1) {
    RTC_LOG(LS_WARNING)
        << "The supplied FlexfecConfig contained multiple protected "
           "media streams, but our implementation currently only "
           "supports protecting a single media stream. "
           "To avoid confusion, disabling FlexFEC completely.";
    return nullptr;
  }
  RTC_DCHECK_EQ(1U, config.protected_media_ssrcs.size());
  return std::unique_ptr<FlexfecReceiver>(new FlexfecReceiver(
      clock, config.rtp.remote_ssrc, config.protected_media_ssrcs[0],
      recovered_packet_receiver));
}
// Builds the receive-only RTCP module used for reporting on this FlexFEC
// stream; outgoing RTCP goes through `config.rtcp_send_transport`.
std::unique_ptr<ModuleRtpRtcpImpl2> CreateRtpRtcpModule(
    Clock* clock,
    ReceiveStatistics* receive_statistics,
    const FlexfecReceiveStreamImpl::Config& config,
    RtcpRttStats* rtt_stats) {
  RtpRtcpInterface::Configuration configuration;
  configuration.audio = false;
  configuration.receiver_only = true;
  configuration.clock = clock;
  configuration.receive_statistics = receive_statistics;
  configuration.outgoing_transport = config.rtcp_send_transport;
  configuration.rtt_stats = rtt_stats;
  configuration.local_media_ssrc = config.rtp.local_ssrc;
  return ModuleRtpRtcpImpl2::Create(configuration);
}
}  // namespace
// Wires up the FEC receiver and the RTCP module. Note that `receiver_` may be
// null when `config` is incomplete (see MaybeCreateFlexfecReceiver); the
// stream is then inert.
FlexfecReceiveStreamImpl::FlexfecReceiveStreamImpl(
    Clock* clock,
    Config config,
    RecoveredPacketReceiver* recovered_packet_receiver,
    RtcpRttStats* rtt_stats)
    : remote_ssrc_(config.rtp.remote_ssrc),
      payload_type_(config.payload_type),
      receiver_(
          MaybeCreateFlexfecReceiver(clock, config, recovered_packet_receiver)),
      rtp_receive_statistics_(ReceiveStatistics::Create(clock)),
      rtp_rtcp_(CreateRtpRtcpModule(clock,
                                    rtp_receive_statistics_.get(),
                                    config,
                                    rtt_stats)) {
  RTC_LOG(LS_INFO) << "FlexfecReceiveStreamImpl: " << config.ToString();
  RTC_DCHECK_GE(payload_type_, -1);
  // Construction may happen on a different sequence than subsequent use;
  // detach so the first guarded call attaches the checker.
  packet_sequence_checker_.Detach();
  // RTCP reporting.
  rtp_rtcp_->SetRTCPStatus(config.rtcp_mode);
}
// Logs destruction for lifetime debugging; members clean up via their own
// destructors.
FlexfecReceiveStreamImpl::~FlexfecReceiveStreamImpl() {
  RTC_DLOG(LS_INFO) << "~FlexfecReceiveStreamImpl: ssrc: " << remote_ssrc_;
}
// Registers this stream as the packet sink for `remote_ssrc()`. No-op when
// `receiver_` is null (incomplete config). Must be paired with
// UnregisterFromTransport() before destruction.
void FlexfecReceiveStreamImpl::RegisterWithTransport(
    RtpStreamReceiverControllerInterface* receiver_controller) {
  RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
  RTC_DCHECK(!rtp_stream_receiver_);
  if (!receiver_)
    return;
  // TODO(nisse): OnRtpPacket in this class delegates all real work to
  // `receiver_`. So maybe we don't need to implement RtpPacketSinkInterface
  // here at all, we'd then delete the OnRtpPacket method and instead register
  // `receiver_` as the RtpPacketSinkInterface for this stream.
  rtp_stream_receiver_ =
      receiver_controller->CreateReceiver(remote_ssrc(), this);
}
// Releases the receiver registration created by RegisterWithTransport().
void FlexfecReceiveStreamImpl::UnregisterFromTransport() {
  RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
  rtp_stream_receiver_.reset();
}
// Forwards every received packet to the FEC receiver; only packets on the
// FEC stream's own SSRC feed the RTCP receive statistics.
void FlexfecReceiveStreamImpl::OnRtpPacket(const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
  if (receiver_ != nullptr) {
    receiver_->OnRtpPacket(packet);
    // Do not report media packets in the RTCP RRs generated by `rtp_rtcp_`.
    if (packet.Ssrc() == remote_ssrc()) {
      rtp_receive_statistics_->OnRtpPacket(packet);
    }
  }
}
// Changes the FlexFEC payload type after initialization; -1 disables FlexFEC.
void FlexfecReceiveStreamImpl::SetPayloadType(int payload_type) {
  RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
  RTC_DCHECK_GE(payload_type, -1);
  payload_type_ = payload_type;
}
// Returns the current FlexFEC payload type (-1 when disabled).
int FlexfecReceiveStreamImpl::payload_type() const {
  RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
  return payload_type_;
}
// Propagates a changed local SSRC to the RTP/RTCP module, skipping the call
// entirely when the value is unchanged.
void FlexfecReceiveStreamImpl::SetLocalSsrc(uint32_t local_ssrc) {
  RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
  if (local_ssrc != rtp_rtcp_->local_media_ssrc()) {
    rtp_rtcp_->SetLocalSsrc(local_ssrc);
  }
}
} // namespace webrtc

View file

@ -0,0 +1,99 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_FLEXFEC_RECEIVE_STREAM_IMPL_H_
#define CALL_FLEXFEC_RECEIVE_STREAM_IMPL_H_
#include <memory>
#include <vector>
#include "call/flexfec_receive_stream.h"
#include "call/rtp_packet_sink_interface.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "rtc_base/system/no_unique_address.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
class FlexfecReceiver;
class ReceiveStatistics;
class RecoveredPacketReceiver;
class RtcpRttStats;
class RtpPacketReceived;
class RtpRtcp;
class RtpStreamReceiverControllerInterface;
class RtpStreamReceiverInterface;
// Concrete FlexfecReceiveStream. Delegates received FEC packets to a
// FlexfecReceiver (when configured) and runs a receive-only RTP/RTCP module
// for RTCP reporting on the FEC stream's SSRC.
class FlexfecReceiveStreamImpl : public FlexfecReceiveStream {
 public:
  FlexfecReceiveStreamImpl(Clock* clock,
                           Config config,
                           RecoveredPacketReceiver* recovered_packet_receiver,
                           RtcpRttStats* rtt_stats);
  // Destruction happens on the worker thread. Prior to destruction the caller
  // must ensure that a registration with the transport has been cleared. See
  // `RegisterWithTransport` for details.
  // TODO(tommi): As a further improvement to this, performing the full
  // destruction on the network thread could be made the default.
  ~FlexfecReceiveStreamImpl() override;
  // Called on the network thread to register/unregister with the network
  // transport.
  void RegisterWithTransport(
      RtpStreamReceiverControllerInterface* receiver_controller);
  // If registration has previously been done (via `RegisterWithTransport`) then
  // `UnregisterFromTransport` must be called prior to destruction, on the
  // network thread.
  void UnregisterFromTransport();
  // RtpPacketSinkInterface.
  void OnRtpPacket(const RtpPacketReceived& packet) override;
  void SetPayloadType(int payload_type) override;
  int payload_type() const override;
  // Updates the `rtp_video_stream_receiver_`'s `local_ssrc` when the default
  // sender has been created, changed or removed.
  void SetLocalSsrc(uint32_t local_ssrc);
  // SSRC this FEC stream receives on; constant for the stream's lifetime.
  uint32_t remote_ssrc() const { return remote_ssrc_; }
  void SetRtcpMode(RtcpMode mode) override {
    RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
    rtp_rtcp_->SetRTCPStatus(mode);
  }
  const ReceiveStatistics* GetStats() const override {
    return rtp_receive_statistics_.get();
  }
 private:
  RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
  const uint32_t remote_ssrc_;
  // `payload_type_` is initially set to -1, indicating that FlexFec is
  // disabled.
  int payload_type_ RTC_GUARDED_BY(packet_sequence_checker_) = -1;
  // Erasure code interfacing. May be null if the config was incomplete.
  const std::unique_ptr<FlexfecReceiver> receiver_;
  // RTCP reporting.
  const std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
  const std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
  // Non-null between RegisterWithTransport() and UnregisterFromTransport().
  std::unique_ptr<RtpStreamReceiverInterface> rtp_stream_receiver_
      RTC_GUARDED_BY(packet_sequence_checker_);
};
} // namespace webrtc
#endif // CALL_FLEXFEC_RECEIVE_STREAM_IMPL_H_

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_PACKET_RECEIVER_H_
#define CALL_PACKET_RECEIVER_H_
#include "absl/functional/any_invocable.h"
#include "api/media_types.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
namespace webrtc {
// Interface for handing incoming RTP/RTCP packets to the receive side of a
// call for demuxing.
class PacketReceiver {
 public:
  // Demux RTCP packets. Must be called on the worker thread.
  virtual void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) = 0;
  // Invoked once when a packet is received that can not be demuxed.
  // If the method returns true, a new attempt is made to demux the packet.
  using OnUndemuxablePacketHandler =
      absl::AnyInvocable<bool(const RtpPacketReceived& parsed_packet)>;
  // Must be called on the worker thread.
  // If `media_type` is not Audio or Video, packets may be used for BWE
  // calculations but are not demuxed.
  virtual void DeliverRtpPacket(
      MediaType media_type,
      RtpPacketReceived packet,
      OnUndemuxablePacketHandler undemuxable_packet_handler) = 0;
 protected:
  // Not intended for deletion through this interface.
  virtual ~PacketReceiver() {}
};
} // namespace webrtc
#endif // CALL_PACKET_RECEIVER_H_

View file

@ -0,0 +1,72 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RECEIVE_STREAM_H_
#define CALL_RECEIVE_STREAM_H_
#include <vector>
#include "api/crypto/frame_decryptor_interface.h"
#include "api/frame_transformer_interface.h"
#include "api/media_types.h"
#include "api/scoped_refptr.h"
#include "api/transport/rtp/rtp_source.h"
namespace webrtc {
// Common base interface for MediaReceiveStreamInterface based classes and
// FlexfecReceiveStream.
class ReceiveStreamInterface {
 public:
  // Receive-stream specific RTP settings.
  // TODO(tommi): This struct isn't needed at this level anymore. Move it closer
  // to where it's used.
  struct ReceiveStreamRtpConfig {
    // Synchronization source (stream identifier) to be received.
    // This member will not change mid-stream and can be assumed to be const
    // post initialization.
    uint32_t remote_ssrc = 0;
    // Sender SSRC used for sending RTCP (such as receiver reports).
    // This value may change mid-stream; updates must happen on the same
    // thread that the value is read on (i.e. packet delivery).
    uint32_t local_ssrc = 0;
  };
 protected:
  // Not intended for deletion through this interface.
  virtual ~ReceiveStreamInterface() {}
};
// Either an audio or video receive stream.
class MediaReceiveStreamInterface : public ReceiveStreamInterface {
 public:
  // Starts stream activity.
  // When a stream is active, it can receive, process and deliver packets.
  virtual void Start() = 0;
  // Stops stream activity. Must be called to match with a previous call to
  // `Start()`. When a stream has been stopped, it won't receive, decode,
  // process or deliver packets to downstream objects such as callback pointers
  // set in the config struct.
  virtual void Stop() = 0;
  // Installs a frame transformer between depacketization and decoding.
  virtual void SetDepacketizerToDecoderFrameTransformer(
      rtc::scoped_refptr<webrtc::FrameTransformerInterface>
          frame_transformer) = 0;
  // Installs a decryptor for incoming frames.
  virtual void SetFrameDecryptor(
      rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) = 0;
  // Returns the RTP sources observed for this stream.
  virtual std::vector<RtpSource> GetSources() const = 0;
};
} // namespace webrtc
#endif // CALL_RECEIVE_STREAM_H_

View file

@ -0,0 +1,120 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/receive_time_calculator.h"
#include <memory>
#include <string>
#include <type_traits>
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/numerics/safe_minmax.h"
namespace webrtc {
namespace {
const char kBweReceiveTimeCorrection[] = "WebRTC-Bwe-ReceiveTimeFix";
} // namespace
// Initializes the defaults and then overrides them with any parameters
// ("maxrep", "stall", "tol", "maxstall") found in the
// "WebRTC-Bwe-ReceiveTimeFix" field trial string.
ReceiveTimeCalculatorConfig::ReceiveTimeCalculatorConfig(
    const FieldTrialsView& field_trials)
    : max_packet_time_repair("maxrep", TimeDelta::Millis(2000)),
      stall_threshold("stall", TimeDelta::Millis(5)),
      tolerance("tol", TimeDelta::Millis(1)),
      max_stall("maxstall", TimeDelta::Seconds(5)) {
  std::string trial_string = field_trials.Lookup(kBweReceiveTimeCorrection);
  ParseFieldTrial(
      {&max_packet_time_repair, &stall_threshold, &tolerance, &max_stall},
      trial_string);
}
// Copy construction and destruction use the compiler-generated defaults.
ReceiveTimeCalculatorConfig::ReceiveTimeCalculatorConfig(
    const ReceiveTimeCalculatorConfig&) = default;
ReceiveTimeCalculatorConfig::~ReceiveTimeCalculatorConfig() = default;
// Builds the calculator with configuration parsed from `field_trials`.
ReceiveTimeCalculator::ReceiveTimeCalculator(
    const FieldTrialsView& field_trials)
    : config_(field_trials) {}
// Factory: only instantiates a calculator when the
// "WebRTC-Bwe-ReceiveTimeFix" field trial is enabled; otherwise returns
// nullptr and no correction is applied.
std::unique_ptr<ReceiveTimeCalculator>
ReceiveTimeCalculator::CreateFromFieldTrial(
    const FieldTrialsView& field_trials) {
  if (field_trials.IsEnabled(kBweReceiveTimeCorrection))
    return std::make_unique<ReceiveTimeCalculator>(field_trials);
  return nullptr;
}
// Combines `packet_time_us` (accurate deltas, but its clock may reset) with
// `system_time_us` (same clock read later in the app) and `safe_time_us`
// (a monotonically safe clock) into a corrected receive timestamp.
int64_t ReceiveTimeCalculator::ReconcileReceiveTimes(int64_t packet_time_us,
                                                     int64_t system_time_us,
                                                     int64_t safe_time_us) {
  int64_t stall_time_us = system_time_us - packet_time_us;
  // While less than `stall_threshold` of system time has passed, cap the
  // computed stall at `max_stall`.
  if (total_system_time_passed_us_ < config_.stall_threshold->us()) {
    stall_time_us = rtc::SafeMin(stall_time_us, config_.max_stall->us());
  }
  int64_t corrected_time_us = safe_time_us - stall_time_us;
  if (last_packet_time_us_ == -1 && stall_time_us < 0) {
    // First packet with packet time ahead of system time: remember this as a
    // static offset between the two clocks.
    static_clock_offset_us_ = stall_time_us;
    corrected_time_us += static_clock_offset_us_;
  } else if (last_packet_time_us_ > 0) {
    // All repairs depend on the `last_*` variables being initialized.
    int64_t packet_time_delta_us = packet_time_us - last_packet_time_us_;
    int64_t system_time_delta_us = system_time_us - last_system_time_us_;
    int64_t safe_time_delta_us = safe_time_us - last_safe_time_us_;
    // Repair backwards clock resets during initial stall. In this case, the
    // reset is observed only in packet time but never in system time.
    if (system_time_delta_us < 0)
      total_system_time_passed_us_ += config_.stall_threshold->us();
    else
      total_system_time_passed_us_ += system_time_delta_us;
    if (packet_time_delta_us < 0 &&
        total_system_time_passed_us_ < config_.stall_threshold->us()) {
      static_clock_offset_us_ -= packet_time_delta_us;
    }
    corrected_time_us += static_clock_offset_us_;
    // Detect resets in between clock readings in socket and app.
    bool forward_clock_reset =
        corrected_time_us + config_.tolerance->us() < last_corrected_time_us_;
    bool obvious_backward_clock_reset = system_time_us < packet_time_us;
    // Harder case with backward clock reset during stall, the reset being
    // smaller than the stall. Compensate throughout the stall.
    bool small_backward_clock_reset =
        !obvious_backward_clock_reset &&
        safe_time_delta_us > system_time_delta_us + config_.tolerance->us();
    bool stall_start =
        packet_time_delta_us >= 0 &&
        system_time_delta_us > packet_time_delta_us + config_.tolerance->us();
    bool stall_is_over = safe_time_delta_us > config_.stall_threshold->us();
    bool packet_time_caught_up =
        packet_time_delta_us < 0 && system_time_delta_us >= 0;
    if (stall_start && small_backward_clock_reset)
      small_reset_during_stall_ = true;
    else if (stall_is_over || packet_time_caught_up)
      small_reset_during_stall_ = false;
    // If resets are detected, advance time by (capped) packet time increase.
    if (forward_clock_reset || obvious_backward_clock_reset ||
        small_reset_during_stall_) {
      corrected_time_us = last_corrected_time_us_ +
                          rtc::SafeClamp(packet_time_delta_us, 0,
                                         config_.max_packet_time_repair->us());
    }
  }
  last_corrected_time_us_ = corrected_time_us;
  last_packet_time_us_ = packet_time_us;
  last_system_time_us_ = system_time_us;
  last_safe_time_us_ = safe_time_us;
  return corrected_time_us;
}
} // namespace webrtc

View file

@ -0,0 +1,63 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RECEIVE_TIME_CALCULATOR_H_
#define CALL_RECEIVE_TIME_CALCULATOR_H_
#include <stdint.h>
#include <memory>
#include "api/field_trials_view.h"
#include "api/units/time_delta.h"
#include "rtc_base/experiments/field_trial_parser.h"
namespace webrtc {
// Parameters for ReceiveTimeCalculator, overridable through the
// "WebRTC-Bwe-ReceiveTimeFix" field trial.
struct ReceiveTimeCalculatorConfig {
  explicit ReceiveTimeCalculatorConfig(const FieldTrialsView& field_trials);
  ReceiveTimeCalculatorConfig(const ReceiveTimeCalculatorConfig&);
  ReceiveTimeCalculatorConfig& operator=(const ReceiveTimeCalculatorConfig&) =
      default;
  ~ReceiveTimeCalculatorConfig();
  // Maximum per-packet time advance applied when repairing a clock reset
  // ("maxrep").
  FieldTrialParameter<TimeDelta> max_packet_time_repair;
  // Threshold used to detect stalls and the end of the startup period
  // ("stall").
  FieldTrialParameter<TimeDelta> stall_threshold;
  // Tolerance used when detecting clock resets ("tol").
  FieldTrialParameter<TimeDelta> tolerance;
  // Cap on the stall compensated for during startup ("maxstall").
  FieldTrialParameter<TimeDelta> max_stall;
};
// The receive time calculator serves the purpose of combining packet time
// stamps with a safely incremental clock. This assumes that the packet time
// stamps are based on lower layer timestamps that have more accurate time
// increments since they are based on the exact receive time. They might
// however, have large jumps due to clock resets in the system. To compensate
// this they are combined with a safe clock source that is guaranteed to be
// consistent, but it will not be able to measure the exact time when a packet
// is received.
class ReceiveTimeCalculator {
 public:
  // Returns a calculator when the "WebRTC-Bwe-ReceiveTimeFix" field trial is
  // enabled, nullptr otherwise.
  static std::unique_ptr<ReceiveTimeCalculator> CreateFromFieldTrial(
      const FieldTrialsView& field_trials);
  explicit ReceiveTimeCalculator(const FieldTrialsView& field_trials);
  // Combines the (accurate-delta but reset-prone) packet timestamp with the
  // safely incremental clock and returns a corrected receive time in
  // microseconds. Not a const operation: updates the internal state below.
  int64_t ReconcileReceiveTimes(int64_t packet_time_us,
                                int64_t system_time_us,
                                int64_t safe_time_us);

 private:
  int64_t last_corrected_time_us_ = -1;
  int64_t last_packet_time_us_ = -1;
  int64_t last_system_time_us_ = -1;
  int64_t last_safe_time_us_ = -1;
  int64_t total_system_time_passed_us_ = 0;
  int64_t static_clock_offset_us_ = 0;
  // Fix: this flag was declared `int64_t` although it is initialized with
  // `false` and only ever used as a boolean in ReconcileReceiveTimes().
  bool small_reset_during_stall_ = false;
  ReceiveTimeCalculatorConfig config_;
};
} // namespace webrtc
#endif // CALL_RECEIVE_TIME_CALCULATOR_H_

View file

@ -0,0 +1,135 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_bitrate_configurator.h"
#include <algorithm>
#include "rtc_base/checks.h"
namespace {
// Returns the smaller of the positive arguments. If only one argument is
// positive, that one is returned; if neither is positive, the result is an
// arbitrary nonpositive value.
int MinPositive(int a, int b) {
  if (a > 0 && b > 0) {
    return std::min(a, b);
  }
  return a > 0 ? a : b;
}
}  // namespace
namespace webrtc {
// Stores the initial configuration, which must satisfy
// 0 <= min <= start and (max == -1 || start <= max).
RtpBitrateConfigurator::RtpBitrateConfigurator(
    const BitrateConstraints& bitrate_config)
    : bitrate_config_(bitrate_config), base_bitrate_config_(bitrate_config) {
  RTC_DCHECK_GE(bitrate_config.min_bitrate_bps, 0);
  RTC_DCHECK_GE(bitrate_config.start_bitrate_bps,
                bitrate_config.min_bitrate_bps);
  if (bitrate_config.max_bitrate_bps != -1) {
    RTC_DCHECK_GE(bitrate_config.max_bitrate_bps,
                  bitrate_config.start_bitrate_bps);
  }
}
RtpBitrateConfigurator::~RtpBitrateConfigurator() = default;
// Returns the currently effective bitrate constraints.
BitrateConstraints RtpBitrateConfigurator::GetConfig() const {
  return bitrate_config_;
}
// Applies constraints received via SDP. Returns the new effective
// configuration if it changed, absl::nullopt otherwise.
absl::optional<BitrateConstraints>
RtpBitrateConfigurator::UpdateWithSdpParameters(
    const BitrateConstraints& bitrate_config) {
  RTC_DCHECK_GE(bitrate_config.min_bitrate_bps, 0);
  RTC_DCHECK_NE(bitrate_config.start_bitrate_bps, 0);
  if (bitrate_config.max_bitrate_bps != -1) {
    RTC_DCHECK_GT(bitrate_config.max_bitrate_bps, 0);
  }
  absl::optional<int> new_start;
  // Only update the "start" bitrate if it's set, and different from the old
  // value. In practice, this value comes from the x-google-start-bitrate codec
  // parameter in SDP, and setting the same remote description twice shouldn't
  // restart bandwidth estimation.
  if (bitrate_config.start_bitrate_bps != -1 &&
      bitrate_config.start_bitrate_bps !=
          base_bitrate_config_.start_bitrate_bps) {
    new_start.emplace(bitrate_config.start_bitrate_bps);
  }
  base_bitrate_config_ = bitrate_config;
  return UpdateConstraints(new_start);
}
// Applies locally set client preferences (the mask). Returns the new effective
// configuration if it changed, absl::nullopt otherwise.
absl::optional<BitrateConstraints>
RtpBitrateConfigurator::UpdateWithClientPreferences(
    const BitrateSettings& bitrate_mask) {
  bitrate_config_mask_ = bitrate_mask;
  return UpdateConstraints(bitrate_mask.start_bitrate_bps);
}
// Relay cap can change only max bitrate.
absl::optional<BitrateConstraints> RtpBitrateConfigurator::UpdateWithRelayCap(
    DataRate cap) {
  if (cap.IsFinite()) {
    // A finite cap must be non-zero; plus-infinity means "no cap".
    RTC_DCHECK(!cap.IsZero());
  }
  max_bitrate_over_relay_ = cap;
  return UpdateConstraints(absl::nullopt);
}
// Recomputes the effective constraints from the SDP base config, the client
// preference mask and the relay cap. Returns the constraints to forward when
// min/max changed or a new start rate was supplied, absl::nullopt otherwise.
absl::optional<BitrateConstraints> RtpBitrateConfigurator::UpdateConstraints(
    const absl::optional<int>& new_start) {
  BitrateConstraints updated;
  updated.min_bitrate_bps =
      std::max(bitrate_config_mask_.min_bitrate_bps.value_or(0),
               base_bitrate_config_.min_bitrate_bps);
  updated.max_bitrate_bps =
      MinPositive(bitrate_config_mask_.max_bitrate_bps.value_or(-1),
                  base_bitrate_config_.max_bitrate_bps);
  updated.max_bitrate_bps =
      MinPositive(updated.max_bitrate_bps, max_bitrate_over_relay_.bps_or(-1));
  // If the combined min ends up greater than the combined max, the max takes
  // priority.
  if (updated.max_bitrate_bps != -1 &&
      updated.min_bitrate_bps > updated.max_bitrate_bps) {
    updated.min_bitrate_bps = updated.max_bitrate_bps;
  }
  // If there is nothing to update (min/max unchanged, no new bandwidth
  // estimation start value), return early.
  if (updated.min_bitrate_bps == bitrate_config_.min_bitrate_bps &&
      updated.max_bitrate_bps == bitrate_config_.max_bitrate_bps &&
      !new_start) {
    return absl::nullopt;
  }
  if (new_start) {
    // Clamp start by min and max.
    updated.start_bitrate_bps = MinPositive(
        std::max(*new_start, updated.min_bitrate_bps), updated.max_bitrate_bps);
  } else {
    updated.start_bitrate_bps = -1;
  }
  BitrateConstraints config_to_return = updated;
  // The cached config keeps the previous start rate when no new one was given;
  // the returned config reports -1 (i.e. "start unchanged").
  if (!new_start) {
    updated.start_bitrate_bps = bitrate_config_.start_bitrate_bps;
  }
  bitrate_config_ = updated;
  return config_to_return;
}
} // namespace webrtc

View file

@ -0,0 +1,77 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_BITRATE_CONFIGURATOR_H_
#define CALL_RTP_BITRATE_CONFIGURATOR_H_
#include "absl/types/optional.h"
#include "api/transport/bitrate_settings.h"
#include "api/units/data_rate.h"
namespace webrtc {
// RtpBitrateConfigurator calculates the bitrate configuration based on received
// remote configuration combined with local overrides.
class RtpBitrateConfigurator {
 public:
  explicit RtpBitrateConfigurator(const BitrateConstraints& bitrate_config);
  ~RtpBitrateConfigurator();
  RtpBitrateConfigurator(const RtpBitrateConfigurator&) = delete;
  RtpBitrateConfigurator& operator=(const RtpBitrateConfigurator&) = delete;
  // Returns the currently effective constraints.
  BitrateConstraints GetConfig() const;
  // The greater min and smaller max set by this and SetClientBitratePreferences
  // will be used. The latest non-negative start value from either call will be
  // used. Specifying a start bitrate (>0) will reset the current bitrate
  // estimate. This is due to how the 'x-google-start-bitrate' flag is currently
  // implemented. Passing -1 leaves the start bitrate unchanged. Behavior is not
  // guaranteed for other negative values or 0.
  // The optional return value is set with new configuration if it was updated.
  absl::optional<BitrateConstraints> UpdateWithSdpParameters(
      const BitrateConstraints& bitrate_config_);
  // The greater min and smaller max set by this and SetSdpBitrateParameters
  // will be used. The latest non-negative start value from either call will be
  // used. Specifying a start bitrate will reset the current bitrate estimate.
  // Assumes 0 <= min <= start <= max holds for set parameters.
  // Update the bitrate configuration
  // The optional return value is set with new configuration if it was updated.
  absl::optional<BitrateConstraints> UpdateWithClientPreferences(
      const BitrateSettings& bitrate_mask);
  // Apply a cap for relayed calls.
  absl::optional<BitrateConstraints> UpdateWithRelayCap(DataRate cap);
 private:
  // Applies update to the BitrateConstraints cached in `bitrate_config_`,
  // resetting with `new_start` if set.
  absl::optional<BitrateConstraints> UpdateConstraints(
      const absl::optional<int>& new_start);
  // Bitrate config used until valid bitrate estimates are calculated. Also
  // used to cap total bitrate used. This comes from the remote connection.
  BitrateConstraints bitrate_config_;
  // The config mask set by SetClientBitratePreferences.
  // 0 <= min <= start <= max
  BitrateSettings bitrate_config_mask_;
  // The config set by SetSdpBitrateParameters.
  // min >= 0, start != 0, max == -1 || max > 0
  BitrateConstraints base_bitrate_config_;
  // Bandwidth cap applied for relayed calls.
  DataRate max_bitrate_over_relay_ = DataRate::PlusInfinity();
};
} // namespace webrtc
#endif // CALL_RTP_BITRATE_CONFIGURATOR_H_

View file

@ -0,0 +1,203 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_config.h"
#include <cstdint>
#include "absl/algorithm/container.h"
#include "api/array_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Looks up `ssrc` in `ssrcs` and returns the element at the same index in
// `associated_ssrcs`. The two vectors must be parallel (same length); a
// missing `ssrc` is a programming error.
uint32_t FindAssociatedSsrc(uint32_t ssrc,
                            const std::vector<uint32_t>& ssrcs,
                            const std::vector<uint32_t>& associated_ssrcs) {
  RTC_DCHECK_EQ(ssrcs.size(), associated_ssrcs.size());
  size_t index = 0;
  for (uint32_t candidate : ssrcs) {
    if (candidate == ssrc)
      return associated_ssrcs[index];
    ++index;
  }
  RTC_DCHECK_NOTREACHED();
  return 0;
}
} // namespace
// Renders the LNTF setting as "{enabled: true}" or "{enabled: false}".
std::string LntfConfig::ToString() const {
  if (enabled)
    return "{enabled: true}";
  return "{enabled: false}";
}
std::string NackConfig::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{rtp_history_ms: " << rtp_history_ms;
ss << '}';
return ss.str();
}
std::string UlpfecConfig::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{ulpfec_payload_type: " << ulpfec_payload_type;
ss << ", red_payload_type: " << red_payload_type;
ss << ", red_rtx_payload_type: " << red_rtx_payload_type;
ss << '}';
return ss.str();
}
// Two ULPFEC configs are equal iff all three payload types match.
bool UlpfecConfig::operator==(const UlpfecConfig& other) const {
  if (ulpfec_payload_type != other.ulpfec_payload_type)
    return false;
  if (red_payload_type != other.red_payload_type)
    return false;
  return red_rtx_payload_type == other.red_rtx_payload_type;
}
// Defaulted special members of RtpConfig and its nested Flexfec struct,
// defined out of line in this translation unit.
RtpConfig::RtpConfig() = default;
RtpConfig::RtpConfig(const RtpConfig&) = default;
RtpConfig::~RtpConfig() = default;
RtpConfig::Flexfec::Flexfec() = default;
RtpConfig::Flexfec::Flexfec(const Flexfec&) = default;
RtpConfig::Flexfec::~Flexfec() = default;
// Renders the full RTP send configuration as a human-readable string for
// logging/diagnostics.
std::string RtpConfig::ToString() const {
  char buf[2 * 1024];
  rtc::SimpleStringBuilder ss(buf);
  ss << "{ssrcs: [";
  for (size_t i = 0; i < ssrcs.size(); ++i) {
    ss << ssrcs[i];
    if (i != ssrcs.size() - 1)
      ss << ", ";
  }
  ss << "], rids: [";
  for (size_t i = 0; i < rids.size(); ++i) {
    ss << rids[i];
    if (i != rids.size() - 1)
      ss << ", ";
  }
  ss << "], mid: '" << mid << "'";
  ss << ", rtcp_mode: "
     << (rtcp_mode == RtcpMode::kCompound ? "RtcpMode::kCompound"
                                          : "RtcpMode::kReducedSize");
  ss << ", max_packet_size: " << max_packet_size;
  ss << ", extmap-allow-mixed: " << (extmap_allow_mixed ? "true" : "false");
  ss << ", extensions: [";
  for (size_t i = 0; i < extensions.size(); ++i) {
    ss << extensions[i].ToString();
    if (i != extensions.size() - 1)
      ss << ", ";
  }
  ss << ']';
  ss << ", lntf: " << lntf.ToString();
  // Use NackConfig::ToString() for consistency with lntf/ulpfec above; it
  // produces the same "{rtp_history_ms: N}" text as the previous inline
  // formatting, so the output is unchanged.
  ss << ", nack: " << nack.ToString();
  ss << ", ulpfec: " << ulpfec.ToString();
  ss << ", payload_name: " << payload_name;
  ss << ", payload_type: " << payload_type;
  ss << ", raw_payload: " << (raw_payload ? "true" : "false");
  ss << ", flexfec: {payload_type: " << flexfec.payload_type;
  ss << ", ssrc: " << flexfec.ssrc;
  ss << ", protected_media_ssrcs: [";
  for (size_t i = 0; i < flexfec.protected_media_ssrcs.size(); ++i) {
    ss << flexfec.protected_media_ssrcs[i];
    if (i != flexfec.protected_media_ssrcs.size() - 1)
      ss << ", ";
  }
  ss << "]}";
  ss << ", rtx: " << rtx.ToString();
  ss << ", c_name: " << c_name;
  ss << '}';
  return ss.str();
}
// Defaulted special members of the nested Rtx struct.
RtpConfig::Rtx::Rtx() = default;
RtpConfig::Rtx::Rtx(const Rtx&) = default;
RtpConfig::Rtx::~Rtx() = default;
std::string RtpConfig::Rtx::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{ssrcs: [";
for (size_t i = 0; i < ssrcs.size(); ++i) {
ss << ssrcs[i];
if (i != ssrcs.size() - 1)
ss << ", ";
}
ss << ']';
ss << ", payload_type: " << payload_type;
ss << '}';
return ss.str();
}
// True iff `ssrc` is one of this stream's primary media SSRCs.
bool RtpConfig::IsMediaSsrc(uint32_t ssrc) const {
  return std::find(ssrcs.begin(), ssrcs.end(), ssrc) != ssrcs.end();
}
// True iff `ssrc` is one of the configured RTX (retransmission) SSRCs.
bool RtpConfig::IsRtxSsrc(uint32_t ssrc) const {
  return std::find(rtx.ssrcs.begin(), rtx.ssrcs.end(), ssrc) !=
         rtx.ssrcs.end();
}
// True iff FlexFEC is enabled (payload type set) and `ssrc` is its SSRC.
bool RtpConfig::IsFlexfecSsrc(uint32_t ssrc) const {
  if (flexfec.payload_type == -1)
    return false;
  return ssrc == flexfec.ssrc;
}
// Returns the RTX SSRC paired with `media_ssrc`, or nullopt when RTX is not
// configured. `media_ssrc` must be a known media SSRC (DCHECKed).
absl::optional<uint32_t> RtpConfig::GetRtxSsrcAssociatedWithMediaSsrc(
    uint32_t media_ssrc) const {
  RTC_DCHECK(IsMediaSsrc(media_ssrc));
  // If we don't use RTX there is no association.
  if (rtx.ssrcs.empty())
    return absl::nullopt;
  // If we use RTX there MUST be an association ssrcs[i] <-> rtx.ssrcs[i].
  RTC_DCHECK_EQ(ssrcs.size(), rtx.ssrcs.size());
  return FindAssociatedSsrc(media_ssrc, ssrcs, rtx.ssrcs);
}
// Returns the media SSRC paired with `rtx_ssrc`. `rtx_ssrc` must be a known
// RTX SSRC (DCHECKed), so an association always exists.
uint32_t RtpConfig::GetMediaSsrcAssociatedWithRtxSsrc(uint32_t rtx_ssrc) const {
  RTC_DCHECK(IsRtxSsrc(rtx_ssrc));
  // If we use RTX there MUST be an association ssrcs[i] <-> rtx.ssrcs[i].
  RTC_DCHECK_EQ(ssrcs.size(), rtx.ssrcs.size());
  return FindAssociatedSsrc(rtx_ssrc, rtx.ssrcs, ssrcs);
}
// Returns the single media SSRC protected by the FlexFEC stream with SSRC
// `flexfec_ssrc`. Requires FlexFEC to be active for that SSRC (DCHECKed) and
// exactly one protected media SSRC to be configured.
uint32_t RtpConfig::GetMediaSsrcAssociatedWithFlexfecSsrc(
    uint32_t flexfec_ssrc) const {
  RTC_DCHECK(IsFlexfecSsrc(flexfec_ssrc));
  // If we use FlexFEC there MUST be an associated media ssrc.
  //
  // TODO(brandtr/hbos): The current implementation only supports an association
  // with a single media ssrc. If multiple ssrcs are to be supported in the
  // future, in order not to break GetStats()'s packet and byte counters, we
  // must be able to tell how many packets and bytes have contributed to which
  // SSRC.
  RTC_DCHECK_EQ(1u, flexfec.protected_media_ssrcs.size());
  uint32_t media_ssrc = flexfec.protected_media_ssrcs[0];
  RTC_DCHECK(IsMediaSsrc(media_ssrc));
  return media_ssrc;
}
// Returns the RID paired with `ssrc` (rids[i] corresponds to ssrcs[i]), or
// nullopt when the SSRC is unknown or no RID exists at that index.
absl::optional<std::string> RtpConfig::GetRidForSsrc(uint32_t ssrc) const {
  for (size_t i = 0; i < ssrcs.size(); ++i) {
    if (ssrcs[i] == ssrc) {
      if (i < rids.size()) {
        return rids[i];
      }
      return absl::nullopt;
    }
  }
  return absl::nullopt;
}
} // namespace webrtc

View file

@ -0,0 +1,175 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_CONFIG_H_
#define CALL_RTP_CONFIG_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
namespace webrtc {
// Currently only VP8/VP9 specific.
// Codec-payload state for one RTP stream.
struct RtpPayloadState {
  // Picture ID; -1 means not yet set.
  int16_t picture_id = -1;
  // Temporal-layer-0 picture index (VP8/VP9 TL0PICIDX).
  uint8_t tl0_pic_idx = 0;
  // Frame ID shared across streams — presumably across simulcast/spatial
  // layers; confirm against users of this field.
  int64_t shared_frame_id = 0;
};
// Settings for LNTF (LossNotification). Still highly experimental.
struct LntfConfig {
  // Renders "{enabled: true|false}" for logging.
  std::string ToString() const;
  bool enabled{false};
};
// Settings for NACK, see RFC 4585 for details.
struct NackConfig {
  NackConfig() : rtp_history_ms(0) {}
  // Renders "{rtp_history_ms: N}" for logging.
  std::string ToString() const;
  // Send side: the time RTP packets are stored for retransmissions.
  // Receive side: the time the receiver is prepared to wait for
  // retransmissions.
  // Set to '0' to disable.
  int rtp_history_ms;
};
// Settings for ULPFEC forward error correction.
// Set the payload types to '-1' to disable.
struct UlpfecConfig {
  UlpfecConfig()
      : ulpfec_payload_type(-1),
        red_payload_type(-1),
        red_rtx_payload_type(-1) {}
  // Renders the three payload types for logging.
  std::string ToString() const;
  // Member-wise equality over the three payload types.
  bool operator==(const UlpfecConfig& other) const;
  // Payload type used for ULPFEC packets.
  int ulpfec_payload_type;
  // Payload type used for RED packets.
  int red_payload_type;
  // RTX payload type for RED payload.
  int red_rtx_payload_type;
};
static const size_t kDefaultMaxPacketSize = 1500 - 40; // TCP over IPv4.
// Per-send-stream RTP configuration: SSRCs, RIDs, header extensions, payload
// format and FEC/RTX settings, plus helpers to map between the various SSRCs.
struct RtpConfig {
  RtpConfig();
  RtpConfig(const RtpConfig&);
  ~RtpConfig();
  // Renders the whole config for logging.
  std::string ToString() const;
  // SSRCs of the media streams sent by this stream. If `rids` is non-empty it
  // parallels this vector (see below).
  std::vector<uint32_t> ssrcs;
  // The Rtp Stream Ids (aka RIDs) to send in the RID RTP header extension
  // if the extension is included in the list of extensions.
  // If rids are specified, they should correspond to the `ssrcs` vector.
  // This means that:
  // 1. rids.size() == 0 || rids.size() == ssrcs.size().
  // 2. If rids is not empty, then `rids[i]` should use `ssrcs[i]`.
  std::vector<std::string> rids;
  // The value to send in the MID RTP header extension if the extension is
  // included in the list of extensions.
  std::string mid;
  // See RtcpMode for description.
  RtcpMode rtcp_mode = RtcpMode::kCompound;
  // Max RTP packet size delivered to send transport from VideoEngine.
  size_t max_packet_size = kDefaultMaxPacketSize;
  // Corresponds to the SDP attribute extmap-allow-mixed.
  bool extmap_allow_mixed = false;
  // RTP header extensions to use for this send stream.
  std::vector<RtpExtension> extensions;
  // TODO(nisse): For now, these are fixed, but we'd like to support
  // changing codec without recreating the VideoSendStream. Then these
  // fields must be removed, and association between payload type and codec
  // must move above the per-stream level. Ownership could be with
  // RtpTransportControllerSend, with a reference from RtpVideoSender, where
  // the latter would be responsible for mapping the codec type of encoded
  // images to the right payload type.
  std::string payload_name;
  int payload_type = -1;
  // Payload should be packetized using raw packetizer (payload header will
  // not be added, additional meta data is expected to be present in generic
  // frame descriptor RTP header extension).
  bool raw_payload = false;
  // See LntfConfig for description.
  LntfConfig lntf;
  // See NackConfig for description.
  NackConfig nack;
  // See UlpfecConfig for description.
  UlpfecConfig ulpfec;
  // FlexFEC settings; disabled when payload_type == -1.
  struct Flexfec {
    Flexfec();
    Flexfec(const Flexfec&);
    ~Flexfec();
    // Payload type of FlexFEC. Set to -1 to disable sending FlexFEC.
    int payload_type = -1;
    // SSRC of FlexFEC stream.
    uint32_t ssrc = 0;
    // Vector containing a single element, corresponding to the SSRC of the
    // media stream being protected by this FlexFEC stream.
    // The vector MUST have size 1.
    //
    // TODO(brandtr): Update comment above when we support
    // multistream protection.
    std::vector<uint32_t> protected_media_ssrcs;
  } flexfec;
  // Settings for RTP retransmission payload format, see RFC 4588 for
  // details.
  struct Rtx {
    Rtx();
    Rtx(const Rtx&);
    ~Rtx();
    // Renders the RTX settings for logging.
    std::string ToString() const;
    // SSRCs to use for the RTX streams.
    std::vector<uint32_t> ssrcs;
    // Payload type to use for the RTX stream.
    int payload_type = -1;
  } rtx;
  // RTCP CNAME, see RFC 3550.
  std::string c_name;
  // Enables send packet batching from the egress RTP sender.
  bool enable_send_packet_batching = false;
  // SSRC-classification helpers; see definitions in rtp_config.cc.
  bool IsMediaSsrc(uint32_t ssrc) const;
  bool IsRtxSsrc(uint32_t ssrc) const;
  bool IsFlexfecSsrc(uint32_t ssrc) const;
  // Returns the RTX SSRC paired with `media_ssrc`, or nullopt if RTX is off.
  absl::optional<uint32_t> GetRtxSsrcAssociatedWithMediaSsrc(
      uint32_t media_ssrc) const;
  uint32_t GetMediaSsrcAssociatedWithRtxSsrc(uint32_t rtx_ssrc) const;
  uint32_t GetMediaSsrcAssociatedWithFlexfecSsrc(uint32_t flexfec_ssrc) const;
  // Returns the RID for `ssrc`, or nullopt if unknown / no RID configured.
  absl::optional<std::string> GetRidForSsrc(uint32_t ssrc) const;
};
} // namespace webrtc
#endif // CALL_RTP_CONFIG_H_

View file

@ -0,0 +1,452 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_demuxer.h"
#include "absl/strings/string_view.h"
#include "call/rtp_packet_sink_interface.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Erases every entry of `multimap` whose mapped value equals `value` and
// returns the number of entries removed.
template <typename Container, typename Value>
size_t RemoveFromMultimapByValue(Container* multimap, const Value& value) {
  size_t removed = 0;
  auto it = multimap->begin();
  while (it != multimap->end()) {
    if (it->second == value) {
      it = multimap->erase(it);
      ++removed;
    } else {
      ++it;
    }
  }
  return removed;
}
// Erases every entry of `map` whose mapped value equals `value` and returns
// the number of entries removed.
template <typename Map, typename Value>
size_t RemoveFromMapByValue(Map* map, const Value& value) {
  auto value_matches = [&value](const auto& entry) {
    return entry.second == value;
  };
  return EraseIf(*map, value_matches);
}
// Temp fix: MID in SDP is allowed to be slightly longer than what's allowed
// in the RTP demuxer. Truncate if needed; this won't match, but it only
// makes sense in places that wouldn't use this for matching anyway.
// TODO(bugs.webrtc.org/12517): remove when length 16 is policed by parser.
std::string CheckMidLength(absl::string_view mid) {
  std::string truncated(mid);
  if (truncated.size() > BaseRtpStringExtension::kMaxValueSizeBytes) {
    RTC_LOG(LS_WARNING) << "`mid` attribute too long. Truncating.";
    truncated.resize(BaseRtpStringExtension::kMaxValueSizeBytes);
  }
  return truncated;
}
} // namespace
// `mid` is truncated to the RTP header extension value limit if necessary
// (see CheckMidLength above); `rsid` is stored unchanged.
RtpDemuxerCriteria::RtpDemuxerCriteria(
    absl::string_view mid,
    absl::string_view rsid /*= absl::string_view()*/)
    : mid_(CheckMidLength(mid)), rsid_(rsid) {}
RtpDemuxerCriteria::RtpDemuxerCriteria() = default;
RtpDemuxerCriteria::~RtpDemuxerCriteria() = default;
// Member-wise equality over all four demux dimensions.
bool RtpDemuxerCriteria::operator==(const RtpDemuxerCriteria& other) const {
  return payload_types_ == other.payload_types_ && ssrcs_ == other.ssrcs_ &&
         rsid_ == other.rsid_ && mid_ == other.mid_;
}
// Defined as the negation of operator==.
bool RtpDemuxerCriteria::operator!=(const RtpDemuxerCriteria& other) const {
  return !operator==(other);
}
std::string RtpDemuxerCriteria::ToString() const {
rtc::StringBuilder sb;
sb << "{mid: " << (mid_.empty() ? "<empty>" : mid_)
<< ", rsid: " << (rsid_.empty() ? "<empty>" : rsid_) << ", ssrcs: [";
for (auto ssrc : ssrcs_) {
sb << ssrc << ", ";
}
sb << "], payload_types = [";
for (auto pt : payload_types_) {
sb << pt << ", ";
}
sb << "]}";
return sb.Release();
}
// static
std::string RtpDemuxer::DescribePacket(const RtpPacketReceived& packet) {
rtc::StringBuilder sb;
sb << "PT=" << packet.PayloadType() << " SSRC=" << packet.Ssrc();
std::string mid;
if (packet.GetExtension<RtpMid>(&mid)) {
sb << " MID=" << mid;
}
std::string rsid;
if (packet.GetExtension<RtpStreamId>(&rsid)) {
sb << " RSID=" << rsid;
}
std::string rrsid;
if (packet.GetExtension<RepairedRtpStreamId>(&rrsid)) {
sb << " RRSID=" << rrsid;
}
return sb.Release();
}
// When `use_mid` is false, the MID header extension is ignored by ResolveSink.
RtpDemuxer::RtpDemuxer(bool use_mid /* = true*/) : use_mid_(use_mid) {}
// All sinks must have been removed (RemoveSink) before destruction.
RtpDemuxer::~RtpDemuxer() {
  RTC_DCHECK(sink_by_mid_.empty());
  RTC_DCHECK(sink_by_ssrc_.empty());
  RTC_DCHECK(sinks_by_pt_.empty());
  RTC_DCHECK(sink_by_mid_and_rsid_.empty());
  RTC_DCHECK(sink_by_rsid_.empty());
}
// Registers `sink` under every dimension present in `criteria` (MID,
// MID+RSID, bare RSID, SSRCs and payload types). Returns false — leaving the
// demuxer unchanged — if the criteria would conflict with an existing
// registration.
bool RtpDemuxer::AddSink(const RtpDemuxerCriteria& criteria,
                         RtpPacketSinkInterface* sink) {
  // At least one demux dimension must be specified.
  RTC_DCHECK(!criteria.payload_types().empty() || !criteria.ssrcs().empty() ||
             !criteria.mid().empty() || !criteria.rsid().empty());
  RTC_DCHECK(criteria.mid().empty() || IsLegalMidName(criteria.mid()));
  RTC_DCHECK(criteria.rsid().empty() || IsLegalRsidName(criteria.rsid()));
  RTC_DCHECK(sink);
  // We return false instead of DCHECKing for logical conflicts with the new
  // criteria because new sinks are created according to user-specified SDP and
  // we do not want to crash due to a data validation error.
  if (CriteriaWouldConflict(criteria)) {
    RTC_LOG(LS_ERROR) << "Unable to add sink=" << sink
                      << " due to conflicting criteria " << criteria.ToString();
    return false;
  }
  // A MID+RSID pair is registered as a unit; a bare RSID is only registered
  // when no MID is given.
  if (!criteria.mid().empty()) {
    if (criteria.rsid().empty()) {
      sink_by_mid_.emplace(criteria.mid(), sink);
    } else {
      sink_by_mid_and_rsid_.emplace(
          std::make_pair(criteria.mid(), criteria.rsid()), sink);
    }
  } else {
    if (!criteria.rsid().empty()) {
      sink_by_rsid_.emplace(criteria.rsid(), sink);
    }
  }
  for (uint32_t ssrc : criteria.ssrcs()) {
    sink_by_ssrc_.emplace(ssrc, sink);
  }
  for (uint8_t payload_type : criteria.payload_types()) {
    sinks_by_pt_.emplace(payload_type, sink);
  }
  // Keep the MID cache consistent with sink_by_mid_ / sink_by_mid_and_rsid_.
  RefreshKnownMids();
  RTC_DLOG(LS_INFO) << "Added sink = " << sink << " for criteria "
                    << criteria.ToString();
  return true;
}
// Returns true if registering `criteria` would shadow, duplicate, or be
// shadowed by an existing registration (MID, MID+RSID, or SSRC collisions).
bool RtpDemuxer::CriteriaWouldConflict(
    const RtpDemuxerCriteria& criteria) const {
  if (!criteria.mid().empty()) {
    if (criteria.rsid().empty()) {
      // If the MID is in the known_mids_ set, then there is already a sink
      // added for this MID directly, or there is a sink already added with a
      // MID, RSID pair for our MID and some RSID.
      // Adding this criteria would cause one of these rules to be shadowed, so
      // reject this new criteria.
      if (known_mids_.find(criteria.mid()) != known_mids_.end()) {
        RTC_LOG(LS_INFO) << criteria.ToString()
                         << " would conflict with known mid";
        return true;
      }
    } else {
      // If the exact rule already exists, then reject this duplicate.
      const auto sink_by_mid_and_rsid = sink_by_mid_and_rsid_.find(
          std::make_pair(criteria.mid(), criteria.rsid()));
      if (sink_by_mid_and_rsid != sink_by_mid_and_rsid_.end()) {
        RTC_LOG(LS_INFO) << criteria.ToString()
                         << " would conflict with existing sink = "
                         << sink_by_mid_and_rsid->second
                         << " by mid+rsid binding";
        return true;
      }
      // If there is already a sink registered for the bare MID, then this
      // criteria will never receive any packets because they will just be
      // directed to that MID sink, so reject this new criteria.
      const auto sink_by_mid = sink_by_mid_.find(criteria.mid());
      if (sink_by_mid != sink_by_mid_.end()) {
        RTC_LOG(LS_INFO) << criteria.ToString()
                         << " would conflict with existing sink = "
                         << sink_by_mid->second << " by mid binding";
        return true;
      }
    }
  }
  // Any SSRC may be bound to at most one sink.
  for (uint32_t ssrc : criteria.ssrcs()) {
    const auto sink_by_ssrc = sink_by_ssrc_.find(ssrc);
    if (sink_by_ssrc != sink_by_ssrc_.end()) {
      RTC_LOG(LS_INFO) << criteria.ToString()
                       << " would conflict with existing sink = "
                       << sink_by_ssrc->second << " binding by SSRC=" << ssrc;
      return true;
    }
  }
  // TODO(steveanton): May also sanity check payload types.
  return false;
}
// Rebuilds the cached set of all MIDs that appear in either sink_by_mid_ or
// sink_by_mid_and_rsid_.
void RtpDemuxer::RefreshKnownMids() {
  known_mids_.clear();
  for (const auto& entry : sink_by_mid_) {
    known_mids_.insert(entry.first);
  }
  for (const auto& entry : sink_by_mid_and_rsid_) {
    known_mids_.insert(entry.first.first);
  }
}
bool RtpDemuxer::AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink) {
RtpDemuxerCriteria criteria;
criteria.ssrcs().insert(ssrc);
return AddSink(criteria, sink);
}
// Convenience overload: registers `sink` for a single RSID with no MID.
void RtpDemuxer::AddSink(absl::string_view rsid, RtpPacketSinkInterface* sink) {
  RtpDemuxerCriteria rsid_only(absl::string_view() /* mid */, rsid);
  AddSink(rsid_only, sink);
}
// Removes `sink` from every routing table and refreshes the MID cache.
// Returns true if at least one binding was actually removed.
bool RtpDemuxer::RemoveSink(const RtpPacketSinkInterface* sink) {
  RTC_DCHECK(sink);
  size_t num_removed = 0;
  num_removed += RemoveFromMapByValue(&sink_by_mid_, sink);
  num_removed += RemoveFromMapByValue(&sink_by_ssrc_, sink);
  num_removed += RemoveFromMultimapByValue(&sinks_by_pt_, sink);
  num_removed += RemoveFromMapByValue(&sink_by_mid_and_rsid_, sink);
  num_removed += RemoveFromMapByValue(&sink_by_rsid_, sink);
  RefreshKnownMids();
  return num_removed > 0;
}
// Returns every SSRC currently bound to `sink`; empty for a null sink.
flat_set<uint32_t> RtpDemuxer::GetSsrcsForSink(
    const RtpPacketSinkInterface* sink) const {
  flat_set<uint32_t> result;
  if (sink == nullptr) {
    return result;
  }
  for (const auto& entry : sink_by_ssrc_) {
    if (entry.second == sink) {
      result.insert(entry.first);
    }
  }
  return result;
}
// Routes `packet` to the sink chosen by ResolveSink. Returns true when the
// packet was delivered, false when it was dropped.
bool RtpDemuxer::OnRtpPacket(const RtpPacketReceived& packet) {
  RtpPacketSinkInterface* sink = ResolveSink(packet);
  if (sink == nullptr) {
    return false;
  }
  sink->OnRtpPacket(packet);
  return true;
}
// Chooses the sink for `packet`, trying (in priority order) MID, MID+RSID,
// bare RSID, previously-bound SSRC, and finally a unique payload type.
// Latches any MID/RSID<->SSRC associations it learns along the way.
// Returns null when the packet should be dropped.
RtpPacketSinkInterface* RtpDemuxer::ResolveSink(
    const RtpPacketReceived& packet) {
  // See the BUNDLE spec for high level reference to this algorithm:
  // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38#section-10.2
  // RSID and RRID are routed to the same sinks. If an RSID is specified on a
  // repair packet, it should be ignored and the RRID should be used.
  std::string packet_mid, packet_rsid;
  bool has_mid = use_mid_ && packet.GetExtension<RtpMid>(&packet_mid);
  bool has_rsid = packet.GetExtension<RepairedRtpStreamId>(&packet_rsid);
  if (!has_rsid) {
    has_rsid = packet.GetExtension<RtpStreamId>(&packet_rsid);
  }
  uint32_t ssrc = packet.Ssrc();
  // The BUNDLE spec says to drop any packets with unknown MIDs, even if the
  // SSRC is known/latched.
  if (has_mid && known_mids_.find(packet_mid) == known_mids_.end()) {
    return nullptr;
  }
  // Cache information we learn about SSRCs and IDs. We need to do this even if
  // there isn't a rule/sink yet because we might add an MID/RSID rule after
  // learning an MID/RSID<->SSRC association.
  std::string* mid = nullptr;
  if (has_mid) {
    mid_by_ssrc_[ssrc] = packet_mid;
    mid = &packet_mid;
  } else {
    // If the packet does not include a MID header extension, check if there is
    // a latched MID for the SSRC.
    const auto it = mid_by_ssrc_.find(ssrc);
    if (it != mid_by_ssrc_.end()) {
      mid = &it->second;
    }
  }
  std::string* rsid = nullptr;
  if (has_rsid) {
    rsid_by_ssrc_[ssrc] = packet_rsid;
    rsid = &packet_rsid;
  } else {
    // If the packet does not include an RRID/RSID header extension, check if
    // there is a latched RSID for the SSRC.
    const auto it = rsid_by_ssrc_.find(ssrc);
    if (it != rsid_by_ssrc_.end()) {
      rsid = &it->second;
    }
  }
  // If MID and/or RSID is specified, prioritize that for demuxing the packet.
  // The motivation behind the BUNDLE algorithm is that we trust these are used
  // deliberately by senders and are more likely to be correct than SSRC/payload
  // type which are included with every packet.
  // TODO(steveanton): According to the BUNDLE spec, new SSRC mappings are only
  //                   accepted if the packet's extended sequence number is
  //                   greater than that of the last SSRC mapping update.
  //                   https://tools.ietf.org/html/rfc7941#section-4.2.6
  if (mid != nullptr) {
    RtpPacketSinkInterface* sink_by_mid = ResolveSinkByMid(*mid, ssrc);
    if (sink_by_mid != nullptr) {
      return sink_by_mid;
    }
    // RSID is scoped to a given MID if both are included.
    if (rsid != nullptr) {
      RtpPacketSinkInterface* sink_by_mid_rsid =
          ResolveSinkByMidRsid(*mid, *rsid, ssrc);
      if (sink_by_mid_rsid != nullptr) {
        return sink_by_mid_rsid;
      }
    }
    // At this point, there is at least one sink added for this MID and an RSID
    // but either the packet does not have an RSID or it is for a different
    // RSID. This falls outside the BUNDLE spec so drop the packet.
    return nullptr;
  }
  // RSID can be used without MID as long as they are unique.
  if (rsid != nullptr) {
    RtpPacketSinkInterface* sink_by_rsid = ResolveSinkByRsid(*rsid, ssrc);
    if (sink_by_rsid != nullptr) {
      return sink_by_rsid;
    }
  }
  // We trust signaled SSRC more than payload type which is likely to conflict
  // between streams.
  const auto ssrc_sink_it = sink_by_ssrc_.find(ssrc);
  if (ssrc_sink_it != sink_by_ssrc_.end()) {
    return ssrc_sink_it->second;
  }
  // Legacy senders will only signal payload type, support that as last resort.
  return ResolveSinkByPayloadType(packet.PayloadType(), ssrc);
}
// Finds the sink registered for `mid`; on success also latches the SSRC to
// that sink. Returns null when no bare-MID sink exists.
RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByMid(absl::string_view mid,
                                                     uint32_t ssrc) {
  const auto it = sink_by_mid_.find(mid);
  if (it == sink_by_mid_.end()) {
    return nullptr;
  }
  RtpPacketSinkInterface* sink = it->second;
  AddSsrcSinkBinding(ssrc, sink);
  return sink;
}
// Finds the sink registered for the (mid, rsid) pair; on success also latches
// the SSRC to that sink. Returns null when no such pair is registered.
RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByMidRsid(absl::string_view mid,
                                                         absl::string_view rsid,
                                                         uint32_t ssrc) {
  const auto it = sink_by_mid_and_rsid_.find(
      std::make_pair(std::string(mid), std::string(rsid)));
  if (it == sink_by_mid_and_rsid_.end()) {
    return nullptr;
  }
  RtpPacketSinkInterface* sink = it->second;
  AddSsrcSinkBinding(ssrc, sink);
  return sink;
}
// Finds the sink registered for the bare `rsid`; on success also latches the
// SSRC to that sink. Returns null when no such RSID is registered.
RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByRsid(absl::string_view rsid,
                                                      uint32_t ssrc) {
  const auto it = sink_by_rsid_.find(rsid);
  if (it == sink_by_rsid_.end()) {
    return nullptr;
  }
  RtpPacketSinkInterface* sink = it->second;
  AddSsrcSinkBinding(ssrc, sink);
  return sink;
}
// Routes by payload type only when exactly one sink is registered for it; an
// ambiguous payload type cannot be used for demuxing. On success also latches
// the SSRC to the chosen sink.
RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByPayloadType(
    uint8_t payload_type,
    uint32_t ssrc) {
  const auto range = sinks_by_pt_.equal_range(payload_type);
  if (range.first == range.second ||
      std::next(range.first) != range.second) {
    // Zero or more than one sink registered for this payload type.
    return nullptr;
  }
  RtpPacketSinkInterface* sink = range.first->second;
  AddSsrcSinkBinding(ssrc, sink);
  return sink;
}
// Records (or updates) an SSRC->sink association learned while demuxing.
// Once kMaxSsrcBindings distinct SSRCs are tracked, new bindings are ignored
// to bound memory use against peers flooding us with new SSRCs.
void RtpDemuxer::AddSsrcSinkBinding(uint32_t ssrc,
                                    RtpPacketSinkInterface* sink) {
  if (sink_by_ssrc_.size() >= kMaxSsrcBindings) {
    // Fixed log message: the original was missing the space after "of",
    // producing e.g. "limit of1000 bindings".
    RTC_LOG(LS_WARNING) << "New SSRC=" << ssrc
                        << " sink binding ignored; limit of "
                        << kMaxSsrcBindings << " bindings has been reached.";
    return;
  }
  auto result = sink_by_ssrc_.emplace(ssrc, sink);
  auto it = result.first;
  bool inserted = result.second;
  if (inserted) {
    RTC_DLOG(LS_INFO) << "Added sink = " << sink
                      << " binding with SSRC=" << ssrc;
  } else if (it->second != sink) {
    // Rebinding an already-known SSRC to a different sink.
    RTC_DLOG(LS_INFO) << "Updated sink = " << sink
                      << " binding with SSRC=" << ssrc;
    it->second = sink;
  }
}
} // namespace webrtc

View file

@ -0,0 +1,221 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_DEMUXER_H_
#define CALL_RTP_DEMUXER_H_
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "rtc_base/containers/flat_map.h"
#include "rtc_base/containers/flat_set.h"
namespace webrtc {
class RtpPacketReceived;
class RtpPacketSinkInterface;
// This struct describes the criteria that will be used to match packets to a
// specific sink.
class RtpDemuxerCriteria {
 public:
  // `mid` longer than the RTP header extension limit is truncated on
  // construction (see rtp_demuxer.cc).
  explicit RtpDemuxerCriteria(absl::string_view mid,
                              absl::string_view rsid = absl::string_view());
  RtpDemuxerCriteria();
  ~RtpDemuxerCriteria();
  // Member-wise (in)equality over mid, rsid, ssrcs and payload types.
  bool operator==(const RtpDemuxerCriteria& other) const;
  bool operator!=(const RtpDemuxerCriteria& other) const;
  // If not the empty string, will match packets with this MID.
  const std::string& mid() const { return mid_; }
  // Return string representation of demux criteria to facilitate logging
  std::string ToString() const;
  // If not the empty string, will match packets with this as their RTP stream
  // ID or repaired RTP stream ID.
  // Note that if both MID and RSID are specified, this will only match packets
  // that have both specified (either through RTP header extensions, SSRC
  // latching or RTCP).
  const std::string& rsid() const { return rsid_; }
  // The criteria will match packets with any of these SSRCs.
  const flat_set<uint32_t>& ssrcs() const { return ssrcs_; }
  // Writable accessor for directly modifying the list of ssrcs.
  flat_set<uint32_t>& ssrcs() { return ssrcs_; }
  // The criteria will match packets with any of these payload types.
  const flat_set<uint8_t>& payload_types() const { return payload_types_; }
  // Writable accessor for directly modifying the list of payload types.
  flat_set<uint8_t>& payload_types() { return payload_types_; }

 private:
  // Intentionally private member variables to encourage specifying them via the
  // constructor and consider them to be const as much as possible.
  const std::string mid_;
  const std::string rsid_;
  flat_set<uint32_t> ssrcs_;
  flat_set<uint8_t> payload_types_;
};
// This class represents the RTP demuxing, for a single RTP session (i.e., one
// SSRC space, see RFC 7656). It isn't thread aware, leaving responsibility of
// multithreading issues to the user of this class.
// The demuxing algorithm follows the sketch given in the BUNDLE draft:
// https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38#section-10.2
// with modifications to support RTP stream IDs also.
//
// When a packet is received, the RtpDemuxer will route according to the
// following rules:
// 1. If the packet contains the MID header extension, and no sink has been
// added with that MID as a criteria, the packet is not routed.
// 2. If the packet has the MID header extension, but no RSID or RRID extension,
// and the MID is bound to a sink, then bind its SSRC to the same sink and
// forward the packet to that sink. Note that rebinding to the same sink is
// not an error. (Later packets with that SSRC would therefore be forwarded
// to the same sink, whether they have the MID header extension or not.)
// 3. If the packet has the MID header extension and either the RSID or RRID
// extension, and the MID, RSID (or RRID) pair is bound to a sink, then bind
// its SSRC to the same sink and forward the packet to that sink. Later
// packets with that SSRC will be forwarded to the same sink.
// 4. If the packet has the RSID or RRID header extension, but no MID extension,
// and the RSID or RRID is bound to an RSID sink, then bind its SSRC to the
// same sink and forward the packet to that sink. Later packets with that
// SSRC will be forwarded to the same sink.
// 5. If the packet's SSRC is bound to an SSRC through a previous call to
// AddSink, then forward the packet to that sink. Note that the RtpDemuxer
// will not verify the payload type even if included in the sink's criteria.
// The sink is expected to do the check in its handler.
// 6. If the packet's payload type is bound to exactly one payload type sink
// through an earlier call to AddSink, then forward the packet to that sink.
// 7. Otherwise, the packet is not routed.
//
// In summary, the routing algorithm will always try to first match MID and RSID
// (including through SSRC binding), match SSRC directly as needed, and use
// payload types only if all else fails.
// Routes parsed RTP packets to registered sinks based on MID, RSID/RRID,
// SSRC and payload type, following the algorithm described in the comment
// block above this class.
class RtpDemuxer {
 public:
  // Maximum number of unique SSRC bindings allowed. This limit is to prevent
  // memory overuse attacks due to a malicious peer sending many packets with
  // different SSRCs.
  static constexpr int kMaxSsrcBindings = 1000;
  // Returns a string that contains all the attributes of the given packet
  // relevant for demuxing.
  static std::string DescribePacket(const RtpPacketReceived& packet);
  // NOTE(review): `use_mid` is stored in `use_mid_`; presumably it toggles
  // MID-based routing during demuxing — confirm against the implementation.
  explicit RtpDemuxer(bool use_mid = true);
  ~RtpDemuxer();
  RtpDemuxer(const RtpDemuxer&) = delete;
  void operator=(const RtpDemuxer&) = delete;
  // Registers a sink that will be notified when RTP packets match its given
  // criteria according to the algorithm described in the class description.
  // Returns true if the sink was successfully added.
  // Returns false in the following situations:
  // - Only MID is specified and the MID is already registered.
  // - Only RSID is specified and the RSID is already registered.
  // - Both MID and RSID is specified and the (MID, RSID) pair is already
  //   registered.
  // - Any of the criteria SSRCs are already registered.
  // If false is returned, no changes are made to the demuxer state.
  bool AddSink(const RtpDemuxerCriteria& criteria,
               RtpPacketSinkInterface* sink);
  // Registers a sink. Multiple SSRCs may be mapped to the same sink, but
  // each SSRC may only be mapped to one sink. The return value reports
  // whether the association has been recorded or rejected. Rejection may occur
  // if the SSRC has already been associated with a sink. The previously added
  // sink is *not* forgotten.
  bool AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink);
  // Registers a sink's association to an RSID. Only one sink may be associated
  // with a given RSID. Null pointer is not allowed.
  void AddSink(absl::string_view rsid, RtpPacketSinkInterface* sink);
  // Removes a sink. Return value reports if anything was actually removed.
  // Null pointer is not allowed.
  bool RemoveSink(const RtpPacketSinkInterface* sink);
  // Returns the set of SSRCs associated with a sink.
  flat_set<uint32_t> GetSsrcsForSink(const RtpPacketSinkInterface* sink) const;
  // Demuxes the given packet and forwards it to the chosen sink. Returns true
  // if the packet was forwarded and false if the packet was dropped.
  bool OnRtpPacket(const RtpPacketReceived& packet);

 private:
  // Returns true if adding a sink with the given criteria would cause conflicts
  // with the existing criteria and should be rejected.
  bool CriteriaWouldConflict(const RtpDemuxerCriteria& criteria) const;
  // Runs the demux algorithm on the given packet and returns the sink that
  // should receive the packet.
  // Will record any SSRC<->ID associations along the way.
  // If the packet should be dropped, this method returns null.
  RtpPacketSinkInterface* ResolveSink(const RtpPacketReceived& packet);
  // Used by the ResolveSink algorithm. Each helper implements one step of the
  // routing algorithm and may bind `ssrc` to the sink it resolves.
  RtpPacketSinkInterface* ResolveSinkByMid(absl::string_view mid,
                                           uint32_t ssrc);
  RtpPacketSinkInterface* ResolveSinkByMidRsid(absl::string_view mid,
                                               absl::string_view rsid,
                                               uint32_t ssrc);
  RtpPacketSinkInterface* ResolveSinkByRsid(absl::string_view rsid,
                                            uint32_t ssrc);
  RtpPacketSinkInterface* ResolveSinkByPayloadType(uint8_t payload_type,
                                                   uint32_t ssrc);
  // Regenerate the known_mids_ set from information in the sink_by_mid_ and
  // sink_by_mid_and_rsid_ maps.
  void RefreshKnownMids();
  // Map each sink by its component attributes to facilitate quick lookups.
  // Payload Type mapping is a multimap because if two sinks register for the
  // same payload type, both AddSinks succeed but we must know not to demux on
  // that attribute since it is ambiguous.
  // Note: Mappings are only modified by AddSink/RemoveSink (except for
  // SSRC mapping which receives all MID, payload type, or RSID to SSRC bindings
  // discovered when demuxing packets).
  flat_map<std::string, RtpPacketSinkInterface*> sink_by_mid_;
  flat_map<uint32_t, RtpPacketSinkInterface*> sink_by_ssrc_;
  std::multimap<uint8_t, RtpPacketSinkInterface*> sinks_by_pt_;
  flat_map<std::pair<std::string, std::string>, RtpPacketSinkInterface*>
      sink_by_mid_and_rsid_;
  flat_map<std::string, RtpPacketSinkInterface*> sink_by_rsid_;
  // Tracks all the MIDs that have been identified in added criteria. Used to
  // determine if a packet should be dropped right away because the MID is
  // unknown.
  flat_set<std::string> known_mids_;
  // Records learned mappings of MID --> SSRC and RSID --> SSRC as packets are
  // received.
  // This is stored separately from the sink mappings because if a sink is
  // removed we want to still remember these associations.
  flat_map<uint32_t, std::string> mid_by_ssrc_;
  flat_map<uint32_t, std::string> rsid_by_ssrc_;
  // Adds a binding from the SSRC to the given sink.
  void AddSsrcSinkBinding(uint32_t ssrc, RtpPacketSinkInterface* sink);
  // Set at construction; controls MID handling for the lifetime of the object.
  const bool use_mid_;
};
} // namespace webrtc
#endif // CALL_RTP_DEMUXER_H_

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_PACKET_SINK_INTERFACE_H_
#define CALL_RTP_PACKET_SINK_INTERFACE_H_
namespace webrtc {
class RtpPacketReceived;
// This class represents a receiver of already parsed RTP packets.
class RtpPacketSinkInterface {
 public:
  virtual ~RtpPacketSinkInterface() = default;
  // Invoked once for every received, already-parsed RTP packet routed to this
  // sink.
  virtual void OnRtpPacket(const RtpPacketReceived& packet) = 0;
};
} // namespace webrtc
#endif // CALL_RTP_PACKET_SINK_INTERFACE_H_

View file

@ -0,0 +1,795 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_payload_params.h"
#include <stddef.h>
#include <algorithm>
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/types/variant.h"
#include "api/video/video_timing.h"
#include "modules/video_coding/codecs/h264/include/h264_globals.h"
#include "modules/video_coding/codecs/interface/common_constants.h"
#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
#include "modules/video_coding/frame_dependencies_calculator.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/random.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
namespace {
// Highest number of spatial layers supported by the simulated generic
// structure (see MinimalisticStructure / Vp9ToGeneric below).
constexpr int kMaxSimulatedSpatialLayers = 3;
// Copies the codec-specific parts of `info` into the per-codec sub-header of
// `rtp`. `spatial_index` is only consulted for multi-layer VP9. Codecs with
// no dedicated RTP header (multiplex/generic/unknown) are mapped to the
// generic codec type or left untouched.
void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
                                   absl::optional<int> spatial_index,
                                   RTPVideoHeader* rtp) {
  rtp->codec = info.codecType;
  rtp->is_last_frame_in_picture = info.end_of_picture;
  switch (info.codecType) {
    case kVideoCodecVP8: {
      auto& vp8_header = rtp->video_type_header.emplace<RTPVideoHeaderVP8>();
      vp8_header.InitRTPVideoHeaderVP8();
      vp8_header.nonReference = info.codecSpecific.VP8.nonReference;
      vp8_header.temporalIdx = info.codecSpecific.VP8.temporalIdx;
      vp8_header.layerSync = info.codecSpecific.VP8.layerSync;
      vp8_header.keyIdx = info.codecSpecific.VP8.keyIdx;
      return;
    }
    case kVideoCodecVP9: {
      auto& vp9_header = rtp->video_type_header.emplace<RTPVideoHeaderVP9>();
      vp9_header.InitRTPVideoHeaderVP9();
      vp9_header.inter_pic_predicted =
          info.codecSpecific.VP9.inter_pic_predicted;
      vp9_header.flexible_mode = info.codecSpecific.VP9.flexible_mode;
      vp9_header.ss_data_available = info.codecSpecific.VP9.ss_data_available;
      vp9_header.non_ref_for_inter_layer_pred =
          info.codecSpecific.VP9.non_ref_for_inter_layer_pred;
      vp9_header.temporal_idx = info.codecSpecific.VP9.temporal_idx;
      vp9_header.temporal_up_switch = info.codecSpecific.VP9.temporal_up_switch;
      vp9_header.inter_layer_predicted =
          info.codecSpecific.VP9.inter_layer_predicted;
      vp9_header.gof_idx = info.codecSpecific.VP9.gof_idx;
      vp9_header.num_spatial_layers = info.codecSpecific.VP9.num_spatial_layers;
      vp9_header.first_active_layer = info.codecSpecific.VP9.first_active_layer;
      // A spatial index is only meaningful when the stream actually has more
      // than one spatial layer.
      if (vp9_header.num_spatial_layers > 1) {
        vp9_header.spatial_idx = spatial_index.value_or(kNoSpatialIdx);
      } else {
        vp9_header.spatial_idx = kNoSpatialIdx;
      }
      if (info.codecSpecific.VP9.ss_data_available) {
        vp9_header.spatial_layer_resolution_present =
            info.codecSpecific.VP9.spatial_layer_resolution_present;
        if (info.codecSpecific.VP9.spatial_layer_resolution_present) {
          for (size_t i = 0; i < info.codecSpecific.VP9.num_spatial_layers;
               ++i) {
            vp9_header.width[i] = info.codecSpecific.VP9.width[i];
            vp9_header.height[i] = info.codecSpecific.VP9.height[i];
          }
        }
        vp9_header.gof.CopyGofInfoVP9(info.codecSpecific.VP9.gof);
      }
      vp9_header.num_ref_pics = info.codecSpecific.VP9.num_ref_pics;
      for (int i = 0; i < info.codecSpecific.VP9.num_ref_pics; ++i) {
        vp9_header.pid_diff[i] = info.codecSpecific.VP9.p_diff[i];
      }
      vp9_header.end_of_picture = info.end_of_picture;
      return;
    }
    case kVideoCodecH264: {
      auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
      h264_header.packetization_mode =
          info.codecSpecific.H264.packetization_mode;
      return;
    }
    case kVideoCodecH265: {
      auto& h265_header = rtp->video_type_header.emplace<RTPVideoHeaderH265>();
      h265_header.packetization_mode =
          info.codecSpecific.H265.packetization_mode;
    }
      return;
    case kVideoCodecMultiplex:
    case kVideoCodecGeneric:
      // Neither codec carries a codec-specific RTP header; advertise them as
      // generic on the wire.
      rtp->codec = kVideoCodecGeneric;
      return;
    default:
      return;
  }
}
// Fills `timing` with capture-relative encode timestamps from `image`, or
// marks it invalid when the encoder produced no usable timing for this frame.
void SetVideoTiming(const EncodedImage& image, VideoSendTiming* timing) {
  const auto frame_flags = image.timing_.flags;
  const bool has_timing =
      frame_flags != VideoSendTiming::TimingFrameFlags::kInvalid &&
      frame_flags != VideoSendTiming::TimingFrameFlags::kNotTriggered;
  if (!has_timing) {
    timing->flags = VideoSendTiming::TimingFrameFlags::kInvalid;
    return;
  }
  // Deltas are expressed relative to the capture time and capped to fit the
  // wire format.
  timing->encode_start_delta_ms = VideoSendTiming::GetDeltaCappedMs(
      image.capture_time_ms_, image.timing_.encode_start_ms);
  timing->encode_finish_delta_ms = VideoSendTiming::GetDeltaCappedMs(
      image.capture_time_ms_, image.timing_.encode_finish_ms);
  // Later pipeline stages (packetizer, pacer, network) fill in the remaining
  // deltas; start them at zero.
  timing->packetization_finish_delta_ms = 0;
  timing->pacer_exit_delta_ms = 0;
  timing->network_timestamp_delta_ms = 0;
  timing->network2_timestamp_delta_ms = 0;
  timing->flags = frame_flags;
}
// Returns a structure that aligns with simulated generic info. The templates
// allow producing a valid dependency descriptor for any stream where
// `num_spatial_layers` * `num_temporal_layers` <= 32 (limited by
// https://aomediacodec.github.io/av1-rtp-spec/#a82-syntax, see
// template_fdiffs()). The set of templates is not tuned for any particular
// structure, so the dependency descriptor will use more bytes on the wire
// than it would with tuned templates.
FrameDependencyStructure MinimalisticStructure(int num_spatial_layers,
                                               int num_temporal_layers) {
  RTC_DCHECK_LE(num_spatial_layers, DependencyDescriptor::kMaxSpatialIds);
  RTC_DCHECK_LE(num_temporal_layers, DependencyDescriptor::kMaxTemporalIds);
  RTC_DCHECK_LE(num_spatial_layers * num_temporal_layers, 32);
  const int num_decode_targets = num_spatial_layers * num_temporal_layers;
  FrameDependencyStructure structure;
  structure.num_decode_targets = num_decode_targets;
  structure.num_chains = num_spatial_layers;
  structure.templates.reserve(num_decode_targets);
  // One template per (spatial, temporal) layer pair, each protected by the
  // chain of its spatial layer.
  for (int sid = 0; sid < num_spatial_layers; ++sid) {
    for (int tid = 0; tid < num_temporal_layers; ++tid) {
      FrameDependencyTemplate frame_template;
      frame_template.spatial_id = sid;
      frame_template.temporal_id = tid;
      for (int target_sid = 0; target_sid < num_spatial_layers; ++target_sid) {
        for (int target_tid = 0; target_tid < num_temporal_layers;
             ++target_tid) {
          // Prefer the kSwitch indication for frames that are part of the
          // decode target: dependency descriptor information generated in
          // this class uses kSwitch more often than kRequired, increasing the
          // chance of a good (or complete) template match.
          const bool in_target = sid <= target_sid && tid <= target_tid;
          frame_template.decode_target_indications.push_back(
              in_target ? DecodeTargetIndication::kSwitch
                        : DecodeTargetIndication::kNotPresent);
        }
      }
      // Base temporal layer frames refer one full temporal cycle back; other
      // frames refer to the previous frame of the same spatial layer.
      frame_template.frame_diffs.push_back(tid == 0 ? num_decode_targets
                                                    : num_spatial_layers);
      frame_template.chain_diffs.assign(structure.num_chains, 1);
      structure.templates.push_back(frame_template);
      structure.decode_target_protected_by_chain.push_back(sid);
    }
  }
  return structure;
}
} // namespace
// Constructs the per-SSRC payload parameter tracker. When no previous
// `state` is supplied, picture_id and tl0_pic_idx are seeded randomly.
RtpPayloadParams::RtpPayloadParams(const uint32_t ssrc,
                                   const RtpPayloadState* state,
                                   const FieldTrialsView& trials)
    : ssrc_(ssrc),
      generic_picture_id_experiment_(
          absl::StartsWith(trials.Lookup("WebRTC-GenericPictureId"),
                           "Enabled")),
      simulate_generic_structure_(absl::StartsWith(
          trials.Lookup("WebRTC-GenericCodecDependencyDescriptor"),
          "Enabled")) {
  // -1 is the "no frame seen yet" sentinel used throughout this class.
  for (auto& spatial_layer : last_shared_frame_id_)
    spatial_layer.fill(-1);
  chain_last_frame_id_.fill(-1);
  buffer_id_to_frame_id_.fill(-1);
  Random random(rtc::TimeMicros());
  // picture_id is a 15-bit wrapping counter; mask the random seed accordingly.
  state_.picture_id =
      state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
  state_.tl0_pic_idx = state ? state->tl0_pic_idx : (random.Rand<uint8_t>());
}
RtpPayloadParams::RtpPayloadParams(const RtpPayloadParams& other) = default;
// Nothing to release; use `= default` instead of an empty body
// (clang-tidy: modernize-use-equals-default).
RtpPayloadParams::~RtpPayloadParams() = default;
// Builds the complete RTP video header for `image`: fills the codec-specific
// sub-header, copies codec-agnostic frame metadata, advances the
// picture-id/tl0 state and attaches the generic frame descriptor keyed by
// `shared_frame_id`.
RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
    const EncodedImage& image,
    const CodecSpecificInfo* codec_specific_info,
    int64_t shared_frame_id) {
  RTPVideoHeader rtp_video_header;
  if (codec_specific_info) {
    PopulateRtpWithCodecSpecifics(*codec_specific_info, image.SpatialIndex(),
                                  &rtp_video_header);
  }
  rtp_video_header.simulcastIdx = image.SimulcastIndex().value_or(0);
  rtp_video_header.frame_type = image._frameType;
  rtp_video_header.rotation = image.rotation_;
  rtp_video_header.content_type = image.content_type_;
  rtp_video_header.playout_delay = image.PlayoutDelay();
  rtp_video_header.width = image._encodedWidth;
  rtp_video_header.height = image._encodedHeight;
  rtp_video_header.color_space = image.ColorSpace()
                                     ? absl::make_optional(*image.ColorSpace())
                                     : absl::nullopt;
  rtp_video_header.video_frame_tracking_id = image.VideoFrameTrackingId();
  SetVideoTiming(image, &rtp_video_header.video_timing);
  const bool is_keyframe = image._frameType == VideoFrameType::kVideoFrameKey;
  // A VP9 picture may consist of several spatial-layer frames; only the first
  // of them may advance picture_id/tl0_pic_idx (see SetCodecSpecific).
  const bool first_frame_in_picture =
      (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
          ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
          : true;
  SetCodecSpecific(&rtp_video_header, first_frame_in_picture);
  SetGeneric(codec_specific_info, shared_frame_id, is_keyframe,
             &rtp_video_header);
  return rtp_video_header;
}
// Returns the SSRC this tracker was constructed for.
uint32_t RtpPayloadParams::ssrc() const {
  return ssrc_;
}
// Returns a snapshot of the current picture_id/tl0_pic_idx state, suitable
// for re-seeding a future RtpPayloadParams instance.
RtpPayloadState RtpPayloadParams::state() const {
  return state_;
}
// Stamps the running picture_id (and, for layered streams, tl0_pic_idx) onto
// the codec-specific sub-header. `first_frame_in_picture` gates the counter
// increments so that multiple spatial frames of one VP9 picture share ids.
void RtpPayloadParams::SetCodecSpecific(RTPVideoHeader* rtp_video_header,
                                        bool first_frame_in_picture) {
  // Always set picture id. Set tl0_pic_idx iff temporal index is set.
  if (first_frame_in_picture) {
    // 15-bit wrap-around counter.
    state_.picture_id = (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
  }
  if (rtp_video_header->codec == kVideoCodecVP8) {
    auto& vp8_header =
        absl::get<RTPVideoHeaderVP8>(rtp_video_header->video_type_header);
    vp8_header.pictureId = state_.picture_id;
    if (vp8_header.temporalIdx != kNoTemporalIdx) {
      // tl0_pic_idx advances only on base temporal layer frames.
      if (vp8_header.temporalIdx == 0) {
        ++state_.tl0_pic_idx;
      }
      vp8_header.tl0PicIdx = state_.tl0_pic_idx;
    }
  }
  if (rtp_video_header->codec == kVideoCodecVP9) {
    auto& vp9_header =
        absl::get<RTPVideoHeaderVP9>(rtp_video_header->video_type_header);
    vp9_header.picture_id = state_.picture_id;
    // Note that in the case that we have no temporal layers but we do have
    // spatial layers, packets will carry layering info with a temporal_idx of
    // zero, and we then have to set and increment tl0_pic_idx.
    if (vp9_header.temporal_idx != kNoTemporalIdx ||
        vp9_header.spatial_idx != kNoSpatialIdx) {
      if (first_frame_in_picture &&
          (vp9_header.temporal_idx == 0 ||
           vp9_header.temporal_idx == kNoTemporalIdx)) {
        ++state_.tl0_pic_idx;
      }
      vp9_header.tl0_pic_idx = state_.tl0_pic_idx;
    }
  }
  if (generic_picture_id_experiment_ &&
      rtp_video_header->codec == kVideoCodecGeneric) {
    rtp_video_header->video_type_header.emplace<RTPVideoHeaderLegacyGeneric>()
        .picture_id = state_.picture_id;
  }
}
// Translates encoder-provided GenericFrameInfo into an RTP generic
// descriptor. The stateful calculators convert buffer usage into frame
// dependencies and chain membership into chain diffs.
RTPVideoHeader::GenericDescriptorInfo
RtpPayloadParams::GenericDescriptorFromFrameInfo(
    const GenericFrameInfo& frame_info,
    int64_t frame_id) {
  RTPVideoHeader::GenericDescriptorInfo generic;
  generic.frame_id = frame_id;
  generic.dependencies = dependencies_calculator_.FromBuffersUsage(
      frame_id, frame_info.encoder_buffers);
  generic.chain_diffs =
      chains_calculator_.From(frame_id, frame_info.part_of_chain);
  generic.spatial_index = frame_info.spatial_id;
  generic.temporal_index = frame_info.temporal_id;
  generic.decode_target_indications = frame_info.decode_target_indications;
  generic.active_decode_targets = frame_info.active_decode_targets;
  return generic;
}
// Attaches the generic frame descriptor to `rtp_video_header`. Prefers
// encoder-provided generic_frame_info; otherwise falls back to per-codec
// simulation (or no descriptor for codecs without a conversion).
void RtpPayloadParams::SetGeneric(const CodecSpecificInfo* codec_specific_info,
                                  int64_t frame_id,
                                  bool is_keyframe,
                                  RTPVideoHeader* rtp_video_header) {
  if (codec_specific_info && codec_specific_info->generic_frame_info &&
      !codec_specific_info->generic_frame_info->encoder_buffers.empty()) {
    if (is_keyframe) {
      // Key frame resets all chains it is in.
      chains_calculator_.Reset(
          codec_specific_info->generic_frame_info->part_of_chain);
    }
    rtp_video_header->generic = GenericDescriptorFromFrameInfo(
        *codec_specific_info->generic_frame_info, frame_id);
    return;
  }
  switch (rtp_video_header->codec) {
    case VideoCodecType::kVideoCodecGeneric:
      GenericToGeneric(frame_id, is_keyframe, rtp_video_header);
      return;
    case VideoCodecType::kVideoCodecVP8:
      if (codec_specific_info) {
        Vp8ToGeneric(codec_specific_info->codecSpecific.VP8, frame_id,
                     is_keyframe, rtp_video_header);
      }
      return;
    case VideoCodecType::kVideoCodecVP9:
      if (codec_specific_info != nullptr) {
        Vp9ToGeneric(codec_specific_info->codecSpecific.VP9, frame_id,
                     *rtp_video_header);
      }
      return;
    case VideoCodecType::kVideoCodecAV1:
      // TODO(philipel): Implement AV1 to generic descriptor.
      return;
    case VideoCodecType::kVideoCodecH264:
      if (codec_specific_info) {
        H264ToGeneric(codec_specific_info->codecSpecific.H264, frame_id,
                      is_keyframe, rtp_video_header);
      }
      return;
    case VideoCodecType::kVideoCodecMultiplex:
      return;
    case VideoCodecType::kVideoCodecH265:
      // TODO(bugs.webrtc.org/13485): Implement H265 to generic descriptor.
      return;
  }
  RTC_DCHECK_NOTREACHED() << "Unsupported codec.";
}
// Returns the FrameDependencyStructure matching the simulated generic info
// produced by GetRtpVideoHeader for this codec, or nullopt when no
// simulation exists for the codec.
absl::optional<FrameDependencyStructure> RtpPayloadParams::GenericStructure(
    const CodecSpecificInfo* codec_specific_info) {
  if (codec_specific_info == nullptr) {
    return absl::nullopt;
  }
  // This helper shouldn't be used when template structure is specified
  // explicitly.
  RTC_DCHECK(!codec_specific_info->template_structure.has_value());
  switch (codec_specific_info->codecType) {
    case VideoCodecType::kVideoCodecGeneric:
      if (simulate_generic_structure_) {
        return MinimalisticStructure(/*num_spatial_layers=*/1,
                                     /*num_temporal_layer=*/1);
      }
      return absl::nullopt;
    case VideoCodecType::kVideoCodecVP8:
      return MinimalisticStructure(/*num_spatial_layers=*/1,
                                   /*num_temporal_layer=*/kMaxTemporalStreams);
    case VideoCodecType::kVideoCodecVP9: {
      absl::optional<FrameDependencyStructure> structure =
          MinimalisticStructure(
              /*num_spatial_layers=*/kMaxSimulatedSpatialLayers,
              /*num_temporal_layer=*/kMaxTemporalStreams);
      const CodecSpecificInfoVP9& vp9 = codec_specific_info->codecSpecific.VP9;
      if (vp9.ss_data_available && vp9.spatial_layer_resolution_present) {
        RenderResolution first_valid;
        RenderResolution last_valid;
        for (size_t i = 0; i < vp9.num_spatial_layers; ++i) {
          RenderResolution r(vp9.width[i], vp9.height[i]);
          if (r.Valid()) {
            if (!first_valid.Valid()) {
              first_valid = r;
            }
            last_valid = r;
          }
          structure->resolutions.push_back(r);
        }
        if (!last_valid.Valid()) {
          // No valid resolution found. Do not send resolutions.
          structure->resolutions.clear();
        } else {
          structure->resolutions.resize(kMaxSimulatedSpatialLayers, last_valid);
          // VP9 encoder wrapper may disable first few spatial layers by
          // setting invalid resolution (0,0). `structure->resolutions`
          // doesn't support invalid resolution, so reset them to something
          // valid.
          for (RenderResolution& r : structure->resolutions) {
            if (!r.Valid()) {
              r = first_valid;
            }
          }
        }
      }
      return structure;
    }
    case VideoCodecType::kVideoCodecAV1:
    case VideoCodecType::kVideoCodecH264:
    case VideoCodecType::kVideoCodecH265:
    case VideoCodecType::kVideoCodecMultiplex:
      return absl::nullopt;
  }
  RTC_DCHECK_NOTREACHED() << "Unsupported codec.";
}
// Simulates a generic descriptor for a non-layered generic-codec stream:
// a single decode target and a single chain in which every delta frame
// depends on the previous frame.
void RtpPayloadParams::GenericToGeneric(int64_t shared_frame_id,
                                        bool is_keyframe,
                                        RTPVideoHeader* rtp_video_header) {
  RTPVideoHeader::GenericDescriptorInfo& descriptor =
      rtp_video_header->generic.emplace();
  descriptor.frame_id = shared_frame_id;
  descriptor.decode_target_indications.push_back(
      DecodeTargetIndication::kSwitch);
  if (!is_keyframe) {
    const int64_t prev_frame_id = last_shared_frame_id_[0][0];
    RTC_DCHECK_NE(prev_frame_id, -1);
    RTC_DCHECK_LT(prev_frame_id, shared_frame_id);
    descriptor.chain_diffs.push_back(shared_frame_id - prev_frame_id);
    descriptor.dependencies.push_back(prev_frame_id);
  } else {
    // A key frame restarts the chain and invalidates remembered frames.
    descriptor.chain_diffs.push_back(0);
    last_shared_frame_id_[0].fill(-1);
  }
  last_shared_frame_id_[0][0] = shared_frame_id;
}
// Simulates a generic descriptor for H.264 temporal layering: each frame
// depends on the last frame of every temporal layer at or below its own,
// except after a layer sync, where only the base layer frame is referenced.
void RtpPayloadParams::H264ToGeneric(const CodecSpecificInfoH264& h264_info,
                                     int64_t shared_frame_id,
                                     bool is_keyframe,
                                     RTPVideoHeader* rtp_video_header) {
  const int temporal_index =
      h264_info.temporal_idx != kNoTemporalIdx ? h264_info.temporal_idx : 0;
  if (temporal_index >= RtpGenericFrameDescriptor::kMaxTemporalLayers) {
    RTC_LOG(LS_WARNING) << "Temporal and/or spatial index is too high to be "
                           "used with generic frame descriptor.";
    return;
  }
  RTPVideoHeader::GenericDescriptorInfo& generic =
      rtp_video_header->generic.emplace();
  generic.frame_id = shared_frame_id;
  generic.temporal_index = temporal_index;
  if (is_keyframe) {
    RTC_DCHECK_EQ(temporal_index, 0);
    // A key frame invalidates all previously remembered frames.
    last_shared_frame_id_[/*spatial index*/ 0].fill(-1);
    last_shared_frame_id_[/*spatial index*/ 0][temporal_index] =
        shared_frame_id;
    return;
  }
  if (h264_info.base_layer_sync) {
    int64_t tl0_frame_id = last_shared_frame_id_[/*spatial index*/ 0][0];
    // Forget higher-layer frames that precede the sync point; they may no
    // longer be referenced.
    for (int i = 1; i < RtpGenericFrameDescriptor::kMaxTemporalLayers; ++i) {
      if (last_shared_frame_id_[/*spatial index*/ 0][i] < tl0_frame_id) {
        last_shared_frame_id_[/*spatial index*/ 0][i] = -1;
      }
    }
    RTC_DCHECK_GE(tl0_frame_id, 0);
    RTC_DCHECK_LT(tl0_frame_id, shared_frame_id);
    generic.dependencies.push_back(tl0_frame_id);
  } else {
    // Depend on the last frame of each temporal layer up to our own.
    for (int i = 0; i <= temporal_index; ++i) {
      int64_t frame_id = last_shared_frame_id_[/*spatial index*/ 0][i];
      if (frame_id != -1) {
        RTC_DCHECK_LT(frame_id, shared_frame_id);
        generic.dependencies.push_back(frame_id);
      }
    }
  }
  last_shared_frame_id_[/*spatial_index*/ 0][temporal_index] = shared_frame_id;
}
// Simulates a generic descriptor for VP8 temporal layering. Dependencies
// come either from explicit encoder buffer usage (new path) or from the
// temporal-layer bookkeeping (deprecated path); the single chain tracks
// base temporal layer frames.
void RtpPayloadParams::Vp8ToGeneric(const CodecSpecificInfoVP8& vp8_info,
                                    int64_t shared_frame_id,
                                    bool is_keyframe,
                                    RTPVideoHeader* rtp_video_header) {
  const auto& vp8_header =
      absl::get<RTPVideoHeaderVP8>(rtp_video_header->video_type_header);
  const int spatial_index = 0;
  const int temporal_index =
      vp8_header.temporalIdx != kNoTemporalIdx ? vp8_header.temporalIdx : 0;
  if (temporal_index >= RtpGenericFrameDescriptor::kMaxTemporalLayers ||
      spatial_index >= RtpGenericFrameDescriptor::kMaxSpatialLayers) {
    RTC_LOG(LS_WARNING) << "Temporal and/or spatial index is too high to be "
                           "used with generic frame descriptor.";
    return;
  }
  RTPVideoHeader::GenericDescriptorInfo& generic =
      rtp_video_header->generic.emplace();
  generic.frame_id = shared_frame_id;
  generic.spatial_index = spatial_index;
  generic.temporal_index = temporal_index;
  // Generate decode target indications: layers below our temporal index do
  // not contain this frame; all others can switch on it.
  RTC_DCHECK_LT(temporal_index, kMaxTemporalStreams);
  generic.decode_target_indications.resize(kMaxTemporalStreams);
  auto it = std::fill_n(generic.decode_target_indications.begin(),
                        temporal_index, DecodeTargetIndication::kNotPresent);
  std::fill(it, generic.decode_target_indications.end(),
            DecodeTargetIndication::kSwitch);
  // Frame dependencies.
  if (vp8_info.useExplicitDependencies) {
    SetDependenciesVp8New(vp8_info, shared_frame_id, is_keyframe,
                          vp8_header.layerSync, &generic);
  } else {
    SetDependenciesVp8Deprecated(vp8_info, shared_frame_id, is_keyframe,
                                 spatial_index, temporal_index,
                                 vp8_header.layerSync, &generic);
  }
  // Calculate chains.
  generic.chain_diffs = {
      (is_keyframe || chain_last_frame_id_[0] < 0)
          ? 0
          : static_cast<int>(shared_frame_id - chain_last_frame_id_[0])};
  // Only base temporal layer frames extend the chain.
  if (temporal_index == 0) {
    chain_last_frame_id_[0] = shared_frame_id;
  }
}
// Simulates a generic descriptor for VP9. Decode target indications, frame
// dependencies (flexible mode only; non-flexible mode supports only
// non-layered streams) and per-spatial-layer chains are derived from the VP9
// RTP header. Inconsistent layer info results in no descriptor at all.
void RtpPayloadParams::Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
                                    int64_t shared_frame_id,
                                    RTPVideoHeader& rtp_video_header) {
  const auto& vp9_header =
      absl::get<RTPVideoHeaderVP9>(rtp_video_header.video_type_header);
  const int num_spatial_layers = kMaxSimulatedSpatialLayers;
  const int first_active_spatial_id = vp9_header.first_active_layer;
  const int last_active_spatial_id = vp9_header.num_spatial_layers - 1;
  const int num_temporal_layers = kMaxTemporalStreams;
  static_assert(num_spatial_layers <=
                RtpGenericFrameDescriptor::kMaxSpatialLayers);
  static_assert(num_temporal_layers <=
                RtpGenericFrameDescriptor::kMaxTemporalLayers);
  static_assert(num_spatial_layers <= DependencyDescriptor::kMaxSpatialIds);
  static_assert(num_temporal_layers <= DependencyDescriptor::kMaxTemporalIds);
  int spatial_index =
      vp9_header.spatial_idx != kNoSpatialIdx ? vp9_header.spatial_idx : 0;
  int temporal_index =
      vp9_header.temporal_idx != kNoTemporalIdx ? vp9_header.temporal_idx : 0;
  if (!(temporal_index < num_temporal_layers &&
        first_active_spatial_id <= spatial_index &&
        spatial_index <= last_active_spatial_id &&
        last_active_spatial_id < num_spatial_layers)) {
    // Prefer to generate no generic layering than an inconsistent one.
    RTC_LOG(LS_ERROR) << "Inconsistent layer id sid=" << spatial_index
                      << ",tid=" << temporal_index
                      << " in VP9 header. Active spatial ids: ["
                      << first_active_spatial_id << ","
                      << last_active_spatial_id << "]";
    return;
  }
  RTPVideoHeader::GenericDescriptorInfo& result =
      rtp_video_header.generic.emplace();
  result.frame_id = shared_frame_id;
  result.spatial_index = spatial_index;
  result.temporal_index = temporal_index;
  // One decode target per (spatial, temporal) layer pair, in sid-major order
  // (matching MinimalisticStructure).
  result.decode_target_indications.reserve(num_spatial_layers *
                                           num_temporal_layers);
  for (int sid = 0; sid < num_spatial_layers; ++sid) {
    for (int tid = 0; tid < num_temporal_layers; ++tid) {
      DecodeTargetIndication dti;
      if (sid < spatial_index || tid < temporal_index) {
        dti = DecodeTargetIndication::kNotPresent;
      } else if (spatial_index != sid &&
                 vp9_header.non_ref_for_inter_layer_pred) {
        dti = DecodeTargetIndication::kNotPresent;
      } else if (sid == spatial_index && tid == temporal_index) {
        // Assume that if frame is decodable, all of its own layer is decodable.
        dti = DecodeTargetIndication::kSwitch;
      } else if (sid == spatial_index && vp9_header.temporal_up_switch) {
        dti = DecodeTargetIndication::kSwitch;
      } else if (!vp9_header.inter_pic_predicted) {
        // Key frame or spatial upswitch
        dti = DecodeTargetIndication::kSwitch;
      } else {
        // Make no other assumptions. That should be safe, though suboptimal.
        // To provide more accurate dti, encoder wrapper should fill in
        // CodecSpecificInfo::generic_frame_info
        dti = DecodeTargetIndication::kRequired;
      }
      result.decode_target_indications.push_back(dti);
    }
  }
  // Calculate frame dependencies.
  static constexpr int kPictureDiffLimit = 128;
  if (last_vp9_frame_id_.empty()) {
    // Create the array only if it is ever used.
    last_vp9_frame_id_.resize(kPictureDiffLimit);
  }
  if (vp9_header.flexible_mode) {
    if (vp9_header.inter_layer_predicted && spatial_index > 0) {
      result.dependencies.push_back(
          last_vp9_frame_id_[vp9_header.picture_id % kPictureDiffLimit]
                            [spatial_index - 1]);
    }
    if (vp9_header.inter_pic_predicted) {
      for (size_t i = 0; i < vp9_header.num_ref_pics; ++i) {
        // picture_id is 15 bit number that wraps around. Though underflow may
        // produce picture that exceeds 2^15, it is ok because in this
        // code block only last 7 bits of the picture_id are used.
        uint16_t depend_on = vp9_header.picture_id - vp9_header.pid_diff[i];
        result.dependencies.push_back(
            last_vp9_frame_id_[depend_on % kPictureDiffLimit][spatial_index]);
      }
    }
    last_vp9_frame_id_[vp9_header.picture_id % kPictureDiffLimit]
                      [spatial_index] = shared_frame_id;
  } else {
    // Implementing general conversion logic for non-flexible mode requires some
    // work and we will almost certainly never need it, so for now support only
    // non-layered streams.
    if (spatial_index > 0 || temporal_index > 0) {
      // Prefer to generate no generic layering than an inconsistent one.
      rtp_video_header.generic.reset();
      return;
    }
    if (vp9_header.inter_pic_predicted) {
      // Since we only support non-scalable streams we only need to save the
      // last frame id.
      result.dependencies.push_back(last_vp9_frame_id_[0][0]);
    }
    last_vp9_frame_id_[0][0] = shared_frame_id;
  }
  // Bitmask of decode targets belonging to the active spatial layers
  // [first_active_spatial_id, last_active_spatial_id].
  result.active_decode_targets =
      ((uint32_t{1} << num_temporal_layers * (last_active_spatial_id + 1)) -
       1) ^
      ((uint32_t{1} << num_temporal_layers * first_active_spatial_id) - 1);
  // Calculate chains, assuming chain includes all frames with temporal_id = 0
  if (!vp9_header.inter_pic_predicted && !vp9_header.inter_layer_predicted) {
    // Assume frames without dependencies also reset chains.
    for (int sid = spatial_index; sid <= last_active_spatial_id; ++sid) {
      chain_last_frame_id_[sid] = -1;
    }
  }
  result.chain_diffs.resize(num_spatial_layers, 0);
  for (int sid = first_active_spatial_id; sid <= last_active_spatial_id;
       ++sid) {
    if (chain_last_frame_id_[sid] == -1) {
      result.chain_diffs[sid] = 0;
      continue;
    }
    int64_t chain_diff = shared_frame_id - chain_last_frame_id_[sid];
    // chain_diff is sent as one byte on the wire; reset the chain rather than
    // emit a wrapped value.
    if (chain_diff >= 256) {
      RTC_LOG(LS_ERROR)
          << "Too many frames since last VP9 T0 frame for spatial layer #"
          << sid << " at frame#" << shared_frame_id;
      chain_last_frame_id_[sid] = -1;
      chain_diff = 0;
    }
    result.chain_diffs[sid] = chain_diff;
  }
  if (temporal_index == 0) {
    chain_last_frame_id_[spatial_index] = shared_frame_id;
    if (!vp9_header.non_ref_for_inter_layer_pred) {
      // This frame may also be referenced by higher spatial layers.
      for (int sid = spatial_index + 1; sid <= last_active_spatial_id; ++sid) {
        chain_last_frame_id_[sid] = shared_frame_id;
      }
    }
  }
}
// Derives VP8 frame dependencies from temporal-layer bookkeeping (legacy
// path, used when the encoder does not report explicit buffer usage).
// Mutually exclusive with SetDependenciesVp8New for the object's lifetime.
void RtpPayloadParams::SetDependenciesVp8Deprecated(
    const CodecSpecificInfoVP8& vp8_info,
    int64_t shared_frame_id,
    bool is_keyframe,
    int spatial_index,
    int temporal_index,
    bool layer_sync,
    RTPVideoHeader::GenericDescriptorInfo* generic) {
  RTC_DCHECK(!vp8_info.useExplicitDependencies);
  RTC_DCHECK(!new_version_used_.has_value() || !new_version_used_.value());
  new_version_used_ = false;
  if (is_keyframe) {
    RTC_DCHECK_EQ(temporal_index, 0);
    // A key frame has no dependencies and invalidates remembered frames.
    last_shared_frame_id_[spatial_index].fill(-1);
    last_shared_frame_id_[spatial_index][temporal_index] = shared_frame_id;
    return;
  }
  if (layer_sync) {
    int64_t tl0_frame_id = last_shared_frame_id_[spatial_index][0];
    // Forget higher-layer frames older than the sync point; they may no
    // longer be referenced.
    for (int i = 1; i < RtpGenericFrameDescriptor::kMaxTemporalLayers; ++i) {
      if (last_shared_frame_id_[spatial_index][i] < tl0_frame_id) {
        last_shared_frame_id_[spatial_index][i] = -1;
      }
    }
    RTC_DCHECK_GE(tl0_frame_id, 0);
    RTC_DCHECK_LT(tl0_frame_id, shared_frame_id);
    generic->dependencies.push_back(tl0_frame_id);
  } else {
    // Depend on the last frame of each temporal layer up to our own.
    for (int i = 0; i <= temporal_index; ++i) {
      int64_t frame_id = last_shared_frame_id_[spatial_index][i];
      if (frame_id != -1) {
        RTC_DCHECK_LT(frame_id, shared_frame_id);
        generic->dependencies.push_back(frame_id);
      }
    }
  }
  last_shared_frame_id_[spatial_index][temporal_index] = shared_frame_id;
}
// Derives VP8 frame dependencies from the encoder's explicit reference and
// update buffer reports. Mutually exclusive with SetDependenciesVp8Deprecated
// for the object's lifetime.
void RtpPayloadParams::SetDependenciesVp8New(
    const CodecSpecificInfoVP8& vp8_info,
    int64_t shared_frame_id,
    bool is_keyframe,
    bool layer_sync,
    RTPVideoHeader::GenericDescriptorInfo* generic) {
  RTC_DCHECK(vp8_info.useExplicitDependencies);
  RTC_DCHECK(!new_version_used_.has_value() || new_version_used_.value());
  new_version_used_ = true;
  if (is_keyframe) {
    // A key frame refreshes all reference buffers and depends on nothing.
    RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
    buffer_id_to_frame_id_.fill(shared_frame_id);
    return;
  }
  constexpr size_t kBuffersCountVp8 = CodecSpecificInfoVP8::kBuffersCount;
  RTC_DCHECK_GT(vp8_info.referencedBuffersCount, 0u);
  RTC_DCHECK_LE(vp8_info.referencedBuffersCount,
                arraysize(vp8_info.referencedBuffers));
  // Map referenced buffers to the frames that last updated them; deduplicate
  // since several buffers may hold the same frame.
  for (size_t i = 0; i < vp8_info.referencedBuffersCount; ++i) {
    const size_t referenced_buffer = vp8_info.referencedBuffers[i];
    RTC_DCHECK_LT(referenced_buffer, kBuffersCountVp8);
    RTC_DCHECK_LT(referenced_buffer, buffer_id_to_frame_id_.size());
    const int64_t dependency_frame_id =
        buffer_id_to_frame_id_[referenced_buffer];
    RTC_DCHECK_GE(dependency_frame_id, 0);
    RTC_DCHECK_LT(dependency_frame_id, shared_frame_id);
    const bool is_new_dependency =
        std::find(generic->dependencies.begin(), generic->dependencies.end(),
                  dependency_frame_id) == generic->dependencies.end();
    if (is_new_dependency) {
      generic->dependencies.push_back(dependency_frame_id);
    }
  }
  // Record which buffers this frame refreshes.
  RTC_DCHECK_LE(vp8_info.updatedBuffersCount, kBuffersCountVp8);
  for (size_t i = 0; i < vp8_info.updatedBuffersCount; ++i) {
    const size_t updated_id = vp8_info.updatedBuffers[i];
    buffer_id_to_frame_id_[updated_id] = shared_frame_id;
  }
  RTC_DCHECK_LE(buffer_id_to_frame_id_.size(), kBuffersCountVp8);
}
} // namespace webrtc

View file

@ -0,0 +1,134 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_PAYLOAD_PARAMS_H_
#define CALL_RTP_PAYLOAD_PARAMS_H_
#include <array>
#include <vector>
#include "absl/types/optional.h"
#include "api/field_trials_view.h"
#include "api/video_codecs/video_encoder.h"
#include "call/rtp_config.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "modules/video_coding/chain_diff_calculator.h"
#include "modules/video_coding/frame_dependencies_calculator.h"
#include "modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
// State for setting picture id and tl0 pic idx, for VP8 and VP9
// TODO(nisse): Make these properties not codec specific.
// Translates per-codec encoder output (CodecSpecificInfo) into the
// codec-agnostic generic frame descriptor carried in the RTP video header,
// tracking frame-id state per SSRC across calls.
class RtpPayloadParams final {
 public:
  RtpPayloadParams(uint32_t ssrc,
                   const RtpPayloadState* state,
                   const FieldTrialsView& trials);
  RtpPayloadParams(const RtpPayloadParams& other);
  ~RtpPayloadParams();
  // Builds the RTP video header (including generic descriptor info) for one
  // encoded image. `shared_frame_id` is a caller-provided frame id shared
  // across streams.
  RTPVideoHeader GetRtpVideoHeader(const EncodedImage& image,
                                   const CodecSpecificInfo* codec_specific_info,
                                   int64_t shared_frame_id);
  // Returns structure that aligns with simulated generic info generated by
  // `GetRtpVideoHeader` for the `codec_specific_info`
  absl::optional<FrameDependencyStructure> GenericStructure(
      const CodecSpecificInfo* codec_specific_info);
  uint32_t ssrc() const;
  RtpPayloadState state() const;

 private:
  void SetCodecSpecific(RTPVideoHeader* rtp_video_header,
                        bool first_frame_in_picture);
  RTPVideoHeader::GenericDescriptorInfo GenericDescriptorFromFrameInfo(
      const GenericFrameInfo& frame_info,
      int64_t frame_id);
  // Per-codec translation helpers that fill the generic descriptor fields of
  // `rtp_video_header`.
  void SetGeneric(const CodecSpecificInfo* codec_specific_info,
                  int64_t frame_id,
                  bool is_keyframe,
                  RTPVideoHeader* rtp_video_header);
  void Vp8ToGeneric(const CodecSpecificInfoVP8& vp8_info,
                    int64_t shared_frame_id,
                    bool is_keyframe,
                    RTPVideoHeader* rtp_video_header);
  void Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
                    int64_t shared_frame_id,
                    RTPVideoHeader& rtp_video_header);
  void H264ToGeneric(const CodecSpecificInfoH264& h264_info,
                     int64_t shared_frame_id,
                     bool is_keyframe,
                     RTPVideoHeader* rtp_video_header);
  void GenericToGeneric(int64_t shared_frame_id,
                        bool is_keyframe,
                        RTPVideoHeader* rtp_video_header);
  // TODO(bugs.webrtc.org/10242): Delete SetDependenciesVp8Deprecated() and move
  // the logic in SetDependenciesVp8New() into Vp8ToGeneric() once all hardware
  // wrappers have been updated.
  void SetDependenciesVp8Deprecated(
      const CodecSpecificInfoVP8& vp8_info,
      int64_t shared_frame_id,
      bool is_keyframe,
      int spatial_index,
      int temporal_index,
      bool layer_sync,
      RTPVideoHeader::GenericDescriptorInfo* generic);
  void SetDependenciesVp8New(const CodecSpecificInfoVP8& vp8_info,
                             int64_t shared_frame_id,
                             bool is_keyframe,
                             bool layer_sync,
                             RTPVideoHeader::GenericDescriptorInfo* generic);
  FrameDependenciesCalculator dependencies_calculator_;
  ChainDiffCalculator chains_calculator_;
  // TODO(bugs.webrtc.org/10242): Remove once all encoder-wrappers are updated.
  // Holds the last shared frame id for a given (spatial, temporal) layer.
  std::array<std::array<int64_t, RtpGenericFrameDescriptor::kMaxTemporalLayers>,
             RtpGenericFrameDescriptor::kMaxSpatialLayers>
      last_shared_frame_id_;
  // circular buffer of frame ids for the last 128 vp9 pictures.
  // ids for the `picture_id` are stored at the index `picture_id % 128`.
  std::vector<std::array<int64_t, RtpGenericFrameDescriptor::kMaxSpatialLayers>>
      last_vp9_frame_id_;
  // Last frame id for each chain
  std::array<int64_t, RtpGenericFrameDescriptor::kMaxSpatialLayers>
      chain_last_frame_id_;
  // TODO(eladalon): When additional codecs are supported,
  // set kMaxCodecBuffersCount to the max() of these codecs' buffer count.
  static constexpr size_t kMaxCodecBuffersCount =
      CodecSpecificInfoVP8::kBuffersCount;
  // Maps buffer IDs to the frame-ID stored in them.
  std::array<int64_t, kMaxCodecBuffersCount> buffer_id_to_frame_id_;
  // Until we remove SetDependenciesVp8Deprecated(), we should make sure
  // that, for a given object, we either always use
  // SetDependenciesVp8Deprecated(), or always use SetDependenciesVp8New().
  // TODO(bugs.webrtc.org/10242): Remove.
  absl::optional<bool> new_version_used_;
  const uint32_t ssrc_;
  RtpPayloadState state_;
  const bool generic_picture_id_experiment_;
  const bool simulate_generic_structure_;
};
} // namespace webrtc
#endif // CALL_RTP_PAYLOAD_PARAMS_H_

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_stream_receiver_controller.h"
#include <memory>
#include "rtc_base/logging.h"
namespace webrtc {
RtpStreamReceiverController::Receiver::Receiver(
    RtpStreamReceiverController* controller,
    uint32_t ssrc,
    RtpPacketSinkInterface* sink)
    : controller_(controller), sink_(sink) {
  // Register `sink_` for packets with this SSRC. Failure is logged but
  // otherwise tolerated; the destructor's RemoveSink handles both outcomes.
  if (!controller_->AddSink(ssrc, sink_)) {
    RTC_LOG(LS_ERROR)
        << "RtpStreamReceiverController::Receiver::Receiver: Sink "
           "could not be added for SSRC="
        << ssrc << ".";
  }
}
RtpStreamReceiverController::Receiver::~Receiver() {
  // Undo the constructor's AddSink registration.
  // This may fail, if corresponding AddSink in the constructor failed.
  controller_->RemoveSink(sink_);
}
// Construction and destruction need no logic beyond member defaults.
RtpStreamReceiverController::RtpStreamReceiverController() = default;

RtpStreamReceiverController::~RtpStreamReceiverController() = default;
std::unique_ptr<RtpStreamReceiverInterface>
RtpStreamReceiverController::CreateReceiver(uint32_t ssrc,
                                            RtpPacketSinkInterface* sink) {
  // The returned Receiver registers `sink` for `ssrc` in its constructor and
  // unregisters it again in its destructor.
  return std::make_unique<Receiver>(this, ssrc, sink);
}
// Forwards `packet` to the demuxer; returns the demuxer's result (whether
// the packet was routed to a registered sink).
bool RtpStreamReceiverController::OnRtpPacket(const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&demuxer_sequence_);
  return demuxer_.OnRtpPacket(packet);
}
void RtpStreamReceiverController::OnRecoveredPacket(
    const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&demuxer_sequence_);
  // Recovered (FEC) packets are demuxed like ordinary RTP packets; the bool
  // result is intentionally ignored since this callback returns void.
  demuxer_.OnRtpPacket(packet);
}
// Sequence-checked wrapper around RtpDemuxer::AddSink().
bool RtpStreamReceiverController::AddSink(uint32_t ssrc,
                                          RtpPacketSinkInterface* sink) {
  RTC_DCHECK_RUN_ON(&demuxer_sequence_);
  return demuxer_.AddSink(ssrc, sink);
}

// Sequence-checked wrapper around RtpDemuxer::RemoveSink().
bool RtpStreamReceiverController::RemoveSink(
    const RtpPacketSinkInterface* sink) {
  RTC_DCHECK_RUN_ON(&demuxer_sequence_);
  return demuxer_.RemoveSink(sink);
}
} // namespace webrtc

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_STREAM_RECEIVER_CONTROLLER_H_
#define CALL_RTP_STREAM_RECEIVER_CONTROLLER_H_
#include <memory>
#include "api/sequence_checker.h"
#include "call/rtp_demuxer.h"
#include "call/rtp_stream_receiver_controller_interface.h"
#include "modules/rtp_rtcp/include/recovered_packet_receiver.h"
namespace webrtc {
class RtpPacketReceived;
// This class represents the RTP receive parsing and demuxing, for a
// single RTP session.
// TODO(bugs.webrtc.org/7135): Add RTCP processing, we should aim to terminate
// RTCP and not leave any RTCP processing to individual receive streams.
class RtpStreamReceiverController : public RtpStreamReceiverControllerInterface,
                                    public RecoveredPacketReceiver {
 public:
  RtpStreamReceiverController();
  ~RtpStreamReceiverController() override;
  // Implements RtpStreamReceiverControllerInterface.
  // The returned object keeps `sink` registered for `ssrc` for its lifetime.
  std::unique_ptr<RtpStreamReceiverInterface> CreateReceiver(
      uint32_t ssrc,
      RtpPacketSinkInterface* sink) override;
  // TODO(bugs.webrtc.org/7135): Not yet responsible for parsing.
  bool OnRtpPacket(const RtpPacketReceived& packet);
  // Implements RecoveredPacketReceiver.
  // Responsible for demuxing recovered FLEXFEC packets.
  void OnRecoveredPacket(const RtpPacketReceived& packet) override;

 private:
  // RAII handle: registers `sink` with the controller on construction and
  // unregisters it on destruction.
  class Receiver : public RtpStreamReceiverInterface {
   public:
    Receiver(RtpStreamReceiverController* controller,
             uint32_t ssrc,
             RtpPacketSinkInterface* sink);
    ~Receiver() override;

   private:
    RtpStreamReceiverController* const controller_;
    RtpPacketSinkInterface* const sink_;
  };
  // Thread-safe wrappers for the corresponding RtpDemuxer methods.
  bool AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink);
  bool RemoveSink(const RtpPacketSinkInterface* sink);
  // TODO(bugs.webrtc.org/11993): We expect construction and all methods to be
  // called on the same thread/tq. Currently this is the worker thread
  // (including OnRtpPacket) but a more natural fit would be the network thread.
  // Using a sequence checker to ensure that usage is correct but at the same
  // time not require a specific thread/tq, an instance of this class + the
  // associated functionality should be easily moved from one execution context
  // to another (i.e. when network packets don't hop to the worker thread inside
  // of Call).
  SequenceChecker demuxer_sequence_;
  // At this level the demuxer is only configured to demux by SSRC, so don't
  // worry about MIDs (MIDs are handled by upper layers).
  RtpDemuxer demuxer_ RTC_GUARDED_BY(&demuxer_sequence_){false /*use_mid*/};
};
} // namespace webrtc
#endif // CALL_RTP_STREAM_RECEIVER_CONTROLLER_H_

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_STREAM_RECEIVER_CONTROLLER_INTERFACE_H_
#define CALL_RTP_STREAM_RECEIVER_CONTROLLER_INTERFACE_H_
#include <memory>
#include "call/rtp_packet_sink_interface.h"
namespace webrtc {
// An RtpStreamReceiver is responsible for the rtp-specific but
// media-independent state needed for receiving an RTP stream.
// TODO(bugs.webrtc.org/7135): Currently, only owns the association between ssrc
// and the stream's RtpPacketSinkInterface. Ownership of corresponding objects
// from modules/rtp_rtcp/ should move to this class (or rather, the
// corresponding implementation class). We should add methods for getting rtp
// receive stats, and for sending RTCP messages related to the receive stream.
class RtpStreamReceiverInterface {
 public:
  // Destroying the receiver ends the association it represents.
  virtual ~RtpStreamReceiverInterface() {}
};
// This class acts as a factory for RtpStreamReceiver objects.
class RtpStreamReceiverControllerInterface {
 public:
  virtual ~RtpStreamReceiverControllerInterface() {}
  // Creates a receiver that associates `sink` with `ssrc` for the lifetime of
  // the returned object.
  virtual std::unique_ptr<RtpStreamReceiverInterface> CreateReceiver(
      uint32_t ssrc,
      RtpPacketSinkInterface* sink) = 0;
};
} // namespace webrtc
#endif // CALL_RTP_STREAM_RECEIVER_CONTROLLER_INTERFACE_H_

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_TRANSPORT_CONFIG_H_
#define CALL_RTP_TRANSPORT_CONFIG_H_
#include <memory>
#include "absl/types/optional.h"
#include "api/environment/environment.h"
#include "api/network_state_predictor.h"
#include "api/transport/bitrate_settings.h"
#include "api/transport/network_control.h"
#include "api/units/time_delta.h"
namespace webrtc {
// Construction-time configuration for the RTP transport layer (congestion
// control, pacing and bandwidth estimation).
struct RtpTransportConfig {
  // Provides clock, field trials, event log and task queue factory.
  Environment env;
  // Bitrate config used until valid bitrate estimates are calculated. Also
  // used to cap total bitrate used. This comes from the remote connection.
  BitrateConstraints bitrate_config;
  // NetworkStatePredictor to use for this call.
  NetworkStatePredictorFactoryInterface* network_state_predictor_factory =
      nullptr;
  // Network controller factory to use for this call.
  // When null, a GoogCC fallback factory is used instead.
  NetworkControllerFactoryInterface* network_controller_factory = nullptr;
  // The burst interval of the pacer, see TaskQueuePacedSender constructor.
  absl::optional<TimeDelta> pacer_burst_interval;
};
} // namespace webrtc
#endif // CALL_RTP_TRANSPORT_CONFIG_H_

View file

@ -0,0 +1,761 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_transport_controller_send.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/transport/goog_cc_factory.h"
#include "api/transport/network_types.h"
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "call/rtp_video_sender.h"
#include "logging/rtc_event_log/events/rtc_event_remote_estimate.h"
#include "logging/rtc_event_log/events/rtc_event_route_change.h"
#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/rate_limiter.h"
namespace webrtc {
namespace {
// Window used by the retransmission rate limiter.
static const int64_t kRetransmitWindowSizeMs = 500;
// Upper bound on accepted per-packet transport overhead; larger values are
// rejected in OnTransportOverheadChanged().
static const size_t kMaxOverheadBytes = 500;
// Interval used for the periodic pacer queue update task.
constexpr TimeDelta kPacerQueueUpdateInterval = TimeDelta::Millis(25);
// Converts bps integers into TargetRateConstraints. A negative min, a
// non-positive max, and a non-positive start are each treated as "unset"
// (zero lower bound, infinite upper bound, no starting rate).
TargetRateConstraints ConvertConstraints(int min_bitrate_bps,
                                         int max_bitrate_bps,
                                         int start_bitrate_bps,
                                         Clock* clock) {
  TargetRateConstraints constraints;
  constraints.at_time = Timestamp::Millis(clock->TimeInMilliseconds());
  if (min_bitrate_bps >= 0) {
    constraints.min_data_rate = DataRate::BitsPerSec(min_bitrate_bps);
  } else {
    constraints.min_data_rate = DataRate::Zero();
  }
  if (max_bitrate_bps > 0) {
    constraints.max_data_rate = DataRate::BitsPerSec(max_bitrate_bps);
  } else {
    constraints.max_data_rate = DataRate::Infinity();
  }
  if (start_bitrate_bps > 0) {
    constraints.starting_rate = DataRate::BitsPerSec(start_bitrate_bps);
  }
  return constraints;
}
// Convenience overload extracting the three bitrate fields from a
// BitrateConstraints struct.
TargetRateConstraints ConvertConstraints(const BitrateConstraints& constraints,
                                         Clock* clock) {
  return ConvertConstraints(constraints.min_bitrate_bps,
                            constraints.max_bitrate_bps,
                            constraints.start_bitrate_bps, clock);
}
// True if either endpoint of `route` goes through a TURN relay.
bool IsRelayed(const rtc::NetworkRoute& route) {
  return route.local.uses_turn() || route.remote.uses_turn();
}
} // namespace
// Sets up pacer, congestion controller factories and field-trial driven
// options. The congestion controller itself is created lazily (see
// MaybeCreateControllers), once the network is available and an observer is
// registered.
RtpTransportControllerSend::RtpTransportControllerSend(
    const RtpTransportConfig& config)
    : env_(config.env),
      task_queue_(TaskQueueBase::Current()),
      bitrate_configurator_(config.bitrate_config),
      pacer_started_(false),
      pacer_(&env_.clock(),
             &packet_router_,
             env_.field_trials(),
             TimeDelta::Millis(5),
             3),
      observer_(nullptr),
      controller_factory_override_(config.network_controller_factory),
      controller_factory_fallback_(
          std::make_unique<GoogCcNetworkControllerFactory>(
              config.network_state_predictor_factory)),
      process_interval_(controller_factory_fallback_->GetProcessInterval()),
      last_report_block_time_(
          Timestamp::Millis(env_.clock().TimeInMilliseconds())),
      reset_feedback_on_route_change_(
          !env_.field_trials().IsEnabled("WebRTC-Bwe-NoFeedbackReset")),
      add_pacing_to_cwin_(env_.field_trials().IsEnabled(
          "WebRTC-AddPacingToCongestionWindowPushback")),
      relay_bandwidth_cap_("relay_cap", DataRate::PlusInfinity()),
      transport_overhead_bytes_per_packet_(0),
      network_available_(false),
      congestion_window_size_(DataSize::PlusInfinity()),
      is_congested_(false),
      retransmission_rate_limiter_(&env_.clock(), kRetransmitWindowSizeMs) {
  ParseFieldTrial(
      {&relay_bandwidth_cap_},
      env_.field_trials().Lookup("WebRTC-Bwe-NetworkRouteConstraints"));
  // Cache the initial rate constraints; they seed the congestion controller
  // when it is created later.
  initial_config_.constraints =
      ConvertConstraints(config.bitrate_config, &env_.clock());
  initial_config_.event_log = &env_.event_log();
  initial_config_.key_value_config = &env_.field_trials();
  RTC_DCHECK(config.bitrate_config.start_bitrate_bps > 0);
  // Pace at the configured start rate until an estimate arrives.
  pacer_.SetPacingRates(
      DataRate::BitsPerSec(config.bitrate_config.start_bitrate_bps),
      DataRate::Zero());
  if (config.pacer_burst_interval) {
    // Default burst interval overridden by config.
    pacer_.SetSendBurstInterval(*config.pacer_burst_interval);
  }
}
RtpTransportControllerSend::~RtpTransportControllerSend() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // All video senders must have been released via DestroyRtpVideoSender()
  // before the controller is destroyed.
  RTC_DCHECK(video_rtp_senders_.empty());
  // Stop repeating tasks so they cannot fire after destruction.
  pacer_queue_update_task_.Stop();
  controller_task_.Stop();
}
// Creates and owns a new RtpVideoSender wired to this controller's clock,
// retransmission rate limiter and feedback plumbing. Returns a non-owning
// pointer; release with DestroyRtpVideoSender().
RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender(
    const std::map<uint32_t, RtpState>& suspended_ssrcs,
    const std::map<uint32_t, RtpPayloadState>& states,
    const RtpConfig& rtp_config,
    int rtcp_report_interval_ms,
    Transport* send_transport,
    const RtpSenderObservers& observers,
    RtcEventLog* event_log,
    std::unique_ptr<FecController> fec_controller,
    const RtpSenderFrameEncryptionConfig& frame_encryption_config,
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  video_rtp_senders_.push_back(std::make_unique<RtpVideoSender>(
      &env_.clock(), suspended_ssrcs, states, rtp_config,
      rtcp_report_interval_ms, send_transport, observers,
      // TODO(holmer): Remove this circular dependency by injecting
      // the parts of RtpTransportControllerSendInterface that are really used.
      this, event_log, &retransmission_rate_limiter_, std::move(fec_controller),
      frame_encryption_config.frame_encryptor,
      frame_encryption_config.crypto_options, std::move(frame_transformer),
      env_.field_trials(), &env_.task_queue_factory()));
  return video_rtp_senders_.back().get();
}
void RtpTransportControllerSend::DestroyRtpVideoSender(
    RtpVideoSenderInterface* rtp_video_sender) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Locate the owned entry matching the raw pointer; it must have been
  // created earlier by CreateRtpVideoSender().
  auto it = video_rtp_senders_.begin();
  while (it != video_rtp_senders_.end() && it->get() != rtp_video_sender) {
    ++it;
  }
  RTC_DCHECK(it != video_rtp_senders_.end());
  video_rtp_senders_.erase(it);
}
// Makes `rtp_module` reachable by the pacer and refreshes the
// probe-without-media setting, which depends on the registered modules.
void RtpTransportControllerSend::RegisterSendingRtpStream(
    RtpRtcpInterface& rtp_module) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Allow pacer to send packets using this module.
  packet_router_.AddSendRtpModule(&rtp_module,
                                  /*remb_candidate=*/true);
  pacer_.SetAllowProbeWithoutMediaPacket(
      bwe_settings_.allow_probe_without_media &&
      packet_router_.SupportsRtxPayloadPadding());
}
// Reverses RegisterSendingRtpStream() and flushes any packets for the
// module's SSRCs (media, RTX, FlexFEC) still queued in the pacer.
void RtpTransportControllerSend::DeRegisterSendingRtpStream(
    RtpRtcpInterface& rtp_module) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Disabling media, remove from packet router map to reduce size and
  // prevent any stray packets in the pacer from asynchronously arriving
  // to a disabled module.
  packet_router_.RemoveSendRtpModule(&rtp_module);
  // Clear the pacer queue of any packets pertaining to this module.
  pacer_.RemovePacketsForSsrc(rtp_module.SSRC());
  if (rtp_module.RtxSsrc().has_value()) {
    pacer_.RemovePacketsForSsrc(*rtp_module.RtxSsrc());
  }
  if (rtp_module.FlexfecSsrc().has_value()) {
    pacer_.RemovePacketsForSsrc(*rtp_module.FlexfecSsrc());
  }
  pacer_.SetAllowProbeWithoutMediaPacket(
      bwe_settings_.allow_probe_without_media &&
      packet_router_.SupportsRtxPayloadPadding());
}
// Pushes the latest target transfer rate (if any) from the control handler
// to the retransmission rate limiter and the registered observer.
void RtpTransportControllerSend::UpdateControlState() {
  absl::optional<TargetTransferRate> update = control_handler_->GetUpdate();
  if (!update)
    return;
  retransmission_rate_limiter_.SetMaxRate(update->target_rate.bps());
  // We won't create control_handler_ until we have an observer.
  RTC_DCHECK(observer_ != nullptr);
  observer_->OnTargetTransferRate(*update);
}
void RtpTransportControllerSend::UpdateCongestedState() {
if (auto update = GetCongestedStateUpdate()) {
is_congested_ = update.value();
pacer_.SetCongested(update.value());
}
}
absl::optional<bool> RtpTransportControllerSend::GetCongestedStateUpdate()
    const {
  // Congested when in-flight data reaches the congestion window. A value is
  // only returned when the state differs from the cached one.
  const bool congested = transport_feedback_adapter_.GetOutstandingData() >=
                         congestion_window_size_;
  if (congested == is_congested_)
    return absl::nullopt;
  return congested;
}
PacketRouter* RtpTransportControllerSend::packet_router() {
  return &packet_router_;
}

// This object implements the network-state-estimate callback itself.
NetworkStateEstimateObserver*
RtpTransportControllerSend::network_state_estimate_observer() {
  return this;
}

// This object implements the transport-feedback callback itself.
TransportFeedbackObserver*
RtpTransportControllerSend::transport_feedback_observer() {
  return this;
}

// Outgoing packets are handed to the pacer.
RtpPacketSender* RtpTransportControllerSend::packet_sender() {
  return &pacer_;
}
// Updates the stream-based allocation limits and forwards the merged stream
// config to the congestion controller.
void RtpTransportControllerSend::SetAllocatedSendBitrateLimits(
    BitrateAllocationLimits limits) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  streams_config_.min_total_allocated_bitrate = limits.min_allocatable_rate;
  streams_config_.max_padding_rate = limits.max_padding_rate;
  streams_config_.max_total_allocated_bitrate = limits.max_allocatable_rate;
  UpdateStreamsConfig();
}

void RtpTransportControllerSend::SetPacingFactor(float pacing_factor) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  streams_config_.pacing_factor = pacing_factor;
  UpdateStreamsConfig();
}

// Caps how long packets may wait in the pacer queue.
void RtpTransportControllerSend::SetQueueTimeLimit(int limit_ms) {
  pacer_.SetQueueTimeLimit(TimeDelta::Millis(limit_ms));
}

StreamFeedbackProvider*
RtpTransportControllerSend::GetStreamFeedbackProvider() {
  return &feedback_demuxer_;
}
// Applies new BWE settings. If a congestion controller already exists it is
// torn down and recreated so the new settings take effect; the fresh
// controller is then re-seeded with the current constraints, stream config
// and network availability.
void RtpTransportControllerSend::ReconfigureBandwidthEstimation(
    const BandwidthEstimationSettings& settings) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  bwe_settings_ = settings;
  if (controller_) {
    // Recreate the controller and handler.
    control_handler_ = nullptr;
    controller_ = nullptr;
    // The BWE controller is created when/if the network is available.
    MaybeCreateControllers();
    if (controller_) {
      BitrateConstraints constraints = bitrate_configurator_.GetConfig();
      UpdateBitrateConstraints(constraints);
      UpdateStreamsConfig();
      UpdateNetworkAvailability();
    }
  }
  pacer_.SetAllowProbeWithoutMediaPacket(
      bwe_settings_.allow_probe_without_media &&
      packet_router_.SupportsRtxPayloadPadding());
}
// Registers the (single) observer for target-rate updates, reports the
// starting rate immediately, and creates the congestion controller if the
// network is already available.
void RtpTransportControllerSend::RegisterTargetTransferRateObserver(
    TargetTransferRateObserver* observer) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK(observer_ == nullptr);
  observer_ = observer;
  observer_->OnStartRateUpdate(*initial_config_.constraints.starting_rate);
  MaybeCreateControllers();
}
bool RtpTransportControllerSend::IsRelevantRouteChange(
    const rtc::NetworkRoute& old_route,
    const rtc::NetworkRoute& new_route) const {
  // TODO(bugs.webrtc.org/11438): Experiment with using more information/
  // other conditions.
  if (old_route.connected != new_route.connected)
    return true;
  if (old_route.local.network_id() != new_route.local.network_id() ||
      old_route.remote.network_id() != new_route.remote.network_id())
    return true;
  // A change in relaying status only matters when a finite relay bandwidth
  // cap has been configured via field trial.
  return relay_bandwidth_cap_->IsFinite() &&
         IsRelayed(old_route) != IsRelayed(new_route);
}
// Tracks the active network route per transport. On a relevant change
// (connectivity, network ids, or relaying when capped) the estimator is
// reset to the configured bitrates and the congestion state is cleared.
void RtpTransportControllerSend::OnNetworkRouteChanged(
    absl::string_view transport_name,
    const rtc::NetworkRoute& network_route) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Check if the network route is connected.
  if (!network_route.connected) {
    // TODO(honghaiz): Perhaps handle this in SignalChannelNetworkState and
    // consider merging these two methods.
    return;
  }
  // Apply (or lift) the relay bandwidth cap before comparing routes.
  absl::optional<BitrateConstraints> relay_constraint_update =
      ApplyOrLiftRelayCap(IsRelayed(network_route));
  // Check whether the network route has changed on each transport.
  auto result = network_routes_.insert(
      // Explicit conversion of transport_name to std::string here is necessary
      // to support some platforms that cannot yet deal with implicit
      // conversion in these types of situations.
      std::make_pair(std::string(transport_name), network_route));
  auto kv = result.first;
  bool inserted = result.second;
  if (inserted || !(kv->second == network_route)) {
    RTC_LOG(LS_INFO) << "Network route changed on transport " << transport_name
                     << ": new_route = " << network_route.DebugString();
    if (!inserted) {
      RTC_LOG(LS_INFO) << "old_route = " << kv->second.DebugString();
    }
  }
  if (inserted) {
    if (relay_constraint_update.has_value()) {
      UpdateBitrateConstraints(*relay_constraint_update);
    }
    transport_overhead_bytes_per_packet_ = network_route.packet_overhead;
    // No need to reset BWE if this is the first time the network connects.
    return;
  }
  const rtc::NetworkRoute old_route = kv->second;
  kv->second = network_route;
  // Check if enough conditions of the new/old route has changed
  // to trigger resetting of bitrates (and a probe).
  if (IsRelevantRouteChange(old_route, network_route)) {
    BitrateConstraints bitrate_config = bitrate_configurator_.GetConfig();
    RTC_LOG(LS_INFO) << "Reset bitrates to min: "
                     << bitrate_config.min_bitrate_bps
                     << " bps, start: " << bitrate_config.start_bitrate_bps
                     << " bps, max: " << bitrate_config.max_bitrate_bps
                     << " bps.";
    RTC_DCHECK_GT(bitrate_config.start_bitrate_bps, 0);
    env_.event_log().Log(std::make_unique<RtcEventRouteChange>(
        network_route.connected, network_route.packet_overhead));
    NetworkRouteChange msg;
    msg.at_time = Timestamp::Millis(env_.clock().TimeInMilliseconds());
    msg.constraints = ConvertConstraints(bitrate_config, &env_.clock());
    transport_overhead_bytes_per_packet_ = network_route.packet_overhead;
    if (reset_feedback_on_route_change_) {
      transport_feedback_adapter_.SetNetworkRoute(network_route);
    }
    if (controller_) {
      PostUpdates(controller_->OnNetworkRouteChange(msg));
    } else {
      // Controller not created yet; remember the constraints for later.
      UpdateInitialConstraints(msg.constraints);
    }
    is_congested_ = false;
    pacer_.SetCongested(false);
  }
}
// Pauses/resumes the pacer with network availability, clears the congestion
// state, lazily creates the congestion controller, and propagates the new
// state to the controller and all video senders.
void RtpTransportControllerSend::OnNetworkAvailability(bool network_available) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_LOG(LS_VERBOSE) << "SignalNetworkState "
                      << (network_available ? "Up" : "Down");
  network_available_ = network_available;
  if (network_available) {
    pacer_.Resume();
  } else {
    pacer_.Pause();
  }
  is_congested_ = false;
  pacer_.SetCongested(false);
  if (!controller_) {
    MaybeCreateControllers();
  }
  UpdateNetworkAvailability();
  for (auto& rtp_sender : video_rtp_senders_) {
    rtp_sender->OnNetworkAvailability(network_available);
  }
}
// This object implements the RTCP link observer callbacks itself.
NetworkLinkRtcpObserver* RtpTransportControllerSend::GetRtcpObserver() {
  return this;
}

// Age of the oldest packet still waiting in the pacer queue.
int64_t RtpTransportControllerSend::GetPacerQueuingDelayMs() const {
  return pacer_.OldestPacketWaitTime().ms();
}

// Time the pacer sent its first packet, if any packet was sent yet.
absl::optional<Timestamp> RtpTransportControllerSend::GetFirstPacketTime()
    const {
  return pacer_.FirstSentPacketTime();
}

void RtpTransportControllerSend::EnablePeriodicAlrProbing(bool enable) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  streams_config_.requests_alr_probing = enable;
  UpdateStreamsConfig();
}
// Entry point for sent-packet notifications. May be invoked from another
// thread, in which case the work is marshalled onto `task_queue_` guarded by
// the safety flag; otherwise it is processed inline.
void RtpTransportControllerSend::OnSentPacket(
    const rtc::SentPacket& sent_packet) {
  // Normally called on the network thread!
  // TODO(crbug.com/1373439): Clarify other thread contexts calling in,
  // and simplify task posting logic when the combined network/worker project
  // launches.
  if (TaskQueueBase::Current() != task_queue_) {
    task_queue_->PostTask(SafeTask(safety_.flag(), [this, sent_packet]() {
      RTC_DCHECK_RUN_ON(&sequence_checker_);
      ProcessSentPacket(sent_packet);
    }));
    return;
  }
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  ProcessSentPacket(sent_packet);
}
// Feeds a sent packet to the feedback adapter and, when it yields a
// controller-level SentPacket, to the congestion controller. Skips the
// update step entirely when neither congestion state nor controller output
// changed.
void RtpTransportControllerSend::ProcessSentPacket(
    const rtc::SentPacket& sent_packet) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  absl::optional<SentPacket> packet_msg =
      transport_feedback_adapter_.ProcessSentPacket(sent_packet);
  if (!packet_msg)
    return;
  auto congestion_update = GetCongestedStateUpdate();
  NetworkControlUpdate control_update;
  if (controller_)
    control_update = controller_->OnSentPacket(*packet_msg);
  if (!congestion_update && !control_update.has_updates())
    return;
  ProcessSentPacketUpdates(std::move(control_update));
}
// RTC_RUN_ON(task_queue_)
// Applies pending congestion-state changes and forwards controller updates.
void RtpTransportControllerSend::ProcessSentPacketUpdates(
    NetworkControlUpdate updates) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Only update outstanding data if:
  // 1. Packet feedback is used.
  // 2. The packet has not yet received an acknowledgement.
  // 3. It is not a retransmission of an earlier packet.
  UpdateCongestedState();
  if (controller_) {
    PostUpdates(std::move(updates));
  }
}
// Forwards an incoming-packet notification to the congestion controller,
// if one has been created.
void RtpTransportControllerSend::OnReceivedPacket(
    const ReceivedPacket& packet_msg) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  if (controller_)
    PostUpdates(controller_->OnReceivedPacket(packet_msg));
}
// Converts `updated` to TargetRateConstraints and delivers them to the
// congestion controller, or stores them as initial constraints when no
// controller exists yet.
void RtpTransportControllerSend::UpdateBitrateConstraints(
    const BitrateConstraints& updated) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  TargetRateConstraints msg = ConvertConstraints(updated, &env_.clock());
  if (controller_) {
    PostUpdates(controller_->OnTargetRateConstraints(msg));
  } else {
    UpdateInitialConstraints(msg);
  }
}
// Merges SDP-supplied bitrate parameters into the configurator; constraints
// are only pushed further when the effective config actually changed.
void RtpTransportControllerSend::SetSdpBitrateParameters(
    const BitrateConstraints& constraints) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  absl::optional<BitrateConstraints> updated =
      bitrate_configurator_.UpdateWithSdpParameters(constraints);
  if (updated.has_value()) {
    UpdateBitrateConstraints(*updated);
  } else {
    RTC_LOG(LS_VERBOSE)
        << "WebRTC.RtpTransportControllerSend.SetSdpBitrateParameters: "
           "nothing to update";
  }
}

// Same as above, for client (application) bitrate preferences.
void RtpTransportControllerSend::SetClientBitratePreferences(
    const BitrateSettings& preferences) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  absl::optional<BitrateConstraints> updated =
      bitrate_configurator_.UpdateWithClientPreferences(preferences);
  if (updated.has_value()) {
    UpdateBitrateConstraints(*updated);
  } else {
    RTC_LOG(LS_VERBOSE)
        << "WebRTC.RtpTransportControllerSend.SetClientBitratePreferences: "
           "nothing to update";
  }
}
absl::optional<BitrateConstraints>
RtpTransportControllerSend::ApplyOrLiftRelayCap(bool is_relayed) {
  // Cap the bitrate on relayed routes; lift the cap (infinite) otherwise.
  // The configurator's optional result is handled by the caller.
  DataRate cap = DataRate::PlusInfinity();
  if (is_relayed) {
    cap = relay_bandwidth_cap_;
  }
  return bitrate_configurator_.UpdateWithRelayCap(cap);
}
// Propagates the per-packet transport overhead to the pacer and all video
// senders, rejecting implausibly large values.
void RtpTransportControllerSend::OnTransportOverheadChanged(
    size_t transport_overhead_bytes_per_packet) {
  // NOTE(review): the guard also rejects values equal to kMaxOverheadBytes,
  // although the log message says "exceeds" — confirm intended boundary.
  if (transport_overhead_bytes_per_packet >= kMaxOverheadBytes) {
    RTC_LOG(LS_ERROR) << "Transport overhead exceeds " << kMaxOverheadBytes;
    return;
  }
  pacer_.SetTransportOverhead(
      DataSize::Bytes(transport_overhead_bytes_per_packet));
  // TODO(holmer): Call AudioRtpSenders when they have been moved to
  // RtpTransportControllerSend.
  for (auto& rtp_video_sender : video_rtp_senders_) {
    rtp_video_sender->OnTransportOverheadChanged(
        transport_overhead_bytes_per_packet);
  }
}
// Pacer configuration pass-throughs.
void RtpTransportControllerSend::AccountForAudioPacketsInPacedSender(
    bool account_for_audio) {
  pacer_.SetAccountForAudioPackets(account_for_audio);
}

void RtpTransportControllerSend::IncludeOverheadInPacedSender() {
  pacer_.SetIncludeOverhead();
}
void RtpTransportControllerSend::EnsureStarted() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Idempotent: only the first call actually starts the pacer.
  if (pacer_started_)
    return;
  pacer_started_ = true;
  pacer_.EnsureStarted();
}
// Forwards a REMB report from RTCP to the congestion controller.
void RtpTransportControllerSend::OnReceiverEstimatedMaxBitrate(
    Timestamp receive_time,
    DataRate bitrate) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RemoteBitrateReport msg;
  msg.receive_time = receive_time;
  msg.bandwidth = bitrate;
  if (controller_)
    PostUpdates(controller_->OnRemoteBitrateReport(msg));
}

// Forwards an RTT measurement (rounded to whole milliseconds) to the
// congestion controller; zero RTTs are dropped.
void RtpTransportControllerSend::OnRttUpdate(Timestamp receive_time,
                                             TimeDelta rtt) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RoundTripTimeUpdate report;
  report.receive_time = receive_time;
  report.round_trip_time = rtt.RoundTo(TimeDelta::Millis(1));
  report.smoothed = false;
  if (controller_ && !report.round_trip_time.IsZero())
    PostUpdates(controller_->OnRoundTripTimeUpdate(report));
}
// Registers a packet that is about to be sent with the feedback demuxer and
// the transport feedback adapter, so later RTCP feedback can be matched to
// it. Called for every packet with a transport sequence number.
void RtpTransportControllerSend::OnAddPacket(
    const RtpPacketSendInfo& packet_info) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  Timestamp creation_time =
      Timestamp::Millis(env_.clock().TimeInMilliseconds());
  feedback_demuxer_.AddPacket(packet_info);
  // The current overhead is recorded with the packet so feedback-based size
  // accounting stays consistent even if overhead changes later.
  transport_feedback_adapter_.AddPacket(
      packet_info, transport_overhead_bytes_per_packet_, creation_time);
}
// Processes an RTCP transport-feedback message: demuxes per-stream feedback,
// converts it to a TransportPacketsFeedback for the congestion controller,
// and refreshes the congestion state.
void RtpTransportControllerSend::OnTransportFeedback(
    Timestamp receive_time,
    const rtcp::TransportFeedback& feedback) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  feedback_demuxer_.OnTransportFeedback(feedback);
  absl::optional<TransportPacketsFeedback> packets_feedback =
      transport_feedback_adapter_.ProcessTransportFeedback(feedback,
                                                           receive_time);
  if (!packets_feedback.has_value()) {
    return;
  }
  if (controller_) {
    PostUpdates(controller_->OnTransportPacketsFeedback(*packets_feedback));
  }
  // Only update outstanding data if any packet is first time acked.
  UpdateCongestedState();
}
// Handles a remote network-state estimate: logs it to the RTC event log,
// stamps it with the local time, and hands it to the congestion controller.
void RtpTransportControllerSend::OnRemoteNetworkEstimate(
    NetworkStateEstimate estimate) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  env_.event_log().Log(std::make_unique<RtcEventRemoteEstimate>(
      estimate.link_capacity_lower, estimate.link_capacity_upper));
  estimate.update_time = Timestamp::Millis(env_.clock().TimeInMilliseconds());
  if (!controller_) {
    return;
  }
  PostUpdates(controller_->OnNetworkStateEstimate(estimate));
}
void RtpTransportControllerSend::MaybeCreateControllers() {
RTC_DCHECK(!controller_);
RTC_DCHECK(!control_handler_);
if (!network_available_ || !observer_)
return;
control_handler_ = std::make_unique<CongestionControlHandler>();
initial_config_.constraints.at_time =
Timestamp::Millis(env_.clock().TimeInMilliseconds());
initial_config_.stream_based_config = streams_config_;
// TODO(srte): Use fallback controller if no feedback is available.
if (controller_factory_override_) {
RTC_LOG(LS_INFO) << "Creating overridden congestion controller";
controller_ = controller_factory_override_->Create(initial_config_);
process_interval_ = controller_factory_override_->GetProcessInterval();
} else {
RTC_LOG(LS_INFO) << "Creating fallback congestion controller";
controller_ = controller_factory_fallback_->Create(initial_config_);
process_interval_ = controller_factory_fallback_->GetProcessInterval();
}
UpdateControllerWithTimeInterval();
StartProcessPeriodicTasks();
}
void RtpTransportControllerSend::UpdateNetworkAvailability() {
if (!controller_) {
return;
}
NetworkAvailability msg;
msg.at_time = Timestamp::Millis(env_.clock().TimeInMilliseconds());
msg.network_available = network_available_;
control_handler_->SetNetworkAvailability(network_available_);
PostUpdates(controller_->OnNetworkAvailability(msg));
UpdateControlState();
}
// Replaces the constraints used when (re)creating the congestion controller.
// If the caller did not supply a starting rate, the previously configured
// one is carried over so a starting rate is always present.
void RtpTransportControllerSend::UpdateInitialConstraints(
    TargetRateConstraints new_contraints) {
  if (!new_contraints.starting_rate.has_value()) {
    new_contraints.starting_rate = initial_config_.constraints.starting_rate;
  }
  RTC_DCHECK(new_contraints.starting_rate);
  initial_config_.constraints = new_contraints;
}
// Starts (or restarts) the two periodic jobs of this controller:
//  1) a pacer-queue poll that feeds the expected queue time into the control
//     handler, and
//  2) the congestion controller's own process tick, if the controller factory
//     requested a finite process interval.
void RtpTransportControllerSend::StartProcessPeriodicTasks() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // The pacer-queue poll is only started once; it keeps rescheduling itself.
  if (!pacer_queue_update_task_.Running()) {
    pacer_queue_update_task_ = RepeatingTaskHandle::DelayedStart(
        task_queue_, kPacerQueueUpdateInterval, [this]() {
          RTC_DCHECK_RUN_ON(&sequence_checker_);
          TimeDelta expected_queue_time = pacer_.ExpectedQueueTime();
          control_handler_->SetPacerQueue(expected_queue_time);
          UpdateControlState();
          return kPacerQueueUpdateInterval;
        });
  }
  // The controller tick is restarted each time, since process_interval_ may
  // have changed when a new controller was created.
  controller_task_.Stop();
  if (process_interval_.IsFinite()) {
    controller_task_ = RepeatingTaskHandle::DelayedStart(
        task_queue_, process_interval_, [this]() {
          RTC_DCHECK_RUN_ON(&sequence_checker_);
          UpdateControllerWithTimeInterval();
          return process_interval_;
        });
  }
}
void RtpTransportControllerSend::UpdateControllerWithTimeInterval() {
RTC_DCHECK(controller_);
ProcessInterval msg;
msg.at_time = Timestamp::Millis(env_.clock().TimeInMilliseconds());
if (add_pacing_to_cwin_)
msg.pacer_queue = pacer_.QueueSizeData();
PostUpdates(controller_->OnProcessInterval(msg));
}
// Pushes the current stream configuration (with a fresh timestamp) to the
// congestion controller, if one has been created.
void RtpTransportControllerSend::UpdateStreamsConfig() {
  streams_config_.at_time =
      Timestamp::Millis(env_.clock().TimeInMilliseconds());
  if (controller_)
    PostUpdates(controller_->OnStreamsConfig(streams_config_));
}
// Applies the pieces of a NetworkControlUpdate produced by the congestion
// controller: congestion window, pacing rates, probe clusters, and target
// rate. Each field is optional; absent fields leave the current state alone.
void RtpTransportControllerSend::PostUpdates(NetworkControlUpdate update) {
  // Congestion window first, so the congested state reflects the new window
  // before any rate changes are propagated.
  if (update.congestion_window) {
    congestion_window_size_ = *update.congestion_window;
    UpdateCongestedState();
  }
  if (update.pacer_config) {
    pacer_.SetPacingRates(update.pacer_config->data_rate(),
                          update.pacer_config->pad_rate());
  }
  if (!update.probe_cluster_configs.empty()) {
    pacer_.CreateProbeClusters(std::move(update.probe_cluster_configs));
  }
  if (update.target_rate) {
    control_handler_->SetTargetRate(*update.target_rate);
    UpdateControlState();
  }
}
// Derives a transport-wide loss report from RTCP report blocks by diffing
// each block's cumulative counters against the values remembered from the
// previous report for the same SSRC, then forwards the aggregated deltas to
// the congestion controller.
void RtpTransportControllerSend::OnReport(
    Timestamp receive_time,
    rtc::ArrayView<const ReportBlockData> report_blocks) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  if (report_blocks.empty())
    return;
  int total_packets_lost_delta = 0;
  int total_packets_delta = 0;
  // Compute the packet loss from all report blocks.
  for (const ReportBlockData& report_block : report_blocks) {
    // try_emplace gives us the previous LossReport for this SSRC, or a
    // freshly value-initialized one (inserted == true) on first sight.
    auto [it, inserted] =
        last_report_blocks_.try_emplace(report_block.source_ssrc());
    LossReport& last_loss_report = it->second;
    // First report for an SSRC only seeds the baseline; no delta yet.
    if (!inserted) {
      total_packets_delta += report_block.extended_highest_sequence_number() -
                             last_loss_report.extended_highest_sequence_number;
      total_packets_lost_delta +=
          report_block.cumulative_lost() - last_loss_report.cumulative_lost;
    }
    last_loss_report.extended_highest_sequence_number =
        report_block.extended_highest_sequence_number();
    last_loss_report.cumulative_lost = report_block.cumulative_lost();
  }
  // Can only compute delta if there has been previous blocks to compare to. If
  // not, total_packets_delta will be unchanged and there's nothing more to do.
  if (!total_packets_delta)
    return;
  int packets_received_delta = total_packets_delta - total_packets_lost_delta;
  // To detect lost packets, at least one packet has to be received. This check
  // is needed to avoid bandwith detection update in
  // VideoSendStreamTest.SuspendBelowMinBitrate
  if (packets_received_delta < 1)
    return;
  TransportLossReport msg;
  msg.packets_lost_delta = total_packets_lost_delta;
  msg.packets_received_delta = packets_received_delta;
  msg.receive_time = receive_time;
  // The report covers the interval since the previous RTCP report.
  msg.start_time = last_report_block_time_;
  msg.end_time = receive_time;
  if (controller_)
    PostUpdates(controller_->OnTransportLossReport(msg));
  last_report_block_time_ = receive_time;
}
} // namespace webrtc

View file

@ -0,0 +1,218 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_H_
#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_H_
#include <atomic>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "api/environment/environment.h"
#include "api/network_state_predictor.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/transport/network_control.h"
#include "api/units/data_rate.h"
#include "call/rtp_bitrate_configurator.h"
#include "call/rtp_transport_config.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "call/rtp_video_sender.h"
#include "modules/congestion_controller/rtp/control_handler.h"
#include "modules/congestion_controller/rtp/transport_feedback_adapter.h"
#include "modules/congestion_controller/rtp/transport_feedback_demuxer.h"
#include "modules/pacing/packet_router.h"
#include "modules/pacing/rtp_packet_pacer.h"
#include "modules/pacing/task_queue_paced_sender.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/network_route.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/task_queue.h"
#include "rtc_base/task_utils/repeating_task.h"
namespace webrtc {
class FrameEncryptorInterface;
class RtcEventLog;
// Send-side RTP transport controller. Owns the pacer, packet router,
// congestion controller and transport-feedback plumbing for one transport,
// and implements the RTCP/feedback observer interfaces so that RTP modules
// can feed network events back into bandwidth estimation.
// All non-const state is confined to `sequence_checker_`'s task queue.
class RtpTransportControllerSend final
    : public RtpTransportControllerSendInterface,
      public NetworkLinkRtcpObserver,
      public TransportFeedbackObserver,
      public NetworkStateEstimateObserver {
 public:
  explicit RtpTransportControllerSend(const RtpTransportConfig& config);
  ~RtpTransportControllerSend() override;
  RtpTransportControllerSend(const RtpTransportControllerSend&) = delete;
  RtpTransportControllerSend& operator=(const RtpTransportControllerSend&) =
      delete;
  // TODO(tommi): Change to std::unique_ptr<>.
  RtpVideoSenderInterface* CreateRtpVideoSender(
      const std::map<uint32_t, RtpState>& suspended_ssrcs,
      const std::map<uint32_t, RtpPayloadState>&
          states,  // move states into RtpTransportControllerSend
      const RtpConfig& rtp_config,
      int rtcp_report_interval_ms,
      Transport* send_transport,
      const RtpSenderObservers& observers,
      RtcEventLog* event_log,
      std::unique_ptr<FecController> fec_controller,
      const RtpSenderFrameEncryptionConfig& frame_encryption_config,
      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) override;
  void DestroyRtpVideoSender(
      RtpVideoSenderInterface* rtp_video_sender) override;
  // Implements RtpTransportControllerSendInterface
  void RegisterSendingRtpStream(RtpRtcpInterface& rtp_module) override;
  void DeRegisterSendingRtpStream(RtpRtcpInterface& rtp_module) override;
  PacketRouter* packet_router() override;
  NetworkStateEstimateObserver* network_state_estimate_observer() override;
  TransportFeedbackObserver* transport_feedback_observer() override;
  RtpPacketSender* packet_sender() override;
  void SetAllocatedSendBitrateLimits(BitrateAllocationLimits limits) override;
  void ReconfigureBandwidthEstimation(
      const BandwidthEstimationSettings& settings) override;
  void SetPacingFactor(float pacing_factor) override;
  void SetQueueTimeLimit(int limit_ms) override;
  StreamFeedbackProvider* GetStreamFeedbackProvider() override;
  void RegisterTargetTransferRateObserver(
      TargetTransferRateObserver* observer) override;
  void OnNetworkRouteChanged(absl::string_view transport_name,
                             const rtc::NetworkRoute& network_route) override;
  void OnNetworkAvailability(bool network_available) override;
  NetworkLinkRtcpObserver* GetRtcpObserver() override;
  int64_t GetPacerQueuingDelayMs() const override;
  absl::optional<Timestamp> GetFirstPacketTime() const override;
  void EnablePeriodicAlrProbing(bool enable) override;
  void OnSentPacket(const rtc::SentPacket& sent_packet) override;
  void OnReceivedPacket(const ReceivedPacket& packet_msg) override;
  void SetSdpBitrateParameters(const BitrateConstraints& constraints) override;
  void SetClientBitratePreferences(const BitrateSettings& preferences) override;
  void OnTransportOverheadChanged(
      size_t transport_overhead_bytes_per_packet) override;
  void AccountForAudioPacketsInPacedSender(bool account_for_audio) override;
  void IncludeOverheadInPacedSender() override;
  void EnsureStarted() override;
  // Implements NetworkLinkRtcpObserver interface
  void OnReceiverEstimatedMaxBitrate(Timestamp receive_time,
                                     DataRate bitrate) override;
  void OnReport(Timestamp receive_time,
                rtc::ArrayView<const ReportBlockData> report_blocks) override;
  void OnRttUpdate(Timestamp receive_time, TimeDelta rtt) override;
  void OnTransportFeedback(Timestamp receive_time,
                           const rtcp::TransportFeedback& feedback) override;
  // Implements TransportFeedbackObserver interface
  void OnAddPacket(const RtpPacketSendInfo& packet_info) override;
  // Implements NetworkStateEstimateObserver interface
  void OnRemoteNetworkEstimate(NetworkStateEstimate estimate) override;
 private:
  void MaybeCreateControllers() RTC_RUN_ON(sequence_checker_);
  void UpdateNetworkAvailability() RTC_RUN_ON(sequence_checker_);
  void UpdateInitialConstraints(TargetRateConstraints new_contraints)
      RTC_RUN_ON(sequence_checker_);
  void StartProcessPeriodicTasks() RTC_RUN_ON(sequence_checker_);
  void UpdateControllerWithTimeInterval() RTC_RUN_ON(sequence_checker_);
  absl::optional<BitrateConstraints> ApplyOrLiftRelayCap(bool is_relayed);
  bool IsRelevantRouteChange(const rtc::NetworkRoute& old_route,
                             const rtc::NetworkRoute& new_route) const;
  void UpdateBitrateConstraints(const BitrateConstraints& updated);
  void UpdateStreamsConfig() RTC_RUN_ON(sequence_checker_);
  void PostUpdates(NetworkControlUpdate update) RTC_RUN_ON(sequence_checker_);
  void UpdateControlState() RTC_RUN_ON(sequence_checker_);
  void UpdateCongestedState() RTC_RUN_ON(sequence_checker_);
  absl::optional<bool> GetCongestedStateUpdate() const
      RTC_RUN_ON(sequence_checker_);
  void ProcessSentPacket(const rtc::SentPacket& sent_packet)
      RTC_RUN_ON(sequence_checker_);
  void ProcessSentPacketUpdates(NetworkControlUpdate updates)
      RTC_RUN_ON(sequence_checker_);
  const Environment env_;
  SequenceChecker sequence_checker_;
  TaskQueueBase* task_queue_;
  PacketRouter packet_router_;
  std::vector<std::unique_ptr<RtpVideoSenderInterface>> video_rtp_senders_
      RTC_GUARDED_BY(&sequence_checker_);
  RtpBitrateConfigurator bitrate_configurator_;
  // Last known network route per transport name.
  std::map<std::string, rtc::NetworkRoute> network_routes_
      RTC_GUARDED_BY(sequence_checker_);
  BandwidthEstimationSettings bwe_settings_ RTC_GUARDED_BY(sequence_checker_);
  bool pacer_started_ RTC_GUARDED_BY(sequence_checker_);
  TaskQueuePacedSender pacer_;
  TargetTransferRateObserver* observer_ RTC_GUARDED_BY(sequence_checker_);
  TransportFeedbackDemuxer feedback_demuxer_;
  TransportFeedbackAdapter transport_feedback_adapter_
      RTC_GUARDED_BY(sequence_checker_);
  // Optional injected controller factory; when null, the fallback below is
  // used (see MaybeCreateControllers).
  NetworkControllerFactoryInterface* const controller_factory_override_
      RTC_PT_GUARDED_BY(sequence_checker_);
  const std::unique_ptr<NetworkControllerFactoryInterface>
      controller_factory_fallback_ RTC_PT_GUARDED_BY(sequence_checker_);
  std::unique_ptr<CongestionControlHandler> control_handler_
      RTC_GUARDED_BY(sequence_checker_) RTC_PT_GUARDED_BY(sequence_checker_);
  std::unique_ptr<NetworkControllerInterface> controller_
      RTC_GUARDED_BY(sequence_checker_) RTC_PT_GUARDED_BY(sequence_checker_);
  TimeDelta process_interval_ RTC_GUARDED_BY(sequence_checker_);
  // Per-SSRC snapshot of the last RTCP report block, used to compute loss
  // deltas in OnReport.
  struct LossReport {
    uint32_t extended_highest_sequence_number = 0;
    int cumulative_lost = 0;
  };
  std::map<uint32_t, LossReport> last_report_blocks_
      RTC_GUARDED_BY(sequence_checker_);
  Timestamp last_report_block_time_ RTC_GUARDED_BY(sequence_checker_);
  NetworkControllerConfig initial_config_ RTC_GUARDED_BY(sequence_checker_);
  StreamsConfig streams_config_ RTC_GUARDED_BY(sequence_checker_);
  const bool reset_feedback_on_route_change_;
  const bool add_pacing_to_cwin_;
  FieldTrialParameter<DataRate> relay_bandwidth_cap_;
  size_t transport_overhead_bytes_per_packet_ RTC_GUARDED_BY(sequence_checker_);
  bool network_available_ RTC_GUARDED_BY(sequence_checker_);
  RepeatingTaskHandle pacer_queue_update_task_
      RTC_GUARDED_BY(sequence_checker_);
  RepeatingTaskHandle controller_task_ RTC_GUARDED_BY(sequence_checker_);
  DataSize congestion_window_size_ RTC_GUARDED_BY(sequence_checker_);
  bool is_congested_ RTC_GUARDED_BY(sequence_checker_);
  // Protected by internal locks.
  RateLimiter retransmission_rate_limiter_;
  ScopedTaskSafety safety_;
};
} // namespace webrtc
#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_H_

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_
#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_
#include <memory>
#include <utility>
#include "call/rtp_transport_controller_send.h"
#include "call/rtp_transport_controller_send_factory_interface.h"
namespace webrtc {
class RtpTransportControllerSendFactory
: public RtpTransportControllerSendFactoryInterface {
public:
std::unique_ptr<RtpTransportControllerSendInterface> Create(
const RtpTransportConfig& config) override {
return std::make_unique<RtpTransportControllerSend>(config);
}
virtual ~RtpTransportControllerSendFactory() {}
};
} // namespace webrtc
#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_

View file

@ -0,0 +1,29 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_
#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_
#include <memory>
#include "call/rtp_transport_config.h"
#include "call/rtp_transport_controller_send_interface.h"
namespace webrtc {
// A factory used for dependency injection on the send side of the transport
// controller.
class RtpTransportControllerSendFactoryInterface {
 public:
  virtual ~RtpTransportControllerSendFactoryInterface() = default;
  // Creates a send-side transport controller configured by `config`.
  // Ownership of the controller is returned to the caller.
  virtual std::unique_ptr<RtpTransportControllerSendInterface> Create(
      const RtpTransportConfig& config) = 0;
};
} // namespace webrtc
#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_

View file

@ -0,0 +1,171 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_INTERFACE_H_
#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_INTERFACE_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/crypto/crypto_options.h"
#include "api/fec_controller.h"
#include "api/frame_transformer_interface.h"
#include "api/rtc_event_log/rtc_event_log.h"
#include "api/transport/bandwidth_estimation_settings.h"
#include "api/transport/bitrate_settings.h"
#include "api/units/timestamp.h"
#include "call/rtp_config.h"
#include "common_video/frame_counts.h"
#include "modules/rtp_rtcp/include/report_block_data.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
namespace rtc {
struct SentPacket;
struct NetworkRoute;
class TaskQueue;
} // namespace rtc
namespace webrtc {
class FrameEncryptorInterface;
class TargetTransferRateObserver;
class Transport;
class PacketRouter;
class RtpVideoSenderInterface;
class RtpPacketSender;
class RtpRtcpInterface;
// Bundle of non-owning observer/callback pointers handed to each RTP sender.
// Default-initialize all pointers to nullptr so a default-constructed or
// partially-filled instance never carries indeterminate values. Callers are
// still expected to set the observers they need (some may be required to be
// non-null by the consuming code — confirm per observer).
struct RtpSenderObservers {
  RtcpRttStats* rtcp_rtt_stats = nullptr;
  RtcpIntraFrameObserver* intra_frame_callback = nullptr;
  RtcpLossNotificationObserver* rtcp_loss_notification_observer = nullptr;
  ReportBlockDataObserver* report_block_data_observer = nullptr;
  StreamDataCountersCallback* rtp_stats = nullptr;
  BitrateStatisticsObserver* bitrate_observer = nullptr;
  FrameCountObserver* frame_count_observer = nullptr;
  RtcpPacketTypeCounterObserver* rtcp_type_observer = nullptr;
  SendPacketObserver* send_packet_observer = nullptr;
};
// Frame-encryption settings for an RTP sender. `frame_encryptor` is a
// non-owning pointer and may be null when frame encryption is not used.
struct RtpSenderFrameEncryptionConfig {
  FrameEncryptorInterface* frame_encryptor = nullptr;
  CryptoOptions crypto_options;
};
// An RtpTransportController should own everything related to the RTP
// transport to/from a remote endpoint. We should have separate
// interfaces for send and receive side, even if they are implemented
// by the same class. This is an ongoing refactoring project. At some
// point, this class should be promoted to a public api under
// webrtc/api/rtp/.
//
// For a start, this object is just a collection of the objects needed
// by the VideoSendStream constructor. The plan is to move ownership
// of all RTP-related objects here, and add methods to create per-ssrc
// objects which would then be passed to VideoSendStream. Eventually,
// direct accessors like packet_router() should be removed.
//
// This should also have a reference to the underlying
// webrtc::Transport(s). Currently, webrtc::Transport is implemented by
// WebRtcVideoChannel and WebRtcVoiceMediaChannel, and owned by
// WebrtcSession. Video and audio always uses different transport
// objects, even in the common case where they are bundled over the
// same underlying transport.
//
// Extracting the logic of the webrtc::Transport from BaseChannel and
// subclasses into a separate class seems to be a prerequesite for
// moving the transport here.
class RtpTransportControllerSendInterface {
 public:
  virtual ~RtpTransportControllerSendInterface() {}
  virtual PacketRouter* packet_router() = 0;
  // Creates a video sender for the given SSRCs; the returned pointer remains
  // owned by the controller (see DestroyRtpVideoSender).
  virtual RtpVideoSenderInterface* CreateRtpVideoSender(
      const std::map<uint32_t, RtpState>& suspended_ssrcs,
      // TODO(holmer): Move states into RtpTransportControllerSend.
      const std::map<uint32_t, RtpPayloadState>& states,
      const RtpConfig& rtp_config,
      int rtcp_report_interval_ms,
      Transport* send_transport,
      const RtpSenderObservers& observers,
      RtcEventLog* event_log,
      std::unique_ptr<FecController> fec_controller,
      const RtpSenderFrameEncryptionConfig& frame_encryption_config,
      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) = 0;
  virtual void DestroyRtpVideoSender(
      RtpVideoSenderInterface* rtp_video_sender) = 0;
  // Register a specific RTP stream as sending. This means that the pacer and
  // packet router can send packets using this RTP stream.
  virtual void RegisterSendingRtpStream(RtpRtcpInterface& rtp_module) = 0;
  // Pacer and PacketRouter stop using this RTP stream.
  virtual void DeRegisterSendingRtpStream(RtpRtcpInterface& rtp_module) = 0;
  virtual NetworkStateEstimateObserver* network_state_estimate_observer() = 0;
  virtual TransportFeedbackObserver* transport_feedback_observer() = 0;
  virtual RtpPacketSender* packet_sender() = 0;
  // SetAllocatedSendBitrateLimits sets bitrates limits imposed by send codec
  // settings.
  virtual void SetAllocatedSendBitrateLimits(
      BitrateAllocationLimits limits) = 0;
  virtual void ReconfigureBandwidthEstimation(
      const BandwidthEstimationSettings& settings) = 0;
  virtual void SetPacingFactor(float pacing_factor) = 0;
  virtual void SetQueueTimeLimit(int limit_ms) = 0;
  virtual StreamFeedbackProvider* GetStreamFeedbackProvider() = 0;
  virtual void RegisterTargetTransferRateObserver(
      TargetTransferRateObserver* observer) = 0;
  // Called whenever the selected network route changes (e.g. on ICE
  // switches); implementations may reset bandwidth estimation.
  virtual void OnNetworkRouteChanged(
      absl::string_view transport_name,
      const rtc::NetworkRoute& network_route) = 0;
  virtual void OnNetworkAvailability(bool network_available) = 0;
  virtual NetworkLinkRtcpObserver* GetRtcpObserver() = 0;
  virtual int64_t GetPacerQueuingDelayMs() const = 0;
  virtual absl::optional<Timestamp> GetFirstPacketTime() const = 0;
  virtual void EnablePeriodicAlrProbing(bool enable) = 0;
  // Called when a packet has been sent.
  // The call should arrive on the network thread, but may not in all cases
  // (some tests don't adhere to this). Implementations today should not block
  // the calling thread or make assumptions about the thread context.
  virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0;
  virtual void OnReceivedPacket(const ReceivedPacket& received_packet) = 0;
  virtual void SetSdpBitrateParameters(
      const BitrateConstraints& constraints) = 0;
  virtual void SetClientBitratePreferences(
      const BitrateSettings& preferences) = 0;
  virtual void OnTransportOverheadChanged(
      size_t transport_overhead_per_packet) = 0;
  virtual void AccountForAudioPacketsInPacedSender(bool account_for_audio) = 0;
  virtual void IncludeOverheadInPacedSender() = 0;
  virtual void EnsureStarted() = 0;
};
} // namespace webrtc
#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_INTERFACE_H_

View file

@ -0,0 +1,986 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtp_video_sender.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/transport/field_trial_based_config.h"
#include "api/units/time_delta.h"
#include "api/video_codecs/video_codec.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "modules/pacing/packet_router.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/rtp_sender.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/task_queue.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
namespace webrtc_internal_rtp_video_sender {
// Takes ownership of the per-stream RTP/RTCP module, the video sender and
// the (possibly null) FEC generator that together form one RTP stream.
RtpStreamSender::RtpStreamSender(
    std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp,
    std::unique_ptr<RTPSenderVideo> sender_video,
    std::unique_ptr<VideoFecGenerator> fec_generator)
    : rtp_rtcp(std::move(rtp_rtcp)),
      sender_video(std::move(sender_video)),
      fec_generator(std::move(fec_generator)) {}
RtpStreamSender::~RtpStreamSender() = default;
} // namespace webrtc_internal_rtp_video_sender
namespace {
// Minimum number of packets the send-side RTP packet history retains so that
// NACKed packets can be retransmitted. `constexpr` replaces `static const`
// for a guaranteed compile-time constant (clang-tidy modernize guidance);
// linkage is unchanged inside the anonymous namespace.
constexpr int kMinSendSidePacketHistorySize = 600;
// We don't do MTU discovery, so assume that we have the standard ethernet MTU.
constexpr size_t kPathMTU = 1500;
using webrtc_internal_rtp_video_sender::RtpStreamSender;
// Returns true if the codec named by `payload_name` carries a picture ID,
// which lets the receiver detect stream completeness without the FEC packets
// being retransmitted. Generic payloads qualify only when the
// WebRTC-GenericPictureId field trial is enabled.
bool PayloadTypeSupportsSkippingFecPackets(absl::string_view payload_name,
                                           const FieldTrialsView& trials) {
  const VideoCodecType codec_type =
      PayloadStringToCodecType(std::string(payload_name));
  switch (codec_type) {
    case kVideoCodecVP8:
    case kVideoCodecVP9:
      return true;
    case kVideoCodecGeneric:
      return absl::StartsWith(trials.Lookup("WebRTC-GenericPictureId"),
                              "Enabled");
    default:
      return false;
  }
}
// Decides whether RED+ULPFEC must be disabled for this configuration.
// Consistency of NACK and RED+ULPFEC parameters is checked in this function.
bool ShouldDisableRedAndUlpfec(bool flexfec_enabled,
                               const RtpConfig& rtp_config,
                               const FieldTrialsView& trials) {
  const bool nack_enabled = rtp_config.nack.rtp_history_ms > 0;
  const bool red_enabled = rtp_config.ulpfec.red_payload_type >= 0;
  const bool ulpfec_enabled = rtp_config.ulpfec.ulpfec_payload_type >= 0;
  bool disable_red_and_ulpfec = false;
  // Field-trial kill switch turns ULPFEC off wholesale.
  if (absl::StartsWith(trials.Lookup("WebRTC-DisableUlpFecExperiment"),
                       "Enabled")) {
    RTC_LOG(LS_INFO) << "Experiment to disable sending ULPFEC is enabled.";
    disable_red_and_ulpfec = true;
  }
  // If enabled, FlexFEC takes priority over RED+ULPFEC.
  if (flexfec_enabled) {
    if (ulpfec_enabled) {
      RTC_LOG(LS_INFO)
          << "Both FlexFEC and ULPFEC are configured. Disabling ULPFEC.";
    }
    disable_red_and_ulpfec = true;
  }
  // Payload types without picture ID cannot determine that a stream is
  // complete without retransmitting FEC, so using ULPFEC + NACK for H.264
  // (for instance) is a waste of bandwidth since FEC packets still have to
  // be transmitted. Note that this is not the case with FlexFEC.
  if (nack_enabled && ulpfec_enabled &&
      !PayloadTypeSupportsSkippingFecPackets(rtp_config.payload_name, trials)) {
    RTC_LOG(LS_WARNING)
        << "Transmitting payload type without picture ID using "
           "NACK+ULPFEC is a waste of bandwidth since ULPFEC packets "
           "also have to be retransmitted. Disabling ULPFEC.";
    disable_red_and_ulpfec = true;
  }
  // RED and ULPFEC are only usable as a pair; disable both if mismatched.
  if (ulpfec_enabled != red_enabled) {
    RTC_LOG(LS_WARNING)
        << "Only RED or only ULPFEC enabled, but not both. Disabling both.";
    disable_red_and_ulpfec = true;
  }
  return disable_red_and_ulpfec;
}
// TODO(brandtr): Update this function when we support multistream protection.
// Creates the FEC generator for one simulcast stream, or returns nullptr when
// FEC is not (validly) configured. FlexFEC takes priority over RED+ULPFEC;
// a misconfigured FlexFEC setup disables FEC entirely rather than falling
// back to ULPFEC.
std::unique_ptr<VideoFecGenerator> MaybeCreateFecGenerator(
    Clock* clock,
    const RtpConfig& rtp,
    const std::map<uint32_t, RtpState>& suspended_ssrcs,
    int simulcast_index,
    const FieldTrialsView& trials) {
  // If flexfec is configured that takes priority.
  if (rtp.flexfec.payload_type >= 0) {
    RTC_DCHECK_GE(rtp.flexfec.payload_type, 0);
    RTC_DCHECK_LE(rtp.flexfec.payload_type, 127);
    if (rtp.flexfec.ssrc == 0) {
      RTC_LOG(LS_WARNING) << "FlexFEC is enabled, but no FlexFEC SSRC given. "
                             "Therefore disabling FlexFEC.";
      return nullptr;
    }
    if (rtp.flexfec.protected_media_ssrcs.empty()) {
      RTC_LOG(LS_WARNING)
          << "FlexFEC is enabled, but no protected media SSRC given. "
             "Therefore disabling FlexFEC.";
      return nullptr;
    }
    if (rtp.flexfec.protected_media_ssrcs.size() > 1) {
      RTC_LOG(LS_WARNING)
          << "The supplied FlexfecConfig contained multiple protected "
             "media streams, but our implementation currently only "
             "supports protecting a single media stream. "
             "To avoid confusion, disabling FlexFEC completely.";
      return nullptr;
    }
    // Only create a generator for the stream FlexFEC actually protects.
    if (absl::c_find(rtp.flexfec.protected_media_ssrcs,
                     rtp.ssrcs[simulcast_index]) ==
        rtp.flexfec.protected_media_ssrcs.end()) {
      // Media SSRC not among flexfec protected SSRCs.
      return nullptr;
    }
    // Resume from the suspended RTP state for the FlexFEC SSRC, if any.
    const RtpState* rtp_state = nullptr;
    auto it = suspended_ssrcs.find(rtp.flexfec.ssrc);
    if (it != suspended_ssrcs.end()) {
      rtp_state = &it->second;
    }
    RTC_DCHECK_EQ(1U, rtp.flexfec.protected_media_ssrcs.size());
    return std::make_unique<FlexfecSender>(
        rtp.flexfec.payload_type, rtp.flexfec.ssrc,
        rtp.flexfec.protected_media_ssrcs[0], rtp.mid, rtp.extensions,
        RTPSender::FecExtensionSizes(), rtp_state, clock);
  } else if (rtp.ulpfec.red_payload_type >= 0 &&
             rtp.ulpfec.ulpfec_payload_type >= 0 &&
             !ShouldDisableRedAndUlpfec(/*flexfec_enabled=*/false, rtp,
                                        trials)) {
    // Flexfec not configured, but ulpfec is and is not disabled.
    return std::make_unique<UlpfecGenerator>(
        rtp.ulpfec.red_payload_type, rtp.ulpfec.ulpfec_payload_type, clock);
  }
  // Not a single FEC is given.
  return nullptr;
}
std::vector<RtpStreamSender> CreateRtpStreamSenders(
Clock* clock,
const RtpConfig& rtp_config,
const RtpSenderObservers& observers,
int rtcp_report_interval_ms,
Transport* send_transport,
RtpTransportControllerSendInterface* transport,
const std::map<uint32_t, RtpState>& suspended_ssrcs,
RtcEventLog* event_log,
RateLimiter* retransmission_rate_limiter,
FrameEncryptorInterface* frame_encryptor,
const CryptoOptions& crypto_options,
rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
const FieldTrialsView& trials,
TaskQueueFactory* task_queue_factory) {
RTC_DCHECK_GT(rtp_config.ssrcs.size(), 0);
RTC_DCHECK(task_queue_factory);
RtpRtcpInterface::Configuration configuration;
configuration.clock = clock;
configuration.audio = false;
configuration.receiver_only = false;
configuration.outgoing_transport = send_transport;
configuration.intra_frame_callback = observers.intra_frame_callback;
configuration.rtcp_loss_notification_observer =
observers.rtcp_loss_notification_observer;
configuration.network_link_rtcp_observer = transport->GetRtcpObserver();
configuration.network_state_estimate_observer =
transport->network_state_estimate_observer();
configuration.transport_feedback_callback =
transport->transport_feedback_observer();
configuration.rtt_stats = observers.rtcp_rtt_stats;
configuration.rtcp_packet_type_counter_observer =
observers.rtcp_type_observer;
configuration.report_block_data_observer =
observers.report_block_data_observer;
configuration.paced_sender = transport->packet_sender();
configuration.send_bitrate_observer = observers.bitrate_observer;
configuration.send_packet_observer = observers.send_packet_observer;
configuration.event_log = event_log;
if (trials.IsDisabled("WebRTC-DisableRtxRateLimiter")) {
configuration.retransmission_rate_limiter = retransmission_rate_limiter;
}
configuration.rtp_stats_callback = observers.rtp_stats;
configuration.frame_encryptor = frame_encryptor;
configuration.require_frame_encryption =
crypto_options.sframe.require_frame_encryption;
configuration.extmap_allow_mixed = rtp_config.extmap_allow_mixed;
configuration.rtcp_report_interval_ms = rtcp_report_interval_ms;
configuration.field_trials = &trials;
configuration.enable_send_packet_batching =
rtp_config.enable_send_packet_batching;
std::vector<RtpStreamSender> rtp_streams;
RTC_DCHECK(rtp_config.rtx.ssrcs.empty() ||
rtp_config.rtx.ssrcs.size() == rtp_config.ssrcs.size());
// Some streams could have been disabled, but the rids are still there.
// This will occur when simulcast has been disabled for a codec (e.g. VP9)
RTC_DCHECK(rtp_config.rids.empty() ||
rtp_config.rids.size() >= rtp_config.ssrcs.size());
for (size_t i = 0; i < rtp_config.ssrcs.size(); ++i) {
RTPSenderVideo::Config video_config;
configuration.local_media_ssrc = rtp_config.ssrcs[i];
std::unique_ptr<VideoFecGenerator> fec_generator =
MaybeCreateFecGenerator(clock, rtp_config, suspended_ssrcs, i, trials);
configuration.fec_generator = fec_generator.get();
configuration.rtx_send_ssrc =
rtp_config.GetRtxSsrcAssociatedWithMediaSsrc(rtp_config.ssrcs[i]);
RTC_DCHECK_EQ(configuration.rtx_send_ssrc.has_value(),
!rtp_config.rtx.ssrcs.empty());
configuration.rid = (i < rtp_config.rids.size()) ? rtp_config.rids[i] : "";
configuration.need_rtp_packet_infos = rtp_config.lntf.enabled;
std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp(
ModuleRtpRtcpImpl2::Create(configuration));
rtp_rtcp->SetSendingStatus(false);
rtp_rtcp->SetSendingMediaStatus(false);
rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
// Set NACK.
rtp_rtcp->SetStorePacketsStatus(true, kMinSendSidePacketHistorySize);
video_config.clock = configuration.clock;
video_config.rtp_sender = rtp_rtcp->RtpSender();
video_config.frame_encryptor = frame_encryptor;
video_config.require_frame_encryption =
crypto_options.sframe.require_frame_encryption;
video_config.field_trials = &trials;
video_config.enable_retransmit_all_layers =
!video_config.field_trials->IsDisabled(
"WebRTC-Video-EnableRetransmitAllLayers");
const bool using_flexfec =
fec_generator &&
fec_generator->GetFecType() == VideoFecGenerator::FecType::kFlexFec;
const bool should_disable_red_and_ulpfec =
ShouldDisableRedAndUlpfec(using_flexfec, rtp_config, trials);
if (!should_disable_red_and_ulpfec &&
rtp_config.ulpfec.red_payload_type != -1) {
video_config.red_payload_type = rtp_config.ulpfec.red_payload_type;
}
if (fec_generator) {
video_config.fec_type = fec_generator->GetFecType();
video_config.fec_overhead_bytes = fec_generator->MaxPacketOverhead();
}
video_config.frame_transformer = frame_transformer;
video_config.task_queue_factory = task_queue_factory;
auto sender_video = std::make_unique<RTPSenderVideo>(video_config);
rtp_streams.emplace_back(std::move(rtp_rtcp), std::move(sender_video),
std::move(fec_generator));
}
return rtp_streams;
}
// Maps the configured payload name to a codec type. Raw payloads carry no
// codec-specific packetization, so no codec type is reported for them.
absl::optional<VideoCodecType> GetVideoCodecType(const RtpConfig& config) {
  return config.raw_payload
             ? absl::nullopt
             : absl::optional<VideoCodecType>(
                   PayloadStringToCodecType(config.payload_name));
}
bool TransportSeqNumExtensionConfigured(const RtpConfig& config) {
return absl::c_any_of(config.extensions, [](const RtpExtension& ext) {
return ext.uri == RtpExtension::kTransportSequenceNumberUri;
});
}
// Returns true when some coded video sequence can be decoded starting with
// this frame without requiring any previous frames.
// e.g. it is the same as a key frame when spatial scalability is not used.
// When spatial scalability is used, then it is true for layer frames of
// a key frame without inter-layer dependencies.
bool IsFirstFrameOfACodedVideoSequence(
    const EncodedImage& encoded_image,
    const CodecSpecificInfo* codec_specific_info) {
  // Delta frames can never start a decodable sequence.
  if (encoded_image._frameType != VideoFrameType::kVideoFrameKey) {
    return false;
  }
  if (codec_specific_info != nullptr) {
    if (codec_specific_info->generic_frame_info.has_value()) {
      // This function is used before
      // `codec_specific_info->generic_frame_info->frame_diffs` are calculated,
      // so need to use a more complicated way to check for presence of the
      // dependencies: the frame starts a sequence only if none of its encoder
      // buffers are referenced (i.e. it depends on no earlier frame).
      return absl::c_none_of(
          codec_specific_info->generic_frame_info->encoder_buffers,
          [](const CodecBufferUsage& buffer) { return buffer.referenced; });
    }
    if (codec_specific_info->codecType == VideoCodecType::kVideoCodecVP8 ||
        codec_specific_info->codecType == VideoCodecType::kVideoCodecH264 ||
        codec_specific_info->codecType == VideoCodecType::kVideoCodecGeneric) {
      // These codecs do not support intra picture dependencies, so a frame
      // marked as a key frame should be a key frame.
      return true;
    }
  }
  // Without dependencies described in generic format do an educated guess.
  // It might be wrong for VP9 with spatial layer 0 skipped or higher spatial
  // layer not depending on the spatial layer 0. This corner case is unimportant
  // for current usage of this helper function.
  // Use <= to accept both 0 (i.e. the first) and nullopt (i.e. the only).
  return encoded_image.SpatialIndex() <= 0;
}
} // namespace
// Builds one RTP/RTCP module + video packetizer (+ optional FEC generator) per
// configured media SSRC, restores any previously-suspended RTP/payload state,
// and registers header extensions, MID, CNAME and FEC protection callbacks.
RtpVideoSender::RtpVideoSender(
    Clock* clock,
    const std::map<uint32_t, RtpState>& suspended_ssrcs,
    const std::map<uint32_t, RtpPayloadState>& states,
    const RtpConfig& rtp_config,
    int rtcp_report_interval_ms,
    Transport* send_transport,
    const RtpSenderObservers& observers,
    RtpTransportControllerSendInterface* transport,
    RtcEventLog* event_log,
    RateLimiter* retransmission_limiter,
    std::unique_ptr<FecController> fec_controller,
    FrameEncryptorInterface* frame_encryptor,
    const CryptoOptions& crypto_options,
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
    const FieldTrialsView& field_trials,
    TaskQueueFactory* task_queue_factory)
    : field_trials_(field_trials),
      use_frame_rate_for_overhead_(absl::StartsWith(
          field_trials_.Lookup("WebRTC-Video-UseFrameRateForOverhead"),
          "Enabled")),
      has_packet_feedback_(TransportSeqNumExtensionConfigured(rtp_config)),
      active_(false),
      fec_controller_(std::move(fec_controller)),
      fec_allowed_(true),
      rtp_streams_(CreateRtpStreamSenders(clock,
                                          rtp_config,
                                          observers,
                                          rtcp_report_interval_ms,
                                          send_transport,
                                          transport,
                                          suspended_ssrcs,
                                          event_log,
                                          retransmission_limiter,
                                          frame_encryptor,
                                          crypto_options,
                                          std::move(frame_transformer),
                                          field_trials_,
                                          task_queue_factory)),
      rtp_config_(rtp_config),
      codec_type_(GetVideoCodecType(rtp_config)),
      transport_(transport),
      transport_overhead_bytes_per_packet_(0),
      encoder_target_rate_bps_(0),
      frame_counts_(rtp_config.ssrcs.size()),
      frame_count_observer_(observers.frame_count_observer) {
  transport_checker_.Detach();
  RTC_DCHECK_EQ(rtp_config_.ssrcs.size(), rtp_streams_.size());
  if (has_packet_feedback_)
    transport_->IncludeOverheadInPacedSender();
  // SSRCs are assumed to be sorted in the same order as `rtp_modules`.
  for (uint32_t ssrc : rtp_config_.ssrcs) {
    // Restore state if it previously existed.
    const RtpPayloadState* state = nullptr;
    auto it = states.find(ssrc);
    if (it != states.end()) {
      state = &it->second;
      // Keep the largest restored frame id so the shared frame-id space keeps
      // increasing monotonically across stream recreations.
      shared_frame_id_ = std::max(shared_frame_id_, state->shared_frame_id);
    }
    params_.push_back(RtpPayloadParams(ssrc, state, field_trials_));
  }
  // RTP/RTCP initialization.
  for (size_t i = 0; i < rtp_config_.extensions.size(); ++i) {
    const std::string& extension = rtp_config_.extensions[i].uri;
    int id = rtp_config_.extensions[i].id;
    RTC_DCHECK(RtpExtension::IsSupportedForVideo(extension));
    for (const RtpStreamSender& stream : rtp_streams_) {
      stream.rtp_rtcp->RegisterRtpHeaderExtension(extension, id);
    }
  }
  ConfigureSsrcs(suspended_ssrcs);
  if (!rtp_config_.mid.empty()) {
    for (const RtpStreamSender& stream : rtp_streams_) {
      stream.rtp_rtcp->SetMid(rtp_config_.mid);
    }
  }
  bool fec_enabled = false;
  for (const RtpStreamSender& stream : rtp_streams_) {
    // Simulcast has one module for each layer. Set the CNAME on all modules.
    stream.rtp_rtcp->SetCNAME(rtp_config_.c_name.c_str());
    stream.rtp_rtcp->SetMaxRtpPacketSize(rtp_config_.max_packet_size);
    stream.rtp_rtcp->RegisterSendPayloadFrequency(rtp_config_.payload_type,
                                                  kVideoPayloadTypeFrequency);
    if (stream.fec_generator != nullptr) {
      fec_enabled = true;
    }
  }
  // Currently, both ULPFEC and FlexFEC use the same FEC rate calculation logic,
  // so enable that logic if either of those FEC schemes are enabled.
  fec_controller_->SetProtectionMethod(fec_enabled, NackEnabled());
  fec_controller_->SetProtectionCallback(this);
  // Construction happens on the worker thread (see Call::CreateVideoSendStream)
  // but subsequent calls to the RTP state will happen on one of two threads:
  // * The pacer thread for actually sending packets.
  // * The transport thread when tearing down and querying GetRtpState().
  // Detach thread checkers.
  for (const RtpStreamSender& stream : rtp_streams_) {
    stream.rtp_rtcp->OnPacketSendingThreadSwitched();
  }
}
RtpVideoSender::~RtpVideoSender() {
  RTC_DCHECK_RUN_ON(&transport_checker_);
  // Deactivate all RTP modules and deregister from the transport's feedback
  // provider before the stream objects are destroyed.
  SetActiveModulesLocked(
      /*sending=*/false);
}
// Enables or disables sending on all RTP modules. A no-op when the requested
// state matches the current one.
void RtpVideoSender::SetSending(bool enabled) {
  RTC_DCHECK_RUN_ON(&transport_checker_);
  MutexLock lock(&mutex_);
  if (enabled != active_) {
    SetActiveModulesLocked(/*sending=*/enabled);
  }
}
// Flips the sending state of every RTP module and keeps the transport's
// stream registration and feedback-observer registration in sync with it.
void RtpVideoSender::SetActiveModulesLocked(bool sending) {
  RTC_DCHECK_RUN_ON(&transport_checker_);
  if (active_ == sending) {
    return;
  }
  active_ = sending;
  for (size_t i = 0; i < rtp_streams_.size(); ++i) {
    RtpRtcpInterface& rtp_module = *rtp_streams_[i].rtp_rtcp;
    // Sending-status gates RTCP; sending-media gates actual media packets.
    rtp_module.SetSendingStatus(sending);
    rtp_module.SetSendingMediaStatus(sending);
    if (sending) {
      transport_->RegisterSendingRtpStream(rtp_module);
    } else {
      transport_->DeRegisterSendingRtpStream(rtp_module);
    }
  }
  // Only receive transport feedback (for loss/ack handling) while sending.
  auto* feedback_provider = transport_->GetStreamFeedbackProvider();
  if (!sending) {
    feedback_provider->DeRegisterStreamFeedbackObserver(this);
  } else {
    feedback_provider->RegisterStreamFeedbackObserver(rtp_config_.ssrcs, this);
  }
}
// Thread-safe wrapper: takes the lock and reports whether the sender is
// active and owns at least one RTP stream.
bool RtpVideoSender::IsActive() {
  RTC_DCHECK_RUN_ON(&transport_checker_);
  MutexLock lock(&mutex_);
  return IsActiveLocked();
}
// Active means sending has been enabled and there is at least one stream.
// Caller must hold `mutex_`.
bool RtpVideoSender::IsActiveLocked() {
  if (rtp_streams_.empty()) {
    return false;
  }
  return active_;
}
// Routes one encoded frame to the RTP stream matching its simulcast index,
// feeds the FEC controller, maintains the shared frame-id space and per-stream
// frame counts, and returns the RTP timestamp used on success.
EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
    const EncodedImage& encoded_image,
    const CodecSpecificInfo* codec_specific_info) {
  fec_controller_->UpdateWithEncodedData(encoded_image.size(),
                                         encoded_image._frameType);
  MutexLock lock(&mutex_);
  RTC_DCHECK(!rtp_streams_.empty());
  if (!active_)
    return Result(Result::ERROR_SEND_FAILED);
  shared_frame_id_++;
  size_t simulcast_index = encoded_image.SimulcastIndex().value_or(0);
  RTC_DCHECK_LT(simulcast_index, rtp_streams_.size());
  // The on-the-wire timestamp is the capture timestamp plus the stream's
  // random start offset.
  uint32_t rtp_timestamp =
      encoded_image.RtpTimestamp() +
      rtp_streams_[simulcast_index].rtp_rtcp->StartTimestamp();
  // RTCPSender has its own copy of the timestamp offset, added in
  // RTCPSender::BuildSR, hence we must not add in the offset for this call.
  // TODO(nisse): Delete RTCPSender:timestamp_offset_, and see if we can confine
  // knowledge of the offset to a single place.
  if (!rtp_streams_[simulcast_index].rtp_rtcp->OnSendingRtpFrame(
          encoded_image.RtpTimestamp(), encoded_image.capture_time_ms_,
          rtp_config_.payload_type,
          encoded_image._frameType == VideoFrameType::kVideoFrameKey)) {
    // The payload router could be active but this module isn't sending.
    return Result(Result::ERROR_SEND_FAILED);
  }
  // PlusInfinity signals "retransmission not allowed" for this frame.
  TimeDelta expected_retransmission_time = TimeDelta::PlusInfinity();
  if (encoded_image.RetransmissionAllowed()) {
    expected_retransmission_time =
        rtp_streams_[simulcast_index].rtp_rtcp->ExpectedRetransmissionTime();
  }
  if (IsFirstFrameOfACodedVideoSequence(encoded_image, codec_specific_info)) {
    // In order to use the dependency descriptor RTP header extension:
    // - Pass along any `FrameDependencyStructure` templates produced by the
    // encoder adapter.
    // - If none were produced the `RtpPayloadParams::*ToGeneric` for the
    // particular codec have simulated a dependency structure, so provide a
    // minimal set of templates.
    // - Otherwise, don't pass along any templates at all which will disable
    // the generation of a dependency descriptor.
    RTPSenderVideo& sender_video = *rtp_streams_[simulcast_index].sender_video;
    if (codec_specific_info && codec_specific_info->template_structure) {
      sender_video.SetVideoStructure(&*codec_specific_info->template_structure);
    } else if (absl::optional<FrameDependencyStructure> structure =
                   params_[simulcast_index].GenericStructure(
                       codec_specific_info)) {
      sender_video.SetVideoStructure(&*structure);
    } else {
      sender_video.SetVideoStructure(nullptr);
    }
  }
  bool send_result =
      rtp_streams_[simulcast_index].sender_video->SendEncodedImage(
          rtp_config_.payload_type, codec_type_, rtp_timestamp, encoded_image,
          params_[simulcast_index].GetRtpVideoHeader(
              encoded_image, codec_specific_info, shared_frame_id_),
          expected_retransmission_time);
  if (frame_count_observer_) {
    FrameCounts& counts = frame_counts_[simulcast_index];
    if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
      ++counts.key_frames;
    } else if (encoded_image._frameType == VideoFrameType::kVideoFrameDelta) {
      ++counts.delta_frames;
    } else {
      RTC_DCHECK(encoded_image._frameType == VideoFrameType::kEmptyFrame);
    }
    frame_count_observer_->FrameCountUpdated(
        counts, rtp_config_.ssrcs[simulcast_index]);
  }
  if (!send_result)
    return Result(Result::ERROR_SEND_FAILED);
  return Result(Result::OK, rtp_timestamp);
}
// Distributes a new bitrate allocation to the RTP modules: whole allocation to
// a single stream, or split per simulcast stream otherwise.
void RtpVideoSender::OnBitrateAllocationUpdated(
    const VideoBitrateAllocation& bitrate) {
  RTC_DCHECK_RUN_ON(&transport_checker_);
  MutexLock lock(&mutex_);
  if (IsActiveLocked()) {
    if (rtp_streams_.size() == 1) {
      // If spatial scalability is enabled, it is covered by a single stream.
      rtp_streams_[0].rtp_rtcp->SetVideoBitrateAllocation(bitrate);
    } else {
      std::vector<absl::optional<VideoBitrateAllocation>> layer_bitrates =
          bitrate.GetSimulcastAllocations();
      // Simulcast is in use, split the VideoBitrateAllocation into one struct
      // per rtp stream, moving over the temporal layer allocation.
      for (size_t i = 0; i < rtp_streams_.size(); ++i) {
        // The next spatial layer could be used if the current one is
        // inactive.
        if (layer_bitrates[i]) {
          rtp_streams_[i].rtp_rtcp->SetVideoBitrateAllocation(
              *layer_bitrates[i]);
        } else {
          // Signal a 0 bitrate on a simulcast stream.
          rtp_streams_[i].rtp_rtcp->SetVideoBitrateAllocation(
              VideoBitrateAllocation());
        }
      }
    }
  }
}
// Pushes a per-stream copy of the layers allocation to each video sender and
// toggles each module's media-sending status based on whether the allocation
// lists an active spatial layer for that stream index.
void RtpVideoSender::OnVideoLayersAllocationUpdated(
    const VideoLayersAllocation& allocation) {
  MutexLock lock(&mutex_);
  if (IsActiveLocked()) {
    for (size_t i = 0; i < rtp_streams_.size(); ++i) {
      VideoLayersAllocation stream_allocation = allocation;
      stream_allocation.rtp_stream_index = i;
      rtp_streams_[i].sender_video->SetVideoLayersAllocation(
          std::move(stream_allocation));
      // Only send video frames on the rtp module if the encoder is configured
      // to send. This is to prevent stray frames to be sent after an encoder
      // has been reconfigured.
      rtp_streams_[i].rtp_rtcp->SetSendingMediaStatus(
          absl::c_any_of(allocation.active_spatial_layers,
                         [&i](const VideoLayersAllocation::SpatialLayer layer) {
                           return layer.rtp_stream_index == static_cast<int>(i);
                         }));
    }
  }
}
// NACK is considered enabled whenever a positive RTP history duration is
// configured.
bool RtpVideoSender::NackEnabled() const {
  return rtp_config_.nack.rtp_history_ms > 0;
}
// Sums the post-encode overhead rate over all streams currently sending
// media.
DataRate RtpVideoSender::GetPostEncodeOverhead() const {
  DataRate total_overhead = DataRate::Zero();
  for (const RtpStreamSender& stream : rtp_streams_) {
    if (stream.rtp_rtcp->SendingMedia()) {
      total_overhead += stream.sender_video->PostEncodeOverhead();
    }
  }
  return total_overhead;
}
// Forwards an incoming RTCP packet to every stream's RTP/RTCP module.
void RtpVideoSender::DeliverRtcp(const uint8_t* packet, size_t length) {
  // Runs on a network thread.
  for (const RtpStreamSender& stream : rtp_streams_)
    stream.rtp_rtcp->IncomingRtcpPacket(rtc::MakeArrayView(packet, length));
}
// Restores any suspended RTP state per media/RTX SSRC, builds the SSRC->module
// lookup map, and configures RTX payload types and send status.
void RtpVideoSender::ConfigureSsrcs(
    const std::map<uint32_t, RtpState>& suspended_ssrcs) {
  // Configure regular SSRCs.
  RTC_CHECK(ssrc_to_rtp_module_.empty());
  for (size_t i = 0; i < rtp_config_.ssrcs.size(); ++i) {
    uint32_t ssrc = rtp_config_.ssrcs[i];
    RtpRtcpInterface* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get();
    // Restore RTP state if previous existed.
    auto it = suspended_ssrcs.find(ssrc);
    if (it != suspended_ssrcs.end())
      rtp_rtcp->SetRtpState(it->second);
    ssrc_to_rtp_module_[ssrc] = rtp_rtcp;
  }
  // Set up RTX if available.
  if (rtp_config_.rtx.ssrcs.empty())
    return;
  // Media and RTX SSRC lists are parallel arrays (index i pairs them).
  RTC_DCHECK_EQ(rtp_config_.rtx.ssrcs.size(), rtp_config_.ssrcs.size());
  for (size_t i = 0; i < rtp_config_.rtx.ssrcs.size(); ++i) {
    uint32_t ssrc = rtp_config_.rtx.ssrcs[i];
    RtpRtcpInterface* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get();
    auto it = suspended_ssrcs.find(ssrc);
    if (it != suspended_ssrcs.end())
      rtp_rtcp->SetRtxState(it->second);
  }
  // Configure RTX payload types.
  RTC_DCHECK_GE(rtp_config_.rtx.payload_type, 0);
  for (const RtpStreamSender& stream : rtp_streams_) {
    stream.rtp_rtcp->SetRtxSendPayloadType(rtp_config_.rtx.payload_type,
                                           rtp_config_.payload_type);
    stream.rtp_rtcp->SetRtxSendStatus(kRtxRetransmitted |
                                      kRtxRedundantPayloads);
  }
  // If RED is in use, also map the RED payload type onto its RTX counterpart.
  if (rtp_config_.ulpfec.red_payload_type != -1 &&
      rtp_config_.ulpfec.red_rtx_payload_type != -1) {
    for (const RtpStreamSender& stream : rtp_streams_) {
      stream.rtp_rtcp->SetRtxSendPayloadType(
          rtp_config_.ulpfec.red_rtx_payload_type,
          rtp_config_.ulpfec.red_payload_type);
    }
  }
}
void RtpVideoSender::OnNetworkAvailability(bool network_available) {
for (const RtpStreamSender& stream : rtp_streams_) {
stream.rtp_rtcp->SetRTCPStatus(network_available ? rtp_config_.rtcp_mode
: RtcpMode::kOff);
}
}
std::map<uint32_t, RtpState> RtpVideoSender::GetRtpStates() const {
std::map<uint32_t, RtpState> rtp_states;
for (size_t i = 0; i < rtp_config_.ssrcs.size(); ++i) {
uint32_t ssrc = rtp_config_.ssrcs[i];
RTC_DCHECK_EQ(ssrc, rtp_streams_[i].rtp_rtcp->SSRC());
rtp_states[ssrc] = rtp_streams_[i].rtp_rtcp->GetRtpState();
// Only happens during shutdown, when RTP module is already inactive,
// so OK to call fec generator here.
if (rtp_streams_[i].fec_generator) {
absl::optional<RtpState> fec_state =
rtp_streams_[i].fec_generator->GetRtpState();
if (fec_state) {
uint32_t ssrc = rtp_config_.flexfec.ssrc;
rtp_states[ssrc] = *fec_state;
}
}
}
for (size_t i = 0; i < rtp_config_.rtx.ssrcs.size(); ++i) {
uint32_t ssrc = rtp_config_.rtx.ssrcs[i];
rtp_states[ssrc] = rtp_streams_[i].rtp_rtcp->GetRtxState();
}
return rtp_states;
}
std::map<uint32_t, RtpPayloadState> RtpVideoSender::GetRtpPayloadStates()
const {
MutexLock lock(&mutex_);
std::map<uint32_t, RtpPayloadState> payload_states;
for (const auto& param : params_) {
payload_states[param.ssrc()] = param.state();
payload_states[param.ssrc()].shared_frame_id = shared_frame_id_;
}
return payload_states;
}
// Records the new per-packet transport overhead and shrinks the max RTP
// packet size so payload + overhead stays within the assumed path MTU.
void RtpVideoSender::OnTransportOverheadChanged(
    size_t transport_overhead_bytes_per_packet) {
  MutexLock lock(&mutex_);
  transport_overhead_bytes_per_packet_ = transport_overhead_bytes_per_packet;
  // NOTE(review): if the overhead ever exceeded kPathMTU, the unsigned
  // subtraction below would wrap around — presumably callers guarantee
  // overhead < MTU; confirm against the transport controller.
  size_t max_rtp_packet_size =
      std::min(rtp_config_.max_packet_size,
               kPathMTU - transport_overhead_bytes_per_packet_);
  for (const RtpStreamSender& stream : rtp_streams_) {
    stream.rtp_rtcp->SetMaxRtpPacketSize(max_rtp_packet_size);
  }
}
// Splits the network target bitrate into encoder target, packetization
// overhead and FEC/NACK protection overhead, updating
// `encoder_target_rate_bps_` and `protection_bitrate_bps_`.
void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
                                      int framerate) {
  // Subtract overhead from bitrate.
  MutexLock lock(&mutex_);
  size_t num_active_streams = 0;
  size_t overhead_bytes_per_packet = 0;
  for (const auto& stream : rtp_streams_) {
    if (stream.rtp_rtcp->SendingMedia()) {
      overhead_bytes_per_packet += stream.rtp_rtcp->ExpectedPerPacketOverhead();
      ++num_active_streams;
    }
  }
  // Use the average overhead across active streams.
  if (num_active_streams > 1) {
    overhead_bytes_per_packet /= num_active_streams;
  }
  DataSize packet_overhead = DataSize::Bytes(
      overhead_bytes_per_packet + transport_overhead_bytes_per_packet_);
  DataSize max_total_packet_size = DataSize::Bytes(
      rtp_config_.max_packet_size + transport_overhead_bytes_per_packet_);
  uint32_t payload_bitrate_bps = update.target_bitrate.bps();
  if (has_packet_feedback_) {
    DataRate overhead_rate =
        CalculateOverheadRate(update.target_bitrate, max_total_packet_size,
                              packet_overhead, Frequency::Hertz(framerate));
    // TODO(srte): We probably should not accept 0 payload bitrate here.
    payload_bitrate_bps = rtc::saturated_cast<uint32_t>(payload_bitrate_bps -
                                                        overhead_rate.bps());
  }
  // Get the encoder target rate. It is the estimated network rate -
  // protection overhead.
  // TODO(srte): We should multiply with 255 here.
  encoder_target_rate_bps_ = fec_controller_->UpdateFecRates(
      payload_bitrate_bps, framerate,
      rtc::saturated_cast<uint8_t>(update.packet_loss_ratio * 256),
      loss_mask_vector_, update.round_trip_time.ms());
  if (!fec_allowed_) {
    encoder_target_rate_bps_ = payload_bitrate_bps;
    // fec_controller_->UpdateFecRates() was still called so as to allow
    // `fec_controller_` to update whatever internal state it might have,
    // since `fec_allowed_` may be toggled back on at any moment.
  }
  // Subtract post encode overhead from the encoder target. If target rate
  // is really low, cap the overhead at 50%. This also avoids the case where
  // `encoder_target_rate_bps_` is 0 due to encoder pause event while the
  // packetization rate is positive since packets are still flowing.
  uint32_t post_encode_overhead_bps = std::min(
      GetPostEncodeOverhead().bps<uint32_t>(), encoder_target_rate_bps_ / 2);
  encoder_target_rate_bps_ -= post_encode_overhead_bps;
  // The loss mask has been consumed by UpdateFecRates(); start a new window.
  loss_mask_vector_.clear();
  uint32_t encoder_overhead_rate_bps = 0;
  if (has_packet_feedback_) {
    // TODO(srte): The packet size should probably be the same as in the
    // CalculateOverheadRate call above (just max_total_packet_size), it doesn't
    // make sense to use different packet rates for different overhead
    // calculations.
    DataRate encoder_overhead_rate = CalculateOverheadRate(
        DataRate::BitsPerSec(encoder_target_rate_bps_),
        max_total_packet_size - DataSize::Bytes(overhead_bytes_per_packet),
        packet_overhead, Frequency::Hertz(framerate));
    encoder_overhead_rate_bps = std::min(
        encoder_overhead_rate.bps<uint32_t>(),
        update.target_bitrate.bps<uint32_t>() - encoder_target_rate_bps_);
  }
  const uint32_t media_rate = encoder_target_rate_bps_ +
                              encoder_overhead_rate_bps +
                              post_encode_overhead_bps;
  RTC_DCHECK_GE(update.target_bitrate, DataRate::BitsPerSec(media_rate));
  // `protection_bitrate_bps_` includes overhead.
  protection_bitrate_bps_ = update.target_bitrate.bps() - media_rate;
}
// Returns the encoder target rate last computed by OnBitrateUpdated(), in bps.
uint32_t RtpVideoSender::GetPayloadBitrateBps() const {
  return encoder_target_rate_bps_;
}
// Returns the protection (FEC/overhead) rate last computed by
// OnBitrateUpdated(), in bps; includes overhead.
uint32_t RtpVideoSender::GetProtectionBitrateBps() const {
  return protection_bitrate_bps_;
}
std::vector<RtpSequenceNumberMap::Info> RtpVideoSender::GetSentRtpPacketInfos(
uint32_t ssrc,
rtc::ArrayView<const uint16_t> sequence_numbers) const {
for (const auto& rtp_stream : rtp_streams_) {
if (ssrc == rtp_stream.rtp_rtcp->SSRC()) {
return rtp_stream.rtp_rtcp->GetSentRtpPacketInfos(sequence_numbers);
}
}
return std::vector<RtpSequenceNumberMap::Info>();
}
int RtpVideoSender::ProtectionRequest(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params,
uint32_t* sent_video_rate_bps,
uint32_t* sent_nack_rate_bps,
uint32_t* sent_fec_rate_bps) {
*sent_video_rate_bps = 0;
*sent_nack_rate_bps = 0;
*sent_fec_rate_bps = 0;
for (const RtpStreamSender& stream : rtp_streams_) {
stream.rtp_rtcp->SetFecProtectionParams(*delta_params, *key_params);
auto send_bitrate = stream.rtp_rtcp->GetSendRates();
*sent_video_rate_bps += send_bitrate[RtpPacketMediaType::kVideo].bps();
*sent_fec_rate_bps +=
send_bitrate[RtpPacketMediaType::kForwardErrorCorrection].bps();
*sent_nack_rate_bps +=
send_bitrate[RtpPacketMediaType::kRetransmission].bps();
}
return 0;
}
// Forwards the retransmission setting (a RetransmissionMode value or bitwise
// combination thereof) to every stream's video sender.
void RtpVideoSender::SetRetransmissionMode(int retransmission_mode) {
  MutexLock lock(&mutex_);
  for (const RtpStreamSender& stream : rtp_streams_) {
    stream.sender_video->SetRetransmissionSetting(retransmission_mode);
  }
}
// Toggles whether FEC overhead may be deducted from the encoder target rate
// (consumed in OnBitrateUpdated()).
void RtpVideoSender::SetFecAllowed(bool fec_allowed) {
  MutexLock lock(&mutex_);
  fec_allowed_ = fec_allowed;
}
// Processes transport feedback: records a loss mask for the FEC controller,
// proactively retransmits trailing lost media packets, and prunes
// acknowledged packets from each module's RTP history.
void RtpVideoSender::OnPacketFeedbackVector(
    std::vector<StreamPacketInfo> packet_feedback_vector) {
  if (fec_controller_->UseLossVectorMask()) {
    MutexLock lock(&mutex_);
    for (const StreamPacketInfo& packet : packet_feedback_vector) {
      loss_mask_vector_.push_back(!packet.received);
    }
  }
  // Map from SSRC to all acked packets for that RTP module.
  std::map<uint32_t, std::vector<uint16_t>> acked_packets_per_ssrc;
  for (const StreamPacketInfo& packet : packet_feedback_vector) {
    if (packet.received && packet.ssrc) {
      acked_packets_per_ssrc[*packet.ssrc].push_back(
          packet.rtp_sequence_number);
    }
  }
  // Map from SSRC to vector of RTP sequence numbers that are indicated as
  // lost by feedback, without being trailed by any received packets.
  std::map<uint32_t, std::vector<uint16_t>> early_loss_detected_per_ssrc;
  for (const StreamPacketInfo& packet : packet_feedback_vector) {
    // Only include new media packets, not retransmissions/padding/fec.
    if (!packet.received && packet.ssrc && !packet.is_retransmission) {
      // Last known lost packet, might not be detectable as lost by remote
      // jitter buffer.
      early_loss_detected_per_ssrc[*packet.ssrc].push_back(
          packet.rtp_sequence_number);
    } else if (packet.ssrc) {
      // Bugfix: `packet.ssrc` is an absl::optional and may be empty (the
      // branch above checks it explicitly), so guard the dereference here
      // too — `*packet.ssrc` on an empty optional is undefined behavior.
      // Packet received, so any loss prior to this is already detectable.
      early_loss_detected_per_ssrc.erase(*packet.ssrc);
    }
  }
  // Retransmit the still-trailing losses right away.
  for (const auto& kv : early_loss_detected_per_ssrc) {
    const uint32_t ssrc = kv.first;
    auto it = ssrc_to_rtp_module_.find(ssrc);
    RTC_CHECK(it != ssrc_to_rtp_module_.end());
    RTPSender* rtp_sender = it->second->RtpSender();
    for (uint16_t sequence_number : kv.second) {
      rtp_sender->ReSendPacket(sequence_number);
    }
  }
  for (const auto& kv : acked_packets_per_ssrc) {
    const uint32_t ssrc = kv.first;
    auto it = ssrc_to_rtp_module_.find(ssrc);
    if (it == ssrc_to_rtp_module_.end()) {
      // No media, likely FEC or padding. Ignore since there's no RTP history to
      // clean up anyway.
      continue;
    }
    rtc::ArrayView<const uint16_t> rtp_sequence_numbers(kv.second);
    it->second->OnPacketsAcknowledged(rtp_sequence_numbers);
  }
}
// Forwards the encoder resolution and temporal layer count, together with the
// configured max RTP packet size, to the FEC controller.
void RtpVideoSender::SetEncodingData(size_t width,
                                     size_t height,
                                     size_t num_temporal_layers) {
  fec_controller_->SetEncodingData(width, height, num_temporal_layers,
                                   rtp_config_.max_packet_size);
}
// Converts a data rate into a per-packet-overhead rate: packet rate (rounded
// up to whole packets per second) times the overhead per packet. When the
// field trial is enabled, the packet rate is instead derived from whole
// packets per frame at the (at least 1 Hz) frame rate.
DataRate RtpVideoSender::CalculateOverheadRate(DataRate data_rate,
                                               DataSize packet_size,
                                               DataSize overhead_per_packet,
                                               Frequency framerate) const {
  if (!use_frame_rate_for_overhead_) {
    const Frequency packet_rate = data_rate / packet_size;
    return packet_rate.RoundUpTo(Frequency::Hertz(1)) * overhead_per_packet;
  }
  const Frequency fps = std::max(framerate, Frequency::Hertz(1));
  const DataSize frame_size = data_rate / fps;
  const int packets_per_frame = ceil(frame_size / packet_size);
  const Frequency packet_rate = packets_per_frame * fps;
  return packet_rate.RoundUpTo(Frequency::Hertz(1)) * overhead_per_packet;
}
} // namespace webrtc

View file

@ -0,0 +1,218 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_VIDEO_SENDER_H_
#define CALL_RTP_VIDEO_SENDER_H_
#include <map>
#include <memory>
#include <unordered_set>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/transport.h"
#include "api/fec_controller.h"
#include "api/fec_controller_override.h"
#include "api/field_trials_view.h"
#include "api/rtc_event_log/rtc_event_log.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "call/rtp_config.h"
#include "call/rtp_payload_params.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "call/rtp_video_sender_interface.h"
#include "modules/rtp_rtcp/include/flexfec_sender.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/rtp_sender.h"
#include "modules/rtp_rtcp/source/rtp_sender_video.h"
#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "rtc_base/rate_limiter.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
class FrameEncryptorInterface;
class RtpTransportControllerSendInterface;
namespace webrtc_internal_rtp_video_sender {
// RTP state for a single simulcast stream. Internal to the implementation of
// RtpVideoSender.
struct RtpStreamSender {
  RtpStreamSender(std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp,
                  std::unique_ptr<RTPSenderVideo> sender_video,
                  std::unique_ptr<VideoFecGenerator> fec_generator);
  ~RtpStreamSender();
  // Move-only: the unique_ptr members implicitly delete the copy operations.
  RtpStreamSender(RtpStreamSender&&) = default;
  RtpStreamSender& operator=(RtpStreamSender&&) = default;
  // Note: Needs pointer stability.
  // RTP/RTCP module handling this stream's media SSRC.
  std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp;
  // Video payload packetizer feeding `rtp_rtcp`.
  std::unique_ptr<RTPSenderVideo> sender_video;
  // Optional FEC generator (ULPFEC/FlexFEC); may be null.
  std::unique_ptr<VideoFecGenerator> fec_generator;
};
} // namespace webrtc_internal_rtp_video_sender
// RtpVideoSender routes outgoing data to the correct sending RTP module, based
// on the simulcast layer in RTPVideoHeader.
class RtpVideoSender : public RtpVideoSenderInterface,
                       public VCMProtectionCallback,
                       public StreamFeedbackObserver {
 public:
  // Rtp modules are assumed to be sorted in simulcast index order.
  RtpVideoSender(
      Clock* clock,
      const std::map<uint32_t, RtpState>& suspended_ssrcs,
      const std::map<uint32_t, RtpPayloadState>& states,
      const RtpConfig& rtp_config,
      int rtcp_report_interval_ms,
      Transport* send_transport,
      const RtpSenderObservers& observers,
      RtpTransportControllerSendInterface* transport,
      RtcEventLog* event_log,
      RateLimiter* retransmission_limiter,  // move inside RtpTransport
      std::unique_ptr<FecController> fec_controller,
      FrameEncryptorInterface* frame_encryptor,
      const CryptoOptions& crypto_options,  // move inside RtpTransport
      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
      const FieldTrialsView& field_trials,
      TaskQueueFactory* task_queue_factory);
  ~RtpVideoSender() override;
  RtpVideoSender(const RtpVideoSender&) = delete;
  RtpVideoSender& operator=(const RtpVideoSender&) = delete;
  // Enables/disables sending on all RTP modules.
  void SetSending(bool enabled) RTC_LOCKS_EXCLUDED(mutex_) override;
  bool IsActive() RTC_LOCKS_EXCLUDED(mutex_) override;
  void OnNetworkAvailability(bool network_available)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  // Snapshot per-SSRC RTP/payload state (e.g. for restoring on recreation).
  std::map<uint32_t, RtpState> GetRtpStates() const
      RTC_LOCKS_EXCLUDED(mutex_) override;
  std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const
      RTC_LOCKS_EXCLUDED(mutex_) override;
  void DeliverRtcp(const uint8_t* packet, size_t length)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  // Implements webrtc::VCMProtectionCallback.
  int ProtectionRequest(const FecProtectionParams* delta_params,
                        const FecProtectionParams* key_params,
                        uint32_t* sent_video_rate_bps,
                        uint32_t* sent_nack_rate_bps,
                        uint32_t* sent_fec_rate_bps)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  // 'retransmission_mode' is either a value of enum RetransmissionMode, or
  // computed with bitwise operators on values of enum RetransmissionMode.
  void SetRetransmissionMode(int retransmission_mode)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  // Implements FecControllerOverride.
  void SetFecAllowed(bool fec_allowed) RTC_LOCKS_EXCLUDED(mutex_) override;
  // Implements EncodedImageCallback.
  // Returns 0 if the packet was routed / sent, -1 otherwise.
  EncodedImageCallback::Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  void OnBitrateAllocationUpdated(const VideoBitrateAllocation& bitrate)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  void OnVideoLayersAllocationUpdated(
      const VideoLayersAllocation& layers) override;
  void OnTransportOverheadChanged(size_t transport_overhead_bytes_per_packet)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  void OnBitrateUpdated(BitrateAllocationUpdate update, int framerate)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  uint32_t GetPayloadBitrateBps() const RTC_LOCKS_EXCLUDED(mutex_) override;
  uint32_t GetProtectionBitrateBps() const RTC_LOCKS_EXCLUDED(mutex_) override;
  void SetEncodingData(size_t width, size_t height, size_t num_temporal_layers)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
      uint32_t ssrc,
      rtc::ArrayView<const uint16_t> sequence_numbers) const
      RTC_LOCKS_EXCLUDED(mutex_) override;
  // From StreamFeedbackObserver.
  void OnPacketFeedbackVector(
      std::vector<StreamPacketInfo> packet_feedback_vector)
      RTC_LOCKS_EXCLUDED(mutex_) override;
 private:
  bool IsActiveLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void SetActiveModulesLocked(bool sending)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void ConfigureProtection();
  void ConfigureSsrcs(const std::map<uint32_t, RtpState>& suspended_ssrcs);
  bool NackEnabled() const;
  DataRate GetPostEncodeOverhead() const;
  DataRate CalculateOverheadRate(DataRate data_rate,
                                 DataSize packet_size,
                                 DataSize overhead_per_packet,
                                 Frequency framerate) const;
  const FieldTrialsView& field_trials_;
  const bool use_frame_rate_for_overhead_;
  const bool has_packet_feedback_;
  // Semantically equivalent to checking for `transport_->GetWorkerQueue()`
  // but some tests need to be updated to call from the correct context.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker transport_checker_;
  // TODO(bugs.webrtc.org/13517): Remove mutex_ once RtpVideoSender runs on the
  // transport task queue.
  mutable Mutex mutex_;
  bool active_ RTC_GUARDED_BY(mutex_);
  const std::unique_ptr<FecController> fec_controller_;
  bool fec_allowed_ RTC_GUARDED_BY(mutex_);
  // Rtp modules are assumed to be sorted in simulcast index order.
  const std::vector<webrtc_internal_rtp_video_sender::RtpStreamSender>
      rtp_streams_;
  const RtpConfig rtp_config_;
  const absl::optional<VideoCodecType> codec_type_;
  RtpTransportControllerSendInterface* const transport_;
  // When using the generic descriptor we want all simulcast streams to share
  // one frame id space (so that the SFU can switch stream without having to
  // rewrite the frame id), therefore `shared_frame_id` has to live in a place
  // where we are aware of all the different streams.
  int64_t shared_frame_id_ = 0;
  std::vector<RtpPayloadParams> params_ RTC_GUARDED_BY(mutex_);
  size_t transport_overhead_bytes_per_packet_ RTC_GUARDED_BY(mutex_);
  uint32_t protection_bitrate_bps_;
  uint32_t encoder_target_rate_bps_;
  // Per-packet loss mask accumulated from transport feedback, consumed by the
  // FEC controller in OnBitrateUpdated().
  std::vector<bool> loss_mask_vector_ RTC_GUARDED_BY(mutex_);
  std::vector<FrameCounts> frame_counts_ RTC_GUARDED_BY(mutex_);
  FrameCountObserver* const frame_count_observer_;
  // Effectively const map from SSRC to RtpRtcp, for all media SSRCs.
  // This map is set at construction time and never changed, but it's
  // non-trivial to make it properly const.
  std::map<uint32_t, RtpRtcpInterface*> ssrc_to_rtp_module_;
};
} // namespace webrtc
#endif // CALL_RTP_VIDEO_SENDER_H_

View file

@ -0,0 +1,65 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTP_VIDEO_SENDER_INTERFACE_H_
#define CALL_RTP_VIDEO_SENDER_INTERFACE_H_
#include <map>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "api/fec_controller_override.h"
#include "api/video/video_layers_allocation.h"
#include "call/rtp_config.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
#include "modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
class VideoBitrateAllocation;
struct FecProtectionParams;
// Interface for the RTP sending half of a video send stream. Implementations
// receive encoded images (EncodedImageCallback) and expose controls for
// sending state, bitrate allocation and FEC.
class RtpVideoSenderInterface : public EncodedImageCallback,
                                public FecControllerOverride {
 public:
  // Sets whether or not RTP packets are allowed to be sent on this sender.
  virtual void SetSending(bool enabled) = 0;
  virtual bool IsActive() = 0;

  virtual void OnNetworkAvailability(bool network_available) = 0;
  // Per-SSRC RTP/payload state, e.g. for state hand-over on reconfiguration.
  virtual std::map<uint32_t, RtpState> GetRtpStates() const = 0;
  virtual std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const = 0;

  // Delivers a received RTCP packet of `length` bytes to this sender.
  virtual void DeliverRtcp(const uint8_t* packet, size_t length) = 0;

  virtual void OnBitrateAllocationUpdated(
      const VideoBitrateAllocation& bitrate) = 0;
  virtual void OnVideoLayersAllocationUpdated(
      const VideoLayersAllocation& allocation) = 0;
  virtual void OnBitrateUpdated(BitrateAllocationUpdate update,
                                int framerate) = 0;
  virtual void OnTransportOverheadChanged(
      size_t transport_overhead_bytes_per_packet) = 0;
  virtual uint32_t GetPayloadBitrateBps() const = 0;
  virtual uint32_t GetProtectionBitrateBps() const = 0;
  virtual void SetEncodingData(size_t width,
                               size_t height,
                               size_t num_temporal_layers) = 0;
  // Looks up send info for the given `sequence_numbers` on `ssrc`.
  virtual std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
      uint32_t ssrc,
      rtc::ArrayView<const uint16_t> sequence_numbers) const = 0;

  // Implements FecControllerOverride.
  void SetFecAllowed(bool fec_allowed) override = 0;
};
} // namespace webrtc
#endif // CALL_RTP_VIDEO_SENDER_INTERFACE_H_

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/rtx_receive_stream.h"
#include <string.h>
#include <utility>
#include "api/array_view.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace webrtc {
// `associated_payload_types` maps RTX payload type -> media payload type. An
// empty map means no packet can ever be recovered; that is logged as a likely
// misconfiguration but is not an error.
RtxReceiveStream::RtxReceiveStream(
    RtpPacketSinkInterface* media_sink,
    std::map<int, int> associated_payload_types,
    uint32_t media_ssrc,
    ReceiveStatistics* rtp_receive_statistics /* = nullptr */)
    : media_sink_(media_sink),
      associated_payload_types_(std::move(associated_payload_types)),
      media_ssrc_(media_ssrc),
      rtp_receive_statistics_(rtp_receive_statistics) {
  // Detach so the sequence checker binds to the context that first delivers
  // packets (OnRtpPacket) rather than the construction context.
  packet_checker_.Detach();
  if (associated_payload_types_.empty()) {
    RTC_LOG(LS_WARNING)
        << "RtxReceiveStream created with empty payload type mapping.";
  }
}

RtxReceiveStream::~RtxReceiveStream() = default;
// Replaces the RTX -> media payload type mapping post construction. Must be
// called on the same sequence that OnRtpPacket() runs on (enforced by
// `packet_checker_`).
void RtxReceiveStream::SetAssociatedPayloadTypes(
    std::map<int, int> associated_payload_types) {
  RTC_DCHECK_RUN_ON(&packet_checker_);
  associated_payload_types_ = std::move(associated_payload_types);
}
// RtpPacketSinkInterface implementation. Decapsulates an incoming RTX packet
// and forwards the recovered media packet to `media_sink_`. Packets with a
// too-short payload or an unmapped payload type are dropped.
void RtxReceiveStream::OnRtpPacket(const RtpPacketReceived& rtx_packet) {
  RTC_DCHECK_RUN_ON(&packet_checker_);

  if (rtp_receive_statistics_) {
    // Statistics are recorded for the RTX stream itself, before
    // decapsulation.
    rtp_receive_statistics_->OnRtpPacket(rtx_packet);
  }
  rtc::ArrayView<const uint8_t> payload = rtx_packet.payload();

  // The RTX payload must at least contain the original sequence number
  // (kRtxHeaderSize bytes).
  if (payload.size() < kRtxHeaderSize) {
    return;
  }

  auto it = associated_payload_types_.find(rtx_packet.PayloadType());
  if (it == associated_payload_types_.end()) {
    RTC_DLOG(LS_VERBOSE) << "Unknown payload type "
                         << static_cast<int>(rtx_packet.PayloadType())
                         << " on rtx ssrc " << rtx_packet.Ssrc();
    return;
  }

  // Rebuild the original media packet: copy the RTX header fields, then
  // restore the media SSRC, the original sequence number (big-endian in the
  // first two payload bytes) and the associated media payload type.
  RtpPacketReceived media_packet;
  media_packet.CopyHeaderFrom(rtx_packet);
  media_packet.SetSsrc(media_ssrc_);
  media_packet.SetSequenceNumber((payload[0] << 8) + payload[1]);
  media_packet.SetPayloadType(it->second);
  media_packet.set_recovered(true);
  media_packet.set_arrival_time(rtx_packet.arrival_time());

  // Skip the RTX header.
  rtc::ArrayView<const uint8_t> rtx_payload = payload.subview(kRtxHeaderSize);

  uint8_t* media_payload = media_packet.AllocatePayload(rtx_payload.size());
  RTC_DCHECK(media_payload != nullptr);
  memcpy(media_payload, rtx_payload.data(), rtx_payload.size());

  media_sink_->OnRtpPacket(media_packet);
}
} // namespace webrtc

View file

@ -0,0 +1,59 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_RTX_RECEIVE_STREAM_H_
#define CALL_RTX_RECEIVE_STREAM_H_
#include <cstdint>
#include <map>
#include "api/sequence_checker.h"
#include "call/rtp_packet_sink_interface.h"
#include "rtc_base/system/no_unique_address.h"
namespace webrtc {
class ReceiveStatistics;
// This class is responsible for RTX decapsulation. The resulting media packets
// are passed on to a sink representing the associated media stream.
class RtxReceiveStream : public RtpPacketSinkInterface {
 public:
  // `associated_payload_types` maps RTX payload types to the corresponding
  // media payload types.
  RtxReceiveStream(RtpPacketSinkInterface* media_sink,
                   std::map<int, int> associated_payload_types,
                   uint32_t media_ssrc,
                   // TODO(nisse): Delete this argument, and
                   // corresponding member variable, by moving the
                   // responsibility for rtcp feedback to
                   // RtpStreamReceiverController.
                   ReceiveStatistics* rtp_receive_statistics = nullptr);
  ~RtxReceiveStream() override;

  // Update payload types post construction. Must be called from the same
  // calling context as `OnRtpPacket` is called on.
  void SetAssociatedPayloadTypes(std::map<int, int> associated_payload_types);

  // RtpPacketSinkInterface.
  void OnRtpPacket(const RtpPacketReceived& packet) override;

 private:
  // Detached in the constructor; binds to the packet-delivery sequence on
  // first use.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_checker_;
  RtpPacketSinkInterface* const media_sink_;
  // Map from rtx payload type -> media payload type.
  std::map<int, int> associated_payload_types_ RTC_GUARDED_BY(&packet_checker_);
  // SSRC written into recovered media packets.
  // TODO(nisse): Ultimately, the media receive stream shouldn't care about the
  // ssrc, and we should delete this.
  const uint32_t media_ssrc_;
  ReceiveStatistics* const rtp_receive_statistics_;
};
} // namespace webrtc
#endif // CALL_RTX_RECEIVE_STREAM_H_

View file

@ -0,0 +1,276 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/simulated_network.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include "api/units/data_rate.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
// Calculate the time (in microseconds) that takes to send N `bits` on a
// network with link capacity equal to `capacity_kbps` starting at time
// `start_time_us`.
// Returns the time (in microseconds) at which the last of `bits` bits has
// left a link of `capacity_kbps` kbps when transmission starts at
// `start_time_us`. A capacity of 0 models an infinitely fast link.
int64_t CalculateArrivalTimeUs(int64_t start_time_us,
                               int64_t bits,
                               int capacity_kbps) {
  if (capacity_kbps == 0) {
    return start_time_us;
  }
  // The serialization delay in microseconds is
  //   bits / (capacity_kbps * 1000 bits-per-second) * 10^6
  // which simplifies to 1000 * bits / capacity_kbps. Pre-adding
  // `capacity_kbps - 1` to the numerator rounds any fractional microsecond
  // up, so sending 0 bits costs 0 extra time while sending 1 bit costs at
  // least 1 us.
  const int64_t transmission_delay_us =
      (1000 * bits + capacity_kbps - 1) / capacity_kbps;
  return start_time_us + transmission_delay_us;
}
} // namespace
// `random_seed` seeds the PRNG used for loss and jitter decisions, so a fixed
// seed gives reproducible runs.
SimulatedNetwork::SimulatedNetwork(Config config, uint64_t random_seed)
    : random_(random_seed),
      bursting_(false),
      last_enqueue_time_us_(0),
      last_capacity_link_exit_time_(0) {
  // Route through SetConfig() so the derived loss-model probabilities are
  // computed in exactly one place.
  SetConfig(config);
}

SimulatedNetwork::~SimulatedNetwork() = default;
// Installs `config` under `config_lock_` and refreshes the loss-model
// probabilities derived from it (uniform loss, or a Gilbert-Elliot burst
// model when `avg_burst_loss_length` is set).
void SimulatedNetwork::SetConfig(const Config& config) {
  MutexLock lock(&config_lock_);
  config_state_.config = config;  // Shallow copy of the struct.

  const double prob_loss = config.loss_percent / 100.0;
  if (config_state_.config.avg_burst_loss_length != -1) {
    // Lose packets according to a gilbert-elliot model.
    const int avg_burst_loss_length = config.avg_burst_loss_length;
    const int min_avg_burst_loss_length =
        std::ceil(prob_loss / (1 - prob_loss));

    RTC_CHECK_GT(avg_burst_loss_length, min_avg_burst_loss_length)
        << "For a total packet loss of " << config.loss_percent
        << "%% then"
           " avg_burst_loss_length must be "
        << min_avg_burst_loss_length + 1 << " or higher.";

    config_state_.prob_loss_bursting = (1.0 - 1.0 / avg_burst_loss_length);
    config_state_.prob_start_bursting =
        prob_loss / (1 - prob_loss) / avg_burst_loss_length;
  } else {
    // Uniform loss.
    config_state_.prob_loss_bursting = prob_loss;
    config_state_.prob_start_bursting = prob_loss;
  }
}
// Applies `config_modifier` to the stored config under the config lock.
// NOTE: unlike SetConfig(), this does not recompute the loss-model
// probabilities (prob_loss_bursting / prob_start_bursting).
void SimulatedNetwork::UpdateConfig(
    std::function<void(BuiltInNetworkBehaviorConfig*)> config_modifier) {
  MutexLock lock(&config_lock_);
  config_modifier(&config_state_.config);
}
// Pauses delivery until `until_us`. The pause is realized as extra delay
// applied to packets as they leave the capacity queue (see
// UpdateCapacityQueue()).
void SimulatedNetwork::PauseTransmissionUntil(int64_t until_us) {
  MutexLock lock(&config_lock_);
  config_state_.pause_transmission_until_us = until_us;
}
// Implements NetworkBehaviorInterface. Returns false when the packet is
// dropped because a finite capacity queue is full; otherwise enqueues it with
// the time at which its last bit will have left the capacity link.
bool SimulatedNetwork::EnqueuePacket(PacketInFlightInfo packet) {
  RTC_DCHECK_RUNS_SERIALIZED(&process_checker_);
  // Check that old packets don't get enqueued, the SimulatedNetwork expects
  // that the packets' send time is monotonically increasing. The tolerance for
  // non-monotonic enqueue events is 0.5 ms because on multi core systems
  // clock_gettime(CLOCK_MONOTONIC) can show non-monotonic behaviour between
  // threads running on different cores.
  // TODO(bugs.webrtc.org/14525): Open a bug on this with the goal to re-enable
  // the DCHECK.
  // At the moment, we see more than 130ms between non-monotonic events, which
  // is more than expected.
  // RTC_DCHECK_GE(packet.send_time_us - last_enqueue_time_us_, -2000);

  ConfigState state = GetConfigState();

  // If the network config requires packet overhead, let's apply it as early as
  // possible.
  packet.size += state.config.packet_overhead;

  // If `queue_length_packets` is 0, the queue size is infinite.
  if (state.config.queue_length_packets > 0 &&
      capacity_link_.size() >= state.config.queue_length_packets) {
    // Too many packets on the link, drop this one.
    return false;
  }

  // If the packet has been sent before the previous packet in the network left
  // the capacity queue, let's ensure the new packet will start its trip in the
  // network after the last bit of the previous packet has left it.
  int64_t packet_send_time_us = packet.send_time_us;
  if (!capacity_link_.empty()) {
    packet_send_time_us =
        std::max(packet_send_time_us, capacity_link_.back().arrival_time_us);
  }
  capacity_link_.push({.packet = packet,
                       .arrival_time_us = CalculateArrivalTimeUs(
                           packet_send_time_us, packet.size * 8,
                           state.config.link_capacity_kbps)});

  // Only update `next_process_time_us_` if not already set (if set, there is no
  // way that a new packet will make the `next_process_time_us_` change).
  if (!next_process_time_us_) {
    RTC_DCHECK_EQ(capacity_link_.size(), 1);
    next_process_time_us_ = capacity_link_.front().arrival_time_us;
  }

  last_enqueue_time_us_ = packet.send_time_us;
  return true;
}
// Implements NetworkBehaviorInterface. Returns the next time at which
// DequeueDeliverablePackets() may yield packets, or nullopt when the network
// holds no packets.
absl::optional<int64_t> SimulatedNetwork::NextDeliveryTimeUs() const {
  RTC_DCHECK_RUNS_SERIALIZED(&process_checker_);
  return next_process_time_us_;
}
// Moves every packet whose capacity transit completes by `time_now_us` from
// `capacity_link_` into `delay_link_`, applying pause, loss (uniform or
// Gilbert-Elliot bursts, see SetConfig()) and delay jitter on the way. The
// head packet's arrival time is recomputed first so that config changes made
// since enqueue take effect.
void SimulatedNetwork::UpdateCapacityQueue(ConfigState state,
                                           int64_t time_now_us) {
  // If there is at least one packet in the `capacity_link_`, let's update its
  // arrival time to take into account changes in the network configuration
  // since the last call to UpdateCapacityQueue.
  if (!capacity_link_.empty()) {
    capacity_link_.front().arrival_time_us = CalculateArrivalTimeUs(
        std::max(capacity_link_.front().packet.send_time_us,
                 last_capacity_link_exit_time_),
        capacity_link_.front().packet.size * 8,
        state.config.link_capacity_kbps);
  }

  // The capacity link is empty or the first packet is not expected to exit yet.
  if (capacity_link_.empty() ||
      time_now_us < capacity_link_.front().arrival_time_us) {
    return;
  }
  bool reorder_packets = false;

  do {
    // Time to get this packet (the original or just updated arrival_time_us is
    // smaller or equal to time_now_us).
    PacketInfo packet = capacity_link_.front();
    capacity_link_.pop();

    // If the network is paused, the pause will be implemented as an extra delay
    // to be spent in the `delay_link_` queue.
    if (state.pause_transmission_until_us > packet.arrival_time_us) {
      packet.arrival_time_us = state.pause_transmission_until_us;
    }

    // Store the original arrival time, before applying packet loss or extra
    // delay. This is needed to know when it is the first available time the
    // next packet in the `capacity_link_` queue can start transmitting.
    last_capacity_link_exit_time_ = packet.arrival_time_us;

    // Drop packets at an average rate of `state.config.loss_percent` with
    // an average loss burst length of `state.config.avg_burst_loss_length`.
    if ((bursting_ && random_.Rand<double>() < state.prob_loss_bursting) ||
        (!bursting_ && random_.Rand<double>() < state.prob_start_bursting)) {
      bursting_ = true;
      packet.arrival_time_us = PacketDeliveryInfo::kNotReceived;
    } else {
      // If packets are not dropped, apply extra delay as configured.
      bursting_ = false;
      int64_t arrival_time_jitter_us = std::max(
          random_.Gaussian(state.config.queue_delay_ms * 1000,
                           state.config.delay_standard_deviation_ms * 1000),
          0.0);

      // If reordering is not allowed then adjust arrival_time_jitter
      // to make sure all packets are sent in order.
      int64_t last_arrival_time_us =
          delay_link_.empty() ? -1 : delay_link_.back().arrival_time_us;
      if (!state.config.allow_reordering && !delay_link_.empty() &&
          packet.arrival_time_us + arrival_time_jitter_us <
              last_arrival_time_us) {
        arrival_time_jitter_us = last_arrival_time_us - packet.arrival_time_us;
      }
      packet.arrival_time_us += arrival_time_jitter_us;

      // Optimization: Schedule a reorder only when a packet will exit before
      // the one in front.
      if (last_arrival_time_us > packet.arrival_time_us) {
        reorder_packets = true;
      }
    }
    delay_link_.emplace_back(packet);

    // If there are no packets in the queue, there is nothing else to do.
    if (capacity_link_.empty()) {
      break;
    }
    // If instead there is another packet in the `capacity_link_` queue, let's
    // calculate its arrival_time_us based on the latest config (which might
    // have been changed since it was enqueued).
    int64_t next_start = std::max(last_capacity_link_exit_time_,
                                  capacity_link_.front().packet.send_time_us);
    capacity_link_.front().arrival_time_us = CalculateArrivalTimeUs(
        next_start, capacity_link_.front().packet.size * 8,
        state.config.link_capacity_kbps);
    // And if the next packet in the queue needs to exit, let's dequeue it.
  } while (capacity_link_.front().arrival_time_us <= time_now_us);

  if (state.config.allow_reordering && reorder_packets) {
    // Packets arrived out of order and since the network config allows
    // reordering, let's sort them per arrival_time_us to make so they will also
    // be delivered out of order.
    std::stable_sort(delay_link_.begin(), delay_link_.end(),
                     [](const PacketInfo& p1, const PacketInfo& p2) {
                       return p1.arrival_time_us < p2.arrival_time_us;
                     });
  }
}
// Returns a copy of the current config state, taken under `config_lock_` so
// the packet-processing path never observes a half-updated configuration.
SimulatedNetwork::ConfigState SimulatedNetwork::GetConfigState() const {
  MutexLock lock(&config_lock_);
  return config_state_;
}
// Implements NetworkBehaviorInterface. Advances the network to
// `receive_time_us`, returns all packets that are due, and refreshes
// `next_process_time_us_`.
std::vector<PacketDeliveryInfo> SimulatedNetwork::DequeueDeliverablePackets(
    int64_t receive_time_us) {
  RTC_DCHECK_RUNS_SERIALIZED(&process_checker_);

  UpdateCapacityQueue(GetConfigState(), receive_time_us);
  std::vector<PacketDeliveryInfo> packets_to_deliver;

  // Check the extra delay queue.
  while (!delay_link_.empty() &&
         receive_time_us >= delay_link_.front().arrival_time_us) {
    PacketInfo packet_info = delay_link_.front();
    packets_to_deliver.emplace_back(
        PacketDeliveryInfo(packet_info.packet, packet_info.arrival_time_us));
    delay_link_.pop_front();
  }

  // Next process time: head of `delay_link_` if any, else head of
  // `capacity_link_`, else unset.
  if (!delay_link_.empty()) {
    next_process_time_us_ = delay_link_.front().arrival_time_us;
  } else if (!capacity_link_.empty()) {
    next_process_time_us_ = capacity_link_.front().arrival_time_us;
  } else {
    next_process_time_us_.reset();
  }
  return packets_to_deliver;
}
} // namespace webrtc

View file

@ -0,0 +1,134 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_SIMULATED_NETWORK_H_
#define CALL_SIMULATED_NETWORK_H_
#include <stdint.h>
#include <deque>
#include <queue>
#include <vector>
#include "absl/types/optional.h"
#include "api/sequence_checker.h"
#include "api/test/simulated_network.h"
#include "api/units/data_size.h"
#include "api/units/timestamp.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/random.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
// Class simulating a network link.
//
// This is a basic implementation of NetworkBehaviorInterface that supports:
// - Packet loss
// - Capacity delay
// - Extra delay with or without packets reorder
// - Packet overhead
// - Queue max capacity
class RTC_EXPORT SimulatedNetwork : public SimulatedNetworkInterface {
 public:
  using Config = BuiltInNetworkBehaviorConfig;
  // `random_seed` seeds the PRNG driving loss and jitter decisions; a fixed
  // seed makes runs reproducible.
  explicit SimulatedNetwork(Config config, uint64_t random_seed = 1);
  ~SimulatedNetwork() override;

  // Sets a new configuration. This will affect packets that will be sent with
  // EnqueuePacket but also packets in the network that have not left the
  // network emulation. Packets that are ready to be retrieved by
  // DequeueDeliverablePackets are not affected by the new configuration.
  // TODO(bugs.webrtc.org/14525): Fix SetConfig and make it apply only to the
  // part of the packet that is currently being sent (instead of applying to
  // all of it).
  void SetConfig(const Config& config) override;
  void UpdateConfig(std::function<void(BuiltInNetworkBehaviorConfig*)>
                        config_modifier) override;
  void PauseTransmissionUntil(int64_t until_us) override;

  // NetworkBehaviorInterface
  bool EnqueuePacket(PacketInFlightInfo packet) override;
  std::vector<PacketDeliveryInfo> DequeueDeliverablePackets(
      int64_t receive_time_us) override;

  absl::optional<int64_t> NextDeliveryTimeUs() const override;

 private:
  struct PacketInfo {
    PacketInFlightInfo packet;
    // Time when the packet has left (or will leave) the network.
    int64_t arrival_time_us;
  };
  // Contains current configuration state.
  struct ConfigState {
    // Static link configuration.
    Config config;
    // The probability to drop the packet if we are currently dropping a
    // burst of packets.
    double prob_loss_bursting;
    // The probability to drop a burst of packets.
    double prob_start_bursting;
    // Used for temporary delay spikes.
    int64_t pause_transmission_until_us = 0;
  };

  // Moves packets from capacity- to delay link.
  void UpdateCapacityQueue(ConfigState state, int64_t time_now_us)
      RTC_RUN_ON(&process_checker_);
  // Returns a snapshot of the current configuration, taken under
  // `config_lock_`.
  ConfigState GetConfigState() const;

  mutable Mutex config_lock_;

  // Guards the data structures involved in delay and loss processing, such as
  // the packet queues.
  rtc::RaceChecker process_checker_;
  // Models the capacity of the network by rejecting packets if the queue is
  // full and keeping them in the queue until they are ready to exit (according
  // to the link capacity, which cannot be violated, e.g. a 1 kbps link will
  // only be able to deliver 1000 bits per second).
  //
  // Invariant:
  // The head of the `capacity_link_` has arrival_time_us correctly set to the
  // time when the packet is supposed to be delivered (without accounting
  // potential packet loss or potential extra delay and without accounting for a
  // new configuration of the network, which requires a re-computation of the
  // arrival_time_us).
  std::queue<PacketInfo> capacity_link_ RTC_GUARDED_BY(process_checker_);
  // Models the extra delay of the network (see `queue_delay_ms`
  // and `delay_standard_deviation_ms` in BuiltInNetworkBehaviorConfig), packets
  // in the `delay_link_` have technically already left the network and don't
  // use its capacity but they are not delivered yet.
  std::deque<PacketInfo> delay_link_ RTC_GUARDED_BY(process_checker_);
  // Represents the next moment in time when the network is supposed to deliver
  // packets to the client (either by pulling them from `delay_link_` or
  // `capacity_link_` or both).
  absl::optional<int64_t> next_process_time_us_
      RTC_GUARDED_BY(process_checker_);

  ConfigState config_state_ RTC_GUARDED_BY(config_lock_);

  Random random_ RTC_GUARDED_BY(process_checker_);
  // Are we currently dropping a burst of packets?
  bool bursting_;

  // The send time of the last enqueued packet, this is only used to check that
  // the send time of enqueued packets is monotonically increasing.
  int64_t last_enqueue_time_us_;

  // The last time a packet left the capacity_link_ (used to enforce
  // the capacity of the link and avoid packets starting to get sent before
  // the link is free).
  int64_t last_capacity_link_exit_time_;
};
} // namespace webrtc
#endif // CALL_SIMULATED_NETWORK_H_

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_SIMULATED_PACKET_RECEIVER_H_
#define CALL_SIMULATED_PACKET_RECEIVER_H_
#include "api/test/simulated_network.h"
#include "call/packet_receiver.h"
namespace webrtc {
// Private API that is fixing surface between DirectTransport and underlying
// network conditions simulation implementation.
class SimulatedPacketReceiverInterface : public PacketReceiver {
 public:
  // Injects the destination receiver that simulated packets are forwarded to.
  // Must not be called in parallel with DeliverPacket or Process.
  virtual void SetReceiver(PacketReceiver* receiver) = 0;

  // Reports average packet delay.
  virtual int AverageDelay() = 0;

  // Process any pending tasks such as timeouts.
  // Called on a worker thread.
  virtual void Process() = 0;

  // Returns the time until next process or nullopt to indicate that the next
  // process time is unknown. If the next process time is unknown, this should
  // be checked again any time a packet is enqueued.
  virtual absl::optional<int64_t> TimeUntilNextProcess() = 0;
};
} // namespace webrtc
#endif // CALL_SIMULATED_PACKET_RECEIVER_H_

View file

@ -0,0 +1,17 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/syncable.h"
namespace webrtc {
Syncable::~Syncable() = default;
} // namespace webrtc

View file

@ -0,0 +1,46 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Syncable is used by RtpStreamsSynchronizer in VideoReceiveStreamInterface,
// and implemented by AudioReceiveStreamInterface.
#ifndef CALL_SYNCABLE_H_
#define CALL_SYNCABLE_H_
#include <stdint.h>
#include "absl/types/optional.h"
namespace webrtc {
class Syncable {
 public:
  // Snapshot of receive-side timing state, consumed by the stream
  // synchronizer (see file comment).
  struct Info {
    int64_t latest_receive_time_ms = 0;
    uint32_t latest_received_capture_timestamp = 0;
    // NTP capture time split into seconds/fraction parts, plus the matching
    // timestamp from the source clock.
    uint32_t capture_time_ntp_secs = 0;
    uint32_t capture_time_ntp_frac = 0;
    uint32_t capture_time_source_clock = 0;
    int current_delay_ms = 0;
  };

  virtual ~Syncable();

  virtual uint32_t id() const = 0;
  virtual absl::optional<Info> GetInfo() const = 0;
  // Returns false when no playout timestamp is available; on success fills
  // both out-parameters.
  virtual bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp,
                                      int64_t* time_ms) const = 0;
  virtual bool SetMinimumPlayoutDelay(int delay_ms) = 0;
  virtual void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms,
                                                 int64_t time_ms) = 0;
};
} // namespace webrtc
#endif // CALL_SYNCABLE_H_

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/version.h"
namespace webrtc {
// The timestamp is always in UTC.
const char* const kSourceTimestamp = "WebRTC source stamp 2024-02-18T04:06:34";

// Touches kSourceTimestamp so optimizers cannot strip the version string from
// the final binary (see call/version.h).
void LoadWebRTCVersionInRegister() {
  // Using volatile to instruct the compiler to not optimize `p` away even
  // if it looks unused.
  const char* volatile p = kSourceTimestamp;
  static_cast<void>(p);
}
} // namespace webrtc

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_VERSION_H_
#define CALL_VERSION_H_
// LoadWebRTCVersionInRegister is a helper function that loads the pointer to
// the WebRTC version string into a register. While this function doesn't do
// anything useful at runtime, it is needed to prevent compiler optimizations
// from removing the WebRTC version string from the final binary.
namespace webrtc {
void LoadWebRTCVersionInRegister();
} // namespace webrtc
#endif // CALL_VERSION_H_

View file

@ -0,0 +1,168 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/video_receive_stream.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
// Builds a decoder config mapping `payload_type` to `video_format`.
VideoReceiveStreamInterface::Decoder::Decoder(SdpVideoFormat video_format,
                                              int payload_type)
    : video_format(std::move(video_format)), payload_type(payload_type) {}

// Default-constructed decoders carry the placeholder format name "Unset".
VideoReceiveStreamInterface::Decoder::Decoder() : video_format("Unset") {}
VideoReceiveStreamInterface::Decoder::Decoder(const Decoder&) = default;
VideoReceiveStreamInterface::Decoder::~Decoder() = default;
// Two decoder configs are equal iff both the RTP payload type and the SDP
// video format match.
bool VideoReceiveStreamInterface::Decoder::operator==(
    const Decoder& other) const {
  if (payload_type != other.payload_type) {
    return false;
  }
  return video_format == other.video_format;
}
std::string VideoReceiveStreamInterface::Decoder::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{payload_type: " << payload_type;
ss << ", payload_name: " << video_format.name;
ss << ", codec_params: {";
for (auto it = video_format.parameters.begin();
it != video_format.parameters.end(); ++it) {
if (it != video_format.parameters.begin()) {
ss << ", ";
}
ss << it->first << ": " << it->second;
}
ss << '}';
ss << '}';
return ss.str();
}
// Defaulted out-of-line so the definitions live in this translation unit.
VideoReceiveStreamInterface::Stats::Stats() = default;
VideoReceiveStreamInterface::Stats::~Stats() = default;
// Renders the stats as a single human-readable line; `time_ms` is a
// caller-supplied timestamp printed in the prefix.
std::string VideoReceiveStreamInterface::Stats::ToString(
    int64_t time_ms) const {
  char buf[2048];
  rtc::SimpleStringBuilder ss(buf);
  ss << "VideoReceiveStreamInterface stats: " << time_ms << ", {ssrc: " << ssrc
     << ", ";
  ss << "total_bps: " << total_bitrate_bps << ", ";
  // Spec-compliant stats are camelCased to distinguish them from
  // the legacy and internal stats.
  ss << "frameWidth: " << width << ", ";
  ss << "frameHeight: " << height << ", ";
  // TODO(crbug.com/webrtc/15166): `key` and `delta` will not
  // perfectly match the other frame counters.
  ss << "key: " << frame_counts.key_frames << ", ";
  ss << "delta: " << frame_counts.delta_frames << ", ";
  ss << "framesAssembledFromMultiplePackets: "
     << frames_assembled_from_multiple_packets << ", ";
  ss << "framesDecoded: " << frames_decoded << ", ";
  ss << "framesDropped: " << frames_dropped << ", ";
  ss << "network_fps: " << network_frame_rate << ", ";
  ss << "decode_fps: " << decode_frame_rate << ", ";
  ss << "render_fps: " << render_frame_rate << ", ";
  ss << "decode_ms: " << decode_ms << ", ";
  ss << "max_decode_ms: " << max_decode_ms << ", ";
  ss << "first_frame_received_to_decoded_ms: "
     << first_frame_received_to_decoded_ms << ", ";
  ss << "current_delay_ms: " << current_delay_ms << ", ";
  ss << "target_delay_ms: " << target_delay_ms << ", ";
  ss << "jitter_delay_ms: " << jitter_buffer_ms << ", ";
  ss << "totalAssemblyTime: " << total_assembly_time.seconds<double>() << ", ";
  ss << "jitterBufferDelay: " << jitter_buffer_delay.seconds<double>() << ", ";
  ss << "jitterBufferTargetDelay: "
     << jitter_buffer_target_delay.seconds<double>() << ", ";
  ss << "jitterBufferEmittedCount: " << jitter_buffer_emitted_count << ", ";
  ss << "jitterBufferMinimumDelay: "
     << jitter_buffer_minimum_delay.seconds<double>() << ", ";
  ss << "totalDecodeTime: " << total_decode_time.seconds<double>() << ", ";
  ss << "totalProcessingDelay: " << total_processing_delay.seconds<double>()
     << ", ";
  ss << "min_playout_delay_ms: " << min_playout_delay_ms << ", ";
  ss << "sync_offset_ms: " << sync_offset_ms << ", ";
  ss << "cum_loss: " << rtp_stats.packets_lost << ", ";
  ss << "nackCount: " << rtcp_packet_type_counts.nack_packets << ", ";
  ss << "firCount: " << rtcp_packet_type_counts.fir_packets << ", ";
  ss << "pliCount: " << rtcp_packet_type_counts.pli_packets;
  ss << '}';
  return ss.str();
}
// Copy construction is private in the declaration; callers use Copy().
VideoReceiveStreamInterface::Config::Config(const Config&) = default;
VideoReceiveStreamInterface::Config::Config(Config&&) = default;

// Main constructor: stores the RTCP transport and (optional) decoder
// factory; all other fields keep their in-class defaults.
VideoReceiveStreamInterface::Config::Config(
    Transport* rtcp_send_transport,
    VideoDecoderFactory* decoder_factory)
    : decoder_factory(decoder_factory),
      rtcp_send_transport(rtcp_send_transport) {}

VideoReceiveStreamInterface::Config&
VideoReceiveStreamInterface::Config::operator=(Config&&) = default;

// Was `Config::Config::~Config()`; the doubled injected-class-name
// qualifier is legal but redundant, so it is dropped here.
VideoReceiveStreamInterface::Config::~Config() = default;
std::string VideoReceiveStreamInterface::Config::ToString() const {
char buf[4 * 1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{decoders: [";
for (size_t i = 0; i < decoders.size(); ++i) {
ss << decoders[i].ToString();
if (i != decoders.size() - 1)
ss << ", ";
}
ss << ']';
ss << ", rtp: " << rtp.ToString();
ss << ", renderer: " << (renderer ? "(renderer)" : "nullptr");
ss << ", render_delay_ms: " << render_delay_ms;
if (!sync_group.empty())
ss << ", sync_group: " << sync_group;
ss << '}';
return ss.str();
}
// Rtp holds only value members; defaulted special members suffice.
VideoReceiveStreamInterface::Config::Rtp::Rtp() = default;
VideoReceiveStreamInterface::Config::Rtp::Rtp(const Rtp&) = default;
VideoReceiveStreamInterface::Config::Rtp::~Rtp() = default;
// Human-readable description of the RTP settings, used for logging.
std::string VideoReceiveStreamInterface::Config::Rtp::ToString() const {
  char buf[2 * 1024];
  rtc::SimpleStringBuilder ss(buf);
  ss << "{remote_ssrc: " << remote_ssrc;
  ss << ", local_ssrc: " << local_ssrc;
  // Label every enumerator explicitly. The previous two-way ternary
  // reported RtcpMode::kOff as "RtcpMode::kReducedSize".
  ss << ", rtcp_mode: ";
  switch (rtcp_mode) {
    case RtcpMode::kCompound:
      ss << "RtcpMode::kCompound";
      break;
    case RtcpMode::kReducedSize:
      ss << "RtcpMode::kReducedSize";
      break;
    case RtcpMode::kOff:
      ss << "RtcpMode::kOff";
      break;
  }
  ss << ", rtcp_xr: ";
  ss << "{receiver_reference_time_report: "
     << (rtcp_xr.receiver_reference_time_report ? "on" : "off");
  ss << '}';
  ss << ", lntf: {enabled: " << (lntf.enabled ? "true" : "false") << '}';
  ss << ", nack: {rtp_history_ms: " << nack.rtp_history_ms << '}';
  ss << ", ulpfec_payload_type: " << ulpfec_payload_type;
  ss << ", red_type: " << red_payload_type;
  ss << ", rtx_ssrc: " << rtx_ssrc;
  ss << ", rtx_payload_types: {";
  for (auto& kv : rtx_associated_payload_types) {
    ss << kv.first << " (pt) -> " << kv.second << " (apt), ";
  }
  ss << '}';
  ss << ", raw_payload_types: {";
  for (const auto& pt : raw_payload_types) {
    ss << pt << ", ";
  }
  ss << '}';
  ss << '}';
  return ss.str();
}
} // namespace webrtc

View file

@ -0,0 +1,328 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_VIDEO_RECEIVE_STREAM_H_
#define CALL_VIDEO_RECEIVE_STREAM_H_
#include <cstdint>
#include <limits>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "api/video/recordable_encoded_frame.h"
#include "api/video/video_content_type.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_timing.h"
#include "api/video_codecs/sdp_video_format.h"
#include "call/receive_stream.h"
#include "call/rtp_config.h"
#include "common_video/frame_counts.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/checks.h"
namespace webrtc {
class RtpPacketSinkInterface;
class VideoDecoderFactory;
// Interface to a single video receive stream. Configured with a Config at
// creation time; runtime state is queried via Stats and adjusted through the
// setters declared at the bottom of the class.
class VideoReceiveStreamInterface : public MediaReceiveStreamInterface {
 public:
  // Class for handling moving in/out recording state.
  struct RecordingState {
    RecordingState() = default;
    explicit RecordingState(
        std::function<void(const RecordableEncodedFrame&)> callback)
        : callback(std::move(callback)) {}
    // Callback stored from the VideoReceiveStreamInterface. The
    // VideoReceiveStreamInterface client should not interpret the attribute.
    std::function<void(const RecordableEncodedFrame&)> callback;
    // Memento of when a keyframe request was last sent. The
    // VideoReceiveStreamInterface client should not interpret the attribute.
    absl::optional<int64_t> last_keyframe_request_ms;
  };

  // TODO(mflodman) Move all these settings to VideoDecoder and move the
  // declaration to common_types.h.
  struct Decoder {
    Decoder(SdpVideoFormat video_format, int payload_type);
    Decoder();
    Decoder(const Decoder&);
    ~Decoder();
    bool operator==(const Decoder& other) const;
    // Human-readable description, used for logging.
    std::string ToString() const;
    SdpVideoFormat video_format;
    // Received RTP packets with this payload type will be sent to this decoder
    // instance.
    int payload_type = 0;
  };

  // Snapshot of receive-side statistics; see GetStats().
  struct Stats {
    Stats();
    ~Stats();
    // Human-readable one-line summary; `time_ms` prefixes the output.
    std::string ToString(int64_t time_ms) const;
    // Frame rates, in frames per second.
    int network_frame_rate = 0;
    int decode_frame_rate = 0;
    int render_frame_rate = 0;
    uint32_t frames_rendered = 0;
    // Decoder stats.
    absl::optional<std::string> decoder_implementation_name;
    absl::optional<bool> power_efficient_decoder;
    FrameCounts frame_counts;
    int decode_ms = 0;
    int max_decode_ms = 0;
    int current_delay_ms = 0;
    int target_delay_ms = 0;
    int jitter_buffer_ms = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay
    TimeDelta jitter_buffer_delay = TimeDelta::Zero();
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbuffertargetdelay
    TimeDelta jitter_buffer_target_delay = TimeDelta::Zero();
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferemittedcount
    uint64_t jitter_buffer_emitted_count = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferminimumdelay
    TimeDelta jitter_buffer_minimum_delay = TimeDelta::Zero();
    int min_playout_delay_ms = 0;
    int render_delay_ms = 10;
    // -1 until the first measurement is available.
    int64_t interframe_delay_max_ms = -1;
    // Frames dropped due to decoding failures or if the system is too slow.
    // https://www.w3.org/TR/webrtc-stats/#dom-rtcvideoreceiverstats-framesdropped
    uint32_t frames_dropped = 0;
    uint32_t frames_decoded = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totaldecodetime
    TimeDelta total_decode_time = TimeDelta::Zero();
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalprocessingdelay
    TimeDelta total_processing_delay = TimeDelta::Zero();
    // TODO(bugs.webrtc.org/13986): standardize
    TimeDelta total_assembly_time = TimeDelta::Zero();
    uint32_t frames_assembled_from_multiple_packets = 0;
    // Total inter frame delay in seconds.
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalinterframedelay
    double total_inter_frame_delay = 0;
    // Total squared inter frame delay in seconds^2.
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalsqauredinterframedelay
    double total_squared_inter_frame_delay = 0;
    // -1 until the first frame has been decoded.
    int64_t first_frame_received_to_decoded_ms = -1;
    absl::optional<uint64_t> qp_sum;
    int current_payload_type = -1;
    int total_bitrate_bps = 0;
    int width = 0;
    int height = 0;
    uint32_t freeze_count = 0;
    uint32_t pause_count = 0;
    uint32_t total_freezes_duration_ms = 0;
    uint32_t total_pauses_duration_ms = 0;
    VideoContentType content_type = VideoContentType::UNSPECIFIED;
    // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp
    absl::optional<int64_t> estimated_playout_ntp_timestamp_ms;
    // Sentinel max() means "no sync offset measured yet".
    int sync_offset_ms = std::numeric_limits<int>::max();
    uint32_t ssrc = 0;
    std::string c_name;
    RtpReceiveStats rtp_stats;
    RtcpPacketTypeCounter rtcp_packet_type_counts;
    absl::optional<RtpReceiveStats> rtx_rtp_stats;
    // Timing frame info: all important timestamps for a full lifetime of a
    // single 'timing frame'.
    absl::optional<webrtc::TimingFrameInfo> timing_frame_info;
  };

  // Stream configuration, supplied when the stream is created.
  struct Config {
   private:
    // Access to the copy constructor is private to force use of the Copy()
    // method for those exceptional cases where we do use it.
    Config(const Config&);

   public:
    Config() = delete;
    Config(Config&&);
    Config(Transport* rtcp_send_transport,
           VideoDecoderFactory* decoder_factory = nullptr);
    Config& operator=(Config&&);
    Config& operator=(const Config&) = delete;
    ~Config();

    // Mostly used by tests. Avoid creating copies if you can.
    Config Copy() const { return Config(*this); }

    // Human-readable description, used for logging.
    std::string ToString() const;

    // Decoders for every payload that we can receive.
    std::vector<Decoder> decoders;

    // Ownership stays with WebrtcVideoEngine (delegated from PeerConnection).
    VideoDecoderFactory* decoder_factory = nullptr;

    // Receive-stream specific RTP settings.
    struct Rtp : public ReceiveStreamRtpConfig {
      Rtp();
      Rtp(const Rtp&);
      ~Rtp();
      // Human-readable description, used for logging.
      std::string ToString() const;

      // See NackConfig for description.
      NackConfig nack;

      // See RtcpMode for description.
      RtcpMode rtcp_mode = RtcpMode::kCompound;

      // Extended RTCP settings.
      struct RtcpXr {
        // True if RTCP Receiver Reference Time Report Block extension
        // (RFC 3611) should be enabled.
        bool receiver_reference_time_report = false;
      } rtcp_xr;

      // How to request keyframes from a remote sender. Applies only if lntf is
      // disabled.
      KeyFrameReqMethod keyframe_method = KeyFrameReqMethod::kPliRtcp;

      // See LntfConfig for description.
      LntfConfig lntf;

      // Payload types for ULPFEC and RED, respectively.
      int ulpfec_payload_type = -1;
      int red_payload_type = -1;

      // SSRC for retransmissions.
      uint32_t rtx_ssrc = 0;

      // Set if the stream is protected using FlexFEC.
      bool protected_by_flexfec = false;

      // Optional callback sink to support additional packet handlers such as
      // FlexFec.
      RtpPacketSinkInterface* packet_sink_ = nullptr;

      // Map from rtx payload type -> media payload type.
      // For RTX to be enabled, both an SSRC and this mapping are needed.
      std::map<int, int> rtx_associated_payload_types;

      // Payload types that should be depacketized using raw depacketizer
      // (payload header will not be parsed and must not be present, additional
      // meta data is expected to be present in generic frame descriptor
      // RTP header extension).
      std::set<int> raw_payload_types;
    } rtp;

    // Transport for outgoing packets (RTCP).
    Transport* rtcp_send_transport = nullptr;

    // Must always be set.
    rtc::VideoSinkInterface<VideoFrame>* renderer = nullptr;

    // Expected delay needed by the renderer, i.e. the frame will be delivered
    // this many milliseconds, if possible, earlier than the ideal render time.
    int render_delay_ms = 10;

    // If false, pass frames on to the renderer as soon as they are
    // available.
    bool enable_prerenderer_smoothing = true;

    // Identifier for an A/V synchronization group. Empty string to disable.
    // TODO(pbos): Synchronize streams in a sync group, not just video streams
    // to one of the audio streams.
    std::string sync_group;

    // An optional custom frame decryptor that allows the entire frame to be
    // decrypted in whatever way the caller choses. This is not required by
    // default.
    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor;

    // Per PeerConnection cryptography options.
    CryptoOptions crypto_options;

    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
  };

  // TODO(pbos): Add info on currently-received codec to Stats.
  virtual Stats GetStats() const = 0;

  // Sets a base minimum for the playout delay. Base minimum delay sets lower
  // bound on minimum delay value determining lower bound on playout delay.
  //
  // Returns true if value was successfully set, false overwise.
  virtual bool SetBaseMinimumPlayoutDelayMs(int delay_ms) = 0;

  // Returns current value of base minimum delay in milliseconds.
  virtual int GetBaseMinimumPlayoutDelayMs() const = 0;

  // Sets and returns recording state. The old state is moved out
  // of the video receive stream and returned to the caller, and `state`
  // is moved in. If the state's callback is set, it will be called with
  // recordable encoded frames as they arrive.
  // If `generate_key_frame` is true, the method will generate a key frame.
  // When the function returns, it's guaranteed that all old callouts
  // to the returned callback has ceased.
  // Note: the client should not interpret the returned state's attributes, but
  // instead treat it as opaque data.
  virtual RecordingState SetAndGetRecordingState(RecordingState state,
                                                 bool generate_key_frame) = 0;

  // Cause eventual generation of a key frame from the sender.
  virtual void GenerateKeyFrame() = 0;

  virtual void SetRtcpMode(RtcpMode mode) = 0;

  // Sets or clears a flexfec RTP sink. This affects `rtp.packet_sink_` and
  // `rtp.protected_by_flexfec` parts of the configuration. Must be called on
  // the packet delivery thread.
  // TODO(bugs.webrtc.org/11993): Packet delivery thread today means `worker
  // thread` but will be `network thread`.
  virtual void SetFlexFecProtection(RtpPacketSinkInterface* flexfec_sink) = 0;

  // Turns on/off loss notifications. Must be called on the packet delivery
  // thread.
  virtual void SetLossNotificationEnabled(bool enabled) = 0;

  // Modify `rtp.nack.rtp_history_ms` post construction. Setting this value
  // to 0 disables nack.
  // Must be called on the packet delivery thread.
  virtual void SetNackHistory(TimeDelta history) = 0;

  virtual void SetProtectionPayloadTypes(int red_payload_type,
                                         int ulpfec_payload_type) = 0;

  virtual void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) = 0;

  virtual void SetAssociatedPayloadTypes(
      std::map<int, int> associated_payload_types) = 0;

  virtual void UpdateRtxSsrc(uint32_t ssrc) = 0;

 protected:
  virtual ~VideoReceiveStreamInterface() {}
};
} // namespace webrtc
#endif // CALL_VIDEO_RECEIVE_STREAM_H_

View file

@ -0,0 +1,127 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "call/video_send_stream.h"
#include <utility>
#include "api/crypto/frame_encryptor_interface.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/strings/string_format.h"
namespace webrtc {
namespace {
// Maps a StreamStats::StreamType enumerator to the label used in log output.
const char* StreamTypeToString(VideoSendStream::StreamStats::StreamType type) {
  using StreamType = VideoSendStream::StreamStats::StreamType;
  switch (type) {
    case StreamType::kMedia:
      return "media";
    case StreamType::kRtx:
      return "rtx";
    case StreamType::kFlexfec:
      return "flexfec";
  }
  // All enumerators are handled above; reaching here is a logic error.
  RTC_CHECK_NOTREACHED();
}
}  // namespace
// StreamStats carries only value members; defaulted special members suffice.
VideoSendStream::StreamStats::StreamStats() = default;
VideoSendStream::StreamStats::~StreamStats() = default;
std::string VideoSendStream::StreamStats::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "type: " << StreamTypeToString(type);
if (referenced_media_ssrc.has_value())
ss << " (for: " << referenced_media_ssrc.value() << ")";
ss << ", ";
ss << "width: " << width << ", ";
ss << "height: " << height << ", ";
ss << "key: " << frame_counts.key_frames << ", ";
ss << "delta: " << frame_counts.delta_frames << ", ";
ss << "total_bps: " << total_bitrate_bps << ", ";
ss << "retransmit_bps: " << retransmit_bitrate_bps << ", ";
ss << "avg_delay_ms: " << avg_delay_ms << ", ";
ss << "max_delay_ms: " << max_delay_ms << ", ";
if (report_block_data) {
ss << "cum_loss: " << report_block_data->cumulative_lost() << ", ";
ss << "max_ext_seq: "
<< report_block_data->extended_highest_sequence_number() << ", ";
}
ss << "nack: " << rtcp_packet_type_counts.nack_packets << ", ";
ss << "fir: " << rtcp_packet_type_counts.fir_packets << ", ";
ss << "pli: " << rtcp_packet_type_counts.pli_packets;
return ss.str();
}
// Stats carries only value members; defaulted special members suffice.
VideoSendStream::Stats::Stats() = default;
VideoSendStream::Stats::~Stats() = default;
// Renders the aggregate send-side stats as one human-readable line for
// periodic logging, followed by one "{ssrc: ...}" fragment per kMedia
// substream. `time_ms` is a caller-supplied timestamp prefixing the output.
std::string VideoSendStream::Stats::ToString(int64_t time_ms) const {
  char buf[2048];
  rtc::SimpleStringBuilder ss(buf);
  ss << "VideoSendStream stats: " << time_ms << ", {";
  ss << "input_fps: " << rtc::StringFormat("%.1f", input_frame_rate) << ", ";
  ss << "encode_fps: " << encode_frame_rate << ", ";
  ss << "encode_ms: " << avg_encode_time_ms << ", ";
  ss << "encode_usage_perc: " << encode_usage_percent << ", ";
  ss << "target_bps: " << target_media_bitrate_bps << ", ";
  ss << "media_bps: " << media_bitrate_bps << ", ";
  ss << "suspended: " << (suspended ? "true" : "false") << ", ";
  ss << "bw_adapted_res: " << (bw_limited_resolution ? "true" : "false")
     << ", ";
  ss << "cpu_adapted_res: " << (cpu_limited_resolution ? "true" : "false")
     << ", ";
  ss << "bw_adapted_fps: " << (bw_limited_framerate ? "true" : "false") << ", ";
  ss << "cpu_adapted_fps: " << (cpu_limited_framerate ? "true" : "false")
     << ", ";
  ss << "#cpu_adaptations: " << number_of_cpu_adapt_changes << ", ";
  ss << "#quality_adaptations: " << number_of_quality_adapt_changes;
  ss << '}';
  // Only kMedia substreams are printed; RTX/FlexFEC substreams are skipped.
  for (const auto& substream : substreams) {
    if (substream.second.type ==
        VideoSendStream::StreamStats::StreamType::kMedia) {
      ss << " {ssrc: " << substream.first << ", ";
      ss << substream.second.ToString();
      ss << '}';
    }
  }
  return ss.str();
}
// Copy construction is private in the declaration; callers use Copy().
VideoSendStream::Config::Config(const Config&) = default;
VideoSendStream::Config::Config(Config&&) = default;

// Main constructor. `rtp` is declared before `encoder_settings`, so reading
// rtp.lntf.enabled in the mem-initializer list below is well-defined.
VideoSendStream::Config::Config(Transport* send_transport)
    : rtp(),
      encoder_settings(VideoEncoder::Capabilities(rtp.lntf.enabled)),
      send_transport(send_transport) {}

VideoSendStream::Config& VideoSendStream::Config::operator=(Config&&) = default;

// Was `Config::Config::~Config()`; the doubled injected-class-name
// qualifier is legal but redundant, so it is dropped here.
VideoSendStream::Config::~Config() = default;
std::string VideoSendStream::Config::ToString() const {
char buf[2 * 1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{encoder_settings: { experiment_cpu_load_estimator: "
<< (encoder_settings.experiment_cpu_load_estimator ? "on" : "off") << "}}";
ss << ", rtp: " << rtp.ToString();
ss << ", rtcp_report_interval_ms: " << rtcp_report_interval_ms;
ss << ", send_transport: " << (send_transport ? "(Transport)" : "nullptr");
ss << ", render_delay_ms: " << render_delay_ms;
ss << ", target_delay_ms: " << target_delay_ms;
ss << ", suspend_below_min_bitrate: "
<< (suspend_below_min_bitrate ? "on" : "off");
ss << '}';
return ss.str();
}
} // namespace webrtc

View file

@ -0,0 +1,260 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef CALL_VIDEO_SEND_STREAM_H_
#define CALL_VIDEO_SEND_STREAM_H_
#include <stdint.h>
#include <map>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/call/transport.h"
#include "api/crypto/crypto_options.h"
#include "api/frame_transformer_interface.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/video/video_content_type.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/scalability_mode.h"
#include "call/rtp_config.h"
#include "common_video/frame_counts.h"
#include "common_video/include/quality_limitation_reason.h"
#include "modules/rtp_rtcp/include/report_block_data.h"
#include "modules/rtp_rtcp/include/rtcp_statistics.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "video/config/video_encoder_config.h"
namespace webrtc {
class FrameEncryptorInterface;
// Interface to a single video send stream. Configured with a Config at
// creation time; the encoder is reconfigured via ReconfigureVideoEncoder and
// runtime state is queried via Stats.
class VideoSendStream {
 public:
  // Multiple StreamStats objects are present if simulcast is used (multiple
  // kMedia streams) or if RTX or FlexFEC is negotiated. Multiple SVC layers, on
  // the other hand, does not cause additional StreamStats.
  struct StreamStats {
    enum class StreamType {
      // A media stream is an RTP stream for audio or video. Retransmissions and
      // FEC is either sent over the same SSRC or negotiated to be sent over
      // separate SSRCs, in which case separate StreamStats objects exist with
      // references to this media stream's SSRC.
      kMedia,
      // RTX streams are streams dedicated to retransmissions. They have a
      // dependency on a single kMedia stream: `referenced_media_ssrc`.
      kRtx,
      // FlexFEC streams are streams dedicated to FlexFEC. They have a
      // dependency on a single kMedia stream: `referenced_media_ssrc`.
      kFlexfec,
    };
    StreamStats();
    ~StreamStats();
    // Human-readable description, used for logging.
    std::string ToString() const;
    StreamType type = StreamType::kMedia;
    // If `type` is kRtx or kFlexfec this value is present. The referenced SSRC
    // is the kMedia stream that this stream is performing retransmissions or
    // FEC for. If `type` is kMedia, this value is null.
    absl::optional<uint32_t> referenced_media_ssrc;
    FrameCounts frame_counts;
    int width = 0;
    int height = 0;
    // TODO(holmer): Move bitrate_bps out to the webrtc::Call layer.
    int total_bitrate_bps = 0;
    int retransmit_bitrate_bps = 0;
    // `avg_delay_ms` and `max_delay_ms` are only used in tests. Consider
    // deleting.
    int avg_delay_ms = 0;
    int max_delay_ms = 0;
    StreamDataCounters rtp_stats;
    RtcpPacketTypeCounter rtcp_packet_type_counts;
    // A snapshot of the most recent Report Block with additional data of
    // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats.
    absl::optional<ReportBlockData> report_block_data;
    double encode_frame_rate = 0.0;
    int frames_encoded = 0;
    absl::optional<uint64_t> qp_sum;
    uint64_t total_encode_time_ms = 0;
    uint64_t total_encoded_bytes_target = 0;
    uint32_t huge_frames_sent = 0;
    absl::optional<ScalabilityMode> scalability_mode;
  };

  // Aggregate send-side statistics across all substreams; see GetStats().
  struct Stats {
    Stats();
    ~Stats();
    // Human-readable one-line summary; `time_ms` prefixes the output.
    std::string ToString(int64_t time_ms) const;
    absl::optional<std::string> encoder_implementation_name;
    double input_frame_rate = 0;
    int encode_frame_rate = 0;
    int avg_encode_time_ms = 0;
    int encode_usage_percent = 0;
    uint32_t frames_encoded = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodetime
    uint64_t total_encode_time_ms = 0;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodedbytestarget
    uint64_t total_encoded_bytes_target = 0;
    uint32_t frames = 0;
    // Per-cause counters for frames dropped before transmission.
    uint32_t frames_dropped_by_capturer = 0;
    uint32_t frames_dropped_by_bad_timestamp = 0;
    uint32_t frames_dropped_by_encoder_queue = 0;
    uint32_t frames_dropped_by_rate_limiter = 0;
    uint32_t frames_dropped_by_congestion_window = 0;
    uint32_t frames_dropped_by_encoder = 0;
    // Bitrate the encoder is currently configured to use due to bandwidth
    // limitations.
    int target_media_bitrate_bps = 0;
    // Bitrate the encoder is actually producing.
    int media_bitrate_bps = 0;
    bool suspended = false;
    bool bw_limited_resolution = false;
    bool cpu_limited_resolution = false;
    bool bw_limited_framerate = false;
    bool cpu_limited_framerate = false;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason
    QualityLimitationReason quality_limitation_reason =
        QualityLimitationReason::kNone;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations
    std::map<QualityLimitationReason, int64_t> quality_limitation_durations_ms;
    // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
    uint32_t quality_limitation_resolution_changes = 0;
    // Total number of times resolution as been requested to be changed due to
    // CPU/quality adaptation.
    int number_of_cpu_adapt_changes = 0;
    int number_of_quality_adapt_changes = 0;
    bool has_entered_low_resolution = false;
    // Per-substream stats, keyed by SSRC.
    std::map<uint32_t, StreamStats> substreams;
    webrtc::VideoContentType content_type =
        webrtc::VideoContentType::UNSPECIFIED;
    uint32_t frames_sent = 0;
    uint32_t huge_frames_sent = 0;
    absl::optional<bool> power_efficient_encoder;
  };

  // Stream configuration, supplied when the stream is created.
  struct Config {
   public:
    Config() = delete;
    Config(Config&&);
    explicit Config(Transport* send_transport);

    Config& operator=(Config&&);
    Config& operator=(const Config&) = delete;

    ~Config();

    // Mostly used by tests. Avoid creating copies if you can.
    Config Copy() const { return Config(*this); }

    // Human-readable description, used for logging.
    std::string ToString() const;

    RtpConfig rtp;

    VideoStreamEncoderSettings encoder_settings;

    // Time interval between RTCP report for video
    int rtcp_report_interval_ms = 1000;

    // Transport for outgoing packets.
    Transport* send_transport = nullptr;

    // Expected delay needed by the renderer, i.e. the frame will be delivered
    // this many milliseconds, if possible, earlier than expected render time.
    // Only valid if `local_renderer` is set.
    int render_delay_ms = 0;

    // Target delay in milliseconds. A positive value indicates this stream is
    // used for streaming instead of a real-time call.
    int target_delay_ms = 0;

    // True if the stream should be suspended when the available bitrate fall
    // below the minimum configured bitrate. If this variable is false, the
    // stream may send at a rate higher than the estimated available bitrate.
    bool suspend_below_min_bitrate = false;

    // Enables periodic bandwidth probing in application-limited region.
    bool periodic_alr_bandwidth_probing = false;

    // An optional custom frame encryptor that allows the entire frame to be
    // encrypted in whatever way the caller chooses. This is not required by
    // default.
    rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor;

    // An optional encoder selector provided by the user.
    // Overrides VideoEncoderFactory::GetEncoderSelector().
    // Owned by RtpSenderBase.
    VideoEncoderFactory::EncoderSelectorInterface* encoder_selector = nullptr;

    // Per PeerConnection cryptography options.
    CryptoOptions crypto_options;

    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;

   private:
    // Access to the copy constructor is private to force use of the Copy()
    // method for those exceptional cases where we do use it.
    Config(const Config&);
  };

  // Starts stream activity.
  // When a stream is active, it can receive, process and deliver packets.
  virtual void Start() = 0;

  // Stops stream activity.
  // When a stream is stopped, it can't receive, process or deliver packets.
  virtual void Stop() = 0;

  // Accessor for determining if the stream is active. This is an inexpensive
  // call that must be made on the same thread as `Start()` and `Stop()` methods
  // are called on and will return `true` iff activity has been started
  // via `Start()`.
  virtual bool started() = 0;

  // If the resource is overusing, the VideoSendStream will try to reduce
  // resolution or frame rate until no resource is overusing.
  // TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor
  // is moved to Call this method could be deleted altogether in favor of
  // Call-level APIs only.
  virtual void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) = 0;
  virtual std::vector<rtc::scoped_refptr<Resource>>
  GetAdaptationResources() = 0;

  virtual void SetSource(
      rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
      const DegradationPreference& degradation_preference) = 0;

  // Set which streams to send. Must have at least as many SSRCs as configured
  // in the config. Encoder settings are passed on to the encoder instance along
  // with the VideoStream settings.
  virtual void ReconfigureVideoEncoder(VideoEncoderConfig config) = 0;
  virtual void ReconfigureVideoEncoder(VideoEncoderConfig config,
                                       SetParametersCallback callback) = 0;

  virtual Stats GetStats() = 0;

  // Cause eventual generation of a key frame for the given RIDs (or all
  // streams when `rids` is empty).
  virtual void GenerateKeyFrame(const std::vector<std::string>& rids) = 0;

 protected:
  virtual ~VideoSendStream() {}
};
} // namespace webrtc
#endif // CALL_VIDEO_SEND_STREAM_H_