Repo created
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions
6
TMessagesProj/jni/voip/webrtc/video/OWNERS
Normal file
@@ -0,0 +1,6 @@
asapersson@webrtc.org
ilnik@webrtc.org
mflodman@webrtc.org
philipel@webrtc.org
sprang@webrtc.org
stefan@webrtc.org
62
TMessagesProj/jni/voip/webrtc/video/adaptation/balanced_constraint.cc
Normal file
@@ -0,0 +1,62 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/balanced_constraint.h"

#include <string>
#include <utility>

#include "api/sequence_checker.h"

namespace webrtc {

BalancedConstraint::BalancedConstraint(
    DegradationPreferenceProvider* degradation_preference_provider,
    const FieldTrialsView& field_trials)
    : encoder_target_bitrate_bps_(absl::nullopt),
      balanced_settings_(field_trials),
      degradation_preference_provider_(degradation_preference_provider) {
  RTC_DCHECK(degradation_preference_provider_);
  sequence_checker_.Detach();
}

void BalancedConstraint::OnEncoderTargetBitrateUpdated(
    absl::optional<uint32_t> encoder_target_bitrate_bps) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  encoder_target_bitrate_bps_ = std::move(encoder_target_bitrate_bps);
}

bool BalancedConstraint::IsAdaptationUpAllowed(
    const VideoStreamInputState& input_state,
    const VideoSourceRestrictions& restrictions_before,
    const VideoSourceRestrictions& restrictions_after) const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Don't adapt if BalancedDegradationSettings applies and determines this
  // will exceed bitrate constraints.
  if (degradation_preference_provider_->degradation_preference() ==
      DegradationPreference::BALANCED) {
    int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
        input_state.frame_size_pixels().value());
    if (!balanced_settings_.CanAdaptUp(
            input_state.video_codec_type(), frame_size_pixels,
            encoder_target_bitrate_bps_.value_or(0))) {
      return false;
    }
    if (DidIncreaseResolution(restrictions_before, restrictions_after) &&
        !balanced_settings_.CanAdaptUpResolution(
            input_state.video_codec_type(), frame_size_pixels,
            encoder_target_bitrate_bps_.value_or(0))) {
      return false;
    }
  }
  return true;
}

}  // namespace webrtc
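The pixel count fed into the balanced settings above prefers the single active stream's size and falls back to the full input frame size. A standalone sketch of that selection, using std::optional in place of absl::optional; FrameSizePixels is an illustrative helper, not part of the tree:

#include <optional>

// Mirrors input_state.single_active_stream_pixels().value_or(
//     input_state.frame_size_pixels().value()) above. Like the original, it
// assumes frame_size_pixels is known by the time an adaptation is evaluated.
int FrameSizePixels(std::optional<int> single_active_stream_pixels,
                    std::optional<int> frame_size_pixels) {
  return single_active_stream_pixels.value_or(frame_size_pixels.value());
}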
53
TMessagesProj/jni/voip/webrtc/video/adaptation/balanced_constraint.h
Normal file
@@ -0,0 +1,53 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_
#define VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_

#include <string>

#include "absl/types/optional.h"
#include "api/field_trials_view.h"
#include "api/sequence_checker.h"
#include "call/adaptation/adaptation_constraint.h"
#include "call/adaptation/degradation_preference_provider.h"
#include "rtc_base/experiments/balanced_degradation_settings.h"
#include "rtc_base/system/no_unique_address.h"

namespace webrtc {

class BalancedConstraint : public AdaptationConstraint {
 public:
  BalancedConstraint(
      DegradationPreferenceProvider* degradation_preference_provider,
      const FieldTrialsView& field_trials);
  ~BalancedConstraint() override = default;

  void OnEncoderTargetBitrateUpdated(
      absl::optional<uint32_t> encoder_target_bitrate_bps);

  // AdaptationConstraint implementation.
  std::string Name() const override { return "BalancedConstraint"; }
  bool IsAdaptationUpAllowed(
      const VideoStreamInputState& input_state,
      const VideoSourceRestrictions& restrictions_before,
      const VideoSourceRestrictions& restrictions_after) const override;

 private:
  RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
  absl::optional<uint32_t> encoder_target_bitrate_bps_
      RTC_GUARDED_BY(&sequence_checker_);
  const BalancedDegradationSettings balanced_settings_;
  const DegradationPreferenceProvider* degradation_preference_provider_;
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_
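As the header shows, an AdaptationConstraint implementation only has to provide Name() and IsAdaptationUpAllowed(). A minimal sketch of a custom constraint with the same shape; PassthroughConstraint is a hypothetical example, not part of the tree:

#include <string>

#include "call/adaptation/adaptation_constraint.h"

// Hypothetical constraint that never blocks upward adaptation; it only
// illustrates the interface that BalancedConstraint implements above.
class PassthroughConstraint : public webrtc::AdaptationConstraint {
 public:
  std::string Name() const override { return "PassthroughConstraint"; }
  bool IsAdaptationUpAllowed(
      const webrtc::VideoStreamInputState& /* input_state */,
      const webrtc::VideoSourceRestrictions& /* restrictions_before */,
      const webrtc::VideoSourceRestrictions& /* restrictions_after */)
      const override {
    return true;  // Approve every proposed step up.
  }
};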
83
TMessagesProj/jni/voip/webrtc/video/adaptation/bandwidth_quality_scaler_resource.cc
Normal file
@@ -0,0 +1,83 @@
/*
 * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/bandwidth_quality_scaler_resource.h"

#include <utility>

#include "rtc_base/checks.h"
#include "rtc_base/experiments/balanced_degradation_settings.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

// static
rtc::scoped_refptr<BandwidthQualityScalerResource>
BandwidthQualityScalerResource::Create() {
  return rtc::make_ref_counted<BandwidthQualityScalerResource>();
}

BandwidthQualityScalerResource::BandwidthQualityScalerResource()
    : VideoStreamEncoderResource("BandwidthQualityScalerResource"),
      bandwidth_quality_scaler_(nullptr) {}

BandwidthQualityScalerResource::~BandwidthQualityScalerResource() {
  RTC_DCHECK(!bandwidth_quality_scaler_);
}

bool BandwidthQualityScalerResource::is_started() const {
  RTC_DCHECK_RUN_ON(encoder_queue());
  return bandwidth_quality_scaler_.get();
}

void BandwidthQualityScalerResource::StartCheckForOveruse(
    const std::vector<VideoEncoder::ResolutionBitrateLimits>&
        resolution_bitrate_limits) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(!is_started());
  bandwidth_quality_scaler_ = std::make_unique<BandwidthQualityScaler>(this);

  // If there were more than one configuration parameter, we should define
  // and declare a BandwidthQualityScaler::Initialize() function and call it
  // instead.
  bandwidth_quality_scaler_->SetResolutionBitrateLimits(
      resolution_bitrate_limits);
}

void BandwidthQualityScalerResource::StopCheckForOveruse() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(is_started());
  // Ensure we have no pending callbacks. This makes it safe to destroy the
  // BandwidthQualityScaler and even task queues with tasks in-flight.
  bandwidth_quality_scaler_.reset();
}

void BandwidthQualityScalerResource::OnReportUsageBandwidthHigh() {
  OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
}

void BandwidthQualityScalerResource::OnReportUsageBandwidthLow() {
  OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
}

void BandwidthQualityScalerResource::OnEncodeCompleted(
    const EncodedImage& encoded_image,
    int64_t time_sent_in_us,
    int64_t encoded_image_size_bytes) {
  RTC_DCHECK_RUN_ON(encoder_queue());

  if (bandwidth_quality_scaler_) {
    bandwidth_quality_scaler_->ReportEncodeInfo(
        encoded_image_size_bytes, time_sent_in_us / 1000,
        encoded_image._encodedWidth, encoded_image._encodedHeight);
  }
}

}  // namespace webrtc
62
TMessagesProj/jni/voip/webrtc/video/adaptation/bandwidth_quality_scaler_resource.h
Normal file
@@ -0,0 +1,62 @@
/*
 * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_BANDWIDTH_QUALITY_SCALER_RESOURCE_H_
#define VIDEO_ADAPTATION_BANDWIDTH_QUALITY_SCALER_RESOURCE_H_

#include <memory>
#include <queue>
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/scoped_refptr.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video_codecs/video_encoder.h"
#include "call/adaptation/degradation_preference_provider.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "modules/video_coding/utility/bandwidth_quality_scaler.h"
#include "video/adaptation/video_stream_encoder_resource.h"

namespace webrtc {

// Handles interaction with the BandwidthQualityScaler.
class BandwidthQualityScalerResource
    : public VideoStreamEncoderResource,
      public BandwidthQualityScalerUsageHandlerInterface {
 public:
  static rtc::scoped_refptr<BandwidthQualityScalerResource> Create();

  BandwidthQualityScalerResource();
  ~BandwidthQualityScalerResource() override;

  bool is_started() const;

  void OnEncodeCompleted(const EncodedImage& encoded_image,
                         int64_t time_sent_in_us,
                         int64_t encoded_image_size_bytes);

  void StartCheckForOveruse(
      const std::vector<VideoEncoder::ResolutionBitrateLimits>&
          resolution_bitrate_limits);
  void StopCheckForOveruse();

  // BandwidthQualityScalerUsageHandlerInterface implementation.
  void OnReportUsageBandwidthHigh() override;
  void OnReportUsageBandwidthLow() override;

 private:
  std::unique_ptr<BandwidthQualityScaler> bandwidth_quality_scaler_
      RTC_GUARDED_BY(encoder_queue());
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_BANDWIDTH_QUALITY_SCALER_RESOURCE_H_
88
TMessagesProj/jni/voip/webrtc/video/adaptation/bitrate_constraint.cc
Normal file
@@ -0,0 +1,88 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/bitrate_constraint.h"

#include <utility>
#include <vector>

#include "api/sequence_checker.h"
#include "call/adaptation/video_stream_adapter.h"
#include "video/adaptation/video_stream_encoder_resource_manager.h"

namespace webrtc {

BitrateConstraint::BitrateConstraint()
    : encoder_settings_(absl::nullopt),
      encoder_target_bitrate_bps_(absl::nullopt) {
  sequence_checker_.Detach();
}

void BitrateConstraint::OnEncoderSettingsUpdated(
    absl::optional<EncoderSettings> encoder_settings) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  encoder_settings_ = std::move(encoder_settings);
}

void BitrateConstraint::OnEncoderTargetBitrateUpdated(
    absl::optional<uint32_t> encoder_target_bitrate_bps) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  encoder_target_bitrate_bps_ = std::move(encoder_target_bitrate_bps);
}

// Checks if resolution is allowed to adapt up based on the current bitrate and
// ResolutionBitrateLimits.min_start_bitrate_bps for the next higher
// resolution. Bitrate limits usage is restricted to a single active
// stream/layer (e.g. when quality scaling is enabled).
bool BitrateConstraint::IsAdaptationUpAllowed(
    const VideoStreamInputState& input_state,
    const VideoSourceRestrictions& restrictions_before,
    const VideoSourceRestrictions& restrictions_after) const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // Make sure bitrate limits are not violated.
  if (DidIncreaseResolution(restrictions_before, restrictions_after)) {
    if (!encoder_settings_.has_value()) {
      return true;
    }

    uint32_t bitrate_bps = encoder_target_bitrate_bps_.value_or(0);
    if (bitrate_bps == 0) {
      return true;
    }

    if (VideoStreamEncoderResourceManager::IsSimulcastOrMultipleSpatialLayers(
            encoder_settings_->encoder_config(),
            encoder_settings_->video_codec())) {
      // Resolution bitrate limits usage is restricted to singlecast.
      return true;
    }

    absl::optional<int> current_frame_size_px =
        input_state.single_active_stream_pixels();
    if (!current_frame_size_px.has_value()) {
      return true;
    }

    absl::optional<VideoEncoder::ResolutionBitrateLimits> bitrate_limits =
        encoder_settings_->encoder_info().GetEncoderBitrateLimitsForResolution(
            // Need some sort of expected resulting pixels to be used
            // instead of unrestricted.
            GetHigherResolutionThan(*current_frame_size_px));

    if (bitrate_limits.has_value()) {
      RTC_DCHECK_GE(bitrate_limits->frame_size_pixels, *current_frame_size_px);
      return bitrate_bps >=
             static_cast<uint32_t>(bitrate_limits->min_start_bitrate_bps);
    }
  }
  return true;
}

}  // namespace webrtc
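Once the limits for the next-higher resolution are known, the decision above reduces to one comparison: adaptation up is blocked only while the target bitrate sits below that resolution's min_start_bitrate_bps. A standalone sketch of the rule, with a local struct mirroring VideoEncoder::ResolutionBitrateLimits rather than the WebRTC type:

#include <cstdint>
#include <optional>

struct BitrateLimits {  // Mirrors VideoEncoder::ResolutionBitrateLimits.
  int frame_size_pixels;
  int min_start_bitrate_bps;
};

bool AllowResolutionIncrease(uint32_t target_bitrate_bps,
                             std::optional<BitrateLimits> next_up_limits) {
  if (!next_up_limits)
    return true;  // No limits known for the higher resolution: don't block.
  return target_bitrate_bps >=
         static_cast<uint32_t>(next_up_limits->min_start_bitrate_bps);
}

// With {640 * 360, 500000} as the next step's limits, a 400 kbps target keeps
// the stream at the lower resolution; a 600 kbps target lets it step up.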
53
TMessagesProj/jni/voip/webrtc/video/adaptation/bitrate_constraint.h
Normal file
@@ -0,0 +1,53 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_
#define VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_

#include <string>

#include "absl/types/optional.h"
#include "api/sequence_checker.h"
#include "call/adaptation/adaptation_constraint.h"
#include "call/adaptation/encoder_settings.h"
#include "call/adaptation/video_source_restrictions.h"
#include "call/adaptation/video_stream_input_state.h"
#include "rtc_base/system/no_unique_address.h"

namespace webrtc {

class BitrateConstraint : public AdaptationConstraint {
 public:
  BitrateConstraint();
  ~BitrateConstraint() override = default;

  void OnEncoderSettingsUpdated(
      absl::optional<EncoderSettings> encoder_settings);
  void OnEncoderTargetBitrateUpdated(
      absl::optional<uint32_t> encoder_target_bitrate_bps);

  // AdaptationConstraint implementation.
  std::string Name() const override { return "BitrateConstraint"; }
  bool IsAdaptationUpAllowed(
      const VideoStreamInputState& input_state,
      const VideoSourceRestrictions& restrictions_before,
      const VideoSourceRestrictions& restrictions_after) const override;

 private:
  RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
  absl::optional<EncoderSettings> encoder_settings_
      RTC_GUARDED_BY(&sequence_checker_);
  absl::optional<uint32_t> encoder_target_bitrate_bps_
      RTC_GUARDED_BY(&sequence_checker_);
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_
105
TMessagesProj/jni/voip/webrtc/video/adaptation/encode_usage_resource.cc
Normal file
@@ -0,0 +1,105 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/encode_usage_resource.h"

#include <limits>
#include <utility>

#include "rtc_base/checks.h"

namespace webrtc {

// static
rtc::scoped_refptr<EncodeUsageResource> EncodeUsageResource::Create(
    std::unique_ptr<OveruseFrameDetector> overuse_detector) {
  return rtc::make_ref_counted<EncodeUsageResource>(
      std::move(overuse_detector));
}

EncodeUsageResource::EncodeUsageResource(
    std::unique_ptr<OveruseFrameDetector> overuse_detector)
    : VideoStreamEncoderResource("EncoderUsageResource"),
      overuse_detector_(std::move(overuse_detector)),
      is_started_(false),
      target_frame_rate_(absl::nullopt) {
  RTC_DCHECK(overuse_detector_);
}

EncodeUsageResource::~EncodeUsageResource() {}

bool EncodeUsageResource::is_started() const {
  RTC_DCHECK_RUN_ON(encoder_queue());
  return is_started_;
}

void EncodeUsageResource::StartCheckForOveruse(CpuOveruseOptions options) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(!is_started_);
  overuse_detector_->StartCheckForOveruse(TaskQueueBase::Current(),
                                          std::move(options), this);
  is_started_ = true;
  overuse_detector_->OnTargetFramerateUpdated(TargetFrameRateAsInt());
}

void EncodeUsageResource::StopCheckForOveruse() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  overuse_detector_->StopCheckForOveruse();
  is_started_ = false;
}

void EncodeUsageResource::SetTargetFrameRate(
    absl::optional<double> target_frame_rate) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  if (target_frame_rate == target_frame_rate_)
    return;
  target_frame_rate_ = target_frame_rate;
  if (is_started_)
    overuse_detector_->OnTargetFramerateUpdated(TargetFrameRateAsInt());
}

void EncodeUsageResource::OnEncodeStarted(const VideoFrame& cropped_frame,
                                          int64_t time_when_first_seen_us) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  // TODO(hbos): Rename FrameCaptured() to something more appropriate (e.g.
  // "OnEncodeStarted"?) or revise usage.
  overuse_detector_->FrameCaptured(cropped_frame, time_when_first_seen_us);
}

void EncodeUsageResource::OnEncodeCompleted(
    uint32_t timestamp,
    int64_t time_sent_in_us,
    int64_t capture_time_us,
    absl::optional<int> encode_duration_us) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  // TODO(hbos): Rename FrameSent() to something more appropriate (e.g.
  // "OnEncodeCompleted"?).
  overuse_detector_->FrameSent(timestamp, time_sent_in_us, capture_time_us,
                               encode_duration_us);
}

void EncodeUsageResource::AdaptUp() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
}

void EncodeUsageResource::AdaptDown() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
}

int EncodeUsageResource::TargetFrameRateAsInt() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  return target_frame_rate_.has_value()
             ? static_cast<int>(target_frame_rate_.value())
             : std::numeric_limits<int>::max();
}

}  // namespace webrtc
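SetTargetFrameRate() above funnels into OveruseFrameDetector::OnTargetFramerateUpdated(), which turns the target into a cap on the filtered frame interval; an unset target (TargetFrameRateAsInt() returning INT_MAX) is simply clamped to the 30 fps maximum. A worked version of that arithmetic, with the constants copied from overuse_frame_detector.cc later in this commit:

#include <algorithm>

constexpr int kMinFramerate = 7;   // From overuse_frame_detector.cc.
constexpr int kMaxFramerate = 30;
constexpr float kMaxSampleDiffMarginFactor = 1.35f;

float MaxSampleDiffMs(int target_fps) {
  // Integer division first, as in OnTargetFramerateUpdated():
  // 30 fps -> (1000 / 30) * 1.35 = 44.55 ms, i.e. roughly 45 ms.
  int fps = std::max(kMinFramerate, std::min(kMaxFramerate, target_fps));
  return (1000 / fps) * kMaxSampleDiffMarginFactor;
}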
68
TMessagesProj/jni/voip/webrtc/video/adaptation/encode_usage_resource.h
Normal file
@@ -0,0 +1,68 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
#define VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_

#include <memory>
#include <string>

#include "absl/types/optional.h"
#include "api/scoped_refptr.h"
#include "api/video/video_adaptation_reason.h"
#include "video/adaptation/overuse_frame_detector.h"
#include "video/adaptation/video_stream_encoder_resource.h"

namespace webrtc {

// Handles interaction with the OveruseDetector.
// TODO(hbos): Add unittests specific to this class; it is currently only
// tested indirectly by usage in the ResourceAdaptationProcessor (which is
// only tested because of its usage in VideoStreamEncoder); all tests are
// currently in video_stream_encoder_unittest.cc.
class EncodeUsageResource : public VideoStreamEncoderResource,
                            public OveruseFrameDetectorObserverInterface {
 public:
  static rtc::scoped_refptr<EncodeUsageResource> Create(
      std::unique_ptr<OveruseFrameDetector> overuse_detector);

  explicit EncodeUsageResource(
      std::unique_ptr<OveruseFrameDetector> overuse_detector);
  ~EncodeUsageResource() override;

  bool is_started() const;

  void StartCheckForOveruse(CpuOveruseOptions options);
  void StopCheckForOveruse();

  void SetTargetFrameRate(absl::optional<double> target_frame_rate);
  void OnEncodeStarted(const VideoFrame& cropped_frame,
                       int64_t time_when_first_seen_us);
  void OnEncodeCompleted(uint32_t timestamp,
                         int64_t time_sent_in_us,
                         int64_t capture_time_us,
                         absl::optional<int> encode_duration_us);

  // OveruseFrameDetectorObserverInterface implementation.
  void AdaptUp() override;
  void AdaptDown() override;

 private:
  int TargetFrameRateAsInt();

  const std::unique_ptr<OveruseFrameDetector> overuse_detector_
      RTC_GUARDED_BY(encoder_queue());
  bool is_started_ RTC_GUARDED_BY(encoder_queue());
  absl::optional<double> target_frame_rate_ RTC_GUARDED_BY(encoder_queue());
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
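A hedged usage sketch of the pieces declared above, assuming it runs on the encoder task queue after the resource's queues have been registered, and that a CpuOveruseMetricsObserver* is available; StartCpuMonitoring is an illustrative free function, not part of the tree:

#include <memory>

#include "video/adaptation/encode_usage_resource.h"

void StartCpuMonitoring(webrtc::CpuOveruseMetricsObserver* metrics_observer) {
  auto detector =
      std::make_unique<webrtc::OveruseFrameDetector>(metrics_observer);
  auto resource = webrtc::EncodeUsageResource::Create(std::move(detector));
  webrtc::CpuOveruseOptions options;  // Defaults: 85% overuse, 42% underuse.
  resource->StartCheckForOveruse(options);
  // Frames then feed the estimator through OnEncodeStarted() and
  // OnEncodeCompleted(); AdaptUp()/AdaptDown() callbacks surface as
  // kUnderuse/kOveruse resource usage states.
  resource->StopCheckForOveruse();  // Required before the resource goes away.
}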
669
TMessagesProj/jni/voip/webrtc/video/adaptation/overuse_frame_detector.cc
Normal file
@@ -0,0 +1,669 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/overuse_frame_detector.h"

#include <math.h>
#include <stdio.h>

#include <algorithm>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <utility>

#include "api/video/video_frame.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"

#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
#include <mach/mach.h>
#endif  // defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)

namespace webrtc {

namespace {
const int64_t kCheckForOveruseIntervalMs = 5000;
const int64_t kTimeToFirstCheckForOveruseMs = 100;

// Delay between consecutive rampups. (Used for quick recovery.)
const int kQuickRampUpDelayMs = 10 * 1000;
// Delay between rampup attempts. Initially uses standard, scales up to max.
const int kStandardRampUpDelayMs = 40 * 1000;
const int kMaxRampUpDelayMs = 240 * 1000;
// Exponential back-off factor, to prevent annoying up-down behavior.
const double kRampUpBackoffFactor = 2.0;

// Max number of overuses detected before always applying the rampup delay.
const int kMaxOverusesBeforeApplyRampupDelay = 4;

// The maximum exponent to use in VCMExpFilter.
const float kMaxExp = 7.0f;
// Default value used before the first reconfiguration.
const int kDefaultFrameRate = 30;
// Default sample diff, default frame rate.
const float kDefaultSampleDiffMs = 1000.0f / kDefaultFrameRate;
// A factor applied to the sample diff on OnTargetFramerateUpdated to determine
// a max limit for the sample diff. For instance, with a framerate of 30fps,
// the sample diff is capped to (1000 / 30) * 1.35 = 45ms. This prevents
// triggering too soon if there are individual very large outliers.
const float kMaxSampleDiffMarginFactor = 1.35f;
// Minimum framerate allowed for usage calculation. This prevents unreasonably
// long encode times from being accepted if the frame rate happens to be low.
const int kMinFramerate = 7;
const int kMaxFramerate = 30;

// Class for calculating the processing usage on the send-side (the average
// processing time of a frame divided by the average time difference between
// captured frames).
class SendProcessingUsage1 : public OveruseFrameDetector::ProcessingUsage {
 public:
  explicit SendProcessingUsage1(const CpuOveruseOptions& options)
      : kWeightFactorFrameDiff(0.998f),
        kWeightFactorProcessing(0.995f),
        kInitialSampleDiffMs(40.0f),
        options_(options),
        count_(0),
        last_processed_capture_time_us_(-1),
        max_sample_diff_ms_(kDefaultSampleDiffMs * kMaxSampleDiffMarginFactor),
        filtered_processing_ms_(new rtc::ExpFilter(kWeightFactorProcessing)),
        filtered_frame_diff_ms_(new rtc::ExpFilter(kWeightFactorFrameDiff)) {
    Reset();
  }
  ~SendProcessingUsage1() override {}

  void Reset() override {
    frame_timing_.clear();
    count_ = 0;
    last_processed_capture_time_us_ = -1;
    max_sample_diff_ms_ = kDefaultSampleDiffMs * kMaxSampleDiffMarginFactor;
    filtered_frame_diff_ms_->Reset(kWeightFactorFrameDiff);
    filtered_frame_diff_ms_->Apply(1.0f, kInitialSampleDiffMs);
    filtered_processing_ms_->Reset(kWeightFactorProcessing);
    filtered_processing_ms_->Apply(1.0f, InitialProcessingMs());
  }

  void SetMaxSampleDiffMs(float diff_ms) override {
    max_sample_diff_ms_ = diff_ms;
  }

  void FrameCaptured(const VideoFrame& frame,
                     int64_t time_when_first_seen_us,
                     int64_t last_capture_time_us) override {
    if (last_capture_time_us != -1)
      AddCaptureSample(1e-3 * (time_when_first_seen_us - last_capture_time_us));

    frame_timing_.push_back(FrameTiming(frame.timestamp_us(), frame.timestamp(),
                                        time_when_first_seen_us));
  }

  absl::optional<int> FrameSent(
      uint32_t timestamp,
      int64_t time_sent_in_us,
      int64_t /* capture_time_us */,
      absl::optional<int> /* encode_duration_us */) override {
    absl::optional<int> encode_duration_us;
    // Delay before reporting actual encoding time, used to have the ability
    // to detect total encoding time when encoding more than one layer.
    // Encoding is here assumed to finish within a second (or that we get
    // enough long-time samples before one second to trigger an overuse even
    // when this is not the case).
    static const int64_t kEncodingTimeMeasureWindowMs = 1000;
    for (auto& it : frame_timing_) {
      if (it.timestamp == timestamp) {
        it.last_send_us = time_sent_in_us;
        break;
      }
    }
    // TODO(pbos): Handle the case/log errors when not finding the
    // corresponding frame (either very slow encoding or incorrect timestamps
    // returned from the encoder).
    // This is currently the case for all frames on ChromeOS, so logging them
    // would be spammy, and triggering overuse would be wrong.
    // https://crbug.com/350106
    while (!frame_timing_.empty()) {
      FrameTiming timing = frame_timing_.front();
      if (time_sent_in_us - timing.capture_us <
          kEncodingTimeMeasureWindowMs * rtc::kNumMicrosecsPerMillisec) {
        break;
      }
      if (timing.last_send_us != -1) {
        encode_duration_us.emplace(
            static_cast<int>(timing.last_send_us - timing.capture_us));

        if (last_processed_capture_time_us_ != -1) {
          int64_t diff_us = timing.capture_us - last_processed_capture_time_us_;
          AddSample(1e-3 * (*encode_duration_us), 1e-3 * diff_us);
        }
        last_processed_capture_time_us_ = timing.capture_us;
      }
      frame_timing_.pop_front();
    }
    return encode_duration_us;
  }

  int Value() override {
    if (count_ < static_cast<uint32_t>(options_.min_frame_samples)) {
      return static_cast<int>(InitialUsageInPercent() + 0.5f);
    }
    float frame_diff_ms = std::max(filtered_frame_diff_ms_->filtered(), 1.0f);
    frame_diff_ms = std::min(frame_diff_ms, max_sample_diff_ms_);
    float encode_usage_percent =
        100.0f * filtered_processing_ms_->filtered() / frame_diff_ms;
    return static_cast<int>(encode_usage_percent + 0.5);
  }

 private:
  struct FrameTiming {
    FrameTiming(int64_t capture_time_us, uint32_t timestamp, int64_t now)
        : capture_time_us(capture_time_us),
          timestamp(timestamp),
          capture_us(now),
          last_send_us(-1) {}
    int64_t capture_time_us;
    uint32_t timestamp;
    int64_t capture_us;
    int64_t last_send_us;
  };

  void AddCaptureSample(float sample_ms) {
    float exp = sample_ms / kDefaultSampleDiffMs;
    exp = std::min(exp, kMaxExp);
    filtered_frame_diff_ms_->Apply(exp, sample_ms);
  }

  void AddSample(float processing_ms, int64_t diff_last_sample_ms) {
    ++count_;
    float exp = diff_last_sample_ms / kDefaultSampleDiffMs;
    exp = std::min(exp, kMaxExp);
    filtered_processing_ms_->Apply(exp, processing_ms);
  }

  float InitialUsageInPercent() const {
    // Start in between the underuse and overuse threshold.
    return (options_.low_encode_usage_threshold_percent +
            options_.high_encode_usage_threshold_percent) /
           2.0f;
  }

  float InitialProcessingMs() const {
    return InitialUsageInPercent() * kInitialSampleDiffMs / 100;
  }

  const float kWeightFactorFrameDiff;
  const float kWeightFactorProcessing;
  const float kInitialSampleDiffMs;

  const CpuOveruseOptions options_;
  std::list<FrameTiming> frame_timing_;
  uint64_t count_;
  int64_t last_processed_capture_time_us_;
  float max_sample_diff_ms_;
  std::unique_ptr<rtc::ExpFilter> filtered_processing_ms_;
  std::unique_ptr<rtc::ExpFilter> filtered_frame_diff_ms_;
};

// New cpu load estimator.
// TODO(bugs.webrtc.org/8504): For some period of time, we need to
// switch between the two versions of the estimator for experiments.
// When problems are sorted out, the old estimator should be deleted.
class SendProcessingUsage2 : public OveruseFrameDetector::ProcessingUsage {
 public:
  explicit SendProcessingUsage2(const CpuOveruseOptions& options)
      : options_(options) {
    Reset();
  }
  ~SendProcessingUsage2() override = default;

  void Reset() override {
    prev_time_us_ = -1;
    // Start in between the underuse and overuse threshold.
    load_estimate_ = (options_.low_encode_usage_threshold_percent +
                      options_.high_encode_usage_threshold_percent) /
                     200.0;
  }

  void SetMaxSampleDiffMs(float /* diff_ms */) override {}

  void FrameCaptured(const VideoFrame& frame,
                     int64_t time_when_first_seen_us,
                     int64_t last_capture_time_us) override {}

  absl::optional<int> FrameSent(
      uint32_t /* timestamp */,
      int64_t /* time_sent_in_us */,
      int64_t capture_time_us,
      absl::optional<int> encode_duration_us) override {
    if (encode_duration_us) {
      int duration_per_frame_us =
          DurationPerInputFrame(capture_time_us, *encode_duration_us);
      if (prev_time_us_ != -1) {
        if (capture_time_us < prev_time_us_) {
          // The weighting in AddSample assumes that samples are processed
          // with non-decreasing measurement timestamps. We could implement
          // appropriate weights for samples arriving late, but since it is a
          // rare case, keep things simple by just pushing those measurements
          // a bit forward in time.
          capture_time_us = prev_time_us_;
        }
        AddSample(1e-6 * duration_per_frame_us,
                  1e-6 * (capture_time_us - prev_time_us_));
      }
    }
    prev_time_us_ = capture_time_us;

    return encode_duration_us;
  }

 private:
  void AddSample(double encode_time, double diff_time) {
    RTC_CHECK_GE(diff_time, 0.0);

    // Use the filter update
    //
    //   load <-- x/d (1 - exp(-d/T)) + exp(-d/T) load
    //
    // where we must take care for small d, using the proper limit
    //   (1 - exp(-d/tau)) / d = 1/tau - d/(2 tau^2) + O(d^2)
    double tau = (1e-3 * options_.filter_time_ms);
    double e = diff_time / tau;
    double c;
    if (e < 0.0001) {
      c = (1 - e / 2) / tau;
    } else {
      c = -expm1(-e) / diff_time;
    }
    load_estimate_ = c * encode_time + exp(-e) * load_estimate_;
  }

  int64_t DurationPerInputFrame(int64_t capture_time_us,
                                int64_t encode_time_us) {
    // Discard data on old frames; limit 2 seconds.
    static constexpr int64_t kMaxAge = 2 * rtc::kNumMicrosecsPerSec;
    for (auto it = max_encode_time_per_input_frame_.begin();
         it != max_encode_time_per_input_frame_.end() &&
         it->first < capture_time_us - kMaxAge;) {
      it = max_encode_time_per_input_frame_.erase(it);
    }

    std::map<int64_t, int>::iterator it;
    bool inserted;
    std::tie(it, inserted) = max_encode_time_per_input_frame_.emplace(
        capture_time_us, encode_time_us);
    if (inserted) {
      // First encoded frame for this input frame.
      return encode_time_us;
    }
    if (encode_time_us <= it->second) {
      // Shorter encode time than previous frame (unlikely). Count it as being
      // done in parallel.
      return 0;
    }
    // Record new maximum encode time, and return increase from previous max.
    int increase = encode_time_us - it->second;
    it->second = encode_time_us;
    return increase;
  }

  int Value() override {
    return static_cast<int>(100.0 * load_estimate_ + 0.5);
  }

  const CpuOveruseOptions options_;
  // Indexed by the capture timestamp, used as frame id.
  std::map<int64_t, int> max_encode_time_per_input_frame_;

  int64_t prev_time_us_ = -1;
  double load_estimate_;
};

// Class used for manual testing of overuse, enabled via field trial flag.
class OverdoseInjector : public OveruseFrameDetector::ProcessingUsage {
 public:
  OverdoseInjector(std::unique_ptr<OveruseFrameDetector::ProcessingUsage> usage,
                   int64_t normal_period_ms,
                   int64_t overuse_period_ms,
                   int64_t underuse_period_ms)
      : usage_(std::move(usage)),
        normal_period_ms_(normal_period_ms),
        overuse_period_ms_(overuse_period_ms),
        underuse_period_ms_(underuse_period_ms),
        state_(State::kNormal),
        last_toggling_ms_(-1) {
    RTC_DCHECK_GT(overuse_period_ms, 0);
    RTC_DCHECK_GT(normal_period_ms, 0);
    RTC_LOG(LS_INFO) << "Simulating overuse with intervals " << normal_period_ms
                     << "ms normal mode, " << overuse_period_ms
                     << "ms overuse mode.";
  }

  ~OverdoseInjector() override {}

  void Reset() override { usage_->Reset(); }

  void SetMaxSampleDiffMs(float diff_ms) override {
    usage_->SetMaxSampleDiffMs(diff_ms);
  }

  void FrameCaptured(const VideoFrame& frame,
                     int64_t time_when_first_seen_us,
                     int64_t last_capture_time_us) override {
    usage_->FrameCaptured(frame, time_when_first_seen_us, last_capture_time_us);
  }

  absl::optional<int> FrameSent(
      // These two arguments are used by the old estimator.
      uint32_t timestamp,
      int64_t time_sent_in_us,
      // And these two by the new estimator.
      int64_t capture_time_us,
      absl::optional<int> encode_duration_us) override {
    return usage_->FrameSent(timestamp, time_sent_in_us, capture_time_us,
                             encode_duration_us);
  }

  int Value() override {
    int64_t now_ms = rtc::TimeMillis();
    if (last_toggling_ms_ == -1) {
      last_toggling_ms_ = now_ms;
    } else {
      switch (state_) {
        case State::kNormal:
          if (now_ms > last_toggling_ms_ + normal_period_ms_) {
            state_ = State::kOveruse;
            last_toggling_ms_ = now_ms;
            RTC_LOG(LS_INFO) << "Simulating CPU overuse.";
          }
          break;
        case State::kOveruse:
          if (now_ms > last_toggling_ms_ + overuse_period_ms_) {
            state_ = State::kUnderuse;
            last_toggling_ms_ = now_ms;
            RTC_LOG(LS_INFO) << "Simulating CPU underuse.";
          }
          break;
        case State::kUnderuse:
          if (now_ms > last_toggling_ms_ + underuse_period_ms_) {
            state_ = State::kNormal;
            last_toggling_ms_ = now_ms;
            RTC_LOG(LS_INFO) << "Actual CPU overuse measurements in effect.";
          }
          break;
      }
    }

    absl::optional<int> overridden_usage_value;
    switch (state_) {
      case State::kNormal:
        break;
      case State::kOveruse:
        overridden_usage_value.emplace(250);
        break;
      case State::kUnderuse:
        overridden_usage_value.emplace(5);
        break;
    }

    return overridden_usage_value.value_or(usage_->Value());
  }

 private:
  const std::unique_ptr<OveruseFrameDetector::ProcessingUsage> usage_;
  const int64_t normal_period_ms_;
  const int64_t overuse_period_ms_;
  const int64_t underuse_period_ms_;
  enum class State { kNormal, kOveruse, kUnderuse } state_;
  int64_t last_toggling_ms_;
};

}  // namespace

std::unique_ptr<OveruseFrameDetector::ProcessingUsage>
OveruseFrameDetector::CreateProcessingUsage(const CpuOveruseOptions& options) {
  std::unique_ptr<ProcessingUsage> instance;
  if (options.filter_time_ms > 0) {
    instance = std::make_unique<SendProcessingUsage2>(options);
  } else {
    instance = std::make_unique<SendProcessingUsage1>(options);
  }
  std::string toggling_interval =
      field_trial::FindFullName("WebRTC-ForceSimulatedOveruseIntervalMs");
  if (!toggling_interval.empty()) {
    int normal_period_ms = 0;
    int overuse_period_ms = 0;
    int underuse_period_ms = 0;
    if (sscanf(toggling_interval.c_str(), "%d-%d-%d", &normal_period_ms,
               &overuse_period_ms, &underuse_period_ms) == 3) {
      if (normal_period_ms > 0 && overuse_period_ms > 0 &&
          underuse_period_ms > 0) {
        instance = std::make_unique<OverdoseInjector>(
            std::move(instance), normal_period_ms, overuse_period_ms,
            underuse_period_ms);
      } else {
        RTC_LOG(LS_WARNING)
            << "Invalid (non-positive) normal/overuse/underuse periods: "
            << normal_period_ms << " / " << overuse_period_ms << " / "
            << underuse_period_ms;
      }
    } else {
      RTC_LOG(LS_WARNING) << "Malformed toggling interval: "
                          << toggling_interval;
    }
  }
  return instance;
}

OveruseFrameDetector::OveruseFrameDetector(
    CpuOveruseMetricsObserver* metrics_observer)
    : metrics_observer_(metrics_observer),
      num_process_times_(0),
      // TODO(bugs.webrtc.org/9078): Use absl::optional
      last_capture_time_us_(-1),
      num_pixels_(0),
      max_framerate_(kDefaultFrameRate),
      last_overuse_time_ms_(-1),
      checks_above_threshold_(0),
      num_overuse_detections_(0),
      last_rampup_time_ms_(-1),
      in_quick_rampup_(false),
      current_rampup_delay_ms_(kStandardRampUpDelayMs) {
  task_checker_.Detach();
  ParseFieldTrial({&filter_time_constant_},
                  field_trial::FindFullName("WebRTC-CpuLoadEstimator"));
}

OveruseFrameDetector::~OveruseFrameDetector() {}

void OveruseFrameDetector::StartCheckForOveruse(
    TaskQueueBase* task_queue_base,
    const CpuOveruseOptions& options,
    OveruseFrameDetectorObserverInterface* overuse_observer) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  RTC_DCHECK(!check_overuse_task_.Running());
  RTC_DCHECK(overuse_observer != nullptr);

  SetOptions(options);
  check_overuse_task_ = RepeatingTaskHandle::DelayedStart(
      task_queue_base, TimeDelta::Millis(kTimeToFirstCheckForOveruseMs),
      [this, overuse_observer] {
        CheckForOveruse(overuse_observer);
        return TimeDelta::Millis(kCheckForOveruseIntervalMs);
      });
}

void OveruseFrameDetector::StopCheckForOveruse() {
  RTC_DCHECK_RUN_ON(&task_checker_);
  check_overuse_task_.Stop();
}

void OveruseFrameDetector::EncodedFrameTimeMeasured(int encode_duration_ms) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  encode_usage_percent_ = usage_->Value();

  metrics_observer_->OnEncodedFrameTimeMeasured(encode_duration_ms,
                                                *encode_usage_percent_);
}

bool OveruseFrameDetector::FrameSizeChanged(int num_pixels) const {
  RTC_DCHECK_RUN_ON(&task_checker_);
  if (num_pixels != num_pixels_) {
    return true;
  }
  return false;
}

bool OveruseFrameDetector::FrameTimeoutDetected(int64_t now_us) const {
  RTC_DCHECK_RUN_ON(&task_checker_);
  if (last_capture_time_us_ == -1)
    return false;
  return (now_us - last_capture_time_us_) >
         options_.frame_timeout_interval_ms * rtc::kNumMicrosecsPerMillisec;
}

void OveruseFrameDetector::ResetAll(int num_pixels) {
  // Reset state as a result of the resolution being changed. Do not, however,
  // change the current frame rate back to the default.
  RTC_DCHECK_RUN_ON(&task_checker_);
  num_pixels_ = num_pixels;
  usage_->Reset();
  last_capture_time_us_ = -1;
  num_process_times_ = 0;
  encode_usage_percent_ = absl::nullopt;
  OnTargetFramerateUpdated(max_framerate_);
}

void OveruseFrameDetector::OnTargetFramerateUpdated(int framerate_fps) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  RTC_DCHECK_GE(framerate_fps, 0);
  max_framerate_ = std::min(kMaxFramerate, framerate_fps);
  usage_->SetMaxSampleDiffMs((1000 / std::max(kMinFramerate, max_framerate_)) *
                             kMaxSampleDiffMarginFactor);
}

void OveruseFrameDetector::FrameCaptured(const VideoFrame& frame,
                                         int64_t time_when_first_seen_us) {
  RTC_DCHECK_RUN_ON(&task_checker_);

  if (FrameSizeChanged(frame.width() * frame.height()) ||
      FrameTimeoutDetected(time_when_first_seen_us)) {
    ResetAll(frame.width() * frame.height());
  }

  usage_->FrameCaptured(frame, time_when_first_seen_us, last_capture_time_us_);
  last_capture_time_us_ = time_when_first_seen_us;
}

void OveruseFrameDetector::FrameSent(uint32_t timestamp,
                                     int64_t time_sent_in_us,
                                     int64_t capture_time_us,
                                     absl::optional<int> encode_duration_us) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  encode_duration_us = usage_->FrameSent(timestamp, time_sent_in_us,
                                         capture_time_us, encode_duration_us);

  if (encode_duration_us) {
    EncodedFrameTimeMeasured(*encode_duration_us /
                             rtc::kNumMicrosecsPerMillisec);
  }
}

void OveruseFrameDetector::CheckForOveruse(
    OveruseFrameDetectorObserverInterface* observer) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  RTC_DCHECK(observer);
  ++num_process_times_;
  if (num_process_times_ <= options_.min_process_count ||
      !encode_usage_percent_)
    return;

  int64_t now_ms = rtc::TimeMillis();
  const char* action = "NoAction";

  if (IsOverusing(*encode_usage_percent_)) {
    // If the last action was adapting up and we now have to back down, check
    // whether that peak was short-lived. If so, back off to avoid oscillating
    // around a load the system does not seem to handle.
    bool check_for_backoff = last_rampup_time_ms_ > last_overuse_time_ms_;
    if (check_for_backoff) {
      if (now_ms - last_rampup_time_ms_ < kStandardRampUpDelayMs ||
          num_overuse_detections_ > kMaxOverusesBeforeApplyRampupDelay) {
        // Going up was not ok for very long, back off.
        current_rampup_delay_ms_ *= kRampUpBackoffFactor;
        if (current_rampup_delay_ms_ > kMaxRampUpDelayMs)
          current_rampup_delay_ms_ = kMaxRampUpDelayMs;
      } else {
        // Not currently backing off, reset rampup delay.
        current_rampup_delay_ms_ = kStandardRampUpDelayMs;
      }
    }

    last_overuse_time_ms_ = now_ms;
    in_quick_rampup_ = false;
    checks_above_threshold_ = 0;
    ++num_overuse_detections_;

    observer->AdaptDown();
    action = "AdaptDown";
  } else if (IsUnderusing(*encode_usage_percent_, now_ms)) {
    last_rampup_time_ms_ = now_ms;
    in_quick_rampup_ = true;

    observer->AdaptUp();
    action = "AdaptUp";
  }
  TRACE_EVENT2("webrtc", "OveruseFrameDetector::CheckForOveruse",
               "encode_usage_percent", *encode_usage_percent_, "action",
               TRACE_STR_COPY(action));

  int rampup_delay =
      in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;

  RTC_LOG(LS_INFO) << "CheckForOveruse: encode usage " << *encode_usage_percent_
                   << " overuse detections " << num_overuse_detections_
                   << " rampup delay " << rampup_delay << " action " << action;
}

void OveruseFrameDetector::SetOptions(const CpuOveruseOptions& options) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  options_ = options;

  // Time constant config overridable by field trial.
  if (filter_time_constant_) {
    options_.filter_time_ms = filter_time_constant_->ms();
  }
  // Force reset with next frame.
  num_pixels_ = 0;
  usage_ = CreateProcessingUsage(options);
}

bool OveruseFrameDetector::IsOverusing(int usage_percent) {
  RTC_DCHECK_RUN_ON(&task_checker_);

  if (usage_percent >= options_.high_encode_usage_threshold_percent) {
    ++checks_above_threshold_;
  } else {
    checks_above_threshold_ = 0;
  }
  return checks_above_threshold_ >= options_.high_threshold_consecutive_count;
}

bool OveruseFrameDetector::IsUnderusing(int usage_percent, int64_t time_now) {
  RTC_DCHECK_RUN_ON(&task_checker_);
  int delay = in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;
  if (time_now < last_rampup_time_ms_ + delay)
    return false;

  return usage_percent < options_.low_encode_usage_threshold_percent;
}

}  // namespace webrtc
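The core of SendProcessingUsage2 is the first-order filter in AddSample(): load <- (x/d)(1 - exp(-d/tau)) + exp(-d/tau) * load, with the Taylor limit (1 - exp(-d/tau))/d = 1/tau - d/(2 tau^2) guarding against tiny frame gaps. A self-contained numerical check of that update; the constants in main() are illustrative, not from the tree:

#include <cmath>
#include <cstdio>

double UpdateLoad(double load, double encode_time_s, double diff_time_s,
                  double tau_s) {
  double e = diff_time_s / tau_s;
  double c = (e < 0.0001) ? (1 - e / 2) / tau_s          // Small-gap limit.
                          : -std::expm1(-e) / diff_time_s;
  return c * encode_time_s + std::exp(-e) * load;
}

int main() {
  double load = 0.5;  // Start between under- and overuse, as Reset() does.
  // 30 fps (33 ms gaps) with 20 ms encode times and tau = 100 ms.
  for (int i = 0; i < 100; ++i)
    load = UpdateLoad(load, 0.020, 0.033, 0.100);
  std::printf("load = %.2f\n", load);  // Converges to 0.020 / 0.033 = 0.61.
  return 0;
}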
@ -0,0 +1,179 @@
|
|||
/*
|
||||
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
|
||||
#define VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
|
||||
|
||||
#include <list>
|
||||
#include <memory>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "api/sequence_checker.h"
|
||||
#include "api/task_queue/task_queue_base.h"
|
||||
#include "rtc_base/experiments/field_trial_parser.h"
|
||||
#include "rtc_base/numerics/exp_filter.h"
|
||||
#include "rtc_base/system/no_unique_address.h"
|
||||
#include "rtc_base/task_utils/repeating_task.h"
|
||||
#include "rtc_base/thread_annotations.h"
|
||||
#include "video/video_stream_encoder_observer.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class VideoFrame;
|
||||
|
||||
struct CpuOveruseOptions {
|
||||
// Threshold for triggering overuse.
|
||||
int high_encode_usage_threshold_percent = 85;
|
||||
// Threshold for triggering underuse.
|
||||
// Note that we make the interval 2x+epsilon wide, since libyuv scaling steps
|
||||
// are close to that (when squared). This wide interval makes sure that
|
||||
// scaling up or down does not jump all the way across the interval.
|
||||
int low_encode_usage_threshold_percent =
|
||||
(high_encode_usage_threshold_percent - 1) / 2;
|
||||
// General settings.
|
||||
// The maximum allowed interval between two frames before resetting
|
||||
// estimations.
|
||||
int frame_timeout_interval_ms = 1500;
|
||||
// The minimum number of frames required.
|
||||
int min_frame_samples = 120;
|
||||
|
||||
// The number of initial process times required before
|
||||
// triggering an overuse/underuse.
|
||||
int min_process_count = 3;
|
||||
// The number of consecutive checks above the high threshold before triggering
|
||||
// an overuse.
|
||||
int high_threshold_consecutive_count = 2;
|
||||
// New estimator enabled if this is set non-zero.
|
||||
int filter_time_ms = 0; // Time constant for averaging
|
||||
};
|
||||
|
||||
class OveruseFrameDetectorObserverInterface {
|
||||
public:
|
||||
// Called to signal that we can handle larger or more frequent frames.
|
||||
virtual void AdaptUp() = 0;
|
||||
// Called to signal that the source should reduce the resolution or framerate.
|
||||
virtual void AdaptDown() = 0;
|
||||
|
||||
protected:
|
||||
virtual ~OveruseFrameDetectorObserverInterface() {}
|
||||
};
|
||||
|
||||
// Use to detect system overuse based on the send-side processing time of
|
||||
// incoming frames. All methods must be called on a single task queue but it can
|
||||
// be created and destroyed on an arbitrary thread.
|
||||
// OveruseFrameDetector::StartCheckForOveruse must be called to periodically
|
||||
// check for overuse.
|
||||
class OveruseFrameDetector {
|
||||
public:
|
||||
explicit OveruseFrameDetector(CpuOveruseMetricsObserver* metrics_observer);
|
||||
virtual ~OveruseFrameDetector();
|
||||
|
||||
OveruseFrameDetector(const OveruseFrameDetector&) = delete;
|
||||
OveruseFrameDetector& operator=(const OveruseFrameDetector&) = delete;
|
||||
|
||||
// Start to periodically check for overuse.
|
||||
void StartCheckForOveruse(
|
||||
TaskQueueBase* task_queue_base,
|
||||
const CpuOveruseOptions& options,
|
||||
OveruseFrameDetectorObserverInterface* overuse_observer);
|
||||
|
||||
// StopCheckForOveruse must be called before destruction if
|
||||
// StartCheckForOveruse has been called.
|
||||
void StopCheckForOveruse();
|
||||
|
||||
// Defines the current maximum framerate targeted by the capturer. This is
|
||||
// used to make sure the encode usage percent doesn't drop unduly if the
|
||||
  // capturer has quiet periods (for instance caused by screen capturers with
  // variable capture rate depending on content updates), otherwise we might
  // experience adaptation toggling.
  virtual void OnTargetFramerateUpdated(int framerate_fps);

  // Called for each captured frame.
  void FrameCaptured(const VideoFrame& frame, int64_t time_when_first_seen_us);

  // Called for each sent frame.
  void FrameSent(uint32_t timestamp,
                 int64_t time_sent_in_us,
                 int64_t capture_time_us,
                 absl::optional<int> encode_duration_us);

  // Interface for cpu load estimation. Intended for internal use only.
  class ProcessingUsage {
   public:
    virtual void Reset() = 0;
    virtual void SetMaxSampleDiffMs(float diff_ms) = 0;
    virtual void FrameCaptured(const VideoFrame& frame,
                               int64_t time_when_first_seen_us,
                               int64_t last_capture_time_us) = 0;
    // Returns encode_time in us, if there's a new measurement.
    virtual absl::optional<int> FrameSent(
        // These two arguments are used by the old estimator.
        uint32_t timestamp,
        int64_t time_sent_in_us,
        // And these two by the new estimator.
        int64_t capture_time_us,
        absl::optional<int> encode_duration_us) = 0;

    virtual int Value() = 0;
    virtual ~ProcessingUsage() = default;
  };

 protected:
  // Protected for test purposes.
  void CheckForOveruse(OveruseFrameDetectorObserverInterface* overuse_observer);
  void SetOptions(const CpuOveruseOptions& options);

  CpuOveruseOptions options_;

 private:
  void EncodedFrameTimeMeasured(int encode_duration_ms);
  bool IsOverusing(int encode_usage_percent);
  bool IsUnderusing(int encode_usage_percent, int64_t time_now);

  bool FrameTimeoutDetected(int64_t now) const;
  bool FrameSizeChanged(int num_pixels) const;

  void ResetAll(int num_pixels);

  static std::unique_ptr<ProcessingUsage> CreateProcessingUsage(
      const CpuOveruseOptions& options);

  RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_;
  // Owned by the task queue from where StartCheckForOveruse is called.
  RepeatingTaskHandle check_overuse_task_ RTC_GUARDED_BY(task_checker_);

  // Stats metrics.
  CpuOveruseMetricsObserver* const metrics_observer_;
  absl::optional<int> encode_usage_percent_ RTC_GUARDED_BY(task_checker_);

  int64_t num_process_times_ RTC_GUARDED_BY(task_checker_);

  int64_t last_capture_time_us_ RTC_GUARDED_BY(task_checker_);

  // Number of pixels of last captured frame.
  int num_pixels_ RTC_GUARDED_BY(task_checker_);
  int max_framerate_ RTC_GUARDED_BY(task_checker_);
  int64_t last_overuse_time_ms_ RTC_GUARDED_BY(task_checker_);
  int checks_above_threshold_ RTC_GUARDED_BY(task_checker_);
  int num_overuse_detections_ RTC_GUARDED_BY(task_checker_);
  int64_t last_rampup_time_ms_ RTC_GUARDED_BY(task_checker_);
  bool in_quick_rampup_ RTC_GUARDED_BY(task_checker_);
  int current_rampup_delay_ms_ RTC_GUARDED_BY(task_checker_);

  std::unique_ptr<ProcessingUsage> usage_ RTC_PT_GUARDED_BY(task_checker_);

  // If set by field trial, overrides CpuOveruseOptions::filter_time_ms.
  FieldTrialOptional<TimeDelta> filter_time_constant_{"tau"};
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
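
For illustration, here is a minimal sketch of how the ProcessingUsage interface above is meant to be driven: each FrameCaptured() is eventually matched by a FrameSent() that may yield a new usage sample. The class name, the smoothing factor, and the enclosing detector name (OveruseFrameDetector, inferred from the include guard) are assumptions for this sketch, not part of this commit.

// Hypothetical sketch, not part of this commit: a naive usage estimator that
// reports encode time as a percentage of the capture interval.
class NaiveProcessingUsage : public OveruseFrameDetector::ProcessingUsage {
 public:
  void Reset() override {
    usage_percent_ = 100;
    last_capture_time_us_ = -1;
  }
  void SetMaxSampleDiffMs(float diff_ms) override {
    max_sample_diff_ms_ = diff_ms;
  }
  void FrameCaptured(const VideoFrame& /*frame*/,
                     int64_t /*time_when_first_seen_us*/,
                     int64_t last_capture_time_us) override {
    last_capture_time_us_ = last_capture_time_us;
  }
  absl::optional<int> FrameSent(
      uint32_t /*timestamp*/,
      int64_t /*time_sent_in_us*/,
      int64_t capture_time_us,
      absl::optional<int> encode_duration_us) override {
    if (!encode_duration_us || last_capture_time_us_ < 0)
      return absl::nullopt;
    int64_t capture_interval_us = capture_time_us - last_capture_time_us_;
    if (capture_interval_us <= 0)
      return absl::nullopt;
    // New sample: encode time relative to the capture interval, smoothed
    // with a fixed exponential filter (90% old value, 10% new sample).
    int sample =
        static_cast<int>(100 * *encode_duration_us / capture_interval_us);
    usage_percent_ = (9 * usage_percent_ + sample) / 10;
    // Per the interface contract, return the encode time in us when there
    // is a new measurement.
    return *encode_duration_us;
  }
  int Value() override { return usage_percent_; }

 private:
  int usage_percent_ = 100;
  int64_t last_capture_time_us_ = -1;
  float max_sample_diff_ms_ = 0.f;
};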

@@ -0,0 +1,101 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/pixel_limit_resource.h"

#include "api/sequence_checker.h"
#include "api/units/time_delta.h"
#include "call/adaptation/video_stream_adapter.h"
#include "rtc_base/checks.h"

namespace webrtc {

namespace {

constexpr TimeDelta kResourceUsageCheckIntervalMs = TimeDelta::Seconds(5);

}  // namespace

// static
rtc::scoped_refptr<PixelLimitResource> PixelLimitResource::Create(
    TaskQueueBase* task_queue,
    VideoStreamInputStateProvider* input_state_provider) {
  return rtc::make_ref_counted<PixelLimitResource>(task_queue,
                                                   input_state_provider);
}

PixelLimitResource::PixelLimitResource(
    TaskQueueBase* task_queue,
    VideoStreamInputStateProvider* input_state_provider)
    : task_queue_(task_queue),
      input_state_provider_(input_state_provider),
      max_pixels_(absl::nullopt) {
  RTC_DCHECK(task_queue_);
  RTC_DCHECK(input_state_provider_);
}

PixelLimitResource::~PixelLimitResource() {
  RTC_DCHECK(!listener_);
  RTC_DCHECK(!repeating_task_.Running());
}

void PixelLimitResource::SetMaxPixels(int max_pixels) {
  RTC_DCHECK_RUN_ON(task_queue_);
  max_pixels_ = max_pixels;
}

void PixelLimitResource::SetResourceListener(ResourceListener* listener) {
  RTC_DCHECK_RUN_ON(task_queue_);
  listener_ = listener;
  if (listener_) {
    repeating_task_.Stop();
    repeating_task_ = RepeatingTaskHandle::Start(task_queue_, [&] {
      RTC_DCHECK_RUN_ON(task_queue_);
      if (!listener_) {
        // We don't have a listener, so resource adaptation must not be
        // running; try again later.
        return kResourceUsageCheckIntervalMs;
      }
      if (!max_pixels_.has_value()) {
        // No pixel limit configured yet; try again later.
        return kResourceUsageCheckIntervalMs;
      }
      absl::optional<int> frame_size_pixels =
          input_state_provider_->InputState().frame_size_pixels();
      if (!frame_size_pixels.has_value()) {
        // We haven't observed a frame yet, so we don't know if it's going to
        // be too big or too small; try again later.
        return kResourceUsageCheckIntervalMs;
      }
      int current_pixels = frame_size_pixels.value();
      int target_pixel_upper_bounds = max_pixels_.value();
      // To avoid toggling, we allow any resolution between
      // `target_pixel_upper_bounds` and video_stream_adapter.h's
      // GetLowerResolutionThan(). This is the number of pixels we would end
      // up with if we adapted down from `target_pixel_upper_bounds`.
      int target_pixels_lower_bounds =
          GetLowerResolutionThan(target_pixel_upper_bounds);
      if (current_pixels > target_pixel_upper_bounds) {
        listener_->OnResourceUsageStateMeasured(
            rtc::scoped_refptr<Resource>(this), ResourceUsageState::kOveruse);
      } else if (current_pixels < target_pixels_lower_bounds) {
        listener_->OnResourceUsageStateMeasured(
            rtc::scoped_refptr<Resource>(this), ResourceUsageState::kUnderuse);
      }
      return kResourceUsageCheckIntervalMs;
    });
  } else {
    repeating_task_.Stop();
  }
  // The task must be running if we have a listener.
  RTC_DCHECK(repeating_task_.Running() || !listener_);
}

}  // namespace webrtc

@@ -0,0 +1,60 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_
#define VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_

#include <string>

#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/scoped_refptr.h"
#include "call/adaptation/video_stream_input_state_provider.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/thread_annotations.h"

namespace webrtc {

// An adaptation resource designed to be used in the TestBed. Used to simulate
// being CPU limited.
//
// Periodically reports "overuse" or "underuse" (every 5 seconds) until the
// stream is within the bounds specified in terms of a maximum resolution and
// one resolution step lower than that (this avoids toggling when this is the
// only resource in play). When multiple resources come into play, some amount
// of toggling is still possible in edge cases, but that is OK for testing
// purposes.
class PixelLimitResource : public Resource {
 public:
  static rtc::scoped_refptr<PixelLimitResource> Create(
      TaskQueueBase* task_queue,
      VideoStreamInputStateProvider* input_state_provider);

  PixelLimitResource(TaskQueueBase* task_queue,
                     VideoStreamInputStateProvider* input_state_provider);
  ~PixelLimitResource() override;

  void SetMaxPixels(int max_pixels);

  // Resource implementation.
  std::string Name() const override { return "PixelLimitResource"; }
  void SetResourceListener(ResourceListener* listener) override;

 private:
  TaskQueueBase* const task_queue_;
  VideoStreamInputStateProvider* const input_state_provider_;
  absl::optional<int> max_pixels_ RTC_GUARDED_BY(task_queue_);
  webrtc::ResourceListener* listener_ RTC_GUARDED_BY(task_queue_);
  RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(task_queue_);
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_
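
A minimal usage sketch, assuming an encoder task queue, an input state provider, and an adaptation listener already exist; the function name is hypothetical and both setters must run on the given task queue. This is an illustration, not part of this commit.

// Hypothetical sketch, not part of this commit: cap a test stream at
// 640x360 (230400 pixels). Both calls below must run on `encoder_queue`.
void ConfigurePixelLimitForTest(TaskQueueBase* encoder_queue,
                                VideoStreamInputStateProvider* provider,
                                ResourceListener* adaptation_listener) {
  rtc::scoped_refptr<PixelLimitResource> resource =
      PixelLimitResource::Create(encoder_queue, provider);
  resource->SetMaxPixels(640 * 360);
  // Setting a listener starts the repeating 5 second check that reports
  // kOveruse/kUnderuse until the stream is within bounds.
  resource->SetResourceListener(adaptation_listener);
}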

@@ -0,0 +1,88 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/quality_rampup_experiment_helper.h"

#include <memory>
#include <utility>

#include "rtc_base/logging.h"

namespace webrtc {

QualityRampUpExperimentHelper::QualityRampUpExperimentHelper(
    QualityRampUpExperimentListener* experiment_listener,
    Clock* clock,
    QualityRampupExperiment experiment)
    : experiment_listener_(experiment_listener),
      clock_(clock),
      quality_rampup_experiment_(std::move(experiment)),
      cpu_adapted_(false),
      qp_resolution_adaptations_(0) {
  RTC_DCHECK(experiment_listener_);
  RTC_DCHECK(clock_);
}

std::unique_ptr<QualityRampUpExperimentHelper>
QualityRampUpExperimentHelper::CreateIfEnabled(
    QualityRampUpExperimentListener* experiment_listener,
    Clock* clock) {
  QualityRampupExperiment experiment = QualityRampupExperiment::ParseSettings();
  if (experiment.Enabled()) {
    return std::unique_ptr<QualityRampUpExperimentHelper>(
        new QualityRampUpExperimentHelper(experiment_listener, clock,
                                          experiment));
  }
  return nullptr;
}

void QualityRampUpExperimentHelper::ConfigureQualityRampupExperiment(
    bool reset,
    absl::optional<uint32_t> pixels,
    absl::optional<DataRate> max_bitrate) {
  if (reset)
    quality_rampup_experiment_.Reset();
  if (pixels && max_bitrate)
    quality_rampup_experiment_.SetMaxBitrate(*pixels, max_bitrate->kbps());
}

void QualityRampUpExperimentHelper::PerformQualityRampupExperiment(
    rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
    DataRate bandwidth,
    DataRate encoder_target_bitrate,
    absl::optional<DataRate> max_bitrate) {
  if (!quality_scaler_resource->is_started() || !max_bitrate)
    return;

  int64_t now_ms = clock_->TimeInMilliseconds();

  bool try_quality_rampup = false;
  if (quality_rampup_experiment_.BwHigh(now_ms, bandwidth.kbps())) {
    // Verify that the encoder is at max bitrate and the QP is low.
    if (encoder_target_bitrate == *max_bitrate &&
        quality_scaler_resource->QpFastFilterLow()) {
      try_quality_rampup = true;
    }
  }
  if (try_quality_rampup && qp_resolution_adaptations_ > 0 && !cpu_adapted_) {
    experiment_listener_->OnQualityRampUp();
  }
}

void QualityRampUpExperimentHelper::cpu_adapted(bool cpu_adapted) {
  cpu_adapted_ = cpu_adapted;
}

void QualityRampUpExperimentHelper::qp_resolution_adaptations(
    int qp_resolution_adaptations) {
  qp_resolution_adaptations_ = qp_resolution_adaptations;
}

}  // namespace webrtc

@@ -0,0 +1,71 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
#define VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_

#include <memory>

#include "api/scoped_refptr.h"
#include "api/units/data_rate.h"
#include "rtc_base/experiments/quality_rampup_experiment.h"
#include "system_wrappers/include/clock.h"
#include "video/adaptation/quality_scaler_resource.h"

namespace webrtc {

class QualityRampUpExperimentListener {
 public:
  virtual ~QualityRampUpExperimentListener() = default;
  virtual void OnQualityRampUp() = 0;
};

// Helper class for orchestrating the WebRTC-Video-QualityRampupSettings
// experiment.
class QualityRampUpExperimentHelper {
 public:
  // Returns a QualityRampUpExperimentHelper if the experiment is enabled,
  // or nullptr otherwise.
  static std::unique_ptr<QualityRampUpExperimentHelper> CreateIfEnabled(
      QualityRampUpExperimentListener* experiment_listener,
      Clock* clock);

  QualityRampUpExperimentHelper(const QualityRampUpExperimentHelper&) = delete;
  QualityRampUpExperimentHelper& operator=(
      const QualityRampUpExperimentHelper&) = delete;

  void cpu_adapted(bool cpu_adapted);
  void qp_resolution_adaptations(int qp_adaptations);

  void ConfigureQualityRampupExperiment(bool reset,
                                        absl::optional<uint32_t> pixels,
                                        absl::optional<DataRate> max_bitrate);

  void PerformQualityRampupExperiment(
      rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
      DataRate bandwidth,
      DataRate encoder_target_bitrate,
      absl::optional<DataRate> max_bitrate);

 private:
  QualityRampUpExperimentHelper(
      QualityRampUpExperimentListener* experiment_listener,
      Clock* clock,
      QualityRampupExperiment experiment);
  QualityRampUpExperimentListener* const experiment_listener_;
  Clock* clock_;
  QualityRampupExperiment quality_rampup_experiment_;
  bool cpu_adapted_;
  int qp_resolution_adaptations_;
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
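
A sketch of the intended wiring, assuming the owner of the helper is also the object that reacts to the ramp-up signal; the class name and the restriction-clearing behavior are illustrative only, not part of this commit.

// Hypothetical sketch, not part of this commit.
class ExampleRampUpOwner : public QualityRampUpExperimentListener {
 public:
  explicit ExampleRampUpOwner(Clock* clock)
      : helper_(QualityRampUpExperimentHelper::CreateIfEnabled(this, clock)) {}

  // Called by the helper when bandwidth is high, the encoder is at max
  // bitrate and QP is low; what to do here is owner-specific, e.g. clearing
  // resolution restrictions.
  void OnQualityRampUp() override {}

 private:
  // Remains nullptr when the field trial is not enabled.
  std::unique_ptr<QualityRampUpExperimentHelper> helper_;
};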

@@ -0,0 +1,101 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/quality_scaler_resource.h"

#include <utility>

#include "api/field_trials_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/balanced_degradation_settings.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

// static
rtc::scoped_refptr<QualityScalerResource> QualityScalerResource::Create() {
  return rtc::make_ref_counted<QualityScalerResource>();
}

QualityScalerResource::QualityScalerResource()
    : VideoStreamEncoderResource("QualityScalerResource"),
      quality_scaler_(nullptr) {}

QualityScalerResource::~QualityScalerResource() {
  RTC_DCHECK(!quality_scaler_);
}

bool QualityScalerResource::is_started() const {
  RTC_DCHECK_RUN_ON(encoder_queue());
  return quality_scaler_.get();
}

void QualityScalerResource::StartCheckForOveruse(
    VideoEncoder::QpThresholds qp_thresholds,
    const FieldTrialsView& field_trials) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(!is_started());
  quality_scaler_ = std::make_unique<QualityScaler>(
      this, std::move(qp_thresholds), field_trials);
}

void QualityScalerResource::StopCheckForOveruse() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(is_started());
  // Ensure we have no pending callbacks. This makes it safe to destroy the
  // QualityScaler and even task queues with tasks in-flight.
  quality_scaler_.reset();
}

void QualityScalerResource::SetQpThresholds(
    VideoEncoder::QpThresholds qp_thresholds) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(is_started());
  quality_scaler_->SetQpThresholds(std::move(qp_thresholds));
}

bool QualityScalerResource::QpFastFilterLow() {
  RTC_DCHECK_RUN_ON(encoder_queue());
  RTC_DCHECK(is_started());
  return quality_scaler_->QpFastFilterLow();
}

void QualityScalerResource::OnEncodeCompleted(const EncodedImage& encoded_image,
                                              int64_t time_sent_in_us) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  if (quality_scaler_ && encoded_image.qp_ >= 0) {
    quality_scaler_->ReportQp(encoded_image.qp_, time_sent_in_us);
  }
}

void QualityScalerResource::OnFrameDropped(
    EncodedImageCallback::DropReason reason) {
  RTC_DCHECK_RUN_ON(encoder_queue());
  if (!quality_scaler_)
    return;
  switch (reason) {
    case EncodedImageCallback::DropReason::kDroppedByMediaOptimizations:
      quality_scaler_->ReportDroppedFrameByMediaOpt();
      break;
    case EncodedImageCallback::DropReason::kDroppedByEncoder:
      quality_scaler_->ReportDroppedFrameByEncoder();
      break;
  }
}

void QualityScalerResource::OnReportQpUsageHigh() {
  OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
}

void QualityScalerResource::OnReportQpUsageLow() {
  OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
}

}  // namespace webrtc

@@ -0,0 +1,61 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
#define VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_

#include <memory>
#include <queue>
#include <string>

#include "absl/types/optional.h"
#include "api/field_trials_view.h"
#include "api/scoped_refptr.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video_codecs/video_encoder.h"
#include "call/adaptation/degradation_preference_provider.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "video/adaptation/video_stream_encoder_resource.h"

namespace webrtc {

// Handles interaction with the QualityScaler.
class QualityScalerResource : public VideoStreamEncoderResource,
                              public QualityScalerQpUsageHandlerInterface {
 public:
  static rtc::scoped_refptr<QualityScalerResource> Create();

  QualityScalerResource();
  ~QualityScalerResource() override;

  bool is_started() const;

  void StartCheckForOveruse(VideoEncoder::QpThresholds qp_thresholds,
                            const FieldTrialsView& field_trials);
  void StopCheckForOveruse();
  void SetQpThresholds(VideoEncoder::QpThresholds qp_thresholds);
  bool QpFastFilterLow();
  void OnEncodeCompleted(const EncodedImage& encoded_image,
                         int64_t time_sent_in_us);
  void OnFrameDropped(EncodedImageCallback::DropReason reason);

  // QualityScalerQpUsageHandlerInterface implementation.
  void OnReportQpUsageHigh() override;
  void OnReportQpUsageLow() override;

 private:
  std::unique_ptr<QualityScaler> quality_scaler_
      RTC_GUARDED_BY(encoder_queue());
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
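
The intended lifecycle, as a sketch: the QP threshold values are illustrative, all calls must run on the encoder queue, and the destructor DCHECKs that checking was stopped first. Not part of this commit.

// Hypothetical sketch, not part of this commit.
void RunQualityScalerLifecycle(rtc::scoped_refptr<QualityScalerResource> r,
                               const FieldTrialsView& field_trials) {
  r->StartCheckForOveruse(VideoEncoder::QpThresholds(/*low=*/24, /*high=*/37),
                          field_trials);
  RTC_DCHECK(r->is_started());
  // Thresholds may be updated while running, e.g. after a reconfiguration.
  r->SetQpThresholds(VideoEncoder::QpThresholds(/*low=*/26, /*high=*/35));
  // Must be stopped before the resource is destroyed.
  r->StopCheckForOveruse();
}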

@@ -0,0 +1,63 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/video_stream_encoder_resource.h"

#include <algorithm>
#include <utility>

namespace webrtc {

VideoStreamEncoderResource::VideoStreamEncoderResource(std::string name)
    : lock_(),
      name_(std::move(name)),
      encoder_queue_(nullptr),
      listener_(nullptr) {}

VideoStreamEncoderResource::~VideoStreamEncoderResource() {
  RTC_DCHECK(!listener_)
      << "There is a listener depending on a VideoStreamEncoderResource being "
      << "destroyed.";
}

void VideoStreamEncoderResource::RegisterEncoderTaskQueue(
    TaskQueueBase* encoder_queue) {
  RTC_DCHECK(!encoder_queue_);
  RTC_DCHECK(encoder_queue);
  encoder_queue_ = encoder_queue;
}

void VideoStreamEncoderResource::SetResourceListener(
    ResourceListener* listener) {
  // If you want to change the listener, you need to unregister the old
  // listener by setting it to null first.
  MutexLock crit(&lock_);
  RTC_DCHECK(!listener_ || !listener) << "A listener is already set";
  listener_ = listener;
}

std::string VideoStreamEncoderResource::Name() const {
  return name_;
}

void VideoStreamEncoderResource::OnResourceUsageStateMeasured(
    ResourceUsageState usage_state) {
  MutexLock crit(&lock_);
  if (listener_) {
    listener_->OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource>(this),
                                            usage_state);
  }
}

TaskQueueBase* VideoStreamEncoderResource::encoder_queue() const {
  return encoder_queue_;
}

}  // namespace webrtc

@@ -0,0 +1,55 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
#define VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_

#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "call/adaptation/adaptation_constraint.h"
#include "rtc_base/synchronization/mutex.h"

namespace webrtc {

class VideoStreamEncoderResource : public Resource {
 public:
  ~VideoStreamEncoderResource() override;

  // Registering task queues must be performed as part of initialization.
  void RegisterEncoderTaskQueue(TaskQueueBase* encoder_queue);

  // Resource implementation.
  std::string Name() const override;
  void SetResourceListener(ResourceListener* listener) override;

 protected:
  explicit VideoStreamEncoderResource(std::string name);

  void OnResourceUsageStateMeasured(ResourceUsageState usage_state);

  // The caller is responsible for ensuring the task queue is still valid.
  TaskQueueBase* encoder_queue() const;

 private:
  mutable Mutex lock_;
  const std::string name_;
  // Treated as const after initialization.
  TaskQueueBase* encoder_queue_;
  ResourceListener* listener_ RTC_GUARDED_BY(lock_);
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
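
Subclasses only need to pass a name to the protected constructor and call OnResourceUsageStateMeasured() when they have something to report. A sketch with a made-up signal follows; the class, thresholds, and sampling source are hypothetical, not part of this commit.

// Hypothetical sketch, not part of this commit.
class OverheatResource : public VideoStreamEncoderResource {
 public:
  OverheatResource() : VideoStreamEncoderResource("OverheatResource") {}

  // Fed by some platform-specific temperature probe; thresholds are made up
  // for illustration.
  void OnTemperatureSample(float celsius) {
    if (celsius > 80.0f) {
      OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
    } else if (celsius < 60.0f) {
      OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
    }
  }
};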

@@ -0,0 +1,857 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/adaptation/video_stream_encoder_resource_manager.h"

#include <stdio.h>

#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>

#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "api/adaptation/resource.h"
#include "api/field_trials_view.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video/video_source_interface.h"
#include "call/adaptation/video_source_restrictions.h"
#include "modules/video_coding/svc/scalability_mode_util.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "video/adaptation/quality_scaler_resource.h"

namespace webrtc {

const int kDefaultInputPixelsWidth = 176;
const int kDefaultInputPixelsHeight = 144;

namespace {

constexpr const char* kPixelLimitResourceFieldTrialName =
    "WebRTC-PixelLimitResource";

bool IsResolutionScalingEnabled(DegradationPreference degradation_preference) {
  return degradation_preference == DegradationPreference::MAINTAIN_FRAMERATE ||
         degradation_preference == DegradationPreference::BALANCED;
}

bool IsFramerateScalingEnabled(DegradationPreference degradation_preference) {
  return degradation_preference == DegradationPreference::MAINTAIN_RESOLUTION ||
         degradation_preference == DegradationPreference::BALANCED;
}

std::string ToString(VideoAdaptationReason reason) {
  switch (reason) {
    case VideoAdaptationReason::kQuality:
      return "quality";
    case VideoAdaptationReason::kCpu:
      return "cpu";
  }
  RTC_CHECK_NOTREACHED();
}

std::vector<bool> GetActiveLayersFlags(const VideoCodec& codec) {
  std::vector<bool> flags;
  if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
    flags.resize(codec.VP9().numberOfSpatialLayers);
    for (size_t i = 0; i < flags.size(); ++i) {
      flags[i] = codec.spatialLayers[i].active;
    }
  } else {
    flags.resize(codec.numberOfSimulcastStreams);
    for (size_t i = 0; i < flags.size(); ++i) {
      flags[i] = codec.simulcastStream[i].active;
    }
  }
  return flags;
}

bool EqualFlags(const std::vector<bool>& a, const std::vector<bool>& b) {
  if (a.size() != b.size())
    return false;
  return std::equal(a.begin(), a.end(), b.begin());
}

absl::optional<DataRate> GetSingleActiveLayerMaxBitrate(
    const VideoCodec& codec) {
  int num_active = 0;
  absl::optional<DataRate> max_bitrate;
  if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
    for (int i = 0; i < codec.VP9().numberOfSpatialLayers; ++i) {
      if (codec.spatialLayers[i].active) {
        ++num_active;
        max_bitrate =
            DataRate::KilobitsPerSec(codec.spatialLayers[i].maxBitrate);
      }
    }
  } else {
    for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
      if (codec.simulcastStream[i].active) {
        ++num_active;
        max_bitrate =
            DataRate::KilobitsPerSec(codec.simulcastStream[i].maxBitrate);
      }
    }
  }
  return (num_active > 1) ? absl::nullopt : max_bitrate;
}

}  // namespace

class VideoStreamEncoderResourceManager::InitialFrameDropper {
 public:
  explicit InitialFrameDropper(
      rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
      const FieldTrialsView& field_trials)
      : quality_scaler_resource_(quality_scaler_resource),
        quality_scaler_settings_(field_trials),
        has_seen_first_bwe_drop_(false),
        set_start_bitrate_(DataRate::Zero()),
        set_start_bitrate_time_ms_(0),
        initial_framedrop_(0),
        use_bandwidth_allocation_(false),
        bandwidth_allocation_(DataRate::Zero()),
        last_input_width_(0),
        last_input_height_(0),
        last_stream_configuration_changed_(false) {
    RTC_DCHECK(quality_scaler_resource_);
  }

  // Output signal.
  bool DropInitialFrames() const {
    return initial_framedrop_ < kMaxInitialFramedrop;
  }

  absl::optional<uint32_t> single_active_stream_pixels() const {
    return single_active_stream_pixels_;
  }

  absl::optional<uint32_t> UseBandwidthAllocationBps() const {
    return (use_bandwidth_allocation_ &&
            bandwidth_allocation_ > DataRate::Zero())
               ? absl::optional<uint32_t>(bandwidth_allocation_.bps())
               : absl::nullopt;
  }

  bool last_stream_configuration_changed() const {
    return last_stream_configuration_changed_;
  }

  // Input signals.
  void SetStartBitrate(DataRate start_bitrate, int64_t now_ms) {
    set_start_bitrate_ = start_bitrate;
    set_start_bitrate_time_ms_ = now_ms;
  }

  void SetBandwidthAllocation(DataRate bandwidth_allocation) {
    bandwidth_allocation_ = bandwidth_allocation;
  }

  void SetTargetBitrate(DataRate target_bitrate, int64_t now_ms) {
    if (set_start_bitrate_ > DataRate::Zero() && !has_seen_first_bwe_drop_ &&
        quality_scaler_resource_->is_started() &&
        quality_scaler_settings_.InitialBitrateIntervalMs() &&
        quality_scaler_settings_.InitialBitrateFactor()) {
      int64_t diff_ms = now_ms - set_start_bitrate_time_ms_;
      if (diff_ms <
              quality_scaler_settings_.InitialBitrateIntervalMs().value() &&
          (target_bitrate <
           (set_start_bitrate_ *
            quality_scaler_settings_.InitialBitrateFactor().value()))) {
        RTC_LOG(LS_INFO) << "Reset initial_framedrop_. Start bitrate: "
                         << set_start_bitrate_.bps()
                         << ", target bitrate: " << target_bitrate.bps();
        initial_framedrop_ = 0;
        has_seen_first_bwe_drop_ = true;
      }
    }
  }

  void OnEncoderSettingsUpdated(
      const VideoCodec& codec,
      const VideoAdaptationCounters& adaptation_counters) {
    last_stream_configuration_changed_ = false;
    std::vector<bool> active_flags = GetActiveLayersFlags(codec);
    // Check if the source resolution has changed for external reasons,
    // i.e. without any adaptation from WebRTC.
    const bool source_resolution_changed =
        (last_input_width_ != codec.width ||
         last_input_height_ != codec.height) &&
        adaptation_counters.resolution_adaptations ==
            last_adaptation_counters_.resolution_adaptations;
    if (!EqualFlags(active_flags, last_active_flags_) ||
        source_resolution_changed) {
      // Streams configuration has changed.
      last_stream_configuration_changed_ = true;
      // Initial frame drop must be enabled because BWE might be way too low
      // for the selected resolution.
      if (quality_scaler_resource_->is_started()) {
        RTC_LOG(LS_INFO) << "Resetting initial_framedrop_ due to changed "
                            "stream parameters";
        initial_framedrop_ = 0;
        if (single_active_stream_pixels_ &&
            VideoStreamAdapter::GetSingleActiveLayerPixels(codec) >
                *single_active_stream_pixels_) {
          // Resolution increased.
          use_bandwidth_allocation_ = true;
        }
      }
    }
    last_adaptation_counters_ = adaptation_counters;
    last_active_flags_ = active_flags;
    last_input_width_ = codec.width;
    last_input_height_ = codec.height;
    single_active_stream_pixels_ =
        VideoStreamAdapter::GetSingleActiveLayerPixels(codec);
  }

  void OnFrameDroppedDueToSize() { ++initial_framedrop_; }

  void Disable() {
    initial_framedrop_ = kMaxInitialFramedrop;
    use_bandwidth_allocation_ = false;
  }

  void OnQualityScalerSettingsUpdated() {
    if (quality_scaler_resource_->is_started()) {
      // Restart frame drops due to size.
      initial_framedrop_ = 0;
    } else {
      // Quality scaling is disabled, so we shouldn't drop initial frames.
      Disable();
    }
  }

 private:
  // The maximum number of frames to drop at the beginning of the stream to
  // try to achieve the desired bitrate.
  static const int kMaxInitialFramedrop = 4;

  const rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource_;
  const QualityScalerSettings quality_scaler_settings_;
  bool has_seen_first_bwe_drop_;
  DataRate set_start_bitrate_;
  int64_t set_start_bitrate_time_ms_;
  // Counts how many frames we've dropped in the initial framedrop phase.
  int initial_framedrop_;
  absl::optional<uint32_t> single_active_stream_pixels_;
  bool use_bandwidth_allocation_;
  DataRate bandwidth_allocation_;

  std::vector<bool> last_active_flags_;
  VideoAdaptationCounters last_adaptation_counters_;
  int last_input_width_;
  int last_input_height_;
  bool last_stream_configuration_changed_;
};

VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager(
    VideoStreamInputStateProvider* input_state_provider,
    VideoStreamEncoderObserver* encoder_stats_observer,
    Clock* clock,
    bool experiment_cpu_load_estimator,
    std::unique_ptr<OveruseFrameDetector> overuse_detector,
    DegradationPreferenceProvider* degradation_preference_provider,
    const FieldTrialsView& field_trials)
    : field_trials_(field_trials),
      degradation_preference_provider_(degradation_preference_provider),
      bitrate_constraint_(std::make_unique<BitrateConstraint>()),
      balanced_constraint_(
          std::make_unique<BalancedConstraint>(degradation_preference_provider_,
                                               field_trials)),
      encode_usage_resource_(
          EncodeUsageResource::Create(std::move(overuse_detector))),
      quality_scaler_resource_(QualityScalerResource::Create()),
      pixel_limit_resource_(nullptr),
      bandwidth_quality_scaler_resource_(
          BandwidthQualityScalerResource::Create()),
      encoder_queue_(nullptr),
      input_state_provider_(input_state_provider),
      adaptation_processor_(nullptr),
      encoder_stats_observer_(encoder_stats_observer),
      degradation_preference_(DegradationPreference::DISABLED),
      video_source_restrictions_(),
      balanced_settings_(field_trials),
      clock_(clock),
      experiment_cpu_load_estimator_(experiment_cpu_load_estimator),
      initial_frame_dropper_(
          std::make_unique<InitialFrameDropper>(quality_scaler_resource_,
                                                field_trials)),
      quality_scaling_experiment_enabled_(
          QualityScalingExperiment::Enabled(field_trials_)),
      pixel_limit_resource_experiment_enabled_(
          field_trials.IsEnabled(kPixelLimitResourceFieldTrialName)),
      encoder_target_bitrate_bps_(absl::nullopt),
      quality_rampup_experiment_(
          QualityRampUpExperimentHelper::CreateIfEnabled(this, clock_)),
      encoder_settings_(absl::nullopt) {
  TRACE_EVENT0(
      "webrtc",
      "VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager");
  RTC_CHECK(degradation_preference_provider_);
  RTC_CHECK(encoder_stats_observer_);
}

VideoStreamEncoderResourceManager::~VideoStreamEncoderResourceManager() =
    default;

void VideoStreamEncoderResourceManager::Initialize(
    TaskQueueBase* encoder_queue) {
  RTC_DCHECK(!encoder_queue_);
  RTC_DCHECK(encoder_queue);
  encoder_queue_ = encoder_queue;
  encode_usage_resource_->RegisterEncoderTaskQueue(encoder_queue_);
  quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_);
  bandwidth_quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_);
}

void VideoStreamEncoderResourceManager::SetAdaptationProcessor(
    ResourceAdaptationProcessorInterface* adaptation_processor,
    VideoStreamAdapter* stream_adapter) {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  adaptation_processor_ = adaptation_processor;
  stream_adapter_ = stream_adapter;
}

void VideoStreamEncoderResourceManager::SetDegradationPreferences(
    DegradationPreference degradation_preference) {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  degradation_preference_ = degradation_preference;
  UpdateStatsAdaptationSettings();
}

DegradationPreference
VideoStreamEncoderResourceManager::degradation_preference() const {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  return degradation_preference_;
}

void VideoStreamEncoderResourceManager::ConfigureEncodeUsageResource() {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  RTC_DCHECK(encoder_settings_.has_value());
  if (encode_usage_resource_->is_started()) {
    encode_usage_resource_->StopCheckForOveruse();
  } else {
    // If the resource has not yet started then it needs to be added.
    AddResource(encode_usage_resource_, VideoAdaptationReason::kCpu);
  }
  encode_usage_resource_->StartCheckForOveruse(GetCpuOveruseOptions());
}

void VideoStreamEncoderResourceManager::MaybeInitializePixelLimitResource() {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  RTC_DCHECK(adaptation_processor_);
  RTC_DCHECK(!pixel_limit_resource_);
  if (!pixel_limit_resource_experiment_enabled_) {
    // The field trial is not running.
    return;
  }
  int max_pixels = 0;
  std::string pixel_limit_field_trial =
      field_trials_.Lookup(kPixelLimitResourceFieldTrialName);
  if (sscanf(pixel_limit_field_trial.c_str(), "Enabled-%d", &max_pixels) != 1) {
    RTC_LOG(LS_ERROR) << "Couldn't parse " << kPixelLimitResourceFieldTrialName
                      << " trial config: " << pixel_limit_field_trial;
    return;
  }
  RTC_LOG(LS_INFO) << "Running field trial "
                   << kPixelLimitResourceFieldTrialName << " configured to "
                   << max_pixels << " max pixels";
  // Configure the specified max pixels from the field trial. The pixel limit
  // resource is active for the lifetime of the stream (until
  // StopManagedResources() is called).
  pixel_limit_resource_ =
      PixelLimitResource::Create(encoder_queue_, input_state_provider_);
  pixel_limit_resource_->SetMaxPixels(max_pixels);
  AddResource(pixel_limit_resource_, VideoAdaptationReason::kCpu);
}
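
The sscanf() call above expects the trial group string to be of the form "Enabled-<max_pixels>". As a sketch of what enabling this could look like (the FieldTrials helper and the exact pixel count are illustrative, not part of this commit):

// Hypothetical sketch, not part of this commit: cap streams at 1280x720
// (921600 pixels) via the field trial string that the parser above reads.
FieldTrials field_trials("WebRTC-PixelLimitResource/Enabled-921600/");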
|
||||
|
||||
void VideoStreamEncoderResourceManager::StopManagedResources() {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
RTC_DCHECK(adaptation_processor_);
|
||||
if (encode_usage_resource_->is_started()) {
|
||||
encode_usage_resource_->StopCheckForOveruse();
|
||||
RemoveResource(encode_usage_resource_);
|
||||
}
|
||||
if (quality_scaler_resource_->is_started()) {
|
||||
quality_scaler_resource_->StopCheckForOveruse();
|
||||
RemoveResource(quality_scaler_resource_);
|
||||
}
|
||||
if (pixel_limit_resource_) {
|
||||
RemoveResource(pixel_limit_resource_);
|
||||
pixel_limit_resource_ = nullptr;
|
||||
}
|
||||
if (bandwidth_quality_scaler_resource_->is_started()) {
|
||||
bandwidth_quality_scaler_resource_->StopCheckForOveruse();
|
||||
RemoveResource(bandwidth_quality_scaler_resource_);
|
||||
}
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::AddResource(
|
||||
rtc::scoped_refptr<Resource> resource,
|
||||
VideoAdaptationReason reason) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
RTC_DCHECK(resource);
|
||||
bool inserted;
|
||||
std::tie(std::ignore, inserted) = resources_.emplace(resource, reason);
|
||||
RTC_DCHECK(inserted) << "Resource " << resource->Name()
|
||||
<< " already was inserted";
|
||||
adaptation_processor_->AddResource(resource);
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::RemoveResource(
|
||||
rtc::scoped_refptr<Resource> resource) {
|
||||
{
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
RTC_DCHECK(resource);
|
||||
const auto& it = resources_.find(resource);
|
||||
RTC_DCHECK(it != resources_.end())
|
||||
<< "Resource \"" << resource->Name() << "\" not found.";
|
||||
resources_.erase(it);
|
||||
}
|
||||
adaptation_processor_->RemoveResource(resource);
|
||||
}
|
||||
|
||||
std::vector<AdaptationConstraint*>
|
||||
VideoStreamEncoderResourceManager::AdaptationConstraints() const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
return {bitrate_constraint_.get(), balanced_constraint_.get()};
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::SetEncoderSettings(
|
||||
EncoderSettings encoder_settings) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
encoder_settings_ = std::move(encoder_settings);
|
||||
bitrate_constraint_->OnEncoderSettingsUpdated(encoder_settings_);
|
||||
initial_frame_dropper_->OnEncoderSettingsUpdated(
|
||||
encoder_settings_->video_codec(), current_adaptation_counters_);
|
||||
MaybeUpdateTargetFrameRate();
|
||||
if (quality_rampup_experiment_) {
|
||||
quality_rampup_experiment_->ConfigureQualityRampupExperiment(
|
||||
initial_frame_dropper_->last_stream_configuration_changed(),
|
||||
initial_frame_dropper_->single_active_stream_pixels(),
|
||||
GetSingleActiveLayerMaxBitrate(encoder_settings_->video_codec()));
|
||||
}
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::SetStartBitrate(
|
||||
DataRate start_bitrate) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
if (!start_bitrate.IsZero()) {
|
||||
encoder_target_bitrate_bps_ = start_bitrate.bps();
|
||||
bitrate_constraint_->OnEncoderTargetBitrateUpdated(
|
||||
encoder_target_bitrate_bps_);
|
||||
balanced_constraint_->OnEncoderTargetBitrateUpdated(
|
||||
encoder_target_bitrate_bps_);
|
||||
}
|
||||
initial_frame_dropper_->SetStartBitrate(start_bitrate,
|
||||
clock_->TimeInMicroseconds());
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::SetTargetBitrate(
|
||||
DataRate target_bitrate) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
if (!target_bitrate.IsZero()) {
|
||||
encoder_target_bitrate_bps_ = target_bitrate.bps();
|
||||
bitrate_constraint_->OnEncoderTargetBitrateUpdated(
|
||||
encoder_target_bitrate_bps_);
|
||||
balanced_constraint_->OnEncoderTargetBitrateUpdated(
|
||||
encoder_target_bitrate_bps_);
|
||||
}
|
||||
initial_frame_dropper_->SetTargetBitrate(target_bitrate,
|
||||
clock_->TimeInMilliseconds());
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::SetEncoderRates(
|
||||
const VideoEncoder::RateControlParameters& encoder_rates) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
encoder_rates_ = encoder_rates;
|
||||
initial_frame_dropper_->SetBandwidthAllocation(
|
||||
encoder_rates.bandwidth_allocation);
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnFrameDroppedDueToSize() {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
initial_frame_dropper_->OnFrameDroppedDueToSize();
|
||||
Adaptation reduce_resolution = stream_adapter_->GetAdaptDownResolution();
|
||||
if (reduce_resolution.status() == Adaptation::Status::kValid) {
|
||||
stream_adapter_->ApplyAdaptation(reduce_resolution,
|
||||
quality_scaler_resource_);
|
||||
}
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnEncodeStarted(
|
||||
const VideoFrame& cropped_frame,
|
||||
int64_t time_when_first_seen_us) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
encode_usage_resource_->OnEncodeStarted(cropped_frame,
|
||||
time_when_first_seen_us);
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnEncodeCompleted(
|
||||
const EncodedImage& encoded_image,
|
||||
int64_t time_sent_in_us,
|
||||
absl::optional<int> encode_duration_us,
|
||||
DataSize frame_size) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
// Inform `encode_usage_resource_` of the encode completed event.
|
||||
uint32_t timestamp = encoded_image.RtpTimestamp();
|
||||
int64_t capture_time_us =
|
||||
encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;
|
||||
encode_usage_resource_->OnEncodeCompleted(
|
||||
timestamp, time_sent_in_us, capture_time_us, encode_duration_us);
|
||||
quality_scaler_resource_->OnEncodeCompleted(encoded_image, time_sent_in_us);
|
||||
bandwidth_quality_scaler_resource_->OnEncodeCompleted(
|
||||
encoded_image, time_sent_in_us, frame_size.bytes());
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnFrameDropped(
|
||||
EncodedImageCallback::DropReason reason) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
quality_scaler_resource_->OnFrameDropped(reason);
|
||||
}
|
||||
|
||||
bool VideoStreamEncoderResourceManager::DropInitialFrames() const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
return initial_frame_dropper_->DropInitialFrames();
|
||||
}
|
||||
|
||||
absl::optional<uint32_t>
|
||||
VideoStreamEncoderResourceManager::SingleActiveStreamPixels() const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
return initial_frame_dropper_->single_active_stream_pixels();
|
||||
}
|
||||
|
||||
absl::optional<uint32_t>
|
||||
VideoStreamEncoderResourceManager::UseBandwidthAllocationBps() const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
return initial_frame_dropper_->UseBandwidthAllocationBps();
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnMaybeEncodeFrame() {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
initial_frame_dropper_->Disable();
|
||||
if (quality_rampup_experiment_ && quality_scaler_resource_->is_started()) {
|
||||
DataRate bandwidth = encoder_rates_.has_value()
|
||||
? encoder_rates_->bandwidth_allocation
|
||||
: DataRate::Zero();
|
||||
quality_rampup_experiment_->PerformQualityRampupExperiment(
|
||||
quality_scaler_resource_, bandwidth,
|
||||
DataRate::BitsPerSec(encoder_target_bitrate_bps_.value_or(0)),
|
||||
GetSingleActiveLayerMaxBitrate(encoder_settings_->video_codec()));
|
||||
}
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::UpdateQualityScalerSettings(
|
||||
absl::optional<VideoEncoder::QpThresholds> qp_thresholds) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
if (qp_thresholds.has_value()) {
|
||||
if (quality_scaler_resource_->is_started()) {
|
||||
quality_scaler_resource_->SetQpThresholds(qp_thresholds.value());
|
||||
} else {
|
||||
quality_scaler_resource_->StartCheckForOveruse(qp_thresholds.value(),
|
||||
field_trials_);
|
||||
AddResource(quality_scaler_resource_, VideoAdaptationReason::kQuality);
|
||||
}
|
||||
} else if (quality_scaler_resource_->is_started()) {
|
||||
quality_scaler_resource_->StopCheckForOveruse();
|
||||
RemoveResource(quality_scaler_resource_);
|
||||
}
|
||||
initial_frame_dropper_->OnQualityScalerSettingsUpdated();
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::UpdateBandwidthQualityScalerSettings(
|
||||
bool bandwidth_quality_scaling_allowed,
|
||||
const std::vector<VideoEncoder::ResolutionBitrateLimits>&
|
||||
resolution_bitrate_limits) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
|
||||
if (!bandwidth_quality_scaling_allowed) {
|
||||
if (bandwidth_quality_scaler_resource_->is_started()) {
|
||||
bandwidth_quality_scaler_resource_->StopCheckForOveruse();
|
||||
RemoveResource(bandwidth_quality_scaler_resource_);
|
||||
}
|
||||
} else {
|
||||
if (!bandwidth_quality_scaler_resource_->is_started()) {
|
||||
// Before executing "StartCheckForOveruse",we must execute "AddResource"
|
||||
// firstly,because it can make the listener valid.
|
||||
AddResource(bandwidth_quality_scaler_resource_,
|
||||
webrtc::VideoAdaptationReason::kQuality);
|
||||
bandwidth_quality_scaler_resource_->StartCheckForOveruse(
|
||||
resolution_bitrate_limits);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::ConfigureQualityScaler(
|
||||
const VideoEncoder::EncoderInfo& encoder_info) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
const auto scaling_settings = encoder_info.scaling_settings;
|
||||
const bool quality_scaling_allowed =
|
||||
IsResolutionScalingEnabled(degradation_preference_) &&
|
||||
(scaling_settings.thresholds.has_value() ||
|
||||
(encoder_settings_.has_value() &&
|
||||
encoder_settings_->encoder_config().is_quality_scaling_allowed)) &&
|
||||
encoder_info.is_qp_trusted.value_or(true);
|
||||
|
||||
// TODO(https://crbug.com/webrtc/11222): Should this move to
|
||||
// QualityScalerResource?
|
||||
if (quality_scaling_allowed) {
|
||||
if (!quality_scaler_resource_->is_started()) {
|
||||
// Quality scaler has not already been configured.
|
||||
|
||||
// Use experimental thresholds if available.
|
||||
absl::optional<VideoEncoder::QpThresholds> experimental_thresholds;
|
||||
if (quality_scaling_experiment_enabled_) {
|
||||
experimental_thresholds = QualityScalingExperiment::GetQpThresholds(
|
||||
GetVideoCodecTypeOrGeneric(encoder_settings_), field_trials_);
|
||||
}
|
||||
UpdateQualityScalerSettings(experimental_thresholds.has_value()
|
||||
? experimental_thresholds
|
||||
: scaling_settings.thresholds);
|
||||
}
|
||||
} else {
|
||||
UpdateQualityScalerSettings(absl::nullopt);
|
||||
}
|
||||
|
||||
// Set the qp-thresholds to the balanced settings if balanced mode.
|
||||
if (degradation_preference_ == DegradationPreference::BALANCED &&
|
||||
quality_scaler_resource_->is_started()) {
|
||||
absl::optional<VideoEncoder::QpThresholds> thresholds =
|
||||
balanced_settings_.GetQpThresholds(
|
||||
GetVideoCodecTypeOrGeneric(encoder_settings_),
|
||||
LastFrameSizeOrDefault());
|
||||
if (thresholds) {
|
||||
quality_scaler_resource_->SetQpThresholds(*thresholds);
|
||||
}
|
||||
}
|
||||
UpdateStatsAdaptationSettings();
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::ConfigureBandwidthQualityScaler(
|
||||
const VideoEncoder::EncoderInfo& encoder_info) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
const bool bandwidth_quality_scaling_allowed =
|
||||
IsResolutionScalingEnabled(degradation_preference_) &&
|
||||
(encoder_settings_.has_value() &&
|
||||
encoder_settings_->encoder_config().is_quality_scaling_allowed) &&
|
||||
!encoder_info.is_qp_trusted.value_or(true);
|
||||
|
||||
UpdateBandwidthQualityScalerSettings(bandwidth_quality_scaling_allowed,
|
||||
encoder_info.resolution_bitrate_limits);
|
||||
UpdateStatsAdaptationSettings();
|
||||
}
|
||||
|
||||
VideoAdaptationReason VideoStreamEncoderResourceManager::GetReasonFromResource(
|
||||
rtc::scoped_refptr<Resource> resource) const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
const auto& registered_resource = resources_.find(resource);
|
||||
RTC_DCHECK(registered_resource != resources_.end())
|
||||
<< resource->Name() << " not found.";
|
||||
return registered_resource->second;
|
||||
}
|
||||
|
||||
// TODO(pbos): Lower these thresholds (to closer to 100%) when we handle
|
||||
// pipelining encoders better (multiple input frames before something comes
|
||||
// out). This should effectively turn off CPU adaptations for systems that
|
||||
// remotely cope with the load right now.
|
||||
CpuOveruseOptions VideoStreamEncoderResourceManager::GetCpuOveruseOptions()
|
||||
const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
// This is already ensured by the only caller of this method:
|
||||
// StartResourceAdaptation().
|
||||
RTC_DCHECK(encoder_settings_.has_value());
|
||||
CpuOveruseOptions options;
|
||||
// Hardware accelerated encoders are assumed to be pipelined; give them
|
||||
// additional overuse time.
|
||||
if (encoder_settings_->encoder_info().is_hardware_accelerated) {
|
||||
options.low_encode_usage_threshold_percent = 150;
|
||||
options.high_encode_usage_threshold_percent = 200;
|
||||
}
|
||||
if (experiment_cpu_load_estimator_) {
|
||||
options.filter_time_ms = 5 * rtc::kNumMillisecsPerSec;
|
||||
}
|
||||
return options;
|
||||
}
|
||||
|
||||
int VideoStreamEncoderResourceManager::LastFrameSizeOrDefault() const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
return input_state_provider_->InputState()
|
||||
.single_active_stream_pixels()
|
||||
.value_or(
|
||||
input_state_provider_->InputState().frame_size_pixels().value_or(
|
||||
kDefaultInputPixelsWidth * kDefaultInputPixelsHeight));
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnVideoSourceRestrictionsUpdated(
|
||||
VideoSourceRestrictions restrictions,
|
||||
const VideoAdaptationCounters& adaptation_counters,
|
||||
rtc::scoped_refptr<Resource> reason,
|
||||
const VideoSourceRestrictions& unfiltered_restrictions) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
current_adaptation_counters_ = adaptation_counters;
|
||||
|
||||
// TODO(bugs.webrtc.org/11553) Remove reason parameter and add reset callback.
|
||||
if (!reason && adaptation_counters.Total() == 0) {
|
||||
// Adaptation was manually reset - clear the per-reason counters too.
|
||||
encoder_stats_observer_->ClearAdaptationStats();
|
||||
}
|
||||
|
||||
video_source_restrictions_ = FilterRestrictionsByDegradationPreference(
|
||||
restrictions, degradation_preference_);
|
||||
MaybeUpdateTargetFrameRate();
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::OnResourceLimitationChanged(
|
||||
rtc::scoped_refptr<Resource> resource,
|
||||
const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
|
||||
resource_limitations) {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
if (!resource) {
|
||||
encoder_stats_observer_->ClearAdaptationStats();
|
||||
return;
|
||||
}
|
||||
|
||||
std::map<VideoAdaptationReason, VideoAdaptationCounters> limitations;
|
||||
for (auto& resource_counter : resource_limitations) {
|
||||
std::map<VideoAdaptationReason, VideoAdaptationCounters>::iterator it;
|
||||
bool inserted;
|
||||
std::tie(it, inserted) = limitations.emplace(
|
||||
GetReasonFromResource(resource_counter.first), resource_counter.second);
|
||||
if (!inserted && it->second.Total() < resource_counter.second.Total()) {
|
||||
it->second = resource_counter.second;
|
||||
}
|
||||
}
|
||||
|
||||
VideoAdaptationReason adaptation_reason = GetReasonFromResource(resource);
|
||||
encoder_stats_observer_->OnAdaptationChanged(
|
||||
adaptation_reason, limitations[VideoAdaptationReason::kCpu],
|
||||
limitations[VideoAdaptationReason::kQuality]);
|
||||
|
||||
if (quality_rampup_experiment_) {
|
||||
bool cpu_limited = limitations.at(VideoAdaptationReason::kCpu).Total() > 0;
|
||||
auto qp_resolution_adaptations =
|
||||
limitations.at(VideoAdaptationReason::kQuality).resolution_adaptations;
|
||||
quality_rampup_experiment_->cpu_adapted(cpu_limited);
|
||||
quality_rampup_experiment_->qp_resolution_adaptations(
|
||||
qp_resolution_adaptations);
|
||||
}
|
||||
|
||||
RTC_LOG(LS_INFO) << ActiveCountsToString(limitations);
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::MaybeUpdateTargetFrameRate() {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
absl::optional<double> codec_max_frame_rate =
|
||||
encoder_settings_.has_value()
|
||||
? absl::optional<double>(
|
||||
encoder_settings_->video_codec().maxFramerate)
|
||||
: absl::nullopt;
|
||||
// The current target framerate is the maximum frame rate as specified by
|
||||
// the current codec configuration or any limit imposed by the adaptation
|
||||
// module. This is used to make sure overuse detection doesn't needlessly
|
||||
// trigger in low and/or variable framerate scenarios.
|
||||
absl::optional<double> target_frame_rate =
|
||||
video_source_restrictions_.max_frame_rate();
|
||||
if (!target_frame_rate.has_value() ||
|
||||
(codec_max_frame_rate.has_value() &&
|
||||
codec_max_frame_rate.value() < target_frame_rate.value())) {
|
||||
target_frame_rate = codec_max_frame_rate;
|
||||
}
|
||||
encode_usage_resource_->SetTargetFrameRate(target_frame_rate);
|
||||
}
|
||||
|
||||
void VideoStreamEncoderResourceManager::UpdateStatsAdaptationSettings() const {
|
||||
RTC_DCHECK_RUN_ON(encoder_queue_);
|
||||
VideoStreamEncoderObserver::AdaptationSettings cpu_settings(
|
||||
IsResolutionScalingEnabled(degradation_preference_),
|
||||
IsFramerateScalingEnabled(degradation_preference_));
|
||||
|
||||
VideoStreamEncoderObserver::AdaptationSettings quality_settings =
|
||||
(quality_scaler_resource_->is_started() ||
|
||||
bandwidth_quality_scaler_resource_->is_started())
|
||||
? cpu_settings
|
||||
: VideoStreamEncoderObserver::AdaptationSettings();
|
||||
encoder_stats_observer_->UpdateAdaptationSettings(cpu_settings,
|
||||
quality_settings);
|
||||
}

// static
std::string VideoStreamEncoderResourceManager::ActiveCountsToString(
    const std::map<VideoAdaptationReason, VideoAdaptationCounters>&
        active_counts) {
  rtc::StringBuilder ss;

  ss << "Downgrade counts: fps: {";
  for (auto& reason_count : active_counts) {
    ss << ToString(reason_count.first) << ":";
    ss << reason_count.second.fps_adaptations;
  }
  ss << "}, resolution {";
  for (auto& reason_count : active_counts) {
    ss << ToString(reason_count.first) << ":";
    ss << reason_count.second.resolution_adaptations;
  }
  ss << "}";

  return ss.Release();
}

void VideoStreamEncoderResourceManager::OnQualityRampUp() {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  stream_adapter_->ClearRestrictions();
  quality_rampup_experiment_.reset();
}

bool VideoStreamEncoderResourceManager::IsSimulcastOrMultipleSpatialLayers(
    const VideoEncoderConfig& encoder_config,
    const VideoCodec& video_codec) {
  const std::vector<VideoStream>& simulcast_layers =
      encoder_config.simulcast_layers;
  if (simulcast_layers.empty()) {
    return false;
  }

  absl::optional<int> num_spatial_layers;
  if (simulcast_layers[0].scalability_mode.has_value() &&
      video_codec.numberOfSimulcastStreams == 1) {
    num_spatial_layers = ScalabilityModeToNumSpatialLayers(
        *simulcast_layers[0].scalability_mode);
  }

  if (simulcast_layers.size() == 1) {
    // Check if multiple spatial layers are used.
    return num_spatial_layers && *num_spatial_layers > 1;
  }

  bool svc_with_one_spatial_layer =
      num_spatial_layers && *num_spatial_layers == 1;
  if (simulcast_layers[0].active && !svc_with_one_spatial_layer) {
    // We can't distinguish between simulcast and singlecast when only the
    // lowest spatial layer is active. Treat this case as simulcast.
    return true;
  }

  int num_active_layers =
      std::count_if(simulcast_layers.begin(), simulcast_layers.end(),
                    [](const VideoStream& layer) { return layer.active; });
  return num_active_layers > 1;
}

}  // namespace webrtc
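
// A minimal usage sketch (not part of the original source; the values are
// illustrative). Three configured simulcast layers with two of them active
// count as simulcast:
//
//   webrtc::VideoEncoderConfig config;
//   config.simulcast_layers.resize(3);
//   config.simulcast_layers[0].active = true;
//   config.simulcast_layers[1].active = true;
//   webrtc::VideoCodec codec;
//   codec.numberOfSimulcastStreams = 3;
//   bool multi = webrtc::VideoStreamEncoderResourceManager::
//       IsSimulcastOrMultipleSpatialLayers(config, codec);  // true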
@ -0,0 +1,239 @@
/*
 * Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_MANAGER_H_
#define VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_MANAGER_H_

#include <atomic>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/field_trials_view.h"
#include "api/rtp_parameters.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/video_adaptation_counters.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video/video_frame.h"
#include "api/video/video_source_interface.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "call/adaptation/video_stream_adapter.h"
#include "call/adaptation/video_stream_input_state_provider.h"
#include "rtc_base/experiments/quality_scaler_settings.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
#include "system_wrappers/include/clock.h"
#include "video/adaptation/balanced_constraint.h"
#include "video/adaptation/bandwidth_quality_scaler_resource.h"
#include "video/adaptation/bitrate_constraint.h"
#include "video/adaptation/encode_usage_resource.h"
#include "video/adaptation/overuse_frame_detector.h"
#include "video/adaptation/pixel_limit_resource.h"
#include "video/adaptation/quality_rampup_experiment_helper.h"
#include "video/adaptation/quality_scaler_resource.h"
#include "video/adaptation/video_stream_encoder_resource.h"
#include "video/config/video_encoder_config.h"
#include "video/video_stream_encoder_observer.h"

namespace webrtc {

// The assumed input frame size if we have not yet received a frame.
// TODO(hbos): This is 144p - why are we assuming super low quality? Seems like
// a bad heuristic.
extern const int kDefaultInputPixelsWidth;
extern const int kDefaultInputPixelsHeight;

// Owns adaptation-related Resources pertaining to a single VideoStreamEncoder
// and passes on the relevant input from the encoder to the resources. The
// resources provide resource usage states to the ResourceAdaptationProcessor,
// which is responsible for reconfiguring streams in order not to overuse
// resources.
//
// The manager is also involved with various mitigations not part of the
// ResourceAdaptationProcessor code, such as the initial frame dropping.
class VideoStreamEncoderResourceManager
    : public VideoSourceRestrictionsListener,
      public ResourceLimitationsListener,
      public QualityRampUpExperimentListener {
 public:
  VideoStreamEncoderResourceManager(
      VideoStreamInputStateProvider* input_state_provider,
      VideoStreamEncoderObserver* encoder_stats_observer,
      Clock* clock,
      bool experiment_cpu_load_estimator,
      std::unique_ptr<OveruseFrameDetector> overuse_detector,
      DegradationPreferenceProvider* degradation_preference_provider,
      const FieldTrialsView& field_trials);
  ~VideoStreamEncoderResourceManager() override;

  void Initialize(TaskQueueBase* encoder_queue);
  void SetAdaptationProcessor(
      ResourceAdaptationProcessorInterface* adaptation_processor,
      VideoStreamAdapter* stream_adapter);

  // TODO(https://crbug.com/webrtc/11563): The degradation preference is a
  // setting of the Processor, it does not belong to the Manager - can we get
  // rid of this?
  void SetDegradationPreferences(DegradationPreference degradation_preference);
  DegradationPreference degradation_preference() const;

  void ConfigureEncodeUsageResource();
  // Initializes the pixel limit resource if the "WebRTC-PixelLimitResource"
  // field trial is enabled. This can be used for testing.
  void MaybeInitializePixelLimitResource();
  // Stops the encode usage and quality scaler resources if not already
  // stopped. If the pixel limit resource was created it is also stopped and
  // nulled.
  void StopManagedResources();

  // Settings that affect the VideoStreamEncoder-specific resources.
  void SetEncoderSettings(EncoderSettings encoder_settings);
  void SetStartBitrate(DataRate start_bitrate);
  void SetTargetBitrate(DataRate target_bitrate);
  void SetEncoderRates(
      const VideoEncoder::RateControlParameters& encoder_rates);
  // TODO(https://crbug.com/webrtc/11338): This can be made private if we
  // configure on SetDegradationPreferences and SetEncoderSettings.
  void ConfigureQualityScaler(const VideoEncoder::EncoderInfo& encoder_info);
  void ConfigureBandwidthQualityScaler(
      const VideoEncoder::EncoderInfo& encoder_info);

  // Methods corresponding to different points in the encoding pipeline.
  void OnFrameDroppedDueToSize();
  void OnMaybeEncodeFrame();
  void OnEncodeStarted(const VideoFrame& cropped_frame,
                       int64_t time_when_first_seen_us);
  void OnEncodeCompleted(const EncodedImage& encoded_image,
                         int64_t time_sent_in_us,
                         absl::optional<int> encode_duration_us,
                         DataSize frame_size);
  void OnFrameDropped(EncodedImageCallback::DropReason reason);

  // Resources need to be mapped to an AdaptReason (kCpu or kQuality) in order
  // to update legacy getStats().
  void AddResource(rtc::scoped_refptr<Resource> resource,
                   VideoAdaptationReason reason);
  void RemoveResource(rtc::scoped_refptr<Resource> resource);
  std::vector<AdaptationConstraint*> AdaptationConstraints() const;
  // If true, the VideoStreamEncoder should execute its logic to maybe drop
  // frames based on size and bitrate.
  bool DropInitialFrames() const;
  absl::optional<uint32_t> SingleActiveStreamPixels() const;
  absl::optional<uint32_t> UseBandwidthAllocationBps() const;

  // VideoSourceRestrictionsListener implementation.
  // Updates `video_source_restrictions_`.
  void OnVideoSourceRestrictionsUpdated(
      VideoSourceRestrictions restrictions,
      const VideoAdaptationCounters& adaptation_counters,
      rtc::scoped_refptr<Resource> reason,
      const VideoSourceRestrictions& unfiltered_restrictions) override;
  void OnResourceLimitationChanged(
      rtc::scoped_refptr<Resource> resource,
      const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
          resource_limitations) override;

  // QualityRampUpExperimentListener implementation.
  void OnQualityRampUp() override;

  static bool IsSimulcastOrMultipleSpatialLayers(
      const VideoEncoderConfig& encoder_config,
      const VideoCodec& video_codec);

 private:
  class InitialFrameDropper;

  VideoAdaptationReason GetReasonFromResource(
      rtc::scoped_refptr<Resource> resource) const;

  CpuOveruseOptions GetCpuOveruseOptions() const;
  int LastFrameSizeOrDefault() const;

  // Calculates an up-to-date value of the target frame rate and informs the
  // `encode_usage_resource_` of the new value.
  void MaybeUpdateTargetFrameRate();

  // Use nullopt to disable quality scaling.
  void UpdateQualityScalerSettings(
      absl::optional<VideoEncoder::QpThresholds> qp_thresholds);

  void UpdateBandwidthQualityScalerSettings(
      bool bandwidth_quality_scaling_allowed,
      const std::vector<VideoEncoder::ResolutionBitrateLimits>&
          resolution_bitrate_limits);

  void UpdateStatsAdaptationSettings() const;

  static std::string ActiveCountsToString(
      const std::map<VideoAdaptationReason, VideoAdaptationCounters>&
          active_counts);

  const FieldTrialsView& field_trials_;
  DegradationPreferenceProvider* const degradation_preference_provider_;
  std::unique_ptr<BitrateConstraint> bitrate_constraint_
      RTC_GUARDED_BY(encoder_queue_);
  const std::unique_ptr<BalancedConstraint> balanced_constraint_
      RTC_GUARDED_BY(encoder_queue_);
  const rtc::scoped_refptr<EncodeUsageResource> encode_usage_resource_;
  const rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource_;
  rtc::scoped_refptr<PixelLimitResource> pixel_limit_resource_;
  const rtc::scoped_refptr<BandwidthQualityScalerResource>
      bandwidth_quality_scaler_resource_;

  TaskQueueBase* encoder_queue_;
  VideoStreamInputStateProvider* const input_state_provider_
      RTC_GUARDED_BY(encoder_queue_);
  ResourceAdaptationProcessorInterface* adaptation_processor_;
  VideoStreamAdapter* stream_adapter_ RTC_GUARDED_BY(encoder_queue_);
  // Thread-safe.
  VideoStreamEncoderObserver* const encoder_stats_observer_;

  DegradationPreference degradation_preference_ RTC_GUARDED_BY(encoder_queue_);
  VideoSourceRestrictions video_source_restrictions_
      RTC_GUARDED_BY(encoder_queue_);

  VideoAdaptationCounters current_adaptation_counters_
      RTC_GUARDED_BY(encoder_queue_);

  const BalancedDegradationSettings balanced_settings_;
  Clock* clock_ RTC_GUARDED_BY(encoder_queue_);
  const bool experiment_cpu_load_estimator_ RTC_GUARDED_BY(encoder_queue_);
  const std::unique_ptr<InitialFrameDropper> initial_frame_dropper_
      RTC_GUARDED_BY(encoder_queue_);
  const bool quality_scaling_experiment_enabled_ RTC_GUARDED_BY(encoder_queue_);
  const bool pixel_limit_resource_experiment_enabled_
      RTC_GUARDED_BY(encoder_queue_);
  absl::optional<uint32_t> encoder_target_bitrate_bps_
      RTC_GUARDED_BY(encoder_queue_);
  absl::optional<VideoEncoder::RateControlParameters> encoder_rates_
      RTC_GUARDED_BY(encoder_queue_);
  std::unique_ptr<QualityRampUpExperimentHelper> quality_rampup_experiment_
      RTC_GUARDED_BY(encoder_queue_);
  absl::optional<EncoderSettings> encoder_settings_
      RTC_GUARDED_BY(encoder_queue_);

  // Ties a resource to a reason for statistical reporting. This AdaptReason is
  // also used by this module to make decisions about how to adapt up/down.
  std::map<rtc::scoped_refptr<Resource>, VideoAdaptationReason> resources_
      RTC_GUARDED_BY(encoder_queue_);
};

}  // namespace webrtc

#endif  // VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_MANAGER_H_
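
// A minimal lifecycle sketch (not part of the original source; the injected
// collaborators are placeholders). The manager is created with its
// dependencies, attached to the encoder queue, and then wired to the
// adaptation processor before resources are configured:
//
//   webrtc::VideoStreamEncoderResourceManager manager(
//       &input_state_provider, &stats_observer, clock,
//       /*experiment_cpu_load_estimator=*/false, std::move(overuse_detector),
//       &degradation_preference_provider, field_trials);
//   manager.Initialize(encoder_queue);
//   manager.SetAdaptationProcessor(&processor, &stream_adapter);
//   manager.ConfigureEncodeUsageResource();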
126
TMessagesProj/jni/voip/webrtc/video/alignment_adjuster.cc
Normal file
@ -0,0 +1,126 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/alignment_adjuster.h"

#include <algorithm>
#include <limits>

#include "absl/algorithm/container.h"
#include "rtc_base/logging.h"

namespace webrtc {
namespace {
// Round each scale factor to the closest rational in the form alignment/i
// where i is a multiple of `requested_alignment`. Each resolution divisible
// by `alignment` will be divisible by `requested_alignment` after the scale
// factor is applied.
double RoundToMultiple(int alignment,
                       int requested_alignment,
                       VideoEncoderConfig* config,
                       bool update_config) {
  double diff = 0.0;
  for (auto& layer : config->simulcast_layers) {
    double min_dist = std::numeric_limits<double>::max();
    double new_scale = 1.0;
    for (int i = requested_alignment; i <= alignment;
         i += requested_alignment) {
      double dist = std::abs(layer.scale_resolution_down_by -
                             alignment / static_cast<double>(i));
      if (dist <= min_dist) {
        min_dist = dist;
        new_scale = alignment / static_cast<double>(i);
      }
    }
    diff += std::abs(layer.scale_resolution_down_by - new_scale);
    if (update_config) {
      RTC_LOG(LS_INFO) << "scale_resolution_down_by "
                       << layer.scale_resolution_down_by << " -> " << new_scale;
      layer.scale_resolution_down_by = new_scale;
    }
  }
  return diff;
}
}  // namespace

// Input: encoder_info.requested_resolution_alignment (K)
// Input: encoder_info.apply_alignment_to_all_simulcast_layers (B)
// Input: vector config->simulcast_layers.scale_resolution_down_by (S[i])
// Output:
// If B is false, returns K and does not adjust scaling factors.
// Otherwise, returns the adjusted alignment (A); adjusted scaling factors
// (S'[i]) are written to `config` such that:
//
// A / S'[i] are integers divisible by K
// sum abs(S'[i] - S[i]) -> min
// A integer <= 16
//
// The solution chooses the closest S'[i] of the form A / j where j is a
// multiple of K.

int AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
    const VideoEncoder::EncoderInfo& encoder_info,
    VideoEncoderConfig* config,
    absl::optional<size_t> max_layers) {
  const int requested_alignment = encoder_info.requested_resolution_alignment;
  if (!encoder_info.apply_alignment_to_all_simulcast_layers) {
    return requested_alignment;
  }

  if (requested_alignment < 1 || config->number_of_streams <= 1 ||
      config->simulcast_layers.size() <= 1) {
    return requested_alignment;
  }

  // Update alignment to also apply to simulcast layers.
  const bool has_scale_resolution_down_by = absl::c_any_of(
      config->simulcast_layers, [](const webrtc::VideoStream& layer) {
        return layer.scale_resolution_down_by >= 1.0;
      });

  if (!has_scale_resolution_down_by) {
    // Default resolution downscaling used (scale factors: 1, 2, 4, ...).
    size_t size = config->simulcast_layers.size();
    if (max_layers && *max_layers > 0 && *max_layers < size) {
      size = *max_layers;
    }
    return requested_alignment * (1 << (size - 1));
  }

  // Get alignment for downscaled layers.
  // Adjust `scale_resolution_down_by` to a common multiple to limit the
  // alignment value (to avoid largely cropped frames and possibly an aspect
  // ratio far from the original).
  const int kMaxAlignment = 16;

  for (auto& layer : config->simulcast_layers) {
    layer.scale_resolution_down_by =
        std::max(layer.scale_resolution_down_by, 1.0);
    layer.scale_resolution_down_by =
        std::min(layer.scale_resolution_down_by, 10000.0);
  }

  // Decide on the common multiple to use.
  double min_diff = std::numeric_limits<double>::max();
  int best_alignment = 1;
  for (int alignment = requested_alignment; alignment <= kMaxAlignment;
       ++alignment) {
    double diff = RoundToMultiple(alignment, requested_alignment, config,
                                  /*update_config=*/false);
    if (diff < min_diff) {
      min_diff = diff;
      best_alignment = alignment;
    }
  }
  RoundToMultiple(best_alignment, requested_alignment, config,
                  /*update_config=*/true);

  return std::max(best_alignment, requested_alignment);
}
}  // namespace webrtc
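
// Worked example (not part of the original source): with
// requested_resolution_alignment K = 8,
// apply_alignment_to_all_simulcast_layers enabled and default scale factors
// (three layers, no explicit scale_resolution_down_by), the default path
// returns K * 2^(layers - 1) = 8 * 4 = 32. With explicit factors, the search
// over alignments 8..16 picks the alignment A minimizing the total adjustment
// of the factors towards values of the form A / (i * 8), and rewrites
// scale_resolution_down_by accordingly.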
42
TMessagesProj/jni/voip/webrtc/video/alignment_adjuster.h
Normal file
@ -0,0 +1,42 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ALIGNMENT_ADJUSTER_H_
#define VIDEO_ALIGNMENT_ADJUSTER_H_

#include "api/video_codecs/video_encoder.h"
#include "video/config/video_encoder_config.h"

namespace webrtc {

class AlignmentAdjuster {
 public:
  // Returns the resolution alignment requested by the encoder (i.e.
  // `EncoderInfo::requested_resolution_alignment`, which ensures that frames
  // delivered to the encoder are divisible by this alignment).
  //
  // If `EncoderInfo::apply_alignment_to_all_simulcast_layers` is enabled, the
  // alignment will be adjusted to ensure that each simulcast layer is also
  // divisible by `requested_resolution_alignment`. The configured scale
  // factors `scale_resolution_down_by` may be adjusted to a common multiple
  // to limit the alignment value, avoiding largely cropped frames and
  // possibly an aspect ratio far from the original.

  // Note: `max_layers` is currently only taken into account when using
  // default scale factors.
  static int GetAlignmentAndMaybeAdjustScaleFactors(
      const VideoEncoder::EncoderInfo& info,
      VideoEncoderConfig* config,
      absl::optional<size_t> max_layers);
};

}  // namespace webrtc

#endif  // VIDEO_ALIGNMENT_ADJUSTER_H_
123
TMessagesProj/jni/voip/webrtc/video/buffered_frame_decryptor.cc
Normal file
@ -0,0 +1,123 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/buffered_frame_decryptor.h"

#include <utility>
#include <vector>

#include "modules/rtp_rtcp/source/frame_object.h"
#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"

namespace webrtc {

BufferedFrameDecryptor::BufferedFrameDecryptor(
    OnDecryptedFrameCallback* decrypted_frame_callback,
    OnDecryptionStatusChangeCallback* decryption_status_change_callback,
    const FieldTrialsView& field_trials)
    : generic_descriptor_auth_experiment_(
          !field_trials.IsDisabled("WebRTC-GenericDescriptorAuth")),
      decrypted_frame_callback_(decrypted_frame_callback),
      decryption_status_change_callback_(decryption_status_change_callback) {}

BufferedFrameDecryptor::~BufferedFrameDecryptor() {}

void BufferedFrameDecryptor::SetFrameDecryptor(
    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
  frame_decryptor_ = std::move(frame_decryptor);
}

void BufferedFrameDecryptor::ManageEncryptedFrame(
    std::unique_ptr<RtpFrameObject> encrypted_frame) {
  switch (DecryptFrame(encrypted_frame.get())) {
    case FrameDecision::kStash:
      if (stashed_frames_.size() >= kMaxStashedFrames) {
        RTC_LOG(LS_WARNING)
            << "Encrypted frame stash full, popping oldest item.";
        stashed_frames_.pop_front();
      }
      stashed_frames_.push_back(std::move(encrypted_frame));
      break;
    case FrameDecision::kDecrypted:
      RetryStashedFrames();
      decrypted_frame_callback_->OnDecryptedFrame(std::move(encrypted_frame));
      break;
    case FrameDecision::kDrop:
      break;
  }
}

BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
    RtpFrameObject* frame) {
  // Optionally attempt to decrypt the raw video frame if it was provided.
  if (frame_decryptor_ == nullptr) {
    RTC_LOG(LS_INFO) << "Frame decryption required but not attached to this "
                        "stream. Stashing frame.";
    return FrameDecision::kStash;
  }
  // Retrieve the maximum possible size of the decrypted payload.
  const size_t max_plaintext_byte_size =
      frame_decryptor_->GetMaxPlaintextByteSize(cricket::MEDIA_TYPE_VIDEO,
                                                frame->size());
  RTC_CHECK_LE(max_plaintext_byte_size, frame->size());
  // Place the decrypted frame inline into the existing frame.
  rtc::ArrayView<uint8_t> inline_decrypted_bitstream(frame->mutable_data(),
                                                     max_plaintext_byte_size);

  // Enable authenticating the header if the field trial isn't disabled.
  std::vector<uint8_t> additional_data;
  if (generic_descriptor_auth_experiment_) {
    additional_data = RtpDescriptorAuthentication(frame->GetRtpVideoHeader());
  }

  // Attempt to decrypt the video frame.
  const FrameDecryptorInterface::Result decrypt_result =
      frame_decryptor_->Decrypt(cricket::MEDIA_TYPE_VIDEO, /*csrcs=*/{},
                                additional_data, *frame,
                                inline_decrypted_bitstream);
  // Optionally call the callback if there was a change in status.
  if (decrypt_result.status != last_status_) {
    last_status_ = decrypt_result.status;
    decryption_status_change_callback_->OnDecryptionStatusChange(
        decrypt_result.status);
  }

  if (!decrypt_result.IsOk()) {
    // Only stash frames if we have never decrypted a frame before.
    return first_frame_decrypted_ ? FrameDecision::kDrop
                                  : FrameDecision::kStash;
  }
  RTC_CHECK_LE(decrypt_result.bytes_written, max_plaintext_byte_size);
  // Update the frame to contain just the written bytes.
  frame->set_size(decrypt_result.bytes_written);

  // Indicate that all future frames that fail to decrypt should be dropped.
  if (!first_frame_decrypted_) {
    first_frame_decrypted_ = true;
  }

  return FrameDecision::kDecrypted;
}

void BufferedFrameDecryptor::RetryStashedFrames() {
  if (!stashed_frames_.empty()) {
    RTC_LOG(LS_INFO) << "Retrying stashed encrypted frames. Count: "
                     << stashed_frames_.size();
  }
  for (auto& frame : stashed_frames_) {
    if (DecryptFrame(frame.get()) == FrameDecision::kDecrypted) {
      decrypted_frame_callback_->OnDecryptedFrame(std::move(frame));
    }
  }
  stashed_frames_.clear();
}

}  // namespace webrtc
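
// Decision-flow recap (not part of the original source): before any frame has
// been decrypted successfully, a failed (or impossible) decryption returns
// kStash and the frame is buffered, up to kMaxStashedFrames. After the first
// successful decryption, failures return kDrop instead, and every kDecrypted
// result first retries the stash so previously buffered frames are released
// in order.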
105
TMessagesProj/jni/voip/webrtc/video/buffered_frame_decryptor.h
Normal file
@ -0,0 +1,105 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_BUFFERED_FRAME_DECRYPTOR_H_
#define VIDEO_BUFFERED_FRAME_DECRYPTOR_H_

#include <deque>
#include <memory>

#include "api/crypto/crypto_options.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/field_trials_view.h"
#include "modules/rtp_rtcp/source/frame_object.h"

namespace webrtc {

// This callback is provided during the construction of the
// BufferedFrameDecryptor and is called each time a frame is successfully
// decrypted by the buffer.
class OnDecryptedFrameCallback {
 public:
  virtual ~OnDecryptedFrameCallback() = default;
  // Called each time a decrypted frame is returned.
  virtual void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) = 0;
};

// This callback is called each time there is a status change in the decryption
// stream. For example, going from no decryption state to a first decryption,
// or from a decryptable state to a non-decryptable state.
class OnDecryptionStatusChangeCallback {
 public:
  virtual ~OnDecryptionStatusChangeCallback() = default;
  // Called each time the decryption stream status changes. This call is
  // blocking so the caller must relinquish the callback quickly. This status
  // must match what is specified in the FrameDecryptorInterface file. Notably
  // 0 must indicate success and any positive integer is a failure.
  virtual void OnDecryptionStatusChange(
      FrameDecryptorInterface::Status status) = 0;
};

// The BufferedFrameDecryptor is responsible for deciding when to pass
// decrypted received frames onto the OnDecryptedFrameCallback. Frames can be
// delayed when frame encryption is enabled but the key hasn't arrived yet. In
// this case we stash about 1 second of encrypted frames instead of dropping
// them to prevent re-requesting the key frame. This optimization is
// particularly important on low-bandwidth networks. Note that stashing is
// only ever done if we have never successfully decrypted a frame before.
// After the first successful decryption, payloads will never be stashed.
class BufferedFrameDecryptor final {
 public:
  // Constructs a new BufferedFrameDecryptor that can hold a short backlog of
  // still-encrypted frames until they become decryptable.
  explicit BufferedFrameDecryptor(
      OnDecryptedFrameCallback* decrypted_frame_callback,
      OnDecryptionStatusChangeCallback* decryption_status_change_callback,
      const FieldTrialsView& field_trials);

  ~BufferedFrameDecryptor();
  // This object cannot be copied.
  BufferedFrameDecryptor(const BufferedFrameDecryptor&) = delete;
  BufferedFrameDecryptor& operator=(const BufferedFrameDecryptor&) = delete;

  // Sets a new frame decryptor as the decryptor for the buffered frame
  // decryptor. This allows the decryptor to be switched out without resetting
  // the video stream.
  void SetFrameDecryptor(
      rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);

  // Determines whether the frame should be stashed, dropped or handed off to
  // the OnDecryptedFrameCallback.
  void ManageEncryptedFrame(std::unique_ptr<RtpFrameObject> encrypted_frame);

 private:
  // Represents what should be done with a given frame.
  enum class FrameDecision { kStash, kDecrypted, kDrop };

  // Attempts to decrypt the frame. If it fails and no prior frame has been
  // decrypted, returns kStash; otherwise failed decryptions return kDrop.
  // Successful decryptions always return kDecrypted.
  FrameDecision DecryptFrame(RtpFrameObject* frame);
  // Retries all the stashed frames; this is triggered each time a kDecrypted
  // event occurs.
  void RetryStashedFrames();

  static const size_t kMaxStashedFrames = 24;

  const bool generic_descriptor_auth_experiment_;
  bool first_frame_decrypted_ = false;
  FrameDecryptorInterface::Status last_status_ =
      FrameDecryptorInterface::Status::kUnknown;
  rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
  OnDecryptedFrameCallback* const decrypted_frame_callback_;
  OnDecryptionStatusChangeCallback* const decryption_status_change_callback_;
  std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
};

}  // namespace webrtc

#endif  // VIDEO_BUFFERED_FRAME_DECRYPTOR_H_
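
// A minimal wiring sketch (not part of the original source; the Receiver
// class is a placeholder). A consumer implements both callbacks, constructs
// the buffer, and feeds it encrypted frames as they arrive:
//
//   class Receiver : public OnDecryptedFrameCallback,
//                    public OnDecryptionStatusChangeCallback {
//    public:
//     void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) override;
//     void OnDecryptionStatusChange(
//         FrameDecryptorInterface::Status status) override;
//   };
//
//   Receiver receiver;
//   BufferedFrameDecryptor buffer(&receiver, &receiver, field_trials);
//   buffer.SetFrameDecryptor(frame_decryptor);  // May be swapped out later.
//   buffer.ManageEncryptedFrame(std::move(encrypted_frame));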
168
TMessagesProj/jni/voip/webrtc/video/call_stats2.cc
Normal file
@ -0,0 +1,168 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/call_stats2.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "absl/algorithm/container.h"
#include "rtc_base/checks.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {
namespace internal {
namespace {

void RemoveOldReports(int64_t now, std::list<CallStats::RttTime>* reports) {
  static constexpr const int64_t kRttTimeoutMs = 1500;
  reports->remove_if(
      [&now](CallStats::RttTime& r) { return now - r.time > kRttTimeoutMs; });
}

int64_t GetMaxRttMs(const std::list<CallStats::RttTime>& reports) {
  int64_t max_rtt_ms = -1;
  for (const CallStats::RttTime& rtt_time : reports)
    max_rtt_ms = std::max(rtt_time.rtt, max_rtt_ms);
  return max_rtt_ms;
}

int64_t GetAvgRttMs(const std::list<CallStats::RttTime>& reports) {
  RTC_DCHECK(!reports.empty());
  int64_t sum = 0;
  for (std::list<CallStats::RttTime>::const_iterator it = reports.begin();
       it != reports.end(); ++it) {
    sum += it->rtt;
  }
  return sum / reports.size();
}

int64_t GetNewAvgRttMs(const std::list<CallStats::RttTime>& reports,
                       int64_t prev_avg_rtt) {
  if (reports.empty())
    return -1;  // Reset (invalid average).

  int64_t cur_rtt_ms = GetAvgRttMs(reports);
  if (prev_avg_rtt == -1)
    return cur_rtt_ms;  // New initial average value.

  // Weight factor to apply to the average rtt.
  // We weigh the old average at 70% against the new average (30%).
  constexpr const float kWeightFactor = 0.3f;
  return prev_avg_rtt * (1.0f - kWeightFactor) + cur_rtt_ms * kWeightFactor;
}
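
// Worked example (not part of the original source): with a previous average
// of 100 ms and a current one-interval average of 200 ms, the smoothed value
// is 100 * 0.7 + 200 * 0.3 = 130 ms. A previous value of -1 (no estimate yet)
// is replaced outright by the current average.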

}  // namespace

constexpr TimeDelta CallStats::kUpdateInterval;

CallStats::CallStats(Clock* clock, TaskQueueBase* task_queue)
    : clock_(clock),
      max_rtt_ms_(-1),
      avg_rtt_ms_(-1),
      sum_avg_rtt_ms_(0),
      num_avg_rtt_(0),
      time_of_first_rtt_ms_(-1),
      task_queue_(task_queue) {
  RTC_DCHECK(task_queue_);
  RTC_DCHECK_RUN_ON(task_queue_);
}

CallStats::~CallStats() {
  RTC_DCHECK_RUN_ON(task_queue_);
  RTC_DCHECK(observers_.empty());

  repeating_task_.Stop();

  UpdateHistograms();
}

void CallStats::EnsureStarted() {
  RTC_DCHECK_RUN_ON(task_queue_);
  repeating_task_ =
      RepeatingTaskHandle::DelayedStart(task_queue_, kUpdateInterval, [this]() {
        UpdateAndReport();
        return kUpdateInterval;
      });
}

void CallStats::UpdateAndReport() {
  RTC_DCHECK_RUN_ON(task_queue_);

  RemoveOldReports(clock_->CurrentTime().ms(), &reports_);
  max_rtt_ms_ = GetMaxRttMs(reports_);
  avg_rtt_ms_ = GetNewAvgRttMs(reports_, avg_rtt_ms_);

  // If there is a valid rtt, update all observers with the max rtt.
  if (max_rtt_ms_ >= 0) {
    RTC_DCHECK_GE(avg_rtt_ms_, 0);
    for (CallStatsObserver* observer : observers_)
      observer->OnRttUpdate(avg_rtt_ms_, max_rtt_ms_);
    // Sum for Histogram of average RTT reported over the entire call.
    sum_avg_rtt_ms_ += avg_rtt_ms_;
    ++num_avg_rtt_;
  }
}

void CallStats::RegisterStatsObserver(CallStatsObserver* observer) {
  RTC_DCHECK_RUN_ON(task_queue_);
  if (!absl::c_linear_search(observers_, observer))
    observers_.push_back(observer);
}

void CallStats::DeregisterStatsObserver(CallStatsObserver* observer) {
  RTC_DCHECK_RUN_ON(task_queue_);
  observers_.remove(observer);
}

int64_t CallStats::LastProcessedRtt() const {
  RTC_DCHECK_RUN_ON(task_queue_);
  // No need for locking since we're on the construction thread.
  return avg_rtt_ms_;
}

void CallStats::OnRttUpdate(int64_t rtt) {
  // For some RtpRtcp module instances (video send stream), this callback may
  // be invoked from a separate task queue; in other cases, we should already
  // be on the correct TQ.
  int64_t now_ms = clock_->TimeInMilliseconds();
  auto update = [this, rtt, now_ms]() {
    RTC_DCHECK_RUN_ON(task_queue_);
    reports_.push_back(RttTime(rtt, now_ms));
    if (time_of_first_rtt_ms_ == -1)
      time_of_first_rtt_ms_ = now_ms;
    UpdateAndReport();
  };

  if (task_queue_->IsCurrent()) {
    update();
  } else {
    task_queue_->PostTask(SafeTask(task_safety_.flag(), std::move(update)));
  }
}

void CallStats::UpdateHistograms() {
  RTC_DCHECK_RUN_ON(task_queue_);

  if (time_of_first_rtt_ms_ == -1 || num_avg_rtt_ < 1)
    return;

  int64_t elapsed_sec =
      (clock_->TimeInMilliseconds() - time_of_first_rtt_ms_) / 1000;
  if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
    int64_t avg_rtt_ms = (sum_avg_rtt_ms_ + num_avg_rtt_ / 2) / num_avg_rtt_;
    RTC_HISTOGRAM_COUNTS_10000(
        "WebRTC.Video.AverageRoundTripTimeInMilliseconds", avg_rtt_ms);
  }
}

}  // namespace internal
}  // namespace webrtc
135
TMessagesProj/jni/voip/webrtc/video/call_stats2.h
Normal file
@ -0,0 +1,135 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_CALL_STATS2_H_
#define VIDEO_CALL_STATS2_H_

#include <list>
#include <memory>

#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/units/timestamp.h"
#include "modules/include/module_common_types.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/clock.h"

namespace webrtc {
namespace internal {

class CallStats {
 public:
  // Time interval for updating the observers.
  static constexpr TimeDelta kUpdateInterval = TimeDelta::Millis(1000);

  // Must be created and destroyed on the same task_queue.
  CallStats(Clock* clock, TaskQueueBase* task_queue);
  ~CallStats();

  CallStats(const CallStats&) = delete;
  CallStats& operator=(const CallStats&) = delete;

  // Ensure that necessary repeating tasks are started.
  void EnsureStarted();

  // Expose an RtcpRttStats implementation without inheriting from
  // RtcpRttStats. That allows us to separate the threading model of how
  // RtcpRttStats is used (mostly on a process thread) and how CallStats is
  // used (mostly on the TQ/worker thread). Since for both cases, there is a
  // LastProcessedRtt() method, this separation allows us to not need a lock
  // for either.
  RtcpRttStats* AsRtcpRttStats() { return &rtcp_rtt_stats_impl_; }

  // Registers/deregisters a new observer to receive statistics updates.
  // Must be called from the construction thread.
  void RegisterStatsObserver(CallStatsObserver* observer);
  void DeregisterStatsObserver(CallStatsObserver* observer);

  // Expose `LastProcessedRtt()` from RtcpRttStats to the public interface, as
  // it is the part of the API that is needed by direct users of CallStats.
  int64_t LastProcessedRtt() const;

  // Exposed for tests to test histogram support.
  void UpdateHistogramsForTest() { UpdateHistograms(); }

  // Helper struct keeping track of the time an rtt value is reported.
  struct RttTime {
    RttTime(int64_t new_rtt, int64_t rtt_time) : rtt(new_rtt), time(rtt_time) {}
    const int64_t rtt;
    const int64_t time;
  };

 private:
  // Part of the RtcpRttStats implementation. Called by RtcpRttStatsImpl.
  void OnRttUpdate(int64_t rtt);

  void UpdateAndReport();

  // This method must only be called when the process thread is not
  // running, and from the construction thread.
  void UpdateHistograms();

  class RtcpRttStatsImpl : public RtcpRttStats {
   public:
    explicit RtcpRttStatsImpl(CallStats* owner) : owner_(owner) {}
    ~RtcpRttStatsImpl() override = default;

   private:
    void OnRttUpdate(int64_t rtt) override {
      // For video send streams (video/video_send_stream.cc), the RtpRtcp
      // module is currently created on a transport worker TaskQueue and not
      // the worker thread - which is what happens in other cases. We should
      // probably fix that so that the call consistently comes in on the right
      // thread.
      owner_->OnRttUpdate(rtt);
    }

    int64_t LastProcessedRtt() const override {
      // This call path shouldn't be used anymore. This impl is only for
      // propagating the rtt from the RtpRtcp module, which does not call
      // LastProcessedRtt(). Down the line we should consider removing
      // LastProcessedRtt() and use the interface for event notifications only.
      RTC_DCHECK_NOTREACHED() << "Legacy call path";
      return 0;
    }

    CallStats* const owner_;
  } rtcp_rtt_stats_impl_{this};

  Clock* const clock_;

  // Used to regularly call UpdateAndReport().
  RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(task_queue_);

  // The maximum RTT seen in the last statistics update (-1 if there is no
  // valid estimate).
  int64_t max_rtt_ms_ RTC_GUARDED_BY(task_queue_);

  // Last reported average RTT value.
  int64_t avg_rtt_ms_ RTC_GUARDED_BY(task_queue_);

  int64_t sum_avg_rtt_ms_ RTC_GUARDED_BY(task_queue_);
  int64_t num_avg_rtt_ RTC_GUARDED_BY(task_queue_);
  int64_t time_of_first_rtt_ms_ RTC_GUARDED_BY(task_queue_);

  // All Rtt reports within valid time interval, oldest first.
  std::list<RttTime> reports_ RTC_GUARDED_BY(task_queue_);

  // Observers getting stats reports.
  std::list<CallStatsObserver*> observers_ RTC_GUARDED_BY(task_queue_);

  TaskQueueBase* const task_queue_;

  // Used to signal destruction to potentially pending tasks.
  ScopedTaskSafety task_safety_;
};

}  // namespace internal
}  // namespace webrtc

#endif  // VIDEO_CALL_STATS2_H_
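
// A minimal usage sketch (not part of the original source; `worker_queue` is
// a placeholder TaskQueueBase*). CallStats must be constructed on its task
// queue; observers register for periodic RTT updates, and the RtcpRttStats
// adapter is handed to the RTP/RTCP module:
//
//   webrtc::internal::CallStats call_stats(clock, worker_queue);
//   call_stats.RegisterStatsObserver(&observer);
//   call_stats.EnsureStarted();  // Starts the 1 s update timer.
//   webrtc::RtcpRttStats* rtt_stats = call_stats.AsRtcpRttStats();
//   // ... later, on the same queue:
//   call_stats.DeregisterStatsObserver(&observer);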
@ -0,0 +1,496 @@
/*
 * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "video/config/encoder_stream_factory.h"

#include <algorithm>
#include <limits>
#include <set>
#include <string>
#include <utility>

#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "api/video/video_codec_constants.h"
#include "media/base/media_constants.h"
#include "media/base/video_adapter.h"
#include "modules/video_coding/codecs/vp9/svc_config.h"
#include "rtc_base/experiments/min_video_bitrate_experiment.h"
#include "rtc_base/experiments/normalize_simulcast_size_experiment.h"
#include "rtc_base/logging.h"
#include "video/config/simulcast.h"

namespace cricket {
namespace {

const int kMinLayerSize = 16;

int ScaleDownResolution(int resolution,
                        double scale_down_by,
                        int min_resolution) {
  // Resolution is never scaled down to smaller than min_resolution.
  // If the input resolution is already smaller than min_resolution,
  // no scaling should be done at all.
  if (resolution <= min_resolution)
    return resolution;
  return std::max(static_cast<int>(resolution / scale_down_by + 0.5),
                  min_resolution);
}
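
// Worked example (not part of the original source):
// ScaleDownResolution(1280, /*scale_down_by=*/3.0, /*min_resolution=*/16)
// rounds 1280 / 3.0 + 0.5 down to 427; an input already at or below the
// minimum, e.g. 16, is returned unchanged rather than scaled.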

bool PowerOfTwo(int value) {
  return (value > 0) && ((value & (value - 1)) == 0);
}

bool IsScaleFactorsPowerOfTwo(const webrtc::VideoEncoderConfig& config) {
  for (const auto& layer : config.simulcast_layers) {
    double scale = std::max(layer.scale_resolution_down_by, 1.0);
    if (std::round(scale) != scale || !PowerOfTwo(scale)) {
      return false;
    }
  }
  return true;
}

bool IsTemporalLayersSupported(const std::string& codec_name) {
  return absl::EqualsIgnoreCase(codec_name, kVp8CodecName) ||
         absl::EqualsIgnoreCase(codec_name, kVp9CodecName) ||
         absl::EqualsIgnoreCase(codec_name, kAv1CodecName);
}

size_t FindRequiredActiveLayers(
    const webrtc::VideoEncoderConfig& encoder_config) {
  // Need enough layers so that at least the first active one is present.
  for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
    if (encoder_config.simulcast_layers[i].active) {
      return i + 1;
    }
  }
  return 0;
}

// The selected thresholds for QVGA and VGA corresponded to a QP around 10.
// The change in QP declined above the selected bitrates.
static int GetMaxDefaultVideoBitrateKbps(int width,
                                         int height,
                                         bool is_screenshare) {
  int max_bitrate;
  if (width * height <= 320 * 240) {
    max_bitrate = 600;
  } else if (width * height <= 640 * 480) {
    max_bitrate = 1700;
  } else if (width * height <= 960 * 540) {
    max_bitrate = 2000;
  } else {
    max_bitrate = 2500;
  }
  if (is_screenshare)
    max_bitrate = std::max(max_bitrate, 1200);
  return max_bitrate;
}
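
// Recap of the defaults above (not part of the original source): up to
// 320x240 -> 600 kbps, up to 640x480 -> 1700 kbps, up to 960x540 -> 2000
// kbps, larger -> 2500 kbps, with screenshare raised to at least 1200 kbps.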

}  // namespace

// TODO(bugs.webrtc.org/8785): Consider removing max_qp as member of
// EncoderStreamFactory and instead set this value individually for each stream
// in the VideoEncoderConfig.simulcast_layers.
EncoderStreamFactory::EncoderStreamFactory(std::string codec_name,
                                           int max_qp,
                                           bool is_screenshare,
                                           bool conference_mode)
    : codec_name_(codec_name),
      max_qp_(max_qp),
      is_screenshare_(is_screenshare),
      conference_mode_(conference_mode),
      trials_(fallback_trials_),
      encoder_info_requested_resolution_alignment_(1) {}

EncoderStreamFactory::EncoderStreamFactory(
    std::string codec_name,
    int max_qp,
    bool is_screenshare,
    bool conference_mode,
    const webrtc::VideoEncoder::EncoderInfo& encoder_info,
    absl::optional<webrtc::VideoSourceRestrictions> restrictions,
    const webrtc::FieldTrialsView* trials)
    : codec_name_(codec_name),
      max_qp_(max_qp),
      is_screenshare_(is_screenshare),
      conference_mode_(conference_mode),
      trials_(trials ? *trials : fallback_trials_),
      encoder_info_requested_resolution_alignment_(
          encoder_info.requested_resolution_alignment),
      restrictions_(restrictions) {}

std::vector<webrtc::VideoStream> EncoderStreamFactory::CreateEncoderStreams(
    int frame_width,
    int frame_height,
    const webrtc::VideoEncoderConfig& encoder_config) {
  RTC_DCHECK_GT(encoder_config.number_of_streams, 0);
  RTC_DCHECK_GE(encoder_config.simulcast_layers.size(),
                encoder_config.number_of_streams);

  const absl::optional<webrtc::DataRate> experimental_min_bitrate =
      GetExperimentalMinVideoBitrate(encoder_config.codec_type);

  bool is_simulcast = (encoder_config.number_of_streams > 1);
  // If scalability mode was specified, don't treat {active,inactive,inactive}
  // as simulcast since the simulcast configuration assumes very low bitrates
  // on the first layer. This would prevent rampup of multiple spatial layers.
  // See https://crbug.com/webrtc/15041.
  if (is_simulcast &&
      encoder_config.simulcast_layers[0].scalability_mode.has_value()) {
    // Require at least one non-first layer to be active for is_simulcast=true.
    is_simulcast = false;
    for (size_t i = 1; i < encoder_config.simulcast_layers.size(); ++i) {
      if (encoder_config.simulcast_layers[i].active) {
        is_simulcast = true;
        break;
      }
    }
  }

  if (is_simulcast || ((absl::EqualsIgnoreCase(codec_name_, kVp8CodecName) ||
                        absl::EqualsIgnoreCase(codec_name_, kH264CodecName)) &&
                       is_screenshare_ && conference_mode_)) {
    return CreateSimulcastOrConferenceModeScreenshareStreams(
        frame_width, frame_height, encoder_config, experimental_min_bitrate);
  }

  return CreateDefaultVideoStreams(frame_width, frame_height, encoder_config,
                                   experimental_min_bitrate);
}
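
// A minimal usage sketch (not part of the original source; `encoder_config`
// is assumed to be an already-populated webrtc::VideoEncoderConfig with
// number_of_streams >= 1 and matching simulcast_layers). The factory is
// constructed per codec and queried for the VideoStream layout of a capture
// resolution:
//
//   cricket::EncoderStreamFactory factory(cricket::kVp8CodecName,
//                                         /*max_qp=*/56,
//                                         /*is_screenshare=*/false,
//                                         /*conference_mode=*/false);
//   std::vector<webrtc::VideoStream> streams =
//       factory.CreateEncoderStreams(1280, 720, encoder_config);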

std::vector<webrtc::VideoStream>
EncoderStreamFactory::CreateDefaultVideoStreams(
    int width,
    int height,
    const webrtc::VideoEncoderConfig& encoder_config,
    const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const {
  std::vector<webrtc::VideoStream> layers;

  // The max bitrate specified by the API.
  // - `encoder_config.simulcast_layers[0].max_bitrate_bps` comes from the
  //   first RtpEncodingParameters, which is the encoding of this stream.
  // - `encoder_config.max_bitrate_bps` comes from SDP; "b=AS" or conditionally
  //   "x-google-max-bitrate".
  // If `api_max_bitrate_bps` has a value then it is positive.
  absl::optional<int> api_max_bitrate_bps;
  if (encoder_config.simulcast_layers[0].max_bitrate_bps > 0) {
    api_max_bitrate_bps = encoder_config.simulcast_layers[0].max_bitrate_bps;
  }
  if (encoder_config.max_bitrate_bps > 0) {
    api_max_bitrate_bps =
        api_max_bitrate_bps.has_value()
            ? std::min(encoder_config.max_bitrate_bps, *api_max_bitrate_bps)
            : encoder_config.max_bitrate_bps;
  }

  // For unset max bitrates, set the default bitrate for non-simulcast.
  int max_bitrate_bps =
      api_max_bitrate_bps.has_value()
          ? *api_max_bitrate_bps
          : GetMaxDefaultVideoBitrateKbps(width, height, is_screenshare_) *
                1000;

  int min_bitrate_bps =
      experimental_min_bitrate
          ? rtc::saturated_cast<int>(experimental_min_bitrate->bps())
          : webrtc::kDefaultMinVideoBitrateBps;
  if (encoder_config.simulcast_layers[0].min_bitrate_bps > 0) {
    // Use the configured min bitrate.
    min_bitrate_bps = encoder_config.simulcast_layers[0].min_bitrate_bps;
    // If only the min bitrate is configured, make sure max is above min.
    if (!api_max_bitrate_bps.has_value())
      max_bitrate_bps = std::max(min_bitrate_bps, max_bitrate_bps);
  }
  int max_framerate = (encoder_config.simulcast_layers[0].max_framerate > 0)
                          ? encoder_config.simulcast_layers[0].max_framerate
                          : kDefaultVideoMaxFramerate;

  webrtc::VideoStream layer;
  layer.width = width;
  layer.height = height;
  layer.max_framerate = max_framerate;
  layer.requested_resolution =
      encoder_config.simulcast_layers[0].requested_resolution;
  // Note: VP9 seems to be sending if any layer is active
  // (see `UpdateSendState`) and still uses parameters only from
  // encoder_config.simulcast_layers[0].
  layer.active = absl::c_any_of(encoder_config.simulcast_layers,
                                [](const auto& layer) { return layer.active; });

  if (encoder_config.simulcast_layers[0].requested_resolution) {
    auto res = GetLayerResolutionFromRequestedResolution(
        width, height,
        *encoder_config.simulcast_layers[0].requested_resolution);
    layer.width = res.width;
    layer.height = res.height;
  } else if (encoder_config.simulcast_layers[0].scale_resolution_down_by > 1.) {
    layer.width = ScaleDownResolution(
        layer.width,
        encoder_config.simulcast_layers[0].scale_resolution_down_by,
        kMinLayerSize);
    layer.height = ScaleDownResolution(
        layer.height,
        encoder_config.simulcast_layers[0].scale_resolution_down_by,
        kMinLayerSize);
  }

  if (absl::EqualsIgnoreCase(codec_name_, kVp9CodecName)) {
    RTC_DCHECK(encoder_config.encoder_specific_settings);
    // Use VP9 SVC layering from codec settings which might be initialized
    // through a field trial in ConfigureVideoEncoderSettings.
    webrtc::VideoCodecVP9 vp9_settings;
    encoder_config.encoder_specific_settings->FillVideoCodecVp9(&vp9_settings);
    layer.num_temporal_layers = vp9_settings.numberOfTemporalLayers;

    // The number of spatial layers is signalled differently from different
    // call sites (sigh); pick the max as we are interested in the upper bound.
    int num_spatial_layers =
        std::max({encoder_config.simulcast_layers.size(),
                  encoder_config.spatial_layers.size(),
                  size_t{vp9_settings.numberOfSpatialLayers}});

    if (width * height > 0 &&
        (layer.num_temporal_layers > 1u || num_spatial_layers > 1)) {
      // In SVC mode, the VP9 max bitrate is determined by SvcConfig, instead of
      // GetMaxDefaultVideoBitrateKbps().
      std::vector<webrtc::SpatialLayer> svc_layers =
          webrtc::GetSvcConfig(width, height, max_framerate,
                               /*first_active_layer=*/0, num_spatial_layers,
                               *layer.num_temporal_layers, is_screenshare_);
      int sum_max_bitrates_kbps = 0;
      for (const webrtc::SpatialLayer& spatial_layer : svc_layers) {
        sum_max_bitrates_kbps += spatial_layer.maxBitrate;
      }
      RTC_DCHECK_GE(sum_max_bitrates_kbps, 0);
      if (!api_max_bitrate_bps.has_value()) {
        max_bitrate_bps = sum_max_bitrates_kbps * 1000;
      } else {
        max_bitrate_bps =
            std::min(max_bitrate_bps, sum_max_bitrates_kbps * 1000);
      }
      max_bitrate_bps = std::max(min_bitrate_bps, max_bitrate_bps);
    }
  }

  // In the case that the application sets a max bitrate that's lower than the
  // min bitrate, we adjust it down (see bugs.webrtc.org/9141).
  layer.min_bitrate_bps = std::min(min_bitrate_bps, max_bitrate_bps);
  if (encoder_config.simulcast_layers[0].target_bitrate_bps <= 0) {
    layer.target_bitrate_bps = max_bitrate_bps;
  } else {
    layer.target_bitrate_bps = std::min(
        encoder_config.simulcast_layers[0].target_bitrate_bps, max_bitrate_bps);
  }
  layer.max_bitrate_bps = max_bitrate_bps;
  layer.max_qp = max_qp_;
  layer.bitrate_priority = encoder_config.bitrate_priority;

  if (IsTemporalLayersSupported(codec_name_)) {
    // Use the configured number of temporal layers if set.
    if (encoder_config.simulcast_layers[0].num_temporal_layers) {
      layer.num_temporal_layers =
          *encoder_config.simulcast_layers[0].num_temporal_layers;
    }
  }
  layer.scalability_mode = encoder_config.simulcast_layers[0].scalability_mode;
  layers.push_back(layer);
  return layers;
}
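
// Worked example (not part of the original source): if the first encoding
// sets max_bitrate_bps = 1'500'000 and SDP supplies
// encoder_config.max_bitrate_bps = 1'000'000, the API maximum becomes
// min(1'500'000, 1'000'000) = 1'000'000 bps. If neither is set, the
// resolution-based default from GetMaxDefaultVideoBitrateKbps() is used
// instead.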

std::vector<webrtc::VideoStream>
EncoderStreamFactory::CreateSimulcastOrConferenceModeScreenshareStreams(
    int width,
    int height,
    const webrtc::VideoEncoderConfig& encoder_config,
    const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const {
  std::vector<webrtc::VideoStream> layers;

  const bool temporal_layers_supported = IsTemporalLayersSupported(codec_name_);
  // Use legacy simulcast screenshare if conference mode is explicitly enabled
  // or use the regular simulcast configuration path which is generic.
  layers = GetSimulcastConfig(FindRequiredActiveLayers(encoder_config),
                              encoder_config.number_of_streams, width, height,
                              encoder_config.bitrate_priority, max_qp_,
                              is_screenshare_ && conference_mode_,
                              temporal_layers_supported, trials_);
  // Allow an experiment to override the minimum bitrate for the lowest
  // spatial layer. The experiment's configuration has the lowest priority.
  if (experimental_min_bitrate) {
    layers[0].min_bitrate_bps =
        rtc::saturated_cast<int>(experimental_min_bitrate->bps());
  }
  // Update the active simulcast layers and configured bitrates.
  bool is_highest_layer_max_bitrate_configured = false;
  const bool has_scale_resolution_down_by = absl::c_any_of(
      encoder_config.simulcast_layers, [](const webrtc::VideoStream& layer) {
        return layer.scale_resolution_down_by != -1.;
      });

  bool default_scale_factors_used = true;
  if (has_scale_resolution_down_by) {
    default_scale_factors_used = IsScaleFactorsPowerOfTwo(encoder_config);
  }
  const bool norm_size_configured =
      webrtc::NormalizeSimulcastSizeExperiment::GetBase2Exponent().has_value();
  const int normalized_width =
      (default_scale_factors_used || norm_size_configured) &&
              (width >= kMinLayerSize)
          ? NormalizeSimulcastSize(width, encoder_config.number_of_streams)
          : width;
  const int normalized_height =
      (default_scale_factors_used || norm_size_configured) &&
              (height >= kMinLayerSize)
          ? NormalizeSimulcastSize(height, encoder_config.number_of_streams)
          : height;
  for (size_t i = 0; i < layers.size(); ++i) {
    layers[i].active = encoder_config.simulcast_layers[i].active;
    layers[i].scalability_mode =
        encoder_config.simulcast_layers[i].scalability_mode;
    layers[i].requested_resolution =
        encoder_config.simulcast_layers[i].requested_resolution;
    // Update with configured num temporal layers if supported by codec.
    if (encoder_config.simulcast_layers[i].num_temporal_layers &&
        IsTemporalLayersSupported(codec_name_)) {
      layers[i].num_temporal_layers =
          *encoder_config.simulcast_layers[i].num_temporal_layers;
    }
    if (encoder_config.simulcast_layers[i].max_framerate > 0) {
      layers[i].max_framerate =
          encoder_config.simulcast_layers[i].max_framerate;
    }
    if (encoder_config.simulcast_layers[i].requested_resolution.has_value()) {
      auto res = GetLayerResolutionFromRequestedResolution(
          normalized_width, normalized_height,
          *encoder_config.simulcast_layers[i].requested_resolution);
      layers[i].width = res.width;
      layers[i].height = res.height;
    } else if (has_scale_resolution_down_by) {
      const double scale_resolution_down_by = std::max(
          encoder_config.simulcast_layers[i].scale_resolution_down_by, 1.0);
      layers[i].width = ScaleDownResolution(
          normalized_width, scale_resolution_down_by, kMinLayerSize);
      layers[i].height = ScaleDownResolution(
          normalized_height, scale_resolution_down_by, kMinLayerSize);
    }
    // Update simulcast bitrates with configured min and max bitrate.
    if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0) {
      layers[i].min_bitrate_bps =
          encoder_config.simulcast_layers[i].min_bitrate_bps;
    }
    if (encoder_config.simulcast_layers[i].max_bitrate_bps > 0) {
      layers[i].max_bitrate_bps =
          encoder_config.simulcast_layers[i].max_bitrate_bps;
    }
    if (encoder_config.simulcast_layers[i].target_bitrate_bps > 0) {
      layers[i].target_bitrate_bps =
          encoder_config.simulcast_layers[i].target_bitrate_bps;
    }
    if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0 &&
        encoder_config.simulcast_layers[i].max_bitrate_bps > 0) {
      // Min and max bitrate are configured.
      // Set target to 3/4 of the max bitrate (or to max if below min).
|
||||
if (encoder_config.simulcast_layers[i].target_bitrate_bps <= 0)
|
||||
layers[i].target_bitrate_bps = layers[i].max_bitrate_bps * 3 / 4;
|
||||
if (layers[i].target_bitrate_bps < layers[i].min_bitrate_bps)
|
||||
layers[i].target_bitrate_bps = layers[i].max_bitrate_bps;
|
||||
} else if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0) {
|
||||
// Only min bitrate is configured, make sure target/max are above min.
|
||||
layers[i].target_bitrate_bps =
|
||||
std::max(layers[i].target_bitrate_bps, layers[i].min_bitrate_bps);
|
||||
layers[i].max_bitrate_bps =
|
||||
std::max(layers[i].max_bitrate_bps, layers[i].min_bitrate_bps);
|
||||
} else if (encoder_config.simulcast_layers[i].max_bitrate_bps > 0) {
|
||||
// Only max bitrate is configured, make sure min/target are below max.
|
||||
// Keep target bitrate if it is set explicitly in encoding config.
|
||||
// Otherwise set target bitrate to 3/4 of the max bitrate
|
||||
// or the one calculated from GetSimulcastConfig() which is larger.
|
||||
layers[i].min_bitrate_bps =
|
||||
std::min(layers[i].min_bitrate_bps, layers[i].max_bitrate_bps);
|
||||
if (encoder_config.simulcast_layers[i].target_bitrate_bps <= 0) {
|
||||
layers[i].target_bitrate_bps = std::max(
|
||||
layers[i].target_bitrate_bps, layers[i].max_bitrate_bps * 3 / 4);
|
||||
}
|
||||
layers[i].target_bitrate_bps = std::max(
|
||||
std::min(layers[i].target_bitrate_bps, layers[i].max_bitrate_bps),
|
||||
layers[i].min_bitrate_bps);
|
||||
}
|
||||
if (i == layers.size() - 1) {
|
||||
is_highest_layer_max_bitrate_configured =
|
||||
encoder_config.simulcast_layers[i].max_bitrate_bps > 0;
|
||||
}
|
||||
}
|
||||
if (!is_screenshare_ && !is_highest_layer_max_bitrate_configured &&
|
||||
encoder_config.max_bitrate_bps > 0) {
|
||||
// No application-configured maximum for the largest layer.
|
||||
// If there is bitrate leftover, give it to the largest layer.
|
||||
BoostMaxSimulcastLayer(
|
||||
webrtc::DataRate::BitsPerSec(encoder_config.max_bitrate_bps), &layers);
|
||||
}
|
||||
|
||||
// Sort the layers by max_bitrate_bps, they might not always be from
|
||||
// smallest to biggest
|
||||
std::vector<size_t> index(layers.size());
|
||||
std::iota(index.begin(), index.end(), 0);
|
||||
std::stable_sort(index.begin(), index.end(), [&layers](size_t a, size_t b) {
|
||||
return layers[a].max_bitrate_bps < layers[b].max_bitrate_bps;
|
||||
});
|
||||
|
||||
if (!layers[index[0]].active) {
|
||||
// Adjust min bitrate of the first active layer to allow it to go as low as
|
||||
// the lowest (now inactive) layer could.
|
||||
// Otherwise, if e.g. a single HD stream is active, it would have 600kbps
|
||||
// min bitrate, which would always be allocated to the stream.
|
||||
// This would lead to congested network, dropped frames and overall bad
|
||||
// experience.
|
||||
|
||||
const int min_configured_bitrate = layers[index[0]].min_bitrate_bps;
|
||||
for (size_t i = 0; i < layers.size(); ++i) {
|
||||
if (layers[index[i]].active) {
|
||||
layers[index[i]].min_bitrate_bps = min_configured_bitrate;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return layers;
|
||||
}
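
// Worked example of the clamping rules above (illustrative numbers): if the
// application configures only max_bitrate_bps = 800'000 for a layer whose
// GetSimulcastConfig() defaults are min 150 / target 500 / max 700 kbps, then
// max becomes 800 kbps, min stays 150 kbps, and the target is raised to
// max(500, 800 * 3 / 4) = 600 kbps, which already lies inside [min, max].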

webrtc::Resolution
EncoderStreamFactory::GetLayerResolutionFromRequestedResolution(
    int frame_width,
    int frame_height,
    webrtc::Resolution requested_resolution) const {
  VideoAdapter adapter(encoder_info_requested_resolution_alignment_);
  adapter.OnOutputFormatRequest(requested_resolution.ToPair(),
                                requested_resolution.PixelCount(),
                                absl::nullopt);
  if (restrictions_) {
    rtc::VideoSinkWants wants;
    wants.is_active = true;
    wants.target_pixel_count = restrictions_->target_pixels_per_frame();
    wants.max_pixel_count =
        rtc::dchecked_cast<int>(restrictions_->max_pixels_per_frame().value_or(
            std::numeric_limits<int>::max()));
    wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
    wants.resolution_alignment = encoder_info_requested_resolution_alignment_;
    adapter.OnSinkWants(wants);
  }
  int cropped_width, cropped_height;
  int out_width = 0, out_height = 0;
  if (!adapter.AdaptFrameResolution(frame_width, frame_height, 0,
                                    &cropped_width, &cropped_height,
                                    &out_width, &out_height)) {
    RTC_LOG(LS_ERROR) << "AdaptFrameResolution returned false!";
  }
  return {.width = out_width, .height = out_height};
}

}  // namespace cricket
80
TMessagesProj/jni/voip/webrtc/video/config/encoder_stream_factory.h
Normal file
@ -0,0 +1,80 @@
/*
 * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VIDEO_CONFIG_ENCODER_STREAM_FACTORY_H_
#define VIDEO_CONFIG_ENCODER_STREAM_FACTORY_H_

#include <string>
#include <vector>

#include "api/transport/field_trial_based_config.h"
#include "api/units/data_rate.h"
#include "api/video_codecs/video_encoder.h"
#include "call/adaptation/video_source_restrictions.h"
#include "video/config/video_encoder_config.h"

namespace cricket {

class EncoderStreamFactory
    : public webrtc::VideoEncoderConfig::VideoStreamFactoryInterface {
 public:
  // Note: this constructor is used by downstream test code.
  EncoderStreamFactory(std::string codec_name,
                       int max_qp,
                       bool is_screenshare,
                       bool conference_mode);

  EncoderStreamFactory(std::string codec_name,
                       int max_qp,
                       bool is_screenshare,
                       bool conference_mode,
                       const webrtc::VideoEncoder::EncoderInfo& encoder_info,
                       absl::optional<webrtc::VideoSourceRestrictions>
                           restrictions = absl::nullopt,
                       const webrtc::FieldTrialsView* trials = nullptr);

  std::vector<webrtc::VideoStream> CreateEncoderStreams(
      int width,
      int height,
      const webrtc::VideoEncoderConfig& encoder_config) override;

 private:
  std::vector<webrtc::VideoStream> CreateDefaultVideoStreams(
      int width,
      int height,
      const webrtc::VideoEncoderConfig& encoder_config,
      const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const;

  std::vector<webrtc::VideoStream>
  CreateSimulcastOrConferenceModeScreenshareStreams(
      int width,
      int height,
      const webrtc::VideoEncoderConfig& encoder_config,
      const absl::optional<webrtc::DataRate>& experimental_min_bitrate) const;

  webrtc::Resolution GetLayerResolutionFromRequestedResolution(
      int in_frame_width,
      int in_frame_height,
      webrtc::Resolution requested_resolution) const;

  const std::string codec_name_;
  const int max_qp_;
  const bool is_screenshare_;
  // Allows a screenshare specific configuration, which enables temporal
  // layering and various settings.
  const bool conference_mode_;
  const webrtc::FieldTrialBasedConfig fallback_trials_;
  const webrtc::FieldTrialsView& trials_;
  const int encoder_info_requested_resolution_alignment_;
  const absl::optional<webrtc::VideoSourceRestrictions> restrictions_;
};

}  // namespace cricket

#endif  // VIDEO_CONFIG_ENCODER_STREAM_FACTORY_H_
495
TMessagesProj/jni/voip/webrtc/video/config/simulcast.cc
Normal file
@ -0,0 +1,495 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/config/simulcast.h"

#include <stdint.h>
#include <stdio.h>

#include <algorithm>
#include <string>
#include <vector>

#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "api/video/video_codec_constants.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/min_video_bitrate_experiment.h"
#include "rtc_base/experiments/normalize_simulcast_size_experiment.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/logging.h"

namespace cricket {

namespace {

constexpr char kUseLegacySimulcastLayerLimitFieldTrial[] =
    "WebRTC-LegacySimulcastLayerLimit";

constexpr double kDefaultMaxRoundupRate = 0.1;

// Limits for legacy conference screensharing mode. Currently used for the
// lower of the two simulcast streams.
constexpr webrtc::DataRate kScreenshareDefaultTl0Bitrate =
    webrtc::DataRate::KilobitsPerSec(200);
constexpr webrtc::DataRate kScreenshareDefaultTl1Bitrate =
    webrtc::DataRate::KilobitsPerSec(1000);

// Min/max bitrate for the higher of the two simulcast streams used for
// screen content.
constexpr webrtc::DataRate kScreenshareHighStreamMinBitrate =
    webrtc::DataRate::KilobitsPerSec(600);
constexpr webrtc::DataRate kScreenshareHighStreamMaxBitrate =
    webrtc::DataRate::KilobitsPerSec(1250);

constexpr int kDefaultNumTemporalLayers = 3;
constexpr int kScreenshareMaxSimulcastLayers = 2;
constexpr int kScreenshareTemporalLayers = 2;

struct SimulcastFormat {
  int width;
  int height;
  // The maximum number of simulcast layers that can be used for resolutions
  // at `widthxheight` by legacy applications.
  size_t max_layers;
  // The maximum bitrate for encoding a stream at `widthxheight`, when we are
  // not sending the next higher spatial stream.
  webrtc::DataRate max_bitrate;
  // The target bitrate for encoding a stream at `widthxheight`, when this
  // layer is not the highest layer (i.e., when we are sending another higher
  // spatial stream).
  webrtc::DataRate target_bitrate;
  // The minimum bitrate needed for encoding a stream at `widthxheight`.
  webrtc::DataRate min_bitrate;
};

// This table describes how many simulcast layers can be used at which
// resolutions, and at what bitrates (maximum, target, and minimum).
// Important!! Keep this table ordered from high resolution to low resolution.
constexpr const SimulcastFormat kSimulcastFormats[] = {
    {1920, 1080, 3, webrtc::DataRate::KilobitsPerSec(5000),
     webrtc::DataRate::KilobitsPerSec(4000),
     webrtc::DataRate::KilobitsPerSec(800)},
    {1280, 720, 3, webrtc::DataRate::KilobitsPerSec(2500),
     webrtc::DataRate::KilobitsPerSec(2500),
     webrtc::DataRate::KilobitsPerSec(600)},
    {960, 540, 3, webrtc::DataRate::KilobitsPerSec(1200),
     webrtc::DataRate::KilobitsPerSec(1200),
     webrtc::DataRate::KilobitsPerSec(350)},
    {640, 360, 2, webrtc::DataRate::KilobitsPerSec(700),
     webrtc::DataRate::KilobitsPerSec(500),
     webrtc::DataRate::KilobitsPerSec(150)},
    {480, 270, 2, webrtc::DataRate::KilobitsPerSec(450),
     webrtc::DataRate::KilobitsPerSec(350),
     webrtc::DataRate::KilobitsPerSec(150)},
    {320, 180, 1, webrtc::DataRate::KilobitsPerSec(200),
     webrtc::DataRate::KilobitsPerSec(150),
     webrtc::DataRate::KilobitsPerSec(30)},
    // As the resolution goes down, interpolate the target and max bitrates
    // down towards zero. The min bitrate is still limited at 30 kbps and the
    // target and the max will be capped from below accordingly.
    {0, 0, 1, webrtc::DataRate::KilobitsPerSec(0),
     webrtc::DataRate::KilobitsPerSec(0),
     webrtc::DataRate::KilobitsPerSec(30)}};

constexpr webrtc::DataRate Interpolate(const webrtc::DataRate& a,
                                       const webrtc::DataRate& b,
                                       float rate) {
  return a * (1.0 - rate) + b * rate;
}
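
// Example: Interpolate(700 kbps, 450 kbps, 0.25) blends the two endpoints
// linearly and yields 700 * 0.75 + 450 * 0.25 = 637.5 kbps.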

// TODO(webrtc:12415): Flip this to a kill switch when this feature launches.
bool EnableLowresBitrateInterpolation(const webrtc::FieldTrialsView& trials) {
  return absl::StartsWith(
      trials.Lookup("WebRTC-LowresSimulcastBitrateInterpolation"), "Enabled");
}

std::vector<SimulcastFormat> GetSimulcastFormats(
    bool enable_lowres_bitrate_interpolation) {
  std::vector<SimulcastFormat> formats;
  formats.insert(formats.begin(), std::begin(kSimulcastFormats),
                 std::end(kSimulcastFormats));
  if (!enable_lowres_bitrate_interpolation) {
    RTC_CHECK_GE(formats.size(), 2u);
    SimulcastFormat& format0x0 = formats[formats.size() - 1];
    const SimulcastFormat& format_prev = formats[formats.size() - 2];
    format0x0.max_bitrate = format_prev.max_bitrate;
    format0x0.target_bitrate = format_prev.target_bitrate;
    format0x0.min_bitrate = format_prev.min_bitrate;
  }
  return formats;
}

// Multiway: Number of temporal layers for each simulcast stream.
int DefaultNumberOfTemporalLayers(const webrtc::FieldTrialsView& trials) {
  const std::string group_name =
      trials.Lookup("WebRTC-VP8ConferenceTemporalLayers");
  if (group_name.empty())
    return kDefaultNumTemporalLayers;

  int num_temporal_layers = kDefaultNumTemporalLayers;
  if (sscanf(group_name.c_str(), "%d", &num_temporal_layers) == 1 &&
      num_temporal_layers > 0 &&
      num_temporal_layers <= webrtc::kMaxTemporalStreams) {
    return num_temporal_layers;
  }

  RTC_LOG(LS_WARNING) << "Attempt to set number of temporal layers to "
                         "incorrect value: "
                      << group_name;

  return kDefaultNumTemporalLayers;
}

int FindSimulcastFormatIndex(int width,
                             int height,
                             bool enable_lowres_bitrate_interpolation) {
  RTC_DCHECK_GE(width, 0);
  RTC_DCHECK_GE(height, 0);
  const auto formats =
      GetSimulcastFormats(enable_lowres_bitrate_interpolation);
  for (uint32_t i = 0; i < formats.size(); ++i) {
    if (width * height >= formats[i].width * formats[i].height) {
      return i;
    }
  }
  RTC_DCHECK_NOTREACHED();
  return -1;
}
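
// Example: since the table is ordered from high to low resolution, a 1280x720
// request (921600 pixels) skips the 1920x1080 entry and matches index 1, the
// first entry whose pixel count it reaches. Anything smaller than 320x180
// falls through to the final 0x0 catch-all entry.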

}  // namespace

// Round size to the nearest simulcast-friendly size.
// Simulcast stream width and height must both be divisible by
// |2 ^ (simulcast_layers - 1)|.
int NormalizeSimulcastSize(int size, size_t simulcast_layers) {
  int base2_exponent = static_cast<int>(simulcast_layers) - 1;
  const absl::optional<int> experimental_base2_exponent =
      webrtc::NormalizeSimulcastSizeExperiment::GetBase2Exponent();
  if (experimental_base2_exponent &&
      (size > (1 << *experimental_base2_exponent))) {
    base2_exponent = *experimental_base2_exponent;
  }
  return ((size >> base2_exponent) << base2_exponent);
}
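
// Example: NormalizeSimulcastSize(642, 3) uses base2_exponent 2, so the size
// is rounded down to a multiple of 4: (642 >> 2) << 2 == 640.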

SimulcastFormat InterpolateSimulcastFormat(
    int width,
    int height,
    absl::optional<double> max_roundup_rate,
    bool enable_lowres_bitrate_interpolation) {
  const auto formats =
      GetSimulcastFormats(enable_lowres_bitrate_interpolation);
  const int index = FindSimulcastFormatIndex(
      width, height, enable_lowres_bitrate_interpolation);
  if (index == 0)
    return formats[index];
  const int total_pixels_up =
      formats[index - 1].width * formats[index - 1].height;
  const int total_pixels_down = formats[index].width * formats[index].height;
  const int total_pixels = width * height;
  const float rate = (total_pixels_up - total_pixels) /
                     static_cast<float>(total_pixels_up - total_pixels_down);

  // Use the upper resolution if `rate` is below the configured threshold.
  size_t max_layers = (rate < max_roundup_rate.value_or(kDefaultMaxRoundupRate))
                          ? formats[index - 1].max_layers
                          : formats[index].max_layers;
  webrtc::DataRate max_bitrate = Interpolate(formats[index - 1].max_bitrate,
                                             formats[index].max_bitrate, rate);
  webrtc::DataRate target_bitrate = Interpolate(
      formats[index - 1].target_bitrate, formats[index].target_bitrate, rate);
  webrtc::DataRate min_bitrate = Interpolate(formats[index - 1].min_bitrate,
                                             formats[index].min_bitrate, rate);

  return {width, height, max_layers, max_bitrate, target_bitrate, min_bitrate};
}
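
// Example: 1120x630 (705600 pixels) falls between 1280x720 (921600) and
// 960x540 (518400), giving rate = (921600 - 705600) / (921600 - 518400)
// ~= 0.54. The bitrates are blended 54% of the way from the 720p row towards
// the 540p row, e.g. max ~= 2500 - 0.54 * (2500 - 1200) ~= 1800 kbps.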

SimulcastFormat InterpolateSimulcastFormat(
    int width,
    int height,
    bool enable_lowres_bitrate_interpolation) {
  return InterpolateSimulcastFormat(width, height, absl::nullopt,
                                    enable_lowres_bitrate_interpolation);
}

webrtc::DataRate FindSimulcastMaxBitrate(
    int width,
    int height,
    bool enable_lowres_bitrate_interpolation) {
  return InterpolateSimulcastFormat(width, height,
                                    enable_lowres_bitrate_interpolation)
      .max_bitrate;
}

webrtc::DataRate FindSimulcastTargetBitrate(
    int width,
    int height,
    bool enable_lowres_bitrate_interpolation) {
  return InterpolateSimulcastFormat(width, height,
                                    enable_lowres_bitrate_interpolation)
      .target_bitrate;
}

webrtc::DataRate FindSimulcastMinBitrate(
    int width,
    int height,
    bool enable_lowres_bitrate_interpolation) {
  return InterpolateSimulcastFormat(width, height,
                                    enable_lowres_bitrate_interpolation)
      .min_bitrate;
}

void BoostMaxSimulcastLayer(webrtc::DataRate max_bitrate,
                            std::vector<webrtc::VideoStream>* layers) {
  if (layers->empty())
    return;

  const webrtc::DataRate total_bitrate = GetTotalMaxBitrate(*layers);

  // We're still not using all available bits.
  if (total_bitrate < max_bitrate) {
    // Spend additional bits to boost the max layer.
    const webrtc::DataRate bitrate_left = max_bitrate - total_bitrate;
    layers->back().max_bitrate_bps += bitrate_left.bps();
  }
}

webrtc::DataRate GetTotalMaxBitrate(
    const std::vector<webrtc::VideoStream>& layers) {
  if (layers.empty())
    return webrtc::DataRate::Zero();

  int total_max_bitrate_bps = 0;
  for (size_t s = 0; s < layers.size() - 1; ++s) {
    total_max_bitrate_bps += layers[s].target_bitrate_bps;
  }
  total_max_bitrate_bps += layers.back().max_bitrate_bps;
  return webrtc::DataRate::BitsPerSec(total_max_bitrate_bps);
}
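
// Example: for three layers with target bitrates {150, 500, 2000} kbps and a
// top-layer max bitrate of 2500 kbps, the total is 150 + 500 + 2500 =
// 3150 kbps: lower layers count their target rate (what they receive when a
// higher layer is also sent), while only the top layer counts its max.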

size_t LimitSimulcastLayerCount(int width,
                                int height,
                                size_t need_layers,
                                size_t layer_count,
                                const webrtc::FieldTrialsView& trials) {
  if (!absl::StartsWith(trials.Lookup(kUseLegacySimulcastLayerLimitFieldTrial),
                        "Disabled")) {
    // The max layers from one higher resolution in kSimulcastFormats will be
    // used if the ratio (pixels_up - pixels) / (pixels_up - pixels_down) is
    // less than the configured `max_ratio`. pixels_down is the selected index
    // in kSimulcastFormats based on pixels.
    webrtc::FieldTrialOptional<double> max_ratio("max_ratio");
    webrtc::ParseFieldTrial({&max_ratio},
                            trials.Lookup("WebRTC-SimulcastLayerLimitRoundUp"));

    const bool enable_lowres_bitrate_interpolation =
        EnableLowresBitrateInterpolation(trials);
    size_t adaptive_layer_count = std::max(
        need_layers,
        InterpolateSimulcastFormat(width, height, max_ratio.GetOptional(),
                                   enable_lowres_bitrate_interpolation)
            .max_layers);
    if (layer_count > adaptive_layer_count) {
      RTC_LOG(LS_WARNING) << "Reducing simulcast layer count from "
                          << layer_count << " to " << adaptive_layer_count;
      layer_count = adaptive_layer_count;
    }
  }
  return layer_count;
}
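
// Example: a 320x180 track that negotiated 3 encodings but only needs 1
// active layer maps to max_layers 1 in kSimulcastFormats, so the layer count
// is reduced from 3 to max(need_layers, 1) = 1 and a warning is logged.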

std::vector<webrtc::VideoStream> GetSimulcastConfig(
    size_t min_layers,
    size_t max_layers,
    int width,
    int height,
    double bitrate_priority,
    int max_qp,
    bool is_screenshare_with_conference_mode,
    bool temporal_layers_supported,
    const webrtc::FieldTrialsView& trials) {
  RTC_DCHECK_LE(min_layers, max_layers);
  RTC_DCHECK(max_layers > 1 || is_screenshare_with_conference_mode);

  const bool base_heavy_tl3_rate_alloc =
      webrtc::RateControlSettings::ParseFromKeyValueConfig(&trials)
          .Vp8BaseHeavyTl3RateAllocation();
  if (is_screenshare_with_conference_mode) {
    return GetScreenshareLayers(max_layers, width, height, bitrate_priority,
                                max_qp, temporal_layers_supported,
                                base_heavy_tl3_rate_alloc, trials);
  } else {
    // Some applications rely on the old behavior limiting the simulcast layer
    // count based on the resolution automatically, which they can get through
    // the WebRTC-LegacySimulcastLayerLimit field trial until they update.
    max_layers = LimitSimulcastLayerCount(width, height, min_layers,
                                          max_layers, trials);

    return GetNormalSimulcastLayers(max_layers, width, height,
                                    bitrate_priority, max_qp,
                                    temporal_layers_supported,
                                    base_heavy_tl3_rate_alloc, trials);
  }
}

std::vector<webrtc::VideoStream> GetNormalSimulcastLayers(
    size_t layer_count,
    int width,
    int height,
    double bitrate_priority,
    int max_qp,
    bool temporal_layers_supported,
    bool base_heavy_tl3_rate_alloc,
    const webrtc::FieldTrialsView& trials) {
  std::vector<webrtc::VideoStream> layers(layer_count);
  const bool enable_lowres_bitrate_interpolation =
      EnableLowresBitrateInterpolation(trials);
  const int num_temporal_layers = DefaultNumberOfTemporalLayers(trials);
  // The format width and height have to be divisible by
  // |2 ^ (num_simulcast_layers - 1)|.
  width = NormalizeSimulcastSize(width, layer_count);
  height = NormalizeSimulcastSize(height, layer_count);
  // Add simulcast streams, from highest resolution (`s` =
  // num_simulcast_layers - 1) to lowest resolution at `s` = 0.
  for (size_t s = layer_count - 1;; --s) {
    layers[s].width = width;
    layers[s].height = height;
    // TODO(pbos): Fill actual temporal-layer bitrate thresholds.
    layers[s].max_qp = max_qp;
    layers[s].num_temporal_layers =
        temporal_layers_supported ? num_temporal_layers : 1;
    layers[s].max_bitrate_bps =
        FindSimulcastMaxBitrate(width, height,
                                enable_lowres_bitrate_interpolation)
            .bps();
    layers[s].target_bitrate_bps =
        FindSimulcastTargetBitrate(width, height,
                                   enable_lowres_bitrate_interpolation)
            .bps();
    if (s == 0) {
      // If alternative temporal rate allocation is selected, adjust the
      // bitrate of the lowest simulcast stream so that the absolute bitrate
      // for the base temporal layer matches the bitrate for the base temporal
      // layer with the default 3 simulcast streams. Otherwise we risk a
      // higher threshold for receiving a feed at all.
      float rate_factor = 1.0;
      if (num_temporal_layers == 3) {
        if (base_heavy_tl3_rate_alloc) {
          // Base heavy allocation increases TL0 bitrate from 40% to 60%.
          rate_factor = 0.4 / 0.6;
        }
      } else {
        rate_factor =
            webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
                3, 0, /*base_heavy_tl3_rate_alloc=*/false) /
            webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
                num_temporal_layers, 0, /*base_heavy_tl3_rate_alloc=*/false);
      }

      layers[s].max_bitrate_bps =
          static_cast<int>(layers[s].max_bitrate_bps * rate_factor);
      layers[s].target_bitrate_bps =
          static_cast<int>(layers[s].target_bitrate_bps * rate_factor);
    }
    layers[s].min_bitrate_bps =
        FindSimulcastMinBitrate(width, height,
                                enable_lowres_bitrate_interpolation)
            .bps();

    // Ensure consistency.
    layers[s].max_bitrate_bps =
        std::max(layers[s].min_bitrate_bps, layers[s].max_bitrate_bps);
    layers[s].target_bitrate_bps =
        std::max(layers[s].min_bitrate_bps, layers[s].target_bitrate_bps);

    layers[s].max_framerate = kDefaultVideoMaxFramerate;

    width /= 2;
    height /= 2;

    if (s == 0) {
      break;
    }
  }
  // Currently the relative bitrate priority of the sender is controlled by
  // the value of the lowest VideoStream.
  // TODO(bugs.webrtc.org/8630): The web specification describes being able to
  // control relative bitrate for each individual simulcast layer, but this
  // is currently just implemented per rtp sender.
  layers[0].bitrate_priority = bitrate_priority;
  return layers;
}
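
// Example of the rate_factor adjustment above: with the default allocation
// table, TL0 of a 3-temporal-layer stream receives 40% of the stream bitrate
// (per the comment above), while TL0 of a 2-temporal-layer stream receives a
// larger share (60% by default), so for num_temporal_layers == 2 the lowest
// simulcast stream is scaled by roughly 0.4 / 0.6 ~= 0.67 to keep the
// absolute TL0 bitrate comparable.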

std::vector<webrtc::VideoStream> GetScreenshareLayers(
    size_t max_layers,
    int width,
    int height,
    double bitrate_priority,
    int max_qp,
    bool temporal_layers_supported,
    bool base_heavy_tl3_rate_alloc,
    const webrtc::FieldTrialsView& trials) {
  size_t num_simulcast_layers =
      std::min<int>(max_layers, kScreenshareMaxSimulcastLayers);

  std::vector<webrtc::VideoStream> layers(num_simulcast_layers);
  // For legacy screenshare in conference mode, tl0 and tl1 bitrates are
  // piggybacked on the VideoCodec struct as target and max bitrates,
  // respectively. See e.g. webrtc::LibvpxVp8Encoder::SetRates().
  layers[0].width = width;
  layers[0].height = height;
  layers[0].max_qp = max_qp;
  layers[0].max_framerate = 5;
  layers[0].min_bitrate_bps = webrtc::kDefaultMinVideoBitrateBps;
  layers[0].target_bitrate_bps = kScreenshareDefaultTl0Bitrate.bps();
  layers[0].max_bitrate_bps = kScreenshareDefaultTl1Bitrate.bps();
  layers[0].num_temporal_layers = temporal_layers_supported ? 2 : 1;

  // With simulcast enabled, add another spatial layer. This one will have a
  // more normal layout, with the regular 3 temporal layer pattern and no fps
  // restrictions. The base simulcast layer will still use the legacy setup.
  if (num_simulcast_layers == kScreenshareMaxSimulcastLayers) {
    // Add optional upper simulcast layer.
    int max_bitrate_bps;
    bool using_boosted_bitrate = false;
    if (!temporal_layers_supported) {
      // Set the max bitrate to where the base layer would have been if
      // temporal layers were enabled.
      max_bitrate_bps = static_cast<int>(
          kScreenshareHighStreamMaxBitrate.bps() *
          webrtc::SimulcastRateAllocator::GetTemporalRateAllocation(
              kScreenshareTemporalLayers, 0, base_heavy_tl3_rate_alloc));
    } else {
      // Experimental temporal layer mode used, use increased max bitrate.
      max_bitrate_bps = kScreenshareHighStreamMaxBitrate.bps();
      using_boosted_bitrate = true;
    }

    layers[1].width = width;
    layers[1].height = height;
    layers[1].max_qp = max_qp;
    layers[1].max_framerate = kDefaultVideoMaxFramerate;
    layers[1].num_temporal_layers =
        temporal_layers_supported ? kScreenshareTemporalLayers : 1;
    layers[1].min_bitrate_bps = using_boosted_bitrate
                                    ? kScreenshareHighStreamMinBitrate.bps()
                                    : layers[0].target_bitrate_bps * 2;
    layers[1].target_bitrate_bps = max_bitrate_bps;
    layers[1].max_bitrate_bps = max_bitrate_bps;
  }

  // The bitrate priority is currently implemented on a per-sender level, so
  // we just set it for the first simulcast layer.
  layers[0].bitrate_priority = bitrate_priority;
  return layers;
}

}  // namespace cricket
72
TMessagesProj/jni/voip/webrtc/video/config/simulcast.h
Normal file
@ -0,0 +1,72 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_CONFIG_SIMULCAST_H_
#define VIDEO_CONFIG_SIMULCAST_H_

#include <stddef.h>

#include <vector>

#include "api/field_trials_view.h"
#include "api/units/data_rate.h"
#include "video/config/video_encoder_config.h"

namespace cricket {

// Gets the total maximum bitrate for the `streams`.
webrtc::DataRate GetTotalMaxBitrate(
    const std::vector<webrtc::VideoStream>& streams);

// Adds any bitrate of `max_bitrate` that is above the total maximum bitrate
// for the `layers` to the highest quality layer.
void BoostMaxSimulcastLayer(webrtc::DataRate max_bitrate,
                            std::vector<webrtc::VideoStream>* layers);

// Rounds size to the nearest simulcast-friendly size.
int NormalizeSimulcastSize(int size, size_t simulcast_layers);

// Gets simulcast settings.
std::vector<webrtc::VideoStream> GetSimulcastConfig(
    size_t min_layers,
    size_t max_layers,
    int width,
    int height,
    double bitrate_priority,
    int max_qp,
    bool is_screenshare_with_conference_mode,
    bool temporal_layers_supported,
    const webrtc::FieldTrialsView& trials);

// Gets the simulcast config layers for a non-screensharing case.
std::vector<webrtc::VideoStream> GetNormalSimulcastLayers(
    size_t max_layers,
    int width,
    int height,
    double bitrate_priority,
    int max_qp,
    bool temporal_layers_supported,
    bool base_heavy_tl3_rate_alloc,
    const webrtc::FieldTrialsView& trials);

// Gets simulcast config layers for screenshare settings.
std::vector<webrtc::VideoStream> GetScreenshareLayers(
    size_t max_layers,
    int width,
    int height,
    double bitrate_priority,
    int max_qp,
    bool temporal_layers_supported,
    bool base_heavy_tl3_rate_alloc,
    const webrtc::FieldTrialsView& trials);

}  // namespace cricket

#endif  // VIDEO_CONFIG_SIMULCAST_H_
148
TMessagesProj/jni/voip/webrtc/video/config/video_encoder_config.cc
Normal file
@ -0,0 +1,148 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "video/config/video_encoder_config.h"

#include <string>

#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"

namespace webrtc {
VideoStream::VideoStream()
    : width(0),
      height(0),
      max_framerate(-1),
      min_bitrate_bps(-1),
      target_bitrate_bps(-1),
      max_bitrate_bps(-1),
      scale_resolution_down_by(-1.),
      max_qp(-1),
      num_temporal_layers(absl::nullopt),
      active(true) {}
VideoStream::VideoStream(const VideoStream& other) = default;

VideoStream::~VideoStream() = default;

std::string VideoStream::ToString() const {
  char buf[1024];
  rtc::SimpleStringBuilder ss(buf);
  ss << "{width: " << width;
  ss << ", height: " << height;
  ss << ", max_framerate: " << max_framerate;
  ss << ", min_bitrate_bps:" << min_bitrate_bps;
  ss << ", target_bitrate_bps:" << target_bitrate_bps;
  ss << ", max_bitrate_bps:" << max_bitrate_bps;
  ss << ", max_qp: " << max_qp;
  ss << ", num_temporal_layers: " << num_temporal_layers.value_or(1);
  ss << ", bitrate_priority: " << bitrate_priority.value_or(0);
  ss << ", active: " << active;
  ss << ", scale_down_by: " << scale_resolution_down_by;

  return ss.str();
}

VideoEncoderConfig::VideoEncoderConfig()
    : codec_type(kVideoCodecGeneric),
      video_format("Unset"),
      content_type(ContentType::kRealtimeVideo),
      frame_drop_enabled(false),
      encoder_specific_settings(nullptr),
      min_transmit_bitrate_bps(0),
      max_bitrate_bps(0),
      bitrate_priority(1.0),
      number_of_streams(0),
      legacy_conference_mode(false),
      is_quality_scaling_allowed(false) {}

VideoEncoderConfig::VideoEncoderConfig(VideoEncoderConfig&&) = default;

VideoEncoderConfig::~VideoEncoderConfig() = default;

std::string VideoEncoderConfig::ToString() const {
  char buf[1024];
  rtc::SimpleStringBuilder ss(buf);
  ss << "{codec_type: " << CodecTypeToPayloadString(codec_type);
  ss << ", content_type: ";
  switch (content_type) {
    case ContentType::kRealtimeVideo:
      ss << "kRealtimeVideo";
      break;
    case ContentType::kScreen:
      ss << "kScreenshare";
      break;
  }
  ss << ", frame_drop_enabled: " << frame_drop_enabled;
  ss << ", encoder_specific_settings: ";
  ss << (encoder_specific_settings != nullptr ? "(ptr)" : "NULL");

  ss << ", min_transmit_bitrate_bps: " << min_transmit_bitrate_bps;
  ss << '}';
  return ss.str();
}

VideoEncoderConfig::VideoEncoderConfig(const VideoEncoderConfig&) = default;

void VideoEncoderConfig::EncoderSpecificSettings::FillEncoderSpecificSettings(
    VideoCodec* codec) const {
  if (codec->codecType == kVideoCodecVP8) {
    FillVideoCodecVp8(codec->VP8());
  } else if (codec->codecType == kVideoCodecVP9) {
    FillVideoCodecVp9(codec->VP9());
  } else if (codec->codecType == kVideoCodecAV1) {
    FillVideoCodecAv1(codec->AV1());
  } else {
    RTC_DCHECK_NOTREACHED()
        << "Encoder specifics set/used for unknown codec type.";
  }
}

void VideoEncoderConfig::EncoderSpecificSettings::FillVideoCodecVp8(
    VideoCodecVP8* vp8_settings) const {
  RTC_DCHECK_NOTREACHED();
}

void VideoEncoderConfig::EncoderSpecificSettings::FillVideoCodecVp9(
    VideoCodecVP9* vp9_settings) const {
  RTC_DCHECK_NOTREACHED();
}

void VideoEncoderConfig::EncoderSpecificSettings::FillVideoCodecAv1(
    VideoCodecAV1* av1_settings) const {
  RTC_DCHECK_NOTREACHED();
}

VideoEncoderConfig::Vp8EncoderSpecificSettings::Vp8EncoderSpecificSettings(
    const VideoCodecVP8& specifics)
    : specifics_(specifics) {}

void VideoEncoderConfig::Vp8EncoderSpecificSettings::FillVideoCodecVp8(
    VideoCodecVP8* vp8_settings) const {
  *vp8_settings = specifics_;
}

VideoEncoderConfig::Vp9EncoderSpecificSettings::Vp9EncoderSpecificSettings(
    const VideoCodecVP9& specifics)
    : specifics_(specifics) {}

void VideoEncoderConfig::Vp9EncoderSpecificSettings::FillVideoCodecVp9(
    VideoCodecVP9* vp9_settings) const {
  *vp9_settings = specifics_;
}

VideoEncoderConfig::Av1EncoderSpecificSettings::Av1EncoderSpecificSettings(
    const VideoCodecAV1& specifics)
    : specifics_(specifics) {}

void VideoEncoderConfig::Av1EncoderSpecificSettings::FillVideoCodecAv1(
    VideoCodecAV1* av1_settings) const {
  *av1_settings = specifics_;
}
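
// Usage sketch (illustrative, not part of this file): the settings classes
// above implement a small double dispatch. A caller wraps a codec-specific
// struct and later copies it into a VideoCodec:
//
//   webrtc::VideoCodecVP9 vp9 = webrtc::VideoEncoder::GetDefaultVp9Settings();
//   rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
//       settings = rtc::make_ref_counted<
//           webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9);
//   webrtc::VideoCodec codec;
//   codec.codecType = webrtc::kVideoCodecVP9;
//   settings->FillEncoderSpecificSettings(&codec);  // Copies vp9 into codec.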

}  // namespace webrtc
224
TMessagesProj/jni/voip/webrtc/video/config/video_encoder_config.h
Normal file
@ -0,0 +1,224 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_CONFIG_VIDEO_ENCODER_CONFIG_H_
#define VIDEO_CONFIG_VIDEO_ENCODER_CONFIG_H_

#include <stddef.h>

#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/scoped_refptr.h"
#include "api/video/resolution.h"
#include "api/video_codecs/scalability_mode.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "rtc_base/ref_count.h"

namespace webrtc {

// The `VideoStream` struct describes a simulcast layer, or "stream".
struct VideoStream {
  VideoStream();
  ~VideoStream();
  VideoStream(const VideoStream& other);
  std::string ToString() const;

  // Width/Height in pixels.
  // This is the actual width and height used to configure the encoder,
  // which might be less than `requested_resolution` due to adaptation
  // or due to the source providing smaller frames than requested.
  size_t width;
  size_t height;

  // Frame rate in fps.
  int max_framerate;

  // Bitrate, in bps, for the stream.
  int min_bitrate_bps;
  int target_bitrate_bps;
  int max_bitrate_bps;

  // Scaling factor applied to the stream size.
  // `width` and `height` values are already scaled down.
  double scale_resolution_down_by;

  // Maximum Quantization Parameter to use when encoding the stream.
  int max_qp;

  // Determines the number of temporal layers that the stream should be
  // encoded with. This value should be greater than zero.
  // TODO(brandtr): This class is used both for configuring the encoder
  // (meaning that this field _must_ be set), and for signaling the app-level
  // encoder settings (meaning that the field _may_ be set). We should separate
  // this and remove this optional instead.
  absl::optional<size_t> num_temporal_layers;

  // The priority of this stream, to be used when allocating resources
  // between multiple streams.
  absl::optional<double> bitrate_priority;

  absl::optional<ScalabilityMode> scalability_mode;

  // If this stream is enabled by the user, or not.
  bool active;

  // An optional user-supplied max frame resolution that can be set
  // independently of the (adapted) VideoSource. This value is set from
  // RtpEncodingParameters::requested_resolution (i.e. used for signaling
  // app-level settings).
  //
  // The actual encode resolution is in `width` and `height`, which can be
  // lower than `requested_resolution`, e.g. if the source only provides a
  // lower resolution or if resource adaptation is active.
  absl::optional<Resolution> requested_resolution;
};

class VideoEncoderConfig {
 public:
  // These are reference counted to permit copying VideoEncoderConfig and to
  // be kept alive until all encoder_specific_settings go out of scope.
  // TODO(kthelgason): Consider removing the need for copying
  // VideoEncoderConfig and use absl::optional for encoder_specific_settings
  // instead.
  class EncoderSpecificSettings : public rtc::RefCountInterface {
   public:
    // TODO(pbos): Remove FillEncoderSpecificSettings as soon as VideoCodec is
    // not in use and encoder implementations ask for codec-specific structs
    // directly.
    void FillEncoderSpecificSettings(VideoCodec* codec_struct) const;

    virtual void FillVideoCodecVp8(VideoCodecVP8* vp8_settings) const;
    virtual void FillVideoCodecVp9(VideoCodecVP9* vp9_settings) const;
    virtual void FillVideoCodecAv1(VideoCodecAV1* av1_settings) const;

   private:
    ~EncoderSpecificSettings() override {}
    friend class VideoEncoderConfig;
  };

  class Vp8EncoderSpecificSettings : public EncoderSpecificSettings {
   public:
    explicit Vp8EncoderSpecificSettings(const VideoCodecVP8& specifics);
    void FillVideoCodecVp8(VideoCodecVP8* vp8_settings) const override;

   private:
    VideoCodecVP8 specifics_;
  };

  class Vp9EncoderSpecificSettings : public EncoderSpecificSettings {
   public:
    explicit Vp9EncoderSpecificSettings(const VideoCodecVP9& specifics);
    void FillVideoCodecVp9(VideoCodecVP9* vp9_settings) const override;

   private:
    VideoCodecVP9 specifics_;
  };

  class Av1EncoderSpecificSettings : public EncoderSpecificSettings {
   public:
    explicit Av1EncoderSpecificSettings(const VideoCodecAV1& specifics);
    void FillVideoCodecAv1(VideoCodecAV1* av1_settings) const override;

   private:
    VideoCodecAV1 specifics_;
  };

  enum class ContentType {
    kRealtimeVideo,
    kScreen,
  };

  class VideoStreamFactoryInterface : public rtc::RefCountInterface {
   public:
    // An implementation should return a std::vector<VideoStream> with the
    // wanted VideoStream settings for the given video resolution.
    // The size of the vector may not be larger than
    // `encoder_config.number_of_streams`.
    virtual std::vector<VideoStream> CreateEncoderStreams(
        int frame_width,
        int frame_height,
        const VideoEncoderConfig& encoder_config) = 0;

   protected:
    ~VideoStreamFactoryInterface() override {}
  };

  VideoEncoderConfig& operator=(VideoEncoderConfig&&) = default;
  VideoEncoderConfig& operator=(const VideoEncoderConfig&) = delete;

  // Mostly used by tests. Avoid creating copies if you can.
  VideoEncoderConfig Copy() const { return VideoEncoderConfig(*this); }

  VideoEncoderConfig();
  VideoEncoderConfig(VideoEncoderConfig&&);
  ~VideoEncoderConfig();
  std::string ToString() const;

  // TODO(bugs.webrtc.org/6883): Consolidate on one of these.
  VideoCodecType codec_type;
  SdpVideoFormat video_format;

  // Note: This factory can be unset, and VideoStreamEncoder will
  // then use the EncoderStreamFactory. The factory is only set by
  // tests.
  rtc::scoped_refptr<VideoStreamFactoryInterface> video_stream_factory;
  std::vector<SpatialLayer> spatial_layers;
  ContentType content_type;
  bool frame_drop_enabled;
  rtc::scoped_refptr<const EncoderSpecificSettings> encoder_specific_settings;

  // Padding will be used up to this bitrate regardless of the bitrate produced
  // by the encoder. Padding above what's actually produced by the encoder
  // helps maintain a higher bitrate estimate. Padding will however not be sent
  // unless the estimated bandwidth indicates that the link can handle it.
  int min_transmit_bitrate_bps;
  int max_bitrate_bps;
  // The bitrate priority used for all VideoStreams.
  double bitrate_priority;

  // The simulcast layer configurations set by the application for this video
  // sender. These are modified by the video_stream_factory before being
  // passed down to lower layers for the video encoding.
  // `simulcast_layers` is also used for configuring non-simulcast (when there
  // is a single VideoStream).
  // We have the same number of `simulcast_layers` as we have negotiated
  // encodings, for example 3 are used in both simulcast and legacy kSVC.
  std::vector<VideoStream> simulcast_layers;

  // Max number of encoded VideoStreams to produce.
  // This is the same as the number of encodings negotiated (i.e. SSRCs),
  // whether or not those encodings are `active`, except for when legacy kSVC
  // is used. In this case we have three SSRCs but `number_of_streams` is
  // changed to 1 to tell lower layers to limit the number of streams.
  size_t number_of_streams;

  // Legacy Google conference mode flag for simulcast screenshare.
  bool legacy_conference_mode;

  // Indicates whether quality scaling can be used or not.
  bool is_quality_scaling_allowed;

  // Maximum Quantization Parameter.
  // This value is fed into EncoderStreamFactory, which applies it to all
  // simulcast layers/spatial layers.
  int max_qp;

 private:
  // Access to the copy constructor is private to force use of the Copy()
  // method for those exceptional cases where we do use it.
  VideoEncoderConfig(const VideoEncoderConfig&);
};

}  // namespace webrtc

#endif  // VIDEO_CONFIG_VIDEO_ENCODER_CONFIG_H_
204
TMessagesProj/jni/voip/webrtc/video/decode_synchronizer.cc
Normal file
@ -0,0 +1,204 @@
/*
|
||||
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video/decode_synchronizer.h"
|
||||
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "api/sequence_checker.h"
|
||||
#include "api/units/time_delta.h"
|
||||
#include "api/units/timestamp.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/trace_event.h"
|
||||
#include "video/frame_decode_scheduler.h"
|
||||
#include "video/frame_decode_timing.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
DecodeSynchronizer::ScheduledFrame::ScheduledFrame(
|
||||
uint32_t rtp_timestamp,
|
||||
FrameDecodeTiming::FrameSchedule schedule,
|
||||
FrameDecodeScheduler::FrameReleaseCallback callback)
|
||||
: rtp_timestamp_(rtp_timestamp),
|
||||
schedule_(std::move(schedule)),
|
||||
callback_(std::move(callback)) {}
|
||||
|
||||
void DecodeSynchronizer::ScheduledFrame::RunFrameReleaseCallback() && {
|
||||
// Inspiration from Chromium base::OnceCallback. Move `*this` to a local
|
||||
// before execution to ensure internal state is cleared after callback
|
||||
// execution.
|
||||
auto sf = std::move(*this);
|
||||
std::move(sf.callback_)(sf.rtp_timestamp_, sf.schedule_.render_time);
|
||||
}
|
||||
|
||||
Timestamp DecodeSynchronizer::ScheduledFrame::LatestDecodeTime() const {
|
||||
return schedule_.latest_decode_time;
|
||||
}
|
||||
|
||||
DecodeSynchronizer::SynchronizedFrameDecodeScheduler::
|
||||
SynchronizedFrameDecodeScheduler(DecodeSynchronizer* sync)
|
||||
: sync_(sync) {
|
||||
RTC_DCHECK(sync_);
|
||||
}
|
||||
|
||||
DecodeSynchronizer::SynchronizedFrameDecodeScheduler::
|
||||
~SynchronizedFrameDecodeScheduler() {
|
||||
RTC_DCHECK(!next_frame_);
|
||||
RTC_DCHECK(stopped_);
|
||||
}
|
||||
|
||||
absl::optional<uint32_t>
|
||||
DecodeSynchronizer::SynchronizedFrameDecodeScheduler::ScheduledRtpTimestamp() {
|
||||
return next_frame_.has_value()
|
||||
? absl::make_optional(next_frame_->rtp_timestamp())
|
||||
: absl::nullopt;
|
||||
}
|
||||
|
||||
DecodeSynchronizer::ScheduledFrame
|
||||
DecodeSynchronizer::SynchronizedFrameDecodeScheduler::ReleaseNextFrame() {
|
||||
RTC_DCHECK(!stopped_);
|
||||
RTC_DCHECK(next_frame_);
|
||||
auto res = std::move(*next_frame_);
|
||||
next_frame_.reset();
|
||||
return res;
|
||||
}
|
||||
|
||||
Timestamp
|
||||
DecodeSynchronizer::SynchronizedFrameDecodeScheduler::LatestDecodeTime() {
|
||||
RTC_DCHECK(next_frame_);
|
||||
return next_frame_->LatestDecodeTime();
|
||||
}
|
||||
|
||||
void DecodeSynchronizer::SynchronizedFrameDecodeScheduler::ScheduleFrame(
|
||||
uint32_t rtp,
|
||||
FrameDecodeTiming::FrameSchedule schedule,
|
||||
FrameReleaseCallback cb) {
|
||||
RTC_DCHECK(!stopped_);
|
||||
RTC_DCHECK(!next_frame_) << "Can not schedule two frames at once.";
|
||||
  next_frame_ = ScheduledFrame(rtp, std::move(schedule), std::move(cb));
  sync_->OnFrameScheduled(this);
}

void DecodeSynchronizer::SynchronizedFrameDecodeScheduler::CancelOutstanding() {
  next_frame_.reset();
}

void DecodeSynchronizer::SynchronizedFrameDecodeScheduler::Stop() {
  if (stopped_) {
    return;
  }
  CancelOutstanding();
  stopped_ = true;
  sync_->RemoveFrameScheduler(this);
}

DecodeSynchronizer::DecodeSynchronizer(Clock* clock,
                                       Metronome* metronome,
                                       TaskQueueBase* worker_queue)
    : clock_(clock), worker_queue_(worker_queue), metronome_(metronome) {
  RTC_DCHECK(metronome_);
  RTC_DCHECK(worker_queue_);
}

DecodeSynchronizer::~DecodeSynchronizer() {
  RTC_DCHECK_RUN_ON(worker_queue_);
  RTC_CHECK(schedulers_.empty());
}

std::unique_ptr<FrameDecodeScheduler>
DecodeSynchronizer::CreateSynchronizedFrameScheduler() {
  TRACE_EVENT0("webrtc", __func__);
  RTC_DCHECK_RUN_ON(worker_queue_);
  auto scheduler = std::make_unique<SynchronizedFrameDecodeScheduler>(this);
  auto [it, inserted] = schedulers_.emplace(scheduler.get());
  // If this is the first `scheduler` added, start listening to the metronome.
  if (inserted && schedulers_.size() == 1) {
    RTC_DLOG(LS_VERBOSE) << "Listening to metronome";
    ScheduleNextTick();
  }

  return std::move(scheduler);
}

void DecodeSynchronizer::OnFrameScheduled(
    SynchronizedFrameDecodeScheduler* scheduler) {
  RTC_DCHECK_RUN_ON(worker_queue_);
  RTC_DCHECK(scheduler->ScheduledRtpTimestamp());

  Timestamp now = clock_->CurrentTime();
  Timestamp next_tick = expected_next_tick_;
  // If no tick has been registered yet, assume it will occur within one tick
  // period.
  if (next_tick.IsInfinite()) {
    next_tick = now + metronome_->TickPeriod();
  }

  // Release the frame right away if the decode time is too soon. Otherwise
  // the stream may fall behind too much.
  bool decode_before_next_tick =
      scheduler->LatestDecodeTime() <
      (next_tick - FrameDecodeTiming::kMaxAllowedFrameDelay);
  // Decode immediately if the decode time is in the past.
  bool decode_time_in_past = scheduler->LatestDecodeTime() < now;

  if (decode_before_next_tick || decode_time_in_past) {
    ScheduledFrame scheduled_frame = scheduler->ReleaseNextFrame();
    std::move(scheduled_frame).RunFrameReleaseCallback();
  }
}

void DecodeSynchronizer::RemoveFrameScheduler(
    SynchronizedFrameDecodeScheduler* scheduler) {
  TRACE_EVENT0("webrtc", __func__);
  RTC_DCHECK_RUN_ON(worker_queue_);
  RTC_DCHECK(scheduler);
  auto it = schedulers_.find(scheduler);
  if (it == schedulers_.end()) {
    return;
  }
  schedulers_.erase(it);
  // If there are no more schedulers active, stop listening for metronome
  // ticks.
  if (schedulers_.empty()) {
    expected_next_tick_ = Timestamp::PlusInfinity();
  }
}

void DecodeSynchronizer::ScheduleNextTick() {
  RTC_DCHECK_RUN_ON(worker_queue_);
  if (tick_scheduled_) {
    return;
  }
  tick_scheduled_ = true;
  metronome_->RequestCallOnNextTick(
      SafeTask(safety_.flag(), [this] { OnTick(); }));
}

void DecodeSynchronizer::OnTick() {
  TRACE_EVENT0("webrtc", __func__);
  RTC_DCHECK_RUN_ON(worker_queue_);
  tick_scheduled_ = false;
  expected_next_tick_ = clock_->CurrentTime() + metronome_->TickPeriod();

  for (auto* scheduler : schedulers_) {
    if (scheduler->ScheduledRtpTimestamp() &&
        scheduler->LatestDecodeTime() < expected_next_tick_) {
      auto scheduled_frame = scheduler->ReleaseNextFrame();
      std::move(scheduled_frame).RunFrameReleaseCallback();
    }
  }

  if (!schedulers_.empty())
    ScheduleNextTick();
}

}  // namespace webrtc
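The release decision implemented by OnFrameScheduled() and OnTick() above can be summarized in a few lines. The following is a standalone sketch with illustrative names (TickDecision, DecideRelease are not WebRTC identifiers): a frame is released immediately if its latest decode time is already in the past or lands before the next tick minus the allowed delay; otherwise it waits for the tick whose interval contains the decode time.

#include <cstdint>

struct TickDecision {
  bool release_now;       // Release on the current code path.
  int64_t ticks_to_wait;  // Otherwise, number of ticks until release.
};

TickDecision DecideRelease(int64_t now_ms,
                           int64_t max_decode_time_ms,
                           int64_t next_tick_ms,
                           int64_t tick_period_ms,
                           int64_t max_allowed_delay_ms) {
  // Decode time already passed, or it lands before the next tick can fire.
  if (max_decode_time_ms < now_ms ||
      max_decode_time_ms < next_tick_ms - max_allowed_delay_ms) {
    return {true, 0};
  }
  // Wait for the first tick at or after the decode time. For example, a
  // decode time 30ms past the next tick with a 20ms tick period waits 2 ticks.
  int64_t ticks = 1 + (max_decode_time_ms - next_tick_ms) / tick_period_ms;
  return {false, ticks};
}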
138
TMessagesProj/jni/voip/webrtc/video/decode_synchronizer.h
Normal file
@@ -0,0 +1,138 @@
/*
 * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_DECODE_SYNCHRONIZER_H_
#define VIDEO_DECODE_SYNCHRONIZER_H_

#include <stdint.h>

#include <functional>
#include <memory>
#include <set>
#include <utility>

#include "absl/types/optional.h"
#include "api/metronome/metronome.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/units/timestamp.h"
#include "rtc_base/checks.h"
#include "rtc_base/thread_annotations.h"
#include "video/frame_decode_scheduler.h"
#include "video/frame_decode_timing.h"

namespace webrtc {

// DecodeSynchronizer synchronizes the frame scheduling by coalescing decoding
// on the metronome.
//
// A video receive stream can use the DecodeSynchronizer by receiving a
// FrameDecodeScheduler instance with `CreateSynchronizedFrameScheduler()`.
// This instance implements FrameDecodeScheduler and can be used as a normal
// scheduler. This instance is owned by the receive stream, and is borrowed by
// the DecodeSynchronizer. The DecodeSynchronizer will stop borrowing the
// instance when `FrameDecodeScheduler::Stop()` is called, after which the
// scheduler may be destroyed by the receive stream.
//
// When a frame is scheduled for decode by a receive stream using the
// DecodeSynchronizer, it will instead be executed on the metronome during the
// tick interval where `max_decode_time` occurs. For example, if a frame is
// scheduled for decode in 50ms and the tick interval is 20ms, then the frame
// will be released for decoding in 2 ticks.
//
// In the case where the decode time is in the past, or must occur before the
// next metronome tick, the frame will be released right away, allowing a
// delayed stream to catch up quickly.
//
// DecodeSynchronizer is single-threaded - all method calls must run on the
// `worker_queue_`.
class DecodeSynchronizer {
 public:
  DecodeSynchronizer(Clock* clock,
                     Metronome* metronome,
                     TaskQueueBase* worker_queue);
  ~DecodeSynchronizer();
  DecodeSynchronizer(const DecodeSynchronizer&) = delete;
  DecodeSynchronizer& operator=(const DecodeSynchronizer&) = delete;

  std::unique_ptr<FrameDecodeScheduler> CreateSynchronizedFrameScheduler();

 private:
  class ScheduledFrame {
   public:
    ScheduledFrame(uint32_t rtp_timestamp,
                   FrameDecodeTiming::FrameSchedule schedule,
                   FrameDecodeScheduler::FrameReleaseCallback callback);

    // Disallow copy since `callback` should only be moved.
    ScheduledFrame(const ScheduledFrame&) = delete;
    ScheduledFrame& operator=(const ScheduledFrame&) = delete;
    ScheduledFrame(ScheduledFrame&&) = default;
    ScheduledFrame& operator=(ScheduledFrame&&) = default;

    // Executes `callback_`.
    void RunFrameReleaseCallback() &&;

    uint32_t rtp_timestamp() const { return rtp_timestamp_; }
    Timestamp LatestDecodeTime() const;

   private:
    uint32_t rtp_timestamp_;
    FrameDecodeTiming::FrameSchedule schedule_;
    FrameDecodeScheduler::FrameReleaseCallback callback_;
  };

  class SynchronizedFrameDecodeScheduler : public FrameDecodeScheduler {
   public:
    explicit SynchronizedFrameDecodeScheduler(DecodeSynchronizer* sync);
    ~SynchronizedFrameDecodeScheduler() override;

    // Releases the outstanding frame for decoding. This invalidates
    // `next_frame_`. There must be a frame scheduled.
    ScheduledFrame ReleaseNextFrame();

    // Returns `next_frame_.schedule.max_decode_time`. There must be a frame
    // scheduled when this is called.
    Timestamp LatestDecodeTime();

    // FrameDecodeScheduler implementation.
    absl::optional<uint32_t> ScheduledRtpTimestamp() override;
    void ScheduleFrame(uint32_t rtp,
                       FrameDecodeTiming::FrameSchedule schedule,
                       FrameReleaseCallback cb) override;
    void CancelOutstanding() override;
    void Stop() override;

   private:
    DecodeSynchronizer* sync_;
    absl::optional<ScheduledFrame> next_frame_;
    bool stopped_ = false;
  };

  void OnFrameScheduled(SynchronizedFrameDecodeScheduler* scheduler);
  void RemoveFrameScheduler(SynchronizedFrameDecodeScheduler* scheduler);

  void ScheduleNextTick();
  void OnTick();

  Clock* const clock_;
  TaskQueueBase* const worker_queue_;
  Metronome* const metronome_;

  Timestamp expected_next_tick_ = Timestamp::PlusInfinity();
  std::set<SynchronizedFrameDecodeScheduler*> schedulers_
      RTC_GUARDED_BY(worker_queue_);
  bool tick_scheduled_ = false;
  ScopedTaskSafetyDetached safety_;
};

}  // namespace webrtc

#endif  // VIDEO_DECODE_SYNCHRONIZER_H_
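For orientation, here is a hedged usage sketch of the class declared above. The clock, metronome and queue objects are assumed to be supplied by the embedding application; only the DecodeSynchronizer and FrameDecodeScheduler calls come from the headers in this commit.

#include <memory>

#include "video/decode_synchronizer.h"

void SketchDecodeSync(webrtc::Clock* clock,
                      webrtc::Metronome* metronome,
                      webrtc::TaskQueueBase* worker_queue) {
  // All calls below must run on `worker_queue`.
  webrtc::DecodeSynchronizer sync(clock, metronome, worker_queue);

  // Each receive stream asks for its own scheduler; decodes scheduled through
  // it are coalesced onto metronome ticks.
  std::unique_ptr<webrtc::FrameDecodeScheduler> scheduler =
      sync.CreateSynchronizedFrameScheduler();

  // ... frames are scheduled via scheduler->ScheduleFrame(rtp, schedule, cb),
  // using a FrameDecodeTiming::FrameSchedule computed by the receive stream.

  // Stop() must be called before the scheduler is destroyed, and every
  // scheduler must be stopped before `sync` goes away (the destructor
  // RTC_CHECKs that no schedulers remain registered).
  scheduler->Stop();
}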
360
TMessagesProj/jni/voip/webrtc/video/encoder_bitrate_adjuster.cc
Normal file
@@ -0,0 +1,360 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/encoder_bitrate_adjuster.h"

#include <algorithm>
#include <memory>
#include <vector>

#include "api/field_trials_view.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"

namespace webrtc {
namespace {
// Helper struct with metadata for a single spatial layer.
struct LayerRateInfo {
  double link_utilization_factor = 0.0;
  double media_utilization_factor = 0.0;
  DataRate target_rate = DataRate::Zero();

  DataRate WantedOvershoot() const {
    // If there is headroom, allow bitrate to go up to the media rate limit.
    // Still limit media utilization to 1.0, so we don't overshoot over long
    // runs even if we have headroom.
    const double max_media_utilization =
        std::max(1.0, media_utilization_factor);
    if (link_utilization_factor > max_media_utilization) {
      return (link_utilization_factor - max_media_utilization) * target_rate;
    }
    return DataRate::Zero();
  }
};
}  // namespace

constexpr int64_t EncoderBitrateAdjuster::kWindowSizeMs;
constexpr size_t EncoderBitrateAdjuster::kMinFramesSinceLayoutChange;
constexpr double EncoderBitrateAdjuster::kDefaultUtilizationFactor;

EncoderBitrateAdjuster::EncoderBitrateAdjuster(
    const VideoCodec& codec_settings,
    const FieldTrialsView& field_trials)
    : utilize_bandwidth_headroom_(
          RateControlSettings::ParseFromKeyValueConfig(&field_trials)
              .BitrateAdjusterCanUseNetworkHeadroom()),
      frames_since_layout_change_(0),
      min_bitrates_bps_{},
      frame_size_pixels_{},
      codec_(codec_settings.codecType),
      codec_mode_(codec_settings.mode) {
  // TODO(https://crbug.com/webrtc/14891): If we want to support simulcast of
  // SVC streams, EncoderBitrateAdjuster needs to be updated to care about both
  // `simulcastStream` and `spatialLayers` at the same time.
  if (codec_settings.codecType == VideoCodecType::kVideoCodecVP9 &&
      codec_settings.numberOfSimulcastStreams <= 1) {
    for (size_t si = 0; si < codec_settings.VP9().numberOfSpatialLayers; ++si) {
      if (codec_settings.spatialLayers[si].active) {
        min_bitrates_bps_[si] =
            std::max(codec_settings.minBitrate * 1000,
                     codec_settings.spatialLayers[si].minBitrate * 1000);
        frame_size_pixels_[si] = codec_settings.spatialLayers[si].width *
                                 codec_settings.spatialLayers[si].height;
      }
    }
  } else {
    for (size_t si = 0; si < codec_settings.numberOfSimulcastStreams; ++si) {
      if (codec_settings.simulcastStream[si].active) {
        min_bitrates_bps_[si] =
            std::max(codec_settings.minBitrate * 1000,
                     codec_settings.simulcastStream[si].minBitrate * 1000);
        frame_size_pixels_[si] = codec_settings.simulcastStream[si].width *
                                 codec_settings.simulcastStream[si].height;
      }
    }
  }
}

EncoderBitrateAdjuster::~EncoderBitrateAdjuster() = default;

VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
    const VideoEncoder::RateControlParameters& rates) {
  current_rate_control_parameters_ = rates;

  // First check that overshoot detectors exist, and store per
  // simulcast/spatial layer how many active temporal layers we have.
  size_t active_tls[kMaxSpatialLayers] = {};
  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    active_tls[si] = 0;
    for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
      // A layer is enabled iff it has both a positive bitrate and a positive
      // framerate target.
      if (rates.bitrate.GetBitrate(si, ti) > 0 &&
          current_fps_allocation_[si].size() > ti &&
          current_fps_allocation_[si][ti] > 0) {
        ++active_tls[si];
        if (!overshoot_detectors_[si][ti]) {
          overshoot_detectors_[si][ti] =
              std::make_unique<EncoderOvershootDetector>(
                  kWindowSizeMs, codec_,
                  codec_mode_ == VideoCodecMode::kScreensharing);
          frames_since_layout_change_ = 0;
        }
      } else if (overshoot_detectors_[si][ti]) {
        // Layer removed, destroy overshoot detector.
        overshoot_detectors_[si][ti].reset();
        frames_since_layout_change_ = 0;
      }
    }
  }

  // Next poll the overshoot detectors and populate the adjusted allocation.
  const int64_t now_ms = rtc::TimeMillis();
  VideoBitrateAllocation adjusted_allocation;
  std::vector<LayerRateInfo> layer_infos;
  DataRate wanted_overshoot_sum = DataRate::Zero();

  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    layer_infos.emplace_back();
    LayerRateInfo& layer_info = layer_infos.back();

    layer_info.target_rate =
        DataRate::BitsPerSec(rates.bitrate.GetSpatialLayerSum(si));

    // Adjustment is done per simulcast/spatial layer only (not per temporal
    // layer).
    if (frames_since_layout_change_ < kMinFramesSinceLayoutChange) {
      layer_info.link_utilization_factor = kDefaultUtilizationFactor;
      layer_info.media_utilization_factor = kDefaultUtilizationFactor;
    } else if (active_tls[si] == 0 ||
               layer_info.target_rate == DataRate::Zero()) {
      // No signaled temporal layers, or no bitrate set. Could either be an
      // unused simulcast/spatial layer or bitrate dynamic mode; pass the
      // bitrate through without any change.
      layer_info.link_utilization_factor = 1.0;
      layer_info.media_utilization_factor = 1.0;
    } else if (active_tls[si] == 1) {
      // A single active temporal layer. This might mean a single layer, or
      // that the encoder does not support temporal layers. Merge target
      // bitrates for this simulcast/spatial layer.
      RTC_DCHECK(overshoot_detectors_[si][0]);
      layer_info.link_utilization_factor =
          overshoot_detectors_[si][0]
              ->GetNetworkRateUtilizationFactor(now_ms)
              .value_or(kDefaultUtilizationFactor);
      layer_info.media_utilization_factor =
          overshoot_detectors_[si][0]
              ->GetMediaRateUtilizationFactor(now_ms)
              .value_or(kDefaultUtilizationFactor);
    } else if (layer_info.target_rate > DataRate::Zero()) {
      // Multiple temporal layers enabled for this simulcast/spatial layer.
      // Update the rate for each of them and make a weighted average of the
      // utilization factors, with the bitrate fraction used as the weight.
      // If any layer is missing a utilization factor, fall back to default.
      layer_info.link_utilization_factor = 0.0;
      layer_info.media_utilization_factor = 0.0;
      for (size_t ti = 0; ti < active_tls[si]; ++ti) {
        RTC_DCHECK(overshoot_detectors_[si][ti]);
        const absl::optional<double> ti_link_utilization_factor =
            overshoot_detectors_[si][ti]->GetNetworkRateUtilizationFactor(
                now_ms);
        const absl::optional<double> ti_media_utilization_factor =
            overshoot_detectors_[si][ti]->GetMediaRateUtilizationFactor(now_ms);
        if (!ti_link_utilization_factor || !ti_media_utilization_factor) {
          layer_info.link_utilization_factor = kDefaultUtilizationFactor;
          layer_info.media_utilization_factor = kDefaultUtilizationFactor;
          break;
        }
        const double weight =
            static_cast<double>(rates.bitrate.GetBitrate(si, ti)) /
            layer_info.target_rate.bps();
        layer_info.link_utilization_factor +=
            weight * ti_link_utilization_factor.value();
        layer_info.media_utilization_factor +=
            weight * ti_media_utilization_factor.value();
      }
    } else {
      RTC_DCHECK_NOTREACHED();
    }

    if (layer_info.link_utilization_factor < 1.0) {
      // TODO(sprang): Consider checking underuse and allowing it to cancel
      // some potential overuse by other streams.

      // Don't boost the target bitrate if the encoder is under-using.
      layer_info.link_utilization_factor = 1.0;
    } else {
      // Don't reduce the encoder target below 50%, in which case the frame
      // dropper should kick in instead.
      layer_info.link_utilization_factor =
          std::min(layer_info.link_utilization_factor, 2.0);

      // Keep track of the sum of the desired overshoot bitrate.
      wanted_overshoot_sum += layer_info.WantedOvershoot();
    }
  }

  // Available link headroom that can be used to fill wanted overshoot.
  DataRate available_headroom = DataRate::Zero();
  if (utilize_bandwidth_headroom_) {
    available_headroom = rates.bandwidth_allocation -
                         DataRate::BitsPerSec(rates.bitrate.get_sum_bps());
  }

  // All wanted overshoots are satisfied in the same proportion based on
  // available headroom.
  const double granted_overshoot_ratio =
      wanted_overshoot_sum == DataRate::Zero()
          ? 0.0
          : std::min(1.0, available_headroom.bps<double>() /
                              wanted_overshoot_sum.bps());

  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    LayerRateInfo& layer_info = layer_infos[si];
    double utilization_factor = layer_info.link_utilization_factor;
    DataRate allowed_overshoot =
        granted_overshoot_ratio * layer_info.WantedOvershoot();
    if (allowed_overshoot > DataRate::Zero()) {
      // Pretend the target bitrate is higher by the allowed overshoot.
      // Since utilization_factor = actual_bitrate / target_bitrate, this can
      // be done by multiplying by old_target_bitrate / new_target_bitrate.
      utilization_factor *= layer_info.target_rate.bps<double>() /
                            (allowed_overshoot.bps<double>() +
                             layer_info.target_rate.bps<double>());
    }

    if (min_bitrates_bps_[si] > 0 &&
        layer_info.target_rate > DataRate::Zero() &&
        DataRate::BitsPerSec(min_bitrates_bps_[si]) < layer_info.target_rate) {
      // Make sure the rate adjuster doesn't push the target bitrate below the
      // minimum.
      utilization_factor =
          std::min(utilization_factor, layer_info.target_rate.bps<double>() /
                                           min_bitrates_bps_[si]);
    }

    if (layer_info.target_rate > DataRate::Zero()) {
      RTC_LOG(LS_VERBOSE)
          << "Utilization factors for simulcast/spatial index " << si
          << ": link = " << layer_info.link_utilization_factor
          << ", media = " << layer_info.media_utilization_factor
          << ", wanted overshoot = " << layer_info.WantedOvershoot().bps()
          << " bps, available headroom = " << available_headroom.bps()
          << " bps, total utilization factor = " << utilization_factor;
    }

    // Populate the adjusted allocation with the determined utilization factor.
    if (active_tls[si] == 1 &&
        layer_info.target_rate >
            DataRate::BitsPerSec(rates.bitrate.GetBitrate(si, 0))) {
      // The bitrate allocation indicates temporal layer usage, but the
      // encoder does not seem to support it. Pipe all bitrate into a single
      // overshoot detector.
      uint32_t adjusted_layer_bitrate_bps =
          std::min(static_cast<uint32_t>(
                       layer_info.target_rate.bps() / utilization_factor + 0.5),
                   layer_info.target_rate.bps<uint32_t>());
      adjusted_allocation.SetBitrate(si, 0, adjusted_layer_bitrate_bps);
    } else {
      for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
        if (rates.bitrate.HasBitrate(si, ti)) {
          uint32_t adjusted_layer_bitrate_bps = std::min(
              static_cast<uint32_t>(
                  rates.bitrate.GetBitrate(si, ti) / utilization_factor + 0.5),
              rates.bitrate.GetBitrate(si, ti));
          adjusted_allocation.SetBitrate(si, ti, adjusted_layer_bitrate_bps);
        }
      }
    }

    // In case of rounding errors, add bitrate to TL0 until the min bitrate
    // constraint has been met.
    const uint32_t adjusted_spatial_layer_sum =
        adjusted_allocation.GetSpatialLayerSum(si);
    if (layer_info.target_rate > DataRate::Zero() &&
        adjusted_spatial_layer_sum < min_bitrates_bps_[si]) {
      adjusted_allocation.SetBitrate(si, 0,
                                     adjusted_allocation.GetBitrate(si, 0) +
                                         min_bitrates_bps_[si] -
                                         adjusted_spatial_layer_sum);
    }

    // Update all detectors with the new adjusted bitrate targets.
    for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
      const uint32_t layer_bitrate_bps = adjusted_allocation.GetBitrate(si, ti);
      // The overshoot detector may not exist, e.g. for the ScreenshareLayers
      // case.
      if (layer_bitrate_bps > 0 && overshoot_detectors_[si][ti]) {
        // The number of frames in this layer alone is not cumulative, so
        // subtract the fps of any lower temporal layer.
        const double fps_fraction =
            static_cast<double>(
                current_fps_allocation_[si][ti] -
                (ti == 0 ? 0 : current_fps_allocation_[si][ti - 1])) /
            VideoEncoder::EncoderInfo::kMaxFramerateFraction;

        if (fps_fraction <= 0.0) {
          RTC_LOG(LS_WARNING)
              << "Encoder config has temporal layer with non-zero bitrate "
                 "allocation but zero framerate allocation.";
          continue;
        }

        overshoot_detectors_[si][ti]->SetTargetRate(
            DataRate::BitsPerSec(layer_bitrate_bps),
            fps_fraction * rates.framerate_fps, now_ms);
      }
    }
  }

  // Since no simulcast/spatial layers or streams are toggled by the
  // adjustment, the bw-limited flag stays the same.
  adjusted_allocation.set_bw_limited(rates.bitrate.is_bw_limited());

  return adjusted_allocation;
}

void EncoderBitrateAdjuster::OnEncoderInfo(
    const VideoEncoder::EncoderInfo& encoder_info) {
  // Copy the allocation into the current state and re-allocate.
  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    current_fps_allocation_[si] = encoder_info.fps_allocation[si];
    if (frame_size_pixels_[si] > 0) {
      if (auto bwlimit = encoder_info.GetEncoderBitrateLimitsForResolution(
              frame_size_pixels_[si])) {
        min_bitrates_bps_[si] = bwlimit->min_bitrate_bps;
      }
    }
  }

  // Trigger re-allocation so that overshoot detectors have correct targets.
  AdjustRateAllocation(current_rate_control_parameters_);
}

void EncoderBitrateAdjuster::OnEncodedFrame(DataSize size,
                                            int stream_index,
                                            int temporal_index) {
  ++frames_since_layout_change_;
  // Detectors may not exist, for instance if ScreenshareLayers is used.
  auto& detector = overshoot_detectors_[stream_index][temporal_index];
  if (detector) {
    detector->OnEncodedFrame(size.bytes(), rtc::TimeMillis());
  }
}

void EncoderBitrateAdjuster::Reset() {
  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
      overshoot_detectors_[si][ti].reset();
    }
  }
  // Call AdjustRateAllocation() with the last known bitrate allocation, so
  // that the appropriate overshoot detectors are immediately re-created.
  AdjustRateAllocation(current_rate_control_parameters_);
}

}  // namespace webrtc
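The core arithmetic of AdjustRateAllocation() can be checked with a small worked example. The numbers below are made up for illustration; the clamping mirrors the 1.0/2.0 bounds in the file above, where the adjusted target is the original target divided by the measured utilization factor, never exceeding the original target.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t target_bps = 1'000'000;  // 1 Mbps layer target.
  double utilization = 1.25;              // Encoder overshoots by 25%.

  // No boost on underuse, at most a 2x reduction on overuse.
  utilization = std::min(std::max(utilization, 1.0), 2.0);

  const uint32_t adjusted_bps =
      std::min(static_cast<uint32_t>(target_bps / utilization + 0.5),
               target_bps);
  std::printf("adjusted target: %u bps\n", adjusted_bps);  // 800000 bps.
  return 0;
}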
@@ -0,0 +1,91 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ENCODER_BITRATE_ADJUSTER_H_
#define VIDEO_ENCODER_BITRATE_ADJUSTER_H_

#include <memory>

#include "absl/container/inlined_vector.h"
#include "api/field_trials_view.h"
#include "api/video/encoded_image.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video_codecs/video_encoder.h"
#include "video/encoder_overshoot_detector.h"

namespace webrtc {

class EncoderBitrateAdjuster {
 public:
  // Size of the sliding window used to track overshoot rate.
  static constexpr int64_t kWindowSizeMs = 3000;
  // Minimum number of frames since the last layout change required to trust
  // the overshoot statistics. Otherwise falls back to default utilization.
  // By layout change, we mean any simulcast/spatial/temporal layer being
  // either enabled or disabled.
  static constexpr size_t kMinFramesSinceLayoutChange = 30;
  // The default utilization, before reliable metrics are available, is set to
  // 20% overshoot. This is conservative so that badly misbehaving encoders
  // don't build too much queue at the very start.
  static constexpr double kDefaultUtilizationFactor = 1.2;

  EncoderBitrateAdjuster(const VideoCodec& codec_settings,
                         const FieldTrialsView& field_trials);
  ~EncoderBitrateAdjuster();

  // Adjusts the given rate allocation to make it paceable within the target
  // rates.
  VideoBitrateAllocation AdjustRateAllocation(
      const VideoEncoder::RateControlParameters& rates);

  // Updates the overshoot detectors with data about the encoder, specifically
  // about the temporal layer frame rate allocation.
  void OnEncoderInfo(const VideoEncoder::EncoderInfo& encoder_info);

  // Updates the overshoot detectors according to the encoded image size.
  // `stream_index` is the spatial or simulcast index.
  // TODO(https://crbug.com/webrtc/14891): If we want to support a mix of
  // simulcast and SVC we'll also need to consider the case where we have both
  // simulcast and spatial indices.
  void OnEncodedFrame(DataSize size, int stream_index, int temporal_index);

  void Reset();

 private:
  const bool utilize_bandwidth_headroom_;

  VideoEncoder::RateControlParameters current_rate_control_parameters_;
  // FPS allocation of temporal layers, per simulcast/spatial layer.
  // Represented as a Q8 fraction; 0 = 0%, 255 = 100%. See
  // VideoEncoder::EncoderInfo::fps_allocation.
  absl::InlinedVector<uint8_t, kMaxTemporalStreams>
      current_fps_allocation_[kMaxSpatialLayers];

  // Frames since the layout was last changed, meaning any simulcast, spatial
  // or temporal layer was either disabled or enabled.
  size_t frames_since_layout_change_;
  std::unique_ptr<EncoderOvershootDetector>
      overshoot_detectors_[kMaxSpatialLayers][kMaxTemporalStreams];

  // Minimum bitrates allowed, per spatial layer.
  uint32_t min_bitrates_bps_[kMaxSpatialLayers];

  // Size in pixels of each spatial layer.
  uint32_t frame_size_pixels_[kMaxSpatialLayers];

  // Codec type used for encoding.
  VideoCodecType codec_;

  // Codec mode: { kRealtimeVideo, kScreensharing }.
  VideoCodecMode codec_mode_;
};

}  // namespace webrtc

#endif  // VIDEO_ENCODER_BITRATE_ADJUSTER_H_
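A hedged sketch of the calling pattern the header above suggests. The codec settings, field trials, rate parameters and encoder info are assumed to be prepared elsewhere; the helper name SketchBitrateAdjuster and the sample frame size are illustrative only.

#include "api/units/data_size.h"
#include "video/encoder_bitrate_adjuster.h"

void SketchBitrateAdjuster(
    const webrtc::VideoCodec& codec_settings,
    const webrtc::FieldTrialsView& field_trials,
    const webrtc::VideoEncoder::RateControlParameters& rates,
    const webrtc::VideoEncoder::EncoderInfo& info) {
  webrtc::EncoderBitrateAdjuster adjuster(codec_settings, field_trials);

  // Tell the adjuster about the temporal-layer fps split before trusting its
  // overshoot statistics.
  adjuster.OnEncoderInfo(info);

  // Feed it the wanted rates; apply the returned (possibly reduced)
  // allocation to the encoder instead of the raw one.
  webrtc::VideoBitrateAllocation adjusted =
      adjuster.AdjustRateAllocation(rates);

  // For every frame the encoder outputs, report its size so the overshoot
  // detectors can update their utilization estimates.
  adjuster.OnEncodedFrame(webrtc::DataSize::Bytes(1200), /*stream_index=*/0,
                          /*temporal_index=*/0);
  (void)adjusted;
}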
@@ -0,0 +1,281 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/encoder_overshoot_detector.h"

#include <algorithm>
#include <cmath>
#include <string>

#include "system_wrappers/include/metrics.h"

namespace webrtc {
namespace {
// The buffer level for media-rate utilization is allowed to go below zero,
// down to
// -(`kMaxMediaUnderrunFrames` / `target_framerate_fps_`) * `target_bitrate_`.
static constexpr double kMaxMediaUnderrunFrames = 5.0;
}  // namespace

EncoderOvershootDetector::EncoderOvershootDetector(int64_t window_size_ms,
                                                   VideoCodecType codec,
                                                   bool is_screenshare)
    : window_size_ms_(window_size_ms),
      time_last_update_ms_(-1),
      sum_network_utilization_factors_(0.0),
      sum_media_utilization_factors_(0.0),
      target_bitrate_(DataRate::Zero()),
      target_framerate_fps_(0),
      network_buffer_level_bits_(0),
      media_buffer_level_bits_(0),
      codec_(codec),
      is_screenshare_(is_screenshare),
      frame_count_(0),
      sum_diff_kbps_squared_(0),
      sum_overshoot_percent_(0) {}

EncoderOvershootDetector::~EncoderOvershootDetector() {
  UpdateHistograms();
}

void EncoderOvershootDetector::SetTargetRate(DataRate target_bitrate,
                                             double target_framerate_fps,
                                             int64_t time_ms) {
  // First leak bits according to the previous target rate.
  if (target_bitrate_ != DataRate::Zero()) {
    LeakBits(time_ms);
  } else if (target_bitrate != DataRate::Zero()) {
    // Stream was just enabled, reset state.
    time_last_update_ms_ = time_ms;
    utilization_factors_.clear();
    sum_network_utilization_factors_ = 0.0;
    sum_media_utilization_factors_ = 0.0;
    network_buffer_level_bits_ = 0;
    media_buffer_level_bits_ = 0;
  }

  target_bitrate_ = target_bitrate;
  target_framerate_fps_ = target_framerate_fps;
}

void EncoderOvershootDetector::OnEncodedFrame(size_t bytes, int64_t time_ms) {
  // Leak bits from the virtual pacer buffer, according to the current target
  // bitrate.
  LeakBits(time_ms);

  const int64_t frame_size_bits = bytes * 8;
  // Ideal size of a frame given the current rates.
  const int64_t ideal_frame_size_bits = IdealFrameSizeBits();
  if (ideal_frame_size_bits == 0) {
    // Frame without updated bitrate and/or framerate, ignore it.
    return;
  }

  const double network_utilization_factor =
      HandleEncodedFrame(frame_size_bits, ideal_frame_size_bits, time_ms,
                         &network_buffer_level_bits_);
  const double media_utilization_factor =
      HandleEncodedFrame(frame_size_bits, ideal_frame_size_bits, time_ms,
                         &media_buffer_level_bits_);

  sum_network_utilization_factors_ += network_utilization_factor;
  sum_media_utilization_factors_ += media_utilization_factor;

  // Calculate the bitrate diff in kbps.
  int64_t diff_kbits = (frame_size_bits - ideal_frame_size_bits) / 1000;
  sum_diff_kbps_squared_ += diff_kbits * diff_kbits;
  sum_overshoot_percent_ += diff_kbits * 100 * 1000 / ideal_frame_size_bits;
  ++frame_count_;

  utilization_factors_.emplace_back(network_utilization_factor,
                                    media_utilization_factor, time_ms);
}

double EncoderOvershootDetector::HandleEncodedFrame(
    size_t frame_size_bits,
    int64_t ideal_frame_size_bits,
    int64_t time_ms,
    int64_t* buffer_level_bits) const {
  // Add the new frame to the buffer level. If doing so exceeds the ideal
  // buffer size, penalize this frame, but cap the overshoot to the current
  // buffer level rather than the size of this frame. This is done so that a
  // single large frame is not penalized if the encoder afterwards compensates
  // by dropping frames and/or reducing frame size. If, however, a large frame
  // is followed by more data, we cannot pace that next frame out within one
  // frame space.
  const int64_t bitsum = frame_size_bits + *buffer_level_bits;
  int64_t overshoot_bits = 0;
  if (bitsum > ideal_frame_size_bits) {
    overshoot_bits =
        std::min(*buffer_level_bits, bitsum - ideal_frame_size_bits);
  }

  // Add an entry for the (over) utilization for this frame. The factor is
  // capped at 1.0 so that we don't risk overshooting on sudden changes.
  double utilization_factor;
  if (utilization_factors_.empty()) {
    // First frame, cannot estimate overshoot based on a previous one, so for
    // this particular frame just look at its size vs the ideal size.
    utilization_factor = std::max(
        1.0, static_cast<double>(frame_size_bits) / ideal_frame_size_bits);
  } else {
    utilization_factor =
        1.0 + (static_cast<double>(overshoot_bits) / ideal_frame_size_bits);
  }

  // Remove the overshot bits from the virtual buffer so we don't penalize
  // those bits multiple times.
  *buffer_level_bits -= overshoot_bits;
  *buffer_level_bits += frame_size_bits;

  return utilization_factor;
}

absl::optional<double>
EncoderOvershootDetector::GetNetworkRateUtilizationFactor(int64_t time_ms) {
  CullOldUpdates(time_ms);

  // No data points within the window, return.
  if (utilization_factors_.empty()) {
    return absl::nullopt;
  }

  // TODO(sprang): Consider changing from the arithmetic mean to some other
  // function such as the 90th percentile.
  return sum_network_utilization_factors_ / utilization_factors_.size();
}

absl::optional<double> EncoderOvershootDetector::GetMediaRateUtilizationFactor(
    int64_t time_ms) {
  CullOldUpdates(time_ms);

  // No data points within the window, return.
  if (utilization_factors_.empty()) {
    return absl::nullopt;
  }

  return sum_media_utilization_factors_ / utilization_factors_.size();
}

void EncoderOvershootDetector::Reset() {
  UpdateHistograms();
  sum_diff_kbps_squared_ = 0;
  frame_count_ = 0;
  sum_overshoot_percent_ = 0;
  time_last_update_ms_ = -1;
  utilization_factors_.clear();
  target_bitrate_ = DataRate::Zero();
  sum_network_utilization_factors_ = 0.0;
  sum_media_utilization_factors_ = 0.0;
  target_framerate_fps_ = 0.0;
  network_buffer_level_bits_ = 0;
  media_buffer_level_bits_ = 0;
}

int64_t EncoderOvershootDetector::IdealFrameSizeBits() const {
  if (target_framerate_fps_ <= 0 || target_bitrate_ == DataRate::Zero()) {
    return 0;
  }

  // The current ideal frame size, based on the current target bitrate.
  return static_cast<int64_t>(
      (target_bitrate_.bps() + target_framerate_fps_ / 2) /
      target_framerate_fps_);
}

void EncoderOvershootDetector::LeakBits(int64_t time_ms) {
  if (time_last_update_ms_ != -1 && target_bitrate_ > DataRate::Zero()) {
    int64_t time_delta_ms = time_ms - time_last_update_ms_;
    // Leak bits according to the current target bitrate.
    const int64_t leaked_bits = (target_bitrate_.bps() * time_delta_ms) / 1000;

    // The network buffer may not go below zero.
    network_buffer_level_bits_ =
        std::max<int64_t>(0, network_buffer_level_bits_ - leaked_bits);

    // The media buffer may go down to minus `kMaxMediaUnderrunFrames` frames'
    // worth of data.
    const double max_underrun_seconds =
        std::min(kMaxMediaUnderrunFrames, target_framerate_fps_) /
        target_framerate_fps_;
    media_buffer_level_bits_ = std::max<int64_t>(
        -max_underrun_seconds * target_bitrate_.bps<int64_t>(),
        media_buffer_level_bits_ - leaked_bits);
  }
  time_last_update_ms_ = time_ms;
}

void EncoderOvershootDetector::CullOldUpdates(int64_t time_ms) {
  // Cull old data points.
  const int64_t cutoff_time_ms = time_ms - window_size_ms_;
  while (!utilization_factors_.empty() &&
         utilization_factors_.front().update_time_ms < cutoff_time_ms) {
    // Make sure the sums are never allowed to become negative due to rounding
    // errors.
    sum_network_utilization_factors_ = std::max(
        0.0, sum_network_utilization_factors_ -
                 utilization_factors_.front().network_utilization_factor);
    sum_media_utilization_factors_ = std::max(
        0.0, sum_media_utilization_factors_ -
                 utilization_factors_.front().media_utilization_factor);
    utilization_factors_.pop_front();
  }
}

void EncoderOvershootDetector::UpdateHistograms() {
  if (frame_count_ == 0)
    return;

  int64_t bitrate_rmse = std::sqrt(sum_diff_kbps_squared_ / frame_count_);
  int64_t average_overshoot_percent = sum_overshoot_percent_ / frame_count_;
  const std::string rmse_histogram_prefix =
      is_screenshare_ ? "WebRTC.Video.Screenshare.RMSEOfEncodingBitrateInKbps."
                      : "WebRTC.Video.RMSEOfEncodingBitrateInKbps.";
  const std::string overshoot_histogram_prefix =
      is_screenshare_ ? "WebRTC.Video.Screenshare.EncodingBitrateOvershoot."
                      : "WebRTC.Video.EncodingBitrateOvershoot.";
  // index = 1 represents screensharing histogram recording.
  // index = 0 represents normal video histogram recording.
  const int index = is_screenshare_ ? 1 : 0;
  switch (codec_) {
    case VideoCodecType::kVideoCodecAV1:
      RTC_HISTOGRAMS_COUNTS_10000(index, rmse_histogram_prefix + "Av1",
                                  bitrate_rmse);
      RTC_HISTOGRAMS_COUNTS_10000(index, overshoot_histogram_prefix + "Av1",
                                  average_overshoot_percent);
      break;
    case VideoCodecType::kVideoCodecVP9:
      RTC_HISTOGRAMS_COUNTS_10000(index, rmse_histogram_prefix + "Vp9",
                                  bitrate_rmse);
      RTC_HISTOGRAMS_COUNTS_10000(index, overshoot_histogram_prefix + "Vp9",
                                  average_overshoot_percent);
      break;
    case VideoCodecType::kVideoCodecVP8:
      RTC_HISTOGRAMS_COUNTS_10000(index, rmse_histogram_prefix + "Vp8",
                                  bitrate_rmse);
      RTC_HISTOGRAMS_COUNTS_10000(index, overshoot_histogram_prefix + "Vp8",
                                  average_overshoot_percent);
      break;
    case VideoCodecType::kVideoCodecH264:
      RTC_HISTOGRAMS_COUNTS_10000(index, rmse_histogram_prefix + "H264",
                                  bitrate_rmse);
      RTC_HISTOGRAMS_COUNTS_10000(index, overshoot_histogram_prefix + "H264",
                                  average_overshoot_percent);
      break;
    case VideoCodecType::kVideoCodecH265:
      RTC_HISTOGRAMS_COUNTS_10000(index, rmse_histogram_prefix + "H265",
                                  bitrate_rmse);
      RTC_HISTOGRAMS_COUNTS_10000(index, overshoot_histogram_prefix + "H265",
                                  average_overshoot_percent);
      break;
    case VideoCodecType::kVideoCodecGeneric:
    case VideoCodecType::kVideoCodecMultiplex:
      break;
  }
}

}  // namespace webrtc
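The virtual pacer buffer maintained by LeakBits() and HandleEncodedFrame() is essentially a leaky bucket. The following standalone sketch (illustrative class name, simplified bookkeeping, network-style buffer only) shows the idea: bits drain at the target rate, each frame deposits its size, and the overshoot charged to a frame is capped by the pre-existing buffer level.

#include <algorithm>
#include <cstdint>

class LeakyBucketSketch {
 public:
  LeakyBucketSketch(int64_t target_bps, double fps)
      : target_bps_(target_bps),
        ideal_frame_bits_(static_cast<int64_t>((target_bps + fps / 2) / fps)) {}

  // Returns the per-frame utilization factor; >= 1.0 means overshoot.
  double OnFrame(int64_t frame_bits, int64_t elapsed_ms) {
    // Drain at the target rate; this network-style buffer never goes negative.
    level_bits_ = std::max<int64_t>(
        0, level_bits_ - target_bps_ * elapsed_ms / 1000);

    // Charge overshoot, capped by the buffer level accumulated so far.
    int64_t overshoot_bits = 0;
    if (frame_bits + level_bits_ > ideal_frame_bits_) {
      overshoot_bits = std::min(
          level_bits_, frame_bits + level_bits_ - ideal_frame_bits_);
    }
    level_bits_ += frame_bits - overshoot_bits;
    return 1.0 + static_cast<double>(overshoot_bits) / ideal_frame_bits_;
  }

 private:
  const int64_t target_bps_;
  const int64_t ideal_frame_bits_;
  int64_t level_bits_ = 0;
};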
@@ -0,0 +1,87 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_ENCODER_OVERSHOOT_DETECTOR_H_
#define VIDEO_ENCODER_OVERSHOOT_DETECTOR_H_

#include <deque>

#include "absl/types/optional.h"
#include "api/units/data_rate.h"
#include "api/video_codecs/video_codec.h"

namespace webrtc {

class EncoderOvershootDetector {
 public:
  explicit EncoderOvershootDetector(int64_t window_size_ms,
                                    VideoCodecType codec,
                                    bool is_screenshare);
  ~EncoderOvershootDetector();

  void SetTargetRate(DataRate target_bitrate,
                     double target_framerate_fps,
                     int64_t time_ms);
  // A frame has been encoded or dropped. `bytes` == 0 indicates a drop.
  void OnEncodedFrame(size_t bytes, int64_t time_ms);
  // This utilization factor reaches 1.0 only if the encoder produces encoded
  // frames in such a way that they can be sent onto the network at
  // `target_bitrate` without building growing queues.
  absl::optional<double> GetNetworkRateUtilizationFactor(int64_t time_ms);
  // This utilization factor is based just on actual encoded frame sizes in
  // relation to ideal sizes. An undershoot may be compensated by an
  // overshoot so that the average over time is close to `target_bitrate`.
  absl::optional<double> GetMediaRateUtilizationFactor(int64_t time_ms);
  void Reset();

 private:
  int64_t IdealFrameSizeBits() const;
  void LeakBits(int64_t time_ms);
  void CullOldUpdates(int64_t time_ms);
  // Updates the provided buffer and checks if overuse ensues; returns
  // the calculated utilization factor for this frame.
  double HandleEncodedFrame(size_t frame_size_bits,
                            int64_t ideal_frame_size_bits,
                            int64_t time_ms,
                            int64_t* buffer_level_bits) const;

  const int64_t window_size_ms_;
  int64_t time_last_update_ms_;
  struct BitrateUpdate {
    BitrateUpdate(double network_utilization_factor,
                  double media_utilization_factor,
                  int64_t update_time_ms)
        : network_utilization_factor(network_utilization_factor),
          media_utilization_factor(media_utilization_factor),
          update_time_ms(update_time_ms) {}
    // The utilization factor based on the strict network rate.
    double network_utilization_factor;
    // The utilization based on the average media rate.
    double media_utilization_factor;
    int64_t update_time_ms;
  };
  void UpdateHistograms();
  std::deque<BitrateUpdate> utilization_factors_;
  double sum_network_utilization_factors_;
  double sum_media_utilization_factors_;
  DataRate target_bitrate_;
  double target_framerate_fps_;
  int64_t network_buffer_level_bits_;
  int64_t media_buffer_level_bits_;
  VideoCodecType codec_;
  bool is_screenshare_;
  int64_t frame_count_;
  int64_t sum_diff_kbps_squared_;
  int64_t sum_overshoot_percent_;
};

}  // namespace webrtc

#endif  // VIDEO_ENCODER_OVERSHOOT_DETECTOR_H_
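A brief usage sketch for the detector declared above. The timestamps, frame sizes and the expected factor are made up for illustration; the expected result is approximate, since the exact value depends on the window contents.

#include "video/encoder_overshoot_detector.h"

void SketchOvershootDetector() {
  webrtc::EncoderOvershootDetector detector(
      /*window_size_ms=*/3000, webrtc::VideoCodecType::kVideoCodecVP8,
      /*is_screenshare=*/false);

  int64_t now_ms = 0;
  detector.SetTargetRate(webrtc::DataRate::KilobitsPerSec(500),
                         /*target_framerate_fps=*/30.0, now_ms);

  // 30 fps at 500 kbps gives an ideal frame of ~16.7 kbit; 4000-byte frames
  // (32 kbit each) overshoot by almost 2x.
  for (int i = 0; i < 30; ++i) {
    now_ms += 33;
    detector.OnEncodedFrame(/*bytes=*/4000, now_ms);
  }

  // Expected to be roughly 1.9: the encoder produces nearly twice the target.
  absl::optional<double> factor =
      detector.GetNetworkRateUtilizationFactor(now_ms);
  (void)factor;
}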
139
TMessagesProj/jni/voip/webrtc/video/encoder_rtcp_feedback.cc
Normal file
@@ -0,0 +1,139 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/encoder_rtcp_feedback.h"

#include <algorithm>
#include <utility>

#include "absl/types/optional.h"
#include "api/video_codecs/video_encoder.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/keyframe_interval_settings.h"

namespace webrtc {

namespace {
constexpr int kMinKeyframeSendIntervalMs = 300;
}  // namespace

EncoderRtcpFeedback::EncoderRtcpFeedback(
    Clock* clock,
    const std::vector<uint32_t>& ssrcs,
    VideoStreamEncoderInterface* encoder,
    std::function<std::vector<RtpSequenceNumberMap::Info>(
        uint32_t ssrc,
        const std::vector<uint16_t>& seq_nums)> get_packet_infos)
    : clock_(clock),
      ssrcs_(ssrcs),
      get_packet_infos_(std::move(get_packet_infos)),
      video_stream_encoder_(encoder),
      time_last_packet_delivery_queue_(Timestamp::Zero()),
      min_keyframe_send_interval_(
          TimeDelta::Millis(KeyframeIntervalSettings::ParseFromFieldTrials()
                                .MinKeyframeSendIntervalMs()
                                .value_or(kMinKeyframeSendIntervalMs))) {
  RTC_DCHECK(!ssrcs.empty());
  packet_delivery_queue_.Detach();
}

// Called via Call::DeliverRtcp.
void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) {
  RTC_DCHECK_RUN_ON(&packet_delivery_queue_);
  RTC_DCHECK(std::find(ssrcs_.begin(), ssrcs_.end(), ssrc) != ssrcs_.end());

  const Timestamp now = clock_->CurrentTime();
  if (time_last_packet_delivery_queue_ + min_keyframe_send_interval_ > now)
    return;

  time_last_packet_delivery_queue_ = now;

  // Always produce a key frame for all streams.
  video_stream_encoder_->SendKeyFrame();
}

void EncoderRtcpFeedback::OnReceivedLossNotification(
    uint32_t ssrc,
    uint16_t seq_num_of_last_decodable,
    uint16_t seq_num_of_last_received,
    bool decodability_flag) {
  RTC_DCHECK(get_packet_infos_) << "Object initialization incomplete.";

  const std::vector<uint16_t> seq_nums = {seq_num_of_last_decodable,
                                          seq_num_of_last_received};
  const std::vector<RtpSequenceNumberMap::Info> infos =
      get_packet_infos_(ssrc, seq_nums);
  if (infos.empty()) {
    return;
  }
  RTC_DCHECK_EQ(infos.size(), 2u);

  const RtpSequenceNumberMap::Info& last_decodable = infos[0];
  const RtpSequenceNumberMap::Info& last_received = infos[1];

  VideoEncoder::LossNotification loss_notification;
  loss_notification.timestamp_of_last_decodable = last_decodable.timestamp;
  loss_notification.timestamp_of_last_received = last_received.timestamp;

  // Deduce the decodability of the last received frame and of its
  // dependencies.
  if (last_received.is_first && last_received.is_last) {
    // The frame consists of a single packet, and that packet has evidently
    // been received in full; the frame is therefore assemblable.
    // In this case, the decodability of the dependencies is communicated by
    // the decodability flag, and the frame itself is decodable if and only
    // if they are decodable.
    loss_notification.dependencies_of_last_received_decodable =
        decodability_flag;
    loss_notification.last_received_decodable = decodability_flag;
  } else if (last_received.is_first && !last_received.is_last) {
    // In this case, the decodability flag communicates the decodability of
    // the dependencies. If any is undecodable, we also know that the frame
    // itself will not be decodable; if all are decodable, the frame's own
    // decodability will remain unknown, as not all of its packets have
    // been received.
    loss_notification.dependencies_of_last_received_decodable =
        decodability_flag;
    loss_notification.last_received_decodable =
        !decodability_flag ? absl::make_optional(false) : absl::nullopt;
  } else if (!last_received.is_first && last_received.is_last) {
    if (decodability_flag) {
      // The frame has been received in full, and found to be decodable.
      // (Messages of this type are not sent by WebRTC at the moment, but are
      // theoretically possible, for example for serving as acks.)
      loss_notification.dependencies_of_last_received_decodable = true;
      loss_notification.last_received_decodable = true;
    } else {
      // It is impossible to tell whether some dependencies were undecodable,
      // or whether the frame was unassemblable, but in either case, the frame
      // itself was undecodable.
      loss_notification.dependencies_of_last_received_decodable =
          absl::nullopt;
      loss_notification.last_received_decodable = false;
    }
  } else {  // !last_received.is_first && !last_received.is_last
    if (decodability_flag) {
      // The frame has not yet been received in full, but no gaps have
      // been encountered so far, and the dependencies were all decodable.
      // (Messages of this type are not sent by WebRTC at the moment, but are
      // theoretically possible, for example for serving as acks.)
      loss_notification.dependencies_of_last_received_decodable = true;
      loss_notification.last_received_decodable = absl::nullopt;
    } else {
      // It is impossible to tell whether some dependencies were undecodable,
      // or whether the frame was unassemblable, but in either case, the frame
      // itself was undecodable.
      loss_notification.dependencies_of_last_received_decodable =
          absl::nullopt;
      loss_notification.last_received_decodable = false;
    }
  }

  video_stream_encoder_->OnLossNotification(loss_notification);
}

}  // namespace webrtc
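The four-way branch in OnReceivedLossNotification() reduces to a small decision table. The sketch below restates it with std::optional and illustrative names; it is a readability paraphrase of the logic above, not WebRTC code.

// F = is_first, L = is_last, D = decodability_flag, "?" = unknown (nullopt):
//
//   F L | dependencies_decodable | last_received_decodable
//   ----+------------------------+------------------------
//   1 1 |           D            |           D
//   1 0 |           D            |   D ? "?"  : false
//   0 1 |   D ? true : "?"       |           D
//   0 0 |   D ? true : "?"       |   D ? "?"  : false

#include <optional>

struct LossInfoSketch {
  std::optional<bool> dependencies_decodable;
  std::optional<bool> last_received_decodable;
};

LossInfoSketch Classify(bool is_first, bool is_last, bool decodable_flag) {
  LossInfoSketch out;
  if (is_first && is_last) {
    out.dependencies_decodable = decodable_flag;
    out.last_received_decodable = decodable_flag;
  } else if (is_first) {
    out.dependencies_decodable = decodable_flag;
    if (!decodable_flag) out.last_received_decodable = false;
  } else if (is_last) {
    out.dependencies_decodable =
        decodable_flag ? std::make_optional(true) : std::nullopt;
    out.last_received_decodable = decodable_flag;
  } else {
    out.dependencies_decodable =
        decodable_flag ? std::make_optional(true) : std::nullopt;
    if (!decodable_flag) out.last_received_decodable = false;
  }
  return out;
}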
69
TMessagesProj/jni/voip/webrtc/video/encoder_rtcp_feedback.h
Normal file
@@ -0,0 +1,69 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VIDEO_ENCODER_RTCP_FEEDBACK_H_
#define VIDEO_ENCODER_RTCP_FEEDBACK_H_

#include <functional>
#include <vector>

#include "api/sequence_checker.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "call/rtp_video_sender_interface.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/system/no_unique_address.h"
#include "system_wrappers/include/clock.h"
#include "video/video_stream_encoder_interface.h"

namespace webrtc {

class VideoStreamEncoderInterface;

// This class passes feedback (such as key frame requests or loss
// notifications) from the RtpRtcp module.
class EncoderRtcpFeedback : public RtcpIntraFrameObserver,
                            public RtcpLossNotificationObserver {
 public:
  EncoderRtcpFeedback(
      Clock* clock,
      const std::vector<uint32_t>& ssrcs,
      VideoStreamEncoderInterface* encoder,
      std::function<std::vector<RtpSequenceNumberMap::Info>(
          uint32_t ssrc,
          const std::vector<uint16_t>& seq_nums)> get_packet_infos);
  ~EncoderRtcpFeedback() override = default;

  void OnReceivedIntraFrameRequest(uint32_t ssrc) override;

  // Implements RtcpLossNotificationObserver.
  void OnReceivedLossNotification(uint32_t ssrc,
                                  uint16_t seq_num_of_last_decodable,
                                  uint16_t seq_num_of_last_received,
                                  bool decodability_flag) override;

 private:
  Clock* const clock_;
  const std::vector<uint32_t> ssrcs_;
  const std::function<std::vector<RtpSequenceNumberMap::Info>(
      uint32_t ssrc,
      const std::vector<uint16_t>& seq_nums)>
      get_packet_infos_;
  VideoStreamEncoderInterface* const video_stream_encoder_;

  RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_delivery_queue_;
  Timestamp time_last_packet_delivery_queue_
      RTC_GUARDED_BY(packet_delivery_queue_);

  const TimeDelta min_keyframe_send_interval_;
};

}  // namespace webrtc

#endif  // VIDEO_ENCODER_RTCP_FEEDBACK_H_
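The keyframe-request throttling performed by OnReceivedIntraFrameRequest() above distills to a few lines. The class below is an illustrative standalone sketch, not part of WebRTC: requests arriving closer together than the minimum send interval are dropped.

#include <cstdint>

class KeyFrameRateLimiterSketch {
 public:
  explicit KeyFrameRateLimiterSketch(int64_t min_interval_ms)
      : min_interval_ms_(min_interval_ms) {}

  // Returns true if a key frame should actually be produced now.
  bool OnRequest(int64_t now_ms) {
    if (last_sent_ms_ >= 0 && last_sent_ms_ + min_interval_ms_ > now_ms) {
      return false;  // Too soon since the last key frame.
    }
    last_sent_ms_ = now_ms;
    return true;
  }

 private:
  const int64_t min_interval_ms_;
  int64_t last_sent_ms_ = -1;
};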
1044
TMessagesProj/jni/voip/webrtc/video/frame_cadence_adapter.cc
Normal file
File diff suppressed because it is too large
128
TMessagesProj/jni/voip/webrtc/video/frame_cadence_adapter.h
Normal file
@@ -0,0 +1,128 @@
/*
|
||||
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef VIDEO_FRAME_CADENCE_ADAPTER_H_
|
||||
#define VIDEO_FRAME_CADENCE_ADAPTER_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "api/metronome/metronome.h"
|
||||
#include "api/task_queue/task_queue_base.h"
|
||||
#include "api/units/time_delta.h"
|
||||
#include "api/video/video_frame.h"
|
||||
#include "api/video/video_sink_interface.h"
|
||||
#include "rtc_base/synchronization/mutex.h"
|
||||
#include "rtc_base/thread_annotations.h"
|
||||
#include "system_wrappers/include/clock.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// A sink adapter implementing mutations to the received frame cadence.
|
||||
// With the exception of the constructor and the methods overridden in
|
||||
// VideoSinkInterface, the rest of the interface to this class (including dtor)
|
||||
// needs to happen on the queue passed in Create.
|
||||
class FrameCadenceAdapterInterface
|
||||
: public rtc::VideoSinkInterface<VideoFrame> {
|
||||
public:
|
||||
// Averaging window spanning 90 frames at default 30fps, matching old media
|
||||
// optimization module defaults.
|
||||
// TODO(crbug.com/1255737): Use TimeDelta.
|
||||
static constexpr int64_t kFrameRateAveragingWindowSizeMs = (1000 / 30) * 90;
|
||||
// In zero-hertz mode, the idle repeat rate is a compromise between
|
||||
// RTP receiver keyframe-requesting timeout (3s), other backend limitations
|
||||
// and some worst case RTT.
|
||||
  static constexpr TimeDelta kZeroHertzIdleRepeatRatePeriod =
      TimeDelta::Millis(1000);
  // The number of frame periods to wait for new frames until starting to
  // request refresh frames.
  static constexpr int kOnDiscardedFrameRefreshFramePeriod = 3;

  struct ZeroHertzModeParams {
    // The number of simulcast layers used in this configuration.
    size_t num_simulcast_layers = 0;
  };

  // Callback interface used to inform instance owners.
  class Callback {
   public:
    virtual ~Callback() = default;

    // Called when a frame arrives on the |queue| specified in Create.
    //
    // The |post_time| parameter indicates the current time sampled when
    // FrameCadenceAdapterInterface::OnFrame was called.
    //
    // |queue_overload| is true if the frame cadence adapter notices it's
    // not able to deliver the incoming |frame| to the |queue| in the expected
    // time.
    virtual void OnFrame(Timestamp post_time,
                         bool queue_overload,
                         const VideoFrame& frame) = 0;

    // Called when the source has discarded a frame.
    virtual void OnDiscardedFrame() = 0;

    // Called when the adapter needs the source to send a refresh frame.
    virtual void RequestRefreshFrame() = 0;
  };

  // Factory function creating a production instance. Deletion of the returned
  // instance needs to happen on the same sequence that Create() was called on.
  // Frames arriving in FrameCadenceAdapterInterface::OnFrame are posted to
  // Callback::OnFrame on the |queue|.
  static std::unique_ptr<FrameCadenceAdapterInterface> Create(
      Clock* clock,
      TaskQueueBase* queue,
      Metronome* metronome,
      TaskQueueBase* worker_queue,
      const FieldTrialsView& field_trials);

  // Call before using the rest of the API.
  virtual void Initialize(Callback* callback) = 0;

  // Pass zero hertz parameters in |params| as a prerequisite to enable
  // zero-hertz operation. If absl::nullopt is passed, the cadence adapter will
  // switch to passthrough mode.
  virtual void SetZeroHertzModeEnabled(
      absl::optional<ZeroHertzModeParams> params) = 0;

  // Returns the input framerate. This is measured by RateStatistics when
  // zero-hertz mode is off, and returns the max framerate in zero-hertz mode.
  virtual absl::optional<uint32_t> GetInputFrameRateFps() = 0;

  // Updates frame rate. This is done unconditionally irrespective of adapter
  // mode.
  virtual void UpdateFrameRate() = 0;

  // Updates quality convergence status for an enabled spatial layer.
  // Convergence means QP has dropped to a low-enough level to warrant ceasing
  // to send identical frames at high frequency.
  virtual void UpdateLayerQualityConvergence(size_t spatial_index,
                                             bool converged) = 0;

  // Updates spatial layer enabled status.
  virtual void UpdateLayerStatus(size_t spatial_index, bool enabled) = 0;

  // Updates the restrictions of max frame rate for the video source.
  // The new `max_frame_rate` will only affect the cadence of Callback::OnFrame
  // for non-idle (non-converged) repeated frames.
  virtual void UpdateVideoSourceRestrictions(
      absl::optional<double> max_frame_rate) = 0;

  // Conditionally requests a refresh frame via
  // Callback::RequestRefreshFrame.
  virtual void ProcessKeyFrameRequest() = 0;
};

}  // namespace webrtc

#endif  // VIDEO_FRAME_CADENCE_ADAPTER_H_
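The interface above is easiest to read together with a sketch of a caller. The sink class and the wiring below are hypothetical and only exercise the declared API (Create, Initialize, SetZeroHertzModeEnabled, and the Callback methods); they are not part of this commit.

class ExampleSink : public FrameCadenceAdapterInterface::Callback {
 public:
  void OnFrame(Timestamp post_time,
               bool queue_overload,
               const VideoFrame& frame) override {
    // Hand the frame to the encoder; consider shedding load when
    // `queue_overload` is true.
  }
  void OnDiscardedFrame() override {}
  void RequestRefreshFrame() override {
    // Ask the capturer to resend the current content (screenshare case).
  }
};

// On the queue passed to Create():
//   auto adapter = FrameCadenceAdapterInterface::Create(
//       clock, queue, metronome, worker_queue, field_trials);
//   adapter->Initialize(&sink);
//   adapter->SetZeroHertzModeEnabled(
//       FrameCadenceAdapterInterface::ZeroHertzModeParams{
//           .num_simulcast_layers = 1});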
52
TMessagesProj/jni/voip/webrtc/video/frame_decode_scheduler.h
Normal file
@@ -0,0 +1,52 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_FRAME_DECODE_SCHEDULER_H_
#define VIDEO_FRAME_DECODE_SCHEDULER_H_

#include <stdint.h>

#include "absl/functional/any_invocable.h"
#include "absl/types/optional.h"
#include "api/units/timestamp.h"
#include "video/frame_decode_timing.h"

namespace webrtc {

class FrameDecodeScheduler {
 public:
  // Invoked when a frame with `rtp_timestamp` is ready for decoding.
  using FrameReleaseCallback =
      absl::AnyInvocable<void(uint32_t rtp_timestamp,
                              Timestamp render_time) &&>;

  virtual ~FrameDecodeScheduler() = default;

  // Returns the rtp timestamp of the next frame scheduled for release, or
  // `nullopt` if no frame is currently scheduled.
  virtual absl::optional<uint32_t> ScheduledRtpTimestamp() = 0;

  // Schedules a frame for release based on `schedule`. When released,
  // `callback` will be invoked with the `rtp` timestamp of the frame and the
  // `render_time`.
  virtual void ScheduleFrame(uint32_t rtp,
                             FrameDecodeTiming::FrameSchedule schedule,
                             FrameReleaseCallback callback) = 0;

  // Cancels all scheduled frames.
  virtual void CancelOutstanding() = 0;

  // Stop() must be called before destruction.
  virtual void Stop() = 0;
};

}  // namespace webrtc

#endif  // VIDEO_FRAME_DECODE_SCHEDULER_H_
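A minimal usage sketch for orientation; `scheduler` stands for any concrete FrameDecodeScheduler implementation and the lambda body is illustrative only (neither is part of this commit).

void ScheduleForDecode(FrameDecodeScheduler& scheduler,
                       uint32_t rtp_timestamp,
                       FrameDecodeTiming::FrameSchedule schedule) {
  // Drop any previously scheduled frame before scheduling a new one.
  scheduler.CancelOutstanding();
  scheduler.ScheduleFrame(rtp_timestamp, schedule,
                          [](uint32_t rtp, Timestamp render_time) {
                            // Pop the temporal unit with timestamp `rtp` from
                            // the frame buffer and decode it, targeting
                            // `render_time`.
                          });
}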
60
TMessagesProj/jni/voip/webrtc/video/frame_decode_timing.cc
Normal file
@@ -0,0 +1,60 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/frame_decode_timing.h"

#include <algorithm>

#include "absl/types/optional.h"
#include "api/units/time_delta.h"
#include "rtc_base/logging.h"

namespace webrtc {

FrameDecodeTiming::FrameDecodeTiming(Clock* clock,
                                     webrtc::VCMTiming const* timing)
    : clock_(clock), timing_(timing) {
  RTC_DCHECK(clock_);
  RTC_DCHECK(timing_);
}

absl::optional<FrameDecodeTiming::FrameSchedule>
FrameDecodeTiming::OnFrameBufferUpdated(uint32_t next_temporal_unit_rtp,
                                        uint32_t last_temporal_unit_rtp,
                                        TimeDelta max_wait_for_frame,
                                        bool too_many_frames_queued) {
  RTC_DCHECK_GE(max_wait_for_frame, TimeDelta::Zero());
  const Timestamp now = clock_->CurrentTime();
  Timestamp render_time = timing_->RenderTime(next_temporal_unit_rtp, now);
  TimeDelta max_wait =
      timing_->MaxWaitingTime(render_time, now, too_many_frames_queued);

  // If the delay is not too far in the past, or this is the last decodable
  // frame then it is the best frame to be decoded. Otherwise, fast-forward
  // to the next frame in the buffer.
  if (max_wait <= -kMaxAllowedFrameDelay &&
      next_temporal_unit_rtp != last_temporal_unit_rtp) {
    RTC_DLOG(LS_VERBOSE) << "Fast-forwarded frame " << next_temporal_unit_rtp
                         << " render time " << render_time << " with delay "
                         << max_wait;
    return absl::nullopt;
  }

  max_wait.Clamp(TimeDelta::Zero(), max_wait_for_frame);
  RTC_DLOG(LS_VERBOSE) << "Selected frame with rtp " << next_temporal_unit_rtp
                       << " render time " << render_time
                       << " with a max wait of " << max_wait_for_frame
                       << " clamped to " << max_wait;
  Timestamp latest_decode_time = now + max_wait;
  return FrameSchedule{.latest_decode_time = latest_decode_time,
                       .render_time = render_time};
}

}  // namespace webrtc
54
TMessagesProj/jni/voip/webrtc/video/frame_decode_timing.h
Normal file
@@ -0,0 +1,54 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_FRAME_DECODE_TIMING_H_
#define VIDEO_FRAME_DECODE_TIMING_H_

#include <stdint.h>

#include <functional>

#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "modules/video_coding/timing/timing.h"
#include "system_wrappers/include/clock.h"

namespace webrtc {

class FrameDecodeTiming {
 public:
  FrameDecodeTiming(Clock* clock, webrtc::VCMTiming const* timing);
  ~FrameDecodeTiming() = default;
  FrameDecodeTiming(const FrameDecodeTiming&) = delete;
  FrameDecodeTiming& operator=(const FrameDecodeTiming&) = delete;

  // Any frame that has decode delay more than this in the past can be
  // fast-forwarded.
  static constexpr TimeDelta kMaxAllowedFrameDelay = TimeDelta::Millis(5);

  struct FrameSchedule {
    Timestamp latest_decode_time;
    Timestamp render_time;
  };

  absl::optional<FrameSchedule> OnFrameBufferUpdated(
      uint32_t next_temporal_unit_rtp,
      uint32_t last_temporal_unit_rtp,
      TimeDelta max_wait_for_frame,
      bool too_many_frames_queued);

 private:
  Clock* const clock_;
  webrtc::VCMTiming const* const timing_;
};

}  // namespace webrtc

#endif  // VIDEO_FRAME_DECODE_TIMING_H_
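To make the fast-forward rule in OnFrameBufferUpdated() concrete, here is a worked example with assumed numbers (not taken from any test in this commit).

// Suppose MaxWaitingTime() returns -20 ms for the next temporal unit and it
// is not the last one in the buffer. Since -20 ms <= -kMaxAllowedFrameDelay
// (i.e. <= -5 ms), the frame is considered too late: absl::nullopt is
// returned and the buffer can fast-forward past it.
//
// If MaxWaitingTime() instead returns -2 ms, the frame is kept:
//   max_wait.Clamp(TimeDelta::Zero(), max_wait_for_frame);  // -2 ms -> 0 ms
// and the returned FrameSchedule means "decode immediately", with
// latest_decode_time = now + 0 ms and the render time from VCMTiming.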
89
TMessagesProj/jni/voip/webrtc/video/frame_dumping_decoder.cc
Normal file
@@ -0,0 +1,89 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/frame_dumping_decoder.h"

#include <memory>
#include <utility>

#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/utility/ivf_file_writer.h"

namespace webrtc {
namespace {

class FrameDumpingDecoder : public VideoDecoder {
 public:
  FrameDumpingDecoder(std::unique_ptr<VideoDecoder> decoder, FileWrapper file);
  ~FrameDumpingDecoder() override;

  bool Configure(const Settings& settings) override;
  int32_t Decode(const EncodedImage& input_image,
                 int64_t render_time_ms) override;
  int32_t RegisterDecodeCompleteCallback(
      DecodedImageCallback* callback) override;
  int32_t Release() override;
  DecoderInfo GetDecoderInfo() const override;
  const char* ImplementationName() const override;

 private:
  std::unique_ptr<VideoDecoder> decoder_;
  VideoCodecType codec_type_ = VideoCodecType::kVideoCodecGeneric;
  std::unique_ptr<IvfFileWriter> writer_;
};

FrameDumpingDecoder::FrameDumpingDecoder(std::unique_ptr<VideoDecoder> decoder,
                                         FileWrapper file)
    : decoder_(std::move(decoder)),
      writer_(IvfFileWriter::Wrap(std::move(file),
                                  /* byte_limit= */ 100000000)) {}

FrameDumpingDecoder::~FrameDumpingDecoder() = default;

bool FrameDumpingDecoder::Configure(const Settings& settings) {
  codec_type_ = settings.codec_type();
  return decoder_->Configure(settings);
}

int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image,
                                    int64_t render_time_ms) {
  int32_t ret = decoder_->Decode(input_image, render_time_ms);
  writer_->WriteFrame(input_image, codec_type_);

  return ret;
}

int32_t FrameDumpingDecoder::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  return decoder_->RegisterDecodeCompleteCallback(callback);
}

int32_t FrameDumpingDecoder::Release() {
  return decoder_->Release();
}

VideoDecoder::DecoderInfo FrameDumpingDecoder::GetDecoderInfo() const {
  return decoder_->GetDecoderInfo();
}

const char* FrameDumpingDecoder::ImplementationName() const {
  return decoder_->ImplementationName();
}

}  // namespace

std::unique_ptr<VideoDecoder> CreateFrameDumpingDecoderWrapper(
    std::unique_ptr<VideoDecoder> decoder,
    FileWrapper file) {
  return std::make_unique<FrameDumpingDecoder>(std::move(decoder),
                                               std::move(file));
}

}  // namespace webrtc
28
TMessagesProj/jni/voip/webrtc/video/frame_dumping_decoder.h
Normal file
@@ -0,0 +1,28 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_FRAME_DUMPING_DECODER_H_
#define VIDEO_FRAME_DUMPING_DECODER_H_

#include <memory>

#include "api/video_codecs/video_decoder.h"
#include "rtc_base/system/file_wrapper.h"

namespace webrtc {

// Creates a decoder wrapper that writes the encoded frames to an IVF file.
std::unique_ptr<VideoDecoder> CreateFrameDumpingDecoderWrapper(
    std::unique_ptr<VideoDecoder> decoder,
    FileWrapper file);

}  // namespace webrtc

#endif  // VIDEO_FRAME_DUMPING_DECODER_H_
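A brief usage sketch: wrapping an existing decoder so every frame passed to Decode() is also appended to an IVF file. The output path is made up for illustration; FileWrapper::OpenWriteOnly comes from the rtc_base/system/file_wrapper.h header included above.

std::unique_ptr<VideoDecoder> WrapForDumping(
    std::unique_ptr<VideoDecoder> decoder) {
  // Frames are written until the byte limit hard-coded in the wrapper above
  // (100 MB) is reached.
  return CreateFrameDumpingDecoderWrapper(
      std::move(decoder),
      FileWrapper::OpenWriteOnly("/tmp/received_frames.ivf"));
}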
144
TMessagesProj/jni/voip/webrtc/video/frame_dumping_encoder.cc
Normal file
@@ -0,0 +1,144 @@
/*
 *  Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/frame_dumping_encoder.h"

#include <map>
#include <string>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "api/sequence_checker.h"
#include "api/video/video_codec_type.h"
#include "modules/video_coding/utility/ivf_file_writer.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/system/file_wrapper.h"
#include "rtc_base/time_utils.h"

namespace webrtc {
namespace {

constexpr auto kEncoderDataDumpDirectoryFieldTrial =
    "WebRTC-EncoderDataDumpDirectory";

class FrameDumpingEncoder : public VideoEncoder, public EncodedImageCallback {
 public:
  FrameDumpingEncoder(std::unique_ptr<VideoEncoder> wrapped,
                      int64_t origin_time_micros,
                      std::string output_directory)
      : wrapped_(std::move(wrapped)),
        output_directory_(output_directory),
        origin_time_micros_(origin_time_micros) {}

  ~FrameDumpingEncoder() override {
    MutexLock lock(&mu_);
    writers_by_simulcast_index_.clear();
  }

  // VideoEncoder overrides.
  void SetFecControllerOverride(
      FecControllerOverride* fec_controller_override) override {
    wrapped_->SetFecControllerOverride(fec_controller_override);
  }
  int InitEncode(const VideoCodec* codec_settings,
                 const VideoEncoder::Settings& settings) override {
    codec_settings_ = *codec_settings;
    return wrapped_->InitEncode(codec_settings, settings);
  }
  int32_t RegisterEncodeCompleteCallback(
      EncodedImageCallback* callback) override {
    callback_ = callback;
    return wrapped_->RegisterEncodeCompleteCallback(this);
  }
  int32_t Release() override { return wrapped_->Release(); }
  int32_t Encode(const VideoFrame& frame,
                 const std::vector<VideoFrameType>* frame_types) override {
    return wrapped_->Encode(frame, frame_types);
  }
  void SetRates(const RateControlParameters& parameters) override {
    wrapped_->SetRates(parameters);
  }
  void OnPacketLossRateUpdate(float packet_loss_rate) override {
    wrapped_->OnPacketLossRateUpdate(packet_loss_rate);
  }
  void OnRttUpdate(int64_t rtt_ms) override { wrapped_->OnRttUpdate(rtt_ms); }
  void OnLossNotification(const LossNotification& loss_notification) override {
    wrapped_->OnLossNotification(loss_notification);
  }
  EncoderInfo GetEncoderInfo() const override {
    return wrapped_->GetEncoderInfo();
  }

  // EncodedImageCallback overrides.
  Result OnEncodedImage(const EncodedImage& encoded_image,
                        const CodecSpecificInfo* codec_specific_info) override {
    {
      MutexLock lock(&mu_);
      GetFileWriterForSimulcastIndex(encoded_image.SimulcastIndex().value_or(0))
          .WriteFrame(encoded_image, codec_settings_.codecType);
    }
    return callback_->OnEncodedImage(encoded_image, codec_specific_info);
  }
  void OnDroppedFrame(DropReason reason) override {
    callback_->OnDroppedFrame(reason);
  }

 private:
  std::string FilenameFromSimulcastIndex(int index)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    char filename_buffer[1024];
    rtc::SimpleStringBuilder builder(filename_buffer);
    builder << output_directory_ << "/webrtc_encoded_frames"
            << "." << origin_time_micros_ << "." << index << ".ivf";
    return builder.str();
  }

  IvfFileWriter& GetFileWriterForSimulcastIndex(int index)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    const auto& it = writers_by_simulcast_index_.find(index);
    if (it != writers_by_simulcast_index_.end()) {
      return *it->second;
    }
    auto writer = IvfFileWriter::Wrap(
        FileWrapper::OpenWriteOnly(FilenameFromSimulcastIndex(index)),
        /*byte_limit=*/100'000'000);
    auto* writer_ptr = writer.get();
    writers_by_simulcast_index_.insert(
        std::make_pair(index, std::move(writer)));
    return *writer_ptr;
  }

  std::unique_ptr<VideoEncoder> wrapped_;
  Mutex mu_;
  std::map<int, std::unique_ptr<IvfFileWriter>> writers_by_simulcast_index_
      RTC_GUARDED_BY(mu_);
  VideoCodec codec_settings_;
  EncodedImageCallback* callback_ = nullptr;
  std::string output_directory_;
  int64_t origin_time_micros_ = 0;
};

}  // namespace

std::unique_ptr<VideoEncoder> MaybeCreateFrameDumpingEncoderWrapper(
    std::unique_ptr<VideoEncoder> encoder,
    const FieldTrialsView& field_trials) {
  auto output_directory =
      field_trials.Lookup(kEncoderDataDumpDirectoryFieldTrial);
  if (output_directory.empty() || !encoder) {
    return encoder;
  }
  absl::c_replace(output_directory, ';', '/');
  return std::make_unique<FrameDumpingEncoder>(
      std::move(encoder), rtc::TimeMicros(), output_directory);
}

}  // namespace webrtc
34
TMessagesProj/jni/voip/webrtc/video/frame_dumping_encoder.h
Normal file
@@ -0,0 +1,34 @@
/*
 *  Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_FRAME_DUMPING_ENCODER_H_
#define VIDEO_FRAME_DUMPING_ENCODER_H_

#include <memory>

#include "api/field_trials_view.h"
#include "api/video_codecs/video_encoder.h"

namespace webrtc {

// Creates an encoder that wraps another passed encoder and dumps its encoded
// frames out into a unique IVF file into the directory specified by the
// "WebRTC-EncoderDataDumpDirectory" field trial. Each file generated is
// suffixed by the simulcast index of the encoded frames. If the passed encoder
// is nullptr, or the field trial is not set up, the function just returns the
// passed encoder. The directory specified by the field trial parameter should
// be delimited by ';'.
std::unique_ptr<VideoEncoder> MaybeCreateFrameDumpingEncoderWrapper(
    std::unique_ptr<VideoEncoder> encoder,
    const FieldTrialsView& field_trials);

}  // namespace webrtc

#endif  // VIDEO_FRAME_DUMPING_ENCODER_H_
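For context, a sketch of how the factory above is typically used. The ';' characters in the field trial value are replaced with '/' before use (see absl::c_replace in the .cc file), and file names follow the pattern built in FilenameFromSimulcastIndex(); the directory value below is made up for illustration.

std::unique_ptr<VideoEncoder> MaybeWrapForDumping(
    std::unique_ptr<VideoEncoder> encoder,
    const FieldTrialsView& field_trials) {
  // Returns `encoder` unchanged unless the field trial names a directory.
  // With e.g. "/tmp;dumps" the wrapper writes one file per simulcast index:
  //   /tmp/dumps/webrtc_encoded_frames.<origin_time_us>.<index>.ivf
  return MaybeCreateFrameDumpingEncoderWrapper(std::move(encoder),
                                               field_trials);
}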
@@ -0,0 +1,276 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/frame_encode_metadata_writer.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "common_video/h264/sps_vui_rewriter.h"
#include "modules/include/module_common_types_public.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/svc/create_scalability_structure.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"

namespace webrtc {
namespace {
const int kMessagesThrottlingThreshold = 2;
const int kThrottleRatio = 100000;

class EncodedImageBufferWrapper : public EncodedImageBufferInterface {
 public:
  explicit EncodedImageBufferWrapper(rtc::Buffer&& buffer)
      : buffer_(std::move(buffer)) {}

  const uint8_t* data() const override { return buffer_.data(); }
  uint8_t* data() override { return buffer_.data(); }
  size_t size() const override { return buffer_.size(); }

 private:
  rtc::Buffer buffer_;
};

}  // namespace

FrameEncodeMetadataWriter::TimingFramesLayerInfo::TimingFramesLayerInfo() =
    default;
FrameEncodeMetadataWriter::TimingFramesLayerInfo::~TimingFramesLayerInfo() =
    default;

FrameEncodeMetadataWriter::FrameEncodeMetadataWriter(
    EncodedImageCallback* frame_drop_callback)
    : frame_drop_callback_(frame_drop_callback),
      framerate_fps_(0),
      last_timing_frame_time_ms_(-1),
      reordered_frames_logged_messages_(0),
      stalled_encoder_logged_messages_(0) {
  codec_settings_.timing_frame_thresholds = {-1, 0};
}
FrameEncodeMetadataWriter::~FrameEncodeMetadataWriter() {}

void FrameEncodeMetadataWriter::OnEncoderInit(const VideoCodec& codec) {
  MutexLock lock(&lock_);
  codec_settings_ = codec;
  size_t num_spatial_layers = codec_settings_.numberOfSimulcastStreams;
  if (codec_settings_.codecType == kVideoCodecVP9) {
    num_spatial_layers = std::max(
        num_spatial_layers,
        static_cast<size_t>(codec_settings_.VP9()->numberOfSpatialLayers));
  } else if (codec_settings_.codecType == kVideoCodecAV1 &&
             codec_settings_.GetScalabilityMode().has_value()) {
    std::unique_ptr<ScalableVideoController> structure =
        CreateScalabilityStructure(*codec_settings_.GetScalabilityMode());
    if (structure) {
      num_spatial_layers = structure->StreamConfig().num_spatial_layers;
    } else {
      // |structure| may be nullptr if the scalability mode is invalid.
      RTC_LOG(LS_WARNING) << "Cannot create ScalabilityStructure, since the "
                             "scalability mode is invalid";
    }
  }
  num_spatial_layers_ = std::max(num_spatial_layers, size_t{1});
}

void FrameEncodeMetadataWriter::OnSetRates(
    const VideoBitrateAllocation& bitrate_allocation,
    uint32_t framerate_fps) {
  MutexLock lock(&lock_);
  framerate_fps_ = framerate_fps;
  if (timing_frames_info_.size() < num_spatial_layers_) {
    timing_frames_info_.resize(num_spatial_layers_);
  }
  for (size_t i = 0; i < num_spatial_layers_; ++i) {
    timing_frames_info_[i].target_bitrate_bytes_per_sec =
        bitrate_allocation.GetSpatialLayerSum(i) / 8;
  }
}

void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
  MutexLock lock(&lock_);

  timing_frames_info_.resize(num_spatial_layers_);
  FrameMetadata metadata;
  metadata.rtp_timestamp = frame.timestamp();
  metadata.encode_start_time_ms = rtc::TimeMillis();
  metadata.ntp_time_ms = frame.ntp_time_ms();
  metadata.timestamp_us = frame.timestamp_us();
  metadata.rotation = frame.rotation();
  metadata.color_space = frame.color_space();
  metadata.packet_infos = frame.packet_infos();
  for (size_t si = 0; si < num_spatial_layers_; ++si) {
    RTC_DCHECK(timing_frames_info_[si].frames.empty() ||
               rtc::TimeDiff(
                   frame.render_time_ms(),
                   timing_frames_info_[si].frames.back().timestamp_us / 1000) >=
                   0);
    // If the stream is disabled due to low bandwidth, OnEncodeStarted() is
    // still called and has to be ignored.
    if (timing_frames_info_[si].target_bitrate_bytes_per_sec == 0)
      continue;
    if (timing_frames_info_[si].frames.size() == kMaxEncodeStartTimeListSize) {
      ++stalled_encoder_logged_messages_;
      if (stalled_encoder_logged_messages_ <= kMessagesThrottlingThreshold ||
          stalled_encoder_logged_messages_ % kThrottleRatio == 0) {
        RTC_LOG(LS_WARNING) << "Too many frames in the encode_start_list."
                               " Did encoder stall?";
        if (stalled_encoder_logged_messages_ == kMessagesThrottlingThreshold) {
          RTC_LOG(LS_WARNING)
              << "Too many log messages. Further stalled encoder"
                 "warnings will be throttled.";
        }
      }
      frame_drop_callback_->OnDroppedFrame(
          EncodedImageCallback::DropReason::kDroppedByEncoder);
      timing_frames_info_[si].frames.pop_front();
    }
    timing_frames_info_[si].frames.emplace_back(metadata);
  }
}

void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
                                               EncodedImage* encoded_image) {
  MutexLock lock(&lock_);
  absl::optional<size_t> outlier_frame_size;
  absl::optional<int64_t> encode_start_ms;
  uint8_t timing_flags = VideoSendTiming::kNotTriggered;

  int64_t encode_done_ms = rtc::TimeMillis();

  encode_start_ms =
      ExtractEncodeStartTimeAndFillMetadata(simulcast_svc_idx, encoded_image);

  if (timing_frames_info_.size() > simulcast_svc_idx) {
    size_t target_bitrate =
        timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec;
    if (framerate_fps_ > 0 && target_bitrate > 0) {
      // Framerate and target bitrate were reported by the encoder.
      size_t average_frame_size = target_bitrate / framerate_fps_;
      outlier_frame_size.emplace(
          average_frame_size *
          codec_settings_.timing_frame_thresholds.outlier_ratio_percent / 100);
    }
  }

  // Outliers trigger timing frames, but do not affect scheduled timing
  // frames.
  if (outlier_frame_size && encoded_image->size() >= *outlier_frame_size) {
    timing_flags |= VideoSendTiming::kTriggeredBySize;
  }

  // Check if it's time to send a timing frame.
  int64_t timing_frame_delay_ms =
      encoded_image->capture_time_ms_ - last_timing_frame_time_ms_;
  // Trigger threshold if it's a first frame, too long passed since the last
  // timing frame, or we already sent timing frame on a different simulcast
  // stream with the same capture time.
  if (last_timing_frame_time_ms_ == -1 ||
      timing_frame_delay_ms >=
          codec_settings_.timing_frame_thresholds.delay_ms ||
      timing_frame_delay_ms == 0) {
    timing_flags |= VideoSendTiming::kTriggeredByTimer;
    last_timing_frame_time_ms_ = encoded_image->capture_time_ms_;
  }

  // If the encode start is not available, the encoder uses an internal
  // source. In that case the capture timestamp may come from a different
  // clock with a drift relative to rtc::TimeMillis(). We can't use it for
  // timing frames, because to be sent over the network the capture time is
  // required to be less than all the other timestamps.
  if (encode_start_ms) {
    encoded_image->SetEncodeTime(*encode_start_ms, encode_done_ms);
    encoded_image->timing_.flags = timing_flags;
  } else {
    encoded_image->timing_.flags = VideoSendTiming::kInvalid;
  }
}

void FrameEncodeMetadataWriter::UpdateBitstream(
    const CodecSpecificInfo* codec_specific_info,
    EncodedImage* encoded_image) {
  if (!codec_specific_info ||
      codec_specific_info->codecType != kVideoCodecH264 ||
      encoded_image->_frameType != VideoFrameType::kVideoFrameKey) {
    return;
  }

  // Make sure that the data is not copied if owned by EncodedImage.
  const EncodedImage& buffer = *encoded_image;
  rtc::Buffer modified_buffer =
      SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite(
          buffer, encoded_image->ColorSpace());

  encoded_image->SetEncodedData(
      rtc::make_ref_counted<EncodedImageBufferWrapper>(
          std::move(modified_buffer)));
}

void FrameEncodeMetadataWriter::Reset() {
  MutexLock lock(&lock_);
  for (auto& info : timing_frames_info_) {
    info.frames.clear();
  }
  last_timing_frame_time_ms_ = -1;
  reordered_frames_logged_messages_ = 0;
  stalled_encoder_logged_messages_ = 0;
}

absl::optional<int64_t>
FrameEncodeMetadataWriter::ExtractEncodeStartTimeAndFillMetadata(
    size_t simulcast_svc_idx,
    EncodedImage* encoded_image) {
  absl::optional<int64_t> result;
  size_t num_simulcast_svc_streams = timing_frames_info_.size();
  if (simulcast_svc_idx < num_simulcast_svc_streams) {
    auto metadata_list = &timing_frames_info_[simulcast_svc_idx].frames;
    // Skip frames for which there was OnEncodeStarted but no OnEncodedImage
    // call. These are dropped by encoder internally.
    // Because some hardware encoders don't preserve capture timestamp we
    // use RTP timestamps here.
    while (!metadata_list->empty() &&
           IsNewerTimestamp(encoded_image->RtpTimestamp(),
                            metadata_list->front().rtp_timestamp)) {
      frame_drop_callback_->OnDroppedFrame(
          EncodedImageCallback::DropReason::kDroppedByEncoder);
      metadata_list->pop_front();
    }

    encoded_image->content_type_ =
        (codec_settings_.mode == VideoCodecMode::kScreensharing)
            ? VideoContentType::SCREENSHARE
            : VideoContentType::UNSPECIFIED;

    if (!metadata_list->empty() &&
        metadata_list->front().rtp_timestamp == encoded_image->RtpTimestamp()) {
      result.emplace(metadata_list->front().encode_start_time_ms);
      encoded_image->capture_time_ms_ =
          metadata_list->front().timestamp_us / 1000;
      encoded_image->ntp_time_ms_ = metadata_list->front().ntp_time_ms;
      encoded_image->rotation_ = metadata_list->front().rotation;
      encoded_image->SetColorSpace(metadata_list->front().color_space);
      encoded_image->SetPacketInfos(metadata_list->front().packet_infos);
      metadata_list->pop_front();
    } else {
      ++reordered_frames_logged_messages_;
      if (reordered_frames_logged_messages_ <= kMessagesThrottlingThreshold ||
          reordered_frames_logged_messages_ % kThrottleRatio == 0) {
        RTC_LOG(LS_WARNING) << "Frame with no encode started time recordings. "
                               "Encoder may be reordering frames "
                               "or not preserving RTP timestamps.";
        if (reordered_frames_logged_messages_ == kMessagesThrottlingThreshold) {
          RTC_LOG(LS_WARNING) << "Too many log messages. Further frames "
                                 "reordering warnings will be throttled.";
        }
      }
    }
  }
  return result;
}

}  // namespace webrtc
@@ -0,0 +1,82 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
#define VIDEO_FRAME_ENCODE_METADATA_WRITER_H_

#include <list>
#include <vector>

#include "absl/types/optional.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/synchronization/mutex.h"

namespace webrtc {

class FrameEncodeMetadataWriter {
 public:
  explicit FrameEncodeMetadataWriter(EncodedImageCallback* frame_drop_callback);
  ~FrameEncodeMetadataWriter();

  void OnEncoderInit(const VideoCodec& codec);
  void OnSetRates(const VideoBitrateAllocation& bitrate_allocation,
                  uint32_t framerate_fps);

  void OnEncodeStarted(const VideoFrame& frame);

  void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image);

  void UpdateBitstream(const CodecSpecificInfo* codec_specific_info,
                       EncodedImage* encoded_image);

  void Reset();

 private:
  // For non-internal-source encoders, returns encode started time and fixes
  // capture timestamp for the frame, if corrupted by the encoder.
  absl::optional<int64_t> ExtractEncodeStartTimeAndFillMetadata(
      size_t simulcast_svc_idx,
      EncodedImage* encoded_image) RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  struct FrameMetadata {
    uint32_t rtp_timestamp;
    int64_t encode_start_time_ms;
    int64_t ntp_time_ms = 0;
    int64_t timestamp_us = 0;
    VideoRotation rotation = kVideoRotation_0;
    absl::optional<ColorSpace> color_space;
    RtpPacketInfos packet_infos;
  };
  struct TimingFramesLayerInfo {
    TimingFramesLayerInfo();
    ~TimingFramesLayerInfo();
    size_t target_bitrate_bytes_per_sec = 0;
    std::list<FrameMetadata> frames;
  };

  Mutex lock_;
  EncodedImageCallback* const frame_drop_callback_;
  VideoCodec codec_settings_ RTC_GUARDED_BY(&lock_);
  uint32_t framerate_fps_ RTC_GUARDED_BY(&lock_);

  size_t num_spatial_layers_ RTC_GUARDED_BY(&lock_);
  // Separate instance for each simulcast stream or spatial layer.
  std::vector<TimingFramesLayerInfo> timing_frames_info_ RTC_GUARDED_BY(&lock_);
  int64_t last_timing_frame_time_ms_ RTC_GUARDED_BY(&lock_);
  size_t reordered_frames_logged_messages_ RTC_GUARDED_BY(&lock_);
  size_t stalled_encoder_logged_messages_ RTC_GUARDED_BY(&lock_);
};

}  // namespace webrtc

#endif  // VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
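The expected call order around an encoder is easiest to see as a sketch. The names `codec`, `allocation`, `framerate`, `input_frame`, `si`, `image`, `info`, and `drop_callback` are placeholders, and the sequence reflects how the methods declared above fit together rather than any specific call site in this commit.

FrameEncodeMetadataWriter writer(&drop_callback);
writer.OnEncoderInit(codec);               // once the encoder is configured
writer.OnSetRates(allocation, framerate);  // on every rate update
writer.OnEncodeStarted(input_frame);       // just before Encode()
// ... later, when the encoder emits an image for layer `si`:
writer.FillTimingInfo(si, &image);         // stamps encode/capture times
writer.UpdateBitstream(&info, &image);     // rewrites SPS VUI on H.264 keys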
@@ -0,0 +1,52 @@
/*
 *  Copyright 2019 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/quality_limitation_reason_tracker.h"

#include <utility>

#include "rtc_base/checks.h"

namespace webrtc {

QualityLimitationReasonTracker::QualityLimitationReasonTracker(Clock* clock)
    : clock_(clock),
      current_reason_(QualityLimitationReason::kNone),
      current_reason_updated_timestamp_ms_(clock_->TimeInMilliseconds()),
      durations_ms_({std::make_pair(QualityLimitationReason::kNone, 0),
                     std::make_pair(QualityLimitationReason::kCpu, 0),
                     std::make_pair(QualityLimitationReason::kBandwidth, 0),
                     std::make_pair(QualityLimitationReason::kOther, 0)}) {}

QualityLimitationReason QualityLimitationReasonTracker::current_reason() const {
  return current_reason_;
}

void QualityLimitationReasonTracker::SetReason(QualityLimitationReason reason) {
  if (reason == current_reason_)
    return;
  int64_t now_ms = clock_->TimeInMilliseconds();
  durations_ms_[current_reason_] +=
      now_ms - current_reason_updated_timestamp_ms_;
  current_reason_ = reason;
  current_reason_updated_timestamp_ms_ = now_ms;
}

std::map<QualityLimitationReason, int64_t>
QualityLimitationReasonTracker::DurationsMs() const {
  std::map<QualityLimitationReason, int64_t> total_durations_ms = durations_ms_;
  auto it = total_durations_ms.find(current_reason_);
  RTC_DCHECK(it != total_durations_ms.end());
  it->second +=
      clock_->TimeInMilliseconds() - current_reason_updated_timestamp_ms_;
  return total_durations_ms;
}

}  // namespace webrtc
@@ -0,0 +1,58 @@
/*
 *  Copyright 2019 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_QUALITY_LIMITATION_REASON_TRACKER_H_
#define VIDEO_QUALITY_LIMITATION_REASON_TRACKER_H_

#include <map>

#include "common_video/include/quality_limitation_reason.h"
#include "system_wrappers/include/clock.h"

namespace webrtc {

// A tracker of quality limitation reasons. The quality limitation reason is the
// primary reason for limiting resolution and/or framerate (such as CPU or
// bandwidth limitations). The tracker keeps track of the current reason and the
// duration of time spent in each reason. See qualityLimitationReason[1],
// qualityLimitationDurations[2], and qualityLimitationResolutionChanges[3] in
// the webrtc-stats spec.
// Note that the specification defines the durations in seconds while the
// internal data structures define them in milliseconds.
// [1]
// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason
// [2]
// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations
// [3]
// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
class QualityLimitationReasonTracker {
 public:
  // The caller is responsible for making sure `clock` outlives the tracker.
  explicit QualityLimitationReasonTracker(Clock* clock);

  // The current reason defaults to QualityLimitationReason::kNone.
  QualityLimitationReason current_reason() const;
  void SetReason(QualityLimitationReason reason);
  std::map<QualityLimitationReason, int64_t> DurationsMs() const;

 private:
  Clock* const clock_;
  QualityLimitationReason current_reason_;
  int64_t current_reason_updated_timestamp_ms_;
  // The total amount of time spent in each reason at time
  // `current_reason_updated_timestamp_ms_`. To get the total duration so far,
  // including the time spent in `current_reason_` elapsed since the last time
  // `current_reason_` was updated, see DurationsMs().
  std::map<QualityLimitationReason, int64_t> durations_ms_;
};

}  // namespace webrtc

#endif  // VIDEO_QUALITY_LIMITATION_REASON_TRACKER_H_
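A minimal sketch of the tracker in use, assuming a valid Clock* named `clock`; note that DurationsMs() reports milliseconds while the webrtc-stats spec cited above uses seconds, so exporters divide by 1000.

QualityLimitationReasonTracker tracker(clock);
tracker.SetReason(QualityLimitationReason::kCpu);
// ... encoding runs CPU-limited for a while ...
tracker.SetReason(QualityLimitationReason::kNone);
std::map<QualityLimitationReason, int64_t> durations = tracker.DurationsMs();
// durations[QualityLimitationReason::kCpu] now includes the CPU-limited span.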
855
TMessagesProj/jni/voip/webrtc/video/receive_statistics_proxy.cc
Normal file
855
TMessagesProj/jni/voip/webrtc/video/receive_statistics_proxy.cc
Normal file
|
|
@ -0,0 +1,855 @@
|
|||
/*
|
||||
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "video/receive_statistics_proxy.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <utility>
|
||||
|
||||
#include "modules/video_coding/include/video_codec_interface.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/strings/string_builder.h"
|
||||
#include "rtc_base/thread.h"
|
||||
#include "rtc_base/time_utils.h"
|
||||
#include "system_wrappers/include/clock.h"
|
||||
#include "system_wrappers/include/metrics.h"
|
||||
#include "video/video_receive_stream2.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace internal {
|
||||
namespace {
|
||||
// Periodic time interval for processing samples for `freq_offset_counter_`.
|
||||
const int64_t kFreqOffsetProcessIntervalMs = 40000;
|
||||
|
||||
// Some metrics are reported as a maximum over this period.
|
||||
// This should be synchronized with a typical getStats polling interval in
|
||||
// the clients.
|
||||
const int kMovingMaxWindowMs = 1000;
|
||||
|
||||
// How large window we use to calculate the framerate/bitrate.
|
||||
const int kRateStatisticsWindowSizeMs = 1000;
|
||||
|
||||
// Some sane ballpark estimate for maximum common value of inter-frame delay.
|
||||
// Values below that will be stored explicitly in the array,
|
||||
// values above - in the map.
|
||||
const int kMaxCommonInterframeDelayMs = 500;
|
||||
|
||||
const char* UmaPrefixForContentType(VideoContentType content_type) {
|
||||
if (videocontenttypehelpers::IsScreenshare(content_type))
|
||||
return "WebRTC.Video.Screenshare";
|
||||
return "WebRTC.Video";
|
||||
}
|
||||
|
||||
// TODO(https://bugs.webrtc.org/11572): Workaround for an issue with some
|
||||
// rtc::Thread instances and/or implementations that don't register as the
|
||||
// current task queue.
|
||||
bool IsCurrentTaskQueueOrThread(TaskQueueBase* task_queue) {
|
||||
if (task_queue->IsCurrent())
|
||||
return true;
|
||||
|
||||
rtc::Thread* current_thread = rtc::ThreadManager::Instance()->CurrentThread();
|
||||
if (!current_thread)
|
||||
return false;
|
||||
|
||||
return static_cast<TaskQueueBase*>(current_thread) == task_queue;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
ReceiveStatisticsProxy::ReceiveStatisticsProxy(uint32_t remote_ssrc,
|
||||
Clock* clock,
|
||||
TaskQueueBase* worker_thread)
|
||||
: clock_(clock),
|
||||
start_ms_(clock->TimeInMilliseconds()),
|
||||
remote_ssrc_(remote_ssrc),
|
||||
// 1000ms window, scale 1000 for ms to s.
|
||||
decode_fps_estimator_(1000, 1000),
|
||||
renders_fps_estimator_(1000, 1000),
|
||||
render_fps_tracker_(100, 10u),
|
||||
render_pixel_tracker_(100, 10u),
|
||||
video_quality_observer_(new VideoQualityObserver()),
|
||||
interframe_delay_max_moving_(kMovingMaxWindowMs),
|
||||
freq_offset_counter_(clock, nullptr, kFreqOffsetProcessIntervalMs),
|
||||
last_content_type_(VideoContentType::UNSPECIFIED),
|
||||
last_codec_type_(kVideoCodecVP8),
|
||||
num_delayed_frames_rendered_(0),
|
||||
sum_missed_render_deadline_ms_(0),
|
||||
timing_frame_info_counter_(kMovingMaxWindowMs),
|
||||
worker_thread_(worker_thread) {
|
||||
RTC_DCHECK(worker_thread);
|
||||
decode_queue_.Detach();
|
||||
incoming_render_queue_.Detach();
|
||||
stats_.ssrc = remote_ssrc_;
|
||||
}
|
||||
|
||||
ReceiveStatisticsProxy::~ReceiveStatisticsProxy() {
|
||||
RTC_DCHECK_RUN_ON(&main_thread_);
|
||||
}
|
||||
|
||||
void ReceiveStatisticsProxy::UpdateHistograms(
|
||||
absl::optional<int> fraction_lost,
|
||||
const StreamDataCounters& rtp_stats,
|
||||
const StreamDataCounters* rtx_stats) {
|
||||
RTC_DCHECK_RUN_ON(&main_thread_);
|
||||
|
||||
char log_stream_buf[8 * 1024];
|
||||
rtc::SimpleStringBuilder log_stream(log_stream_buf);
|
||||
|
||||
int stream_duration_sec = (clock_->TimeInMilliseconds() - start_ms_) / 1000;
|
||||
|
||||
if (stats_.frame_counts.key_frames > 0 ||
|
||||
stats_.frame_counts.delta_frames > 0) {
|
||||
RTC_HISTOGRAM_COUNTS_100000("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
|
||||
stream_duration_sec);
|
||||
log_stream << "WebRTC.Video.ReceiveStreamLifetimeInSeconds "
|
||||
<< stream_duration_sec << '\n';
|
||||
}
|
||||
|
||||
log_stream << "Frames decoded " << stats_.frames_decoded << '\n';
|
||||
|
||||
if (num_unique_frames_) {
|
||||
int num_dropped_frames = *num_unique_frames_ - stats_.frames_decoded;
|
||||
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DroppedFrames.Receiver",
|
||||
num_dropped_frames);
|
||||
log_stream << "WebRTC.Video.DroppedFrames.Receiver " << num_dropped_frames
|
||||
<< '\n';
|
||||
}
|
||||
|
||||
if (fraction_lost && stream_duration_sec >= metrics::kMinRunTimeInSeconds) {
|
||||
RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.ReceivedPacketsLostInPercent",
|
||||
*fraction_lost);
|
||||
log_stream << "WebRTC.Video.ReceivedPacketsLostInPercent " << *fraction_lost
|
||||
<< '\n';
|
||||
}
|
||||
|
||||
if (first_decoded_frame_time_ms_) {
|
||||
const int64_t elapsed_ms =
|
||||
(clock_->TimeInMilliseconds() - *first_decoded_frame_time_ms_);
|
||||
if (elapsed_ms >=
|
||||
metrics::kMinRunTimeInSeconds * rtc::kNumMillisecsPerSec) {
|
||||
int decoded_fps = static_cast<int>(
|
||||
(stats_.frames_decoded * 1000.0f / elapsed_ms) + 0.5f);
|
||||
RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.DecodedFramesPerSecond",
|
||||
decoded_fps);
|
||||
log_stream << "WebRTC.Video.DecodedFramesPerSecond " << decoded_fps
|
||||
<< '\n';
|
||||
|
||||
const uint32_t frames_rendered = stats_.frames_rendered;
|
||||
if (frames_rendered > 0) {
|
||||
RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DelayedFramesToRenderer",
|
||||
static_cast<int>(num_delayed_frames_rendered_ *
|
||||
100 / frames_rendered));
|
||||
if (num_delayed_frames_rendered_ > 0) {
|
||||
RTC_HISTOGRAM_COUNTS_1000(
|
||||
"WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
|
||||
static_cast<int>(sum_missed_render_deadline_ms_ /
|
||||
num_delayed_frames_rendered_));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const int kMinRequiredSamples = 200;
|
||||
int samples = static_cast<int>(render_fps_tracker_.TotalSampleCount());
|
||||
if (samples >= kMinRequiredSamples) {
|
||||
int rendered_fps = round(render_fps_tracker_.ComputeTotalRate());
|
||||
RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.RenderFramesPerSecond",
|
||||
rendered_fps);
|
||||
log_stream << "WebRTC.Video.RenderFramesPerSecond " << rendered_fps << '\n';
|
||||
RTC_HISTOGRAM_COUNTS_100000(
|
||||
"WebRTC.Video.RenderSqrtPixelsPerSecond",
|
||||
round(render_pixel_tracker_.ComputeTotalRate()));
|
||||
}
|
||||
|
||||
absl::optional<int> sync_offset_ms =
|
||||
sync_offset_counter_.Avg(kMinRequiredSamples);
|
||||
if (sync_offset_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.AVSyncOffsetInMs",
|
||||
*sync_offset_ms);
|
||||
log_stream << "WebRTC.Video.AVSyncOffsetInMs " << *sync_offset_ms << '\n';
|
||||
}
|
||||
AggregatedStats freq_offset_stats = freq_offset_counter_.GetStats();
|
||||
if (freq_offset_stats.num_samples > 0) {
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.RtpToNtpFreqOffsetInKhz",
|
||||
freq_offset_stats.average);
|
||||
log_stream << "WebRTC.Video.RtpToNtpFreqOffsetInKhz "
|
||||
<< freq_offset_stats.ToString() << '\n';
|
||||
}
|
||||
|
||||
int num_total_frames =
|
||||
stats_.frame_counts.key_frames + stats_.frame_counts.delta_frames;
|
||||
if (num_total_frames >= kMinRequiredSamples) {
|
||||
int num_key_frames = stats_.frame_counts.key_frames;
|
||||
int key_frames_permille =
|
||||
(num_key_frames * 1000 + num_total_frames / 2) / num_total_frames;
|
||||
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.KeyFramesReceivedInPermille",
|
||||
key_frames_permille);
|
||||
log_stream << "WebRTC.Video.KeyFramesReceivedInPermille "
|
||||
<< key_frames_permille << '\n';
|
||||
}
|
||||
|
||||
absl::optional<int> qp = qp_counters_.vp8.Avg(kMinRequiredSamples);
|
||||
if (qp) {
|
||||
RTC_HISTOGRAM_COUNTS_200("WebRTC.Video.Decoded.Vp8.Qp", *qp);
|
||||
log_stream << "WebRTC.Video.Decoded.Vp8.Qp " << *qp << '\n';
|
||||
}
|
||||
|
||||
absl::optional<int> decode_ms = decode_time_counter_.Avg(kMinRequiredSamples);
|
||||
if (decode_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DecodeTimeInMs", *decode_ms);
|
||||
log_stream << "WebRTC.Video.DecodeTimeInMs " << *decode_ms << '\n';
|
||||
}
|
||||
absl::optional<int> jb_delay_ms =
|
||||
jitter_delay_counter_.Avg(kMinRequiredSamples);
|
||||
if (jb_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.JitterBufferDelayInMs",
|
||||
*jb_delay_ms);
|
||||
log_stream << "WebRTC.Video.JitterBufferDelayInMs " << *jb_delay_ms << '\n';
|
||||
}
|
||||
|
||||
absl::optional<int> target_delay_ms =
|
||||
target_delay_counter_.Avg(kMinRequiredSamples);
|
||||
if (target_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.TargetDelayInMs",
|
||||
*target_delay_ms);
|
||||
log_stream << "WebRTC.Video.TargetDelayInMs " << *target_delay_ms << '\n';
|
||||
}
|
||||
absl::optional<int> current_delay_ms =
|
||||
current_delay_counter_.Avg(kMinRequiredSamples);
|
||||
if (current_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.CurrentDelayInMs",
|
||||
*current_delay_ms);
|
||||
log_stream << "WebRTC.Video.CurrentDelayInMs " << *current_delay_ms << '\n';
|
||||
}
|
||||
absl::optional<int> delay_ms = oneway_delay_counter_.Avg(kMinRequiredSamples);
|
||||
if (delay_ms)
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.OnewayDelayInMs", *delay_ms);
|
||||
|
||||
// Aggregate content_specific_stats_ by removing experiment or simulcast
|
||||
// information;
|
||||
std::map<VideoContentType, ContentSpecificStats> aggregated_stats;
|
||||
for (const auto& it : content_specific_stats_) {
|
||||
// Calculate simulcast specific metrics (".S0" ... ".S2" suffixes).
|
||||
VideoContentType content_type = it.first;
|
||||
// Calculate aggregated metrics (no suffixes. Aggregated on everything).
|
||||
content_type = it.first;
|
||||
aggregated_stats[content_type].Add(it.second);
|
||||
}
|
||||
|
||||
for (const auto& it : aggregated_stats) {
|
||||
// For the metric Foo we report the following slices:
|
||||
// WebRTC.Video.Foo,
|
||||
// WebRTC.Video.Screenshare.Foo,
|
||||
auto content_type = it.first;
|
||||
auto stats = it.second;
|
||||
std::string uma_prefix = UmaPrefixForContentType(content_type);
|
||||
|
||||
absl::optional<int> e2e_delay_ms =
|
||||
stats.e2e_delay_counter.Avg(kMinRequiredSamples);
|
||||
if (e2e_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(uma_prefix + ".EndToEndDelayInMs",
|
||||
*e2e_delay_ms);
|
||||
log_stream << uma_prefix << ".EndToEndDelayInMs"
|
||||
<< " " << *e2e_delay_ms << '\n';
|
||||
}
|
||||
absl::optional<int> e2e_delay_max_ms = stats.e2e_delay_counter.Max();
|
||||
if (e2e_delay_max_ms && e2e_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_100000(uma_prefix + ".EndToEndDelayMaxInMs",
|
||||
*e2e_delay_max_ms);
|
||||
log_stream << uma_prefix << ".EndToEndDelayMaxInMs"
|
||||
<< " " << *e2e_delay_max_ms << '\n';
|
||||
}
|
||||
absl::optional<int> interframe_delay_ms =
|
||||
stats.interframe_delay_counter.Avg(kMinRequiredSamples);
|
||||
if (interframe_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(uma_prefix + ".InterframeDelayInMs",
|
||||
*interframe_delay_ms);
|
||||
log_stream << uma_prefix << ".InterframeDelayInMs"
|
||||
<< " " << *interframe_delay_ms << '\n';
|
||||
}
|
||||
absl::optional<int> interframe_delay_max_ms =
|
||||
stats.interframe_delay_counter.Max();
|
||||
if (interframe_delay_max_ms && interframe_delay_ms) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(uma_prefix + ".InterframeDelayMaxInMs",
|
||||
*interframe_delay_max_ms);
|
||||
log_stream << uma_prefix << ".InterframeDelayMaxInMs"
|
||||
<< " " << *interframe_delay_max_ms << '\n';
|
||||
}
|
||||
|
||||
absl::optional<uint32_t> interframe_delay_95p_ms =
|
||||
stats.interframe_delay_percentiles.GetPercentile(0.95f);
|
||||
if (interframe_delay_95p_ms && interframe_delay_ms != -1) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(
|
||||
uma_prefix + ".InterframeDelay95PercentileInMs",
|
||||
*interframe_delay_95p_ms);
|
||||
log_stream << uma_prefix << ".InterframeDelay95PercentileInMs"
|
||||
<< " " << *interframe_delay_95p_ms << '\n';
|
||||
}
|
||||
|
||||
absl::optional<int> width = stats.received_width.Avg(kMinRequiredSamples);
|
||||
if (width) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(uma_prefix + ".ReceivedWidthInPixels",
|
||||
*width);
|
||||
log_stream << uma_prefix << ".ReceivedWidthInPixels"
|
||||
<< " " << *width << '\n';
|
||||
}
|
||||
|
||||
absl::optional<int> height = stats.received_height.Avg(kMinRequiredSamples);
|
||||
if (height) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(uma_prefix + ".ReceivedHeightInPixels",
|
||||
*height);
|
||||
log_stream << uma_prefix << ".ReceivedHeightInPixels"
|
||||
<< " " << *height << '\n';
|
||||
}
|
||||
|
||||
if (content_type != VideoContentType::UNSPECIFIED) {
|
||||
// Don't report these 3 metrics unsliced, as more precise variants
|
||||
// are reported separately in this method.
|
||||
float flow_duration_sec = stats.flow_duration_ms / 1000.0;
|
||||
if (flow_duration_sec >= metrics::kMinRunTimeInSeconds) {
|
||||
int media_bitrate_kbps = static_cast<int>(stats.total_media_bytes * 8 /
|
||||
flow_duration_sec / 1000);
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_10000(
|
||||
uma_prefix + ".MediaBitrateReceivedInKbps", media_bitrate_kbps);
|
||||
log_stream << uma_prefix << ".MediaBitrateReceivedInKbps"
|
||||
<< " " << media_bitrate_kbps << '\n';
|
||||
}
|
||||
|
||||
int num_total_frames =
|
||||
stats.frame_counts.key_frames + stats.frame_counts.delta_frames;
|
||||
if (num_total_frames >= kMinRequiredSamples) {
|
||||
int num_key_frames = stats.frame_counts.key_frames;
|
||||
int key_frames_permille =
|
||||
(num_key_frames * 1000 + num_total_frames / 2) / num_total_frames;
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_1000(
|
||||
uma_prefix + ".KeyFramesReceivedInPermille", key_frames_permille);
|
||||
log_stream << uma_prefix << ".KeyFramesReceivedInPermille"
|
||||
<< " " << key_frames_permille << '\n';
|
||||
}
|
||||
|
||||
absl::optional<int> qp = stats.qp_counter.Avg(kMinRequiredSamples);
|
||||
if (qp) {
|
||||
RTC_HISTOGRAM_COUNTS_SPARSE_200(uma_prefix + ".Decoded.Vp8.Qp", *qp);
|
||||
log_stream << uma_prefix << ".Decoded.Vp8.Qp"
|
||||
<< " " << *qp << '\n';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
StreamDataCounters rtp_rtx_stats = rtp_stats;
|
||||
if (rtx_stats)
|
||||
rtp_rtx_stats.Add(*rtx_stats);
|
||||
|
||||
TimeDelta elapsed = rtp_rtx_stats.TimeSinceFirstPacket(clock_->CurrentTime());
|
||||
if (elapsed >= TimeDelta::Seconds(metrics::kMinRunTimeInSeconds)) {
|
||||
int64_t elapsed_sec = elapsed.seconds();
|
||||
RTC_HISTOGRAM_COUNTS_10000(
|
||||
"WebRTC.Video.BitrateReceivedInKbps",
|
||||
static_cast<int>(rtp_rtx_stats.transmitted.TotalBytes() * 8 /
|
||||
elapsed_sec / 1000));
|
||||
int media_bitrate_kbs = static_cast<int>(rtp_stats.MediaPayloadBytes() * 8 /
|
||||
elapsed_sec / 1000);
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.MediaBitrateReceivedInKbps",
|
||||
media_bitrate_kbs);
|
||||
log_stream << "WebRTC.Video.MediaBitrateReceivedInKbps "
|
||||
<< media_bitrate_kbs << '\n';
|
||||
RTC_HISTOGRAM_COUNTS_10000(
|
||||
"WebRTC.Video.PaddingBitrateReceivedInKbps",
|
||||
static_cast<int>(rtp_rtx_stats.transmitted.padding_bytes * 8 /
|
||||
elapsed_sec / 1000));
|
||||
RTC_HISTOGRAM_COUNTS_10000(
|
||||
"WebRTC.Video.RetransmittedBitrateReceivedInKbps",
|
||||
static_cast<int>(rtp_rtx_stats.retransmitted.TotalBytes() * 8 /
|
||||
elapsed_sec / 1000));
|
||||
if (rtx_stats) {
|
||||
RTC_HISTOGRAM_COUNTS_10000(
|
||||
"WebRTC.Video.RtxBitrateReceivedInKbps",
|
||||
static_cast<int>(rtx_stats->transmitted.TotalBytes() * 8 /
|
||||
elapsed_sec / 1000));
|
||||
}
|
||||
const RtcpPacketTypeCounter& counters = stats_.rtcp_packet_type_counts;
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.NackPacketsSentPerMinute",
|
||||
counters.nack_packets * 60 / elapsed_sec);
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.FirPacketsSentPerMinute",
|
||||
counters.fir_packets * 60 / elapsed_sec);
|
||||
RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.PliPacketsSentPerMinute",
|
||||
counters.pli_packets * 60 / elapsed_sec);
|
||||
if (counters.nack_requests > 0) {
|
||||
RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.UniqueNackRequestsSentInPercent",
|
||||
counters.UniqueNackRequestsInPercent());
|
||||
}
|
||||
}
|
||||
|
||||
RTC_LOG(LS_INFO) << log_stream.str();
|
||||
video_quality_observer_->UpdateHistograms(
|
||||
videocontenttypehelpers::IsScreenshare(last_content_type_));
|
||||
}
|
||||
|
||||
void ReceiveStatisticsProxy::UpdateFramerate(int64_t now_ms) const {
|
||||
RTC_DCHECK_RUN_ON(&main_thread_);
|
||||
|
||||
int64_t old_frames_ms = now_ms - kRateStatisticsWindowSizeMs;
|
||||
while (!frame_window_.empty() &&
|
||||
frame_window_.begin()->first < old_frames_ms) {
|
||||
frame_window_.erase(frame_window_.begin());
|
||||
}
|
||||
|
||||
  size_t framerate =
      (frame_window_.size() * 1000 + 500) / kRateStatisticsWindowSizeMs;

  stats_.network_frame_rate = static_cast<int>(framerate);
}

absl::optional<int64_t>
ReceiveStatisticsProxy::GetCurrentEstimatedPlayoutNtpTimestampMs(
    int64_t now_ms) const {
  RTC_DCHECK_RUN_ON(&main_thread_);
  if (!last_estimated_playout_ntp_timestamp_ms_ ||
      !last_estimated_playout_time_ms_) {
    return absl::nullopt;
  }
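  // Extrapolate the last reported playout NTP timestamp forward by the wall
  // clock time that has elapsed since it was reported.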
  int64_t elapsed_ms = now_ms - *last_estimated_playout_time_ms_;
  return *last_estimated_playout_ntp_timestamp_ms_ + elapsed_ms;
}

VideoReceiveStreamInterface::Stats ReceiveStatisticsProxy::GetStats() const {
  RTC_DCHECK_RUN_ON(&main_thread_);

  // Like VideoReceiveStreamInterface::GetStats, called on the worker thread
  // from StatsCollector::ExtractMediaInfo via worker_thread()->BlockingCall();
  // see WebRtcVideoChannel::GetStats() and GetVideoReceiverInfo.

  // Get current frame rates here, as only updating them on new frames prevents
  // us from ever correctly displaying a frame rate of 0.
  int64_t now_ms = clock_->TimeInMilliseconds();
  UpdateFramerate(now_ms);

  stats_.render_frame_rate = renders_fps_estimator_.Rate(now_ms).value_or(0);
  stats_.decode_frame_rate = decode_fps_estimator_.Rate(now_ms).value_or(0);

  if (last_decoded_frame_time_ms_) {
    // Avoid using a newer timestamp than might be pending for decoded frames.
    // If we do use now_ms, we might roll the max window to a value that is
    // higher than that of a decoded frame timestamp that we haven't yet
    // captured the data for (i.e. pending call to OnDecodedFrame).
    stats_.interframe_delay_max_ms =
        interframe_delay_max_moving_.Max(*last_decoded_frame_time_ms_)
            .value_or(-1);
  } else {
    // We're paused. Avoid changing the state of
    // `interframe_delay_max_moving_`.
    stats_.interframe_delay_max_ms = -1;
  }

  stats_.freeze_count = video_quality_observer_->NumFreezes();
  stats_.pause_count = video_quality_observer_->NumPauses();
  stats_.total_freezes_duration_ms =
      video_quality_observer_->TotalFreezesDurationMs();
  stats_.total_pauses_duration_ms =
      video_quality_observer_->TotalPausesDurationMs();
  stats_.total_inter_frame_delay =
      static_cast<double>(video_quality_observer_->TotalFramesDurationMs()) /
      rtc::kNumMillisecsPerSec;
  stats_.total_squared_inter_frame_delay =
      video_quality_observer_->SumSquaredFrameDurationsSec();

  stats_.content_type = last_content_type_;
  stats_.timing_frame_info = timing_frame_info_counter_.Max(now_ms);
  stats_.estimated_playout_ntp_timestamp_ms =
      GetCurrentEstimatedPlayoutNtpTimestampMs(now_ms);
  return stats_;
}

void ReceiveStatisticsProxy::OnIncomingPayloadType(int payload_type) {
  RTC_DCHECK_RUN_ON(&decode_queue_);
  worker_thread_->PostTask(SafeTask(task_safety_.flag(), [payload_type, this] {
    RTC_DCHECK_RUN_ON(&main_thread_);
    stats_.current_payload_type = payload_type;
  }));
}

void ReceiveStatisticsProxy::OnDecoderInfo(
    const VideoDecoder::DecoderInfo& decoder_info) {
  RTC_DCHECK_RUN_ON(&decode_queue_);
  worker_thread_->PostTask(SafeTask(
      task_safety_.flag(),
      [this, name = decoder_info.implementation_name,
       is_hardware_accelerated = decoder_info.is_hardware_accelerated]() {
        RTC_DCHECK_RUN_ON(&main_thread_);
        stats_.decoder_implementation_name = name;
        stats_.power_efficient_decoder = is_hardware_accelerated;
      }));
}

void ReceiveStatisticsProxy::OnDecodableFrame(TimeDelta jitter_buffer_delay,
                                              TimeDelta target_delay,
                                              TimeDelta minimum_delay) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  // Cumulative stats exposed through standardized GetStats.
  stats_.jitter_buffer_delay += jitter_buffer_delay;
  stats_.jitter_buffer_target_delay += target_delay;
  ++stats_.jitter_buffer_emitted_count;
  stats_.jitter_buffer_minimum_delay += minimum_delay;
}

void ReceiveStatisticsProxy::OnFrameBufferTimingsUpdated(
    int estimated_max_decode_time_ms,
    int current_delay_ms,
    int target_delay_ms,
    int jitter_delay_ms,
    int min_playout_delay_ms,
    int render_delay_ms) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  // Instantaneous stats exposed through legacy GetStats.
  stats_.max_decode_ms = estimated_max_decode_time_ms;
  stats_.current_delay_ms = current_delay_ms;
  stats_.target_delay_ms = target_delay_ms;
  stats_.jitter_buffer_ms = jitter_delay_ms;
  stats_.min_playout_delay_ms = min_playout_delay_ms;
  stats_.render_delay_ms = render_delay_ms;

  // UMA stats.
  jitter_delay_counter_.Add(jitter_delay_ms);
  target_delay_counter_.Add(target_delay_ms);
  current_delay_counter_.Add(current_delay_ms);
  // Estimated one-way delay: network delay (rtt/2) + target_delay_ms (jitter
  // delay + decode time + render delay).
  oneway_delay_counter_.Add(target_delay_ms + avg_rtt_ms_ / 2);
}

void ReceiveStatisticsProxy::OnUniqueFramesCounted(int num_unique_frames) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  num_unique_frames_.emplace(num_unique_frames);
}

void ReceiveStatisticsProxy::OnTimingFrameInfoUpdated(
    const TimingFrameInfo& info) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  if (info.flags != VideoSendTiming::kInvalid) {
    int64_t now_ms = clock_->TimeInMilliseconds();
    timing_frame_info_counter_.Add(info, now_ms);
  }

  // Measure initial decoding latency between the first frame arriving and
  // the first frame being decoded.
  if (!first_frame_received_time_ms_.has_value()) {
    first_frame_received_time_ms_ = info.receive_finish_ms;
  }
  if (stats_.first_frame_received_to_decoded_ms == -1 &&
      first_decoded_frame_time_ms_) {
    stats_.first_frame_received_to_decoded_ms =
        *first_decoded_frame_time_ms_ - *first_frame_received_time_ms_;
  }
}

void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated(
    uint32_t ssrc,
    const RtcpPacketTypeCounter& packet_counter) {
  if (ssrc != remote_ssrc_)
    return;

  if (!IsCurrentTaskQueueOrThread(worker_thread_)) {
    // RtpRtcpInterface::Configuration has a single
    // RtcpPacketTypeCounterObserver, and that same configuration may be used
    // for both receiver and sender (see ModuleRtpRtcpImpl::ModuleRtpRtcpImpl).
    // The RTCPSender implementation currently makes calls to this function on
    // a process thread, whereas the RTCPReceiver implementation calls back on
    // the [main] worker thread.
    // So until the sender implementation has been updated, we work around this
    // here by posting the update to the expected thread. We make a by-value
    // copy of `task_safety_` to handle the case where the queued task runs
    // after the `ReceiveStatisticsProxy` has been deleted. In such a case the
    // packet_counter update won't be recorded.
    worker_thread_->PostTask(
        SafeTask(task_safety_.flag(), [ssrc, packet_counter, this]() {
          RtcpPacketTypesCounterUpdated(ssrc, packet_counter);
        }));
    return;
  }

  RTC_DCHECK_RUN_ON(&main_thread_);
  stats_.rtcp_packet_type_counts = packet_counter;
}

void ReceiveStatisticsProxy::OnCname(uint32_t ssrc, absl::string_view cname) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we
  // receive stats from one of them.
  if (remote_ssrc_ != ssrc)
    return;

  stats_.c_name = std::string(cname);
}

void ReceiveStatisticsProxy::OnDecodedFrame(const VideoFrame& frame,
                                            absl::optional<uint8_t> qp,
                                            TimeDelta decode_time,
                                            VideoContentType content_type,
                                            VideoFrameType frame_type) {
  TimeDelta processing_delay = TimeDelta::Zero();
  webrtc::Timestamp current_time = clock_->CurrentTime();
  // TODO(bugs.webrtc.org/13984): some tests do not fill packet_infos().
  TimeDelta assembly_time = TimeDelta::Zero();
  if (frame.packet_infos().size() > 0) {
    const auto [first_packet, last_packet] = std::minmax_element(
        frame.packet_infos().cbegin(), frame.packet_infos().cend(),
        [](const webrtc::RtpPacketInfo& a, const webrtc::RtpPacketInfo& b) {
          return a.receive_time() < b.receive_time();
        });
    if (first_packet->receive_time().IsFinite()) {
      processing_delay = current_time - first_packet->receive_time();
      // Extract frame assembly time (i.e. time between earliest and latest
      // packet arrival). Note: for single-packet frames this will be 0.
      assembly_time =
          last_packet->receive_time() - first_packet->receive_time();
    }
  }
  // See VCMDecodedFrameCallback::Decoded for more info on what thread/queue we
  // may be on. E.g. on iOS this gets called on
  // "com.apple.coremedia.decompressionsession.clientcallback".
  VideoFrameMetaData meta(frame, current_time);
  worker_thread_->PostTask(SafeTask(
      task_safety_.flag(), [meta, qp, decode_time, processing_delay,
                            assembly_time, content_type, frame_type, this]() {
        OnDecodedFrame(meta, qp, decode_time, processing_delay, assembly_time,
                       content_type, frame_type);
      }));
}

void ReceiveStatisticsProxy::OnDecodedFrame(
    const VideoFrameMetaData& frame_meta,
    absl::optional<uint8_t> qp,
    TimeDelta decode_time,
    TimeDelta processing_delay,
    TimeDelta assembly_time,
    VideoContentType content_type,
    VideoFrameType frame_type) {
  RTC_DCHECK_RUN_ON(&main_thread_);

  const bool is_screenshare =
      videocontenttypehelpers::IsScreenshare(content_type);
  const bool was_screenshare =
      videocontenttypehelpers::IsScreenshare(last_content_type_);

  if (is_screenshare != was_screenshare) {
    // Reset the quality observer if content type is switched. But first report
    // stats for the previous part of the call.
    video_quality_observer_->UpdateHistograms(was_screenshare);
    video_quality_observer_.reset(new VideoQualityObserver());
  }

  video_quality_observer_->OnDecodedFrame(frame_meta.rtp_timestamp, qp,
                                          last_codec_type_);

  ContentSpecificStats* content_specific_stats =
      &content_specific_stats_[content_type];

  ++stats_.frames_decoded;
  if (frame_type == VideoFrameType::kVideoFrameKey) {
    ++stats_.frame_counts.key_frames;
  } else {
    ++stats_.frame_counts.delta_frames;
  }
  if (qp) {
    if (!stats_.qp_sum) {
      if (stats_.frames_decoded != 1) {
        RTC_LOG(LS_WARNING)
            << "Frames decoded was not 1 when first qp value was received.";
      }
      stats_.qp_sum = 0;
    }
    *stats_.qp_sum += *qp;
    content_specific_stats->qp_counter.Add(*qp);
  } else if (stats_.qp_sum) {
    RTC_LOG(LS_WARNING)
        << "QP sum was already set and no QP was given for a frame.";
    stats_.qp_sum.reset();
  }
  decode_time_counter_.Add(decode_time.ms());
  stats_.decode_ms = decode_time.ms();
  stats_.total_decode_time += decode_time;
  stats_.total_processing_delay += processing_delay;
  stats_.total_assembly_time += assembly_time;
  if (!assembly_time.IsZero()) {
    ++stats_.frames_assembled_from_multiple_packets;
  }

  last_content_type_ = content_type;
  decode_fps_estimator_.Update(1, frame_meta.decode_timestamp.ms());

  if (last_decoded_frame_time_ms_) {
    int64_t interframe_delay_ms =
        frame_meta.decode_timestamp.ms() - *last_decoded_frame_time_ms_;
    RTC_DCHECK_GE(interframe_delay_ms, 0);
    interframe_delay_max_moving_.Add(interframe_delay_ms,
                                     frame_meta.decode_timestamp.ms());
    content_specific_stats->interframe_delay_counter.Add(interframe_delay_ms);
    content_specific_stats->interframe_delay_percentiles.Add(
        interframe_delay_ms);
    content_specific_stats->flow_duration_ms += interframe_delay_ms;
  }
  if (stats_.frames_decoded == 1) {
    first_decoded_frame_time_ms_.emplace(frame_meta.decode_timestamp.ms());
  }
  last_decoded_frame_time_ms_.emplace(frame_meta.decode_timestamp.ms());
}

void ReceiveStatisticsProxy::OnRenderedFrame(
    const VideoFrameMetaData& frame_meta) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  // Called from VideoReceiveStream2::OnFrame.

  RTC_DCHECK_GT(frame_meta.width, 0);
  RTC_DCHECK_GT(frame_meta.height, 0);

  video_quality_observer_->OnRenderedFrame(frame_meta);

  ContentSpecificStats* content_specific_stats =
      &content_specific_stats_[last_content_type_];
  renders_fps_estimator_.Update(1, frame_meta.decode_timestamp.ms());

  ++stats_.frames_rendered;
  stats_.width = frame_meta.width;
  stats_.height = frame_meta.height;

  render_fps_tracker_.AddSamples(1);
  render_pixel_tracker_.AddSamples(sqrt(frame_meta.width * frame_meta.height));
  content_specific_stats->received_width.Add(frame_meta.width);
  content_specific_stats->received_height.Add(frame_meta.height);

  // Consider taking stats_.render_delay_ms into account.
  const int64_t time_until_rendering_ms =
      frame_meta.render_time_ms() - frame_meta.decode_timestamp.ms();
  if (time_until_rendering_ms < 0) {
    sum_missed_render_deadline_ms_ += -time_until_rendering_ms;
    ++num_delayed_frames_rendered_;
  }

  if (frame_meta.ntp_time_ms > 0) {
    int64_t delay_ms =
        clock_->CurrentNtpInMilliseconds() - frame_meta.ntp_time_ms;
    if (delay_ms >= 0) {
      content_specific_stats->e2e_delay_counter.Add(delay_ms);
    }
  }
}

void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t video_playout_ntp_ms,
                                                 int64_t sync_offset_ms,
                                                 double estimated_freq_khz) {
  RTC_DCHECK_RUN_ON(&main_thread_);

  const int64_t now_ms = clock_->TimeInMilliseconds();
  sync_offset_counter_.Add(std::abs(sync_offset_ms));
  stats_.sync_offset_ms = sync_offset_ms;
  last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms;
  last_estimated_playout_time_ms_ = now_ms;
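
  // The video RTP clock is nominally 90 kHz; what is recorded below is the
  // absolute deviation from that nominal rate, rounded to the nearest kHz.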
  const double kMaxFreqKhz = 10000.0;
  int offset_khz = kMaxFreqKhz;
  // Should not be zero or negative. If so, report max.
  if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0)
    offset_khz = static_cast<int>(std::fabs(estimated_freq_khz - 90.0) + 0.5);

  freq_offset_counter_.Add(offset_khz);
}

void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe,
                                             size_t size_bytes,
                                             VideoContentType content_type) {
  RTC_DCHECK_RUN_ON(&main_thread_);

  // The content type extension is set only for keyframes and should be
  // propagated to all the following delta frames. Here we may receive frames
  // out of order and miscategorise some delta frames near a layer switch.
  // This may slightly offset the calculated bitrate and keyframes permille
  // metrics.
  VideoContentType propagated_content_type =
      is_keyframe ? content_type : last_content_type_;

  ContentSpecificStats* content_specific_stats =
      &content_specific_stats_[propagated_content_type];

  content_specific_stats->total_media_bytes += size_bytes;
  if (is_keyframe) {
    ++content_specific_stats->frame_counts.key_frames;
  } else {
    ++content_specific_stats->frame_counts.delta_frames;
  }

  int64_t now_ms = clock_->TimeInMilliseconds();
  frame_window_.insert(std::make_pair(now_ms, size_bytes));
  UpdateFramerate(now_ms);
}

void ReceiveStatisticsProxy::OnDroppedFrames(uint32_t frames_dropped) {
  // Can be called on either the decode queue or the worker thread;
  // see FrameBuffer2 for more details.
  worker_thread_->PostTask(
      SafeTask(task_safety_.flag(), [frames_dropped, this]() {
        RTC_DCHECK_RUN_ON(&main_thread_);
        stats_.frames_dropped += frames_dropped;
      }));
}

void ReceiveStatisticsProxy::OnPreDecode(VideoCodecType codec_type, int qp) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  last_codec_type_ = codec_type;
  if (last_codec_type_ == kVideoCodecVP8 && qp != -1) {
    qp_counters_.vp8.Add(qp);
  }
}

void ReceiveStatisticsProxy::OnStreamInactive() {
  RTC_DCHECK_RUN_ON(&main_thread_);

  // TODO(sprang): Figure out any other state that should be reset.

  // Don't report inter-frame delay if stream was paused.
  last_decoded_frame_time_ms_.reset();

  video_quality_observer_->OnStreamInactive();
}

void ReceiveStatisticsProxy::OnRttUpdate(int64_t avg_rtt_ms) {
  RTC_DCHECK_RUN_ON(&main_thread_);
  avg_rtt_ms_ = avg_rtt_ms;
}

void ReceiveStatisticsProxy::DecoderThreadStarting() {
  RTC_DCHECK_RUN_ON(&main_thread_);
}

void ReceiveStatisticsProxy::DecoderThreadStopped() {
  RTC_DCHECK_RUN_ON(&main_thread_);
  decode_queue_.Detach();
}

ReceiveStatisticsProxy::ContentSpecificStats::ContentSpecificStats()
    : interframe_delay_percentiles(kMaxCommonInterframeDelayMs) {}

ReceiveStatisticsProxy::ContentSpecificStats::~ContentSpecificStats() = default;

void ReceiveStatisticsProxy::ContentSpecificStats::Add(
    const ContentSpecificStats& other) {
  e2e_delay_counter.Add(other.e2e_delay_counter);
  interframe_delay_counter.Add(other.interframe_delay_counter);
  flow_duration_ms += other.flow_duration_ms;
  total_media_bytes += other.total_media_bytes;
  received_height.Add(other.received_height);
  received_width.Add(other.received_width);
  qp_counter.Add(other.qp_counter);
  frame_counts.key_frames += other.frame_counts.key_frames;
  frame_counts.delta_frames += other.frame_counts.delta_frames;
  interframe_delay_percentiles.Add(other.interframe_delay_percentiles);
}

}  // namespace internal
}  // namespace webrtc

215
TMessagesProj/jni/voip/webrtc/video/receive_statistics_proxy.h
Normal file
@@ -0,0 +1,215 @@
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_RECEIVE_STATISTICS_PROXY_H_
#define VIDEO_RECEIVE_STATISTICS_PROXY_H_

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/units/timestamp.h"
#include "api/video_codecs/video_decoder.h"
#include "call/video_receive_stream.h"
#include "modules/include/module_common_types.h"
#include "rtc_base/numerics/histogram_percentile_counter.h"
#include "rtc_base/numerics/moving_max_counter.h"
#include "rtc_base/numerics/sample_counter.h"
#include "rtc_base/rate_statistics.h"
#include "rtc_base/rate_tracker.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "video/stats_counter.h"
#include "video/video_quality_observer2.h"
#include "video/video_stream_buffer_controller.h"

namespace webrtc {

class Clock;
struct CodecSpecificInfo;

namespace internal {
// Declared in video_receive_stream2.h.
struct VideoFrameMetaData;

class ReceiveStatisticsProxy : public VideoStreamBufferControllerStatsObserver,
                               public RtcpCnameCallback,
                               public RtcpPacketTypeCounterObserver {
 public:
  ReceiveStatisticsProxy(uint32_t remote_ssrc,
                         Clock* clock,
                         TaskQueueBase* worker_thread);
  ~ReceiveStatisticsProxy() override;

  VideoReceiveStreamInterface::Stats GetStats() const;

  void OnDecodedFrame(const VideoFrame& frame,
                      absl::optional<uint8_t> qp,
                      TimeDelta decode_time,
                      VideoContentType content_type,
                      VideoFrameType frame_type);

  // Called asynchronously on the worker thread as a result of a call to the
  // above OnDecodedFrame method, which is called back on the thread where
  // the actual decoding happens.
  void OnDecodedFrame(const VideoFrameMetaData& frame_meta,
                      absl::optional<uint8_t> qp,
                      TimeDelta decode_time,
                      TimeDelta processing_delay,
                      TimeDelta assembly_time,
                      VideoContentType content_type,
                      VideoFrameType frame_type);

  void OnSyncOffsetUpdated(int64_t video_playout_ntp_ms,
                           int64_t sync_offset_ms,
                           double estimated_freq_khz);
  void OnRenderedFrame(const VideoFrameMetaData& frame_meta);
  void OnIncomingPayloadType(int payload_type);
  void OnDecoderInfo(const VideoDecoder::DecoderInfo& decoder_info);

  void OnPreDecode(VideoCodecType codec_type, int qp);

  void OnUniqueFramesCounted(int num_unique_frames);

  // Indicates that the video stream has been paused (no incoming packets).
  void OnStreamInactive();

  // Implements VideoStreamBufferControllerStatsObserver.
  void OnCompleteFrame(bool is_keyframe,
                       size_t size_bytes,
                       VideoContentType content_type) override;
  void OnDroppedFrames(uint32_t frames_dropped) override;
  void OnDecodableFrame(TimeDelta jitter_buffer_delay,
                        TimeDelta target_delay,
                        TimeDelta minimum_delay) override;
  void OnFrameBufferTimingsUpdated(int estimated_max_decode_time_ms,
                                   int current_delay_ms,
                                   int target_delay_ms,
                                   int jitter_delay_ms,
                                   int min_playout_delay_ms,
                                   int render_delay_ms) override;
  void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) override;

  // Implements RtcpCnameCallback.
  void OnCname(uint32_t ssrc, absl::string_view cname) override;

  // Implements RtcpPacketTypeCounterObserver.
  void RtcpPacketTypesCounterUpdated(
      uint32_t ssrc,
      const RtcpPacketTypeCounter& packet_counter) override;

  void OnRttUpdate(int64_t avg_rtt_ms);

  // Notification methods that are used to check our internal state and
  // validate threading assumptions. These are called by
  // VideoReceiveStreamInterface.
  void DecoderThreadStarting();
  void DecoderThreadStopped();

  // Produces histograms. Must be called after DecoderThreadStopped(),
  // typically at the end of the call.
  void UpdateHistograms(absl::optional<int> fraction_lost,
                        const StreamDataCounters& rtp_stats,
                        const StreamDataCounters* rtx_stats);

 private:
  struct QpCounters {
    rtc::SampleCounter vp8;
  };

  struct ContentSpecificStats {
    ContentSpecificStats();
    ~ContentSpecificStats();

    void Add(const ContentSpecificStats& other);

    rtc::SampleCounter e2e_delay_counter;
    rtc::SampleCounter interframe_delay_counter;
    int64_t flow_duration_ms = 0;
    int64_t total_media_bytes = 0;
    rtc::SampleCounter received_width;
    rtc::SampleCounter received_height;
    rtc::SampleCounter qp_counter;
    FrameCounts frame_counts;
    rtc::HistogramPercentileCounter interframe_delay_percentiles;
  };

  // Removes info about old frames and then updates the framerate.
  void UpdateFramerate(int64_t now_ms) const;

  absl::optional<int64_t> GetCurrentEstimatedPlayoutNtpTimestampMs(
      int64_t now_ms) const;

  Clock* const clock_;
  const int64_t start_ms_;

  // Note: The `stats_.rtp_stats` member is not used or populated by this
  // class.
  mutable VideoReceiveStreamInterface::Stats stats_
      RTC_GUARDED_BY(main_thread_);
  // Same as stats_.ssrc, but const (no lock required).
  const uint32_t remote_ssrc_;
  RateStatistics decode_fps_estimator_ RTC_GUARDED_BY(main_thread_);
  RateStatistics renders_fps_estimator_ RTC_GUARDED_BY(main_thread_);
  rtc::RateTracker render_fps_tracker_ RTC_GUARDED_BY(main_thread_);
  rtc::RateTracker render_pixel_tracker_ RTC_GUARDED_BY(main_thread_);
  rtc::SampleCounter sync_offset_counter_ RTC_GUARDED_BY(main_thread_);
  rtc::SampleCounter decode_time_counter_ RTC_GUARDED_BY(main_thread_);
  rtc::SampleCounter jitter_delay_counter_ RTC_GUARDED_BY(main_thread_);
  rtc::SampleCounter target_delay_counter_ RTC_GUARDED_BY(main_thread_);
  rtc::SampleCounter current_delay_counter_ RTC_GUARDED_BY(main_thread_);
  rtc::SampleCounter oneway_delay_counter_ RTC_GUARDED_BY(main_thread_);
  std::unique_ptr<VideoQualityObserver> video_quality_observer_
      RTC_GUARDED_BY(main_thread_);
  mutable rtc::MovingMaxCounter<int> interframe_delay_max_moving_
      RTC_GUARDED_BY(main_thread_);
  std::map<VideoContentType, ContentSpecificStats> content_specific_stats_
      RTC_GUARDED_BY(main_thread_);
  MaxCounter freq_offset_counter_ RTC_GUARDED_BY(main_thread_);
  QpCounters qp_counters_ RTC_GUARDED_BY(main_thread_);
  int64_t avg_rtt_ms_ RTC_GUARDED_BY(main_thread_) = 0;
  mutable std::map<int64_t, size_t> frame_window_ RTC_GUARDED_BY(main_thread_);
  VideoContentType last_content_type_ RTC_GUARDED_BY(&main_thread_);
  VideoCodecType last_codec_type_ RTC_GUARDED_BY(main_thread_);
  absl::optional<int64_t> first_frame_received_time_ms_
      RTC_GUARDED_BY(main_thread_);
  absl::optional<int64_t> first_decoded_frame_time_ms_
      RTC_GUARDED_BY(main_thread_);
  absl::optional<int64_t> last_decoded_frame_time_ms_
      RTC_GUARDED_BY(main_thread_);
  size_t num_delayed_frames_rendered_ RTC_GUARDED_BY(main_thread_);
  int64_t sum_missed_render_deadline_ms_ RTC_GUARDED_BY(main_thread_);
  // Mutable because calling Max() on MovingMaxCounter is not const. Yet it is
  // called from const GetStats().
  mutable rtc::MovingMaxCounter<TimingFrameInfo> timing_frame_info_counter_
      RTC_GUARDED_BY(main_thread_);
  absl::optional<int> num_unique_frames_ RTC_GUARDED_BY(main_thread_);
  absl::optional<int64_t> last_estimated_playout_ntp_timestamp_ms_
      RTC_GUARDED_BY(main_thread_);
  absl::optional<int64_t> last_estimated_playout_time_ms_
      RTC_GUARDED_BY(main_thread_);

  // The thread on which this instance is constructed and some of its main
  // methods are invoked on, such as GetStats().
  TaskQueueBase* const worker_thread_;

  ScopedTaskSafety task_safety_;

  RTC_NO_UNIQUE_ADDRESS SequenceChecker decode_queue_;
  SequenceChecker main_thread_;
  RTC_NO_UNIQUE_ADDRESS SequenceChecker incoming_render_queue_;
};

}  // namespace internal
}  // namespace webrtc
#endif  // VIDEO_RECEIVE_STATISTICS_PROXY_H_

72
TMessagesProj/jni/voip/webrtc/video/render/incoming_video_stream.cc
Normal file
@@ -0,0 +1,72 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/render/incoming_video_stream.h"

#include <memory>
#include <utility>

#include "absl/types/optional.h"
#include "api/units/time_delta.h"
#include "rtc_base/checks.h"
#include "rtc_base/trace_event.h"
#include "video/render/video_render_frames.h"

namespace webrtc {

IncomingVideoStream::IncomingVideoStream(
    TaskQueueFactory* task_queue_factory,
    int32_t delay_ms,
    rtc::VideoSinkInterface<VideoFrame>* callback)
    : render_buffers_(delay_ms),
      callback_(callback),
      incoming_render_queue_(task_queue_factory->CreateTaskQueue(
          "IncomingVideoStream",
          TaskQueueFactory::Priority::HIGH)) {}

IncomingVideoStream::~IncomingVideoStream() {
  RTC_DCHECK(main_thread_checker_.IsCurrent());
  // The queue must be destroyed before its pointer is invalidated, to avoid a
  // race between this destructor and a task the queue posts to itself. The
  // std::unique_ptr destructor performs the same two operations in the
  // reverse order, as it doesn't expect the member to be used after its
  // destruction has started.
  incoming_render_queue_.get_deleter()(incoming_render_queue_.get());
  incoming_render_queue_.release();
}

void IncomingVideoStream::OnFrame(const VideoFrame& video_frame) {
  TRACE_EVENT0("webrtc", "IncomingVideoStream::OnFrame");
  RTC_CHECK_RUNS_SERIALIZED(&decoder_race_checker_);
  RTC_DCHECK(!incoming_render_queue_->IsCurrent());
  // TODO(srte): Using video_frame = std::move(video_frame) would move the
  // frame into the lambda instead of copying it, but it doesn't work unless
  // OnFrame is changed to take its frame argument by value instead of by
  // const reference.
  incoming_render_queue_->PostTask([this, video_frame = video_frame]() mutable {
    RTC_DCHECK_RUN_ON(incoming_render_queue_.get());
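    // AddFrame() returns the new queue depth; only kick off the dequeue chain
    // when the queue was previously empty, since Dequeue() re-posts itself for
    // as long as frames remain pending.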
    if (render_buffers_.AddFrame(std::move(video_frame)) == 1)
      Dequeue();
  });
}

void IncomingVideoStream::Dequeue() {
  TRACE_EVENT0("webrtc", "IncomingVideoStream::Dequeue");
  RTC_DCHECK_RUN_ON(incoming_render_queue_.get());
  absl::optional<VideoFrame> frame_to_render = render_buffers_.FrameToRender();
  if (frame_to_render)
    callback_->OnFrame(*frame_to_render);

  if (render_buffers_.HasPendingFrames()) {
    uint32_t wait_time = render_buffers_.TimeToNextFrameRelease();
    incoming_render_queue_->PostDelayedHighPrecisionTask(
        [this]() { Dequeue(); }, TimeDelta::Millis(wait_time));
  }
}

}  // namespace webrtc

50
TMessagesProj/jni/voip/webrtc/video/render/incoming_video_stream.h
Normal file
@@ -0,0 +1,50 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_RENDER_INCOMING_VIDEO_STREAM_H_
#define VIDEO_RENDER_INCOMING_VIDEO_STREAM_H_

#include <stdint.h>

#include <memory>

#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/thread_annotations.h"
#include "video/render/video_render_frames.h"

namespace webrtc {

class IncomingVideoStream : public rtc::VideoSinkInterface<VideoFrame> {
 public:
  IncomingVideoStream(TaskQueueFactory* task_queue_factory,
                      int32_t delay_ms,
                      rtc::VideoSinkInterface<VideoFrame>* callback);
  ~IncomingVideoStream() override;

 private:
  void OnFrame(const VideoFrame& video_frame) override;
  void Dequeue();

  SequenceChecker main_thread_checker_;
  rtc::RaceChecker decoder_race_checker_;

  VideoRenderFrames render_buffers_ RTC_GUARDED_BY(incoming_render_queue_);
  rtc::VideoSinkInterface<VideoFrame>* const callback_;
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> incoming_render_queue_;
};

}  // namespace webrtc

#endif  // VIDEO_RENDER_INCOMING_VIDEO_STREAM_H_

116
TMessagesProj/jni/voip/webrtc/video/render/video_render_frames.cc
Normal file
@@ -0,0 +1,116 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/render/video_render_frames.h"

#include <type_traits>
#include <utility>

#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {
namespace {
// Don't render frames with timestamps older than 500 ms from now.
const int kOldRenderTimestampMS = 500;
// Don't render frames with timestamps more than 10 s into the future.
const int kFutureRenderTimestampMS = 10000;

const uint32_t kEventMaxWaitTimeMs = 200;
const uint32_t kMinRenderDelayMs = 10;
const uint32_t kMaxRenderDelayMs = 500;
const size_t kMaxIncomingFramesBeforeLogged = 100;
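
// Clamp the configured render delay: any value outside
// [kMinRenderDelayMs, kMaxRenderDelayMs] falls back to the minimum.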
uint32_t EnsureValidRenderDelay(uint32_t render_delay) {
  return (render_delay < kMinRenderDelayMs || render_delay > kMaxRenderDelayMs)
             ? kMinRenderDelayMs
             : render_delay;
}
}  // namespace

VideoRenderFrames::VideoRenderFrames(uint32_t render_delay_ms)
    : render_delay_ms_(EnsureValidRenderDelay(render_delay_ms)) {}

VideoRenderFrames::~VideoRenderFrames() {
  frames_dropped_ += incoming_frames_.size();
  RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DroppedFrames.RenderQueue",
                            frames_dropped_);
  RTC_LOG(LS_INFO) << "WebRTC.Video.DroppedFrames.RenderQueue "
                   << frames_dropped_;
}

int32_t VideoRenderFrames::AddFrame(VideoFrame&& new_frame) {
  const int64_t time_now = rtc::TimeMillis();

  // Drop old frames only when there are other frames in the queue; otherwise
  // a really slow system would never render any frames.
  if (!incoming_frames_.empty() &&
      new_frame.render_time_ms() + kOldRenderTimestampMS < time_now) {
    RTC_LOG(LS_WARNING) << "Too old frame, timestamp=" << new_frame.timestamp();
    ++frames_dropped_;
    return -1;
  }

  if (new_frame.render_time_ms() > time_now + kFutureRenderTimestampMS) {
    RTC_LOG(LS_WARNING) << "Frame too long into the future, timestamp="
                        << new_frame.timestamp();
    ++frames_dropped_;
    return -1;
  }

  if (new_frame.render_time_ms() < last_render_time_ms_) {
    RTC_LOG(LS_WARNING) << "Frame scheduled out of order, render_time="
                        << new_frame.render_time_ms()
                        << ", latest=" << last_render_time_ms_;
    // For more details, see bug:
    // https://bugs.chromium.org/p/webrtc/issues/detail?id=7253
    ++frames_dropped_;
    return -1;
  }

  last_render_time_ms_ = new_frame.render_time_ms();
  incoming_frames_.emplace_back(std::move(new_frame));

  if (incoming_frames_.size() > kMaxIncomingFramesBeforeLogged) {
    RTC_LOG(LS_WARNING) << "Stored incoming frames: "
                        << incoming_frames_.size();
  }
  return static_cast<int32_t>(incoming_frames_.size());
}

absl::optional<VideoFrame> VideoRenderFrames::FrameToRender() {
  absl::optional<VideoFrame> render_frame;
  // Get the newest frame that can be released for rendering.
  while (!incoming_frames_.empty() && TimeToNextFrameRelease() <= 0) {
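    // Every earlier frame that is also due is skipped and counted as dropped;
    // only the newest due frame is kept.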
    if (render_frame) {
      ++frames_dropped_;
    }
    render_frame = std::move(incoming_frames_.front());
    incoming_frames_.pop_front();
  }
  return render_frame;
}

uint32_t VideoRenderFrames::TimeToNextFrameRelease() {
  if (incoming_frames_.empty()) {
    return kEventMaxWaitTimeMs;
  }
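  // Release the frame `render_delay_ms_` ahead of its nominal render time so
  // the sink has time to actually put it on screen.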
  const int64_t time_to_release = incoming_frames_.front().render_time_ms() -
                                  render_delay_ms_ - rtc::TimeMillis();
  return time_to_release < 0 ? 0u : static_cast<uint32_t>(time_to_release);
}

bool VideoRenderFrames::HasPendingFrames() const {
  return !incoming_frames_.empty();
}

}  // namespace webrtc

55
TMessagesProj/jni/voip/webrtc/video/render/video_render_frames.h
Normal file
@@ -0,0 +1,55 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_RENDER_VIDEO_RENDER_FRAMES_H_
#define VIDEO_RENDER_VIDEO_RENDER_FRAMES_H_

#include <stddef.h>
#include <stdint.h>

#include <list>

#include "absl/types/optional.h"
#include "api/video/video_frame.h"

namespace webrtc {

class VideoRenderFrames {
 public:
  explicit VideoRenderFrames(uint32_t render_delay_ms);
  VideoRenderFrames(const VideoRenderFrames&) = delete;
  ~VideoRenderFrames();

  // Adds a frame to the render queue.
  int32_t AddFrame(VideoFrame&& new_frame);

  // Gets a frame for rendering, or absl::nullopt if it's not yet time to
  // render any queued frame.
  absl::optional<VideoFrame> FrameToRender();

  // Returns the number of ms until the next frame should be released.
  uint32_t TimeToNextFrameRelease();

  bool HasPendingFrames() const;

 private:
  // Sorted list of frames to be rendered, oldest first.
  std::list<VideoFrame> incoming_frames_;

  // Estimated delay from when a frame is released until it's rendered.
  const uint32_t render_delay_ms_;

  int64_t last_render_time_ms_ = 0;
  size_t frames_dropped_ = 0;
};

}  // namespace webrtc

#endif  // VIDEO_RENDER_VIDEO_RENDER_FRAMES_H_

65
TMessagesProj/jni/voip/webrtc/video/report_block_stats.cc
Normal file
@@ -0,0 +1,65 @@
/*
 *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/report_block_stats.h"

#include <algorithm>

namespace webrtc {

namespace {
int FractionLost(uint32_t num_lost_sequence_numbers,
                 uint32_t num_sequence_numbers) {
  if (num_sequence_numbers == 0) {
    return 0;
  }
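  // Scale the loss ratio to the 0-255 range used by RTCP report blocks;
  // adding half the denominator rounds to nearest.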
  return ((num_lost_sequence_numbers * 255) + (num_sequence_numbers / 2)) /
         num_sequence_numbers;
}
}  // namespace

// Helper class for RTCP statistics.
ReportBlockStats::ReportBlockStats()
    : num_sequence_numbers_(0), num_lost_sequence_numbers_(0) {}

ReportBlockStats::~ReportBlockStats() {}

void ReportBlockStats::Store(uint32_t ssrc,
                             int packets_lost,
                             uint32_t extended_highest_sequence_number) {
  Report report;
  report.packets_lost = packets_lost;
  report.extended_highest_sequence_number = extended_highest_sequence_number;

  // Get the diff with the previous report block.
  const auto prev_report = prev_reports_.find(ssrc);
  if (prev_report != prev_reports_.end()) {
    int seq_num_diff = report.extended_highest_sequence_number -
                       prev_report->second.extended_highest_sequence_number;
    int cum_loss_diff = report.packets_lost - prev_report->second.packets_lost;
    if (seq_num_diff >= 0 && cum_loss_diff >= 0) {
      // Update the total number of packets/lost packets.
      num_sequence_numbers_ += seq_num_diff;
      num_lost_sequence_numbers_ += cum_loss_diff;
    }
  }
  // Store the current report block.
  prev_reports_[ssrc] = report;
}

int ReportBlockStats::FractionLostInPercent() const {
  if (num_sequence_numbers_ == 0) {
    return -1;
  }
  return FractionLost(num_lost_sequence_numbers_, num_sequence_numbers_) * 100 /
         255;
}

}  // namespace webrtc

58
TMessagesProj/jni/voip/webrtc/video/report_block_stats.h
Normal file
@@ -0,0 +1,58 @@
/*
 *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_REPORT_BLOCK_STATS_H_
#define VIDEO_REPORT_BLOCK_STATS_H_

#include <stdint.h>

#include <map>

namespace webrtc {

// TODO(nisse): The usefulness of this class is somewhat unclear. The inputs
// are cumulative counters, from which we compute deltas, and then accumulate
// the deltas. It may be needed on the send side, to handle wraparound in the
// short counters received over RTCP, but it should not be needed on the
// receive side, where we can use large enough types for all counters we need.

// Helper class for RTCP statistics.
class ReportBlockStats {
 public:
  ReportBlockStats();
  ~ReportBlockStats();

  // Updates stats and stores the report block.
  void Store(uint32_t ssrc,
             int packets_lost,
             uint32_t extended_highest_sequence_number);

  // Returns the total fraction of lost packets (or -1 if less than two report
  // blocks have been stored).
  int FractionLostInPercent() const;

 private:
  // The information from an RTCP report block that we need.
  struct Report {
    uint32_t extended_highest_sequence_number;
    int32_t packets_lost;
  };

  // The total number of packets/lost packets.
  uint32_t num_sequence_numbers_;
  uint32_t num_lost_sequence_numbers_;

  // Map holding the last stored report (mapped by the source SSRC).
  std::map<uint32_t, Report> prev_reports_;
};

}  // namespace webrtc

#endif  // VIDEO_REPORT_BLOCK_STATS_H_

219
TMessagesProj/jni/voip/webrtc/video/rtp_streams_synchronizer2.cc
Normal file
@@ -0,0 +1,219 @@
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/rtp_streams_synchronizer2.h"

#include "absl/types/optional.h"
#include "call/syncable.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/rtp_to_ntp_estimator.h"

namespace webrtc {
namespace internal {
namespace {
// Time interval for logging stats.
constexpr int64_t kStatsLogIntervalMs = 10000;
constexpr TimeDelta kSyncInterval = TimeDelta::Millis(1000);

bool UpdateMeasurements(StreamSynchronization::Measurements* stream,
                        const Syncable::Info& info) {
  stream->latest_timestamp = info.latest_received_capture_timestamp;
  stream->latest_receive_time_ms = info.latest_receive_time_ms;
  return stream->rtp_to_ntp.UpdateMeasurements(
             NtpTime(info.capture_time_ntp_secs, info.capture_time_ntp_frac),
             info.capture_time_source_clock) !=
         RtpToNtpEstimator::kInvalidMeasurement;
}

}  // namespace

RtpStreamsSynchronizer::RtpStreamsSynchronizer(TaskQueueBase* main_queue,
                                               Syncable* syncable_video)
    : task_queue_(main_queue),
      syncable_video_(syncable_video),
      last_stats_log_ms_(rtc::TimeMillis()) {
  RTC_DCHECK(syncable_video);
}

RtpStreamsSynchronizer::~RtpStreamsSynchronizer() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  repeating_task_.Stop();
}

void RtpStreamsSynchronizer::ConfigureSync(Syncable* syncable_audio) {
  RTC_DCHECK_RUN_ON(&main_checker_);

  // Prevent expensive no-ops.
  if (syncable_audio == syncable_audio_)
    return;

  syncable_audio_ = syncable_audio;
  sync_.reset(nullptr);
  if (!syncable_audio_) {
    repeating_task_.Stop();
    return;
  }

  sync_.reset(
      new StreamSynchronization(syncable_video_->id(), syncable_audio_->id()));

  if (repeating_task_.Running())
    return;

  repeating_task_ =
      RepeatingTaskHandle::DelayedStart(task_queue_, kSyncInterval, [this]() {
        UpdateDelay();
        return kSyncInterval;
      });
}

void RtpStreamsSynchronizer::UpdateDelay() {
  RTC_DCHECK_RUN_ON(&main_checker_);

  if (!syncable_audio_)
    return;

  RTC_DCHECK(sync_.get());

  bool log_stats = false;
  const int64_t now_ms = rtc::TimeMillis();
  if (now_ms - last_stats_log_ms_ > kStatsLogIntervalMs) {
    last_stats_log_ms_ = now_ms;
    log_stats = true;
  }

  int64_t last_audio_receive_time_ms =
      audio_measurement_.latest_receive_time_ms;
  absl::optional<Syncable::Info> audio_info = syncable_audio_->GetInfo();
  if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) {
    return;
  }

  if (last_audio_receive_time_ms == audio_measurement_.latest_receive_time_ms) {
    // No new audio packet has been received since the last update.
    return;
  }

  int64_t last_video_receive_ms = video_measurement_.latest_receive_time_ms;
  absl::optional<Syncable::Info> video_info = syncable_video_->GetInfo();
  if (!video_info || !UpdateMeasurements(&video_measurement_, *video_info)) {
    return;
  }

  if (last_video_receive_ms == video_measurement_.latest_receive_time_ms) {
    // No new video packet has been received since the last update.
    return;
  }

  int relative_delay_ms;
  // Calculate how much later or earlier the audio stream is compared to video.
  if (!sync_->ComputeRelativeDelay(audio_measurement_, video_measurement_,
                                   &relative_delay_ms)) {
    return;
  }

  if (log_stats) {
    RTC_LOG(LS_INFO) << "Sync info stats: " << now_ms
                     << ", {ssrc: " << sync_->audio_stream_id() << ", "
                     << "cur_delay_ms: " << audio_info->current_delay_ms
                     << "} {ssrc: " << sync_->video_stream_id() << ", "
                     << "cur_delay_ms: " << video_info->current_delay_ms
                     << "} {relative_delay_ms: " << relative_delay_ms << "} ";
  }

  TRACE_COUNTER1("webrtc", "SyncCurrentVideoDelay",
                 video_info->current_delay_ms);
  TRACE_COUNTER1("webrtc", "SyncCurrentAudioDelay",
                 audio_info->current_delay_ms);
  TRACE_COUNTER1("webrtc", "SyncRelativeDelay", relative_delay_ms);

  int target_audio_delay_ms = 0;
  int target_video_delay_ms = video_info->current_delay_ms;
  // Calculate the necessary extra audio delay and desired total video
  // delay to get the streams in sync.
  if (!sync_->ComputeDelays(relative_delay_ms, audio_info->current_delay_ms,
                            &target_audio_delay_ms, &target_video_delay_ms)) {
    return;
  }

  if (log_stats) {
    RTC_LOG(LS_INFO) << "Sync delay stats: " << now_ms
                     << ", {ssrc: " << sync_->audio_stream_id() << ", "
                     << "target_delay_ms: " << target_audio_delay_ms
                     << "} {ssrc: " << sync_->video_stream_id() << ", "
                     << "target_delay_ms: " << target_video_delay_ms << "} ";
  }

  if (!syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms)) {
    sync_->ReduceAudioDelay();
  }
  if (!syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms)) {
    sync_->ReduceVideoDelay();
  }
}

// TODO(https://bugs.webrtc.org/7065): Move RtpToNtpEstimator out of
// RtpStreamsSynchronizer and into the respective receive stream, to always
// populate the estimated playout timestamp.
bool RtpStreamsSynchronizer::GetStreamSyncOffsetInMs(
    uint32_t rtp_timestamp,
    int64_t render_time_ms,
    int64_t* video_playout_ntp_ms,
    int64_t* stream_offset_ms,
    double* estimated_freq_khz) const {
  RTC_DCHECK_RUN_ON(&main_checker_);

  if (!syncable_audio_)
    return false;

  uint32_t audio_rtp_timestamp;
  int64_t time_ms;
  if (!syncable_audio_->GetPlayoutRtpTimestamp(&audio_rtp_timestamp,
                                               &time_ms)) {
    return false;
  }

  NtpTime latest_audio_ntp =
      audio_measurement_.rtp_to_ntp.Estimate(audio_rtp_timestamp);
  if (!latest_audio_ntp.Valid()) {
    return false;
  }
  int64_t latest_audio_ntp_ms = latest_audio_ntp.ToMs();

  syncable_audio_->SetEstimatedPlayoutNtpTimestampMs(latest_audio_ntp_ms,
                                                     time_ms);

  NtpTime latest_video_ntp =
      video_measurement_.rtp_to_ntp.Estimate(rtp_timestamp);
  if (!latest_video_ntp.Valid()) {
    return false;
  }
  int64_t latest_video_ntp_ms = latest_video_ntp.ToMs();

  // Estimate the current audio NTP time by advancing the last playout
  // timestamp by the wall-clock time elapsed since it was recorded.
  int64_t now_ms = rtc::TimeMillis();
  latest_audio_ntp_ms += (now_ms - time_ms);

  // Remove the video playout delay.
  int64_t time_to_render_ms = render_time_ms - now_ms;
  if (time_to_render_ms > 0)
    latest_video_ntp_ms -= time_to_render_ms;

  *video_playout_ntp_ms = latest_video_ntp_ms;
  *stream_offset_ms = latest_audio_ntp_ms - latest_video_ntp_ms;
  *estimated_freq_khz = video_measurement_.rtp_to_ntp.EstimatedFrequencyKhz();
  return true;
}

}  // namespace internal
}  // namespace webrtc

75
TMessagesProj/jni/voip/webrtc/video/rtp_streams_synchronizer2.h
Normal file
@@ -0,0 +1,75 @@
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_RTP_STREAMS_SYNCHRONIZER2_H_
#define VIDEO_RTP_STREAMS_SYNCHRONIZER2_H_

#include <memory>

#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "video/stream_synchronization.h"

namespace webrtc {

class Syncable;

namespace internal {

// RtpStreamsSynchronizer is responsible for synchronizing audio and video for
// a given audio receive stream and video receive stream.
class RtpStreamsSynchronizer {
 public:
  RtpStreamsSynchronizer(TaskQueueBase* main_queue, Syncable* syncable_video);
  ~RtpStreamsSynchronizer();

  void ConfigureSync(Syncable* syncable_audio);

  // Gets the estimated playout NTP timestamp for the video frame with
  // `rtp_timestamp` and the sync offset between the current played out audio
  // frame and the video frame. Returns true on success, false otherwise.
  // The `estimated_freq_khz` is the frequency used in the RTP to NTP timestamp
  // conversion.
  bool GetStreamSyncOffsetInMs(uint32_t rtp_timestamp,
                               int64_t render_time_ms,
                               int64_t* video_playout_ntp_ms,
                               int64_t* stream_offset_ms,
                               double* estimated_freq_khz) const;

 private:
  void UpdateDelay();

  TaskQueueBase* const task_queue_;

  // Used to check if we're running on the main thread/task queue.
  // The reason we currently don't use RTC_DCHECK_RUN_ON(task_queue_) is that
  // we might be running on an rtc::Thread implementation of TaskQueue, which
  // does not consistently set itself as the active TaskQueue.
  // Instead, we rely on a SequenceChecker for now.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker main_checker_;

  Syncable* const syncable_video_;

  Syncable* syncable_audio_ RTC_GUARDED_BY(main_checker_) = nullptr;
  std::unique_ptr<StreamSynchronization> sync_ RTC_GUARDED_BY(main_checker_);
  StreamSynchronization::Measurements audio_measurement_
      RTC_GUARDED_BY(main_checker_);
  StreamSynchronization::Measurements video_measurement_
      RTC_GUARDED_BY(main_checker_);
  RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(main_checker_);
  int64_t last_stats_log_ms_ RTC_GUARDED_BY(&main_checker_);
};

}  // namespace internal
}  // namespace webrtc

#endif  // VIDEO_RTP_STREAMS_SYNCHRONIZER2_H_

1360
TMessagesProj/jni/voip/webrtc/video/rtp_video_stream_receiver2.cc
Normal file
File diff suppressed because it is too large
448
TMessagesProj/jni/voip/webrtc/video/rtp_video_stream_receiver2.h
Normal file
@@ -0,0 +1,448 @@
|
|||
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
#define VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/sequence_checker.h"
#include "api/units/timestamp.h"
#include "api/video/color_space.h"
#include "api/video/video_codec_type.h"
#include "call/rtp_packet_sink_interface.h"
#include "call/syncable.h"
#include "call/video_receive_stream.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
#include "modules/rtp_rtcp/include/recovered_packet_receiver.h"
#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h"
#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h"
#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "modules/rtp_rtcp/source/rtp_video_stream_receiver_frame_transformer_delegate.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "modules/video_coding/h264_sps_pps_tracker.h"
#include "modules/video_coding/h265_vps_sps_pps_tracker.h"
#include "modules/video_coding/loss_notification_controller.h"
#include "modules/video_coding/nack_requester.h"
#include "modules/video_coding/packet_buffer.h"
#include "modules/video_coding/rtp_frame_reference_finder.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/numerics/sequence_number_unwrapper.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "video/buffered_frame_decryptor.h"
#include "video/unique_timestamp_counter.h"

namespace webrtc {

class NackRequester;
class PacketRouter;
class ReceiveStatistics;
class RtcpRttStats;
class RtpPacketReceived;
class Transport;
class UlpfecReceiver;

class RtpVideoStreamReceiver2 : public LossNotificationSender,
                                public RecoveredPacketReceiver,
                                public RtpPacketSinkInterface,
                                public KeyFrameRequestSender,
                                public NackSender,
                                public OnDecryptedFrameCallback,
                                public OnDecryptionStatusChangeCallback,
                                public RtpVideoFrameReceiver {
 public:
  // A complete frame is a frame which has received all its packets and all its
  // references are known.
  class OnCompleteFrameCallback {
   public:
    virtual ~OnCompleteFrameCallback() {}
    virtual void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) = 0;
  };

  RtpVideoStreamReceiver2(
      TaskQueueBase* current_queue,
      Clock* clock,
      Transport* transport,
      RtcpRttStats* rtt_stats,
      // The packet router is optional; if provided, the RtpRtcp module for this
      // stream is registered as a candidate for sending REMB and transport
      // feedback.
      PacketRouter* packet_router,
      const VideoReceiveStreamInterface::Config* config,
      ReceiveStatistics* rtp_receive_statistics,
      RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
      RtcpCnameCallback* rtcp_cname_callback,
      NackPeriodicProcessor* nack_periodic_processor,
      // The KeyFrameRequestSender is optional; if not provided, key frame
      // requests are sent via the internal RtpRtcp module.
      OnCompleteFrameCallback* complete_frame_callback,
      rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
      const FieldTrialsView& field_trials,
      RtcEventLog* event_log);
  ~RtpVideoStreamReceiver2() override;

  void AddReceiveCodec(uint8_t payload_type,
                       VideoCodecType video_codec,
                       const webrtc::CodecParameterMap& codec_params,
                       bool raw_payload);
  void RemoveReceiveCodec(uint8_t payload_type);

  // Clears state for all receive codecs added via `AddReceiveCodec`.
  void RemoveReceiveCodecs();

  void StartReceive();
  void StopReceive();

  // Produces the transport-related timestamps; current_delay_ms is left unset.
  absl::optional<Syncable::Info> GetSyncInfo() const;

  bool DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length);

  void FrameContinuous(int64_t seq_num);

  void FrameDecoded(int64_t seq_num);

  void SignalNetworkState(NetworkState state);

  // Returns number of different frames seen.
  int GetUniqueFramesSeen() const {
    RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
    return frame_counter_.GetUniqueSeen();
  }

  // Implements RtpPacketSinkInterface.
  void OnRtpPacket(const RtpPacketReceived& packet) override;

  // Public only for tests.
  // Returns true if the packet should be stashed and retried at a later stage.
  bool OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload,
                             const RtpPacketReceived& rtp_packet,
                             const RTPVideoHeader& video,
                             int times_nacked);

  // Implements RecoveredPacketReceiver.
  void OnRecoveredPacket(const RtpPacketReceived& packet) override;

  // Send an RTCP keyframe request.
  void RequestKeyFrame() override;

  // Implements NackSender.
  void SendNack(const std::vector<uint16_t>& sequence_numbers,
                bool buffering_allowed) override;

  // Implements LossNotificationSender.
  void SendLossNotification(uint16_t last_decoded_seq_num,
                            uint16_t last_received_seq_num,
                            bool decodability_flag,
                            bool buffering_allowed) override;

  // Returns true if a decryptor is attached and frames can be decrypted.
  // Updated by OnDecryptionStatusChangeCallback. Note this refers to Frame
  // Decryption not SRTP.
  bool IsDecryptable() const;

  // Implements OnDecryptedFrameCallback.
  void OnDecryptedFrame(std::unique_ptr<RtpFrameObject> frame) override;

  // Implements OnDecryptionStatusChangeCallback.
  void OnDecryptionStatusChange(
      FrameDecryptorInterface::Status status) override;

  // Optionally set a frame decryptor after a stream has started. This will not
  // reset the decoder state.
  void SetFrameDecryptor(
      rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);

  // Sets a frame transformer after a stream has started, if no transformer
  // has previously been set. Does not reset the decoder state.
  void SetDepacketizerToDecoderFrameTransformer(
      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);

  // Called by VideoReceiveStreamInterface when stats are updated.
  void UpdateRtt(int64_t max_rtt_ms);

  // Called when the local_ssrc is changed to match with a sender.
  void OnLocalSsrcChange(uint32_t local_ssrc);

  // Forwards the call to set rtcp_sender_ to the RTCP mode of the rtcp sender.
  void SetRtcpMode(RtcpMode mode);

  void SetReferenceTimeReport(bool enabled);

  // Sets or clears the callback sink that gets called for RTP packets. Used for
  // packet handlers such as FlexFec. Must be called on the packet delivery
  // thread (same context as `OnRtpPacket` is called on).
  // TODO(bugs.webrtc.org/11993): Packet delivery thread today means `worker
  // thread` but will be `network thread`.
  void SetPacketSink(RtpPacketSinkInterface* packet_sink);

  // Turns on/off loss notifications. Must be called on the packet delivery
  // thread.
  void SetLossNotificationEnabled(bool enabled);

  void SetNackHistory(TimeDelta history);

  int ulpfec_payload_type() const;
  int red_payload_type() const;
  void SetProtectionPayloadTypes(int red_payload_type, int ulpfec_payload_type);

  absl::optional<int64_t> LastReceivedPacketMs() const;
  absl::optional<uint32_t> LastReceivedFrameRtpTimestamp() const;
  absl::optional<int64_t> LastReceivedKeyframePacketMs() const;

 private:
  // Implements RtpVideoFrameReceiver.
  void ManageFrame(std::unique_ptr<RtpFrameObject> frame) override;

  void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frame)
      RTC_RUN_ON(packet_sequence_checker_);

  // Used for buffering RTCP feedback messages and sending them all together.
  // Note:
  // 1. Key frame requests and NACKs are mutually exclusive, with the
  //    former taking precedence over the latter.
  // 2. Loss notifications are orthogonal to either. (That is, may be sent
  //    alongside either.)
  class RtcpFeedbackBuffer : public KeyFrameRequestSender,
                             public NackSender,
                             public LossNotificationSender {
   public:
    RtcpFeedbackBuffer(KeyFrameRequestSender* key_frame_request_sender,
                       NackSender* nack_sender,
                       LossNotificationSender* loss_notification_sender);

    ~RtcpFeedbackBuffer() override = default;

    // KeyFrameRequestSender implementation.
    void RequestKeyFrame() override;

    // NackSender implementation.
    void SendNack(const std::vector<uint16_t>& sequence_numbers,
                  bool buffering_allowed) override;

    // LossNotificationSender implementation.
    void SendLossNotification(uint16_t last_decoded_seq_num,
                              uint16_t last_received_seq_num,
                              bool decodability_flag,
                              bool buffering_allowed) override;

    // Send all RTCP feedback messages buffered thus far.
    void SendBufferedRtcpFeedback();

    void ClearLossNotificationState();

   private:
    // LNTF-related state.
    struct LossNotificationState {
      LossNotificationState(uint16_t last_decoded_seq_num,
                            uint16_t last_received_seq_num,
                            bool decodability_flag)
          : last_decoded_seq_num(last_decoded_seq_num),
            last_received_seq_num(last_received_seq_num),
            decodability_flag(decodability_flag) {}

      uint16_t last_decoded_seq_num;
      uint16_t last_received_seq_num;
      bool decodability_flag;
    };

    RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
    KeyFrameRequestSender* const key_frame_request_sender_;
    NackSender* const nack_sender_;
    LossNotificationSender* const loss_notification_sender_;

    // Key-frame-request-related state.
    bool request_key_frame_ RTC_GUARDED_BY(packet_sequence_checker_);

    // NACK-related state.
    std::vector<uint16_t> nack_sequence_numbers_
        RTC_GUARDED_BY(packet_sequence_checker_);

    absl::optional<LossNotificationState> lntf_state_
        RTC_GUARDED_BY(packet_sequence_checker_);
  };
  enum ParseGenericDependenciesResult {
    kStashPacket,
    kDropPacket,
    kHasGenericDescriptor,
    kNoGenericDescriptor
  };

  // Entry point doing non-stats work for a received packet. Called
  // for the same packet both before and after RED decapsulation.
  void ReceivePacket(const RtpPacketReceived& packet)
      RTC_RUN_ON(packet_sequence_checker_);

  // Parses and handles RED headers.
  // This function assumes that it's being called from only one thread.
  void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet)
      RTC_RUN_ON(packet_sequence_checker_);
  void NotifyReceiverOfEmptyPacket(uint16_t seq_num)
      RTC_RUN_ON(packet_sequence_checker_);
  bool IsRedEnabled() const;
  void InsertSpsPpsIntoTracker(uint8_t payload_type)
      RTC_RUN_ON(packet_sequence_checker_);
  void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result)
      RTC_RUN_ON(packet_sequence_checker_);
  ParseGenericDependenciesResult ParseGenericDependenciesExtension(
      const RtpPacketReceived& rtp_packet,
      RTPVideoHeader* video_header) RTC_RUN_ON(packet_sequence_checker_);
  void OnAssembledFrame(std::unique_ptr<RtpFrameObject> frame)
      RTC_RUN_ON(packet_sequence_checker_);
  void UpdatePacketReceiveTimestamps(const RtpPacketReceived& packet,
                                     bool is_keyframe)
      RTC_RUN_ON(packet_sequence_checker_);

  const FieldTrialsView& field_trials_;
  TaskQueueBase* const worker_queue_;
  Clock* const clock_;
  // Ownership of this object lies with VideoReceiveStreamInterface, which owns
  // `this`.
  const VideoReceiveStreamInterface::Config& config_;
  PacketRouter* const packet_router_;

  RemoteNtpTimeEstimator ntp_estimator_;

  // Set by the field trial WebRTC-ForcePlayoutDelay to override any playout
  // delay that is specified in the received packets.
  FieldTrialOptional<int> forced_playout_delay_max_ms_;
  FieldTrialOptional<int> forced_playout_delay_min_ms_;
  ReceiveStatistics* const rtp_receive_statistics_;
  std::unique_ptr<UlpfecReceiver> ulpfec_receiver_
      RTC_GUARDED_BY(packet_sequence_checker_);
  int red_payload_type_ RTC_GUARDED_BY(packet_sequence_checker_);

  RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_task_checker_;
  // TODO(bugs.webrtc.org/11993): This checker conceptually represents
  // operations that belong to the network thread. The Call class is currently
  // moving towards handling network packets on the network thread and while
  // that work is ongoing, this checker may in practice represent the worker
  // thread, but still serves as a mechanism of grouping together concepts
  // that belong to the network thread. Once the packets are fully delivered
  // on the network thread, this comment will be deleted.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
  RtpPacketSinkInterface* packet_sink_ RTC_GUARDED_BY(packet_sequence_checker_);
  bool receiving_ RTC_GUARDED_BY(packet_sequence_checker_);
  int64_t last_packet_log_ms_ RTC_GUARDED_BY(packet_sequence_checker_);

  const std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;

  NackPeriodicProcessor* const nack_periodic_processor_;
  OnCompleteFrameCallback* complete_frame_callback_;
  const KeyFrameReqMethod keyframe_request_method_;

  RtcpFeedbackBuffer rtcp_feedback_buffer_;
  // TODO(tommi): Consider absl::optional<NackRequester> instead of unique_ptr
  // since nack is usually configured.
  std::unique_ptr<NackRequester> nack_module_
      RTC_GUARDED_BY(packet_sequence_checker_);
  std::unique_ptr<LossNotificationController> loss_notification_controller_
      RTC_GUARDED_BY(packet_sequence_checker_);

  video_coding::PacketBuffer packet_buffer_
      RTC_GUARDED_BY(packet_sequence_checker_);
  UniqueTimestampCounter frame_counter_
      RTC_GUARDED_BY(packet_sequence_checker_);
  SeqNumUnwrapper<uint16_t> frame_id_unwrapper_
      RTC_GUARDED_BY(packet_sequence_checker_);

  // Video structure provided in the dependency descriptor in a first packet
  // of a key frame. It is required to parse dependency descriptor in the
  // following delta packets.
  std::unique_ptr<FrameDependencyStructure> video_structure_
      RTC_GUARDED_BY(packet_sequence_checker_);
  // Frame id of the last frame with the attached video structure.
  // absl::nullopt when `video_structure_ == nullptr`.
  absl::optional<int64_t> video_structure_frame_id_
      RTC_GUARDED_BY(packet_sequence_checker_);
  Timestamp last_logged_failed_to_parse_dd_
      RTC_GUARDED_BY(packet_sequence_checker_) = Timestamp::MinusInfinity();

  std::unique_ptr<RtpFrameReferenceFinder> reference_finder_
      RTC_GUARDED_BY(packet_sequence_checker_);
  absl::optional<VideoCodecType> current_codec_
      RTC_GUARDED_BY(packet_sequence_checker_);
  uint32_t last_assembled_frame_rtp_timestamp_
      RTC_GUARDED_BY(packet_sequence_checker_);

  std::map<int64_t, uint16_t> last_seq_num_for_pic_id_
      RTC_GUARDED_BY(packet_sequence_checker_);
  video_coding::H264SpsPpsTracker tracker_
      RTC_GUARDED_BY(packet_sequence_checker_);
  video_coding::H265VpsSpsPpsTracker h265_tracker_
      RTC_GUARDED_BY(packet_sequence_checker_);

  // Maps payload id to the depacketizer.
  std::map<uint8_t, std::unique_ptr<VideoRtpDepacketizer>> payload_type_map_
      RTC_GUARDED_BY(packet_sequence_checker_);

  // TODO(johan): Remove pt_codec_params_ once
  // https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved.
  // Maps a payload type to a map of out-of-band supplied codec parameters.
  std::map<uint8_t, webrtc::CodecParameterMap> pt_codec_params_
      RTC_GUARDED_BY(packet_sequence_checker_);
  int16_t last_payload_type_ RTC_GUARDED_BY(packet_sequence_checker_) = -1;

  bool has_received_frame_ RTC_GUARDED_BY(packet_sequence_checker_);

  absl::optional<uint32_t> last_received_rtp_timestamp_
      RTC_GUARDED_BY(packet_sequence_checker_);
  absl::optional<uint32_t> last_received_keyframe_rtp_timestamp_
      RTC_GUARDED_BY(packet_sequence_checker_);
  absl::optional<Timestamp> last_received_rtp_system_time_
      RTC_GUARDED_BY(packet_sequence_checker_);
  absl::optional<Timestamp> last_received_keyframe_rtp_system_time_
      RTC_GUARDED_BY(packet_sequence_checker_);

  // Handles incoming encrypted frames and forwards them to the
  // rtp_reference_finder if they are decryptable.
  std::unique_ptr<BufferedFrameDecryptor> buffered_frame_decryptor_
      RTC_PT_GUARDED_BY(packet_sequence_checker_);
  bool frames_decryptable_ RTC_GUARDED_BY(worker_task_checker_);
  absl::optional<ColorSpace> last_color_space_;

  AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_
      RTC_GUARDED_BY(packet_sequence_checker_);

  CaptureClockOffsetUpdater capture_clock_offset_updater_
      RTC_GUARDED_BY(packet_sequence_checker_);

  int64_t last_completed_picture_id_ = 0;

  rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>
      frame_transformer_delegate_;

  SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_
      RTC_GUARDED_BY(packet_sequence_checker_);
  std::map<int64_t, RtpPacketInfo> packet_infos_
      RTC_GUARDED_BY(packet_sequence_checker_);
  std::vector<RtpPacketReceived> stashed_packets_
      RTC_GUARDED_BY(packet_sequence_checker_);

  Timestamp next_keyframe_request_for_missing_video_structure_ =
      Timestamp::MinusInfinity();
};

} // namespace webrtc

#endif // VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
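
// Illustrative sketch (assumed, not from the original tree): the smallest
// possible OnCompleteFrameCallback implementation. `HandleFrame` is a
// hypothetical function supplied by the embedder.
//
//   class CompleteFrameForwarder
//       : public RtpVideoStreamReceiver2::OnCompleteFrameCallback {
//    public:
//     void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {
//       // Frames arriving here have all their packets and known references.
//       HandleFrame(std::move(frame));
//     }
//   };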
379
TMessagesProj/jni/voip/webrtc/video/screenshare_loopback.cc
Normal file
@@ -0,0 +1,379 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <stdio.h>

#include <memory>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/types/optional.h"
#include "api/test/simulated_network.h"
#include "api/test/video_quality_test_fixture.h"
#include "api/transport/bitrate_settings.h"
#include "api/video_codecs/video_codec.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_encode.h"
#include "system_wrappers/include/field_trial.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/run_test.h"
#include "test/test_flags.h"
#include "video/video_quality_test.h"

using ::webrtc::BitrateConstraints;
using ::webrtc::BuiltInNetworkBehaviorConfig;
using ::webrtc::InterLayerPredMode;
using ::webrtc::SdpVideoFormat;
using ::webrtc::VideoQualityTest;

// Flags common with video loopback, with different default values.
ABSL_FLAG(int, width, 1850, "Video width (crops source).");
size_t Width() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_width));
}

ABSL_FLAG(int, height, 1110, "Video height (crops source).");
size_t Height() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_height));
}

ABSL_FLAG(int, fps, 5, "Frames per second.");
int Fps() {
  return absl::GetFlag(FLAGS_fps);
}

ABSL_FLAG(int, min_bitrate, 50, "Call and stream min bitrate in kbps.");
int MinBitrateKbps() {
  return absl::GetFlag(FLAGS_min_bitrate);
}

ABSL_FLAG(int, start_bitrate, 300, "Call start bitrate in kbps.");
int StartBitrateKbps() {
  return absl::GetFlag(FLAGS_start_bitrate);
}

ABSL_FLAG(int, target_bitrate, 200, "Stream target bitrate in kbps.");
int TargetBitrateKbps() {
  return absl::GetFlag(FLAGS_target_bitrate);
}

ABSL_FLAG(int, max_bitrate, 1000, "Call and stream max bitrate in kbps.");
int MaxBitrateKbps() {
  return absl::GetFlag(FLAGS_max_bitrate);
}

ABSL_FLAG(int, num_temporal_layers, 2, "Number of temporal layers to use.");
int NumTemporalLayers() {
  return absl::GetFlag(FLAGS_num_temporal_layers);
}

// Flags common with video loopback, with equal default values.
ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");
std::string Codec() {
  return absl::GetFlag(FLAGS_codec);
}

ABSL_FLAG(std::string,
          rtc_event_log_name,
          "",
          "Filename for rtc event log. Two files "
          "with \"_send\" and \"_recv\" suffixes will be created.");
std::string RtcEventLogName() {
  return absl::GetFlag(FLAGS_rtc_event_log_name);
}

ABSL_FLAG(std::string,
          rtp_dump_name,
          "",
          "Filename for dumped received RTP stream.");
std::string RtpDumpName() {
  return absl::GetFlag(FLAGS_rtp_dump_name);
}

ABSL_FLAG(int,
          selected_tl,
          -1,
          "Temporal layer to show or analyze. -1 to disable filtering.");
int SelectedTL() {
  return absl::GetFlag(FLAGS_selected_tl);
}

ABSL_FLAG(
    int,
    duration,
    0,
    "Duration of the test in seconds. If 0, the rendered video is shown "
    "instead.");
int DurationSecs() {
  return absl::GetFlag(FLAGS_duration);
}

ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");
std::string OutputFilename() {
  return absl::GetFlag(FLAGS_output_filename);
}

ABSL_FLAG(std::string,
          graph_title,
          "",
          "If empty, title will be generated automatically.");
std::string GraphTitle() {
  return absl::GetFlag(FLAGS_graph_title);
}

ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");
int LossPercent() {
  return absl::GetFlag(FLAGS_loss_percent);
}

ABSL_FLAG(int,
          link_capacity,
          0,
          "Capacity (kbps) of the fake link. 0 means infinite.");
int LinkCapacityKbps() {
  return absl::GetFlag(FLAGS_link_capacity);
}

ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");
int QueueSize() {
  return absl::GetFlag(FLAGS_queue_size);
}

ABSL_FLAG(int,
          avg_propagation_delay_ms,
          0,
          "Average link propagation delay in ms.");
int AvgPropagationDelayMs() {
  return absl::GetFlag(FLAGS_avg_propagation_delay_ms);
}

ABSL_FLAG(int,
          std_propagation_delay_ms,
          0,
          "Link propagation delay standard deviation in ms.");
int StdPropagationDelayMs() {
  return absl::GetFlag(FLAGS_std_propagation_delay_ms);
}

ABSL_FLAG(int, num_streams, 0, "Number of streams to show or analyze.");
int NumStreams() {
  return absl::GetFlag(FLAGS_num_streams);
}

ABSL_FLAG(int,
          selected_stream,
          0,
          "ID of the stream to show or analyze. "
          "Set to the number of streams to show them all.");
int SelectedStream() {
  return absl::GetFlag(FLAGS_selected_stream);
}

ABSL_FLAG(int, num_spatial_layers, 1, "Number of spatial layers to use.");
int NumSpatialLayers() {
  return absl::GetFlag(FLAGS_num_spatial_layers);
}

ABSL_FLAG(int,
          inter_layer_pred,
          0,
          "Inter-layer prediction mode. "
          "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");
InterLayerPredMode InterLayerPred() {
  if (absl::GetFlag(FLAGS_inter_layer_pred) == 0) {
    return webrtc::InterLayerPredMode::kOn;
  } else if (absl::GetFlag(FLAGS_inter_layer_pred) == 1) {
    return webrtc::InterLayerPredMode::kOff;
  } else {
    RTC_DCHECK_EQ(absl::GetFlag(FLAGS_inter_layer_pred), 2);
    return webrtc::InterLayerPredMode::kOnKeyPic;
  }
}

ABSL_FLAG(int,
          selected_sl,
          -1,
          "Spatial layer to show or analyze. -1 to disable filtering.");
int SelectedSL() {
  return absl::GetFlag(FLAGS_selected_sl);
}

ABSL_FLAG(std::string,
          stream0,
          "",
          "Comma separated values describing VideoStream for stream #0.");
std::string Stream0() {
  return absl::GetFlag(FLAGS_stream0);
}

ABSL_FLAG(std::string,
          stream1,
          "",
          "Comma separated values describing VideoStream for stream #1.");
std::string Stream1() {
  return absl::GetFlag(FLAGS_stream1);
}

ABSL_FLAG(std::string,
          sl0,
          "",
          "Comma separated values describing SpatialLayer for layer #0.");
std::string SL0() {
  return absl::GetFlag(FLAGS_sl0);
}

ABSL_FLAG(std::string,
          sl1,
          "",
          "Comma separated values describing SpatialLayer for layer #1.");
std::string SL1() {
  return absl::GetFlag(FLAGS_sl1);
}

ABSL_FLAG(std::string,
          encoded_frame_path,
          "",
          "The base path for encoded frame logs. Created files will have "
          "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
std::string EncodedFramePath() {
  return absl::GetFlag(FLAGS_encoded_frame_path);
}

ABSL_FLAG(bool, logs, false, "print logs to stderr");

ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation");

ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");

ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur");

// Screenshare-specific flags.
ABSL_FLAG(int,
          min_transmit_bitrate,
          400,
          "Min transmit bitrate incl. padding.");
int MinTransmitBitrateKbps() {
  return absl::GetFlag(FLAGS_min_transmit_bitrate);
}

ABSL_FLAG(bool,
          generate_slides,
          false,
          "Whether to use randomly generated slides or read them from files.");
bool GenerateSlides() {
  return absl::GetFlag(FLAGS_generate_slides);
}

ABSL_FLAG(int,
          slide_change_interval,
          10,
          "Interval (in seconds) between simulated slide changes.");
int SlideChangeInterval() {
  return absl::GetFlag(FLAGS_slide_change_interval);
}

ABSL_FLAG(
    int,
    scroll_duration,
    0,
    "Duration (in seconds) during which a slide will be scrolled into place.");
int ScrollDuration() {
  return absl::GetFlag(FLAGS_scroll_duration);
}

ABSL_FLAG(std::string,
          slides,
          "",
          "Comma-separated list of *.yuv files to display as slides.");
std::vector<std::string> Slides() {
  std::vector<std::string> slides;
  std::string slides_list = absl::GetFlag(FLAGS_slides);
  rtc::tokenize(slides_list, ',', &slides);
  return slides;
}

void Loopback() {
  BuiltInNetworkBehaviorConfig pipe_config;
  pipe_config.loss_percent = LossPercent();
  pipe_config.link_capacity_kbps = LinkCapacityKbps();
  pipe_config.queue_length_packets = QueueSize();
  pipe_config.queue_delay_ms = AvgPropagationDelayMs();
  pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
  pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);

  BitrateConstraints call_bitrate_config;
  call_bitrate_config.min_bitrate_bps = MinBitrateKbps() * 1000;
  call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
  call_bitrate_config.max_bitrate_bps = -1; // Don't cap bandwidth estimate.

  VideoQualityTest::Params params;
  params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe);
  params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor);
  params.call.call_bitrate_config = call_bitrate_config;
  params.video[0].enabled = true;
  params.video[0].width = Width();
  params.video[0].height = Height();
  params.video[0].fps = Fps();
  params.video[0].min_bitrate_bps = MinBitrateKbps() * 1000;
  params.video[0].target_bitrate_bps = TargetBitrateKbps() * 1000;
  params.video[0].max_bitrate_bps = MaxBitrateKbps() * 1000;
  params.video[0].codec = Codec();
  params.video[0].num_temporal_layers = NumTemporalLayers();
  params.video[0].selected_tl = SelectedTL();
  params.video[0].min_transmit_bps = MinTransmitBitrateKbps() * 1000;
  params.screenshare[0].enabled = true;
  params.screenshare[0].generate_slides = GenerateSlides();
  params.screenshare[0].slide_change_interval = SlideChangeInterval();
  params.screenshare[0].scroll_duration = ScrollDuration();
  params.screenshare[0].slides = Slides();
  params.config = pipe_config;
  params.logging.rtc_event_log_name = RtcEventLogName();
  params.logging.rtp_dump_name = RtpDumpName();
  params.logging.encoded_frame_base_path = EncodedFramePath();

  if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) {
    params.ss[0].infer_streams = true;
  }

  std::vector<std::string> stream_descriptors;
  stream_descriptors.push_back(Stream0());
  stream_descriptors.push_back(Stream1());
  std::vector<std::string> SL_descriptors;
  SL_descriptors.push_back(SL0());
  SL_descriptors.push_back(SL1());
  VideoQualityTest::FillScalabilitySettings(
      &params, 0, stream_descriptors, NumStreams(), SelectedStream(),
      NumSpatialLayers(), SelectedSL(), InterLayerPred(), SL_descriptors);

  auto fixture = std::make_unique<VideoQualityTest>(nullptr);
  if (DurationSecs()) {
    fixture->RunWithAnalyzer(params);
  } else {
    fixture->RunWithRenderers(params);
  }
}

int main(int argc, char* argv[]) {
  ::testing::InitGoogleTest(&argc, argv);
  absl::ParseCommandLine(argc, argv);

  rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));

  // InitFieldTrialsFromString stores the char*, so the char array must outlive
  // the application.
  const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
  webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());

  webrtc::test::RunTest(Loopback);
  return 0;
}
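
// Example invocation (hypothetical values; the flag names are the ones
// defined above, and the binary name is assumed):
//
//   ./screenshare_loopback --codec=VP9 --fps=10 --duration=60 \
//       --slide_change_interval=15 --output_filename=screenshare_stats.dat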
117
TMessagesProj/jni/voip/webrtc/video/send_delay_stats.cc
Normal file
@@ -0,0 +1,117 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/send_delay_stats.h"

#include <utility>

#include "rtc_base/logging.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {
namespace {
// Packets with a larger delay are removed and excluded from the delay stats.
// Set to a value larger than the max histogram delay, which is 10 seconds.
constexpr TimeDelta kMaxSentPacketDelay = TimeDelta::Seconds(11);
constexpr size_t kMaxPacketMapSize = 2000;

// Limit for the maximum number of streams to calculate stats for.
constexpr size_t kMaxSsrcMapSize = 50;
constexpr int kMinRequiredPeriodicSamples = 5;
} // namespace

SendDelayStats::SendDelayStats(Clock* clock)
    : clock_(clock), num_old_packets_(0), num_skipped_packets_(0) {}

SendDelayStats::~SendDelayStats() {
  if (num_old_packets_ > 0 || num_skipped_packets_ > 0) {
    RTC_LOG(LS_WARNING) << "Delay stats: number of old packets "
                        << num_old_packets_ << ", skipped packets "
                        << num_skipped_packets_ << ". Number of streams "
                        << send_delay_counters_.size();
  }
  UpdateHistograms();
}

void SendDelayStats::UpdateHistograms() {
  MutexLock lock(&mutex_);
  for (auto& [unused, counter] : send_delay_counters_) {
    AggregatedStats stats = counter.GetStats();
    if (stats.num_samples >= kMinRequiredPeriodicSamples) {
      RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.SendDelayInMs", stats.average);
      RTC_LOG(LS_INFO) << "WebRTC.Video.SendDelayInMs, " << stats.ToString();
    }
  }
}

void SendDelayStats::AddSsrcs(const VideoSendStream::Config& config) {
  MutexLock lock(&mutex_);
  if (send_delay_counters_.size() + config.rtp.ssrcs.size() > kMaxSsrcMapSize)
    return;
  for (uint32_t ssrc : config.rtp.ssrcs) {
    send_delay_counters_.try_emplace(ssrc, clock_, nullptr, false);
  }
}

void SendDelayStats::OnSendPacket(uint16_t packet_id,
                                  Timestamp capture_time,
                                  uint32_t ssrc) {
  // Packet sent to transport.
  MutexLock lock(&mutex_);
  auto it = send_delay_counters_.find(ssrc);
  if (it == send_delay_counters_.end())
    return;

  Timestamp now = clock_->CurrentTime();
  RemoveOld(now);

  if (packets_.size() > kMaxPacketMapSize) {
    ++num_skipped_packets_;
    return;
  }
  // `send_delay_counters_` is a std::map: adding new entries doesn't
  // invalidate existing iterators, and the map has pointer stability for its
  // values. Entries are never removed from `send_delay_counters_`, so
  // memorizing a pointer to the AvgCounter is safe.
  packets_.emplace(packet_id, Packet{.send_delay = &it->second,
                                     .capture_time = capture_time,
                                     .send_time = now});
}

bool SendDelayStats::OnSentPacket(int packet_id, Timestamp time) {
  // Packet leaving socket.
  if (packet_id == -1)
    return false;

  MutexLock lock(&mutex_);
  auto it = packets_.find(packet_id);
  if (it == packets_.end())
    return false;

  // TODO(asapersson): Remove SendSideDelayUpdated(), use capture -> sent.
  // Elapsed time from send (to transport) -> sent (leaving socket).
  TimeDelta diff = time - it->second.send_time;
  it->second.send_delay->Add(diff.ms());
  packets_.erase(it);
  return true;
}

void SendDelayStats::RemoveOld(Timestamp now) {
  while (!packets_.empty()) {
    auto it = packets_.begin();
    if (now - it->second.capture_time < kMaxSentPacketDelay)
      break;

    packets_.erase(it);
    ++num_old_packets_;
  }
}

} // namespace webrtc
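
// Illustrative call sequence (assumed embedder code, not part of this file):
// OnSendPacket() records when a packet is handed to the transport, and
// OnSentPacket() closes the measurement when the packet leaves the socket,
// keyed by the same transport-wide `packet_id`.
//
//   SendDelayStats stats(clock);
//   stats.AddSsrcs(config);
//   stats.OnSendPacket(/*packet_id=*/7, capture_time, ssrc);
//   ...
//   stats.OnSentPacket(/*packet_id=*/7, clock->CurrentTime());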
79
TMessagesProj/jni/voip/webrtc/video/send_delay_stats.h
Normal file
@@ -0,0 +1,79 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_SEND_DELAY_STATS_H_
#define VIDEO_SEND_DELAY_STATS_H_

#include <stddef.h>
#include <stdint.h>

#include <map>

#include "api/units/timestamp.h"
#include "call/video_send_stream.h"
#include "modules/include/module_common_types_public.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
#include "system_wrappers/include/clock.h"
#include "video/stats_counter.h"

namespace webrtc {

// Used to collect delay stats for video streams. The class gets callbacks
// from more than one thread and internally uses a mutex for data access
// synchronization.
// TODO(bugs.webrtc.org/11993): OnSendPacket and OnSentPacket will eventually
// be called consistently on the same thread. Once we're there, we should be
// able to avoid locking (at least for the fast path).
class SendDelayStats {
 public:
  explicit SendDelayStats(Clock* clock);
  ~SendDelayStats();

  // Adds the configured ssrcs for the rtp streams.
  // Stats will be calculated for these streams.
  void AddSsrcs(const VideoSendStream::Config& config);

  // Called when a packet is sent (leaving socket).
  bool OnSentPacket(int packet_id, Timestamp time);

  // Called when a packet is sent to the transport.
  void OnSendPacket(uint16_t packet_id, Timestamp capture_time, uint32_t ssrc);

 private:
  // Map holding sent packets (mapped by sequence number).
  struct SequenceNumberOlderThan {
    bool operator()(uint16_t seq1, uint16_t seq2) const {
      return IsNewerSequenceNumber(seq2, seq1);
    }
  };
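  // For example, with 16-bit wrap-around, IsNewerSequenceNumber(2, 65535) is
  // true, so sequence number 65535 orders before 2 in `packets_`, and erasing
  // from the front still removes the oldest packets across the wrap.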
  struct Packet {
    AvgCounter* send_delay;
    Timestamp capture_time;
    Timestamp send_time;
  };

  void UpdateHistograms();
  void RemoveOld(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Clock* const clock_;
  Mutex mutex_;

  std::map<uint16_t, Packet, SequenceNumberOlderThan> packets_
      RTC_GUARDED_BY(mutex_);
  size_t num_old_packets_ RTC_GUARDED_BY(mutex_);
  size_t num_skipped_packets_ RTC_GUARDED_BY(mutex_);

  // Mapped by SSRC.
  std::map<uint32_t, AvgCounter> send_delay_counters_ RTC_GUARDED_BY(mutex_);
};

} // namespace webrtc
#endif // VIDEO_SEND_DELAY_STATS_H_
1577
TMessagesProj/jni/voip/webrtc/video/send_statistics_proxy.cc
Normal file
File diff suppressed because it is too large
396
TMessagesProj/jni/voip/webrtc/video/send_statistics_proxy.h
Normal file
@@ -0,0 +1,396 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_SEND_STATISTICS_PROXY_H_
#define VIDEO_SEND_STATISTICS_PROXY_H_

#include <array>
#include <deque>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "api/field_trials_view.h"
#include "api/video/video_codec_constants.h"
#include "call/video_send_stream.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/include/report_block_data.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/rate_tracker.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
#include "system_wrappers/include/clock.h"
#include "video/config/video_encoder_config.h"
#include "video/quality_limitation_reason_tracker.h"
#include "video/report_block_stats.h"
#include "video/stats_counter.h"
#include "video/video_stream_encoder_observer.h"

namespace webrtc {

class SendStatisticsProxy : public VideoStreamEncoderObserver,
                            public ReportBlockDataObserver,
                            public RtcpPacketTypeCounterObserver,
                            public StreamDataCountersCallback,
                            public BitrateStatisticsObserver,
                            public FrameCountObserver {
 public:
  static constexpr TimeDelta kStatsTimeout = TimeDelta::Seconds(5);
  // Number of required samples to be collected before a metric is added
  // to an RTC histogram.
  static const int kMinRequiredMetricsSamples = 200;

  SendStatisticsProxy(Clock* clock,
                      const VideoSendStream::Config& config,
                      VideoEncoderConfig::ContentType content_type,
                      const FieldTrialsView& field_trials);
  ~SendStatisticsProxy() override;

  virtual VideoSendStream::Stats GetStats();

  void OnSendEncodedImage(const EncodedImage& encoded_image,
                          const CodecSpecificInfo* codec_info) override;

  void OnEncoderImplementationChanged(
      EncoderImplementation implementation) override;

  // Used to update incoming frame rate.
  void OnIncomingFrame(int width, int height) override;

  // Dropped frame stats.
  void OnFrameDropped(DropReason) override;

  // Adaptation stats.
  void OnAdaptationChanged(
      VideoAdaptationReason reason,
      const VideoAdaptationCounters& cpu_counters,
      const VideoAdaptationCounters& quality_counters) override;
  void ClearAdaptationStats() override;
  void UpdateAdaptationSettings(AdaptationSettings cpu_settings,
                                AdaptationSettings quality_settings) override;

  void OnBitrateAllocationUpdated(
      const VideoCodec& codec,
      const VideoBitrateAllocation& allocation) override;

  void OnEncoderInternalScalerUpdate(bool is_scaled) override;

  void OnMinPixelLimitReached() override;
  void OnInitialQualityResolutionAdaptDown() override;

  void OnSuspendChange(bool is_suspended) override;
  void OnInactiveSsrc(uint32_t ssrc);

  // Used to indicate change in content type, which may require a change in
  // how stats are collected.
  void OnEncoderReconfigured(const VideoEncoderConfig& encoder_config,
                             const std::vector<VideoStream>& streams) override;

  // Used to update the encoder target rate.
  void OnSetEncoderTargetRate(uint32_t bitrate_bps);

  // Implements CpuOveruseMetricsObserver.
  void OnEncodedFrameTimeMeasured(int encode_time_ms,
                                  int encode_usage_percent) override;

  void OnSendPacket(uint32_t ssrc, Timestamp capture_time);

  int GetInputFrameRate() const override;
  int GetSendFrameRate() const;

 protected:
  // From ReportBlockDataObserver.
  void OnReportBlockDataUpdated(ReportBlockData report_block_data) override;
  // From RtcpPacketTypeCounterObserver.
  void RtcpPacketTypesCounterUpdated(
      uint32_t ssrc,
      const RtcpPacketTypeCounter& packet_counter) override;
  // From StreamDataCountersCallback.
  void DataCountersUpdated(const StreamDataCounters& counters,
                           uint32_t ssrc) override;

  // From BitrateStatisticsObserver.
  void Notify(uint32_t total_bitrate_bps,
              uint32_t retransmit_bitrate_bps,
              uint32_t ssrc) override;

  // From FrameCountObserver.
  void FrameCountUpdated(const FrameCounts& frame_counts,
                         uint32_t ssrc) override;

 private:
  class SampleCounter {
   public:
    SampleCounter() : sum(0), num_samples(0) {}
    ~SampleCounter() {}
    void Add(int sample);
    int Avg(int64_t min_required_samples) const;

   private:
    int64_t sum;
    int64_t num_samples;
  };
  class BoolSampleCounter {
   public:
    BoolSampleCounter() : sum(0), num_samples(0) {}
    ~BoolSampleCounter() {}
    void Add(bool sample);
    void Add(bool sample, int64_t count);
    int Percent(int64_t min_required_samples) const;
    int Permille(int64_t min_required_samples) const;

   private:
    int Fraction(int64_t min_required_samples, float multiplier) const;
    int64_t sum;
    int64_t num_samples;
  };
  struct TargetRateUpdates {
    TargetRateUpdates()
        : pause_resume_events(0), last_paused_or_resumed(false), last_ms(-1) {}
    int pause_resume_events;
    bool last_paused_or_resumed;
    int64_t last_ms;
  };
  struct FallbackEncoderInfo {
    FallbackEncoderInfo();
    bool is_possible = true;
    bool is_active = false;
    int on_off_events = 0;
    int64_t elapsed_ms = 0;
    absl::optional<int64_t> last_update_ms;
    const int max_frame_diff_ms = 2000;
  };
  struct FallbackEncoderInfoDisabled {
    bool is_possible = true;
    bool min_pixel_limit_reached = false;
  };
  struct StatsTimer {
    void Start(int64_t now_ms);
    void Stop(int64_t now_ms);
    void Restart(int64_t now_ms);
    int64_t start_ms = -1;
    int64_t total_ms = 0;
  };
  struct QpCounters {
    SampleCounter vp8; // QP range: 0-127.
    SampleCounter vp9; // QP range: 0-255.
    SampleCounter h264; // QP range: 0-51.
    SampleCounter h265; // QP range: 0-51.
  };
  struct AdaptChanges {
    int down = 0;
    int up = 0;
  };

  // Map holding encoded frames (mapped by timestamp).
  // If simulcast layers are encoded on different threads, there is no
  // guarantee that one frame of all layers is encoded before the next one
  // starts.
  struct TimestampOlderThan {
    bool operator()(uint32_t ts1, uint32_t ts2) const {
      return IsNewerTimestamp(ts2, ts1);
    }
  };
  struct Frame {
    Frame(int64_t send_ms, uint32_t width, uint32_t height, int simulcast_idx)
        : send_ms(send_ms),
          max_width(width),
          max_height(height),
          max_simulcast_idx(simulcast_idx) {}
    const int64_t
        send_ms; // Time when first frame with this timestamp is sent.
    uint32_t max_width; // Max width with this timestamp.
    uint32_t max_height; // Max height with this timestamp.
    int max_simulcast_idx; // Max simulcast index with this timestamp.
  };
  typedef std::map<uint32_t, Frame, TimestampOlderThan> EncodedFrameMap;

  void PurgeOldStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  VideoSendStream::StreamStats* GetStatsEntry(uint32_t ssrc)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  struct MaskedAdaptationCounts {
    absl::optional<int> resolution_adaptations = absl::nullopt;
    absl::optional<int> num_framerate_reductions = absl::nullopt;
  };

  struct Adaptations {
   public:
    MaskedAdaptationCounts MaskedCpuCounts() const;
    MaskedAdaptationCounts MaskedQualityCounts() const;

    void set_cpu_counts(const VideoAdaptationCounters& cpu_counts);
    void set_quality_counts(const VideoAdaptationCounters& quality_counts);

    VideoAdaptationCounters cpu_counts() const;
    VideoAdaptationCounters quality_counts() const;

    void UpdateMaskingSettings(AdaptationSettings cpu_settings,
                               AdaptationSettings quality_settings);

   private:
    VideoAdaptationCounters cpu_counts_;
    AdaptationSettings cpu_settings_;
    VideoAdaptationCounters quality_counts_;
    AdaptationSettings quality_settings_;

    MaskedAdaptationCounts Mask(const VideoAdaptationCounters& counters,
                                const AdaptationSettings& settings) const;
  };
  // Collection of various stats that are tracked per ssrc.
  struct Trackers {
    struct SendDelayEntry {
      Timestamp when;
      TimeDelta send_delay;
    };

    Trackers();
    Trackers(const Trackers&) = delete;
    Trackers& operator=(const Trackers&) = delete;

    void AddSendDelay(Timestamp now, TimeDelta send_delay);

    Timestamp resolution_update = Timestamp::MinusInfinity();
    rtc::RateTracker encoded_frame_rate;

    std::deque<SendDelayEntry> send_delays;

    // The sum of `send_delay` in `send_delays`.
    TimeDelta send_delay_sum = TimeDelta::Zero();

    // Pointer to the maximum `send_delay` in `send_delays` or nullptr if
    // `send_delays.empty()`.
    const TimeDelta* send_delay_max = nullptr;
  };

  void SetAdaptTimer(const MaskedAdaptationCounts& counts, StatsTimer* timer)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void UpdateAdaptationStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void TryUpdateInitialQualityResolutionAdaptUp(
      absl::optional<int> old_quality_downscales,
      absl::optional<int> updated_quality_downscales)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void UpdateEncoderFallbackStats(const CodecSpecificInfo* codec_info,
                                  int pixels,
                                  int simulcast_index)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void UpdateFallbackDisabledStats(const CodecSpecificInfo* codec_info,
                                   int pixels,
                                   int simulcast_index)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Clock* const clock_;
  const std::string payload_name_;
  const RtpConfig rtp_config_;
  const absl::optional<int> fallback_max_pixels_;
  const absl::optional<int> fallback_max_pixels_disabled_;
  mutable Mutex mutex_;
  VideoEncoderConfig::ContentType content_type_ RTC_GUARDED_BY(mutex_);
  const int64_t start_ms_;
  VideoSendStream::Stats stats_ RTC_GUARDED_BY(mutex_);
  rtc::ExpFilter encode_time_ RTC_GUARDED_BY(mutex_);
  QualityLimitationReasonTracker quality_limitation_reason_tracker_
      RTC_GUARDED_BY(mutex_);
  rtc::RateTracker media_byte_rate_tracker_ RTC_GUARDED_BY(mutex_);
  rtc::RateTracker encoded_frame_rate_tracker_ RTC_GUARDED_BY(mutex_);
  // Trackers mapped by ssrc.
  std::map<uint32_t, Trackers> trackers_ RTC_GUARDED_BY(mutex_);

  absl::optional<int64_t> last_outlier_timestamp_ RTC_GUARDED_BY(mutex_);

  int last_num_spatial_layers_ RTC_GUARDED_BY(mutex_);
  int last_num_simulcast_streams_ RTC_GUARDED_BY(mutex_);
  std::array<bool, kMaxSpatialLayers> last_spatial_layer_use_
      RTC_GUARDED_BY(mutex_);
  // Indicates if the latest bitrate allocation had layers disabled by low
  // available bandwidth.
  bool bw_limited_layers_ RTC_GUARDED_BY(mutex_);
  // Indicates if the encoder internally downscales the input image.
  bool internal_encoder_scaler_ RTC_GUARDED_BY(mutex_);
  Adaptations adaptation_limitations_ RTC_GUARDED_BY(mutex_);

  struct EncoderChangeEvent {
    std::string previous_encoder_implementation;
    std::string new_encoder_implementation;
  };
  // Stores the last change in encoder implementation in an optional, so that
  // the event can be consumed.
  absl::optional<EncoderChangeEvent> encoder_changed_;

  // Contains stats used for UMA histograms. These stats will be reset if
  // content type changes between real-time video and screenshare, since these
  // will be reported separately.
  struct UmaSamplesContainer {
    UmaSamplesContainer(const char* prefix,
                        const VideoSendStream::Stats& start_stats,
                        Clock* clock);
    ~UmaSamplesContainer();

    void UpdateHistograms(const RtpConfig& rtp_config,
                          const VideoSendStream::Stats& current_stats);

    void InitializeBitrateCounters(const VideoSendStream::Stats& stats);

    bool InsertEncodedFrame(const EncodedImage& encoded_frame,
                            int simulcast_idx);
    void RemoveOld(int64_t now_ms);

    const std::string uma_prefix_;
    Clock* const clock_;
    SampleCounter input_width_counter_;
    SampleCounter input_height_counter_;
    SampleCounter sent_width_counter_;
    SampleCounter sent_height_counter_;
    SampleCounter encode_time_counter_;
    BoolSampleCounter key_frame_counter_;
    BoolSampleCounter quality_limited_frame_counter_;
    SampleCounter quality_downscales_counter_;
    BoolSampleCounter cpu_limited_frame_counter_;
    BoolSampleCounter bw_limited_frame_counter_;
    SampleCounter bw_resolutions_disabled_counter_;
    SampleCounter delay_counter_;
    SampleCounter max_delay_counter_;
    rtc::RateTracker input_frame_rate_tracker_;
    RateCounter input_fps_counter_;
    RateCounter sent_fps_counter_;
    RateAccCounter total_byte_counter_;
    RateAccCounter media_byte_counter_;
    RateAccCounter rtx_byte_counter_;
    RateAccCounter padding_byte_counter_;
    RateAccCounter retransmit_byte_counter_;
    RateAccCounter fec_byte_counter_;
    int64_t first_rtcp_stats_time_ms_;
    int64_t first_rtp_stats_time_ms_;
    StatsTimer cpu_adapt_timer_;
    StatsTimer quality_adapt_timer_;
    BoolSampleCounter paused_time_counter_;
    TargetRateUpdates target_rate_updates_;
    BoolSampleCounter fallback_active_counter_;
    FallbackEncoderInfo fallback_info_;
    FallbackEncoderInfoDisabled fallback_info_disabled_;
    ReportBlockStats report_block_stats_;
    const VideoSendStream::Stats start_stats_;
    size_t num_streams_; // Number of configured streams to encoder.
    size_t num_pixels_highest_stream_;
    EncodedFrameMap encoded_frames_;
    AdaptChanges initial_quality_changes_;

    std::map<int, QpCounters>
        qp_counters_; // QP counters mapped by spatial idx.
  };

  std::unique_ptr<UmaSamplesContainer> uma_container_ RTC_GUARDED_BY(mutex_);
};

} // namespace webrtc
#endif // VIDEO_SEND_STATISTICS_PROXY_H_
463
TMessagesProj/jni/voip/webrtc/video/stats_counter.cc
Normal file
@@ -0,0 +1,463 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/stats_counter.h"

#include <algorithm>
#include <limits>
#include <map>

#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
#include "system_wrappers/include/clock.h"

namespace webrtc {

namespace {
// Default periodic time interval for processing samples.
const int64_t kDefaultProcessIntervalMs = 2000;
const uint32_t kStreamId0 = 0;
} // namespace

std::string AggregatedStats::ToString() const {
  return ToStringWithMultiplier(1);
}

std::string AggregatedStats::ToStringWithMultiplier(int multiplier) const {
  rtc::StringBuilder ss;
  ss << "periodic_samples:" << num_samples << ", {";
  ss << "min:" << (min * multiplier) << ", ";
  ss << "avg:" << (average * multiplier) << ", ";
  ss << "max:" << (max * multiplier) << "}";
  return ss.Release();
}
|
||||
// Class holding periodically computed metrics.
|
||||
class AggregatedCounter {
|
||||
public:
|
||||
AggregatedCounter() : last_sample_(0), sum_samples_(0) {}
|
||||
~AggregatedCounter() {}
|
||||
|
||||
void Add(int sample) {
|
||||
last_sample_ = sample;
|
||||
sum_samples_ += sample;
|
||||
++stats_.num_samples;
|
||||
if (stats_.num_samples == 1) {
|
||||
stats_.min = sample;
|
||||
stats_.max = sample;
|
||||
}
|
||||
stats_.min = std::min(sample, stats_.min);
|
||||
stats_.max = std::max(sample, stats_.max);
|
||||
}
|
||||
|
||||
AggregatedStats ComputeStats() {
|
||||
Compute();
|
||||
return stats_;
|
||||
}
|
||||
|
||||
bool Empty() const { return stats_.num_samples == 0; }
|
||||
|
||||
int last_sample() const { return last_sample_; }
|
||||
|
||||
private:
|
||||
void Compute() {
|
||||
if (stats_.num_samples == 0)
|
||||
return;
|
||||
|
||||
stats_.average =
|
||||
(sum_samples_ + stats_.num_samples / 2) / stats_.num_samples;
|
||||
}
|
||||
int last_sample_;
|
||||
int64_t sum_samples_;
|
||||
AggregatedStats stats_;
|
||||
};
|
||||
|
||||
// Class holding gathered samples within a process interval.
|
||||
class Samples {
|
||||
public:
|
||||
Samples() : total_count_(0) {}
|
||||
~Samples() {}
|
||||
|
||||
void Add(int sample, uint32_t stream_id) {
|
||||
samples_[stream_id].Add(sample);
|
||||
++total_count_;
|
||||
}
|
||||
void Set(int64_t sample, uint32_t stream_id) {
|
||||
samples_[stream_id].Set(sample);
|
||||
++total_count_;
|
||||
}
|
||||
void SetLast(int64_t sample, uint32_t stream_id) {
|
||||
samples_[stream_id].SetLast(sample);
|
||||
}
|
||||
int64_t GetLast(uint32_t stream_id) { return samples_[stream_id].GetLast(); }
|
||||
|
||||
int64_t Count() const { return total_count_; }
|
||||
bool Empty() const { return total_count_ == 0; }
|
||||
|
||||
int64_t Sum() const {
|
||||
int64_t sum = 0;
|
||||
for (const auto& it : samples_)
|
||||
sum += it.second.sum_;
|
||||
return sum;
|
||||
}
|
||||
|
||||
int Max() const {
|
||||
int max = std::numeric_limits<int>::min();
|
||||
for (const auto& it : samples_)
|
||||
max = std::max(it.second.max_, max);
|
||||
return max;
|
||||
}
|
||||
|
||||
void Reset() {
|
||||
for (auto& it : samples_)
|
||||
it.second.Reset();
|
||||
total_count_ = 0;
|
||||
}
|
||||
|
||||
int64_t Diff() const {
|
||||
int64_t sum_diff = 0;
|
||||
int count = 0;
|
||||
for (const auto& it : samples_) {
|
||||
if (it.second.count_ > 0) {
|
||||
int64_t diff = it.second.sum_ - it.second.last_sum_;
|
||||
if (diff >= 0) {
|
||||
sum_diff += diff;
|
||||
++count;
|
||||
}
|
||||
}
|
||||
}
|
||||
return (count > 0) ? sum_diff : -1;
|
||||
}
|
||||
|
||||
private:
|
||||
struct Stats {
|
||||
void Add(int sample) {
|
||||
sum_ += sample;
|
||||
++count_;
|
||||
max_ = std::max(sample, max_);
|
||||
}
|
||||
void Set(int64_t sample) {
|
||||
sum_ = sample;
|
||||
++count_;
|
||||
}
|
||||
void SetLast(int64_t sample) { last_sum_ = sample; }
|
||||
int64_t GetLast() const { return last_sum_; }
|
||||
void Reset() {
|
||||
if (count_ > 0)
|
||||
last_sum_ = sum_;
|
||||
sum_ = 0;
|
||||
count_ = 0;
|
||||
max_ = std::numeric_limits<int>::min();
|
||||
}
|
||||
|
||||
int max_ = std::numeric_limits<int>::min();
|
||||
int64_t count_ = 0;
|
||||
int64_t sum_ = 0;
|
||||
int64_t last_sum_ = 0;
|
||||
};
|
||||
|
||||
int64_t total_count_;
|
||||
std::map<uint32_t, Stats> samples_; // Gathered samples mapped by stream id.
|
||||
};
|
||||
|
||||
// StatsCounter class.
|
||||
StatsCounter::StatsCounter(Clock* clock,
|
||||
int64_t process_intervals_ms,
|
||||
bool include_empty_intervals,
|
||||
StatsCounterObserver* observer)
|
||||
: include_empty_intervals_(include_empty_intervals),
|
||||
process_intervals_ms_(process_intervals_ms),
|
||||
aggregated_counter_(new AggregatedCounter()),
|
||||
samples_(new Samples()),
|
||||
clock_(clock),
|
||||
observer_(observer),
|
||||
last_process_time_ms_(-1),
|
||||
paused_(false),
|
||||
pause_time_ms_(-1),
|
||||
min_pause_time_ms_(0) {
|
||||
RTC_DCHECK_GT(process_intervals_ms_, 0);
|
||||
}
|
||||
|
||||
StatsCounter::~StatsCounter() {}
|
||||
|
||||
AggregatedStats StatsCounter::GetStats() {
|
||||
return aggregated_counter_->ComputeStats();
|
||||
}
|
||||
|
||||
AggregatedStats StatsCounter::ProcessAndGetStats() {
|
||||
if (HasSample())
|
||||
TryProcess();
|
||||
return aggregated_counter_->ComputeStats();
|
||||
}
|
||||
|
||||
void StatsCounter::ProcessAndPauseForDuration(int64_t min_pause_time_ms) {
|
||||
ProcessAndPause();
|
||||
min_pause_time_ms_ = min_pause_time_ms;
|
||||
}
|
||||
|
||||
void StatsCounter::ProcessAndPause() {
|
||||
if (HasSample())
|
||||
TryProcess();
|
||||
paused_ = true;
|
||||
pause_time_ms_ = clock_->TimeInMilliseconds();
|
||||
}
|
||||
|
||||
void StatsCounter::ProcessAndStopPause() {
|
||||
if (HasSample())
|
||||
TryProcess();
|
||||
Resume();
|
||||
}
|
||||
|
||||
bool StatsCounter::HasSample() const {
|
||||
return last_process_time_ms_ != -1;
|
||||
}
|
||||
|
||||
bool StatsCounter::TimeToProcess(int* elapsed_intervals) {
|
||||
int64_t now = clock_->TimeInMilliseconds();
|
||||
if (last_process_time_ms_ == -1)
|
||||
last_process_time_ms_ = now;
|
||||
|
||||
int64_t diff_ms = now - last_process_time_ms_;
|
||||
if (diff_ms < process_intervals_ms_)
|
||||
return false;
|
||||
|
||||
// Advance number of complete `process_intervals_ms_` that have passed.
|
||||
int64_t num_intervals = diff_ms / process_intervals_ms_;
|
||||
last_process_time_ms_ += num_intervals * process_intervals_ms_;
|
||||
|
||||
*elapsed_intervals = num_intervals;
|
||||
return true;
|
||||
}
|
||||
|
||||
void StatsCounter::Add(int sample) {
|
||||
TryProcess();
|
||||
samples_->Add(sample, kStreamId0);
|
||||
ResumeIfMinTimePassed();
|
||||
}
|
||||
|
||||
void StatsCounter::Set(int64_t sample, uint32_t stream_id) {
|
||||
if (paused_ && sample == samples_->GetLast(stream_id)) {
|
||||
// Do not add same sample while paused (will reset pause).
|
||||
return;
|
||||
}
|
||||
TryProcess();
|
||||
samples_->Set(sample, stream_id);
|
||||
ResumeIfMinTimePassed();
|
||||
}
|
||||
|
||||
void StatsCounter::SetLast(int64_t sample, uint32_t stream_id) {
|
||||
RTC_DCHECK(!HasSample()) << "Should be set before first sample is added.";
|
||||
samples_->SetLast(sample, stream_id);
|
||||
}
|
||||
|
||||
// Reports periodically computed metric.
|
||||
void StatsCounter::ReportMetricToAggregatedCounter(
|
||||
int value,
|
||||
int num_values_to_add) const {
|
||||
for (int i = 0; i < num_values_to_add; ++i) {
|
||||
aggregated_counter_->Add(value);
|
||||
if (observer_)
|
||||
observer_->OnMetricUpdated(value);
|
||||
}
|
||||
}
|
||||
|
||||
void StatsCounter::TryProcess() {
|
||||
int elapsed_intervals;
|
||||
if (!TimeToProcess(&elapsed_intervals))
|
||||
return;
|
||||
|
||||
// Get and report periodically computed metric.
|
||||
int metric;
|
||||
if (GetMetric(&metric))
|
||||
ReportMetricToAggregatedCounter(metric, 1);
|
||||
|
||||
// Report value for elapsed intervals without samples.
|
||||
if (IncludeEmptyIntervals()) {
|
||||
// If there are no samples, all elapsed intervals are empty (otherwise one
|
||||
// interval contains sample(s), discard this interval).
|
||||
int empty_intervals =
|
||||
samples_->Empty() ? elapsed_intervals : (elapsed_intervals - 1);
|
||||
ReportMetricToAggregatedCounter(GetValueForEmptyInterval(),
|
||||
empty_intervals);
|
||||
}
|
||||
|
||||
// Reset samples for elapsed interval.
|
||||
samples_->Reset();
|
||||
}
|
||||
|
||||
bool StatsCounter::IncludeEmptyIntervals() const {
|
||||
return include_empty_intervals_ && !paused_ && !aggregated_counter_->Empty();
|
||||
}
|
||||
void StatsCounter::ResumeIfMinTimePassed() {
|
||||
if (paused_ &&
|
||||
(clock_->TimeInMilliseconds() - pause_time_ms_) >= min_pause_time_ms_) {
|
||||
Resume();
|
||||
}
|
||||
}
|
||||
|
||||
void StatsCounter::Resume() {
|
||||
paused_ = false;
|
||||
min_pause_time_ms_ = 0;
|
||||
}
|
||||
|
||||
// StatsCounter sub-classes.
|
||||
AvgCounter::AvgCounter(Clock* clock,
|
||||
StatsCounterObserver* observer,
|
||||
bool include_empty_intervals)
|
||||
: StatsCounter(clock,
|
||||
kDefaultProcessIntervalMs,
|
||||
include_empty_intervals,
|
||||
observer) {}
|
||||
|
||||
void AvgCounter::Add(int sample) {
|
||||
StatsCounter::Add(sample);
|
||||
}
|
||||
|
||||
bool AvgCounter::GetMetric(int* metric) const {
|
||||
int64_t count = samples_->Count();
|
||||
if (count == 0)
|
||||
return false;
|
||||
|
||||
*metric = (samples_->Sum() + count / 2) / count;
|
||||
return true;
|
||||
}
|
||||
|
||||
int AvgCounter::GetValueForEmptyInterval() const {
|
||||
return aggregated_counter_->last_sample();
|
||||
}
|
||||
|
||||
MaxCounter::MaxCounter(Clock* clock,
|
||||
StatsCounterObserver* observer,
|
||||
int64_t process_intervals_ms)
|
||||
: StatsCounter(clock,
|
||||
process_intervals_ms,
|
||||
false, // `include_empty_intervals`
|
||||
observer) {}
|
||||
|
||||
void MaxCounter::Add(int sample) {
|
||||
StatsCounter::Add(sample);
|
||||
}
|
||||
|
||||
bool MaxCounter::GetMetric(int* metric) const {
|
||||
if (samples_->Empty())
|
||||
return false;
|
||||
|
||||
*metric = samples_->Max();
|
||||
return true;
|
||||
}
|
||||
|
||||
int MaxCounter::GetValueForEmptyInterval() const {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return 0;
|
||||
}
|
||||
|
||||
PercentCounter::PercentCounter(Clock* clock, StatsCounterObserver* observer)
|
||||
: StatsCounter(clock,
|
||||
kDefaultProcessIntervalMs,
|
||||
false, // `include_empty_intervals`
|
||||
observer) {}
|
||||
|
||||
void PercentCounter::Add(bool sample) {
|
||||
StatsCounter::Add(sample ? 1 : 0);
|
||||
}
|
||||
|
||||
bool PercentCounter::GetMetric(int* metric) const {
|
||||
int64_t count = samples_->Count();
|
||||
if (count == 0)
|
||||
return false;
|
||||
|
||||
*metric = (samples_->Sum() * 100 + count / 2) / count;
|
||||
return true;
|
||||
}
|
||||
|
||||
int PercentCounter::GetValueForEmptyInterval() const {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return 0;
|
||||
}
|
||||
|
||||
PermilleCounter::PermilleCounter(Clock* clock, StatsCounterObserver* observer)
|
||||
: StatsCounter(clock,
|
||||
kDefaultProcessIntervalMs,
|
||||
false, // `include_empty_intervals`
|
||||
observer) {}
|
||||
|
||||
void PermilleCounter::Add(bool sample) {
|
||||
StatsCounter::Add(sample ? 1 : 0);
|
||||
}
|
||||
|
||||
bool PermilleCounter::GetMetric(int* metric) const {
|
||||
int64_t count = samples_->Count();
|
||||
if (count == 0)
|
||||
return false;
|
||||
|
||||
*metric = (samples_->Sum() * 1000 + count / 2) / count;
|
||||
return true;
|
||||
}
|
||||
|
||||
int PermilleCounter::GetValueForEmptyInterval() const {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return 0;
|
||||
}
|
||||
|
||||
RateCounter::RateCounter(Clock* clock,
|
||||
StatsCounterObserver* observer,
|
||||
bool include_empty_intervals)
|
||||
: StatsCounter(clock,
|
||||
kDefaultProcessIntervalMs,
|
||||
include_empty_intervals,
|
||||
observer) {}
|
||||
|
||||
void RateCounter::Add(int sample) {
|
||||
StatsCounter::Add(sample);
|
||||
}
|
||||
|
||||
bool RateCounter::GetMetric(int* metric) const {
|
||||
if (samples_->Empty())
|
||||
return false;
|
||||
|
||||
*metric = (samples_->Sum() * 1000 + process_intervals_ms_ / 2) /
|
||||
process_intervals_ms_;
|
||||
return true;
|
||||
}
|
||||
|
||||
int RateCounter::GetValueForEmptyInterval() const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
RateAccCounter::RateAccCounter(Clock* clock,
|
||||
StatsCounterObserver* observer,
|
||||
bool include_empty_intervals)
|
||||
: StatsCounter(clock,
|
||||
kDefaultProcessIntervalMs,
|
||||
include_empty_intervals,
|
||||
observer) {}
|
||||
|
||||
void RateAccCounter::Set(int64_t sample, uint32_t stream_id) {
|
||||
StatsCounter::Set(sample, stream_id);
|
||||
}
|
||||
|
||||
void RateAccCounter::SetLast(int64_t sample, uint32_t stream_id) {
|
||||
StatsCounter::SetLast(sample, stream_id);
|
||||
}
|
||||
|
||||
bool RateAccCounter::GetMetric(int* metric) const {
|
||||
int64_t diff = samples_->Diff();
|
||||
if (diff < 0 || (!include_empty_intervals_ && diff == 0))
|
||||
return false;
|
||||
|
||||
*metric = (diff * 1000 + process_intervals_ms_ / 2) / process_intervals_ms_;
|
||||
return true;
|
||||
}
|
||||
|
||||
int RateAccCounter::GetValueForEmptyInterval() const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
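Every metric in this file uses the same rounded integer division idiom, `(sum + divisor / 2) / divisor`: adding half the divisor before dividing rounds to nearest instead of truncating toward zero (for non-negative inputs). A minimal standalone sketch of the idiom:

#include <cstdint>
#include <cstdio>

// Rounded integer division, as used by AvgCounter, PercentCounter,
// RateCounter, etc. above.
int64_t RoundedDiv(int64_t sum, int64_t divisor) {
  return (sum + divisor / 2) / divisor;
}

int main() {
  std::printf("%lld\n", (long long)RoundedDiv(5, 2));   // 3 (2.5 rounds up), not 2
  std::printf("%lld\n", (long long)RoundedDiv(12, 5));  // 2 (12/5 = 2.4)
  return 0;
}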
296
TMessagesProj/jni/voip/webrtc/video/stats_counter.h
Normal file
@@ -0,0 +1,296 @@
/*
 *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_STATS_COUNTER_H_
#define VIDEO_STATS_COUNTER_H_

#include <memory>
#include <string>

namespace webrtc {

class AggregatedCounter;
class Clock;
class Samples;

// `StatsCounterObserver` is called periodically when a metric is updated.
class StatsCounterObserver {
 public:
  virtual void OnMetricUpdated(int sample) = 0;

  virtual ~StatsCounterObserver() {}
};

struct AggregatedStats {
  std::string ToString() const;
  std::string ToStringWithMultiplier(int multiplier) const;

  int64_t num_samples = 0;
  int min = -1;
  int max = -1;
  int average = -1;
  // TODO(asapersson): Consider adding median/percentiles.
};

// Classes which periodically compute a metric.
//
// During a period, `kProcessIntervalMs`, different metrics can be computed e.g:
// - `AvgCounter`: average of samples
// - `PercentCounter`: percentage of samples
// - `PermilleCounter`: permille of samples
//
// Each periodic metric can be either:
// - reported to an `observer` each period
// - aggregated during the call (e.g. min, max, average)
//
//                 periodically computed
//                    GetMetric()            GetMetric()   => AggregatedStats
//                        ^                      ^            (e.g. min/max/avg)
//                        |                      |
// |    *    *  *         |  **    *   * *      |  ...
// |<- process interval ->|
//
// (*) - samples
//
//
// Example usage:
//
// AvgCounter counter(&clock, nullptr);
// counter.Add(5);
// counter.Add(1);
// counter.Add(6);   // process interval passed -> GetMetric() avg:4
// counter.Add(7);
// counter.Add(3);   // process interval passed -> GetMetric() avg:5
// counter.Add(10);
// counter.Add(20);  // process interval passed -> GetMetric() avg:15
// AggregatedStats stats = counter.GetStats();
// stats: {min:4, max:15, avg:8}
//

// Note: StatsCounter takes ownership of `observer`.

class StatsCounter {
 public:
  virtual ~StatsCounter();

  // Gets metric within an interval. Returns true on success, false otherwise.
  virtual bool GetMetric(int* metric) const = 0;

  // Gets the value to use for an interval without samples.
  virtual int GetValueForEmptyInterval() const = 0;

  // Gets aggregated stats (i.e. aggregate of periodically computed metrics).
  AggregatedStats GetStats();

  // Reports metrics for elapsed intervals to AggregatedCounter and GetStats.
  AggregatedStats ProcessAndGetStats();

  // Reports metrics for elapsed intervals to AggregatedCounter and pauses
  // stats (i.e. empty intervals will be discarded until next sample is added).
  void ProcessAndPause();

  // As above with a minimum pause time. Added samples within this interval
  // will not resume the stats (i.e. stop the pause).
  void ProcessAndPauseForDuration(int64_t min_pause_time_ms);

  // Reports metrics for elapsed intervals to AggregatedCounter and stops
  // pause.
  void ProcessAndStopPause();

  // Checks if a sample has been added (i.e. Add or Set called).
  bool HasSample() const;

 protected:
  StatsCounter(Clock* clock,
               int64_t process_intervals_ms,
               bool include_empty_intervals,
               StatsCounterObserver* observer);

  void Add(int sample);
  void Set(int64_t sample, uint32_t stream_id);
  void SetLast(int64_t sample, uint32_t stream_id);

  const bool include_empty_intervals_;
  const int64_t process_intervals_ms_;
  const std::unique_ptr<AggregatedCounter> aggregated_counter_;
  const std::unique_ptr<Samples> samples_;

 private:
  bool TimeToProcess(int* num_elapsed_intervals);
  void TryProcess();
  void ReportMetricToAggregatedCounter(int value, int num_values_to_add) const;
  bool IncludeEmptyIntervals() const;
  void Resume();
  void ResumeIfMinTimePassed();

  Clock* const clock_;
  const std::unique_ptr<StatsCounterObserver> observer_;
  int64_t last_process_time_ms_;
  bool paused_;
  int64_t pause_time_ms_;
  int64_t min_pause_time_ms_;
};

// AvgCounter: average of samples
//
//           | *      *      *      | *      *      | ...
//           | Add(5) Add(1) Add(6) | Add(5) Add(5) |
// GetMetric | (5 + 1 + 6) / 3      | (5 + 5) / 2   |
//
// `include_empty_intervals`: If set, intervals without samples will be included
//                            in the stats. The value for an interval is
//                            determined by GetValueForEmptyInterval().
//
class AvgCounter : public StatsCounter {
 public:
  AvgCounter(Clock* clock,
             StatsCounterObserver* observer,
             bool include_empty_intervals);
  ~AvgCounter() override {}

  AvgCounter(const AvgCounter&) = delete;
  AvgCounter& operator=(const AvgCounter&) = delete;

  void Add(int sample);

 private:
  bool GetMetric(int* metric) const override;

  // Returns the last computed metric (i.e. from GetMetric).
  int GetValueForEmptyInterval() const override;
};

// MaxCounter: maximum of samples
//
//           | *      *      *      | *      *      | ...
//           | Add(5) Add(1) Add(6) | Add(5) Add(5) |
// GetMetric | max: (5, 1, 6)       | max: (5, 5)   |
//
class MaxCounter : public StatsCounter {
 public:
  MaxCounter(Clock* clock,
             StatsCounterObserver* observer,
             int64_t process_intervals_ms);
  ~MaxCounter() override {}

  MaxCounter(const MaxCounter&) = delete;
  MaxCounter& operator=(const MaxCounter&) = delete;

  void Add(int sample);

 private:
  bool GetMetric(int* metric) const override;
  int GetValueForEmptyInterval() const override;
};

// PercentCounter: percentage of samples
//
//           | *      *      *      | *      *      | ...
//           | Add(T) Add(F) Add(T) | Add(F) Add(T) |
// GetMetric | 100 * 2 / 3          | 100 * 1 / 2   |
//
class PercentCounter : public StatsCounter {
 public:
  PercentCounter(Clock* clock, StatsCounterObserver* observer);
  ~PercentCounter() override {}

  PercentCounter(const PercentCounter&) = delete;
  PercentCounter& operator=(const PercentCounter&) = delete;

  void Add(bool sample);

 private:
  bool GetMetric(int* metric) const override;
  int GetValueForEmptyInterval() const override;
};

// PermilleCounter: permille of samples
//
//           | *      *      *      | *      *      | ...
//           | Add(T) Add(F) Add(T) | Add(F) Add(T) |
// GetMetric | 1000 * 2 / 3         | 1000 * 1 / 2  |
//
class PermilleCounter : public StatsCounter {
 public:
  PermilleCounter(Clock* clock, StatsCounterObserver* observer);
  ~PermilleCounter() override {}

  PermilleCounter(const PermilleCounter&) = delete;
  PermilleCounter& operator=(const PermilleCounter&) = delete;

  void Add(bool sample);

 private:
  bool GetMetric(int* metric) const override;
  int GetValueForEmptyInterval() const override;
};

// RateCounter: units per second
//
//           | *      *      *      | *      *      | ...
//           | Add(5) Add(1) Add(6) | Add(5) Add(5) |
//           |<------ 2 sec ------->|               |
// GetMetric | (5 + 1 + 6) / 2      | (5 + 5) / 2   |
//
// `include_empty_intervals`: If set, intervals without samples will be included
//                            in the stats. The value for an interval is
//                            determined by GetValueForEmptyInterval().
//
class RateCounter : public StatsCounter {
 public:
  RateCounter(Clock* clock,
              StatsCounterObserver* observer,
              bool include_empty_intervals);
  ~RateCounter() override {}

  RateCounter(const RateCounter&) = delete;
  RateCounter& operator=(const RateCounter&) = delete;

  void Add(int sample);

 private:
  bool GetMetric(int* metric) const override;
  int GetValueForEmptyInterval() const override;  // Returns zero.
};

// RateAccCounter: units per second (used for counters)
//
//           | *      *      *      | *       *       | ...
//           | Set(5) Set(6) Set(8) | Set(11) Set(13) |
//           |<------ 2 sec ------->|                 |
// GetMetric | (8 - 0) / 2          | (13 - 8) / 2    |
//
// `include_empty_intervals`: If set, intervals without samples will be included
//                            in the stats. The value for an interval is
//                            determined by GetValueForEmptyInterval().
//
class RateAccCounter : public StatsCounter {
 public:
  RateAccCounter(Clock* clock,
                 StatsCounterObserver* observer,
                 bool include_empty_intervals);
  ~RateAccCounter() override {}

  RateAccCounter(const RateAccCounter&) = delete;
  RateAccCounter& operator=(const RateAccCounter&) = delete;

  void Set(int64_t sample, uint32_t stream_id);

  // Sets the value for previous interval.
  // To be used if a value other than zero is initially required.
  void SetLast(int64_t sample, uint32_t stream_id);

 private:
  bool GetMetric(int* metric) const override;
  int GetValueForEmptyInterval() const override;  // Returns zero.
};

}  // namespace webrtc

#endif  // VIDEO_STATS_COUNTER_H_
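A hedged usage sketch of the observer hook declared above. Per the ownership note, the counter takes ownership of the raw observer pointer. `SimulatedClock` is assumed to be available from system_wrappers/include/clock.h; the other names are declared in this header.

#include <cstdio>
#include "system_wrappers/include/clock.h"
#include "video/stats_counter.h"

namespace {
class PrintingObserver : public webrtc::StatsCounterObserver {
 public:
  // Called once per elapsed process interval with the computed metric.
  void OnMetricUpdated(int sample) override {
    std::printf("periodic metric: %d\n", sample);
  }
};
}  // namespace

void Demo() {
  webrtc::SimulatedClock clock(/*initial_time_us=*/0);
  // The counter takes ownership of the observer (see the note above).
  webrtc::AvgCounter counter(&clock, new PrintingObserver(),
                             /*include_empty_intervals=*/false);
  counter.Add(5);
  counter.Add(1);
  clock.AdvanceTimeMilliseconds(2000);  // One default process interval.
  counter.Add(6);  // Triggers processing; observer sees avg of {5, 1} = 3.
}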
195
TMessagesProj/jni/voip/webrtc/video/stream_synchronization.cc
Normal file
@@ -0,0 +1,195 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/stream_synchronization.h"

#include <stdlib.h>

#include <algorithm>

#include "rtc_base/logging.h"

namespace webrtc {

static const int kMaxChangeMs = 80;
static const int kMaxDeltaDelayMs = 10000;
static const int kFilterLength = 4;
// Minimum difference between audio and video to warrant a change.
static const int kMinDeltaMs = 30;

StreamSynchronization::StreamSynchronization(uint32_t video_stream_id,
                                             uint32_t audio_stream_id)
    : video_stream_id_(video_stream_id),
      audio_stream_id_(audio_stream_id),
      base_target_delay_ms_(0),
      avg_diff_ms_(0) {}

bool StreamSynchronization::ComputeRelativeDelay(
    const Measurements& audio_measurement,
    const Measurements& video_measurement,
    int* relative_delay_ms) {
  NtpTime audio_last_capture_time =
      audio_measurement.rtp_to_ntp.Estimate(audio_measurement.latest_timestamp);
  if (!audio_last_capture_time.Valid()) {
    return false;
  }
  NtpTime video_last_capture_time =
      video_measurement.rtp_to_ntp.Estimate(video_measurement.latest_timestamp);
  if (!video_last_capture_time.Valid()) {
    return false;
  }
  int64_t audio_last_capture_time_ms = audio_last_capture_time.ToMs();
  int64_t video_last_capture_time_ms = video_last_capture_time.ToMs();

  // Positive diff means that video_measurement is behind audio_measurement.
  *relative_delay_ms =
      video_measurement.latest_receive_time_ms -
      audio_measurement.latest_receive_time_ms -
      (video_last_capture_time_ms - audio_last_capture_time_ms);

  if (*relative_delay_ms > kMaxDeltaDelayMs ||
      *relative_delay_ms < -kMaxDeltaDelayMs) {
    return false;
  }
  return true;
}

bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
                                          int current_audio_delay_ms,
                                          int* total_audio_delay_target_ms,
                                          int* total_video_delay_target_ms) {
  int current_video_delay_ms = *total_video_delay_target_ms;

  RTC_LOG(LS_VERBOSE) << "Audio delay: " << current_audio_delay_ms
                      << " current diff: " << relative_delay_ms
                      << " for stream " << audio_stream_id_;

  // Calculate the difference between the lowest possible video delay and the
  // current audio delay.
  int current_diff_ms =
      current_video_delay_ms - current_audio_delay_ms + relative_delay_ms;

  avg_diff_ms_ =
      ((kFilterLength - 1) * avg_diff_ms_ + current_diff_ms) / kFilterLength;
  if (abs(avg_diff_ms_) < kMinDeltaMs) {
    // Don't adjust if the diff is within our margin.
    return false;
  }

  // Make sure we don't move too fast.
  int diff_ms = avg_diff_ms_ / 2;
  diff_ms = std::min(diff_ms, kMaxChangeMs);
  diff_ms = std::max(diff_ms, -kMaxChangeMs);

  // Reset the average after a move to prevent overshooting reaction.
  avg_diff_ms_ = 0;

  if (diff_ms > 0) {
    // The minimum video delay is longer than the current audio delay.
    // We need to decrease extra video delay, or add extra audio delay.
    if (video_delay_.extra_ms > base_target_delay_ms_) {
      // We have extra delay added to ViE. Reduce this delay before adding
      // extra delay to VoE.
      video_delay_.extra_ms -= diff_ms;
      audio_delay_.extra_ms = base_target_delay_ms_;
    } else {  // video_delay_.extra_ms > 0
      // We have no extra video delay to remove, increase the audio delay.
      audio_delay_.extra_ms += diff_ms;
      video_delay_.extra_ms = base_target_delay_ms_;
    }
  } else {  // if (diff_ms > 0)
    // The video delay is lower than the current audio delay.
    // We need to decrease extra audio delay, or add extra video delay.
    if (audio_delay_.extra_ms > base_target_delay_ms_) {
      // We have extra delay in VoiceEngine.
      // Start with decreasing the voice delay.
      // Note: diff_ms is negative; add the negative difference.
      audio_delay_.extra_ms += diff_ms;
      video_delay_.extra_ms = base_target_delay_ms_;
    } else {  // audio_delay_.extra_ms > base_target_delay_ms_
      // We have no extra delay in VoiceEngine, increase the video delay.
      // Note: diff_ms is negative; subtract the negative difference.
      video_delay_.extra_ms -= diff_ms;  // X - (-Y) = X + Y.
      audio_delay_.extra_ms = base_target_delay_ms_;
    }
  }

  // Make sure that video is never below our target.
  video_delay_.extra_ms =
      std::max(video_delay_.extra_ms, base_target_delay_ms_);

  int new_video_delay_ms;
  if (video_delay_.extra_ms > base_target_delay_ms_) {
    new_video_delay_ms = video_delay_.extra_ms;
  } else {
    // No change to the extra video delay. We are changing audio and we only
    // allow to change one at the time.
    new_video_delay_ms = video_delay_.last_ms;
  }

  // Make sure that we don't go below the extra video delay.
  new_video_delay_ms = std::max(new_video_delay_ms, video_delay_.extra_ms);

  // Verify we don't go above the maximum allowed video delay.
  new_video_delay_ms =
      std::min(new_video_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);

  int new_audio_delay_ms;
  if (audio_delay_.extra_ms > base_target_delay_ms_) {
    new_audio_delay_ms = audio_delay_.extra_ms;
  } else {
    // No change to the audio delay. We are changing video and we only allow to
    // change one at the time.
    new_audio_delay_ms = audio_delay_.last_ms;
  }

  // Make sure that we don't go below the extra audio delay.
  new_audio_delay_ms = std::max(new_audio_delay_ms, audio_delay_.extra_ms);

  // Verify we don't go above the maximum allowed audio delay.
  new_audio_delay_ms =
      std::min(new_audio_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);

  video_delay_.last_ms = new_video_delay_ms;
  audio_delay_.last_ms = new_audio_delay_ms;

  RTC_LOG(LS_VERBOSE) << "Sync video delay " << new_video_delay_ms
                      << " for video stream " << video_stream_id_
                      << " and audio delay " << audio_delay_.extra_ms
                      << " for audio stream " << audio_stream_id_;

  *total_video_delay_target_ms = new_video_delay_ms;
  *total_audio_delay_target_ms = new_audio_delay_ms;
  return true;
}

void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) {
  // Initial extra delay for audio (accounting for existing extra delay).
  audio_delay_.extra_ms += target_delay_ms - base_target_delay_ms_;
  audio_delay_.last_ms += target_delay_ms - base_target_delay_ms_;

  // The video delay is compared to the last value (and how much we can update
  // is limited by that as well).
  video_delay_.last_ms += target_delay_ms - base_target_delay_ms_;
  video_delay_.extra_ms += target_delay_ms - base_target_delay_ms_;

  // Video is already delayed by the desired amount.
  base_target_delay_ms_ = target_delay_ms;
}

void StreamSynchronization::ReduceAudioDelay() {
  audio_delay_.extra_ms *= 0.9f;
}

void StreamSynchronization::ReduceVideoDelay() {
  video_delay_.extra_ms *= 0.9f;
}

}  // namespace webrtc
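The relative-delay computation above subtracts the capture-time spread from the receive-time spread. A worked instance of that formula with made-up numbers: video arrived 250 ms after audio but was captured 200 ms later, so video is effectively 50 ms behind audio.

#include <cassert>
#include <cstdint>

// Positive result means video is rendered behind audio (same sign
// convention as StreamSynchronization::ComputeRelativeDelay above).
int64_t RelativeDelayMs(int64_t video_recv_ms, int64_t audio_recv_ms,
                        int64_t video_capture_ms, int64_t audio_capture_ms) {
  return (video_recv_ms - audio_recv_ms) -
         (video_capture_ms - audio_capture_ms);
}

int main() {
  assert(RelativeDelayMs(/*video_recv_ms=*/1250, /*audio_recv_ms=*/1000,
                         /*video_capture_ms=*/700,
                         /*audio_capture_ms=*/500) == 50);
  return 0;
}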
71
TMessagesProj/jni/voip/webrtc/video/stream_synchronization.h
Normal file
@@ -0,0 +1,71 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_STREAM_SYNCHRONIZATION_H_
#define VIDEO_STREAM_SYNCHRONIZATION_H_

#include <stdint.h>

#include "system_wrappers/include/rtp_to_ntp_estimator.h"

namespace webrtc {

class StreamSynchronization {
 public:
  struct Measurements {
    Measurements() : latest_receive_time_ms(0), latest_timestamp(0) {}
    RtpToNtpEstimator rtp_to_ntp;
    int64_t latest_receive_time_ms;
    uint32_t latest_timestamp;
  };

  StreamSynchronization(uint32_t video_stream_id, uint32_t audio_stream_id);

  bool ComputeDelays(int relative_delay_ms,
                     int current_audio_delay_ms,
                     int* total_audio_delay_target_ms,
                     int* total_video_delay_target_ms);

  // On success `relative_delay_ms` contains the number of milliseconds later
  // video is rendered relative to audio. If audio is played back later than
  // video, `relative_delay_ms` will be negative.
  static bool ComputeRelativeDelay(const Measurements& audio_measurement,
                                   const Measurements& video_measurement,
                                   int* relative_delay_ms);

  // Set target buffering delay. Audio and video will be delayed by at least
  // `target_delay_ms`.
  void SetTargetBufferingDelay(int target_delay_ms);

  // Lowers the audio delay by 10%. Can be used to recover from errors.
  void ReduceAudioDelay();

  // Lowers the video delay by 10%. Can be used to recover from errors.
  void ReduceVideoDelay();

  uint32_t audio_stream_id() const { return audio_stream_id_; }
  uint32_t video_stream_id() const { return video_stream_id_; }

 private:
  struct SynchronizationDelays {
    int extra_ms = 0;
    int last_ms = 0;
  };

  const uint32_t video_stream_id_;
  const uint32_t audio_stream_id_;
  SynchronizationDelays audio_delay_;
  SynchronizationDelays video_delay_;
  int base_target_delay_ms_;
  int avg_diff_ms_;
};
}  // namespace webrtc

#endif  // VIDEO_STREAM_SYNCHRONIZATION_H_
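A minimal usage sketch of the interface above, under assumed inputs: the measurements would normally be filled from RTCP sender reports and packet receive timestamps, and the free function `SyncOnce` is illustrative only. Note that `ComputeDelays` reads the current total video delay through `*video_delay_target_ms` before overwriting it.

#include "video/stream_synchronization.h"

void SyncOnce(webrtc::StreamSynchronization& sync,
              const webrtc::StreamSynchronization::Measurements& audio,
              const webrtc::StreamSynchronization::Measurements& video,
              int current_audio_delay_ms,
              int* audio_delay_target_ms,
              int* video_delay_target_ms) {
  int relative_delay_ms = 0;
  if (!webrtc::StreamSynchronization::ComputeRelativeDelay(
          audio, video, &relative_delay_ms)) {
    return;  // RTP-to-NTP estimates not valid yet, or diff out of range.
  }
  // On success the targets tell the receive side how much extra to buffer.
  // `*video_delay_target_ms` must hold the current video delay on entry.
  sync.ComputeDelays(relative_delay_ms, current_audio_delay_ms,
                     audio_delay_target_ms, video_delay_target_ms);
}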
711
TMessagesProj/jni/voip/webrtc/video/sv_loopback.cc
Normal file
@@ -0,0 +1,711 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdio.h>

#include <memory>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/types/optional.h"
#include "api/test/simulated_network.h"
#include "api/test/video_quality_test_fixture.h"
#include "api/transport/bitrate_settings.h"
#include "api/video_codecs/video_codec.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_encode.h"
#include "system_wrappers/include/field_trial.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/run_test.h"
#include "test/test_flags.h"
#include "video/video_quality_test.h"

// Flags for video.
ABSL_FLAG(int, vwidth, 640, "Video width.");

ABSL_FLAG(int, vheight, 480, "Video height.");

ABSL_FLAG(int, vfps, 30, "Video frames per second.");

ABSL_FLAG(int,
          capture_device_index,
          0,
          "Capture device to select for video stream");

ABSL_FLAG(int, vtarget_bitrate, 400, "Video stream target bitrate in kbps.");

ABSL_FLAG(int, vmin_bitrate, 100, "Video stream min bitrate in kbps.");

ABSL_FLAG(int, vmax_bitrate, 2000, "Video stream max bitrate in kbps.");

ABSL_FLAG(bool,
          suspend_below_min_bitrate,
          false,
          "Suspends video below the configured min bitrate.");

ABSL_FLAG(int,
          vnum_temporal_layers,
          1,
          "Number of temporal layers for video. Set to 1-4 to override.");

ABSL_FLAG(int, vnum_streams, 0, "Number of video streams to show or analyze.");

ABSL_FLAG(int,
          vnum_spatial_layers,
          1,
          "Number of video spatial layers to use.");

ABSL_FLAG(int,
          vinter_layer_pred,
          2,
          "Video inter-layer prediction mode. "
          "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");

ABSL_FLAG(std::string,
          vstream0,
          "",
          "Comma separated values describing VideoStream for video stream #0.");

ABSL_FLAG(std::string,
          vstream1,
          "",
          "Comma separated values describing VideoStream for video stream #1.");

ABSL_FLAG(std::string,
          vsl0,
          "",
          "Comma separated values describing SpatialLayer for video layer #0.");

ABSL_FLAG(std::string,
          vsl1,
          "",
          "Comma separated values describing SpatialLayer for video layer #1.");

ABSL_FLAG(int,
          vselected_tl,
          -1,
          "Temporal layer to show or analyze for video. -1 to disable "
          "filtering.");

ABSL_FLAG(int,
          vselected_stream,
          0,
          "ID of the video stream to show or analyze. "
          "Set to the number of streams to show them all.");

ABSL_FLAG(int,
          vselected_sl,
          -1,
          "Spatial layer to show or analyze for video. -1 to disable "
          "filtering.");

// Flags for screenshare.
ABSL_FLAG(int,
          min_transmit_bitrate,
          400,
          "Min transmit bitrate incl. padding for screenshare.");

ABSL_FLAG(int, swidth, 1850, "Screenshare width (crops source).");

ABSL_FLAG(int, sheight, 1110, "Screenshare height (crops source).");

ABSL_FLAG(int, sfps, 5, "Frames per second for screenshare.");

ABSL_FLAG(int,
          starget_bitrate,
          100,
          "Screenshare stream target bitrate in kbps.");

ABSL_FLAG(int, smin_bitrate, 100, "Screenshare stream min bitrate in kbps.");

ABSL_FLAG(int, smax_bitrate, 2000, "Screenshare stream max bitrate in kbps.");

ABSL_FLAG(int,
          snum_temporal_layers,
          2,
          "Number of temporal layers to use in screenshare.");

ABSL_FLAG(int,
          snum_streams,
          0,
          "Number of screenshare streams to show or analyze.");

ABSL_FLAG(int,
          snum_spatial_layers,
          1,
          "Number of screenshare spatial layers to use.");

ABSL_FLAG(int,
          sinter_layer_pred,
          0,
          "Screenshare inter-layer prediction mode. "
          "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");

ABSL_FLAG(
    std::string,
    sstream0,
    "",
    "Comma separated values describing VideoStream for screenshare stream #0.");

ABSL_FLAG(
    std::string,
    sstream1,
    "",
    "Comma separated values describing VideoStream for screenshare stream #1.");

ABSL_FLAG(
    std::string,
    ssl0,
    "",
    "Comma separated values describing SpatialLayer for screenshare layer #0.");

ABSL_FLAG(
    std::string,
    ssl1,
    "",
    "Comma separated values describing SpatialLayer for screenshare layer #1.");

ABSL_FLAG(int,
          sselected_tl,
          -1,
          "Temporal layer to show or analyze for screenshare. -1 to disable "
          "filtering.");

ABSL_FLAG(int,
          sselected_stream,
          0,
          "ID of the stream to show or analyze for screenshare. "
          "Set to the number of streams to show them all.");

ABSL_FLAG(int,
          sselected_sl,
          -1,
          "Spatial layer to show or analyze for screenshare. -1 to disable "
          "filtering.");

ABSL_FLAG(bool,
          generate_slides,
          false,
          "Whether to use randomly generated slides or read them from files.");

ABSL_FLAG(int,
          slide_change_interval,
          10,
          "Interval (in seconds) between simulated slide changes.");

ABSL_FLAG(
    int,
    scroll_duration,
    0,
    "Duration (in seconds) during which a slide will be scrolled into place.");

ABSL_FLAG(std::string,
          slides,
          "",
          "Comma-separated list of *.yuv files to display as slides.");

// Flags common with screenshare and video loopback, with equal default values.
ABSL_FLAG(int, start_bitrate, 600, "Call start bitrate in kbps.");

ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");

ABSL_FLAG(bool,
          analyze_video,
          false,
          "Analyze video stream (if --duration is present)");

ABSL_FLAG(bool,
          analyze_screenshare,
          false,
          "Analyze screenshare stream (if --duration is present)");

ABSL_FLAG(
    int,
    duration,
    0,
    "Duration of the test in seconds. If 0, rendered video will be shown "
    "instead.");

ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");

ABSL_FLAG(std::string,
          graph_title,
          "",
          "If empty, title will be generated automatically.");

ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");

ABSL_FLAG(int,
          avg_burst_loss_length,
          -1,
          "Average burst length of lost packets.");

ABSL_FLAG(int,
          link_capacity,
          0,
          "Capacity (kbps) of the fake link. 0 means infinite.");

ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");

ABSL_FLAG(int,
          avg_propagation_delay_ms,
          0,
          "Average link propagation delay in ms.");

ABSL_FLAG(std::string,
          rtc_event_log_name,
          "",
          "Filename for rtc event log. Two files "
          "with \"_send\" and \"_recv\" suffixes will be created. "
          "Works only when --duration is set.");

ABSL_FLAG(std::string,
          rtp_dump_name,
          "",
          "Filename for dumped received RTP stream.");

ABSL_FLAG(int,
          std_propagation_delay_ms,
          0,
          "Link propagation delay standard deviation in ms.");

ABSL_FLAG(std::string,
          encoded_frame_path,
          "",
          "The base path for encoded frame logs. Created files will have "
          "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");

ABSL_FLAG(bool, logs, false, "print logs to stderr");

ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation");

ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");

ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur");

ABSL_FLAG(bool, use_ulpfec, false, "Use RED+ULPFEC forward error correction.");

ABSL_FLAG(bool, use_flexfec, false, "Use FlexFEC forward error correction.");

ABSL_FLAG(bool, audio, false, "Add audio stream");

ABSL_FLAG(bool,
          audio_video_sync,
          false,
          "Sync audio and video stream (no effect if"
          " audio is false)");

ABSL_FLAG(bool,
          audio_dtx,
          false,
          "Enable audio DTX (no effect if audio is false)");

ABSL_FLAG(bool, video, true, "Add video stream");

// Video-specific flags.
ABSL_FLAG(std::string,
          vclip,
          "",
          "Name of the clip to show. If empty, the camera is used. Use "
          "\"Generator\" for chroma generator.");

namespace webrtc {
namespace {

InterLayerPredMode IntToInterLayerPredMode(int inter_layer_pred) {
  if (inter_layer_pred == 0) {
    return InterLayerPredMode::kOn;
  } else if (inter_layer_pred == 1) {
    return InterLayerPredMode::kOff;
  } else {
    RTC_DCHECK_EQ(inter_layer_pred, 2);
    return InterLayerPredMode::kOnKeyPic;
  }
}

size_t VideoWidth() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_vwidth));
}

size_t VideoHeight() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_vheight));
}

int VideoFps() {
  return absl::GetFlag(FLAGS_vfps);
}

size_t GetCaptureDevice() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_capture_device_index));
}

int VideoTargetBitrateKbps() {
  return absl::GetFlag(FLAGS_vtarget_bitrate);
}

int VideoMinBitrateKbps() {
  return absl::GetFlag(FLAGS_vmin_bitrate);
}

int VideoMaxBitrateKbps() {
  return absl::GetFlag(FLAGS_vmax_bitrate);
}

int VideoNumTemporalLayers() {
  return absl::GetFlag(FLAGS_vnum_temporal_layers);
}

int VideoNumStreams() {
  return absl::GetFlag(FLAGS_vnum_streams);
}

int VideoNumSpatialLayers() {
  return absl::GetFlag(FLAGS_vnum_spatial_layers);
}

InterLayerPredMode VideoInterLayerPred() {
  return IntToInterLayerPredMode(absl::GetFlag(FLAGS_vinter_layer_pred));
}

std::string VideoStream0() {
  return absl::GetFlag(FLAGS_vstream0);
}

std::string VideoStream1() {
  return absl::GetFlag(FLAGS_vstream1);
}

std::string VideoSL0() {
  return absl::GetFlag(FLAGS_vsl0);
}

std::string VideoSL1() {
  return absl::GetFlag(FLAGS_vsl1);
}

int VideoSelectedTL() {
  return absl::GetFlag(FLAGS_vselected_tl);
}

int VideoSelectedStream() {
  return absl::GetFlag(FLAGS_vselected_stream);
}

int VideoSelectedSL() {
  return absl::GetFlag(FLAGS_vselected_sl);
}

int ScreenshareMinTransmitBitrateKbps() {
  return absl::GetFlag(FLAGS_min_transmit_bitrate);
}

size_t ScreenshareWidth() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_swidth));
}

size_t ScreenshareHeight() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_sheight));
}

int ScreenshareFps() {
  return absl::GetFlag(FLAGS_sfps);
}

int ScreenshareTargetBitrateKbps() {
  return absl::GetFlag(FLAGS_starget_bitrate);
}

int ScreenshareMinBitrateKbps() {
  return absl::GetFlag(FLAGS_smin_bitrate);
}

int ScreenshareMaxBitrateKbps() {
  return absl::GetFlag(FLAGS_smax_bitrate);
}

int ScreenshareNumTemporalLayers() {
  return absl::GetFlag(FLAGS_snum_temporal_layers);
}

int ScreenshareNumStreams() {
  return absl::GetFlag(FLAGS_snum_streams);
}

int ScreenshareNumSpatialLayers() {
  return absl::GetFlag(FLAGS_snum_spatial_layers);
}

InterLayerPredMode ScreenshareInterLayerPred() {
  return IntToInterLayerPredMode(absl::GetFlag(FLAGS_sinter_layer_pred));
}

std::string ScreenshareStream0() {
  return absl::GetFlag(FLAGS_sstream0);
}

std::string ScreenshareStream1() {
  return absl::GetFlag(FLAGS_sstream1);
}

std::string ScreenshareSL0() {
  return absl::GetFlag(FLAGS_ssl0);
}

std::string ScreenshareSL1() {
  return absl::GetFlag(FLAGS_ssl1);
}

int ScreenshareSelectedTL() {
  return absl::GetFlag(FLAGS_sselected_tl);
}

int ScreenshareSelectedStream() {
  return absl::GetFlag(FLAGS_sselected_stream);
}

int ScreenshareSelectedSL() {
  return absl::GetFlag(FLAGS_sselected_sl);
}

bool GenerateSlides() {
  return absl::GetFlag(FLAGS_generate_slides);
}

int SlideChangeInterval() {
  return absl::GetFlag(FLAGS_slide_change_interval);
}

int ScrollDuration() {
  return absl::GetFlag(FLAGS_scroll_duration);
}

std::vector<std::string> Slides() {
  std::vector<std::string> slides;
  std::string slides_list = absl::GetFlag(FLAGS_slides);
  rtc::tokenize(slides_list, ',', &slides);
  return slides;
}

int StartBitrateKbps() {
  return absl::GetFlag(FLAGS_start_bitrate);
}

std::string Codec() {
  return absl::GetFlag(FLAGS_codec);
}

bool AnalyzeVideo() {
  return absl::GetFlag(FLAGS_analyze_video);
}

bool AnalyzeScreenshare() {
  return absl::GetFlag(FLAGS_analyze_screenshare);
}

int DurationSecs() {
  return absl::GetFlag(FLAGS_duration);
}

std::string OutputFilename() {
  return absl::GetFlag(FLAGS_output_filename);
}

std::string GraphTitle() {
  return absl::GetFlag(FLAGS_graph_title);
}

int LossPercent() {
  return absl::GetFlag(FLAGS_loss_percent);
}

int AvgBurstLossLength() {
  return absl::GetFlag(FLAGS_avg_burst_loss_length);
}

int LinkCapacityKbps() {
  return absl::GetFlag(FLAGS_link_capacity);
}

int QueueSize() {
  return absl::GetFlag(FLAGS_queue_size);
}

int AvgPropagationDelayMs() {
  return absl::GetFlag(FLAGS_avg_propagation_delay_ms);
}

std::string RtcEventLogName() {
  return absl::GetFlag(FLAGS_rtc_event_log_name);
}

std::string RtpDumpName() {
  return absl::GetFlag(FLAGS_rtp_dump_name);
}

int StdPropagationDelayMs() {
  return absl::GetFlag(FLAGS_std_propagation_delay_ms);
}

std::string EncodedFramePath() {
  return absl::GetFlag(FLAGS_encoded_frame_path);
}

std::string VideoClip() {
  return absl::GetFlag(FLAGS_vclip);
}

}  // namespace

void Loopback() {
  int camera_idx, screenshare_idx;
  RTC_CHECK(!(AnalyzeScreenshare() && AnalyzeVideo()))
      << "Select only one of video or screenshare.";
  RTC_CHECK(!DurationSecs() || AnalyzeScreenshare() || AnalyzeVideo())
      << "If duration is set, exactly one of analyze_* flags should be set.";
  // Default: camera feed first, if nothing selected.
  if (AnalyzeVideo() || !AnalyzeScreenshare()) {
    camera_idx = 0;
    screenshare_idx = 1;
  } else {
    camera_idx = 1;
    screenshare_idx = 0;
  }

  BuiltInNetworkBehaviorConfig pipe_config;
  pipe_config.loss_percent = LossPercent();
  pipe_config.avg_burst_loss_length = AvgBurstLossLength();
  pipe_config.link_capacity_kbps = LinkCapacityKbps();
  pipe_config.queue_length_packets = QueueSize();
  pipe_config.queue_delay_ms = AvgPropagationDelayMs();
  pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
  pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);

  BitrateConstraints call_bitrate_config;
  call_bitrate_config.min_bitrate_bps =
      (ScreenshareMinBitrateKbps() + VideoMinBitrateKbps()) * 1000;
  call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
  call_bitrate_config.max_bitrate_bps =
      (ScreenshareMaxBitrateKbps() + VideoMaxBitrateKbps()) * 1000;

  VideoQualityTest::Params params;
  params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe);
  params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor);
  params.call.call_bitrate_config = call_bitrate_config;
  params.call.dual_video = true;
  params.video[screenshare_idx].enabled = true;
  params.video[screenshare_idx].width = ScreenshareWidth();
  params.video[screenshare_idx].height = ScreenshareHeight();
  params.video[screenshare_idx].fps = ScreenshareFps();
  params.video[screenshare_idx].min_bitrate_bps =
      ScreenshareMinBitrateKbps() * 1000;
  params.video[screenshare_idx].target_bitrate_bps =
      ScreenshareTargetBitrateKbps() * 1000;
  params.video[screenshare_idx].max_bitrate_bps =
      ScreenshareMaxBitrateKbps() * 1000;
  params.video[screenshare_idx].codec = Codec();
  params.video[screenshare_idx].num_temporal_layers =
      ScreenshareNumTemporalLayers();
  params.video[screenshare_idx].selected_tl = ScreenshareSelectedTL();
  params.video[screenshare_idx].min_transmit_bps =
      ScreenshareMinTransmitBitrateKbps() * 1000;
  params.video[camera_idx].enabled = absl::GetFlag(FLAGS_video);
  params.video[camera_idx].width = VideoWidth();
  params.video[camera_idx].height = VideoHeight();
  params.video[camera_idx].fps = VideoFps();
  params.video[camera_idx].min_bitrate_bps = VideoMinBitrateKbps() * 1000;
  params.video[camera_idx].target_bitrate_bps = VideoTargetBitrateKbps() * 1000;
  params.video[camera_idx].max_bitrate_bps = VideoMaxBitrateKbps() * 1000;
  params.video[camera_idx].suspend_below_min_bitrate =
      absl::GetFlag(FLAGS_suspend_below_min_bitrate);
  params.video[camera_idx].codec = Codec();
  params.video[camera_idx].num_temporal_layers = VideoNumTemporalLayers();
  params.video[camera_idx].selected_tl = VideoSelectedTL();
  params.video[camera_idx].ulpfec = absl::GetFlag(FLAGS_use_ulpfec);
  params.video[camera_idx].flexfec = absl::GetFlag(FLAGS_use_flexfec);
  params.video[camera_idx].clip_path = VideoClip();
  params.video[camera_idx].capture_device_index = GetCaptureDevice();
  params.audio.enabled = absl::GetFlag(FLAGS_audio);
  params.audio.sync_video = absl::GetFlag(FLAGS_audio_video_sync);
  params.audio.dtx = absl::GetFlag(FLAGS_audio_dtx);
  params.logging.rtc_event_log_name = RtcEventLogName();
  params.logging.rtp_dump_name = RtpDumpName();
  params.logging.encoded_frame_base_path = EncodedFramePath();
  params.analyzer.test_label = "dual_streams";
  params.analyzer.test_durations_secs = DurationSecs();
  params.analyzer.graph_data_output_filename = OutputFilename();
  params.analyzer.graph_title = GraphTitle();
  params.config = pipe_config;

  params.screenshare[camera_idx].enabled = false;
  params.screenshare[screenshare_idx].enabled = true;
  params.screenshare[screenshare_idx].generate_slides = GenerateSlides();
  params.screenshare[screenshare_idx].slide_change_interval =
      SlideChangeInterval();
  params.screenshare[screenshare_idx].scroll_duration = ScrollDuration();
  params.screenshare[screenshare_idx].slides = Slides();

  if (VideoNumStreams() > 1 && VideoStream0().empty() &&
      VideoStream1().empty()) {
    params.ss[camera_idx].infer_streams = true;
  }

  if (ScreenshareNumStreams() > 1 && ScreenshareStream0().empty() &&
      ScreenshareStream1().empty()) {
    params.ss[screenshare_idx].infer_streams = true;
  }

  std::vector<std::string> stream_descriptors;
  stream_descriptors.push_back(ScreenshareStream0());
  stream_descriptors.push_back(ScreenshareStream1());
  std::vector<std::string> SL_descriptors;
  SL_descriptors.push_back(ScreenshareSL0());
  SL_descriptors.push_back(ScreenshareSL1());
  VideoQualityTest::FillScalabilitySettings(
      &params, screenshare_idx, stream_descriptors, ScreenshareNumStreams(),
      ScreenshareSelectedStream(), ScreenshareNumSpatialLayers(),
      ScreenshareSelectedSL(), ScreenshareInterLayerPred(), SL_descriptors);

  stream_descriptors.clear();
  stream_descriptors.push_back(VideoStream0());
  stream_descriptors.push_back(VideoStream1());
  SL_descriptors.clear();
  SL_descriptors.push_back(VideoSL0());
  SL_descriptors.push_back(VideoSL1());
  VideoQualityTest::FillScalabilitySettings(
      &params, camera_idx, stream_descriptors, VideoNumStreams(),
      VideoSelectedStream(), VideoNumSpatialLayers(), VideoSelectedSL(),
      VideoInterLayerPred(), SL_descriptors);

  auto fixture = std::make_unique<VideoQualityTest>(nullptr);
  if (DurationSecs()) {
    fixture->RunWithAnalyzer(params);
  } else {
    fixture->RunWithRenderers(params);
  }
}
}  // namespace webrtc

int main(int argc, char* argv[]) {
  ::testing::InitGoogleTest(&argc, argv);
  absl::ParseCommandLine(argc, argv);

  rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));

  // InitFieldTrialsFromString stores the char*, so the char array must outlive
  // the application.
  const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
  webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());

  webrtc::test::RunTest(webrtc::Loopback);
  return 0;
}
|
|
@@ -0,0 +1,76 @@
/*
 * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/task_queue_frame_decode_scheduler.h"

#include <algorithm>
#include <utility>

#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "rtc_base/checks.h"

namespace webrtc {

TaskQueueFrameDecodeScheduler::TaskQueueFrameDecodeScheduler(
    Clock* clock,
    TaskQueueBase* const bookkeeping_queue)
    : clock_(clock), bookkeeping_queue_(bookkeeping_queue) {
  RTC_DCHECK(clock_);
  RTC_DCHECK(bookkeeping_queue_);
}

TaskQueueFrameDecodeScheduler::~TaskQueueFrameDecodeScheduler() {
  RTC_DCHECK(stopped_);
  RTC_DCHECK(!scheduled_rtp_) << "Outstanding scheduled rtp=" << *scheduled_rtp_
                              << ". Call CancelOutstanding before destruction.";
}

void TaskQueueFrameDecodeScheduler::ScheduleFrame(
    uint32_t rtp,
    FrameDecodeTiming::FrameSchedule schedule,
    FrameReleaseCallback cb) {
  RTC_DCHECK(!stopped_) << "Can not schedule frames after stopped.";
  RTC_DCHECK(!scheduled_rtp_.has_value())
      << "Can not schedule two frames for release at the same time.";
  RTC_DCHECK(cb);
  scheduled_rtp_ = rtp;

  TimeDelta wait = std::max(
      TimeDelta::Zero(), schedule.latest_decode_time - clock_->CurrentTime());
  bookkeeping_queue_->PostDelayedHighPrecisionTask(
      SafeTask(task_safety_.flag(),
               [this, rtp, schedule, cb = std::move(cb)]() mutable {
                 RTC_DCHECK_RUN_ON(bookkeeping_queue_);
                 // If the next frame rtp has changed since this task was
                 // scheduled, this scheduled release should be skipped.
                 if (scheduled_rtp_ != rtp)
                   return;
                 scheduled_rtp_ = absl::nullopt;
                 std::move(cb)(rtp, schedule.render_time);
               }),
      wait);
}

void TaskQueueFrameDecodeScheduler::CancelOutstanding() {
  scheduled_rtp_ = absl::nullopt;
}

absl::optional<uint32_t>
TaskQueueFrameDecodeScheduler::ScheduledRtpTimestamp() {
  return scheduled_rtp_;
}

void TaskQueueFrameDecodeScheduler::Stop() {
  CancelOutstanding();
  stopped_ = true;
}

}  // namespace webrtc
@@ -0,0 +1,48 @@
/*
 * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_TASK_QUEUE_FRAME_DECODE_SCHEDULER_H_
#define VIDEO_TASK_QUEUE_FRAME_DECODE_SCHEDULER_H_

#include "video/frame_decode_scheduler.h"

namespace webrtc {

// An implementation of FrameDecodeScheduler that is based on TaskQueues. This
// is the default implementation for general use.
class TaskQueueFrameDecodeScheduler : public FrameDecodeScheduler {
 public:
  TaskQueueFrameDecodeScheduler(Clock* clock,
                                TaskQueueBase* const bookkeeping_queue);
  ~TaskQueueFrameDecodeScheduler() override;
  TaskQueueFrameDecodeScheduler(const TaskQueueFrameDecodeScheduler&) = delete;
  TaskQueueFrameDecodeScheduler& operator=(
      const TaskQueueFrameDecodeScheduler&) = delete;

  // FrameDecodeScheduler implementation.
  absl::optional<uint32_t> ScheduledRtpTimestamp() override;
  void ScheduleFrame(uint32_t rtp,
                     FrameDecodeTiming::FrameSchedule schedule,
                     FrameReleaseCallback cb) override;
  void CancelOutstanding() override;
  void Stop() override;

 private:
  Clock* const clock_;
  TaskQueueBase* const bookkeeping_queue_;

  absl::optional<uint32_t> scheduled_rtp_;
  ScopedTaskSafetyDetached task_safety_;
  bool stopped_ = false;
};

}  // namespace webrtc

#endif  // VIDEO_TASK_QUEUE_FRAME_DECODE_SCHEDULER_H_
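For orientation, a minimal usage sketch of the scheduler declared above. The clock, queue, timing values, and lambda body are illustrative assumptions; the callback signature mirrors the FrameReleaseCallback invocation in the .cc file (rtp timestamp plus render time).

// Sketch only: `clock` and `bookkeeping_queue` are assumed to already exist.
TaskQueueFrameDecodeScheduler scheduler(clock, bookkeeping_queue);
FrameDecodeTiming::FrameSchedule schedule;
schedule.latest_decode_time = clock->CurrentTime() + TimeDelta::Millis(10);
schedule.render_time = clock->CurrentTime() + TimeDelta::Millis(50);
scheduler.ScheduleFrame(/*rtp=*/90000, schedule,
                        [](uint32_t rtp, Timestamp render_time) {
                          // Hand the frame to the decoder here.
                        });
// At most one frame may be pending; Stop() cancels any outstanding release
// and must run before the scheduler is destroyed.
scheduler.Stop();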
49
TMessagesProj/jni/voip/webrtc/video/transport_adapter.cc
Normal file
@@ -0,0 +1,49 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/transport_adapter.h"

#include "rtc_base/checks.h"

namespace webrtc {
namespace internal {

TransportAdapter::TransportAdapter(Transport* transport)
    : transport_(transport), enabled_(false) {
  RTC_DCHECK(nullptr != transport);
}

TransportAdapter::~TransportAdapter() = default;

bool TransportAdapter::SendRtp(rtc::ArrayView<const uint8_t> packet,
                               const PacketOptions& options) {
  if (!enabled_.load())
    return false;

  return transport_->SendRtp(packet, options);
}

bool TransportAdapter::SendRtcp(rtc::ArrayView<const uint8_t> packet) {
  if (!enabled_.load())
    return false;

  return transport_->SendRtcp(packet);
}

void TransportAdapter::Enable() {
  enabled_.store(true);
}

void TransportAdapter::Disable() {
  enabled_.store(false);
}

}  // namespace internal
}  // namespace webrtc
42
TMessagesProj/jni/voip/webrtc/video/transport_adapter.h
Normal file
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VIDEO_TRANSPORT_ADAPTER_H_
#define VIDEO_TRANSPORT_ADAPTER_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>

#include "api/call/transport.h"

namespace webrtc {
namespace internal {

class TransportAdapter : public Transport {
 public:
  explicit TransportAdapter(Transport* transport);
  ~TransportAdapter() override;

  bool SendRtp(rtc::ArrayView<const uint8_t> packet,
               const PacketOptions& options) override;
  bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;

  void Enable();
  void Disable();

 private:
  Transport* transport_;
  std::atomic<bool> enabled_;
};
}  // namespace internal
}  // namespace webrtc

#endif  // VIDEO_TRANSPORT_ADAPTER_H_
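A short sketch of the gating this adapter provides; `real_transport`, `packet`, and `options` are placeholders, not names from this diff. The adapter starts disabled, so it can be handed to a stream before the underlying transport is ready to send.

webrtc::internal::TransportAdapter adapter(&real_transport);
bool sent = adapter.SendRtp(packet, options);  // false: forwarding disabled.
adapter.Enable();
sent = adapter.SendRtp(packet, options);  // Now forwarded to real_transport.
adapter.Disable();                        // Subsequent sends are dropped again.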
@@ -0,0 +1,41 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/unique_timestamp_counter.h"

#include <cstdint>
#include <memory>
#include <set>

namespace webrtc {
namespace {

constexpr int kMaxHistory = 1000;

}  // namespace

UniqueTimestampCounter::UniqueTimestampCounter()
    : latest_(std::make_unique<uint32_t[]>(kMaxHistory)) {}

void UniqueTimestampCounter::Add(uint32_t value) {
  if (value == last_ || !search_index_.insert(value).second) {
    // Already known.
    return;
  }
  int index = unique_seen_ % kMaxHistory;
  if (unique_seen_ >= kMaxHistory) {
    search_index_.erase(latest_[index]);
  }
  latest_[index] = value;
  last_ = value;
  ++unique_seen_;
}

}  // namespace webrtc
@@ -0,0 +1,44 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VIDEO_UNIQUE_TIMESTAMP_COUNTER_H_
#define VIDEO_UNIQUE_TIMESTAMP_COUNTER_H_

#include <cstdint>
#include <memory>
#include <set>

namespace webrtc {

// Counts number of uniquely seen frames (aka pictures, aka temporal units)
// identified by their rtp timestamp.
class UniqueTimestampCounter {
 public:
  UniqueTimestampCounter();
  UniqueTimestampCounter(const UniqueTimestampCounter&) = delete;
  UniqueTimestampCounter& operator=(const UniqueTimestampCounter&) = delete;
  ~UniqueTimestampCounter() = default;

  void Add(uint32_t timestamp);
  // Returns the number of distinct `timestamp` values passed to Add().
  int GetUniqueSeen() const { return unique_seen_; }

 private:
  int unique_seen_ = 0;
  // Stores several last seen unique values for quick search.
  std::set<uint32_t> search_index_;
  // The same unique values in the circular buffer in the insertion order.
  std::unique_ptr<uint32_t[]> latest_;
  // Last inserted value for optimization purpose.
  int64_t last_ = -1;
};

}  // namespace webrtc

#endif  // VIDEO_UNIQUE_TIMESTAMP_COUNTER_H_
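The counter above treats every distinct RTP timestamp as one frame, no matter how many packets carry it. A minimal illustration with made-up timestamps:

UniqueTimestampCounter counter;
counter.Add(3000);  // First packet of frame A.
counter.Add(3000);  // Second packet of frame A; duplicate, ignored.
counter.Add(6000);  // Frame B.
int unique_frames = counter.GetUniqueSeen();  // == 2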
1047
TMessagesProj/jni/voip/webrtc/video/video_analyzer.cc
Normal file
File diff suppressed because it is too large
321
TMessagesProj/jni/voip/webrtc/video/video_analyzer.h
Normal file
@@ -0,0 +1,321 @@
/*
 * Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VIDEO_VIDEO_ANALYZER_H_
#define VIDEO_VIDEO_ANALYZER_H_

#include <deque>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "absl/strings/string_view.h"
#include "api/numerics/samples_stats_counter.h"
#include "api/task_queue/task_queue_base.h"
#include "api/test/metrics/metric.h"
#include "api/video/video_source_interface.h"
#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "rtc_base/event.h"
#include "rtc_base/numerics/running_statistics.h"
#include "rtc_base/numerics/sequence_number_unwrapper.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"
#include "test/layer_filtering_transport.h"
#include "test/rtp_file_writer.h"

namespace webrtc {

class VideoAnalyzer : public PacketReceiver,
                      public Transport,
                      public rtc::VideoSinkInterface<VideoFrame> {
 public:
  VideoAnalyzer(test::LayerFilteringTransport* transport,
                const std::string& test_label,
                double avg_psnr_threshold,
                double avg_ssim_threshold,
                int duration_frames,
                TimeDelta test_duration,
                FILE* graph_data_output_file,
                const std::string& graph_title,
                uint32_t ssrc_to_analyze,
                uint32_t rtx_ssrc_to_analyze,
                size_t selected_stream,
                int selected_sl,
                int selected_tl,
                bool is_quick_test_enabled,
                Clock* clock,
                std::string rtp_dump_name,
                TaskQueueBase* task_queue);
  ~VideoAnalyzer();

  virtual void SetReceiver(PacketReceiver* receiver);
  void SetSource(rtc::VideoSourceInterface<VideoFrame>* video_source,
                 bool respect_sink_wants);
  void SetCall(Call* call);
  void SetSendStream(VideoSendStream* stream);
  void SetReceiveStream(VideoReceiveStreamInterface* stream);
  void SetAudioReceiveStream(AudioReceiveStreamInterface* recv_stream);

  rtc::VideoSinkInterface<VideoFrame>* InputInterface();
  rtc::VideoSourceInterface<VideoFrame>* OutputInterface();

  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
  void DeliverRtpPacket(MediaType media_type,
                        RtpPacketReceived packet,
                        PacketReceiver::OnUndemuxablePacketHandler
                            undemuxable_packet_handler) override;

  void PreEncodeOnFrame(const VideoFrame& video_frame);
  void PostEncodeOnFrame(size_t stream_id, uint32_t timestamp);

  bool SendRtp(rtc::ArrayView<const uint8_t> packet,
               const PacketOptions& options) override;

  bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;
  void OnFrame(const VideoFrame& video_frame) override;
  void Wait();

  void StartMeasuringCpuProcessTime();
  void StopMeasuringCpuProcessTime();
  void StartExcludingCpuThreadTime() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_);
  void StopExcludingCpuThreadTime() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_);
  double GetCpuUsagePercent() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_);

  test::LayerFilteringTransport* const transport_;
  PacketReceiver* receiver_;

 private:
  struct FrameComparison {
    FrameComparison();
    FrameComparison(const VideoFrame& reference,
                    const VideoFrame& render,
                    bool dropped,
                    int64_t input_time_ms,
                    int64_t send_time_ms,
                    int64_t recv_time_ms,
                    int64_t render_time_ms,
                    size_t encoded_frame_size);
    FrameComparison(bool dropped,
                    int64_t input_time_ms,
                    int64_t send_time_ms,
                    int64_t recv_time_ms,
                    int64_t render_time_ms,
                    size_t encoded_frame_size);

    absl::optional<VideoFrame> reference;
    absl::optional<VideoFrame> render;
    bool dropped;
    int64_t input_time_ms;
    int64_t send_time_ms;
    int64_t recv_time_ms;
    int64_t render_time_ms;
    size_t encoded_frame_size;
  };

  struct Sample {
    Sample(int dropped,
           int64_t input_time_ms,
           int64_t send_time_ms,
           int64_t recv_time_ms,
           int64_t render_time_ms,
           size_t encoded_frame_size,
           double psnr,
           double ssim);

    int dropped;
    int64_t input_time_ms;
    int64_t send_time_ms;
    int64_t recv_time_ms;
    int64_t render_time_ms;
    size_t encoded_frame_size;
    double psnr;
    double ssim;
  };

  // Implements VideoSinkInterface to receive captured frames from a
  // FrameGeneratorCapturer. Implements VideoSourceInterface to be able to act
  // as a source to VideoSendStream.
  // It forwards all input frames to the VideoAnalyzer for later comparison and
  // forwards the captured frames to the VideoSendStream.
  class CapturedFrameForwarder : public rtc::VideoSinkInterface<VideoFrame>,
                                 public rtc::VideoSourceInterface<VideoFrame> {
   public:
    CapturedFrameForwarder(VideoAnalyzer* analyzer,
                           Clock* clock,
                           int frames_to_capture,
                           TimeDelta test_duration);
    void SetSource(rtc::VideoSourceInterface<VideoFrame>* video_source);

   private:
    void OnFrame(const VideoFrame& video_frame)
        RTC_LOCKS_EXCLUDED(lock_) override;

    // Called when `send_stream_.SetSource()` is called.
    void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
                         const rtc::VideoSinkWants& wants)
        RTC_LOCKS_EXCLUDED(lock_) override;

    // Called by `send_stream_` when `send_stream_.SetSource()` is called.
    void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink)
        RTC_LOCKS_EXCLUDED(lock_) override;

    VideoAnalyzer* const analyzer_;
    Mutex lock_;
    rtc::VideoSinkInterface<VideoFrame>* send_stream_input_
        RTC_GUARDED_BY(lock_);
    VideoSourceInterface<VideoFrame>* video_source_;
    Clock* clock_;
    int captured_frames_ RTC_GUARDED_BY(lock_);
    const int frames_to_capture_;
    const Timestamp test_end_;
  };

  struct FrameWithPsnr {
    double psnr;
    VideoFrame frame;
  };

  bool IsInSelectedSpatialAndTemporalLayer(const RtpPacket& rtp_packet);

  void AddFrameComparison(const VideoFrame& reference,
                          const VideoFrame& render,
                          bool dropped,
                          int64_t render_time_ms)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  void PollStats() RTC_LOCKS_EXCLUDED(comparison_lock_);
  static void FrameComparisonThread(void* obj);
  bool CompareFrames();
  bool PopComparison(FrameComparison* comparison);
  // Increment counter for number of frames received for comparison.
  void FrameRecorded() RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_);
  // Returns true if all frames to be compared have been taken from the queue.
  bool AllFramesRecorded() RTC_LOCKS_EXCLUDED(comparison_lock_);
  bool AllFramesRecordedLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_);
  // Increase count of number of frames processed. Returns true if this was the
  // last frame to be processed.
  bool FrameProcessed() RTC_LOCKS_EXCLUDED(comparison_lock_);
  void PrintResults() RTC_LOCKS_EXCLUDED(lock_, comparison_lock_);
  void PerformFrameComparison(const FrameComparison& comparison)
      RTC_LOCKS_EXCLUDED(comparison_lock_);
  void PrintResult(absl::string_view result_type,
                   const SamplesStatsCounter& stats,
                   webrtc::test::Unit unit,
                   webrtc::test::ImprovementDirection improvement_direction);
  void PrintResultWithExternalMean(
      absl::string_view result_type,
      double mean,
      const SamplesStatsCounter& stats,
      webrtc::test::Unit unit,
      webrtc::test::ImprovementDirection improvement_direction);
  void PrintSamplesToFile(void) RTC_LOCKS_EXCLUDED(comparison_lock_);
  void AddCapturedFrameForComparison(const VideoFrame& video_frame)
      RTC_LOCKS_EXCLUDED(lock_, comparison_lock_);

  Call* call_;
  VideoSendStream* send_stream_;
  VideoReceiveStreamInterface* receive_stream_;
  AudioReceiveStreamInterface* audio_receive_stream_;
  CapturedFrameForwarder captured_frame_forwarder_;
  const std::string test_label_;
  FILE* const graph_data_output_file_;
  const std::string graph_title_;
  const uint32_t ssrc_to_analyze_;
  const uint32_t rtx_ssrc_to_analyze_;
  const size_t selected_stream_;
  const int selected_sl_;
  const int selected_tl_;

  Mutex comparison_lock_;
  std::vector<Sample> samples_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter sender_time_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter receiver_time_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter network_time_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter psnr_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter ssim_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter end_to_end_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter rendered_delta_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter encoded_frame_size_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter encode_frame_rate_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter encode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter encode_usage_percent_ RTC_GUARDED_BY(comparison_lock_);
  double mean_decode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter decode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter decode_time_max_ms_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter media_bitrate_bps_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter fec_bitrate_bps_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter send_bandwidth_bps_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter memory_usage_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter audio_expand_rate_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter audio_accelerate_rate_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter audio_jitter_buffer_ms_ RTC_GUARDED_BY(comparison_lock_);
  SamplesStatsCounter pixels_ RTC_GUARDED_BY(comparison_lock_);
  // Rendered frame with worst PSNR is saved for further analysis.
  absl::optional<FrameWithPsnr> worst_frame_ RTC_GUARDED_BY(comparison_lock_);
  // Freeze metrics.
  SamplesStatsCounter time_between_freezes_ RTC_GUARDED_BY(comparison_lock_);
  uint32_t freeze_count_ RTC_GUARDED_BY(comparison_lock_);
  uint32_t total_freezes_duration_ms_ RTC_GUARDED_BY(comparison_lock_);
  double total_inter_frame_delay_ RTC_GUARDED_BY(comparison_lock_);
  double total_squared_inter_frame_delay_ RTC_GUARDED_BY(comparison_lock_);

  double decode_frame_rate_ RTC_GUARDED_BY(comparison_lock_);
  double render_frame_rate_ RTC_GUARDED_BY(comparison_lock_);

  size_t last_fec_bytes_;

  Mutex lock_ RTC_ACQUIRED_BEFORE(comparison_lock_)
      RTC_ACQUIRED_BEFORE(cpu_measurement_lock_);
  const int frames_to_process_;
  const Timestamp test_end_;
  int frames_recorded_ RTC_GUARDED_BY(comparison_lock_);
  int frames_processed_ RTC_GUARDED_BY(comparison_lock_);
  int captured_frames_ RTC_GUARDED_BY(comparison_lock_);
  int dropped_frames_ RTC_GUARDED_BY(comparison_lock_);
  int dropped_frames_before_first_encode_ RTC_GUARDED_BY(lock_);
  int dropped_frames_before_rendering_ RTC_GUARDED_BY(lock_);
  int64_t last_render_time_ RTC_GUARDED_BY(comparison_lock_);
  int64_t last_render_delta_ms_ RTC_GUARDED_BY(comparison_lock_);
  int64_t last_unfreeze_time_ms_ RTC_GUARDED_BY(comparison_lock_);
  uint32_t rtp_timestamp_delta_ RTC_GUARDED_BY(lock_);

  Mutex cpu_measurement_lock_;
  int64_t cpu_time_ RTC_GUARDED_BY(cpu_measurement_lock_);
  int64_t wallclock_time_ RTC_GUARDED_BY(cpu_measurement_lock_);

  std::deque<VideoFrame> frames_ RTC_GUARDED_BY(lock_);
  absl::optional<VideoFrame> last_rendered_frame_ RTC_GUARDED_BY(lock_);
  RtpTimestampUnwrapper wrap_handler_ RTC_GUARDED_BY(lock_);
  std::map<int64_t, int64_t> send_times_ RTC_GUARDED_BY(lock_);
  std::map<int64_t, int64_t> recv_times_ RTC_GUARDED_BY(lock_);
  std::map<int64_t, size_t> encoded_frame_sizes_ RTC_GUARDED_BY(lock_);
  absl::optional<uint32_t> first_encoded_timestamp_ RTC_GUARDED_BY(lock_);
  absl::optional<uint32_t> first_sent_timestamp_ RTC_GUARDED_BY(lock_);
  const double avg_psnr_threshold_;
  const double avg_ssim_threshold_;
  bool is_quick_test_enabled_;

  std::vector<rtc::PlatformThread> comparison_thread_pool_;
  rtc::Event comparison_available_event_;
  std::deque<FrameComparison> comparisons_ RTC_GUARDED_BY(comparison_lock_);
  bool quit_ RTC_GUARDED_BY(comparison_lock_);
  rtc::Event done_;

  std::unique_ptr<VideoRtpDepacketizer> vp8_depacketizer_;
  std::unique_ptr<VideoRtpDepacketizer> vp9_depacketizer_;
  std::unique_ptr<test::RtpFileWriter> rtp_file_writer_;
  Clock* const clock_;
  const int64_t start_ms_;
  TaskQueueBase* task_queue_;
};

}  // namespace webrtc
#endif  // VIDEO_VIDEO_ANALYZER_H_
448
TMessagesProj/jni/voip/webrtc/video/video_loopback.cc
Normal file
@@ -0,0 +1,448 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "video/video_loopback.h"

#include <stdio.h>

#include <memory>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/types/optional.h"
#include "api/test/simulated_network.h"
#include "api/test/video_quality_test_fixture.h"
#include "api/transport/bitrate_settings.h"
#include "api/video_codecs/video_codec.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/run_test.h"
#include "test/test_flags.h"
#include "video/video_quality_test.h"

// Flags common with screenshare loopback, with different default values.
ABSL_FLAG(int, width, 640, "Video width.");

ABSL_FLAG(int, height, 480, "Video height.");

ABSL_FLAG(int, fps, 30, "Frames per second.");

ABSL_FLAG(int, capture_device_index, 0, "Capture device to select");

ABSL_FLAG(int, min_bitrate, 50, "Call and stream min bitrate in kbps.");

ABSL_FLAG(int, start_bitrate, 300, "Call start bitrate in kbps.");

ABSL_FLAG(int, target_bitrate, 800, "Stream target bitrate in kbps.");

ABSL_FLAG(int, max_bitrate, 800, "Call and stream max bitrate in kbps.");

ABSL_FLAG(bool,
          suspend_below_min_bitrate,
          false,
          "Suspends video below the configured min bitrate.");

ABSL_FLAG(int,
          num_temporal_layers,
          1,
          "Number of temporal layers. Set to 1-4 to override.");

ABSL_FLAG(int,
          inter_layer_pred,
          2,
          "Inter-layer prediction mode. "
          "0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");

// Flags common with screenshare loopback, with equal default values.
ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");

ABSL_FLAG(int,
          selected_tl,
          -1,
          "Temporal layer to show or analyze. -1 to disable filtering.");

ABSL_FLAG(
    int,
    duration,
    0,
    "Duration of the test in seconds. If 0, rendered will be shown instead.");

ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");

ABSL_FLAG(std::string,
          graph_title,
          "",
          "If empty, title will be generated automatically.");

ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");

ABSL_FLAG(int,
          avg_burst_loss_length,
          -1,
          "Average burst length of lost packets.");

ABSL_FLAG(int,
          link_capacity,
          0,
          "Capacity (kbps) of the fake link. 0 means infinite.");

ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");

ABSL_FLAG(int,
          avg_propagation_delay_ms,
          0,
          "Average link propagation delay in ms.");

ABSL_FLAG(std::string,
          rtc_event_log_name,
          "",
          "Filename for rtc event log. Two files "
          "with \"_send\" and \"_recv\" suffixes will be created.");

ABSL_FLAG(std::string,
          rtp_dump_name,
          "",
          "Filename for dumped received RTP stream.");

ABSL_FLAG(int,
          std_propagation_delay_ms,
          0,
          "Link propagation delay standard deviation in ms.");

ABSL_FLAG(int, num_streams, 0, "Number of streams to show or analyze.");

ABSL_FLAG(int,
          selected_stream,
          0,
          "ID of the stream to show or analyze. "
          "Set to the number of streams to show them all.");

ABSL_FLAG(int, num_spatial_layers, 1, "Number of spatial layers to use.");

ABSL_FLAG(int,
          selected_sl,
          -1,
          "Spatial layer to show or analyze. -1 to disable filtering.");

ABSL_FLAG(std::string,
          stream0,
          "",
          "Comma separated values describing VideoStream for stream #0.");

ABSL_FLAG(std::string,
          stream1,
          "",
          "Comma separated values describing VideoStream for stream #1.");

ABSL_FLAG(std::string,
          sl0,
          "",
          "Comma separated values describing SpatialLayer for layer #0.");

ABSL_FLAG(std::string,
          sl1,
          "",
          "Comma separated values describing SpatialLayer for layer #1.");

ABSL_FLAG(std::string,
          sl2,
          "",
          "Comma separated values describing SpatialLayer for layer #2.");

ABSL_FLAG(std::string,
          encoded_frame_path,
          "",
          "The base path for encoded frame logs. Created files will have "
          "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");

ABSL_FLAG(bool, logs, false, "print logs to stderr");

ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation");

ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");

ABSL_FLAG(bool, dependency_descriptor, false, "Use the dependency descriptor.");

ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur");

ABSL_FLAG(bool, use_ulpfec, false, "Use RED+ULPFEC forward error correction.");

ABSL_FLAG(bool, use_flexfec, false, "Use FlexFEC forward error correction.");

ABSL_FLAG(bool, audio, false, "Add audio stream");

ABSL_FLAG(bool,
          use_real_adm,
          false,
          "Use real ADM instead of fake (no effect if audio is false)");

ABSL_FLAG(bool,
          audio_video_sync,
          false,
          "Sync audio and video stream (no effect if"
          " audio is false)");

ABSL_FLAG(bool,
          audio_dtx,
          false,
          "Enable audio DTX (no effect if audio is false)");

ABSL_FLAG(bool, video, true, "Add video stream");

// Video-specific flags.
ABSL_FLAG(std::string,
          clip,
          "",
          "Name of the clip to show. If empty, using chroma generator.");

namespace webrtc {
namespace {

size_t Width() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_width));
}

size_t Height() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_height));
}

int Fps() {
  return absl::GetFlag(FLAGS_fps);
}

size_t GetCaptureDevice() {
  return static_cast<size_t>(absl::GetFlag(FLAGS_capture_device_index));
}

int MinBitrateKbps() {
  return absl::GetFlag(FLAGS_min_bitrate);
}

int StartBitrateKbps() {
  return absl::GetFlag(FLAGS_start_bitrate);
}

int TargetBitrateKbps() {
  return absl::GetFlag(FLAGS_target_bitrate);
}

int MaxBitrateKbps() {
  return absl::GetFlag(FLAGS_max_bitrate);
}

int NumTemporalLayers() {
  return absl::GetFlag(FLAGS_num_temporal_layers);
}

InterLayerPredMode InterLayerPred() {
  if (absl::GetFlag(FLAGS_inter_layer_pred) == 0) {
    return InterLayerPredMode::kOn;
  } else if (absl::GetFlag(FLAGS_inter_layer_pred) == 1) {
    return InterLayerPredMode::kOff;
  } else {
    RTC_DCHECK_EQ(absl::GetFlag(FLAGS_inter_layer_pred), 2);
    return InterLayerPredMode::kOnKeyPic;
  }
}

std::string Codec() {
  return absl::GetFlag(FLAGS_codec);
}

int SelectedTL() {
  return absl::GetFlag(FLAGS_selected_tl);
}

int DurationSecs() {
  return absl::GetFlag(FLAGS_duration);
}

std::string OutputFilename() {
  return absl::GetFlag(FLAGS_output_filename);
}

std::string GraphTitle() {
  return absl::GetFlag(FLAGS_graph_title);
}

int LossPercent() {
  return static_cast<int>(absl::GetFlag(FLAGS_loss_percent));
}

int AvgBurstLossLength() {
  return static_cast<int>(absl::GetFlag(FLAGS_avg_burst_loss_length));
}

int LinkCapacityKbps() {
  return static_cast<int>(absl::GetFlag(FLAGS_link_capacity));
}

int QueueSize() {
  return static_cast<int>(absl::GetFlag(FLAGS_queue_size));
}

int AvgPropagationDelayMs() {
  return static_cast<int>(absl::GetFlag(FLAGS_avg_propagation_delay_ms));
}

std::string RtcEventLogName() {
  return absl::GetFlag(FLAGS_rtc_event_log_name);
}

std::string RtpDumpName() {
  return absl::GetFlag(FLAGS_rtp_dump_name);
}

int StdPropagationDelayMs() {
  return absl::GetFlag(FLAGS_std_propagation_delay_ms);
}

int NumStreams() {
  return absl::GetFlag(FLAGS_num_streams);
}

int SelectedStream() {
  return absl::GetFlag(FLAGS_selected_stream);
}

int NumSpatialLayers() {
  return absl::GetFlag(FLAGS_num_spatial_layers);
}

int SelectedSL() {
  return absl::GetFlag(FLAGS_selected_sl);
}

std::string Stream0() {
  return absl::GetFlag(FLAGS_stream0);
}

std::string Stream1() {
  return absl::GetFlag(FLAGS_stream1);
}

std::string SL0() {
  return absl::GetFlag(FLAGS_sl0);
}

std::string SL1() {
  return absl::GetFlag(FLAGS_sl1);
}

std::string SL2() {
  return absl::GetFlag(FLAGS_sl2);
}

std::string EncodedFramePath() {
  return absl::GetFlag(FLAGS_encoded_frame_path);
}

std::string Clip() {
  return absl::GetFlag(FLAGS_clip);
}

}  // namespace

void Loopback() {
  BuiltInNetworkBehaviorConfig pipe_config;
  pipe_config.loss_percent = LossPercent();
  pipe_config.avg_burst_loss_length = AvgBurstLossLength();
  pipe_config.link_capacity_kbps = LinkCapacityKbps();
  pipe_config.queue_length_packets = QueueSize();
  pipe_config.queue_delay_ms = AvgPropagationDelayMs();
  pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
  pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);

  BitrateConstraints call_bitrate_config;
  call_bitrate_config.min_bitrate_bps = MinBitrateKbps() * 1000;
  call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
  call_bitrate_config.max_bitrate_bps = -1;  // Don't cap bandwidth estimate.

  VideoQualityTest::Params params;
  params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe);
  params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor);
  params.call.dependency_descriptor =
      absl::GetFlag(FLAGS_dependency_descriptor);
  params.call.call_bitrate_config = call_bitrate_config;

  params.video[0].enabled = absl::GetFlag(FLAGS_video);
  params.video[0].width = Width();
  params.video[0].height = Height();
  params.video[0].fps = Fps();
  params.video[0].min_bitrate_bps = MinBitrateKbps() * 1000;
  params.video[0].target_bitrate_bps = TargetBitrateKbps() * 1000;
  params.video[0].max_bitrate_bps = MaxBitrateKbps() * 1000;
  params.video[0].suspend_below_min_bitrate =
      absl::GetFlag(FLAGS_suspend_below_min_bitrate);
  params.video[0].codec = Codec();
  params.video[0].num_temporal_layers = NumTemporalLayers();
  params.video[0].selected_tl = SelectedTL();
  params.video[0].min_transmit_bps = 0;
  params.video[0].ulpfec = absl::GetFlag(FLAGS_use_ulpfec);
  params.video[0].flexfec = absl::GetFlag(FLAGS_use_flexfec);
  params.video[0].automatic_scaling = NumStreams() < 2;
  params.video[0].clip_path = Clip();
  params.video[0].capture_device_index = GetCaptureDevice();
  params.audio.enabled = absl::GetFlag(FLAGS_audio);
  params.audio.sync_video = absl::GetFlag(FLAGS_audio_video_sync);
  params.audio.dtx = absl::GetFlag(FLAGS_audio_dtx);
  params.audio.use_real_adm = absl::GetFlag(FLAGS_use_real_adm);
  params.logging.rtc_event_log_name = RtcEventLogName();
  params.logging.rtp_dump_name = RtpDumpName();
  params.logging.encoded_frame_base_path = EncodedFramePath();
  params.screenshare[0].enabled = false;
  params.analyzer.test_label = "video";
  params.analyzer.test_durations_secs = DurationSecs();
  params.analyzer.graph_data_output_filename = OutputFilename();
  params.analyzer.graph_title = GraphTitle();
  params.config = pipe_config;

  if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) {
    params.ss[0].infer_streams = true;
  }

  std::vector<std::string> stream_descriptors;
  stream_descriptors.push_back(Stream0());
  stream_descriptors.push_back(Stream1());
  std::vector<std::string> SL_descriptors;
  SL_descriptors.push_back(SL0());
  SL_descriptors.push_back(SL1());
  SL_descriptors.push_back(SL2());
  VideoQualityTest::FillScalabilitySettings(
      &params, 0, stream_descriptors, NumStreams(), SelectedStream(),
      NumSpatialLayers(), SelectedSL(), InterLayerPred(), SL_descriptors);

  auto fixture = std::make_unique<VideoQualityTest>(nullptr);
  if (DurationSecs()) {
    fixture->RunWithAnalyzer(params);
  } else {
    fixture->RunWithRenderers(params);
  }
}

int RunLoopbackTest(int argc, char* argv[]) {
  ::testing::InitGoogleTest(&argc, argv);
  absl::ParseCommandLine(argc, argv);

  rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));

  // InitFieldTrialsFromString stores the char*, so the char array must outlive
  // the application.
  const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
  webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());

  webrtc::test::RunTest(webrtc::Loopback);
  return 0;
}
}  // namespace webrtc
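The field-trial comment near the end of RunLoopbackTest() is worth spelling out: InitFieldTrialsFromString() keeps the raw char pointer rather than copying the string. A sketch of the pitfall, under that assumption:

// BAD (sketch): the temporary std::string dies at the end of the statement,
// leaving the field-trial machinery with a dangling pointer.
// webrtc::field_trial::InitFieldTrialsFromString(
//     absl::GetFlag(FLAGS_force_fieldtrials).c_str());

// GOOD: back the pointer with storage that outlives all field-trial lookups,
// as RunLoopbackTest() does with a std::string that lives across RunTest().
const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());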
19
TMessagesProj/jni/voip/webrtc/video/video_loopback.h
Normal file
@@ -0,0 +1,19 @@
/*
 * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_VIDEO_LOOPBACK_H_
#define VIDEO_VIDEO_LOOPBACK_H_

namespace webrtc {
// Expose the main test method.
int RunLoopbackTest(int argc, char* argv[]);
}  // namespace webrtc

#endif  // VIDEO_VIDEO_LOOPBACK_H_
15
TMessagesProj/jni/voip/webrtc/video/video_loopback_main.cc
Normal file
@@ -0,0 +1,15 @@
/*
 * Copyright 2019 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/video_loopback.h"

int main(int argc, char* argv[]) {
  webrtc::RunLoopbackTest(argc, argv);
}
17
TMessagesProj/jni/voip/webrtc/video/video_loopback_main.mm
Normal file
@@ -0,0 +1,17 @@
/*
 * Copyright 2019 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import "video/video_loopback.h"

int main(int argc, char* argv[]) {
  @autoreleasepool {
    webrtc::RunLoopbackTest(argc, argv);
  }
}
296
TMessagesProj/jni/voip/webrtc/video/video_quality_observer2.cc
Normal file
@@ -0,0 +1,296 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/video_quality_observer2.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string>

#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "system_wrappers/include/metrics.h"
#include "video/video_receive_stream2.h"

namespace webrtc {
namespace internal {
const uint32_t VideoQualityObserver::kMinFrameSamplesToDetectFreeze = 5;
const uint32_t VideoQualityObserver::kMinIncreaseForFreezeMs = 150;
const uint32_t VideoQualityObserver::kAvgInterframeDelaysWindowSizeFrames = 30;

namespace {
constexpr int kMinVideoDurationMs = 3000;
constexpr int kMinRequiredSamples = 1;
constexpr int kPixelsInHighResolution =
    960 * 540;  // CPU-adapted HD still counts.
constexpr int kPixelsInMediumResolution = 640 * 360;
constexpr int kBlockyQpThresholdVp8 = 70;
constexpr int kBlockyQpThresholdVp9 = 180;
constexpr int kMaxNumCachedBlockyFrames = 100;
// TODO(ilnik): Add H264/HEVC thresholds.
}  // namespace

VideoQualityObserver::VideoQualityObserver()
    : last_frame_rendered_ms_(-1),
      num_frames_rendered_(0),
      first_frame_rendered_ms_(-1),
      last_frame_pixels_(0),
      is_last_frame_blocky_(false),
      last_unfreeze_time_ms_(0),
      render_interframe_delays_(kAvgInterframeDelaysWindowSizeFrames),
      sum_squared_interframe_delays_secs_(0.0),
      time_in_resolution_ms_(3, 0),
      current_resolution_(Resolution::Low),
      num_resolution_downgrades_(0),
      time_in_blocky_video_ms_(0),
      is_paused_(false) {}

void VideoQualityObserver::UpdateHistograms(bool screenshare) {
  // TODO(bugs.webrtc.org/11489): Called on the decoder thread - which _might_
  // be the same as the construction thread.

  // Don't report anything on an empty video stream.
  if (num_frames_rendered_ == 0) {
    return;
  }

  char log_stream_buf[2 * 1024];
  rtc::SimpleStringBuilder log_stream(log_stream_buf);

  if (last_frame_rendered_ms_ > last_unfreeze_time_ms_) {
    smooth_playback_durations_.Add(last_frame_rendered_ms_ -
                                   last_unfreeze_time_ms_);
  }

  std::string uma_prefix =
      screenshare ? "WebRTC.Video.Screenshare" : "WebRTC.Video";

  auto mean_time_between_freezes =
      smooth_playback_durations_.Avg(kMinRequiredSamples);
  if (mean_time_between_freezes) {
    RTC_HISTOGRAM_COUNTS_SPARSE_100000(uma_prefix + ".MeanTimeBetweenFreezesMs",
                                       *mean_time_between_freezes);
    log_stream << uma_prefix << ".MeanTimeBetweenFreezesMs "
               << *mean_time_between_freezes << "\n";
  }
  auto avg_freeze_length = freezes_durations_.Avg(kMinRequiredSamples);
  if (avg_freeze_length) {
    RTC_HISTOGRAM_COUNTS_SPARSE_100000(uma_prefix + ".MeanFreezeDurationMs",
                                       *avg_freeze_length);
    log_stream << uma_prefix << ".MeanFreezeDurationMs " << *avg_freeze_length
               << "\n";
  }

  int64_t video_duration_ms =
      last_frame_rendered_ms_ - first_frame_rendered_ms_;

  if (video_duration_ms >= kMinVideoDurationMs) {
    int time_spent_in_hd_percentage = static_cast<int>(
        time_in_resolution_ms_[Resolution::High] * 100 / video_duration_ms);
    RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".TimeInHdPercentage",
                                    time_spent_in_hd_percentage);
    log_stream << uma_prefix << ".TimeInHdPercentage "
               << time_spent_in_hd_percentage << "\n";

    int time_with_blocky_video_percentage =
        static_cast<int>(time_in_blocky_video_ms_ * 100 / video_duration_ms);
    RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".TimeInBlockyVideoPercentage",
                                    time_with_blocky_video_percentage);
    log_stream << uma_prefix << ".TimeInBlockyVideoPercentage "
               << time_with_blocky_video_percentage << "\n";

    int num_resolution_downgrades_per_minute =
        num_resolution_downgrades_ * 60000 / video_duration_ms;
    if (!screenshare) {
      RTC_HISTOGRAM_COUNTS_SPARSE_100(
          uma_prefix + ".NumberResolutionDownswitchesPerMinute",
          num_resolution_downgrades_per_minute);
      log_stream << uma_prefix << ".NumberResolutionDownswitchesPerMinute "
                 << num_resolution_downgrades_per_minute << "\n";
    }

    int num_freezes_per_minute =
        freezes_durations_.NumSamples() * 60000 / video_duration_ms;
    RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".NumberFreezesPerMinute",
                                    num_freezes_per_minute);
    log_stream << uma_prefix << ".NumberFreezesPerMinute "
               << num_freezes_per_minute << "\n";

    if (sum_squared_interframe_delays_secs_ > 0.0) {
      int harmonic_framerate_fps = std::round(
          video_duration_ms / (1000 * sum_squared_interframe_delays_secs_));
      RTC_HISTOGRAM_COUNTS_SPARSE_100(uma_prefix + ".HarmonicFrameRate",
                                      harmonic_framerate_fps);
      log_stream << uma_prefix << ".HarmonicFrameRate "
                 << harmonic_framerate_fps << "\n";
    }
  }
  RTC_LOG(LS_INFO) << log_stream.str();
}

void VideoQualityObserver::OnRenderedFrame(
    const VideoFrameMetaData& frame_meta) {
  RTC_DCHECK_LE(last_frame_rendered_ms_, frame_meta.decode_timestamp.ms());
  RTC_DCHECK_LE(last_unfreeze_time_ms_, frame_meta.decode_timestamp.ms());

  if (num_frames_rendered_ == 0) {
    first_frame_rendered_ms_ = last_unfreeze_time_ms_ =
        frame_meta.decode_timestamp.ms();
  }

  auto blocky_frame_it = blocky_frames_.find(frame_meta.rtp_timestamp);

  if (num_frames_rendered_ > 0) {
    // Process inter-frame delay.
    const int64_t interframe_delay_ms =
        frame_meta.decode_timestamp.ms() - last_frame_rendered_ms_;
    const double interframe_delays_secs = interframe_delay_ms / 1000.0;

    // Sum of squared inter frame intervals is used to calculate the harmonic
    // frame rate metric. The metric aims to reflect overall experience related
    // to smoothness of video playback and includes both freezes and pauses.
    sum_squared_interframe_delays_secs_ +=
        interframe_delays_secs * interframe_delays_secs;

    if (!is_paused_) {
      render_interframe_delays_.AddSample(interframe_delay_ms);

      bool was_freeze = false;
      if (render_interframe_delays_.Size() >= kMinFrameSamplesToDetectFreeze) {
        const absl::optional<int64_t> avg_interframe_delay =
            render_interframe_delays_.GetAverageRoundedDown();
        RTC_DCHECK(avg_interframe_delay);
        was_freeze = interframe_delay_ms >=
                     std::max(3 * *avg_interframe_delay,
                              *avg_interframe_delay + kMinIncreaseForFreezeMs);
      }

      if (was_freeze) {
        freezes_durations_.Add(interframe_delay_ms);
        smooth_playback_durations_.Add(last_frame_rendered_ms_ -
                                       last_unfreeze_time_ms_);
        last_unfreeze_time_ms_ = frame_meta.decode_timestamp.ms();
      } else {
        // Count spatial metrics if there was no freeze.
        time_in_resolution_ms_[current_resolution_] += interframe_delay_ms;

        if (is_last_frame_blocky_) {
          time_in_blocky_video_ms_ += interframe_delay_ms;
        }
      }
    }
  }

  if (is_paused_) {
    // If the stream was paused since the previous frame, do not count the
    // pause toward smooth playback. Explicitly count the part before it and
    // start the new smooth playback interval from this frame.
    is_paused_ = false;
    if (last_frame_rendered_ms_ > last_unfreeze_time_ms_) {
      smooth_playback_durations_.Add(last_frame_rendered_ms_ -
                                     last_unfreeze_time_ms_);
    }
    last_unfreeze_time_ms_ = frame_meta.decode_timestamp.ms();

    if (num_frames_rendered_ > 0) {
      pauses_durations_.Add(frame_meta.decode_timestamp.ms() -
                            last_frame_rendered_ms_);
    }
  }

  int64_t pixels = frame_meta.width * frame_meta.height;
  if (pixels >= kPixelsInHighResolution) {
    current_resolution_ = Resolution::High;
  } else if (pixels >= kPixelsInMediumResolution) {
    current_resolution_ = Resolution::Medium;
  } else {
    current_resolution_ = Resolution::Low;
  }

  if (pixels < last_frame_pixels_) {
    ++num_resolution_downgrades_;
  }

  last_frame_pixels_ = pixels;
  last_frame_rendered_ms_ = frame_meta.decode_timestamp.ms();

  is_last_frame_blocky_ = blocky_frame_it != blocky_frames_.end();
  if (is_last_frame_blocky_) {
    blocky_frames_.erase(blocky_frames_.begin(), ++blocky_frame_it);
  }

  ++num_frames_rendered_;
}

void VideoQualityObserver::OnDecodedFrame(uint32_t rtp_frame_timestamp,
                                          absl::optional<uint8_t> qp,
                                          VideoCodecType codec) {
  if (!qp)
    return;

  absl::optional<int> qp_blocky_threshold;
  // TODO(ilnik): add other codec types when we have QP for them.
  switch (codec) {
    case kVideoCodecVP8:
      qp_blocky_threshold = kBlockyQpThresholdVp8;
      break;
    case kVideoCodecVP9:
      qp_blocky_threshold = kBlockyQpThresholdVp9;
      break;
    default:
      qp_blocky_threshold = absl::nullopt;
  }

  RTC_DCHECK(blocky_frames_.find(rtp_frame_timestamp) == blocky_frames_.end());

  if (qp_blocky_threshold && *qp > *qp_blocky_threshold) {
    // Cache blocky frame. Its duration will be calculated in render callback.
    if (blocky_frames_.size() > kMaxNumCachedBlockyFrames) {
      RTC_LOG(LS_WARNING) << "Overflow of blocky frames cache.";
      blocky_frames_.erase(
          blocky_frames_.begin(),
          std::next(blocky_frames_.begin(), kMaxNumCachedBlockyFrames / 2));
    }

    blocky_frames_.insert(rtp_frame_timestamp);
  }
}

void VideoQualityObserver::OnStreamInactive() {
  is_paused_ = true;
}

uint32_t VideoQualityObserver::NumFreezes() const {
  return freezes_durations_.NumSamples();
}

uint32_t VideoQualityObserver::NumPauses() const {
  return pauses_durations_.NumSamples();
}

uint32_t VideoQualityObserver::TotalFreezesDurationMs() const {
  return freezes_durations_.Sum(kMinRequiredSamples).value_or(0);
}

uint32_t VideoQualityObserver::TotalPausesDurationMs() const {
  return pauses_durations_.Sum(kMinRequiredSamples).value_or(0);
}

uint32_t VideoQualityObserver::TotalFramesDurationMs() const {
  return last_frame_rendered_ms_ - first_frame_rendered_ms_;
}

double VideoQualityObserver::SumSquaredFrameDurationsSec() const {
  return sum_squared_interframe_delays_secs_;
}

}  // namespace internal
}  // namespace webrtc
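To make the freeze rule in OnRenderedFrame() concrete, here is the threshold arithmetic with example numbers (the values are assumptions for illustration, not measurements):

// With an average inter-frame delay of 33 ms (~30 fps), a gap only counts
// as a freeze from max(3 * 33, 33 + 150) = 183 ms upward.
int64_t avg_interframe_delay_ms = 33;  // Example window average.
int64_t interframe_delay_ms = 200;     // Example gap under test.
bool was_freeze =
    interframe_delay_ms >=
    std::max(int64_t{3} * avg_interframe_delay_ms,
             avg_interframe_delay_ms + 150);  // 150 = kMinIncreaseForFreezeMs.
// was_freeze == true, since 200 >= 183.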
101
TMessagesProj/jni/voip/webrtc/video/video_quality_observer2.h
Normal file
|
|
@ -0,0 +1,101 @@
|
|||
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_VIDEO_QUALITY_OBSERVER2_H_
#define VIDEO_VIDEO_QUALITY_OBSERVER2_H_

#include <stdint.h>

#include <set>
#include <vector>

#include "absl/types/optional.h"
#include "api/video/video_codec_type.h"
#include "api/video/video_content_type.h"
#include "rtc_base/numerics/moving_average.h"
#include "rtc_base/numerics/sample_counter.h"

namespace webrtc {
namespace internal {
// Declared in video_receive_stream2.h.
struct VideoFrameMetaData;

// Calculates spatial and temporal quality metrics and reports them to UMA
// stats.
class VideoQualityObserver {
 public:
  // Use either VideoQualityObserver::kBlockyQpThresholdVp8 or
  // VideoQualityObserver::kBlockyQpThresholdVp9.
  VideoQualityObserver();
  ~VideoQualityObserver() = default;

  void OnDecodedFrame(uint32_t rtp_frame_timestamp,
                      absl::optional<uint8_t> qp,
                      VideoCodecType codec);

  void OnRenderedFrame(const VideoFrameMetaData& frame_meta);

  void OnStreamInactive();

  uint32_t NumFreezes() const;
  uint32_t NumPauses() const;
  uint32_t TotalFreezesDurationMs() const;
  uint32_t TotalPausesDurationMs() const;
  uint32_t TotalFramesDurationMs() const;
  double SumSquaredFrameDurationsSec() const;

  // Set `screenshare` to true if the last decoded frame was for screenshare.
  void UpdateHistograms(bool screenshare);

  static const uint32_t kMinFrameSamplesToDetectFreeze;
  static const uint32_t kMinIncreaseForFreezeMs;
  static const uint32_t kAvgInterframeDelaysWindowSizeFrames;

 private:
  enum Resolution {
    Low = 0,
    Medium = 1,
    High = 2,
  };

  int64_t last_frame_rendered_ms_;
  int64_t num_frames_rendered_;
  int64_t first_frame_rendered_ms_;
  int64_t last_frame_pixels_;
  bool is_last_frame_blocky_;
  // Decoded timestamp of the last delayed frame.
  int64_t last_unfreeze_time_ms_;
  rtc::MovingAverage render_interframe_delays_;
  double sum_squared_interframe_delays_secs_;
  // An inter-frame delay is counted as a freeze if it's significantly longer
  // than the average inter-frame delay.
  rtc::SampleCounter freezes_durations_;
  rtc::SampleCounter pauses_durations_;
  // Time between freezes.
  rtc::SampleCounter smooth_playback_durations_;
  // Counters for time spent in different resolutions. The time between each
  // two consecutive frames is counted toward the bin corresponding to the
  // first frame's resolution.
  std::vector<int64_t> time_in_resolution_ms_;
  // Resolution of the last decoded frame. Resolution enum is used as an index.
  Resolution current_resolution_;
  int num_resolution_downgrades_;
  // Similar to resolution, time spent in high-QP video.
  int64_t time_in_blocky_video_ms_;
  bool is_paused_;

  // Set of decoded frames with high QP value.
  std::set<int64_t> blocky_frames_;
};

}  // namespace internal
}  // namespace webrtc

#endif  // VIDEO_VIDEO_QUALITY_OBSERVER2_H_
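
The freeze counter above follows a relative rule: a render gap is only counted as a freeze when it clearly exceeds the recent average inter-frame delay. A sketch of such a test, with assumed threshold values in the spirit of kMinIncreaseForFreezeMs (the authoritative constants live in video_quality_observer2.cc, not this header):

  #include <algorithm>
  #include <cstdint>

  // Illustrative only; the multiplier and minimum increase are assumptions.
  bool IsFreeze(int64_t delay_ms, int64_t avg_delay_ms) {
    const int64_t kMinIncreaseForFreezeMs = 150;  // assumed value
    // A gap is a freeze if it is both several times the average and at least
    // kMinIncreaseForFreezeMs longer than the average.
    return delay_ms >= std::max(3 * avg_delay_ms,
                                avg_delay_ms + kMinIncreaseForFreezeMs);
  }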
1102
TMessagesProj/jni/voip/webrtc/video/video_receive_stream2.cc
Normal file
File diff suppressed because it is too large
358
TMessagesProj/jni/voip/webrtc/video/video_receive_stream2.h
Normal file
@@ -0,0 +1,358 @@
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_VIDEO_RECEIVE_STREAM2_H_
#define VIDEO_VIDEO_RECEIVE_STREAM2_H_

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/environment/environment.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "api/video/recordable_encoded_frame.h"
#include "call/call.h"
#include "call/rtp_packet_sink_interface.h"
#include "call/syncable.h"
#include "call/video_receive_stream.h"
#include "modules/rtp_rtcp/source/source_tracker.h"
#include "modules/video_coding/nack_requester.h"
#include "modules/video_coding/video_receiver2.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "video/receive_statistics_proxy.h"
#include "video/rtp_streams_synchronizer2.h"
#include "video/rtp_video_stream_receiver2.h"
#include "video/transport_adapter.h"
#include "video/video_stream_buffer_controller.h"
#include "video/video_stream_decoder2.h"

namespace webrtc {

class RtpStreamReceiverInterface;
class RtpStreamReceiverControllerInterface;
class RtxReceiveStream;
class VCMTiming;

constexpr TimeDelta kMaxWaitForKeyFrame = TimeDelta::Millis(200);
constexpr TimeDelta kMaxWaitForFrame = TimeDelta::Seconds(3);

namespace internal {

class CallStats;

// Utility struct for grabbing metadata from a VideoFrame and processing it
// asynchronously without needing the actual frame data.
// Additionally, the caller can bundle information from the current clock
// when the metadata is captured, for accurate reporting without needing
// multiple calls to clock->Now().
struct VideoFrameMetaData {
  VideoFrameMetaData(const webrtc::VideoFrame& frame, Timestamp now)
      : rtp_timestamp(frame.timestamp()),
        timestamp_us(frame.timestamp_us()),
        ntp_time_ms(frame.ntp_time_ms()),
        width(frame.width()),
        height(frame.height()),
        decode_timestamp(now) {}

  int64_t render_time_ms() const {
    return timestamp_us / rtc::kNumMicrosecsPerMillisec;
  }

  const uint32_t rtp_timestamp;
  const int64_t timestamp_us;
  const int64_t ntp_time_ms;
  const int width;
  const int height;

  const Timestamp decode_timestamp;
};

class VideoReceiveStream2
    : public webrtc::VideoReceiveStreamInterface,
      public rtc::VideoSinkInterface<VideoFrame>,
      public RtpVideoStreamReceiver2::OnCompleteFrameCallback,
      public Syncable,
      public CallStatsObserver,
      public FrameSchedulingReceiver {
 public:
  // The maximum number of buffered encoded frames when encoded output is
  // configured.
  static constexpr size_t kBufferedEncodedFramesMaxSize = 60;

  VideoReceiveStream2(const Environment& env,
                      Call* call,
                      int num_cpu_cores,
                      PacketRouter* packet_router,
                      VideoReceiveStreamInterface::Config config,
                      CallStats* call_stats,
                      std::unique_ptr<VCMTiming> timing,
                      NackPeriodicProcessor* nack_periodic_processor,
                      DecodeSynchronizer* decode_sync);
  // Destruction happens on the worker thread. Prior to destruction the caller
  // must ensure that a registration with the transport has been cleared. See
  // `RegisterWithTransport` for details.
  // TODO(tommi): As a further improvement to this, performing the full
  // destruction on the network thread could be made the default.
  ~VideoReceiveStream2() override;

  // Called on `packet_sequence_checker_` to register/unregister with the
  // network transport.
  void RegisterWithTransport(
      RtpStreamReceiverControllerInterface* receiver_controller);
  // If registration has previously been done (via `RegisterWithTransport`),
  // then `UnregisterFromTransport` must be called prior to destruction, on
  // the network thread.
  void UnregisterFromTransport();

  // Accessor for the a/v sync group. This value may change and the caller
  // must be on the packet delivery thread.
  const std::string& sync_group() const;

  // Getters for const remote SSRC values that won't change throughout the
  // object's lifetime.
  uint32_t remote_ssrc() const { return config_.rtp.remote_ssrc; }
  // RTX ssrc can be updated.
  uint32_t rtx_ssrc() const {
    RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
    return updated_rtx_ssrc_.value_or(config_.rtp.rtx_ssrc);
  }

  void SignalNetworkState(NetworkState state);
  bool DeliverRtcp(const uint8_t* packet, size_t length);

  void SetSync(Syncable* audio_syncable);

  // Updates the `rtp_video_stream_receiver_`'s `local_ssrc` when the default
  // sender has been created, changed or removed.
  void SetLocalSsrc(uint32_t local_ssrc);

  // Implements webrtc::VideoReceiveStreamInterface.
  void Start() override;
  void Stop() override;

  void SetRtcpMode(RtcpMode mode) override;
  void SetFlexFecProtection(RtpPacketSinkInterface* flexfec_sink) override;
  void SetLossNotificationEnabled(bool enabled) override;
  void SetNackHistory(TimeDelta history) override;
  void SetProtectionPayloadTypes(int red_payload_type,
                                 int ulpfec_payload_type) override;
  void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) override;
  void SetAssociatedPayloadTypes(
      std::map<int, int> associated_payload_types) override;

  webrtc::VideoReceiveStreamInterface::Stats GetStats() const override;

  // SetBaseMinimumPlayoutDelayMs and GetBaseMinimumPlayoutDelayMs are called
  // from the webrtc/api level and requested by user code, e.g. the blink/js
  // layer in Chromium.
  bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override;
  int GetBaseMinimumPlayoutDelayMs() const override;

  void SetFrameDecryptor(
      rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) override;
  void SetDepacketizerToDecoderFrameTransformer(
      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) override;

  // Implements rtc::VideoSinkInterface<VideoFrame>.
  void OnFrame(const VideoFrame& video_frame) override;

  // Implements RtpVideoStreamReceiver2::OnCompleteFrameCallback.
  void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override;

  // Implements CallStatsObserver::OnRttUpdate.
  void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;

  // Implements Syncable.
  uint32_t id() const override;
  absl::optional<Syncable::Info> GetInfo() const override;
  bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp,
                              int64_t* time_ms) const override;
  void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms,
                                         int64_t time_ms) override;

  // SetMinimumPlayoutDelay is only called by A/V sync.
  bool SetMinimumPlayoutDelay(int delay_ms) override;

  std::vector<webrtc::RtpSource> GetSources() const override;

  RecordingState SetAndGetRecordingState(RecordingState state,
                                         bool generate_key_frame) override;
  void GenerateKeyFrame() override;

  void UpdateRtxSsrc(uint32_t ssrc) override;

 private:
  // FrameSchedulingReceiver implementation.
  // Called on packet sequence.
  void OnEncodedFrame(std::unique_ptr<EncodedFrame> frame) override;
  // Called on packet sequence.
  void OnDecodableFrameTimeout(TimeDelta wait) override;

  void CreateAndRegisterExternalDecoder(const Decoder& decoder);

  struct DecodeFrameResult {
    // True if the decoder returned code WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME,
    // or if the decoder failed and a keyframe is required. When true, a
    // keyframe request should be sent even if a keyframe request was sent
    // recently.
    bool force_request_key_frame;

    // The picture id of the frame that was decoded, or nullopt if the frame
    // was not decoded.
    absl::optional<int64_t> decoded_frame_picture_id;

    // True if the next frame decoded must be a keyframe. This value will set
    // the value of `keyframe_required_`, which will force the frame buffer to
    // drop all frames that are not keyframes.
    bool keyframe_required;
  };

  DecodeFrameResult HandleEncodedFrameOnDecodeQueue(
      std::unique_ptr<EncodedFrame> frame,
      bool keyframe_request_is_due,
      bool keyframe_required) RTC_RUN_ON(decode_sequence_checker_);
  void UpdatePlayoutDelays() const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_sequence_checker_);
  void RequestKeyFrame(Timestamp now) RTC_RUN_ON(packet_sequence_checker_);
  void HandleKeyFrameGeneration(bool received_frame_is_keyframe,
                                Timestamp now,
                                bool always_request_key_frame,
                                bool keyframe_request_is_due)
      RTC_RUN_ON(packet_sequence_checker_);
  bool IsReceivingKeyFrame(Timestamp timestamp) const
      RTC_RUN_ON(packet_sequence_checker_);
  int DecodeAndMaybeDispatchEncodedFrame(std::unique_ptr<EncodedFrame> frame)
      RTC_RUN_ON(decode_sequence_checker_);

  void UpdateHistograms();

  const Environment env_;

  RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_;
  // TODO(bugs.webrtc.org/11993): This checker conceptually represents
  // operations that belong to the network thread. The Call class is currently
  // moving towards handling network packets on the network thread and while
  // that work is ongoing, this checker may in practice represent the worker
  // thread, but still serves as a mechanism of grouping together concepts
  // that belong to the network thread. Once the packets are fully delivered
  // on the network thread, this comment will be deleted.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;

  RTC_NO_UNIQUE_ADDRESS SequenceChecker decode_sequence_checker_;

  TransportAdapter transport_adapter_;
  const VideoReceiveStreamInterface::Config config_;
  const int num_cpu_cores_;
  Call* const call_;

  CallStats* const call_stats_;

  bool decoder_running_ RTC_GUARDED_BY(worker_sequence_checker_) = false;
  bool decoder_stopped_ RTC_GUARDED_BY(decode_sequence_checker_) = true;

  SourceTracker source_tracker_;
  ReceiveStatisticsProxy stats_proxy_;
  // Shared by media and rtx stream receivers, since the latter has no RtpRtcp
  // module of its own.
  const std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;

  std::unique_ptr<VCMTiming> timing_;  // Jitter buffer experiment.
  VideoReceiver2 video_receiver_;
  std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>> incoming_video_stream_;
  RtpVideoStreamReceiver2 rtp_video_stream_receiver_;
  std::unique_ptr<VideoStreamDecoder> video_stream_decoder_;
  RtpStreamsSynchronizer rtp_stream_sync_;

  std::unique_ptr<VideoStreamBufferController> buffer_;

  // `receiver_controller_` is valid from when RegisterWithTransport is
  // invoked until UnregisterFromTransport.
  RtpStreamReceiverControllerInterface* receiver_controller_
      RTC_GUARDED_BY(packet_sequence_checker_) = nullptr;

  std::unique_ptr<RtpStreamReceiverInterface> media_receiver_
      RTC_GUARDED_BY(packet_sequence_checker_);
  std::unique_ptr<RtxReceiveStream> rtx_receive_stream_
      RTC_GUARDED_BY(packet_sequence_checker_);
  absl::optional<uint32_t> updated_rtx_ssrc_
      RTC_GUARDED_BY(packet_sequence_checker_);
  std::unique_ptr<RtpStreamReceiverInterface> rtx_receiver_
      RTC_GUARDED_BY(packet_sequence_checker_);

  // Whenever we are in an undecodable state (the stream has just started or
  // there was a decoding error) we require a keyframe to restart the stream.
  bool keyframe_required_ RTC_GUARDED_BY(packet_sequence_checker_) = true;

  // Whether we have successfully decoded any frame.
  bool frame_decoded_ RTC_GUARDED_BY(decode_sequence_checker_) = false;

  absl::optional<Timestamp> last_keyframe_request_
      RTC_GUARDED_BY(packet_sequence_checker_);

  // Keyframe request intervals are configurable through field trials.
  TimeDelta max_wait_for_keyframe_ RTC_GUARDED_BY(packet_sequence_checker_);
  TimeDelta max_wait_for_frame_ RTC_GUARDED_BY(packet_sequence_checker_);

  // All of these try to change the current min_playout_delay on `timing_`,
  // but the source of the change request is different in each case. Among
  // them, the biggest delay is used. -1 means use the default value from
  // `timing_`.
  //
  // Minimum delay as decided by the RTP playout delay extension.
  absl::optional<TimeDelta> frame_minimum_playout_delay_
      RTC_GUARDED_BY(worker_sequence_checker_);
  // Minimum delay as decided by the setLatency function in "webrtc/api".
  absl::optional<TimeDelta> base_minimum_playout_delay_
      RTC_GUARDED_BY(worker_sequence_checker_);
  // Minimum delay as decided by the A/V synchronization feature.
  absl::optional<TimeDelta> syncable_minimum_playout_delay_
      RTC_GUARDED_BY(worker_sequence_checker_);

  // Maximum delay as decided by the RTP playout delay extension.
  absl::optional<TimeDelta> frame_maximum_playout_delay_
      RTC_GUARDED_BY(worker_sequence_checker_);

  // Function that is triggered with encoded frames, if not empty.
  std::function<void(const RecordableEncodedFrame&)>
      encoded_frame_buffer_function_ RTC_GUARDED_BY(decode_sequence_checker_);
  // Set to true while we're requesting keyframes but have not yet received
  // one.
  bool keyframe_generation_requested_ RTC_GUARDED_BY(packet_sequence_checker_) =
      false;
  // Lock to avoid unnecessary per-frame idle wakeups in the code.
  webrtc::Mutex pending_resolution_mutex_;
  // Signal from decode queue to OnFrame callback to fill pending_resolution_.
  // absl::nullopt - no resolution needed. 0x0 - next OnFrame to fill with
  // received resolution. Not 0x0 - OnFrame has filled a resolution.
  absl::optional<RecordableEncodedFrame::EncodedResolution> pending_resolution_
      RTC_GUARDED_BY(pending_resolution_mutex_);
  // Buffered encoded frames held while waiting for decoded resolution.
  std::vector<std::unique_ptr<EncodedFrame>> buffered_encoded_frames_
      RTC_GUARDED_BY(decode_sequence_checker_);

  // Used to signal destruction to potentially pending tasks.
  ScopedTaskSafety task_safety_;

  // Defined last so they are destroyed before all other members; in
  // particular, `decode_queue_` should be stopped before
  // `decode_sequence_checker_` is destructed to avoid races when running
  // tasks on the `decode_queue_` during VideoReceiveStream2 destruction.
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> decode_queue_;
};

}  // namespace internal
}  // namespace webrtc

#endif  // VIDEO_VIDEO_RECEIVE_STREAM2_H_
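
VideoFrameMetaData above is what lets per-frame stats hop threads without keeping pixel buffers alive. A sketch of that pattern under assumed wiring; `worker` and the commented-out consumer are hypothetical placeholders:

  // Capture cheap metadata on the decode path, then post it; the posted task
  // copies the small struct and holds no reference to the VideoFrame itself.
  void ForwardFrameMeta(const webrtc::VideoFrame& frame, webrtc::Clock* clock,
                        webrtc::TaskQueueBase* worker /* hypothetical */) {
    webrtc::internal::VideoFrameMetaData meta(frame, clock->CurrentTime());
    worker->PostTask([meta] {
      // Hypothetical consumer, e.g. a stats proxy: OnRenderedFrame(meta);
    });
  }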
98
TMessagesProj/jni/voip/webrtc/video/video_receive_stream_timeout_tracker.cc
Normal file
@@ -0,0 +1,98 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/video_receive_stream_timeout_tracker.h"

#include <algorithm>
#include <utility>

#include "rtc_base/logging.h"

namespace webrtc {

VideoReceiveStreamTimeoutTracker::VideoReceiveStreamTimeoutTracker(
    Clock* clock,
    TaskQueueBase* const bookkeeping_queue,
    const Timeouts& timeouts,
    TimeoutCallback callback)
    : clock_(clock),
      bookkeeping_queue_(bookkeeping_queue),
      timeouts_(timeouts),
      timeout_cb_(std::move(callback)) {}

VideoReceiveStreamTimeoutTracker::~VideoReceiveStreamTimeoutTracker() {
  RTC_DCHECK(!timeout_task_.Running());
}

bool VideoReceiveStreamTimeoutTracker::Running() const {
  return timeout_task_.Running();
}

TimeDelta VideoReceiveStreamTimeoutTracker::TimeUntilTimeout() const {
  return std::max(timeout_ - clock_->CurrentTime(), TimeDelta::Zero());
}

void VideoReceiveStreamTimeoutTracker::Start(bool waiting_for_keyframe) {
  RTC_DCHECK_RUN_ON(bookkeeping_queue_);
  RTC_DCHECK(!timeout_task_.Running());
  waiting_for_keyframe_ = waiting_for_keyframe;
  TimeDelta timeout_delay = TimeoutForNextFrame();
  last_frame_ = clock_->CurrentTime();
  timeout_ = last_frame_ + timeout_delay;
  timeout_task_ =
      RepeatingTaskHandle::DelayedStart(bookkeeping_queue_, timeout_delay,
                                        [this] { return HandleTimeoutTask(); });
}

void VideoReceiveStreamTimeoutTracker::Stop() {
  timeout_task_.Stop();
}

void VideoReceiveStreamTimeoutTracker::SetWaitingForKeyframe() {
  RTC_DCHECK_RUN_ON(bookkeeping_queue_);
  waiting_for_keyframe_ = true;
  TimeDelta timeout_delay = TimeoutForNextFrame();
  if (clock_->CurrentTime() + timeout_delay < timeout_) {
    Stop();
    Start(waiting_for_keyframe_);
  }
}

void VideoReceiveStreamTimeoutTracker::OnEncodedFrameReleased() {
  RTC_DCHECK_RUN_ON(bookkeeping_queue_);
  // If we were waiting for a keyframe, then it has just been released.
  waiting_for_keyframe_ = false;
  last_frame_ = clock_->CurrentTime();
  timeout_ = last_frame_ + TimeoutForNextFrame();
}

TimeDelta VideoReceiveStreamTimeoutTracker::HandleTimeoutTask() {
  RTC_DCHECK_RUN_ON(bookkeeping_queue_);
  Timestamp now = clock_->CurrentTime();
  // `timeout_` is hit and we have timed out. Schedule the next timeout at
  // the timeout delay.
  if (now >= timeout_) {
    RTC_DLOG(LS_VERBOSE) << "Stream timeout at " << now;
    TimeDelta timeout_delay = TimeoutForNextFrame();
    timeout_ = now + timeout_delay;
    timeout_cb_(now - last_frame_);
    return timeout_delay;
  }
  // Otherwise, `timeout_` changed since we scheduled a timeout. Reschedule
  // a timeout check.
  return timeout_ - now;
}

void VideoReceiveStreamTimeoutTracker::SetTimeouts(Timeouts timeouts) {
  RTC_DCHECK_RUN_ON(bookkeeping_queue_);
  timeouts_ = timeouts;
}

}  // namespace webrtc
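
A usage sketch, assuming the caller is already running on the bookkeeping queue and owns `clock` and `queue` (placeholders, not names from this file):

  // All calls below must run on `queue`, per the RTC_DCHECK_RUN_ON checks.
  webrtc::VideoReceiveStreamTimeoutTracker::Timeouts timeouts{
      /*max_wait_for_keyframe=*/webrtc::TimeDelta::Millis(200),
      /*max_wait_for_frame=*/webrtc::TimeDelta::Seconds(3)};
  webrtc::VideoReceiveStreamTimeoutTracker tracker(
      clock, queue, timeouts, [](webrtc::TimeDelta wait) {
        RTC_LOG(LS_INFO) << "No decodable frame for " << wait.ms() << " ms";
      });
  tracker.Start(/*waiting_for_keyframe=*/true);
  // Whenever a frame is handed to the decoder, push the deadline forward:
  tracker.OnEncodedFrameReleased();
  // Must be stopped before destruction (see the destructor's RTC_DCHECK):
  tracker.Stop();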
70
TMessagesProj/jni/voip/webrtc/video/video_receive_stream_timeout_tracker.h
Normal file
@@ -0,0 +1,70 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_VIDEO_RECEIVE_STREAM_TIMEOUT_TRACKER_H_
#define VIDEO_VIDEO_RECEIVE_STREAM_TIMEOUT_TRACKER_H_

#include <functional>

#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/clock.h"

namespace webrtc {

class VideoReceiveStreamTimeoutTracker {
 public:
  struct Timeouts {
    TimeDelta max_wait_for_keyframe;
    TimeDelta max_wait_for_frame;
  };

  using TimeoutCallback = std::function<void(TimeDelta wait)>;
  VideoReceiveStreamTimeoutTracker(Clock* clock,
                                   TaskQueueBase* const bookkeeping_queue,
                                   const Timeouts& timeouts,
                                   TimeoutCallback callback);
  ~VideoReceiveStreamTimeoutTracker();
  VideoReceiveStreamTimeoutTracker(const VideoReceiveStreamTimeoutTracker&) =
      delete;
  VideoReceiveStreamTimeoutTracker& operator=(
      const VideoReceiveStreamTimeoutTracker&) = delete;

  bool Running() const;
  void Start(bool waiting_for_keyframe);
  void Stop();
  void SetWaitingForKeyframe();
  void OnEncodedFrameReleased();
  TimeDelta TimeUntilTimeout() const;

  void SetTimeouts(Timeouts timeouts);

 private:
  TimeDelta TimeoutForNextFrame() const RTC_RUN_ON(bookkeeping_queue_) {
    return waiting_for_keyframe_ ? timeouts_.max_wait_for_keyframe
                                 : timeouts_.max_wait_for_frame;
  }
  TimeDelta HandleTimeoutTask();

  Clock* const clock_;
  TaskQueueBase* const bookkeeping_queue_;
  Timeouts timeouts_ RTC_GUARDED_BY(bookkeeping_queue_);
  const TimeoutCallback timeout_cb_;
  RepeatingTaskHandle timeout_task_;

  Timestamp last_frame_ = Timestamp::MinusInfinity();
  Timestamp timeout_ = Timestamp::MinusInfinity();
  bool waiting_for_keyframe_;
};
}  // namespace webrtc

#endif  // VIDEO_VIDEO_RECEIVE_STREAM_TIMEOUT_TRACKER_H_
945
TMessagesProj/jni/voip/webrtc/video/video_send_stream_impl.cc
Normal file
@@ -0,0 +1,945 @@
/*
 *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "video/video_send_stream_impl.h"

#include <stdio.h>

#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/types/optional.h"
#include "api/adaptation/resource.h"
#include "api/call/bitrate_allocation.h"
#include "api/crypto/crypto_options.h"
#include "api/fec_controller.h"
#include "api/field_trials_view.h"
#include "api/metronome/metronome.h"
#include "api/rtp_parameters.h"
#include "api/rtp_sender_interface.h"
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
#include "api/video/encoded_image.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_codec_constants.h"
#include "api/video/video_codec_type.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_type.h"
#include "api/video/video_layers_allocation.h"
#include "api/video/video_source_interface.h"
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "call/bitrate_allocator.h"
#include "call/rtp_config.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "call/video_send_stream.h"
#include "modules/pacing/pacing_controller.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/rtp_header_extension_size.h"
#include "modules/rtp_rtcp/source/rtp_sender.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/alr_experiment.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/min_video_bitrate_experiment.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
#include "video/adaptation/overuse_frame_detector.h"
#include "video/config/video_encoder_config.h"
#include "video/encoder_rtcp_feedback.h"
#include "video/frame_cadence_adapter.h"
#include "video/send_delay_stats.h"
#include "video/send_statistics_proxy.h"
#include "video/video_stream_encoder.h"
#include "video/video_stream_encoder_interface.h"

namespace webrtc {
namespace internal {
namespace {

// Max positive size difference to treat allocations as "similar".
static constexpr int kMaxVbaSizeDifferencePercent = 10;
// Max time we will throttle similar video bitrate allocations.
static constexpr int64_t kMaxVbaThrottleTimeMs = 500;

constexpr TimeDelta kEncoderTimeOut = TimeDelta::Seconds(2);

constexpr double kVideoHysteresis = 1.2;
constexpr double kScreenshareHysteresis = 1.35;

constexpr int kMinDefaultAv1BitrateBps =
    15000;  // This value acts as an absolute minimum AV1 bitrate limit.

// When send-side BWE is used, a stricter 1.1x pacing factor is used, rather
// than the 2.5x used with receive-side BWE. This provides a more careful
// bandwidth rampup, with less risk of overshoots causing adverse effects like
// packet loss. It is not used for receive-side BWE, since there we lack the
// probing feature and it may result in too slow an initial rampup.
static constexpr double kStrictPacingMultiplier = 1.1;

bool TransportSeqNumExtensionConfigured(const VideoSendStream::Config& config) {
  const std::vector<RtpExtension>& extensions = config.rtp.extensions;
  return absl::c_any_of(extensions, [](const RtpExtension& ext) {
    return ext.uri == RtpExtension::kTransportSequenceNumberUri;
  });
}

// Calculate the max padding bitrate for a multi-layer codec.
int CalculateMaxPadBitrateBps(const std::vector<VideoStream>& streams,
                              bool is_svc,
                              VideoEncoderConfig::ContentType content_type,
                              int min_transmit_bitrate_bps,
                              bool pad_to_min_bitrate,
                              bool alr_probing) {
  int pad_up_to_bitrate_bps = 0;

  RTC_DCHECK(!is_svc || streams.size() <= 1) << "Only one stream is allowed in "
                                                "SVC mode.";

  // Filter out only the active streams.
  std::vector<VideoStream> active_streams;
  for (const VideoStream& stream : streams) {
    if (stream.active)
      active_streams.emplace_back(stream);
  }

  if (active_streams.size() > 1 || (!active_streams.empty() && is_svc)) {
    // Simulcast or SVC is used.
    // If SVC is used, stream bitrates should already encode svc bitrates:
    // min_bitrate = min bitrate of the lowest svc layer.
    // target_bitrate = sum of target bitrates of lower layers + min bitrate
    // of the last one (as used in the calculations below).
    // max_bitrate = sum of all active layers' max_bitrate.
    if (alr_probing) {
      // With alr probing, just pad to the min bitrate of the lowest stream;
      // probing will handle the rest of the rampup.
      pad_up_to_bitrate_bps = active_streams[0].min_bitrate_bps;
    } else {
      // Without alr probing, pad up to the start bitrate of the
      // highest active stream.
      const double hysteresis_factor =
          content_type == VideoEncoderConfig::ContentType::kScreen
              ? kScreenshareHysteresis
              : kVideoHysteresis;
      if (is_svc) {
        // For SVC, since there is only one "stream", the padding bitrate
        // needed to enable the top spatial layer is stored in the
        // `target_bitrate_bps` field.
        // TODO(sprang): This behavior needs to die.
        pad_up_to_bitrate_bps = static_cast<int>(
            hysteresis_factor * active_streams[0].target_bitrate_bps + 0.5);
      } else {
        const size_t top_active_stream_idx = active_streams.size() - 1;
        pad_up_to_bitrate_bps = std::min(
            static_cast<int>(
                hysteresis_factor *
                    active_streams[top_active_stream_idx].min_bitrate_bps +
                0.5),
            active_streams[top_active_stream_idx].target_bitrate_bps);

        // Add target_bitrate_bps of the lower active streams.
        for (size_t i = 0; i < top_active_stream_idx; ++i) {
          pad_up_to_bitrate_bps += active_streams[i].target_bitrate_bps;
        }
      }
    }
  } else if (!active_streams.empty() && pad_to_min_bitrate) {
    pad_up_to_bitrate_bps = active_streams[0].min_bitrate_bps;
  }

  pad_up_to_bitrate_bps =
      std::max(pad_up_to_bitrate_bps, min_transmit_bitrate_bps);

  return pad_up_to_bitrate_bps;
}
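
// Worked example (hypothetical numbers, not taken from any config above):
// with three active simulcast streams whose target bitrates are 150/500/1200
// kbps, a min bitrate of 800 kbps on the top stream, camera content and no
// ALR probing, the padding target is
//   min(1.2 * 800, 1200) + 150 + 500 = 960 + 650 = 1610 kbps.
// That is, the hysteresis factor applies only to the top layer, and the
// lower layers contribute their target bitrates unchanged.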

absl::optional<AlrExperimentSettings> GetAlrSettings(
    const FieldTrialsView& field_trials,
    VideoEncoderConfig::ContentType content_type) {
  if (content_type == VideoEncoderConfig::ContentType::kScreen) {
    return AlrExperimentSettings::CreateFromFieldTrial(
        field_trials,
        AlrExperimentSettings::kScreenshareProbingBweExperimentName);
  }
  return AlrExperimentSettings::CreateFromFieldTrial(
      field_trials,
      AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
}

bool SameStreamsEnabled(const VideoBitrateAllocation& lhs,
                        const VideoBitrateAllocation& rhs) {
  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    for (size_t ti = 0; ti < kMaxTemporalStreams; ++ti) {
      if (lhs.HasBitrate(si, ti) != rhs.HasBitrate(si, ti)) {
        return false;
      }
    }
  }
  return true;
}

// Returns an optional that has a value iff TransportSeqNumExtensionConfigured
// is `true` for the given video send stream config.
absl::optional<float> GetConfiguredPacingFactor(
    const VideoSendStream::Config& config,
    VideoEncoderConfig::ContentType content_type,
    const PacingConfig& default_pacing_config,
    const FieldTrialsView& field_trials) {
  if (!TransportSeqNumExtensionConfigured(config))
    return absl::nullopt;

  absl::optional<AlrExperimentSettings> alr_settings =
      GetAlrSettings(field_trials, content_type);
  if (alr_settings)
    return alr_settings->pacing_factor;

  RateControlSettings rate_control_settings =
      RateControlSettings::ParseFromKeyValueConfig(&field_trials);
  return rate_control_settings.GetPacingFactor().value_or(
      default_pacing_config.pacing_factor);
}

int GetEncoderPriorityBitrate(std::string codec_name,
                              const FieldTrialsView& field_trials) {
  int priority_bitrate = 0;
  if (PayloadStringToCodecType(codec_name) == VideoCodecType::kVideoCodecAV1) {
    webrtc::FieldTrialParameter<int> av1_priority_bitrate("bitrate", 0);
    webrtc::ParseFieldTrial(
        {&av1_priority_bitrate},
        field_trials.Lookup("WebRTC-AV1-OverridePriorityBitrate"));
    priority_bitrate = av1_priority_bitrate;
  }
  return priority_bitrate;
}

uint32_t GetInitialEncoderMaxBitrate(int initial_encoder_max_bitrate) {
  if (initial_encoder_max_bitrate > 0)
    return rtc::dchecked_cast<uint32_t>(initial_encoder_max_bitrate);

  // TODO(srte): Make sure max bitrate is not set to negative values. We don't
  // have any way to handle unset values in downstream code, such as the
  // bitrate allocator. Previously -1 was implicitly cast to UINT32_MAX, a
  // behaviour that is not safe. Converting to 10 Mbps should be safe for
  // reasonable use cases as it allows adding the max of multiple streams
  // without wrapping around.
  const int kFallbackMaxBitrateBps = 10000000;
  RTC_DLOG(LS_ERROR) << "ERROR: Initial encoder max bitrate = "
                     << initial_encoder_max_bitrate << " which is <= 0!";
  RTC_DLOG(LS_INFO) << "Using default encoder max bitrate = 10 Mbps";
  return kFallbackMaxBitrateBps;
}

int GetDefaultMinVideoBitrateBps(VideoCodecType codec_type) {
  if (codec_type == VideoCodecType::kVideoCodecAV1) {
    return kMinDefaultAv1BitrateBps;
  }
  return kDefaultMinVideoBitrateBps;
}

size_t CalculateMaxHeaderSize(const RtpConfig& config) {
  size_t header_size = kRtpHeaderSize;
  size_t extensions_size = 0;
  size_t fec_extensions_size = 0;
  if (!config.extensions.empty()) {
    RtpHeaderExtensionMap extensions_map(config.extensions);
    extensions_size = RtpHeaderExtensionSize(RTPSender::VideoExtensionSizes(),
                                             extensions_map);
    fec_extensions_size =
        RtpHeaderExtensionSize(RTPSender::FecExtensionSizes(), extensions_map);
  }
  header_size += extensions_size;
  if (config.flexfec.payload_type >= 0) {
    // All FEC extensions again plus maximum FlexFec overhead.
    header_size += fec_extensions_size + 32;
  } else {
    if (config.ulpfec.ulpfec_payload_type >= 0) {
      // Header with all the FEC extensions will be repeated plus maximum
      // UlpFec overhead.
      header_size += fec_extensions_size + 18;
    }
    if (config.ulpfec.red_payload_type >= 0) {
      header_size += 1;  // RED header.
    }
  }
  // Additional room for Rtx.
  if (config.rtx.payload_type >= 0)
    header_size += kRtxHeaderSize;
  return header_size;
}
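
// Worked example (hypothetical configuration): with the 12-byte base RTP
// header, 24 bytes of negotiated header extensions of which 8 bytes also
// apply to FEC packets, FlexFEC enabled and RTX configured, the reserved
// budget is
//   12 + 24 + (8 + 32) + kRtxHeaderSize
// bytes taken out of rtp.max_packet_size before the payload is packetized.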

VideoStreamEncoder::BitrateAllocationCallbackType
GetBitrateAllocationCallbackType(const VideoSendStream::Config& config,
                                 const FieldTrialsView& field_trials) {
  if (webrtc::RtpExtension::FindHeaderExtensionByUri(
          config.rtp.extensions,
          webrtc::RtpExtension::kVideoLayersAllocationUri,
          config.crypto_options.srtp.enable_encrypted_rtp_header_extensions
              ? RtpExtension::Filter::kPreferEncryptedExtension
              : RtpExtension::Filter::kDiscardEncryptedExtension)) {
    return VideoStreamEncoder::BitrateAllocationCallbackType::
        kVideoLayersAllocation;
  }
  if (field_trials.IsEnabled("WebRTC-Target-Bitrate-Rtcp")) {
    return VideoStreamEncoder::BitrateAllocationCallbackType::
        kVideoBitrateAllocation;
  }
  return VideoStreamEncoder::BitrateAllocationCallbackType::
      kVideoBitrateAllocationWhenScreenSharing;
}

RtpSenderFrameEncryptionConfig CreateFrameEncryptionConfig(
    const VideoSendStream::Config* config) {
  RtpSenderFrameEncryptionConfig frame_encryption_config;
  frame_encryption_config.frame_encryptor = config->frame_encryptor.get();
  frame_encryption_config.crypto_options = config->crypto_options;
  return frame_encryption_config;
}

RtpSenderObservers CreateObservers(RtcpRttStats* call_stats,
                                   EncoderRtcpFeedback* encoder_feedback,
                                   SendStatisticsProxy* stats_proxy,
                                   SendPacketObserver* send_packet_observer) {
  RtpSenderObservers observers;
  observers.rtcp_rtt_stats = call_stats;
  observers.intra_frame_callback = encoder_feedback;
  observers.rtcp_loss_notification_observer = encoder_feedback;
  observers.report_block_data_observer = stats_proxy;
  observers.rtp_stats = stats_proxy;
  observers.bitrate_observer = stats_proxy;
  observers.frame_count_observer = stats_proxy;
  observers.rtcp_type_observer = stats_proxy;
  observers.send_packet_observer = send_packet_observer;
  return observers;
}

std::unique_ptr<VideoStreamEncoderInterface> CreateVideoStreamEncoder(
    Clock* clock,
    int num_cpu_cores,
    TaskQueueFactory* task_queue_factory,
    SendStatisticsProxy* stats_proxy,
    const VideoStreamEncoderSettings& encoder_settings,
    VideoStreamEncoder::BitrateAllocationCallbackType
        bitrate_allocation_callback_type,
    const FieldTrialsView& field_trials,
    Metronome* metronome,
    webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector) {
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> encoder_queue =
      task_queue_factory->CreateTaskQueue("EncoderQueue",
                                          TaskQueueFactory::Priority::NORMAL);
  TaskQueueBase* encoder_queue_ptr = encoder_queue.get();
  return std::make_unique<VideoStreamEncoder>(
      clock, num_cpu_cores, stats_proxy, encoder_settings,
      std::make_unique<OveruseFrameDetector>(stats_proxy),
      FrameCadenceAdapterInterface::Create(
          clock, encoder_queue_ptr, metronome,
          /*worker_queue=*/TaskQueueBase::Current(), field_trials),
      std::move(encoder_queue), bitrate_allocation_callback_type, field_trials,
      encoder_selector);
}

bool HasActiveEncodings(const VideoEncoderConfig& config) {
  for (const VideoStream& stream : config.simulcast_layers) {
    if (stream.active) {
      return true;
    }
  }
  return false;
}

}  // namespace

PacingConfig::PacingConfig(const FieldTrialsView& field_trials)
    : pacing_factor("factor", kStrictPacingMultiplier),
      max_pacing_delay("max_delay", PacingController::kMaxExpectedQueueLength) {
  ParseFieldTrial({&pacing_factor, &max_pacing_delay},
                  field_trials.Lookup("WebRTC-Video-Pacing"));
}
PacingConfig::PacingConfig(const PacingConfig&) = default;
PacingConfig::~PacingConfig() = default;

VideoSendStreamImpl::VideoSendStreamImpl(
    Clock* clock,
    int num_cpu_cores,
    TaskQueueFactory* task_queue_factory,
    RtcpRttStats* call_stats,
    RtpTransportControllerSendInterface* transport,
    Metronome* metronome,
    BitrateAllocatorInterface* bitrate_allocator,
    SendDelayStats* send_delay_stats,
    RtcEventLog* event_log,
    VideoSendStream::Config config,
    VideoEncoderConfig encoder_config,
    const std::map<uint32_t, RtpState>& suspended_ssrcs,
    const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
    std::unique_ptr<FecController> fec_controller,
    const FieldTrialsView& field_trials,
    std::unique_ptr<VideoStreamEncoderInterface> video_stream_encoder_for_test)
    : transport_(transport),
      stats_proxy_(clock, config, encoder_config.content_type, field_trials),
      send_packet_observer_(&stats_proxy_, send_delay_stats),
      config_(std::move(config)),
      content_type_(encoder_config.content_type),
      video_stream_encoder_(
          video_stream_encoder_for_test
              ? std::move(video_stream_encoder_for_test)
              : CreateVideoStreamEncoder(
                    clock,
                    num_cpu_cores,
                    task_queue_factory,
                    &stats_proxy_,
                    config_.encoder_settings,
                    GetBitrateAllocationCallbackType(config_, field_trials),
                    field_trials,
                    metronome,
                    config_.encoder_selector)),
      encoder_feedback_(
          clock,
          config_.rtp.ssrcs,
          video_stream_encoder_.get(),
          [this](uint32_t ssrc, const std::vector<uint16_t>& seq_nums) {
            return rtp_video_sender_->GetSentRtpPacketInfos(ssrc, seq_nums);
          }),
      rtp_video_sender_(transport->CreateRtpVideoSender(
          suspended_ssrcs,
          suspended_payload_states,
          config_.rtp,
          config_.rtcp_report_interval_ms,
          config_.send_transport,
          CreateObservers(call_stats,
                          &encoder_feedback_,
                          &stats_proxy_,
                          &send_packet_observer_),
          event_log,
          std::move(fec_controller),
          CreateFrameEncryptionConfig(&config_),
          config_.frame_transformer)),
      clock_(clock),
      has_alr_probing_(
          config_.periodic_alr_bandwidth_probing ||
          GetAlrSettings(field_trials, encoder_config.content_type)),
      pacing_config_(PacingConfig(field_trials)),
      worker_queue_(TaskQueueBase::Current()),
      timed_out_(false),
      bitrate_allocator_(bitrate_allocator),
      has_active_encodings_(HasActiveEncodings(encoder_config)),
      disable_padding_(true),
      max_padding_bitrate_(0),
      encoder_min_bitrate_bps_(0),
      encoder_max_bitrate_bps_(
          GetInitialEncoderMaxBitrate(encoder_config.max_bitrate_bps)),
      encoder_target_rate_bps_(0),
      encoder_bitrate_priority_(encoder_config.bitrate_priority),
      encoder_av1_priority_bitrate_override_bps_(
          GetEncoderPriorityBitrate(config_.rtp.payload_name, field_trials)),
      configured_pacing_factor_(GetConfiguredPacingFactor(config_,
                                                          content_type_,
                                                          pacing_config_,
                                                          field_trials)) {
  RTC_DCHECK_GE(config_.rtp.payload_type, 0);
  RTC_DCHECK_LE(config_.rtp.payload_type, 127);
  RTC_DCHECK(!config_.rtp.ssrcs.empty());
  RTC_DCHECK(transport_);
  RTC_DCHECK_NE(encoder_max_bitrate_bps_, 0);
  RTC_LOG(LS_INFO) << "VideoSendStreamImpl: " << config_.ToString();

  RTC_CHECK(AlrExperimentSettings::MaxOneFieldTrialEnabled(field_trials));

  absl::optional<bool> enable_alr_bw_probing;

  // If send-side BWE is enabled, check if we should apply updated probing and
  // pacing settings.
  if (configured_pacing_factor_) {
    absl::optional<AlrExperimentSettings> alr_settings =
        GetAlrSettings(field_trials, content_type_);
    int queue_time_limit_ms;
    if (alr_settings) {
      enable_alr_bw_probing = true;
      queue_time_limit_ms = alr_settings->max_paced_queue_time;
    } else {
      RateControlSettings rate_control_settings =
          RateControlSettings::ParseFromKeyValueConfig(&field_trials);
      enable_alr_bw_probing = rate_control_settings.UseAlrProbing();
      queue_time_limit_ms = pacing_config_.max_pacing_delay.Get().ms();
    }

    transport_->SetQueueTimeLimit(queue_time_limit_ms);
  }

  if (config_.periodic_alr_bandwidth_probing) {
    enable_alr_bw_probing = config_.periodic_alr_bandwidth_probing;
  }

  if (enable_alr_bw_probing) {
    transport->EnablePeriodicAlrProbing(*enable_alr_bw_probing);
  }

  if (configured_pacing_factor_)
    transport_->SetPacingFactor(*configured_pacing_factor_);

  // Only request rotation at the source when we positively know that the
  // remote side doesn't support the rotation extension. This allows us to
  // prepare the encoder in the expectation that rotation is supported - which
  // is the common case.
  bool rotation_applied = absl::c_none_of(
      config_.rtp.extensions, [](const RtpExtension& extension) {
        return extension.uri == RtpExtension::kVideoRotationUri;
      });

  video_stream_encoder_->SetSink(this, rotation_applied);
  video_stream_encoder_->SetStartBitrate(
      bitrate_allocator_->GetStartBitrate(this));
  video_stream_encoder_->SetFecControllerOverride(rtp_video_sender_);
  ReconfigureVideoEncoder(std::move(encoder_config));
}
|
||||
|
||||
VideoSendStreamImpl::~VideoSendStreamImpl() {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
RTC_LOG(LS_INFO) << "~VideoSendStreamImpl: " << config_.ToString();
|
||||
RTC_DCHECK(!started());
|
||||
RTC_DCHECK(!IsRunning());
|
||||
transport_->DestroyRtpVideoSender(rtp_video_sender_);
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::AddAdaptationResource(
|
||||
rtc::scoped_refptr<Resource> resource) {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
video_stream_encoder_->AddAdaptationResource(resource);
|
||||
}
|
||||
|
||||
std::vector<rtc::scoped_refptr<Resource>>
|
||||
VideoSendStreamImpl::GetAdaptationResources() {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
return video_stream_encoder_->GetAdaptationResources();
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::SetSource(
|
||||
rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
|
||||
const DegradationPreference& degradation_preference) {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
video_stream_encoder_->SetSource(source, degradation_preference);
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::ReconfigureVideoEncoder(VideoEncoderConfig config) {
|
||||
ReconfigureVideoEncoder(std::move(config), nullptr);
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::ReconfigureVideoEncoder(
|
||||
VideoEncoderConfig config,
|
||||
SetParametersCallback callback) {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
RTC_DCHECK_EQ(content_type_, config.content_type);
|
||||
RTC_LOG(LS_VERBOSE) << "Encoder config: " << config.ToString()
|
||||
<< " VideoSendStream config: " << config_.ToString();
|
||||
|
||||
has_active_encodings_ = HasActiveEncodings(config);
|
||||
if (has_active_encodings_ && rtp_video_sender_->IsActive() && !IsRunning()) {
|
||||
StartupVideoSendStream();
|
||||
} else if (!has_active_encodings_ && IsRunning()) {
|
||||
StopVideoSendStream();
|
||||
}
|
||||
video_stream_encoder_->ConfigureEncoder(
|
||||
std::move(config),
|
||||
config_.rtp.max_packet_size - CalculateMaxHeaderSize(config_.rtp),
|
||||
std::move(callback));
|
||||
}
|
||||
|
||||
VideoSendStream::Stats VideoSendStreamImpl::GetStats() {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
return stats_proxy_.GetStats();
|
||||
}
|
||||
|
||||
absl::optional<float> VideoSendStreamImpl::GetPacingFactorOverride() const {
|
||||
return configured_pacing_factor_;
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::StopPermanentlyAndGetRtpStates(
|
||||
VideoSendStreamImpl::RtpStateMap* rtp_state_map,
|
||||
VideoSendStreamImpl::RtpPayloadStateMap* payload_state_map) {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
video_stream_encoder_->Stop();
|
||||
|
||||
running_ = false;
|
||||
// Always run these cleanup steps regardless of whether running_ was set
|
||||
// or not. This will unregister callbacks before destruction.
|
||||
// See `VideoSendStreamImpl::StopVideoSendStream` for more.
|
||||
Stop();
|
||||
*rtp_state_map = GetRtpStates();
|
||||
*payload_state_map = GetRtpPayloadStates();
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::GenerateKeyFrame(
|
||||
const std::vector<std::string>& rids) {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
// Map rids to layers. If rids is empty, generate a keyframe for all layers.
|
||||
std::vector<VideoFrameType> next_frames(config_.rtp.ssrcs.size(),
|
||||
VideoFrameType::kVideoFrameKey);
|
||||
if (!config_.rtp.rids.empty() && !rids.empty()) {
|
||||
std::fill(next_frames.begin(), next_frames.end(),
|
||||
VideoFrameType::kVideoFrameDelta);
|
||||
for (const auto& rid : rids) {
|
||||
for (size_t i = 0; i < config_.rtp.rids.size(); i++) {
|
||||
if (config_.rtp.rids[i] == rid) {
|
||||
next_frames[i] = VideoFrameType::kVideoFrameKey;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (video_stream_encoder_) {
|
||||
video_stream_encoder_->SendKeyFrame(next_frames);
|
||||
}
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::DeliverRtcp(const uint8_t* packet, size_t length) {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
rtp_video_sender_->DeliverRtcp(packet, length);
|
||||
}
|
||||
|
||||
bool VideoSendStreamImpl::started() {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
return rtp_video_sender_->IsActive();
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::Start() {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
// This sender is allowed to send RTP packets. Start monitoring and allocating
|
||||
// a rate if there is also active encodings. (has_active_encodings_).
|
||||
rtp_video_sender_->SetSending(true);
|
||||
if (!IsRunning() && has_active_encodings_) {
|
||||
StartupVideoSendStream();
|
||||
}
|
||||
}
|
||||
|
||||
bool VideoSendStreamImpl::IsRunning() const {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
return check_encoder_activity_task_.Running();
|
||||
}
|
||||
|
||||
void VideoSendStreamImpl::StartupVideoSendStream() {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
RTC_DCHECK(rtp_video_sender_->IsActive());
|
||||
RTC_DCHECK(has_active_encodings_);
|
||||
|
||||
bitrate_allocator_->AddObserver(this, GetAllocationConfig());
|
||||
// Start monitoring encoder activity.
|
||||
{
|
||||
RTC_DCHECK(!check_encoder_activity_task_.Running());
|
||||
|
||||
activity_ = false;
|
||||
timed_out_ = false;
|
||||
check_encoder_activity_task_ = RepeatingTaskHandle::DelayedStart(
|
||||
worker_queue_, kEncoderTimeOut, [this] {
|
||||
RTC_DCHECK_RUN_ON(&thread_checker_);
|
||||
if (!activity_) {
|
||||
if (!timed_out_) {
|
||||
SignalEncoderTimedOut();
|
||||
}
|
||||
timed_out_ = true;
|
||||
disable_padding_ = true;
|
||||
} else if (timed_out_) {
|
||||
SignalEncoderActive();
|
||||
timed_out_ = false;
|
||||
}
|
||||
activity_ = false;
|
||||
return kEncoderTimeOut;
|
||||
});
|
||||
}
|
||||
|
||||
video_stream_encoder_->SendKeyFrame();
|
||||
}

void VideoSendStreamImpl::Stop() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_LOG(LS_INFO) << "VideoSendStreamImpl::Stop";
  if (!rtp_video_sender_->IsActive())
    return;

  TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Stop");
  rtp_video_sender_->SetSending(false);
  if (IsRunning()) {
    StopVideoSendStream();
  }
}

void VideoSendStreamImpl::StopVideoSendStream() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  bitrate_allocator_->RemoveObserver(this);
  check_encoder_activity_task_.Stop();
  video_stream_encoder_->OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(),
                                          DataRate::Zero(), 0, 0, 0);
  stats_proxy_.OnSetEncoderTargetRate(0);
}

void VideoSendStreamImpl::SignalEncoderTimedOut() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  // If the encoder has not produced anything during the last kEncoderTimeOut
  // while it is supposed to, deregister as BitrateAllocatorObserver. This can
  // happen if a camera stops producing frames.
  if (encoder_target_rate_bps_ > 0) {
    RTC_LOG(LS_INFO) << "SignalEncoderTimedOut, Encoder timed out.";
    bitrate_allocator_->RemoveObserver(this);
  }
}

void VideoSendStreamImpl::OnBitrateAllocationUpdated(
    const VideoBitrateAllocation& allocation) {
  // OnBitrateAllocationUpdated is invoked from the encoder task queue or
  // the worker_queue_.
  auto task = [this, allocation] {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    if (encoder_target_rate_bps_ == 0) {
      return;
    }
    int64_t now_ms = clock_->TimeInMilliseconds();
    if (video_bitrate_allocation_context_) {
      // If the new allocation is at most kMaxVbaSizeDifferencePercent larger
      // than the previously sent allocation and the same streams are still
      // enabled, it is considered "similar". We do not want to send similar
      // allocations more than once per kMaxVbaThrottleTimeMs.
      const VideoBitrateAllocation& last =
          video_bitrate_allocation_context_->last_sent_allocation;
      const bool is_similar =
          allocation.get_sum_bps() >= last.get_sum_bps() &&
          allocation.get_sum_bps() <
              (last.get_sum_bps() * (100 + kMaxVbaSizeDifferencePercent)) /
                  100 &&
          SameStreamsEnabled(allocation, last);
      if (is_similar &&
          (now_ms - video_bitrate_allocation_context_->last_send_time_ms) <
              kMaxVbaThrottleTimeMs) {
        // This allocation is too similar, cache it and return.
        video_bitrate_allocation_context_->throttled_allocation = allocation;
        return;
      }
    } else {
      video_bitrate_allocation_context_.emplace();
    }

    video_bitrate_allocation_context_->last_sent_allocation = allocation;
    video_bitrate_allocation_context_->throttled_allocation.reset();
    video_bitrate_allocation_context_->last_send_time_ms = now_ms;

    // Send bitrate allocation metadata only if the encoder is not paused.
    rtp_video_sender_->OnBitrateAllocationUpdated(allocation);
  };
  if (!worker_queue_->IsCurrent()) {
    worker_queue_->PostTask(
        SafeTask(worker_queue_safety_.flag(), std::move(task)));
  } else {
    task();
  }
}
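
// Illustrative example of the throttling rule above (numbers are made up; the
// actual constants are defined earlier in this file): with
// kMaxVbaSizeDifferencePercent == 10, a previously sent allocation of
// 1000 kbps makes any new allocation in [1000, 1100) kbps with the same
// enabled streams "similar"; if it also arrives within kMaxVbaThrottleTimeMs
// of the last send, it is cached in `throttled_allocation` and flushed later
// from OnEncodedImage() instead of being sent immediately. Allocations whose
// sum shrinks are never "similar" and are sent right away.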

void VideoSendStreamImpl::OnVideoLayersAllocationUpdated(
    VideoLayersAllocation allocation) {
  // OnVideoLayersAllocationUpdated is handled on the encoder task queue in
  // order to not race with OnEncodedImage callbacks.
  rtp_video_sender_->OnVideoLayersAllocationUpdated(allocation);
}

void VideoSendStreamImpl::SignalEncoderActive() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (IsRunning()) {
    RTC_LOG(LS_INFO) << "SignalEncoderActive, Encoder is active.";
    bitrate_allocator_->AddObserver(this, GetAllocationConfig());
  }
}

MediaStreamAllocationConfig VideoSendStreamImpl::GetAllocationConfig() const {
  return MediaStreamAllocationConfig{
      static_cast<uint32_t>(encoder_min_bitrate_bps_),
      encoder_max_bitrate_bps_,
      static_cast<uint32_t>(disable_padding_ ? 0 : max_padding_bitrate_),
      encoder_av1_priority_bitrate_override_bps_,
      !config_.suspend_below_min_bitrate,
      encoder_bitrate_priority_};
}

void VideoSendStreamImpl::OnEncoderConfigurationChanged(
    std::vector<VideoStream> streams,
    bool is_svc,
    VideoEncoderConfig::ContentType content_type,
    int min_transmit_bitrate_bps) {
  // Currently called on the encoder task queue.
  RTC_DCHECK(!worker_queue_->IsCurrent());
  auto closure = [this, streams = std::move(streams), is_svc, content_type,
                  min_transmit_bitrate_bps]() mutable {
    RTC_DCHECK_GE(config_.rtp.ssrcs.size(), streams.size());
    TRACE_EVENT0("webrtc", "VideoSendStream::OnEncoderConfigurationChanged");
    RTC_DCHECK_RUN_ON(&thread_checker_);

    const VideoCodecType codec_type =
        PayloadStringToCodecType(config_.rtp.payload_name);

    const absl::optional<DataRate> experimental_min_bitrate =
        GetExperimentalMinVideoBitrate(codec_type);
    encoder_min_bitrate_bps_ =
        experimental_min_bitrate
            ? experimental_min_bitrate->bps()
            : std::max(streams[0].min_bitrate_bps,
                       GetDefaultMinVideoBitrateBps(codec_type));

    encoder_max_bitrate_bps_ = 0;
    double stream_bitrate_priority_sum = 0;
    for (const auto& stream : streams) {
      // We don't want to allocate more bitrate than needed to inactive
      // streams.
      if (stream.active) {
        encoder_max_bitrate_bps_ += stream.max_bitrate_bps;
      }
      if (stream.bitrate_priority) {
        RTC_DCHECK_GT(*stream.bitrate_priority, 0);
        stream_bitrate_priority_sum += *stream.bitrate_priority;
      }
    }
    RTC_DCHECK_GT(stream_bitrate_priority_sum, 0);
    encoder_bitrate_priority_ = stream_bitrate_priority_sum;
    encoder_max_bitrate_bps_ =
        std::max(static_cast<uint32_t>(encoder_min_bitrate_bps_),
                 encoder_max_bitrate_bps_);

    // TODO(bugs.webrtc.org/10266): Query the VideoBitrateAllocator instead.
    max_padding_bitrate_ = CalculateMaxPadBitrateBps(
        streams, is_svc, content_type, min_transmit_bitrate_bps,
        config_.suspend_below_min_bitrate, has_alr_probing_);

    // Clear stats for disabled layers.
    for (size_t i = streams.size(); i < config_.rtp.ssrcs.size(); ++i) {
      stats_proxy_.OnInactiveSsrc(config_.rtp.ssrcs[i]);
    }

    const size_t num_temporal_layers =
        streams.back().num_temporal_layers.value_or(1);

    rtp_video_sender_->SetEncodingData(streams[0].width, streams[0].height,
                                       num_temporal_layers);

    if (IsRunning()) {
      // The send stream is already started. Update the allocator with the new
      // bitrate limits.
      bitrate_allocator_->AddObserver(this, GetAllocationConfig());
    }
  };

  worker_queue_->PostTask(
      SafeTask(worker_queue_safety_.flag(), std::move(closure)));
}
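
// Summary of the bounds computed above: `encoder_min_bitrate_bps_` is the
// experimental override when present, otherwise the larger of the first
// stream's configured minimum and the codec default;
// `encoder_max_bitrate_bps_` sums the maxima of the active streams and is
// clamped so it never falls below the minimum. Both values feed
// GetAllocationConfig(), so reconfiguring while running re-registers the
// stream with the bitrate allocator under the new limits.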

EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
    const EncodedImage& encoded_image,
    const CodecSpecificInfo* codec_specific_info) {
  // OnEncodedImage is called on whatever thread the real encoder
  // implementation runs on. In the case of hardware encoders, there might be
  // several encoders running in parallel on different threads.

  // Indicate that there is still activity going on.
  activity_ = true;
  RTC_DCHECK(!worker_queue_->IsCurrent());

  auto task_to_run_on_worker = [this]() {
    RTC_DCHECK_RUN_ON(&thread_checker_);
    if (disable_padding_) {
      disable_padding_ = false;
      // Ensure that the padding bitrate is propagated to the bitrate
      // allocator.
      SignalEncoderActive();
    }
    // Check if there's a throttled VideoBitrateAllocation that we should try
    // sending.
    auto& context = video_bitrate_allocation_context_;
    if (context && context->throttled_allocation) {
      OnBitrateAllocationUpdated(*context->throttled_allocation);
    }
  };
  worker_queue_->PostTask(
      SafeTask(worker_queue_safety_.flag(), std::move(task_to_run_on_worker)));

  return rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info);
}

void VideoSendStreamImpl::OnDroppedFrame(
    EncodedImageCallback::DropReason reason) {
  activity_ = true;
}

std::map<uint32_t, RtpState> VideoSendStreamImpl::GetRtpStates() const {
  return rtp_video_sender_->GetRtpStates();
}

std::map<uint32_t, RtpPayloadState> VideoSendStreamImpl::GetRtpPayloadStates()
    const {
  return rtp_video_sender_->GetRtpPayloadStates();
}

uint32_t VideoSendStreamImpl::OnBitrateUpdated(BitrateAllocationUpdate update) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(rtp_video_sender_->IsActive())
      << "VideoSendStream::Start has not been called.";

  // When the BWE algorithm doesn't pass a stable estimate, we'll use the
  // unstable one instead.
  if (update.stable_target_bitrate.IsZero()) {
    update.stable_target_bitrate = update.target_bitrate;
  }

  rtp_video_sender_->OnBitrateUpdated(update, stats_proxy_.GetSendFrameRate());
  encoder_target_rate_bps_ = rtp_video_sender_->GetPayloadBitrateBps();
  const uint32_t protection_bitrate_bps =
      rtp_video_sender_->GetProtectionBitrateBps();
  DataRate link_allocation = DataRate::Zero();
  if (encoder_target_rate_bps_ > protection_bitrate_bps) {
    link_allocation =
        DataRate::BitsPerSec(encoder_target_rate_bps_ - protection_bitrate_bps);
  }
  DataRate overhead =
      update.target_bitrate - DataRate::BitsPerSec(encoder_target_rate_bps_);
  DataRate encoder_stable_target_rate = update.stable_target_bitrate;
  if (encoder_stable_target_rate > overhead) {
    encoder_stable_target_rate = encoder_stable_target_rate - overhead;
  } else {
    encoder_stable_target_rate = DataRate::BitsPerSec(encoder_target_rate_bps_);
  }

  encoder_target_rate_bps_ =
      std::min(encoder_max_bitrate_bps_, encoder_target_rate_bps_);

  encoder_stable_target_rate =
      std::min(DataRate::BitsPerSec(encoder_max_bitrate_bps_),
               encoder_stable_target_rate);

  DataRate encoder_target_rate = DataRate::BitsPerSec(encoder_target_rate_bps_);
  link_allocation = std::max(encoder_target_rate, link_allocation);
  video_stream_encoder_->OnBitrateUpdated(
      encoder_target_rate, encoder_stable_target_rate, link_allocation,
      rtc::dchecked_cast<uint8_t>(update.packet_loss_ratio * 256),
      update.round_trip_time.ms(), update.cwnd_reduce_ratio);
  stats_proxy_.OnSetEncoderTargetRate(encoder_target_rate_bps_);
  return protection_bitrate_bps;
}
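
// Worked example for the arithmetic above (illustrative numbers only): with
// update.target_bitrate = 1200 kbps, a payload rate of 1000 kbps reported by
// the RTP sender and 100 kbps of protection bitrate, the overhead is
// 1200 - 1000 = 200 kbps and the preliminary link allocation is
// 1000 - 100 = 900 kbps. A stable target of 1100 kbps is reduced by the
// 200 kbps overhead to 900 kbps, both rates are capped at
// `encoder_max_bitrate_bps_`, and the final link allocation is raised back to
// max(encoder target, 900) = 1000 kbps.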

}  // namespace internal
}  // namespace webrtc
242
TMessagesProj/jni/voip/webrtc/video/video_send_stream_impl.h
Normal file
@ -0,0 +1,242 @@
/*
 *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VIDEO_VIDEO_SEND_STREAM_IMPL_H_
#define VIDEO_VIDEO_SEND_STREAM_IMPL_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/field_trials_view.h"
#include "api/metronome/metronome.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/encoded_image.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video_codecs/video_encoder.h"
#include "call/bitrate_allocator.h"
#include "call/rtp_config.h"
#include "call/rtp_transport_controller_send_interface.h"
#include "call/rtp_video_sender_interface.h"
#include "call/video_send_stream.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/thread_annotations.h"
#include "video/config/video_encoder_config.h"
#include "video/encoder_rtcp_feedback.h"
#include "video/send_delay_stats.h"
#include "video/send_statistics_proxy.h"
#include "video/video_stream_encoder_interface.h"

namespace webrtc {

namespace test {
class VideoSendStreamPeer;
}  // namespace test

namespace internal {

// Pacing buffer config; overridden by ALR config if provided.
struct PacingConfig {
  explicit PacingConfig(const FieldTrialsView& field_trials);
  PacingConfig(const PacingConfig&);
  PacingConfig& operator=(const PacingConfig&) = default;
  ~PacingConfig();
  FieldTrialParameter<double> pacing_factor;
  FieldTrialParameter<TimeDelta> max_pacing_delay;
};

// VideoSendStreamImpl implements webrtc::VideoSendStream. It is created and
// destroyed on the worker queue. An encoder may deliver frames through the
// EncodedImageCallback on an arbitrary thread.
class VideoSendStreamImpl : public webrtc::VideoSendStream,
                            public webrtc::BitrateAllocatorObserver,
                            public VideoStreamEncoderInterface::EncoderSink {
 public:
  using RtpStateMap = std::map<uint32_t, RtpState>;
  using RtpPayloadStateMap = std::map<uint32_t, RtpPayloadState>;

  VideoSendStreamImpl(Clock* clock,
                      int num_cpu_cores,
                      TaskQueueFactory* task_queue_factory,
                      RtcpRttStats* call_stats,
                      RtpTransportControllerSendInterface* transport,
                      Metronome* metronome,
                      BitrateAllocatorInterface* bitrate_allocator,
                      SendDelayStats* send_delay_stats,
                      RtcEventLog* event_log,
                      VideoSendStream::Config config,
                      VideoEncoderConfig encoder_config,
                      const RtpStateMap& suspended_ssrcs,
                      const RtpPayloadStateMap& suspended_payload_states,
                      std::unique_ptr<FecController> fec_controller,
                      const FieldTrialsView& field_trials,
                      std::unique_ptr<VideoStreamEncoderInterface>
                          video_stream_encoder_for_test = nullptr);
  ~VideoSendStreamImpl() override;

  void DeliverRtcp(const uint8_t* packet, size_t length);

  // webrtc::VideoSendStream implementation.
  void Start() override;
  void Stop() override;
  bool started() override;

  void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
  std::vector<rtc::scoped_refptr<Resource>> GetAdaptationResources() override;

  void SetSource(rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
                 const DegradationPreference& degradation_preference) override;

  void ReconfigureVideoEncoder(VideoEncoderConfig config) override;
  void ReconfigureVideoEncoder(VideoEncoderConfig config,
                               SetParametersCallback callback) override;
  Stats GetStats() override;

  void StopPermanentlyAndGetRtpStates(RtpStateMap* rtp_state_map,
                                      RtpPayloadStateMap* payload_state_map);
  void GenerateKeyFrame(const std::vector<std::string>& rids) override;

  // TODO(holmer): Move these to RtpTransportControllerSend.
  std::map<uint32_t, RtpState> GetRtpStates() const;
  std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const;

  const absl::optional<float>& configured_pacing_factor() const {
    return configured_pacing_factor_;
  }

 private:
  friend class test::VideoSendStreamPeer;
  class OnSendPacketObserver : public SendPacketObserver {
   public:
    OnSendPacketObserver(SendStatisticsProxy* stats_proxy,
                         SendDelayStats* send_delay_stats)
        : stats_proxy_(*stats_proxy), send_delay_stats_(*send_delay_stats) {}

    void OnSendPacket(absl::optional<uint16_t> packet_id,
                      Timestamp capture_time,
                      uint32_t ssrc) override {
      stats_proxy_.OnSendPacket(ssrc, capture_time);
      if (packet_id.has_value()) {
        send_delay_stats_.OnSendPacket(*packet_id, capture_time, ssrc);
      }
    }

   private:
    SendStatisticsProxy& stats_proxy_;
    SendDelayStats& send_delay_stats_;
  };

  absl::optional<float> GetPacingFactorOverride() const;

  // Implements BitrateAllocatorObserver.
  uint32_t OnBitrateUpdated(BitrateAllocationUpdate update) override;

  // Implements VideoStreamEncoderInterface::EncoderSink.
  void OnEncoderConfigurationChanged(
      std::vector<VideoStream> streams,
      bool is_svc,
      VideoEncoderConfig::ContentType content_type,
      int min_transmit_bitrate_bps) override;

  void OnBitrateAllocationUpdated(
      const VideoBitrateAllocation& allocation) override;
  void OnVideoLayersAllocationUpdated(
      VideoLayersAllocation allocation) override;

  // Implements EncodedImageCallback. The implementation routes encoded frames
  // to the `payload_router_` and `config.pre_encode_callback` if set.
  // Called on an arbitrary encoder callback thread.
  EncodedImageCallback::Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info) override;

  // Implements EncodedImageCallback.
  void OnDroppedFrame(EncodedImageCallback::DropReason reason) override;

  // Starts monitoring and sends a keyframe.
  void StartupVideoSendStream();
  // Removes the bitrate observer, stops monitoring and notifies the video
  // encoder of the bitrate update.
  void StopVideoSendStream() RTC_RUN_ON(thread_checker_);

  void ConfigureProtection();
  void ConfigureSsrcs();
  void SignalEncoderTimedOut();
  void SignalEncoderActive();
  // A video send stream is running if VideoSendStream::Start has been invoked
  // and there is an active encoding.
  bool IsRunning() const;
  MediaStreamAllocationConfig GetAllocationConfig() const
      RTC_RUN_ON(thread_checker_);

  RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_;

  RtpTransportControllerSendInterface* const transport_;

  SendStatisticsProxy stats_proxy_;
  OnSendPacketObserver send_packet_observer_;
  const VideoSendStream::Config config_;
  const VideoEncoderConfig::ContentType content_type_;
  std::unique_ptr<VideoStreamEncoderInterface> video_stream_encoder_;
  EncoderRtcpFeedback encoder_feedback_;
  RtpVideoSenderInterface* const rtp_video_sender_;
  bool running_ RTC_GUARDED_BY(thread_checker_) = false;

  Clock* const clock_;
  const bool has_alr_probing_;
  const PacingConfig pacing_config_;

  TaskQueueBase* const worker_queue_;

  RepeatingTaskHandle check_encoder_activity_task_
      RTC_GUARDED_BY(thread_checker_);

  std::atomic_bool activity_;
  bool timed_out_ RTC_GUARDED_BY(thread_checker_);

  BitrateAllocatorInterface* const bitrate_allocator_;

  bool has_active_encodings_ RTC_GUARDED_BY(thread_checker_);
  bool disable_padding_ RTC_GUARDED_BY(thread_checker_);
  int max_padding_bitrate_ RTC_GUARDED_BY(thread_checker_);
  int encoder_min_bitrate_bps_ RTC_GUARDED_BY(thread_checker_);
  uint32_t encoder_max_bitrate_bps_ RTC_GUARDED_BY(thread_checker_);
  uint32_t encoder_target_rate_bps_ RTC_GUARDED_BY(thread_checker_);
  double encoder_bitrate_priority_ RTC_GUARDED_BY(thread_checker_);
  const int encoder_av1_priority_bitrate_override_bps_
      RTC_GUARDED_BY(thread_checker_);

  ScopedTaskSafety worker_queue_safety_;

  // Context for the most recent and last sent video bitrate allocation. Used
  // to throttle sending of similar bitrate allocations.
  struct VbaSendContext {
    VideoBitrateAllocation last_sent_allocation;
    absl::optional<VideoBitrateAllocation> throttled_allocation;
    int64_t last_send_time_ms;
  };
  absl::optional<VbaSendContext> video_bitrate_allocation_context_
      RTC_GUARDED_BY(thread_checker_);
  const absl::optional<float> configured_pacing_factor_;
};
}  // namespace internal
}  // namespace webrtc
#endif  // VIDEO_VIDEO_SEND_STREAM_IMPL_H_
@ -0,0 +1,198 @@
/*
 *  Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/video_source_sink_controller.h"

#include <algorithm>
#include <limits>
#include <utility>

#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
#include "tgnet/FileLog.h"

namespace webrtc {

VideoSourceSinkController::VideoSourceSinkController(
    rtc::VideoSinkInterface<VideoFrame>* sink,
    rtc::VideoSourceInterface<VideoFrame>* source)
    : sink_(sink), source_(source) {
  RTC_DCHECK(sink_);
}

VideoSourceSinkController::~VideoSourceSinkController() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
}

void VideoSourceSinkController::SetSource(
    rtc::VideoSourceInterface<VideoFrame>* source) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);

  rtc::VideoSourceInterface<VideoFrame>* old_source = source_;
  source_ = source;
  DEBUG_D("$%d: SetSource(): new source %p", a, static_cast<void*>(source_));

  if (old_source != source && old_source)
    old_source->RemoveSink(sink_);

  if (!source)
    return;

  source->AddOrUpdateSink(sink_, CurrentSettingsToSinkWants());
}

bool VideoSourceSinkController::HasSource() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return source_ != nullptr;
}

void VideoSourceSinkController::RequestRefreshFrame() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  if (source_)
    source_->RequestRefreshFrame();
}

void VideoSourceSinkController::PushSourceSinkSettings() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  DEBUG_D("$%d: PushSourceSinkSettings(): 1", a);
  if (!source_)
    return;
  DEBUG_D("$%d: PushSourceSinkSettings(): 2 src=%p", a,
          static_cast<void*>(source_));
  rtc::VideoSinkWants wants = CurrentSettingsToSinkWants();
  DEBUG_D("$%d: PushSourceSinkSettings(): 3", a);
  source_->AddOrUpdateSink(sink_, wants);
  DEBUG_D("$%d: PushSourceSinkSettings(): 4", a);
}

VideoSourceRestrictions VideoSourceSinkController::restrictions() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return restrictions_;
}

absl::optional<size_t> VideoSourceSinkController::pixels_per_frame_upper_limit()
    const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return pixels_per_frame_upper_limit_;
}

absl::optional<double> VideoSourceSinkController::frame_rate_upper_limit()
    const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return frame_rate_upper_limit_;
}

bool VideoSourceSinkController::rotation_applied() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return rotation_applied_;
}

int VideoSourceSinkController::resolution_alignment() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return resolution_alignment_;
}

const std::vector<rtc::VideoSinkWants::FrameSize>&
VideoSourceSinkController::resolutions() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return resolutions_;
}

bool VideoSourceSinkController::active() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return active_;
}

absl::optional<rtc::VideoSinkWants::FrameSize>
VideoSourceSinkController::requested_resolution() const {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  return requested_resolution_;
}

void VideoSourceSinkController::SetRestrictions(
    VideoSourceRestrictions restrictions) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  restrictions_ = std::move(restrictions);
}

void VideoSourceSinkController::SetPixelsPerFrameUpperLimit(
    absl::optional<size_t> pixels_per_frame_upper_limit) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  pixels_per_frame_upper_limit_ = std::move(pixels_per_frame_upper_limit);
}

void VideoSourceSinkController::SetFrameRateUpperLimit(
    absl::optional<double> frame_rate_upper_limit) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  frame_rate_upper_limit_ = std::move(frame_rate_upper_limit);
}

void VideoSourceSinkController::SetRotationApplied(bool rotation_applied) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  rotation_applied_ = rotation_applied;
}

void VideoSourceSinkController::SetResolutionAlignment(
    int resolution_alignment) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  resolution_alignment_ = resolution_alignment;
}

void VideoSourceSinkController::SetResolutions(
    std::vector<rtc::VideoSinkWants::FrameSize> resolutions) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  resolutions_ = std::move(resolutions);
}

void VideoSourceSinkController::SetActive(bool active) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  active_ = active;
}

void VideoSourceSinkController::SetRequestedResolution(
    absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  requested_resolution_ = std::move(requested_resolution);
}

// RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_)
rtc::VideoSinkWants VideoSourceSinkController::CurrentSettingsToSinkWants()
    const {
  rtc::VideoSinkWants wants;
  wants.rotation_applied = rotation_applied_;
  // `wants.black_frames` is not used, it always has its default value false.
  wants.max_pixel_count =
      rtc::dchecked_cast<int>(restrictions_.max_pixels_per_frame().value_or(
          std::numeric_limits<int>::max()));
  wants.target_pixel_count =
      restrictions_.target_pixels_per_frame().has_value()
          ? absl::optional<int>(rtc::dchecked_cast<int>(
                restrictions_.target_pixels_per_frame().value()))
          : absl::nullopt;
  wants.max_framerate_fps =
      restrictions_.max_frame_rate().has_value()
          ? static_cast<int>(restrictions_.max_frame_rate().value())
          : std::numeric_limits<int>::max();
  wants.resolution_alignment = resolution_alignment_;
  wants.max_pixel_count =
      std::min(wants.max_pixel_count,
               rtc::dchecked_cast<int>(pixels_per_frame_upper_limit_.value_or(
                   std::numeric_limits<int>::max())));
  wants.max_framerate_fps =
      std::min(wants.max_framerate_fps,
               frame_rate_upper_limit_.has_value()
                   ? static_cast<int>(frame_rate_upper_limit_.value())
                   : std::numeric_limits<int>::max());
  wants.resolutions = resolutions_;
  wants.is_active = active_;
  wants.requested_resolution = requested_resolution_;
  return wants;
}
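
// In short, the wants computed above are the intersection of the adaptation
// restrictions and the standing upper limits. For example, if `restrictions_`
// caps the frame rate at 25 fps while `frame_rate_upper_limit_` is 30 fps,
// the sink asks the source for min(25, 30) = 25 fps; `max_pixel_count` is
// combined the same way via std::min.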

}  // namespace webrtc
@ -0,0 +1,102 @@
/*
 *  Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
#define VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_

#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "api/sequence_checker.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "call/adaptation/video_source_restrictions.h"
#include "rtc_base/system/no_unique_address.h"

namespace webrtc {

// Responsible for configuring source/sink settings, i.e. performing
// rtc::VideoSourceInterface<VideoFrame>::AddOrUpdateSink(). It does this by
// storing settings internally which are converted to rtc::VideoSinkWants when
// PushSourceSinkSettings() is performed.
class VideoSourceSinkController {
 public:
  VideoSourceSinkController(rtc::VideoSinkInterface<VideoFrame>* sink,
                            rtc::VideoSourceInterface<VideoFrame>* source);

  ~VideoSourceSinkController();

  void SetSource(rtc::VideoSourceInterface<VideoFrame>* source);
  bool HasSource() const;

  // Requests a refresh frame from the current source, if set.
  void RequestRefreshFrame();

  // Must be called in order for changes to settings to have an effect. This
  // allows you to modify multiple properties in a single push to the sink.
  void PushSourceSinkSettings();
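
  // Illustrative usage (hypothetical call site): batch several changes and
  // apply them with a single AddOrUpdateSink() on the source:
  //   controller.SetFrameRateUpperLimit(30.0);
  //   controller.SetResolutionAlignment(16);
  //   controller.PushSourceSinkSettings();  // one push applies both changes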

  VideoSourceRestrictions restrictions() const;
  absl::optional<size_t> pixels_per_frame_upper_limit() const;
  absl::optional<double> frame_rate_upper_limit() const;
  bool rotation_applied() const;
  int resolution_alignment() const;
  const std::vector<rtc::VideoSinkWants::FrameSize>& resolutions() const;
  bool active() const;
  absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution() const;

  // Updates the settings stored internally. In order for these settings to be
  // applied to the sink, PushSourceSinkSettings() must subsequently be called.
  void SetRestrictions(VideoSourceRestrictions restrictions);
  void SetPixelsPerFrameUpperLimit(
      absl::optional<size_t> pixels_per_frame_upper_limit);
  void SetFrameRateUpperLimit(absl::optional<double> frame_rate_upper_limit);
  void SetRotationApplied(bool rotation_applied);
  void SetResolutionAlignment(int resolution_alignment);
  void SetResolutions(std::vector<rtc::VideoSinkWants::FrameSize> resolutions);
  void SetActive(bool active);
  void SetRequestedResolution(
      absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution);

 private:
  rtc::VideoSinkWants CurrentSettingsToSinkWants() const
      RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_);

  // Used to ensure that this class is called on threads/sequences that it and
  // downstream implementations were designed for.
  // In practice, this represents libjingle's worker thread.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
  // Debug id printed by the DEBUG_D statements above; zero-initialized since
  // the constructor does not set it.
  int a = 0;
  rtc::VideoSinkInterface<VideoFrame>* const sink_;
  rtc::VideoSourceInterface<VideoFrame>* source_
      RTC_GUARDED_BY(&sequence_checker_);
  // Pixel and frame rate restrictions.
  VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_);
  // Ensures that even if we are not restricted, the sink is never configured
  // above this limit. Example: We are not CPU limited (no `restrictions_`) but
  // our encoder is capped at 30 fps (= `frame_rate_upper_limit_`).
  absl::optional<size_t> pixels_per_frame_upper_limit_
      RTC_GUARDED_BY(&sequence_checker_);
  absl::optional<double> frame_rate_upper_limit_
      RTC_GUARDED_BY(&sequence_checker_);
  bool rotation_applied_ RTC_GUARDED_BY(&sequence_checker_) = false;
  int resolution_alignment_ RTC_GUARDED_BY(&sequence_checker_) = 1;
  std::vector<rtc::VideoSinkWants::FrameSize> resolutions_
      RTC_GUARDED_BY(&sequence_checker_);
  bool active_ RTC_GUARDED_BY(&sequence_checker_) = true;
  absl::optional<rtc::VideoSinkWants::FrameSize> requested_resolution_
      RTC_GUARDED_BY(&sequence_checker_);
};

}  // namespace webrtc

#endif  // VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
@ -0,0 +1,437 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/video_stream_buffer_controller.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "absl/base/attributes.h"
#include "absl/functional/bind_front.h"
#include "absl/types/optional.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "api/video/encoded_frame.h"
#include "api/video/frame_buffer.h"
#include "api/video/video_content_type.h"
#include "modules/video_coding/frame_helpers.h"
#include "modules/video_coding/timing/inter_frame_delay_variation_calculator.h"
#include "modules/video_coding/timing/jitter_estimator.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/thread_annotations.h"
#include "video/frame_decode_scheduler.h"
#include "video/frame_decode_timing.h"
#include "video/task_queue_frame_decode_scheduler.h"
#include "video/video_receive_stream_timeout_tracker.h"

namespace webrtc {

namespace {

// Max number of frames the buffer will hold.
static constexpr size_t kMaxFramesBuffered = 800;
// Max number of decoded frame infos that will be saved.
static constexpr int kMaxFramesHistory = 1 << 13;

// Default value for the maximum decode queue size that is used when the
// low-latency renderer is used.
static constexpr size_t kZeroPlayoutDelayDefaultMaxDecodeQueueSize = 8;

struct FrameMetadata {
  explicit FrameMetadata(const EncodedFrame& frame)
      : is_last_spatial_layer(frame.is_last_spatial_layer),
        is_keyframe(frame.is_keyframe()),
        size(frame.size()),
        contentType(frame.contentType()),
        delayed_by_retransmission(frame.delayed_by_retransmission()),
        rtp_timestamp(frame.RtpTimestamp()),
        receive_time(frame.ReceivedTimestamp()) {}

  const bool is_last_spatial_layer;
  const bool is_keyframe;
  const size_t size;
  const VideoContentType contentType;
  const bool delayed_by_retransmission;
  const uint32_t rtp_timestamp;
  const absl::optional<Timestamp> receive_time;
};

Timestamp MinReceiveTime(const EncodedFrame& frame) {
  Timestamp first_recv_time = Timestamp::PlusInfinity();
  for (const auto& packet_info : frame.PacketInfos()) {
    if (packet_info.receive_time().IsFinite()) {
      first_recv_time = std::min(first_recv_time, packet_info.receive_time());
    }
  }
  return first_recv_time;
}

Timestamp ReceiveTime(const EncodedFrame& frame) {
  absl::optional<Timestamp> ts = frame.ReceivedTimestamp();
  RTC_DCHECK(ts.has_value()) << "Received frame must have a timestamp set!";
  return *ts;
}

}  // namespace

VideoStreamBufferController::VideoStreamBufferController(
    Clock* clock,
    TaskQueueBase* worker_queue,
    VCMTiming* timing,
    VideoStreamBufferControllerStatsObserver* stats_proxy,
    FrameSchedulingReceiver* receiver,
    TimeDelta max_wait_for_keyframe,
    TimeDelta max_wait_for_frame,
    std::unique_ptr<FrameDecodeScheduler> frame_decode_scheduler,
    const FieldTrialsView& field_trials)
    : field_trials_(field_trials),
      clock_(clock),
      stats_proxy_(stats_proxy),
      receiver_(receiver),
      timing_(timing),
      frame_decode_scheduler_(std::move(frame_decode_scheduler)),
      jitter_estimator_(clock_, field_trials),
      buffer_(std::make_unique<FrameBuffer>(kMaxFramesBuffered,
                                            kMaxFramesHistory,
                                            field_trials)),
      decode_timing_(clock_, timing_),
      timeout_tracker_(
          clock_,
          worker_queue,
          VideoReceiveStreamTimeoutTracker::Timeouts{
              .max_wait_for_keyframe = max_wait_for_keyframe,
              .max_wait_for_frame = max_wait_for_frame},
          absl::bind_front(&VideoStreamBufferController::OnTimeout, this)),
      zero_playout_delay_max_decode_queue_size_(
          "max_decode_queue_size",
          kZeroPlayoutDelayDefaultMaxDecodeQueueSize) {
  RTC_DCHECK(stats_proxy_);
  RTC_DCHECK(receiver_);
  RTC_DCHECK(timing_);
  RTC_DCHECK(clock_);
  RTC_DCHECK(frame_decode_scheduler_);

  ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_},
                  field_trials.Lookup("WebRTC-ZeroPlayoutDelay"));
}

void VideoStreamBufferController::Stop() {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  frame_decode_scheduler_->Stop();
  timeout_tracker_.Stop();
  decoder_ready_for_new_frame_ = false;
}

void VideoStreamBufferController::SetProtectionMode(
    VCMVideoProtection protection_mode) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  protection_mode_ = protection_mode;
}

void VideoStreamBufferController::Clear() {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  stats_proxy_->OnDroppedFrames(buffer_->CurrentSize());
  buffer_ = std::make_unique<FrameBuffer>(kMaxFramesBuffered, kMaxFramesHistory,
                                          field_trials_);
  frame_decode_scheduler_->CancelOutstanding();
}

absl::optional<int64_t> VideoStreamBufferController::InsertFrame(
    std::unique_ptr<EncodedFrame> frame) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  FrameMetadata metadata(*frame);
  int complete_units = buffer_->GetTotalNumberOfContinuousTemporalUnits();
  if (buffer_->InsertFrame(std::move(frame))) {
    RTC_DCHECK(metadata.receive_time) << "Frame receive time must be set!";
    if (!metadata.delayed_by_retransmission && metadata.receive_time &&
        (field_trials_.IsDisabled("WebRTC-IncomingTimestampOnMarkerBitOnly") ||
         metadata.is_last_spatial_layer)) {
      timing_->IncomingTimestamp(metadata.rtp_timestamp,
                                 *metadata.receive_time);
    }
    if (complete_units < buffer_->GetTotalNumberOfContinuousTemporalUnits()) {
      stats_proxy_->OnCompleteFrame(metadata.is_keyframe, metadata.size,
                                    metadata.contentType);
      MaybeScheduleFrameForRelease();
    }
  }

  return buffer_->LastContinuousFrameId();
}
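
// Note on the bookkeeping above: GetTotalNumberOfContinuousTemporalUnits() is
// sampled before and after the insert, so a frame only counts as "complete"
// for stats purposes (and only triggers scheduling) when it extends the
// continuous sequence, not when it merely lands in the buffer out of order.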

void VideoStreamBufferController::UpdateRtt(int64_t max_rtt_ms) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  jitter_estimator_.UpdateRtt(TimeDelta::Millis(max_rtt_ms));
}

void VideoStreamBufferController::SetMaxWaits(TimeDelta max_wait_for_keyframe,
                                              TimeDelta max_wait_for_frame) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  timeout_tracker_.SetTimeouts({.max_wait_for_keyframe = max_wait_for_keyframe,
                                .max_wait_for_frame = max_wait_for_frame});
}

void VideoStreamBufferController::StartNextDecode(bool keyframe_required) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  if (!timeout_tracker_.Running())
    timeout_tracker_.Start(keyframe_required);
  keyframe_required_ = keyframe_required;
  if (keyframe_required_) {
    timeout_tracker_.SetWaitingForKeyframe();
  }
  decoder_ready_for_new_frame_ = true;
  MaybeScheduleFrameForRelease();
}
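
// Contract implied above: the receiver calls StartNextDecode() whenever it is
// ready for one more frame. `decoder_ready_for_new_frame_` is reset when a
// frame is released, so at most one temporal unit is delivered per call and
// later frames wait (or are dropped) in the buffer.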

int VideoStreamBufferController::Size() {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  return buffer_->CurrentSize();
}

void VideoStreamBufferController::OnFrameReady(
    absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames,
    Timestamp render_time) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  RTC_CHECK(!frames.empty())
      << "Callers must ensure there is at least one frame to decode.";

  timeout_tracker_.OnEncodedFrameReleased();

  Timestamp now = clock_->CurrentTime();
  bool superframe_delayed_by_retransmission = false;
  DataSize superframe_size = DataSize::Zero();
  const EncodedFrame& first_frame = *frames.front();
  Timestamp min_receive_time = MinReceiveTime(first_frame);
  Timestamp max_receive_time = ReceiveTime(first_frame);

  if (first_frame.is_keyframe())
    keyframe_required_ = false;

  // Gracefully handle bad RTP timestamps and render time issues.
  if (FrameHasBadRenderTiming(render_time, now) ||
      TargetVideoDelayIsTooLarge(timing_->TargetVideoDelay())) {
    RTC_LOG(LS_WARNING) << "Resetting jitter estimator and timing module due "
                           "to bad render timing for rtp_timestamp="
                        << first_frame.RtpTimestamp();
    jitter_estimator_.Reset();
    timing_->Reset();
    render_time = timing_->RenderTime(first_frame.RtpTimestamp(), now);
  }

  for (std::unique_ptr<EncodedFrame>& frame : frames) {
    frame->SetRenderTime(render_time.ms());

    superframe_delayed_by_retransmission |= frame->delayed_by_retransmission();
    min_receive_time = std::min(min_receive_time, MinReceiveTime(*frame));
    max_receive_time = std::max(max_receive_time, ReceiveTime(*frame));
    superframe_size += DataSize::Bytes(frame->size());
  }

  if (!superframe_delayed_by_retransmission) {
    absl::optional<TimeDelta> inter_frame_delay_variation =
        ifdv_calculator_.Calculate(first_frame.RtpTimestamp(),
                                   max_receive_time);
    if (inter_frame_delay_variation) {
      jitter_estimator_.UpdateEstimate(*inter_frame_delay_variation,
                                       superframe_size);
    }

    float rtt_mult = protection_mode_ == kProtectionNackFEC ? 0.0 : 1.0;
    absl::optional<TimeDelta> rtt_mult_add_cap_ms = absl::nullopt;
    if (rtt_mult_settings_.has_value()) {
      rtt_mult = rtt_mult_settings_->rtt_mult_setting;
      rtt_mult_add_cap_ms =
          TimeDelta::Millis(rtt_mult_settings_->rtt_mult_add_cap_ms);
    }
    timing_->SetJitterDelay(
        jitter_estimator_.GetJitterEstimate(rtt_mult, rtt_mult_add_cap_ms));
    timing_->UpdateCurrentDelay(render_time, now);
  } else if (RttMultExperiment::RttMultEnabled()) {
    jitter_estimator_.FrameNacked();
  }

  // Update stats.
  UpdateDroppedFrames();
  UpdateFrameBufferTimings(min_receive_time, now);
  UpdateTimingFrameInfo();

  std::unique_ptr<EncodedFrame> frame =
      CombineAndDeleteFrames(std::move(frames));

  timing_->SetLastDecodeScheduledTimestamp(now);

  decoder_ready_for_new_frame_ = false;
  receiver_->OnEncodedFrame(std::move(frame));
}

void VideoStreamBufferController::OnTimeout(TimeDelta delay) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);

  // Stop sending timeouts until the receiver starts waiting for a new frame.
  timeout_tracker_.Stop();

  // If the stream is paused then ignore the timeout.
  if (!decoder_ready_for_new_frame_) {
    return;
  }
  decoder_ready_for_new_frame_ = false;
  receiver_->OnDecodableFrameTimeout(delay);
}

void VideoStreamBufferController::FrameReadyForDecode(uint32_t rtp_timestamp,
                                                      Timestamp render_time) {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  // Check that the frame to decode is still valid before passing the frame
  // for decoding.
  auto decodable_tu_info = buffer_->DecodableTemporalUnitsInfo();
  if (!decodable_tu_info) {
    RTC_LOG(LS_ERROR)
        << "The frame buffer became undecodable during the wait "
           "to decode frame with rtp-timestamp "
        << rtp_timestamp
        << ". Cancelling the decode of this frame, decoding "
           "will resume when the frame buffers become decodable again.";
    return;
  }
  RTC_DCHECK_EQ(rtp_timestamp, decodable_tu_info->next_rtp_timestamp)
      << "Frame buffer's next decodable frame was not the one sent for "
         "extraction.";
  auto frames = buffer_->ExtractNextDecodableTemporalUnit();
  if (frames.empty()) {
    RTC_LOG(LS_ERROR)
        << "The frame buffer should never return an empty temporal unit list "
           "when there is a decodable temporal unit.";
    RTC_DCHECK_NOTREACHED();
    return;
  }
  OnFrameReady(std::move(frames), render_time);
}

void VideoStreamBufferController::UpdateDroppedFrames()
    RTC_RUN_ON(&worker_sequence_checker_) {
  const int dropped_frames = buffer_->GetTotalNumberOfDroppedFrames() -
                             frames_dropped_before_last_new_frame_;
  if (dropped_frames > 0)
    stats_proxy_->OnDroppedFrames(dropped_frames);
  frames_dropped_before_last_new_frame_ =
      buffer_->GetTotalNumberOfDroppedFrames();
}

void VideoStreamBufferController::UpdateFrameBufferTimings(
    Timestamp min_receive_time,
    Timestamp now) {
  // Update instantaneous delays.
  auto timings = timing_->GetTimings();
  if (timings.num_decoded_frames) {
    stats_proxy_->OnFrameBufferTimingsUpdated(
        timings.estimated_max_decode_time.ms(), timings.current_delay.ms(),
        timings.target_delay.ms(), timings.minimum_delay.ms(),
        timings.min_playout_delay.ms(), timings.render_delay.ms());
  }

  // The spec mandates that `jitterBufferDelay` is the "time the first
  // packet is received by the jitter buffer (ingest timestamp) to the time it
  // exits the jitter buffer (emit timestamp)". Since the "jitter buffer"
  // is not a monolith in the webrtc.org implementation, we take the freedom to
  // define "ingest timestamp" as "first packet received by
  // RtpVideoStreamReceiver2" and "emit timestamp" as "decodable frame released
  // by VideoStreamBufferController".
  //
  // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay
  TimeDelta jitter_buffer_delay =
      std::max(TimeDelta::Zero(), now - min_receive_time);
  stats_proxy_->OnDecodableFrame(jitter_buffer_delay, timings.target_delay,
                                 timings.minimum_delay);
}

void VideoStreamBufferController::UpdateTimingFrameInfo() {
  absl::optional<TimingFrameInfo> info = timing_->GetTimingFrameInfo();
  if (info)
    stats_proxy_->OnTimingFrameInfoUpdated(*info);
}

bool VideoStreamBufferController::IsTooManyFramesQueued() const
    RTC_RUN_ON(&worker_sequence_checker_) {
  return buffer_->CurrentSize() > zero_playout_delay_max_decode_queue_size_;
}

void VideoStreamBufferController::ForceKeyFrameReleaseImmediately()
    RTC_RUN_ON(&worker_sequence_checker_) {
  RTC_DCHECK(keyframe_required_);
  // Iterate through the frame buffer until there is a complete keyframe and
  // release this right away.
  while (buffer_->DecodableTemporalUnitsInfo()) {
    auto next_frame = buffer_->ExtractNextDecodableTemporalUnit();
    if (next_frame.empty()) {
      RTC_DCHECK_NOTREACHED()
          << "Frame buffer should always return at least 1 frame.";
      continue;
    }
    // Found keyframe - decode right away.
    if (next_frame.front()->is_keyframe()) {
      auto render_time = timing_->RenderTime(next_frame.front()->RtpTimestamp(),
                                             clock_->CurrentTime());
      OnFrameReady(std::move(next_frame), render_time);
      return;
    }
  }
}

void VideoStreamBufferController::MaybeScheduleFrameForRelease()
    RTC_RUN_ON(&worker_sequence_checker_) {
  auto decodable_tu_info = buffer_->DecodableTemporalUnitsInfo();
  if (!decoder_ready_for_new_frame_ || !decodable_tu_info) {
    return;
  }

  if (keyframe_required_) {
    return ForceKeyFrameReleaseImmediately();
  }

  // If already scheduled then abort.
  if (frame_decode_scheduler_->ScheduledRtpTimestamp() ==
      decodable_tu_info->next_rtp_timestamp) {
    return;
  }

  TimeDelta max_wait = timeout_tracker_.TimeUntilTimeout();
  // Ensures the frame is scheduled for decode before the stream times out.
  // This is otherwise a race condition.
  max_wait = std::max(max_wait - TimeDelta::Millis(1), TimeDelta::Zero());
  absl::optional<FrameDecodeTiming::FrameSchedule> schedule;
  while (decodable_tu_info) {
    schedule = decode_timing_.OnFrameBufferUpdated(
        decodable_tu_info->next_rtp_timestamp,
        decodable_tu_info->last_rtp_timestamp, max_wait,
        IsTooManyFramesQueued());
    if (schedule) {
      // Don't schedule if already waiting for the same frame.
      if (frame_decode_scheduler_->ScheduledRtpTimestamp() !=
          decodable_tu_info->next_rtp_timestamp) {
        frame_decode_scheduler_->CancelOutstanding();
        frame_decode_scheduler_->ScheduleFrame(
            decodable_tu_info->next_rtp_timestamp, *schedule,
            absl::bind_front(&VideoStreamBufferController::FrameReadyForDecode,
                             this));
      }
      return;
    }
    // If there is no schedule for the current RTP timestamp, drop the
    // temporal unit and try again.
    buffer_->DropNextDecodableTemporalUnit();
    decodable_tu_info = buffer_->DecodableTemporalUnitsInfo();
  }
}
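
// Note on the loop above: OnFrameBufferUpdated() returns no schedule when the
// next decodable temporal unit can no longer be released within `max_wait`
// and its render deadline, so that unit is dropped and the next one is
// evaluated. The loop thus fast-forwards the buffer until it finds a frame
// worth scheduling or runs out of decodable units.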

}  // namespace webrtc
@ -0,0 +1,148 @@
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VIDEO_VIDEO_STREAM_BUFFER_CONTROLLER_H_
#define VIDEO_VIDEO_STREAM_BUFFER_CONTROLLER_H_

#include <memory>

#include "api/field_trials_view.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/encoded_frame.h"
#include "api/video/frame_buffer.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/timing/inter_frame_delay_variation_calculator.h"
#include "modules/video_coding/timing/jitter_estimator.h"
#include "modules/video_coding/timing/timing.h"
#include "rtc_base/experiments/rtt_mult_experiment.h"
#include "system_wrappers/include/clock.h"
#include "video/decode_synchronizer.h"
#include "video/video_receive_stream_timeout_tracker.h"

namespace webrtc {

class FrameSchedulingReceiver {
 public:
  virtual ~FrameSchedulingReceiver() = default;

  virtual void OnEncodedFrame(std::unique_ptr<EncodedFrame> frame) = 0;
  virtual void OnDecodableFrameTimeout(TimeDelta wait_time) = 0;
};

class VideoStreamBufferControllerStatsObserver {
 public:
  virtual ~VideoStreamBufferControllerStatsObserver() = default;

  virtual void OnCompleteFrame(bool is_keyframe,
                               size_t size_bytes,
                               VideoContentType content_type) = 0;

  virtual void OnDroppedFrames(uint32_t frames_dropped) = 0;

  // `jitter_buffer_delay` is the delay experienced by a single frame,
  // whereas `target_delay` and `minimum_delay` are the current delays
  // applied by the jitter buffer.
  virtual void OnDecodableFrame(TimeDelta jitter_buffer_delay,
                                TimeDelta target_delay,
                                TimeDelta minimum_delay) = 0;

  // Various jitter buffer delays determined by VCMTiming.
  virtual void OnFrameBufferTimingsUpdated(int estimated_max_decode_time_ms,
                                           int current_delay_ms,
                                           int target_delay_ms,
                                           int jitter_delay_ms,
                                           int min_playout_delay_ms,
                                           int render_delay_ms) = 0;

  virtual void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) = 0;
};

class VideoStreamBufferController {
 public:
  VideoStreamBufferController(
      Clock* clock,
      TaskQueueBase* worker_queue,
      VCMTiming* timing,
      VideoStreamBufferControllerStatsObserver* stats_proxy,
      FrameSchedulingReceiver* receiver,
      TimeDelta max_wait_for_keyframe,
      TimeDelta max_wait_for_frame,
      std::unique_ptr<FrameDecodeScheduler> frame_decode_scheduler,
      const FieldTrialsView& field_trials);
  virtual ~VideoStreamBufferController() = default;

  void Stop();
  void SetProtectionMode(VCMVideoProtection protection_mode);
  void Clear();
  absl::optional<int64_t> InsertFrame(std::unique_ptr<EncodedFrame> frame);
  void UpdateRtt(int64_t max_rtt_ms);
  void SetMaxWaits(TimeDelta max_wait_for_keyframe,
                   TimeDelta max_wait_for_frame);
  void StartNextDecode(bool keyframe_required);
  int Size();
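
  // Typical call sequence (illustrative): the owning receive stream inserts
  // frames as they arrive and asks for the next decodable one when the
  // decoder is free:
  //   controller.InsertFrame(std::move(encoded_frame));
  //   controller.StartNextDecode(/*keyframe_required=*/false);
  //   // FrameSchedulingReceiver::OnEncodedFrame() fires when a temporal unit
  //   // is ready, or OnDecodableFrameTimeout() fires on timeout.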

 private:
  void OnFrameReady(
      absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames,
      Timestamp render_time);
  void OnTimeout(TimeDelta delay);
  void FrameReadyForDecode(uint32_t rtp_timestamp, Timestamp render_time);
  void UpdateDroppedFrames() RTC_RUN_ON(&worker_sequence_checker_);
  void UpdateFrameBufferTimings(Timestamp min_receive_time, Timestamp now);
  void UpdateTimingFrameInfo();
  bool IsTooManyFramesQueued() const RTC_RUN_ON(&worker_sequence_checker_);
  void ForceKeyFrameReleaseImmediately() RTC_RUN_ON(&worker_sequence_checker_);
  void MaybeScheduleFrameForRelease() RTC_RUN_ON(&worker_sequence_checker_);

  RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_;
  const FieldTrialsView& field_trials_;
  const absl::optional<RttMultExperiment::Settings> rtt_mult_settings_ =
      RttMultExperiment::GetRttMultValue();
  Clock* const clock_;
  VideoStreamBufferControllerStatsObserver* const stats_proxy_;
  FrameSchedulingReceiver* const receiver_;
  VCMTiming* const timing_;
  const std::unique_ptr<FrameDecodeScheduler> frame_decode_scheduler_
      RTC_GUARDED_BY(&worker_sequence_checker_);

  JitterEstimator jitter_estimator_ RTC_GUARDED_BY(&worker_sequence_checker_);
  InterFrameDelayVariationCalculator ifdv_calculator_
      RTC_GUARDED_BY(&worker_sequence_checker_);
  bool keyframe_required_ RTC_GUARDED_BY(&worker_sequence_checker_) = false;
  std::unique_ptr<FrameBuffer> buffer_
      RTC_GUARDED_BY(&worker_sequence_checker_);
  FrameDecodeTiming decode_timing_ RTC_GUARDED_BY(&worker_sequence_checker_);
  VideoReceiveStreamTimeoutTracker timeout_tracker_
      RTC_GUARDED_BY(&worker_sequence_checker_);
  int frames_dropped_before_last_new_frame_
      RTC_GUARDED_BY(&worker_sequence_checker_) = 0;
  VCMVideoProtection protection_mode_
      RTC_GUARDED_BY(&worker_sequence_checker_) = kProtectionNack;

  // This flag guards frames from queuing in front of the decoder. Without this
  // guard, encoded frames will not wait for the decoder to finish decoding a
  // frame and just queue up, meaning frames will not be dropped or
  // fast-forwarded when the decoder is slow or hangs.
  bool decoder_ready_for_new_frame_ RTC_GUARDED_BY(&worker_sequence_checker_) =
      false;

  // Maximum number of frames in the decode queue to allow pacing. If the
  // queue grows beyond the max limit, pacing will be disabled and frames will
  // be pushed to the decoder as soon as possible. This only has an effect
  // when the low-latency rendering path is active, which is indicated by
  // the frame's render time == 0.
  FieldTrialParameter<unsigned> zero_playout_delay_max_decode_queue_size_;

  ScopedTaskSafety worker_safety_;
};

}  // namespace webrtc

#endif  // VIDEO_VIDEO_STREAM_BUFFER_CONTROLLER_H_