Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,116 @@
# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("//third_party/libaom/options.gni")
import("../../../../webrtc.gni")
# Helper that maps AV1 scalability modes onto per-spatial-layer
# VideoCodec settings (see av1_svc_config.h).
rtc_library("av1_svc_config") {
sources = [
"av1_svc_config.cc",
"av1_svc_config.h",
]
deps = [
"../../../../api/video_codecs:video_codecs_api",
"../../../../rtc_base:checks",
"../../../../rtc_base:logging",
"../../../../rtc_base:stringutils",
"../../svc:scalability_mode_util",
"../../svc:scalability_structures",
"../../svc:scalable_video_controller",
]
absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ]
}
# Software AV1 decoder backed by the third-party dav1d library.
# `poisonous` keeps it out of builds that exclude software video codecs.
rtc_library("dav1d_decoder") {
visibility = [ "*" ]
poisonous = [ "software_video_codecs" ]
public = [ "dav1d_decoder.h" ]
sources = [ "dav1d_decoder.cc" ]
deps = [
"../..:video_codec_interface",
"../../../../api:scoped_refptr",
"../../../../api/video:encoded_image",
"../../../../api/video:video_frame",
"../../../../api/video_codecs:video_codecs_api",
"../../../../common_video",
"../../../../rtc_base:logging",
"//third_party/dav1d",
"//third_party/libyuv",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
# Software AV1 encoder backed by the third-party libaom library.
# `poisonous` keeps it out of builds that exclude software video codecs.
rtc_library("libaom_av1_encoder") {
visibility = [ "*" ]
poisonous = [ "software_video_codecs" ]
public = [ "libaom_av1_encoder.h" ]
sources = [ "libaom_av1_encoder.cc" ]
deps = [
"../..:video_codec_interface",
"../../../../api:field_trials_view",
"../../../../api:scoped_refptr",
"../../../../api/transport:field_trial_based_config",
"../../../../api/video:encoded_image",
"../../../../api/video:video_frame",
"../../../../api/video_codecs:scalability_mode",
"../../../../api/video_codecs:video_codecs_api",
"../../../../common_video",
"../../../../rtc_base:checks",
"../../../../rtc_base:logging",
"../../../../rtc_base:rtc_numerics",
"../../../../rtc_base/experiments:encoder_info_settings",
"../../svc:scalability_structures",
"../../svc:scalable_video_controller",
"//third_party/libaom",
]
absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/strings:strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
# Unit tests for the AV1 SVC configuration helper and, when the libaom
# encoder is available, encoder/decoder tests.
if (rtc_include_tests) {
rtc_library("video_coding_codecs_av1_tests") {
testonly = true
sources = [ "av1_svc_config_unittest.cc" ]
deps = [
":av1_svc_config",
"../../../../api/video_codecs:video_codecs_api",
"../../../../test:test_support",
]
# Encoder-dependent tests are only built when libaom is part of the build.
if (enable_libaom) {
sources += [
"libaom_av1_encoder_unittest.cc",
"libaom_av1_unittest.cc",
]
deps += [
":dav1d_decoder",
":libaom_av1_encoder",
"../..:encoded_video_frame_producer",
"../..:video_codec_interface",
"../../../../api:create_frame_generator",
"../../../../api:frame_generator_api",
"../../../../api:mock_video_encoder",
"../../../../api/units:data_size",
"../../../../api/units:time_delta",
"../../../../api/video:video_frame",
"../../../../test:field_trial",
"../../svc:scalability_mode_util",
"../../svc:scalability_structures",
"../../svc:scalable_video_controller",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
}
}

View file

@ -0,0 +1,4 @@
# Allow this directory to include the third-party AV1 libraries:
# libaom (encoder) and dav1d (decoder).
include_rules = [
"+third_party/libaom",
"+third_party/dav1d",
]

View file

@ -0,0 +1,145 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/av1/av1_svc_config.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include "modules/video_coding/svc/create_scalability_structure.h"
#include "modules/video_coding/svc/scalability_mode_util.h"
#include "modules/video_coding/svc/scalable_video_controller.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Smallest resolution allowed for a spatial layer: 240x135 for landscape
// input, with the sides swapped for portrait input.
const int kMinAv1SpatialLayerLongSideLength = 240;
const int kMinAv1SpatialLayerShortSideLength = 135;
// Returns how many spatial layers (each downscaled by a factor of two per
// dimension) fit above the minimum layer resolution for the given input size.
int GetLimitedNumSpatialLayers(int width, int height) {
  const bool landscape = width >= height;
  const int width_floor = landscape ? kMinAv1SpatialLayerLongSideLength
                                    : kMinAv1SpatialLayerShortSideLength;
  const int height_floor = landscape ? kMinAv1SpatialLayerShortSideLength
                                     : kMinAv1SpatialLayerLongSideLength;
  // One layer always fits; each extra doubling of resolution over the floor
  // allows one more layer.
  const int fit_horizontally = static_cast<int>(
      std::floor(1 + std::max(0.0f, std::log2(1.0f * width / width_floor))));
  const int fit_vertically = static_cast<int>(
      std::floor(1 + std::max(0.0f, std::log2(1.0f * height / height_floor))));
  return fit_horizontally < fit_vertically ? fit_horizontally : fit_vertically;
}
// Composes a scalability-mode name of the form "L<s>T<t>" (with "_KEY"
// appended when there is more than one spatial layer) and resolves it to a
// ScalabilityMode. Returns absl::nullopt if the composed name is not a
// recognized mode.
absl::optional<ScalabilityMode> BuildScalabilityMode(int num_temporal_layers,
int num_spatial_layers) {
// 20 bytes is ample for the longest possible "L<s>T<t>_KEY" name.
char name[20];
rtc::SimpleStringBuilder ss(name);
ss << "L" << num_spatial_layers << "T" << num_temporal_layers;
if (num_spatial_layers > 1) {
ss << "_KEY";
}
return ScalabilityModeFromString(name);
}
} // namespace
// Enumerates every scalability mode the libaom AV1 encoder can use: exactly
// those for which a scalability structure configuration exists.
absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
LibaomAv1EncoderSupportedScalabilityModes() {
  absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> supported;
  for (const ScalabilityMode mode : kAllScalabilityModes) {
    if (ScalabilityStructureConfig(mode).has_value()) {
      supported.push_back(mode);
    }
  }
  return supported;
}
// A scalability mode is supported by the libaom AV1 encoder exactly when a
// scalability structure can be configured for it.
bool LibaomAv1EncoderSupportsScalabilityMode(ScalabilityMode scalability_mode) {
  return ScalabilityStructureConfig(scalability_mode).has_value();
}
// Fills `video_codec.spatialLayers` from the codec's scalability mode, or
// from `num_temporal_layers`/`num_spatial_layers` when no mode is set.
// The mode may be reduced if the input resolution cannot support all of its
// spatial layers. Returns false only when no scalability structure can be
// created for the resolved mode.
bool SetAv1SvcConfig(VideoCodec& video_codec,
int num_temporal_layers,
int num_spatial_layers) {
RTC_DCHECK_EQ(video_codec.codecType, kVideoCodecAV1);
absl::optional<ScalabilityMode> scalability_mode =
video_codec.GetScalabilityMode();
if (!scalability_mode.has_value()) {
// No explicit mode: derive one from the requested layer counts, falling
// back to single-layer L1T1 if that combination is not a known mode.
scalability_mode =
BuildScalabilityMode(num_temporal_layers, num_spatial_layers);
if (!scalability_mode) {
RTC_LOG(LS_WARNING) << "Scalability mode is not set, using 'L1T1'.";
scalability_mode = ScalabilityMode::kL1T1;
}
}
// Capture this before the mode is possibly reduced below; single-layer
// requests get different bitrate handling further down.
bool requested_single_spatial_layer =
ScalabilityModeToNumSpatialLayers(*scalability_mode) == 1;
// Drop spatial layers the input resolution cannot support.
if (ScalabilityMode reduced = LimitNumSpatialLayers(
*scalability_mode,
GetLimitedNumSpatialLayers(video_codec.width, video_codec.height));
*scalability_mode != reduced) {
RTC_LOG(LS_WARNING) << "Reduced number of spatial layers from "
<< ScalabilityModeToString(*scalability_mode) << " to "
<< ScalabilityModeToString(reduced);
scalability_mode = reduced;
}
std::unique_ptr<ScalableVideoController> structure =
CreateScalabilityStructure(*scalability_mode);
if (structure == nullptr) {
RTC_LOG(LS_WARNING) << "Failed to create structure "
<< static_cast<int>(*scalability_mode);
return false;
}
video_codec.SetScalabilityMode(*scalability_mode);
ScalableVideoController::StreamLayersConfig info = structure->StreamConfig();
// Derive each layer's resolution from the structure's scaling factors;
// frame rate and temporal-layer count are shared by all spatial layers.
for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) {
SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx];
spatial_layer.width = video_codec.width * info.scaling_factor_num[sl_idx] /
info.scaling_factor_den[sl_idx];
spatial_layer.height = video_codec.height *
info.scaling_factor_num[sl_idx] /
info.scaling_factor_den[sl_idx];
spatial_layer.maxFramerate = video_codec.maxFramerate;
spatial_layer.numberOfTemporalLayers = info.num_temporal_layers;
spatial_layer.active = true;
}
if (requested_single_spatial_layer) {
// Single spatial layer: reuse the codec-level bitrate limits directly.
SpatialLayer& spatial_layer = video_codec.spatialLayers[0];
spatial_layer.minBitrate = video_codec.minBitrate;
spatial_layer.maxBitrate = video_codec.maxBitrate;
spatial_layer.targetBitrate =
(video_codec.minBitrate + video_codec.maxBitrate) / 2;
return true;
}
// Multiple spatial layers: derive per-layer bitrate limits (kbps) from the
// layer's pixel count; min is clamped to at least 20 kbps.
for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) {
SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx];
const int num_pixels = spatial_layer.width * spatial_layer.height;
int min_bitrate_kbps = (480.0 * std::sqrt(num_pixels) - 95'000.0) / 1000.0;
spatial_layer.minBitrate = std::max(min_bitrate_kbps, 20);
spatial_layer.maxBitrate = 50 + static_cast<int>(1.6 * num_pixels / 1000.0);
spatial_layer.targetBitrate =
(spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
}
return true;
}
} // namespace webrtc

View file

@ -0,0 +1,32 @@
/* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
#define MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_
#include <vector>
#include "absl/container/inlined_vector.h"
#include "api/video_codecs/video_codec.h"
namespace webrtc {
// Returns all scalability modes supported by the libaom AV1 encoder.
absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
LibaomAv1EncoderSupportedScalabilityModes();
// Returns true if the libaom AV1 encoder supports `scalability_mode`.
bool LibaomAv1EncoderSupportsScalabilityMode(ScalabilityMode scalability_mode);
// Fills `video_codec.spatialLayers` using other members.
// Returns false if no scalability structure could be created for the mode.
bool SetAv1SvcConfig(VideoCodec& video_codec,
int num_temporal_layers,
int num_spatial_layers);
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_

View file

@ -0,0 +1,174 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/av1/av1_svc_config.h"
#include "api/video_codecs/video_codec.h"
#include "test/gmock.h"
#include "test/gtest.h"
namespace webrtc {
namespace {
// Placeholder for arguments whose value must not influence the test outcome.
constexpr int kDontCare = 0;
// Returns a 720p AV1 codec configuration with no scalability mode set.
VideoCodec GetDefaultVideoCodec() {
VideoCodec video_codec;
video_codec.codecType = kVideoCodecAV1;
video_codec.width = 1280;
video_codec.height = 720;
return video_codec;
}
// With no mode and don't-care layer counts, configuration falls back to L1T1.
TEST(Av1SvcConfigTest, TreatsEmptyAsL1T1) {
VideoCodec video_codec = GetDefaultVideoCodec();
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
/*num_spatial_layers=*/kDontCare));
EXPECT_TRUE(video_codec.spatialLayers[0].active);
EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 1);
EXPECT_FALSE(video_codec.spatialLayers[1].active);
}
// The temporal-layer count alone selects an L1T<n> mode.
TEST(Av1SvcConfigTest, ScalabilityModeFromNumberOfTemporalLayers) {
VideoCodec video_codec = GetDefaultVideoCodec();
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/3,
/*num_spatial_layers=*/1));
EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
}
// Spatial and temporal layer counts together select an L<s>T<t> mode.
TEST(Av1SvcConfigTest, ScalabilityModeFromNumberOfSpatialLayers) {
VideoCodec video_codec = GetDefaultVideoCodec();
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/3,
/*num_spatial_layers=*/2));
EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
EXPECT_TRUE(video_codec.spatialLayers[0].active);
EXPECT_TRUE(video_codec.spatialLayers[1].active);
EXPECT_FALSE(video_codec.spatialLayers[2].active);
}
// An explicitly set scalability mode determines the active spatial layers.
TEST(Av1SvcConfigTest, SetsActiveSpatialLayersFromScalabilityMode) {
VideoCodec video_codec = GetDefaultVideoCodec();
video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
/*num_spatial_layers=*/kDontCare));
EXPECT_TRUE(video_codec.spatialLayers[0].active);
EXPECT_TRUE(video_codec.spatialLayers[1].active);
EXPECT_FALSE(video_codec.spatialLayers[2].active);
}
// Verifies that an L2 mode with the default 2:1 ratio halves the lower
// spatial layer's dimensions. (Test name typo fixed: "Dobule" -> "Double".)
TEST(Av1SvcConfigTest, ConfiguresDoubleResolutionRatioFromScalabilityMode) {
  VideoCodec video_codec;
  video_codec.codecType = kVideoCodecAV1;
  video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
  video_codec.width = 1200;
  video_codec.height = 800;
  EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
                              /*num_spatial_layers=*/kDontCare));
  EXPECT_EQ(video_codec.spatialLayers[0].width, 600);
  EXPECT_EQ(video_codec.spatialLayers[0].height, 400);
  EXPECT_EQ(video_codec.spatialLayers[1].width, 1200);
  EXPECT_EQ(video_codec.spatialLayers[1].height, 800);
}
// Verifies that an "h" mode scales spatial layers by 1.5:1 instead of 2:1.
TEST(Av1SvcConfigTest, ConfiguresSmallResolutionRatioFromScalabilityMode) {
VideoCodec video_codec;
video_codec.codecType = kVideoCodecAV1;
// h mode uses 1.5:1 ratio
video_codec.SetScalabilityMode(ScalabilityMode::kL2T1h);
video_codec.width = 1500;
video_codec.height = 900;
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
/*num_spatial_layers=*/kDontCare));
EXPECT_EQ(video_codec.spatialLayers[0].width, 1000);
EXPECT_EQ(video_codec.spatialLayers[0].height, 600);
EXPECT_EQ(video_codec.spatialLayers[1].width, 1500);
EXPECT_EQ(video_codec.spatialLayers[1].height, 900);
}
// The codec-level max frame rate is copied to every spatial layer.
// (Test name typo fixed: "Framrate" -> "Framerate".)
TEST(Av1SvcConfigTest, CopiesFramerate) {
  VideoCodec video_codec = GetDefaultVideoCodec();
  video_codec.SetScalabilityMode(ScalabilityMode::kL2T1);
  video_codec.maxFramerate = 27;
  EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
                              /*num_spatial_layers=*/kDontCare));
  EXPECT_EQ(video_codec.spatialLayers[0].maxFramerate, 27);
  EXPECT_EQ(video_codec.spatialLayers[1].maxFramerate, 27);
}
// The temporal-layer count from the mode is written to the spatial layer.
TEST(Av1SvcConfigTest, SetsNumberOfTemporalLayers) {
VideoCodec video_codec = GetDefaultVideoCodec();
video_codec.SetScalabilityMode(ScalabilityMode::kL1T3);
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
/*num_spatial_layers=*/kDontCare));
EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3);
}
// With a single spatial layer, the codec-level bitrate limits are reused
// and the target sits between min and max.
TEST(Av1SvcConfigTest, CopiesMinMaxBitrateForSingleSpatialLayer) {
VideoCodec video_codec;
video_codec.codecType = kVideoCodecAV1;
video_codec.SetScalabilityMode(ScalabilityMode::kL1T3);
video_codec.minBitrate = 100;
video_codec.maxBitrate = 500;
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
/*num_spatial_layers=*/kDontCare));
EXPECT_EQ(video_codec.spatialLayers[0].minBitrate, 100u);
EXPECT_EQ(video_codec.spatialLayers[0].maxBitrate, 500u);
EXPECT_LE(video_codec.spatialLayers[0].minBitrate,
video_codec.spatialLayers[0].targetBitrate);
EXPECT_LE(video_codec.spatialLayers[0].targetBitrate,
video_codec.spatialLayers[0].maxBitrate);
}
// With several spatial layers, per-layer bitrate limits are derived from
// each layer's resolution; the expected values pin the derivation formula.
TEST(Av1SvcConfigTest, SetsBitratesForMultipleSpatialLayers) {
VideoCodec video_codec;
video_codec.codecType = kVideoCodecAV1;
video_codec.width = 640;
video_codec.height = 360;
video_codec.SetScalabilityMode(ScalabilityMode::kL2T2);
EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
/*num_spatial_layers=*/kDontCare));
EXPECT_EQ(video_codec.spatialLayers[0].minBitrate, 20u);
EXPECT_EQ(video_codec.spatialLayers[0].maxBitrate, 142u);
EXPECT_EQ(video_codec.spatialLayers[1].minBitrate, 135u);
EXPECT_EQ(video_codec.spatialLayers[1].maxBitrate, 418u);
}
// A mode with more spatial layers than the input resolution supports is
// reduced (L3T3 at 640x360 only fits two spatial layers).
// (Test name typo fixed: "Insufficent" -> "Insufficient".)
TEST(Av1SvcConfigTest, ReduceSpatialLayersOnInsufficientInputResolution) {
  VideoCodec video_codec = GetDefaultVideoCodec();
  video_codec.width = 640;
  video_codec.height = 360;
  video_codec.SetScalabilityMode(ScalabilityMode::kL3T3);
  EXPECT_TRUE(SetAv1SvcConfig(video_codec, /*num_temporal_layers=*/kDontCare,
                              /*num_spatial_layers=*/kDontCare));
  EXPECT_EQ(*video_codec.GetScalabilityMode(), ScalabilityMode::kL2T3);
}
} // namespace
} // namespace webrtc

View file

@ -0,0 +1,206 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/av1/dav1d_decoder.h"
#include <algorithm>
#include "api/scoped_refptr.h"
#include "api/video/encoded_image.h"
#include "api/video/video_frame_buffer.h"
#include "common_video/include/video_frame_buffer.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/logging.h"
#include "third_party/dav1d/libdav1d/include/dav1d/dav1d.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
namespace webrtc {
namespace {
// VideoDecoder implementation that wraps the dav1d AV1 decoder library.
class Dav1dDecoder : public VideoDecoder {
public:
Dav1dDecoder();
Dav1dDecoder(const Dav1dDecoder&) = delete;
Dav1dDecoder& operator=(const Dav1dDecoder&) = delete;
~Dav1dDecoder() override;
bool Configure(const Settings& settings) override;
int32_t Decode(const EncodedImage& encoded_image,
int64_t render_time_ms) override;
int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override;
int32_t Release() override;
DecoderInfo GetDecoderInfo() const override;
const char* ImplementationName() const override;
private:
// Owned dav1d decoding context; nullptr until Configure() succeeds.
Dav1dContext* context_ = nullptr;
// Sink for decoded frames; not owned. Must be set before Decode() is called.
DecodedImageCallback* decode_complete_callback_ = nullptr;
};
// RAII wrapper that unrefs the held Dav1dData on destruction.
class ScopedDav1dData {
public:
~ScopedDav1dData() { dav1d_data_unref(&data_); }
Dav1dData& Data() { return data_; }
private:
Dav1dData data_ = {};
};
// Ref-counted RAII wrapper for a Dav1dPicture. Keeping a reference alive
// (e.g. captured in a lambda) keeps the picture's plane memory valid.
class ScopedDav1dPicture
: public rtc::RefCountedNonVirtual<ScopedDav1dPicture> {
public:
~ScopedDav1dPicture() { dav1d_picture_unref(&picture_); }
Dav1dPicture& Picture() { return picture_; }
using rtc::RefCountedNonVirtual<ScopedDav1dPicture>::HasOneRef;
private:
Dav1dPicture picture_ = {};
};
constexpr char kDav1dName[] = "dav1d";
// Calling `dav1d_data_wrap` requires a `free_callback` to be registered.
// The wrapped buffer is owned by the caller (the EncodedImage in Decode()),
// so there is nothing to free here.
void NullFreeCallback(const uint8_t* buffer, void* opaque) {}
Dav1dDecoder::Dav1dDecoder() = default;
Dav1dDecoder::~Dav1dDecoder() {
// Ensure the dav1d context is closed even if Release() was never called.
Release();
}
// Opens a dav1d decoding context tuned for real-time use.
// Returns false if dav1d_open() fails.
bool Dav1dDecoder::Configure(const Settings& settings) {
Dav1dSettings s;
dav1d_default_settings(&s);
// At least two threads; more when the platform reports more cores.
s.n_threads = std::max(2, settings.number_of_cores());
s.max_frame_delay = 1; // For low latency decoding.
s.all_layers = 0; // Don't output a frame for every spatial layer.
// Limit max frame size to avoid OOM'ing fuzzers. crbug.com/325284120.
s.frame_size_limit = 16384 * 16384;
s.operating_point = 31; // Decode all operating points.
return dav1d_open(&context_, &s) == 0;
}
// Stores the (not owned) callback that receives decoded frames.
int32_t Dav1dDecoder::RegisterDecodeCompleteCallback(
DecodedImageCallback* decode_complete_callback) {
decode_complete_callback_ = decode_complete_callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t Dav1dDecoder::Release() {
// dav1d_close() is expected to free the context and null the pointer; a
// still-set `context_` afterwards is treated as a failure to release.
dav1d_close(&context_);
if (context_ != nullptr) {
return WEBRTC_VIDEO_CODEC_MEMORY;
}
return WEBRTC_VIDEO_CODEC_OK;
}
// Reports this as a non-hardware-accelerated decoder named "dav1d".
VideoDecoder::DecoderInfo Dav1dDecoder::GetDecoderInfo() const {
DecoderInfo info;
info.implementation_name = kDav1dName;
info.is_hardware_accelerated = false;
return info;
}
const char* Dav1dDecoder::ImplementationName() const {
return kDav1dName;
}
// Decodes one AV1 frame synchronously: feeds the encoded payload to dav1d,
// fetches the decoded picture, wraps its planes without copying, and
// delivers the frame through `decode_complete_callback_`. Only 8-bit I420
// and I444 output is accepted.
int32_t Dav1dDecoder::Decode(const EncodedImage& encoded_image,
int64_t /*render_time_ms*/) {
if (!context_ || decode_complete_callback_ == nullptr) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
// Wrap the input without copying; the EncodedImage keeps ownership, hence
// the no-op free callback.
ScopedDav1dData scoped_dav1d_data;
Dav1dData& dav1d_data = scoped_dav1d_data.Data();
dav1d_data_wrap(&dav1d_data, encoded_image.data(), encoded_image.size(),
/*free_callback=*/&NullFreeCallback,
/*user_data=*/nullptr);
if (int decode_res = dav1d_send_data(context_, &dav1d_data)) {
RTC_LOG(LS_WARNING)
<< "Dav1dDecoder::Decode decoding failed with error code "
<< decode_res;
return WEBRTC_VIDEO_CODEC_ERROR;
}
// Ref-counted so the wrapping lambdas below can keep the picture's plane
// memory alive until the wrapped VideoFrameBuffer is released.
rtc::scoped_refptr<ScopedDav1dPicture> scoped_dav1d_picture(
new ScopedDav1dPicture{});
Dav1dPicture& dav1d_picture = scoped_dav1d_picture->Picture();
if (int get_picture_res = dav1d_get_picture(context_, &dav1d_picture)) {
RTC_LOG(LS_WARNING)
<< "Dav1dDecoder::Decode getting picture failed with error code "
<< get_picture_res;
return WEBRTC_VIDEO_CODEC_ERROR;
}
if (dav1d_picture.p.bpc != 8) {
// Only accept 8 bit depth.
RTC_LOG(LS_ERROR) << "Dav1dDecoder::Decode unhandled bit depth: "
<< dav1d_picture.p.bpc;
return WEBRTC_VIDEO_CODEC_ERROR;
}
// Zero-copy wrap of the decoded planes. Note that stride[1] is used for
// both the U and V planes: dav1d appears to expose a single shared chroma
// stride (confirm against dav1d's picture header if this is ever changed).
rtc::scoped_refptr<VideoFrameBuffer> wrapped_buffer;
if (dav1d_picture.p.layout == DAV1D_PIXEL_LAYOUT_I420) {
wrapped_buffer = WrapI420Buffer(
dav1d_picture.p.w, dav1d_picture.p.h,
static_cast<uint8_t*>(dav1d_picture.data[0]), dav1d_picture.stride[0],
static_cast<uint8_t*>(dav1d_picture.data[1]), dav1d_picture.stride[1],
static_cast<uint8_t*>(dav1d_picture.data[2]), dav1d_picture.stride[1],
// To keep |scoped_dav1d_picture.Picture()| alive
[scoped_dav1d_picture] {});
} else if (dav1d_picture.p.layout == DAV1D_PIXEL_LAYOUT_I444) {
wrapped_buffer = WrapI444Buffer(
dav1d_picture.p.w, dav1d_picture.p.h,
static_cast<uint8_t*>(dav1d_picture.data[0]), dav1d_picture.stride[0],
static_cast<uint8_t*>(dav1d_picture.data[1]), dav1d_picture.stride[1],
static_cast<uint8_t*>(dav1d_picture.data[2]), dav1d_picture.stride[1],
// To keep |scoped_dav1d_picture.Picture()| alive
[scoped_dav1d_picture] {});
} else {
// Only accept I420 or I444 pixel format.
RTC_LOG(LS_ERROR) << "Dav1dDecoder::Decode unhandled pixel layout: "
<< dav1d_picture.p.layout;
return WEBRTC_VIDEO_CODEC_ERROR;
}
if (!wrapped_buffer.get()) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// Propagate timing and color-space metadata from the encoded image.
VideoFrame decoded_frame =
VideoFrame::Builder()
.set_video_frame_buffer(wrapped_buffer)
.set_timestamp_rtp(encoded_image.RtpTimestamp())
.set_ntp_time_ms(encoded_image.ntp_time_ms_)
.set_color_space(encoded_image.ColorSpace())
.build();
decode_complete_callback_->Decoded(decoded_frame, absl::nullopt,
absl::nullopt);
return WEBRTC_VIDEO_CODEC_OK;
}
} // namespace
// Factory for the dav1d-backed AV1 decoder declared in dav1d_decoder.h.
std::unique_ptr<VideoDecoder> CreateDav1dDecoder() {
return std::make_unique<Dav1dDecoder>();
}
} // namespace webrtc

View file

@ -0,0 +1,23 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
#define MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_
#include <memory>
#include "api/video_codecs/video_decoder.h"
namespace webrtc {
// Creates a software AV1 decoder backed by the dav1d library.
std::unique_ptr<VideoDecoder> CreateDav1dDecoder();
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_AV1_DAV1D_DECODER_H_

View file

@ -0,0 +1,882 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "api/field_trials_view.h"
#include "api/scoped_refptr.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/encoded_image.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/scalability_mode.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/svc/create_scalability_structure.h"
#include "modules/video_coding/svc/scalable_video_controller.h"
#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/encoder_info_settings.h"
#include "rtc_base/logging.h"
#include "third_party/libaom/source/libaom/aom/aom_codec.h"
#include "third_party/libaom/source/libaom/aom/aom_encoder.h"
#include "third_party/libaom/source/libaom/aom/aomcx.h"
#if (defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)) && \
(defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS))
#define MOBILE_ARM
#endif
#define SET_ENCODER_PARAM_OR_RETURN_ERROR(param_id, param_value) \
do { \
if (!SetEncoderControlParameters(param_id, param_value)) { \
return WEBRTC_VIDEO_CODEC_ERROR; \
} \
} while (0)
namespace webrtc {
namespace {
// Encoder configuration parameters
constexpr int kQpMin = 10;
constexpr int kUsageProfile = AOM_USAGE_REALTIME;
constexpr int kMinQindex = 145; // Min qindex threshold for QP scaling.
constexpr int kMaxQindex = 205; // Max qindex threshold for QP scaling.
constexpr int kBitDepth = 8;
constexpr int kLagInFrames = 0; // No look ahead.
constexpr int kRtpTicksPerSecond = 90000;
constexpr double kMinimumFrameRate = 1.0;
// Chooses the encoder superblock size: multi-threaded (>= 4 threads) encodes
// between roughly 540p and 1080p use a fixed 64x64 superblock; everything
// else lets the encoder decide dynamically.
aom_superblock_size_t GetSuperblockSize(int width, int height, int threads) {
  const int pixels = width * height;
  const bool use_64x64 =
      threads >= 4 && pixels >= 960 * 540 && pixels < 1920 * 1080;
  return use_64x64 ? AOM_SUPERBLOCK_SIZE_64X64 : AOM_SUPERBLOCK_SIZE_DYNAMIC;
}
// VideoEncoder implementation backed by libaom's AV1 encoder, with optional
// SVC support driven by a ScalableVideoController.
class LibaomAv1Encoder final : public VideoEncoder {
public:
LibaomAv1Encoder(const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config,
const FieldTrialsView& trials);
~LibaomAv1Encoder();
int InitEncode(const VideoCodec* codec_settings,
const Settings& settings) override;
int32_t RegisterEncodeCompleteCallback(
EncodedImageCallback* encoded_image_callback) override;
int32_t Release() override;
int32_t Encode(const VideoFrame& frame,
const std::vector<VideoFrameType>* frame_types) override;
void SetRates(const RateControlParameters& parameters) override;
EncoderInfo GetEncoderInfo() const override;
private:
// Sets one aom encoder control parameter; returns false on failure (used
// via the SET_ENCODER_PARAM_OR_RETURN_ERROR macro).
template <typename P>
bool SetEncoderControlParameters(int param_id, P param_value);
// Get value to be used for encoder cpu_speed setting
int GetCpuSpeed(int width, int height);
// Determine number of encoder threads to use.
int NumberOfThreads(int width, int height, int number_of_cores);
bool SvcEnabled() const { return svc_params_.has_value(); }
// Fills svc_params_ member value. Returns false on error.
bool SetSvcParams(ScalableVideoController::StreamLayersConfig svc_config);
// Configures the encoder with layer for the next frame.
void SetSvcLayerId(
const ScalableVideoController::LayerFrameConfig& layer_frame);
// Configures the encoder which buffers next frame updates and can reference.
void SetSvcRefFrameConfig(
const ScalableVideoController::LayerFrameConfig& layer_frame);
// If pixel format doesn't match, then reallocate.
void MaybeRewrapImgWithFormat(const aom_img_fmt_t fmt);
// Decides frame dependencies/temporal structure for SVC encoding.
std::unique_ptr<ScalableVideoController> svc_controller_;
absl::optional<ScalabilityMode> scalability_mode_;
bool inited_;
bool rates_configured_;
// Present only when SVC is in use (see SvcEnabled()).
absl::optional<aom_svc_params_t> svc_params_;
VideoCodec encoder_settings_;
absl::optional<LibaomAv1EncoderAuxConfig> aux_config_;
// Input image handed to libaom; owned, freed/reallocated as needed.
aom_image_t* frame_for_encode_;
aom_codec_ctx_t ctx_;
aom_codec_enc_cfg_t cfg_;
// Not owned; receives encoded frames.
EncodedImageCallback* encoded_image_callback_;
int64_t timestamp_;
const LibaomAv1EncoderInfoSettings encoder_info_override_;
// TODO(webrtc:15225): Kill switch for disabling frame dropping. Remove it
// after frame dropping is fully rolled out.
bool disable_frame_dropping_;
int max_consec_frame_drop_;
};
// Validates codec settings before encoder initialization. Returns
// WEBRTC_VIDEO_CODEC_OK on success, WEBRTC_VIDEO_CODEC_ERR_PARAMETER when
// any field is out of range or inconsistent.
int32_t VerifyCodecSettings(const VideoCodec& codec_settings) {
  if (codec_settings.width < 1 || codec_settings.height < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // maxBitrate == 0 represents an unspecified maxBitRate.
  const bool has_max_bitrate = codec_settings.maxBitrate > 0;
  if (has_max_bitrate &&
      (codec_settings.minBitrate > codec_settings.maxBitrate ||
       codec_settings.startBitrate > codec_settings.maxBitrate)) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (codec_settings.startBitrate < codec_settings.minBitrate ||
      codec_settings.maxFramerate < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // qpMax must lie within the encoder's quantizer range.
  if (codec_settings.qpMax < kQpMin || codec_settings.qpMax > 63) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
// Reads the "maxdrop" parameter of the
// "WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop" field trial. Returns 0 when
// the trial is absent or unparsable; callers treat 0 as "no limit set".
int GetMaxConsecutiveFrameDrop(const FieldTrialsView& field_trials) {
webrtc::FieldTrialParameter<int> maxdrop("maxdrop", 0);
webrtc::ParseFieldTrial(
{&maxdrop},
field_trials.Lookup("WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop"));
return maxdrop;
}
LibaomAv1Encoder::LibaomAv1Encoder(
const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config,
const FieldTrialsView& trials)
: inited_(false),
rates_configured_(false),
aux_config_(aux_config),
frame_for_encode_(nullptr),
encoded_image_callback_(nullptr),
timestamp_(0),
// Kill switch: an "Enabled" trial value turns frame dropping off.
disable_frame_dropping_(absl::StartsWith(
trials.Lookup("WebRTC-LibaomAv1Encoder-DisableFrameDropping"),
"Enabled")),
max_consec_frame_drop_(GetMaxConsecutiveFrameDrop(trials)) {}
LibaomAv1Encoder::~LibaomAv1Encoder() {
// Ensure encoder resources are released even if Release() was not called.
Release();
}
int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings,
const Settings& settings) {
if (codec_settings == nullptr) {
RTC_LOG(LS_WARNING) << "No codec settings provided to "
"LibaomAv1Encoder.";
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (settings.number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (inited_) {
RTC_LOG(LS_WARNING) << "Initing LibaomAv1Encoder without first releasing.";
Release();
}
encoder_settings_ = *codec_settings;
// Sanity checks for encoder configuration.
const int32_t result = VerifyCodecSettings(encoder_settings_);
if (result < 0) {
RTC_LOG(LS_WARNING) << "Incorrect codec settings provided to "
"LibaomAv1Encoder.";
return result;
}
if (encoder_settings_.numberOfSimulcastStreams > 1) {
RTC_LOG(LS_WARNING) << "Simulcast is not implemented by LibaomAv1Encoder.";
return result;
}
scalability_mode_ = encoder_settings_.GetScalabilityMode();
if (!scalability_mode_.has_value()) {
RTC_LOG(LS_WARNING) << "Scalability mode is not set, using 'L1T1'.";
scalability_mode_ = ScalabilityMode::kL1T1;
}
svc_controller_ = CreateScalabilityStructure(*scalability_mode_);
if (svc_controller_ == nullptr) {
RTC_LOG(LS_WARNING) << "Failed to set scalability mode "
<< static_cast<int>(*scalability_mode_);
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (!SetSvcParams(svc_controller_->StreamConfig())) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// Initialize encoder configuration structure with default values
aom_codec_err_t ret =
aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg_, kUsageProfile);
if (ret != AOM_CODEC_OK) {
RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret
<< " on aom_codec_enc_config_default.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
// Overwrite default config with input encoder settings & RTC-relevant values.
cfg_.g_w = encoder_settings_.width;
cfg_.g_h = encoder_settings_.height;
cfg_.g_threads =
NumberOfThreads(cfg_.g_w, cfg_.g_h, settings.number_of_cores);
cfg_.g_timebase.num = 1;
cfg_.g_timebase.den = kRtpTicksPerSecond;
cfg_.rc_target_bitrate = encoder_settings_.startBitrate; // kilobits/sec.
cfg_.rc_dropframe_thresh =
(!disable_frame_dropping_ && encoder_settings_.GetFrameDropEnabled()) ? 30
: 0;
cfg_.g_input_bit_depth = kBitDepth;
cfg_.kf_mode = AOM_KF_DISABLED;
cfg_.rc_min_quantizer = kQpMin;
cfg_.rc_max_quantizer = encoder_settings_.qpMax;
cfg_.rc_undershoot_pct = 50;
cfg_.rc_overshoot_pct = 50;
cfg_.rc_buf_initial_sz = 600;
cfg_.rc_buf_optimal_sz = 600;
cfg_.rc_buf_sz = 1000;
cfg_.g_usage = kUsageProfile;
cfg_.g_error_resilient = 0;
// Low-latency settings.
cfg_.rc_end_usage = AOM_CBR; // Constant Bit Rate (CBR) mode
cfg_.g_pass = AOM_RC_ONE_PASS; // One-pass rate control
cfg_.g_lag_in_frames = kLagInFrames; // No look ahead when lag equals 0.
if (frame_for_encode_ != nullptr) {
aom_img_free(frame_for_encode_);
frame_for_encode_ = nullptr;
}
// Flag options: AOM_CODEC_USE_PSNR and AOM_CODEC_USE_HIGHBITDEPTH
aom_codec_flags_t flags = 0;
// Initialize an encoder instance.
ret = aom_codec_enc_init(&ctx_, aom_codec_av1_cx(), &cfg_, flags);
if (ret != AOM_CODEC_OK) {
RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret
<< " on aom_codec_enc_init.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
inited_ = true;
// Set control parameters
SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_SET_CPUUSED,
GetCpuSpeed(cfg_.g_w, cfg_.g_h));
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_CDEF, 1);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_TPL_MODEL, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_DELTAQ_MODE, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_ORDER_HINT, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_AQ_MODE, 3);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_SET_MAX_INTRA_BITRATE_PCT, 300);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_COEFF_COST_UPD_FREQ, 3);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MODE_COST_UPD_FREQ, 3);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MV_COST_UPD_FREQ, 3);
if (codec_settings->mode == VideoCodecMode::kScreensharing) {
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TUNE_CONTENT,
AOM_CONTENT_SCREEN);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 1);
} else {
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 0);
}
if (codec_settings->mode == VideoCodecMode::kRealtimeVideo &&
encoder_settings_.GetFrameDropEnabled() && max_consec_frame_drop_ > 0) {
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MAX_CONSEC_FRAME_DROP_CBR,
max_consec_frame_drop_);
}
if (cfg_.g_threads == 8) {
// Values passed to AV1E_SET_TILE_ROWS and AV1E_SET_TILE_COLUMNS are log2()
// based.
// Use 4 tile columns x 2 tile rows for 8 threads.
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_ROWS, 1);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_COLUMNS, 2);
} else if (cfg_.g_threads == 4) {
// Use 2 tile columns x 2 tile rows for 4 threads.
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_ROWS, 1);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_COLUMNS, 1);
} else {
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_TILE_COLUMNS,
static_cast<int>(log2(cfg_.g_threads)));
}
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ROW_MT, 1);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_OBMC, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_NOISE_SENSITIVITY, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_WARPED_MOTION, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_GLOBAL_MOTION, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_REF_FRAME_MVS, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(
AV1E_SET_SUPERBLOCK_SIZE,
GetSuperblockSize(cfg_.g_w, cfg_.g_h, cfg_.g_threads));
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_CFL_INTRA, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_SMOOTH_INTRA, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_ANGLE_DELTA, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_FILTER_INTRA, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_INTRA_DEFAULT_TX_ONLY, 1);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_DISABLE_TRELLIS_QUANT, 1);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DIST_WTD_COMP, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DIFF_WTD_COMP, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_DUAL_FILTER, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTERINTRA_COMP, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTERINTRA_WEDGE, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTRA_EDGE_FILTER, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_INTRABC, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_MASKED_COMP, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PAETH_INTRA, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_QM, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_RECT_PARTITIONS, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_RESTORATION, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_SMOOTH_INTERINTRA, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_TX64, 0);
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MAX_REFERENCE_FRAMES, 3);
return WEBRTC_VIDEO_CODEC_OK;
}
template <typename P>
bool LibaomAv1Encoder::SetEncoderControlParameters(int param_id,
P param_value) {
aom_codec_err_t error_code = aom_codec_control(&ctx_, param_id, param_value);
if (error_code != AOM_CODEC_OK) {
RTC_LOG(LS_WARNING)
<< "LibaomAv1Encoder::SetEncoderControlParameters returned "
<< error_code << " on id: " << param_id << ".";
}
return error_code == AOM_CODEC_OK;
}
// Only positive speeds, range for real-time coding currently is: 6 - 8.
// Lower means slower/better quality, higher means fastest/lower quality.
int LibaomAv1Encoder::GetCpuSpeed(int width, int height) {
  const int pixel_count = width * height;
  if (aux_config_) {
    // Use the speed configured for the smallest max-pixel-count bucket that
    // still covers this resolution; fall back to the fastest speed.
    const auto& speed_table = aux_config_->max_pixel_count_to_cpu_speed;
    const auto entry = speed_table.lower_bound(pixel_count);
    return entry != speed_table.end() ? entry->second : 10;
  }
  // For smaller resolutions, use lower speed setting (get some coding gain at
  // the cost of increased encoding complexity).
  switch (encoder_settings_.GetVideoEncoderComplexity()) {
    case VideoCodecComplexity::kComplexityHigh:
      if (pixel_count <= 320 * 180) {
        return 8;
      }
      return pixel_count <= 640 * 360 ? 9 : 10;
    case VideoCodecComplexity::kComplexityHigher:
      if (pixel_count <= 320 * 180) {
        return 7;
      }
      if (pixel_count <= 640 * 360) {
        return 8;
      }
      return pixel_count <= 1280 * 720 ? 9 : 10;
    case VideoCodecComplexity::kComplexityMax:
      if (pixel_count <= 320 * 180) {
        return 6;
      }
      if (pixel_count <= 640 * 360) {
        return 7;
      }
      return pixel_count <= 1280 * 720 ? 8 : 9;
    default:
      return 10;
  }
}
// Returns the number of encoder threads to use for the given resolution and
// core count, restricted to values that match a possible number of
// column/row tiles (1, 2, 4, 8). See the AV1E_SET_TILE_COLUMNS/ROWS
// configuration in InitEncode().
int LibaomAv1Encoder::NumberOfThreads(int width,
                                      int height,
                                      int number_of_cores) {
  if (width * height > 1280 * 720 && number_of_cores > 8) {
    return 8;
  } else if (width * height >= 640 * 360 && number_of_cores > 4) {
    return 4;
  } else if (width * height >= 320 * 180 && number_of_cores > 2) {
    return 2;
  }
  // NOTE(review): a previous `#ifdef MOBILE_ARM` branch here re-tested
  // `width * height >= 320 * 180 && number_of_cores > 2`, which is always
  // false once this point is reached (the else-branch of the same condition),
  // so it was unreachable and has been removed. If ARM devices are meant to
  // use 2 threads for sub-320x180 input, that condition must be re-added
  // explicitly -- confirm the original intent before relying on it.
  // 1 thread less than VGA.
  return 1;
}
// Translates the scalability structure configuration into libaom SVC
// parameters stored in `svc_params_`. Returns false when the configuration
// exceeds what AV1 supports (4 spatial / 8 temporal layers).
bool LibaomAv1Encoder::SetSvcParams(
    ScalableVideoController::StreamLayersConfig svc_config) {
  const int num_spatial = svc_config.num_spatial_layers;
  const int num_temporal = svc_config.num_temporal_layers;
  if (num_spatial <= 1 && num_temporal <= 1) {
    // Single-layer stream: no SVC parameters are needed.
    svc_params_ = absl::nullopt;
    return true;
  }
  if (num_spatial < 1 || num_spatial > 4) {
    RTC_LOG(LS_WARNING) << "Av1 supports up to 4 spatial layers. "
                        << num_spatial << " configured.";
    return false;
  }
  if (num_temporal < 1 || num_temporal > 8) {
    RTC_LOG(LS_WARNING) << "Av1 supports up to 8 temporal layers. "
                        << num_temporal << " configured.";
    return false;
  }
  aom_svc_params_t& svc_params = svc_params_.emplace();
  svc_params.number_spatial_layers = num_spatial;
  svc_params.number_temporal_layers = num_temporal;
  // Same quantizer range for every (spatial, temporal) layer combination.
  for (int i = 0; i < num_spatial * num_temporal; ++i) {
    svc_params.min_quantizers[i] = kQpMin;
    svc_params.max_quantizers[i] = encoder_settings_.qpMax;
  }
  // Assume each temporal layer doubles framerate.
  for (int tid = 0; tid < num_temporal; ++tid) {
    svc_params.framerate_factor[tid] = 1 << (num_temporal - tid - 1);
  }
  for (int sid = 0; sid < num_spatial; ++sid) {
    svc_params.scaling_factor_num[sid] = svc_config.scaling_factor_num[sid];
    svc_params.scaling_factor_den[sid] = svc_config.scaling_factor_den[sid];
  }
  // svc_params.layer_target_bitrate is set in SetRates() before svc_params is
  // passed to SetEncoderControlParameters(AV1E_SET_SVC_PARAMS).
  return true;
}
// Tells libaom which spatial/temporal layer the next encoded frame belongs
// to, via the AV1E_SET_SVC_LAYER_ID control.
void LibaomAv1Encoder::SetSvcLayerId(
    const ScalableVideoController::LayerFrameConfig& layer_frame) {
  aom_svc_layer_id_t layer_id = {};
  layer_id.spatial_layer_id = layer_frame.SpatialId();
  layer_id.temporal_layer_id = layer_frame.TemporalId();
  SetEncoderControlParameters(AV1E_SET_SVC_LAYER_ID, &layer_id);
}
// Maps the generic buffer usages of `layer_frame` onto libaom's named
// reference slots and applies them via AV1E_SET_SVC_REF_FRAME_CONFIG.
void LibaomAv1Encoder::SetSvcRefFrameConfig(
    const ScalableVideoController::LayerFrameConfig& layer_frame) {
  // Buffer name to use for each layer_frame.buffers position. In particular
  // when there are 2 buffers are referenced, prefer name them last and golden,
  // because av1 bitstream format has dedicated fields for these two names.
  // See last_frame_idx and golden_frame_idx in the av1 spec
  // https://aomediacodec.github.io/av1-spec/av1-spec.pdf
  static constexpr int kPreferedSlotName[] = {0,  // Last
                                              3,  // Golden
                                              1, 2, 4, 5, 6};
  static constexpr int kAv1NumBuffers = 8;
  aom_svc_ref_frame_config_t ref_frame_config = {};
  const size_t num_buffers = layer_frame.Buffers().size();
  RTC_CHECK_LE(num_buffers, ABSL_ARRAYSIZE(kPreferedSlotName));
  for (size_t i = 0; i < num_buffers; ++i) {
    const CodecBufferUsage& buffer = layer_frame.Buffers()[i];
    RTC_CHECK_GE(buffer.id, 0);
    RTC_CHECK_LT(buffer.id, kAv1NumBuffers);
    const int slot_name = kPreferedSlotName[i];
    ref_frame_config.ref_idx[slot_name] = buffer.id;
    // `ref_frame_config` is zero-initialized, so only set flags when true.
    ref_frame_config.reference[slot_name] = buffer.referenced ? 1 : 0;
    if (buffer.updated) {
      ref_frame_config.refresh[buffer.id] = 1;
    }
  }
  SetEncoderControlParameters(AV1E_SET_SVC_REF_FRAME_CONFIG, &ref_frame_config);
}
// Stores the callback that Encode() uses to deliver encoded images.
// Always succeeds.
int32_t LibaomAv1Encoder::RegisterEncodeCompleteCallback(
    EncodedImageCallback* encoded_image_callback) {
  encoded_image_callback_ = encoded_image_callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
// Frees the wrapped input image and destroys the libaom encoder instance.
// On destroy failure `inited_` intentionally stays set and
// WEBRTC_VIDEO_CODEC_MEMORY is returned.
int32_t LibaomAv1Encoder::Release() {
  if (frame_for_encode_) {
    aom_img_free(frame_for_encode_);
    frame_for_encode_ = nullptr;
  }
  if (inited_) {
    if (aom_codec_destroy(&ctx_) != AOM_CODEC_OK) {
      return WEBRTC_VIDEO_CODEC_MEMORY;
    }
    inited_ = false;
  }
  rates_configured_ = false;
  return WEBRTC_VIDEO_CODEC_OK;
}
// Ensures `frame_for_encode_` wraps an image of pixel format `fmt`,
// (re)creating the wrapper only when the format actually changes.
void LibaomAv1Encoder::MaybeRewrapImgWithFormat(const aom_img_fmt_t fmt) {
  if (frame_for_encode_ != nullptr && frame_for_encode_->fmt == fmt) {
    // Already wrapped with the requested format: nothing to do.
    return;
  }
  if (frame_for_encode_ != nullptr) {
    RTC_LOG(LS_INFO) << "Switching AV1 encoder pixel format to "
                     << (fmt == AOM_IMG_FMT_NV12 ? "NV12" : "I420");
    aom_img_free(frame_for_encode_);
  }
  frame_for_encode_ =
      aom_img_wrap(nullptr, fmt, cfg_.g_w, cfg_.g_h, 1, nullptr);
}
// Encodes `frame` once per configured spatial layer and delivers every
// produced AV1 packet to `encoded_image_callback_`. Requires InitEncode(),
// SetRates() and RegisterEncodeCompleteCallback() to have been called first.
int32_t LibaomAv1Encoder::Encode(
    const VideoFrame& frame,
    const std::vector<VideoFrameType>* frame_types) {
  if (!inited_ || encoded_image_callback_ == nullptr || !rates_configured_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  // A keyframe is forced if any requested frame type is a key frame.
  bool keyframe_required =
      frame_types != nullptr &&
      absl::c_linear_search(*frame_types, VideoFrameType::kVideoFrameKey);
  std::vector<ScalableVideoController::LayerFrameConfig> layer_frames =
      svc_controller_->NextFrameConfig(keyframe_required);
  if (layer_frames.empty()) {
    RTC_LOG(LS_ERROR) << "SVCController returned no configuration for a frame.";
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  rtc::scoped_refptr<VideoFrameBuffer> buffer = frame.video_frame_buffer();
  // Pixel formats libaom can consume directly (no conversion needed).
  absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
      supported_formats = {VideoFrameBuffer::Type::kI420,
                           VideoFrameBuffer::Type::kNV12};
  rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
  if (buffer->type() != VideoFrameBuffer::Type::kNative) {
    // `buffer` is already mapped.
    mapped_buffer = buffer;
  } else {
    // Attempt to map to one of the supported formats.
    mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
  }
  // Convert input frame to I420, if needed. (kI420A is accepted as well; its
  // alpha plane is simply ignored in the switch below.)
  if (!mapped_buffer ||
      (absl::c_find(supported_formats, mapped_buffer->type()) ==
           supported_formats.end() &&
       mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
    rtc::scoped_refptr<I420BufferInterface> converted_buffer(buffer->ToI420());
    if (!converted_buffer) {
      RTC_LOG(LS_ERROR) << "Failed to convert "
                        << VideoFrameBufferTypeToString(
                               frame.video_frame_buffer()->type())
                        << " image to I420. Can't encode frame.";
      return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
    }
    RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
              converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
    mapped_buffer = converted_buffer;
  }
  switch (mapped_buffer->type()) {
    case VideoFrameBuffer::Type::kI420:
    case VideoFrameBuffer::Type::kI420A: {
      // Set frame_for_encode_ data pointers and strides.
      MaybeRewrapImgWithFormat(AOM_IMG_FMT_I420);
      auto i420_buffer = mapped_buffer->GetI420();
      RTC_DCHECK(i420_buffer);
      frame_for_encode_->planes[AOM_PLANE_Y] =
          const_cast<unsigned char*>(i420_buffer->DataY());
      frame_for_encode_->planes[AOM_PLANE_U] =
          const_cast<unsigned char*>(i420_buffer->DataU());
      frame_for_encode_->planes[AOM_PLANE_V] =
          const_cast<unsigned char*>(i420_buffer->DataV());
      frame_for_encode_->stride[AOM_PLANE_Y] = i420_buffer->StrideY();
      frame_for_encode_->stride[AOM_PLANE_U] = i420_buffer->StrideU();
      frame_for_encode_->stride[AOM_PLANE_V] = i420_buffer->StrideV();
      break;
    }
    case VideoFrameBuffer::Type::kNV12: {
      MaybeRewrapImgWithFormat(AOM_IMG_FMT_NV12);
      const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12();
      RTC_DCHECK(nv12_buffer);
      // NV12 interleaves chroma; libaom reads the UV pair via the U plane.
      frame_for_encode_->planes[AOM_PLANE_Y] =
          const_cast<unsigned char*>(nv12_buffer->DataY());
      frame_for_encode_->planes[AOM_PLANE_U] =
          const_cast<unsigned char*>(nv12_buffer->DataUV());
      frame_for_encode_->planes[AOM_PLANE_V] = nullptr;
      frame_for_encode_->stride[AOM_PLANE_Y] = nv12_buffer->StrideY();
      frame_for_encode_->stride[AOM_PLANE_U] = nv12_buffer->StrideUV();
      frame_for_encode_->stride[AOM_PLANE_V] = 0;
      break;
    }
    default:
      return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
  }
  // Nominal duration of one frame in RTP ticks, derived from the configured
  // max framerate rather than from actual frame timestamps.
  const uint32_t duration =
      kRtpTicksPerSecond / static_cast<float>(encoder_settings_.maxFramerate);
  timestamp_ += duration;
  const size_t num_spatial_layers =
      svc_params_ ? svc_params_->number_spatial_layers : 1;
  auto next_layer_frame = layer_frames.begin();
  for (size_t i = 0; i < num_spatial_layers; ++i) {
    // The libaom AV1 encoder requires that `aom_codec_encode` is called for
    // every spatial layer, even if the configured bitrate for that layer is
    // zero. For zero bitrate spatial layers no frames will be produced.
    absl::optional<ScalableVideoController::LayerFrameConfig>
        non_encoded_layer_frame;
    ScalableVideoController::LayerFrameConfig* layer_frame;
    if (next_layer_frame != layer_frames.end() &&
        next_layer_frame->SpatialId() == static_cast<int>(i)) {
      layer_frame = &*next_layer_frame;
      ++next_layer_frame;
    } else {
      // For layers that are not encoded only the spatial id matters.
      non_encoded_layer_frame.emplace().S(i);
      layer_frame = &*non_encoded_layer_frame;
    }
    const bool end_of_picture = (next_layer_frame == layer_frames.end());
    aom_enc_frame_flags_t flags =
        layer_frame->IsKeyframe() ? AOM_EFLAG_FORCE_KF : 0;
    if (SvcEnabled()) {
      SetSvcLayerId(*layer_frame);
      SetSvcRefFrameConfig(*layer_frame);
    }
    // Encode a frame. The presentation timestamp `pts` should not use real
    // timestamps from frames or the wall clock, as that can cause the rate
    // controller to misbehave.
    aom_codec_err_t ret =
        aom_codec_encode(&ctx_, frame_for_encode_, timestamp_, duration, flags);
    if (ret != AOM_CODEC_OK) {
      RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encode returned " << ret
                          << " on aom_codec_encode.";
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    if (non_encoded_layer_frame) {
      // Nothing to deliver for a layer that was not actually encoded.
      continue;
    }
    // Get encoded image data.
    EncodedImage encoded_image;
    aom_codec_iter_t iter = nullptr;
    int data_pkt_count = 0;
    while (const aom_codec_cx_pkt_t* pkt =
               aom_codec_get_cx_data(&ctx_, &iter)) {
      if (pkt->kind == AOM_CODEC_CX_FRAME_PKT && pkt->data.frame.sz > 0) {
        if (data_pkt_count > 0) {
          RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encoder returned more than "
                                 "one data packet for an input video frame.";
          Release();
        }
        encoded_image.SetEncodedData(EncodedImageBuffer::Create(
            /*data=*/static_cast<const uint8_t*>(pkt->data.frame.buf),
            /*size=*/pkt->data.frame.sz));
        if ((pkt->data.frame.flags & AOM_EFLAG_FORCE_KF) != 0) {
          layer_frame->Keyframe();
        }
        encoded_image._frameType = layer_frame->IsKeyframe()
                                       ? VideoFrameType::kVideoFrameKey
                                       : VideoFrameType::kVideoFrameDelta;
        encoded_image.SetRtpTimestamp(frame.timestamp());
        encoded_image.SetCaptureTimeIdentifier(frame.capture_time_identifier());
        encoded_image.capture_time_ms_ = frame.render_time_ms();
        encoded_image.rotation_ = frame.rotation();
        encoded_image.content_type_ = VideoContentType::UNSPECIFIED;
        // If encoded image width/height info are added to aom_codec_cx_pkt_t,
        // use those values in lieu of the values in frame.
        if (svc_params_) {
          int n = svc_params_->scaling_factor_num[layer_frame->SpatialId()];
          int d = svc_params_->scaling_factor_den[layer_frame->SpatialId()];
          encoded_image._encodedWidth = cfg_.g_w * n / d;
          encoded_image._encodedHeight = cfg_.g_h * n / d;
          encoded_image.SetSpatialIndex(layer_frame->SpatialId());
          encoded_image.SetTemporalIndex(layer_frame->TemporalId());
        } else {
          encoded_image._encodedWidth = cfg_.g_w;
          encoded_image._encodedHeight = cfg_.g_h;
        }
        encoded_image.timing_.flags = VideoSendTiming::kInvalid;
        int qp = -1;
        SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_GET_LAST_QUANTIZER, &qp);
        encoded_image.qp_ = qp;
        encoded_image.SetColorSpace(frame.color_space());
        ++data_pkt_count;
      }
    }
    // Deliver encoded image data.
    if (encoded_image.size() > 0) {
      CodecSpecificInfo codec_specific_info;
      codec_specific_info.codecType = kVideoCodecAV1;
      codec_specific_info.end_of_picture = end_of_picture;
      codec_specific_info.scalability_mode = scalability_mode_;
      bool is_keyframe = layer_frame->IsKeyframe();
      codec_specific_info.generic_frame_info =
          svc_controller_->OnEncodeDone(*layer_frame);
      if (is_keyframe && codec_specific_info.generic_frame_info) {
        // Keyframes (re)announce the full dependency structure, including the
        // render resolution of every spatial layer.
        codec_specific_info.template_structure =
            svc_controller_->DependencyStructure();
        auto& resolutions = codec_specific_info.template_structure->resolutions;
        if (SvcEnabled()) {
          resolutions.resize(svc_params_->number_spatial_layers);
          for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
            int n = svc_params_->scaling_factor_num[sid];
            int d = svc_params_->scaling_factor_den[sid];
            resolutions[sid] =
                RenderResolution(cfg_.g_w * n / d, cfg_.g_h * n / d);
          }
        } else {
          resolutions = {RenderResolution(cfg_.g_w, cfg_.g_h)};
        }
      }
      encoded_image_callback_->OnEncodedImage(encoded_image,
                                              &codec_specific_info);
    }
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
// Applies a new target bitrate allocation and framerate. Must be called after
// InitEncode(); when SVC is enabled this also pushes cumulative per-layer
// bitrates to libaom via AV1E_SET_SVC_PARAMS.
void LibaomAv1Encoder::SetRates(const RateControlParameters& parameters) {
  if (!inited_) {
    RTC_LOG(LS_WARNING) << "SetRates() while encoder is not initialized";
    return;
  }
  if (parameters.framerate_fps < kMinimumFrameRate) {
    RTC_LOG(LS_WARNING) << "Unsupported framerate (must be >= "
                        << kMinimumFrameRate
                        << " ): " << parameters.framerate_fps;
    return;
  }
  if (parameters.bitrate.get_sum_bps() == 0) {
    RTC_LOG(LS_WARNING) << "Attempt to set target bit rate to zero";
    return;
  }
  // The bitrates calculated internally in libaom when `AV1E_SET_SVC_PARAMS` is
  // called depend on the currently configured `rc_target_bitrate`. If the
  // total target bitrate is not updated first a division by zero could happen.
  svc_controller_->OnRatesUpdated(parameters.bitrate);
  cfg_.rc_target_bitrate = parameters.bitrate.get_sum_kbps();
  aom_codec_err_t error_code = aom_codec_enc_config_set(&ctx_, &cfg_);
  if (error_code != AOM_CODEC_OK) {
    RTC_LOG(LS_WARNING) << "Error configuring encoder, error code: "
                        << error_code;
  }
  if (SvcEnabled()) {
    for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) {
      // libaom bitrate for spatial id S and temporal id T means bitrate
      // of frames with spatial_id=S and temporal_id<=T
      // while `parameters.bitrate` provides bitrate of frames with
      // spatial_id=S and temporal_id=T
      int accumulated_bitrate_bps = 0;
      for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) {
        int layer_index = sid * svc_params_->number_temporal_layers + tid;
        accumulated_bitrate_bps += parameters.bitrate.GetBitrate(sid, tid);
        // `svc_params_->layer_target_bitrate` expects bitrate in kbps.
        svc_params_->layer_target_bitrate[layer_index] =
            accumulated_bitrate_bps / 1000;
      }
    }
    SetEncoderControlParameters(AV1E_SET_SVC_PARAMS, &*svc_params_);
  }
  rates_configured_ = true;
  // Set frame rate to closest integer value.
  encoder_settings_.maxFramerate =
      static_cast<uint32_t>(parameters.framerate_fps + 0.5);
}
// Describes this encoder's capabilities: implementation name, quality
// scaling policy, supported pixel formats, per-layer fps fractions, and any
// field-trial bitrate-limit overrides.
VideoEncoder::EncoderInfo LibaomAv1Encoder::GetEncoderInfo() const {
  EncoderInfo info;
  info.supports_native_handle = false;
  info.implementation_name = "libaom";
  info.has_trusted_rate_controller = true;
  info.is_hardware_accelerated = false;
  // Quality scaling is disabled once initialized with automatic resize off.
  if (inited_ && !encoder_settings_.AV1().automatic_resize_on) {
    info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
  } else {
    info.scaling_settings =
        VideoEncoder::ScalingSettings(kMinQindex, kMaxQindex);
  }
  info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
                                  VideoFrameBuffer::Type::kNV12};
  if (SvcEnabled()) {
    const int num_spatial = svc_params_->number_spatial_layers;
    const int num_temporal = svc_params_->number_temporal_layers;
    for (int sid = 0; sid < num_spatial; ++sid) {
      auto& fps_fractions = info.fps_allocation[sid];
      fps_fractions.resize(num_temporal);
      for (int tid = 0; tid < num_temporal; ++tid) {
        fps_fractions[tid] = EncoderInfo::kMaxFramerateFraction /
                             svc_params_->framerate_factor[tid];
      }
    }
  }
  if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
    info.resolution_bitrate_limits =
        encoder_info_override_.resolution_bitrate_limits();
  }
  return info;
}
} // namespace
// Creates a libaom AV1 encoder with no auxiliary configuration, using the
// global field-trial set.
std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder() {
  return std::make_unique<LibaomAv1Encoder>(absl::nullopt,
                                            FieldTrialBasedConfig());
}
// Creates a libaom AV1 encoder with an explicit auxiliary configuration
// (e.g. a custom pixel-count -> cpu-speed mapping).
std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder(
    const LibaomAv1EncoderAuxConfig& aux_config) {
  return std::make_unique<LibaomAv1Encoder>(aux_config,
                                            FieldTrialBasedConfig());
}
} // namespace webrtc

View file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
#define MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_
#include <map>
#include <memory>
#include "absl/strings/string_view.h"
#include "api/video_codecs/video_encoder.h"
namespace webrtc {
// Auxiliary configuration accepted by the CreateLibaomAv1Encoder() overload
// below.
struct LibaomAv1EncoderAuxConfig {
  // A map of max pixel count --> cpu speed.
  std::map<int, int> max_pixel_count_to_cpu_speed;
};
std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder();
std::unique_ptr<VideoEncoder> CreateLibaomAv1Encoder(
const LibaomAv1EncoderAuxConfig& aux_config);
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_AV1_LIBAOM_AV1_ENCODER_H_

View file

@ -0,0 +1,439 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
#include <limits>
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/test/create_frame_generator.h"
#include "api/test/frame_generator_interface.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "test/field_trial.h"
#include "test/gmock.h"
#include "test/gtest.h"
namespace webrtc {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Field;
using ::testing::IsEmpty;
using ::testing::SizeIs;
// Builds the baseline 320x180@30fps AV1 codec configuration shared by the
// tests in this file.
VideoCodec DefaultCodecSettings() {
  VideoCodec settings;
  settings.codecType = kVideoCodecAV1;
  settings.width = 320;
  settings.height = 180;
  settings.maxFramerate = 30;
  settings.startBitrate = 1000;
  settings.qpMax = 63;
  return settings;
}
// Encoder settings with a single core and a typical RTP payload size.
VideoEncoder::Settings DefaultEncoderSettings() {
  return VideoEncoder::Settings(
      VideoEncoder::Capabilities(/*loss_notification=*/false),
      /*number_of_cores=*/1, /*max_payload_size=*/1200);
}
// Smoke test: the factory returns a non-null encoder.
TEST(LibaomAv1EncoderTest, CanCreate) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  EXPECT_TRUE(encoder);
}
// InitEncode() followed by Release() succeeds with default settings.
TEST(LibaomAv1EncoderTest, InitAndRelease) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  ASSERT_TRUE(encoder);
  VideoCodec codec_settings = DefaultCodecSettings();
  EXPECT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  EXPECT_EQ(encoder->Release(), WEBRTC_VIDEO_CODEC_OK);
}
// A zero-bitrate top temporal layer must deactivate the decode target that
// includes it. (NOTE(review): "Refeclted" in the test name is a typo for
// "Reflected"; kept as-is since test names may be referenced by filters.)
TEST(LibaomAv1EncoderTest, NoBitrateOnTopLayerRefecltedInActiveDecodeTargets) {
  // Configure encoder with 2 temporal layers.
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL1T2);
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  VideoEncoder::RateControlParameters rate_parameters;
  rate_parameters.framerate_fps = 30;
  rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/0, 300'000);
  rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/1, 0);
  encoder->SetRates(rate_parameters);
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
  ASSERT_THAT(encoded_frames, SizeIs(1));
  ASSERT_NE(encoded_frames[0].codec_specific_info.generic_frame_info,
            absl::nullopt);
  // Assuming L1T2 structure uses 1st decode target for T0 and 2nd decode target
  // for T0+T1 frames, expect only 1st decode target is active.
  EXPECT_EQ(encoded_frames[0]
                .codec_specific_info.generic_frame_info->active_decode_targets,
            0b01);
}
// With L2T1, only the first spatial layer of the first temporal unit is a key
// frame; the upper spatial layer is reported as a delta frame.
TEST(LibaomAv1EncoderTest,
     SpatialScalabilityInTemporalUnitReportedAsDeltaFrame) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  VideoEncoder::RateControlParameters rate_parameters;
  rate_parameters.framerate_fps = 30;
  rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
  rate_parameters.bitrate.SetBitrate(/*spatial_index=*/1, 0, 300'000);
  encoder->SetRates(rate_parameters);
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
  ASSERT_THAT(encoded_frames, SizeIs(2));
  EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
              Eq(VideoFrameType::kVideoFrameKey));
  EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
              Eq(VideoFrameType::kVideoFrameDelta));
}
// When the top spatial layer has zero bitrate, encoding still proceeds on the
// base layer and subsequent frames are delta frames.
TEST(LibaomAv1EncoderTest, NoBitrateOnTopSpatialLayerProduceDeltaFrames) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  VideoEncoder::RateControlParameters rate_parameters;
  rate_parameters.framerate_fps = 30;
  rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
  rate_parameters.bitrate.SetBitrate(/*spatial_index=*/1, 0, 0);
  encoder->SetRates(rate_parameters);
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode();
  ASSERT_THAT(encoded_frames, SizeIs(2));
  EXPECT_THAT(encoded_frames[0].encoded_image._frameType,
              Eq(VideoFrameType::kVideoFrameKey));
  EXPECT_THAT(encoded_frames[1].encoded_image._frameType,
              Eq(VideoFrameType::kVideoFrameDelta));
}
// With 3 spatial layers, only the last (highest) layer of each temporal unit
// carries end_of_picture = true.
TEST(LibaomAv1EncoderTest, SetsEndOfPictureForLastFrameInTemporalUnit) {
  VideoBitrateAllocation allocation;
  allocation.SetBitrate(0, 0, 30000);
  allocation.SetBitrate(1, 0, 40000);
  allocation.SetBitrate(2, 0, 30000);
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  // Configure encoder with 3 spatial layers.
  codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
  codec_settings.startBitrate = allocation.get_sum_kbps();
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode();
  ASSERT_THAT(encoded_frames, SizeIs(6));
  EXPECT_FALSE(encoded_frames[0].codec_specific_info.end_of_picture);
  EXPECT_FALSE(encoded_frames[1].codec_specific_info.end_of_picture);
  EXPECT_TRUE(encoded_frames[2].codec_specific_info.end_of_picture);
  EXPECT_FALSE(encoded_frames[3].codec_specific_info.end_of_picture);
  EXPECT_FALSE(encoded_frames[4].codec_specific_info.end_of_picture);
  EXPECT_TRUE(encoded_frames[5].codec_specific_info.end_of_picture);
}
// Regression test: odd input dimensions with 3 spatial layers must not crash
// the encoder and should still produce one frame per layer.
TEST(LibaomAv1EncoderTest, CheckOddDimensionsWithSpatialLayers) {
  VideoBitrateAllocation allocation;
  allocation.SetBitrate(0, 0, 30000);
  allocation.SetBitrate(1, 0, 40000);
  allocation.SetBitrate(2, 0, 30000);
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  // Configure encoder with 3 spatial layers.
  codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
  // Odd width and height values should not make encoder crash.
  codec_settings.width = 623;
  codec_settings.height = 405;
  codec_settings.startBitrate = allocation.get_sum_kbps();
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));
  EncodedVideoFrameProducer evfp(*encoder);
  evfp.SetResolution(RenderResolution{623, 405});
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      evfp.SetNumInputFrames(2).Encode();
  ASSERT_THAT(encoded_frames, SizeIs(6));
}
// With the MaxConsecFrameDrop field trial set to 2 and a starved bitrate, the
// encoder may drop at most 2 frames in a row, so 4 input frames yield exactly
// 2 encoded frames (the first and the fourth).
TEST(LibaomAv1EncoderTest, WithMaximumConsecutiveFrameDrop) {
  test::ScopedFieldTrials field_trials(
      "WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop/maxdrop:2/");
  VideoBitrateAllocation allocation;
  allocation.SetBitrate(0, 0, 1000);  // some very low bitrate
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetFrameDropEnabled(true);
  codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  codec_settings.startBitrate = allocation.get_sum_kbps();
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));
  EncodedVideoFrameProducer evfp(*encoder);
  evfp.SetResolution(
      RenderResolution{codec_settings.width, codec_settings.height});
  // We should code the first frame, skip two, then code another frame.
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      evfp.SetNumInputFrames(4).Encode();
  ASSERT_THAT(encoded_frames, SizeIs(2));
  // The 4 frames have default Rtp-timestamps of 1000, 4000, 7000, 10000.
  // Use ASSERT_EQ for a value comparison; the previous ASSERT_THAT with a bare
  // value relied on the implicit value-to-Eq-matcher conversion.
  ASSERT_EQ(encoded_frames[1].encoded_image.RtpTimestamp(), 10000u);
}
// By default, GetEncoderInfo() must not report any resolution bitrate limits.
TEST(LibaomAv1EncoderTest, EncoderInfoWithoutResolutionBitrateLimits) {
  const std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  EXPECT_THAT(encoder->GetEncoderInfo().resolution_bitrate_limits, IsEmpty());
}
// GetEncoderInfo() should surface the resolution bitrate limits configured
// through the WebRTC-Av1-GetEncoderInfoOverride field trial, one limit per
// frame_size_pixels entry, in the order given.
TEST(LibaomAv1EncoderTest, EncoderInfoWithBitrateLimitsFromFieldTrial) {
  test::ScopedFieldTrials field_trials(
      "WebRTC-Av1-GetEncoderInfoOverride/"
      "frame_size_pixels:123|456|789,"
      "min_start_bitrate_bps:11000|22000|33000,"
      "min_bitrate_bps:44000|55000|66000,"
      "max_bitrate_bps:77000|88000|99000/");
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  // Each ResolutionBitrateLimits entry is {frame_size_pixels,
  // min_start_bitrate_bps, min_bitrate_bps, max_bitrate_bps}, matching the
  // field-trial vectors column by column.
  EXPECT_THAT(
      encoder->GetEncoderInfo().resolution_bitrate_limits,
      ::testing::ElementsAre(
          VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000},
          VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000},
          VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000}));
}
// With kL3T3 the encoder must advertise an fps allocation for each of the
// three spatial layers - temporal rates at 1/4, 1/2 and full framerate - and
// leave the unused fourth entry empty.
TEST(LibaomAv1EncoderTest, EncoderInfoProvidesFpsAllocation) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL3T3);
  codec_settings.maxFramerate = 60;
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  const auto& encoder_info = encoder->GetEncoderInfo();
  for (int sid = 0; sid < 3; ++sid) {
    EXPECT_THAT(encoder_info.fps_allocation[sid],
                ElementsAre(255 / 4, 255 / 2, 255));
  }
  EXPECT_THAT(encoder_info.fps_allocation[3], IsEmpty());
}
// With an L3T1 configuration one input frame yields three encoded images
// (one per spatial layer); each must report the resolution of its own layer:
// 1/4, 1/2 and full size, in that order.
TEST(LibaomAv1EncoderTest, PopulatesEncodedFrameSize) {
  VideoBitrateAllocation allocation;
  allocation.SetBitrate(0, 0, 30000);
  allocation.SetBitrate(1, 0, 40000);
  allocation.SetBitrate(2, 0, 30000);
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.startBitrate = allocation.get_sum_kbps();
  // Sanity check: the default width must survive the 4x downscale below.
  ASSERT_GT(codec_settings.width, 4);
  // Configure encoder with 3 spatial layers.
  codec_settings.SetScalabilityMode(ScalabilityMode::kL3T1);
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));
  using Frame = EncodedVideoFrameProducer::EncodedFrame;
  std::vector<Frame> encoded_frames =
      EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode();
  // Expect the spatial layers in ascending resolution order.
  EXPECT_THAT(
      encoded_frames,
      ElementsAre(
          Field(&Frame::encoded_image,
                AllOf(Field(&EncodedImage::_encodedWidth,
                            codec_settings.width / 4),
                      Field(&EncodedImage::_encodedHeight,
                            codec_settings.height / 4))),
          Field(&Frame::encoded_image,
                AllOf(Field(&EncodedImage::_encodedWidth,
                            codec_settings.width / 2),
                      Field(&EncodedImage::_encodedHeight,
                            codec_settings.height / 2))),
          Field(&Frame::encoded_image,
                AllOf(Field(&EncodedImage::_encodedWidth, codec_settings.width),
                      Field(&EncodedImage::_encodedHeight,
                            codec_settings.height)))));
}
// Feeding an RTP timestamp right at the uint32_t wrap-around boundary must
// not confuse the encoder: the stream still begins with a key frame followed
// by a delta frame.
TEST(LibaomAv1EncoderTest, RtpTimestampWrap) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  VideoEncoder::RateControlParameters rate_parameters;
  rate_parameters.framerate_fps = 30;
  rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, 300'000);
  encoder->SetRates(rate_parameters);
  // Start at the largest representable timestamp so the second frame wraps.
  std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
      EncodedVideoFrameProducer(*encoder)
          .SetNumInputFrames(2)
          .SetRtpTimestamp(std::numeric_limits<uint32_t>::max())
          .Encode();
  ASSERT_THAT(frames, SizeIs(2));
  EXPECT_EQ(frames[0].encoded_image._frameType,
            VideoFrameType::kVideoFrameKey);
  EXPECT_EQ(frames[1].encoded_image._frameType,
            VideoFrameType::kVideoFrameDelta);
}
// The capture time identifier supplied with the input frame must be copied
// onto every encoded image produced for that frame (both spatial layers of
// the kL2T2-less kL2T1 structure).
TEST(LibaomAv1EncoderTest, TestCaptureTimeId) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  const Timestamp capture_time_id = Timestamp::Micros(2000);
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL2T1);
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  VideoEncoder::RateControlParameters rate_parameters;
  rate_parameters.framerate_fps = 30;
  for (int sid = 0; sid < 2; ++sid) {
    rate_parameters.bitrate.SetBitrate(sid, /*temporal_index=*/0, 300'000);
  }
  encoder->SetRates(rate_parameters);
  std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
      EncodedVideoFrameProducer(*encoder)
          .SetNumInputFrames(1)
          .SetCaptureTimeIdentifier(capture_time_id)
          .Encode();
  // One input frame, two spatial layers -> two encoded images.
  ASSERT_THAT(frames, SizeIs(2));
  for (const auto& frame : frames) {
    ASSERT_TRUE(frame.encoded_image.CaptureTimeIdentifier().has_value());
    EXPECT_EQ(frame.encoded_image.CaptureTimeIdentifier()->us(),
              capture_time_id.us());
  }
}
// Feeds the encoder frames with jittery RTP timestamps whose long-run average
// still matches the nominal framerate, and verifies the produced bitrate
// stays within 10% of the configured target.
TEST(LibaomAv1EncoderTest, AdheresToTargetBitrateDespiteUnevenFrameTiming) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  codec_settings.startBitrate = 300;  // kbps
  codec_settings.width = 320;
  codec_settings.height = 180;
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  const int kFps = 30;
  const int kTargetBitrateBps = codec_settings.startBitrate * 1000;
  VideoEncoder::RateControlParameters rate_parameters;
  rate_parameters.framerate_fps = kFps;
  rate_parameters.bitrate.SetBitrate(/*spatial_index=*/0, 0, kTargetBitrateBps);
  encoder->SetRates(rate_parameters);

  // Callback that just accumulates the payload size of every encoded image.
  class EncoderCallback : public EncodedImageCallback {
   public:
    EncoderCallback() = default;
    DataSize BytesEncoded() const { return bytes_encoded_; }

   private:
    Result OnEncodedImage(
        const EncodedImage& encoded_image,
        const CodecSpecificInfo* codec_specific_info) override {
      bytes_encoded_ += DataSize::Bytes(encoded_image.size());
      return Result(Result::Error::OK);
    }

    DataSize bytes_encoded_ = DataSize::Zero();
  } callback;
  encoder->RegisterEncodeCompleteCallback(&callback);

  // Insert frames with a too-low RTP timestamp delta compared to what is
  // expected based on the framerate, then insert one with ~2x the delta it
  // should have - making the per-second average correct.
  // (90000 is the RTP video clock rate, in ticks per second.)
  const uint32_t kHighTimestampDelta =
      static_cast<uint32_t>((90000.0 / kFps) * 2 + 0.5);
  const uint32_t kLowTimestampDelta =
      static_cast<uint32_t>((90000.0 - kHighTimestampDelta) / (kFps - 1));

  std::unique_ptr<test::FrameGeneratorInterface> frame_buffer_generator =
      test::CreateSquareFrameGenerator(
          codec_settings.width, codec_settings.height,
          test::FrameGeneratorInterface::OutputType::kI420, /*num_squares=*/20);

  uint32_t rtp_timestamp = 1000;
  // Only the very first frame is forced to be a key frame.
  std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};

  const int kRunTimeSeconds = 3;
  for (int i = 0; i < kRunTimeSeconds; ++i) {
    for (int j = 0; j < kFps; ++j) {
      // kFps - 1 short intervals followed by one long interval per second.
      if (j < kFps - 1) {
        rtp_timestamp += kLowTimestampDelta;
      } else {
        rtp_timestamp += kHighTimestampDelta;
      }
      VideoFrame frame = VideoFrame::Builder()
                             .set_video_frame_buffer(
                                 frame_buffer_generator->NextFrame().buffer)
                             .set_timestamp_rtp(rtp_timestamp)
                             .build();
      RTC_CHECK_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_OK);
      frame_types[0] = VideoFrameType::kVideoFrameDelta;
    }
  }

  // Expect produced bitrate to match, to within 10%.
  // This catches an issue that was seen when real frame timestamps with jitter
  // was used. It resulted in the overall produced bitrate to be overshot by
  // ~30% even though the averages should have been ok.
  EXPECT_NEAR(
      (callback.BytesEncoded() / TimeDelta::Seconds(kRunTimeSeconds)).bps(),
      kTargetBitrateBps, kTargetBitrateBps / 10);
}
// When automatic resize is disabled in the codec settings, the encoder must
// not report any quality-scaling QP thresholds.
TEST(LibaomAv1EncoderTest, DisableAutomaticResize) {
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  ASSERT_TRUE(encoder);
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.AV1()->automatic_resize_on = false;
  EXPECT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  EXPECT_FALSE(
      encoder->GetEncoderInfo().scaling_settings.thresholds.has_value());
}
} // namespace
} // namespace webrtc

View file

@ -0,0 +1,370 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <ostream>
#include <tuple>
#include <vector>
#include "absl/types/optional.h"
#include "api/units/data_size.h"
#include "api/units/time_delta.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/codecs/av1/dav1d_decoder.h"
#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/svc/create_scalability_structure.h"
#include "modules/video_coding/svc/scalability_mode_util.h"
#include "modules/video_coding/svc/scalable_video_controller.h"
#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
#include "test/gmock.h"
#include "test/gtest.h"
namespace webrtc {
namespace {
// GoogleMock matchers and helpers used throughout the tests below.
using ::testing::ContainerEq;
using ::testing::Each;
using ::testing::ElementsAreArray;
using ::testing::Ge;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::NotNull;
using ::testing::Optional;
using ::testing::Pointwise;
using ::testing::SizeIs;
using ::testing::Truly;
using ::testing::Values;

// Use a small resolution to keep these tests fast.
constexpr int kWidth = 320;
constexpr int kHeight = 180;
constexpr int kFramerate = 30;
// Builds the VideoCodec configuration shared by the tests: kL1T1 at
// 320x180@30fps, start bitrate 1 kbps, max bitrate 1000 kbps, qpMax 63.
VideoCodec DefaultCodecSettings() {
  VideoCodec settings;
  settings.width = kWidth;
  settings.height = kHeight;
  settings.maxFramerate = kFramerate;
  settings.maxBitrate = 1000;
  settings.startBitrate = 1;
  settings.qpMax = 63;
  settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  return settings;
}
// Encoder settings used by every InitEncode() call in this file: one core,
// 1200-byte payloads, no loss notification.
VideoEncoder::Settings DefaultEncoderSettings() {
  VideoEncoder::Capabilities capabilities(/*loss_notification=*/false);
  return VideoEncoder::Settings(capabilities, /*number_of_cores=*/1,
                                /*max_payload_size=*/1200);
}
// Wraps a dav1d decoder and records which frame ids were decoded successfully
// and how many output frames the decoder delivered.
class TestAv1Decoder {
 public:
  // `decoder_id` is only used to label failure messages when several decoders
  // run in the same test.
  explicit TestAv1Decoder(int decoder_id)
      : decoder_id_(decoder_id), decoder_(CreateDav1dDecoder()) {
    if (decoder_ == nullptr) {
      ADD_FAILURE() << "Failed to create a decoder#" << decoder_id_;
      return;
    }
    EXPECT_TRUE(decoder_->Configure({}));
    EXPECT_EQ(decoder_->RegisterDecodeCompleteCallback(&callback_),
              WEBRTC_VIDEO_CODEC_OK);
  }

  // This class requires pointer stability (the decoder keeps a pointer to
  // `callback_`) and thus is not copyable nor movable.
  TestAv1Decoder(const TestAv1Decoder&) = delete;
  TestAv1Decoder& operator=(const TestAv1Decoder&) = delete;

  // Decodes `image` and, on success, records `frame_id` in
  // decoded_frame_ids(). A decode failure is reported as a test failure but
  // does not abort the test.
  void Decode(int64_t frame_id, const EncodedImage& image) {
    ASSERT_THAT(decoder_, NotNull());
    int32_t error =
        decoder_->Decode(image, /*render_time_ms=*/image.capture_time_ms_);
    if (error != WEBRTC_VIDEO_CODEC_OK) {
      ADD_FAILURE() << "Failed to decode frame id " << frame_id
                    << " with error code " << error << " by decoder#"
                    << decoder_id_;
      return;
    }
    decoded_ids_.push_back(frame_id);
  }

  // Ids passed to Decode() that returned WEBRTC_VIDEO_CODEC_OK, in order.
  const std::vector<int64_t>& decoded_frame_ids() const { return decoded_ids_; }
  // Number of frames delivered through the decode-complete callback.
  size_t num_output_frames() const { return callback_.num_called(); }

 private:
  // Decoder callback that only counts how many times it was called.
  // While it is tempting to replace it with a simple mock, a mock requires
  // setting an expectation on the number of calls in advance; the tests below
  // do not know the expected number of calls until after the calls are done.
  class DecoderCallback : public DecodedImageCallback {
   public:
    size_t num_called() const { return num_called_; }

   private:
    int32_t Decoded(VideoFrame& /*decoded_image*/) override {
      ++num_called_;
      return 0;
    }
    void Decoded(VideoFrame& /*decoded_image*/,
                 absl::optional<int32_t> /*decode_time_ms*/,
                 absl::optional<uint8_t> /*qp*/) override {
      ++num_called_;
    }

    int num_called_ = 0;
  };

  const int decoder_id_;
  std::vector<int64_t> decoded_ids_;
  DecoderCallback callback_;
  const std::unique_ptr<VideoDecoder> decoder_;
};
// Round-trip smoke test: every frame the encoder produces must be decodable,
// and every decode must yield an output frame.
TEST(LibaomAv1Test, EncodeDecode) {
  TestAv1Decoder decoder(0);
  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  VideoBitrateAllocation allocation;
  allocation.SetBitrate(0, 0, 300000);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));

  std::vector<EncodedVideoFrameProducer::EncodedFrame> frames =
      EncodedVideoFrameProducer(*encoder).SetNumInputFrames(4).Encode();
  int64_t frame_id = 0;
  for (const auto& frame : frames) {
    decoder.Decode(frame_id++, frame.encoded_image);
  }

  // Check encoder produced some frames for decoder to decode.
  ASSERT_THAT(frames, Not(IsEmpty()));
  // Check decoder found all of them valid.
  EXPECT_THAT(decoder.decoded_frame_ids(), SizeIs(frames.size()));
  // Check each of them produced an output frame.
  EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size());
}
// Identifies one (spatial, temporal) layer of an SVC stream. Equality and
// strict ordering make it usable as a std::map key; the stream operator lets
// gtest print it as e.g. "S1T0".
struct LayerId {
  friend bool operator==(const LayerId& lhs, const LayerId& rhs) {
    return lhs.spatial_id == rhs.spatial_id &&
           lhs.temporal_id == rhs.temporal_id;
  }
  friend bool operator<(const LayerId& lhs, const LayerId& rhs) {
    // Lexicographic: spatial id first, temporal id as tie-breaker.
    if (lhs.spatial_id != rhs.spatial_id) {
      return lhs.spatial_id < rhs.spatial_id;
    }
    return lhs.temporal_id < rhs.temporal_id;
  }
  friend std::ostream& operator<<(std::ostream& s, const LayerId& layer) {
    s << "S" << layer.spatial_id << "T" << layer.temporal_id;
    return s;
  }

  int spatial_id = 0;
  int temporal_id = 0;
};
// Parameters for LibaomAv1SvcTest: the scalability-mode name, how many frames
// to feed the encoder, and (optionally) the per-layer bitrates to configure.
struct SvcTestParam {
  // Parses `name` (e.g. "L2T2_KEY") into a ScalabilityMode; crashes on an
  // unknown name so a typo in the test parameters is caught immediately.
  ScalabilityMode GetScalabilityMode() const {
    absl::optional<ScalabilityMode> scalability_mode =
        ScalabilityModeFromString(name);
    RTC_CHECK(scalability_mode.has_value());
    return *scalability_mode;
  }

  // Scalability structure name; also used as the gtest parameter label.
  std::string name;
  int num_frames_to_generate;
  // Per-layer target bitrates. When empty, tests that need rates pick a
  // uniform default allocation or skip.
  std::map<LayerId, DataRate> configured_bitrates;
};
// Value-parameterized fixture over SvcTestParam scalability structures.
class LibaomAv1SvcTest : public ::testing::TestWithParam<SvcTestParam> {};
// Encodes frames with the parameterized scalability structure and verifies
// that each decode target can be extracted and decoded independently.
TEST_P(LibaomAv1SvcTest, EncodeAndDecodeAllDecodeTargets) {
  const SvcTestParam param = GetParam();
  std::unique_ptr<ScalableVideoController> svc_controller =
      CreateScalabilityStructure(param.GetScalabilityMode());
  ASSERT_TRUE(svc_controller);
  // Use the configured per-layer bitrates when provided; otherwise give every
  // (spatial, temporal) layer of the structure a uniform 100 kbps.
  VideoBitrateAllocation allocation;
  if (param.configured_bitrates.empty()) {
    ScalableVideoController::StreamLayersConfig config =
        svc_controller->StreamConfig();
    for (int sid = 0; sid < config.num_spatial_layers; ++sid) {
      for (int tid = 0; tid < config.num_temporal_layers; ++tid) {
        allocation.SetBitrate(sid, tid, 100'000);
      }
    }
  } else {
    for (const auto& kv : param.configured_bitrates) {
      allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id,
                            kv.second.bps());
    }
  }
  size_t num_decode_targets =
      svc_controller->DependencyStructure().num_decode_targets;

  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(GetParam().GetScalabilityMode());
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));
  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      EncodedVideoFrameProducer(*encoder)
          .SetNumInputFrames(GetParam().num_frames_to_generate)
          .SetResolution({kWidth, kHeight})
          .Encode();
  // Every encoded frame must carry a decode target indication for each decode
  // target of the structure.
  ASSERT_THAT(
      encoded_frames,
      Each(Truly([&](const EncodedVideoFrameProducer::EncodedFrame& frame) {
        return frame.codec_specific_info.generic_frame_info &&
               frame.codec_specific_info.generic_frame_info
                       ->decode_target_indications.size() == num_decode_targets;
      })));

  for (size_t dt = 0; dt < num_decode_targets; ++dt) {
    // Feed only the frames present in decode target `dt` into a fresh
    // decoder; that subset must decode cleanly on its own.
    TestAv1Decoder decoder(dt);
    std::vector<int64_t> requested_ids;
    for (int64_t frame_id = 0;
         frame_id < static_cast<int64_t>(encoded_frames.size()); ++frame_id) {
      const EncodedVideoFrameProducer::EncodedFrame& frame =
          encoded_frames[frame_id];
      if (frame.codec_specific_info.generic_frame_info
              ->decode_target_indications[dt] !=
          DecodeTargetIndication::kNotPresent) {
        requested_ids.push_back(frame_id);
        decoder.Decode(frame_id, frame.encoded_image);
      }
      EXPECT_THAT(frame.codec_specific_info.scalability_mode,
                  Optional(param.GetScalabilityMode()));
    }

    ASSERT_THAT(requested_ids, SizeIs(Ge(2u)));
    // Check decoder found all of them valid.
    EXPECT_THAT(decoder.decoded_frame_ids(), ContainerEq(requested_ids))
        << "Decoder#" << dt;
    // Check each of them produced an output frame.
    EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size())
        << "Decoder#" << dt;
  }
}
// Tuple matcher used with Pointwise: `arg` is a 2-tuple of (LayerId, DataRate)
// pairs - the measured one and the configured one. Matches when the layer ids
// are equal and the measured bitrate is within [-25%, +25%] of the requested.
MATCHER(SameLayerIdAndBitrateIsNear, "") {
  // First check if layer id is the same.
  return std::get<0>(arg).first == std::get<1>(arg).first &&
         // check measured bitrate is not much lower than requested.
         std::get<0>(arg).second >= std::get<1>(arg).second * 0.75 &&
         // check measured bitrate is not much larger than requested.
         std::get<0>(arg).second <= std::get<1>(arg).second * 1.25;
}
// Encodes five seconds of video and verifies that the measured per-layer
// bitrate matches the configured allocation (within the tolerance of
// SameLayerIdAndBitrateIsNear).
TEST_P(LibaomAv1SvcTest, SetRatesMatchMeasuredBitrate) {
  const SvcTestParam param = GetParam();
  if (param.configured_bitrates.empty()) {
    // Rates are not configured for this particular structure, skip the test.
    return;
  }
  constexpr TimeDelta kDuration = TimeDelta::Seconds(5);

  VideoBitrateAllocation allocation;
  for (const auto& kv : param.configured_bitrates) {
    allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id,
                          kv.second.bps());
  }

  std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
  ASSERT_TRUE(encoder);
  VideoCodec codec_settings = DefaultCodecSettings();
  codec_settings.SetScalabilityMode(param.GetScalabilityMode());
  codec_settings.maxBitrate = allocation.get_sum_kbps();
  codec_settings.maxFramerate = 30;
  ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
            WEBRTC_VIDEO_CODEC_OK);
  encoder->SetRates(VideoEncoder::RateControlParameters(
      allocation, codec_settings.maxFramerate));

  std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
      EncodedVideoFrameProducer(*encoder)
          .SetNumInputFrames(codec_settings.maxFramerate * kDuration.seconds())
          .SetResolution({codec_settings.width, codec_settings.height})
          .SetFramerateFps(codec_settings.maxFramerate)
          .Encode();

  // Calculate size of each layer.
  std::map<LayerId, DataSize> layer_size;
  for (const auto& frame : encoded_frames) {
    ASSERT_TRUE(frame.codec_specific_info.generic_frame_info);
    const auto& layer = *frame.codec_specific_info.generic_frame_info;
    LayerId layer_id = {layer.spatial_id, layer.temporal_id};
    // This is almost same as
    // layer_size[layer_id] += DataSize::Bytes(frame.encoded_image.size());
    // but avoids calling deleted default constructor for DataSize.
    layer_size.emplace(layer_id, DataSize::Zero()).first->second +=
        DataSize::Bytes(frame.encoded_image.size());
  }
  // Convert size of the layer into bitrate of that layer.
  std::vector<std::pair<LayerId, DataRate>> measured_bitrates;
  for (const auto& kv : layer_size) {
    measured_bitrates.emplace_back(kv.first, kv.second / kDuration);
  }
  EXPECT_THAT(measured_bitrates, Pointwise(SameLayerIdAndBitrateIsNear(),
                                           param.configured_bitrates));
}
// Instantiates the SVC tests over a representative set of scalability
// structures. Entries without configured_bitrates only exercise
// EncodeAndDecodeAllDecodeTargets (SetRatesMatchMeasuredBitrate skips them).
// The lambda turns the structure name into the gtest parameter label.
INSTANTIATE_TEST_SUITE_P(
    Svc,
    LibaomAv1SvcTest,
    Values(SvcTestParam{"L1T1", /*num_frames_to_generate=*/4},
           SvcTestParam{"L1T2",
                        /*num_frames_to_generate=*/4,
                        /*configured_bitrates=*/
                        {{{0, 0}, DataRate::KilobitsPerSec(60)},
                         {{0, 1}, DataRate::KilobitsPerSec(40)}}},
           SvcTestParam{"L1T3", /*num_frames_to_generate=*/8},
           SvcTestParam{"L2T1",
                        /*num_frames_to_generate=*/3,
                        /*configured_bitrates=*/
                        {{{0, 0}, DataRate::KilobitsPerSec(30)},
                         {{1, 0}, DataRate::KilobitsPerSec(70)}}},
           SvcTestParam{"L2T1h",
                        /*num_frames_to_generate=*/3,
                        /*configured_bitrates=*/
                        {{{0, 0}, DataRate::KilobitsPerSec(30)},
                         {{1, 0}, DataRate::KilobitsPerSec(70)}}},
           SvcTestParam{"L2T1_KEY", /*num_frames_to_generate=*/3},
           SvcTestParam{"L3T1", /*num_frames_to_generate=*/3},
           SvcTestParam{"L3T3", /*num_frames_to_generate=*/8},
           SvcTestParam{"S2T1", /*num_frames_to_generate=*/3},
           // TODO: bugs.webrtc.org/15715 - Re-enable once AV1 is fixed.
           // SvcTestParam{"S3T3", /*num_frames_to_generate=*/8},
           SvcTestParam{"L2T2", /*num_frames_to_generate=*/4},
           SvcTestParam{"L2T2_KEY", /*num_frames_to_generate=*/4},
           SvcTestParam{"L2T2_KEY_SHIFT",
                        /*num_frames_to_generate=*/4,
                        /*configured_bitrates=*/
                        {{{0, 0}, DataRate::KilobitsPerSec(70)},
                         {{0, 1}, DataRate::KilobitsPerSec(30)},
                         {{1, 0}, DataRate::KilobitsPerSec(110)},
                         {{1, 1}, DataRate::KilobitsPerSec(80)}}}),
    [](const testing::TestParamInfo<SvcTestParam>& info) {
      return info.param.name;
    });
} // namespace
} // namespace webrtc