Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,4 @@
magjed@webrtc.org
sprang@webrtc.org
brandtr@webrtc.org
philipel@webrtc.org

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/av1_profile.h"
#include <map>
#include <utility>
#include "media/base/media_constants.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
// Maps an AV1Profile to its SDP string form. Unrecognized values (only
// possible via an out-of-range cast) fall back to "0".
absl::string_view AV1ProfileToString(AV1Profile profile) {
  if (profile == AV1Profile::kProfile1)
    return "1";
  if (profile == AV1Profile::kProfile2)
    return "2";
  // kProfile0 and anything unrecognized map to "0".
  return "0";
}
// Converts an SDP profile number string to an AV1Profile. Returns
// absl::nullopt for non-numeric input or numbers outside [0, 2].
absl::optional<AV1Profile> StringToAV1Profile(absl::string_view str) {
  const absl::optional<int> value = rtc::StringToNumber<int>(str);
  if (!value.has_value())
    return absl::nullopt;
  // The AV1Profile enumerators are declared with values 0..2 matching the
  // SDP numbers, so a range-checked cast covers every case.
  if (*value < 0 || *value > 2)
    return absl::nullopt;
  return static_cast<AV1Profile>(*value);
}
// Looks up the AV1 profile fmtp parameter. A missing parameter means
// profile 0; an unparsable value yields absl::nullopt.
absl::optional<AV1Profile> ParseSdpForAV1Profile(
    const CodecParameterMap& params) {
  const auto it = params.find(cricket::kAv1FmtpProfile);
  if (it == params.end())
    return AV1Profile::kProfile0;
  return StringToAV1Profile(it->second);
}
bool AV1IsSameProfile(const CodecParameterMap& params1,
const CodecParameterMap& params2) {
const absl::optional<AV1Profile> profile = ParseSdpForAV1Profile(params1);
const absl::optional<AV1Profile> other_profile =
ParseSdpForAV1Profile(params2);
return profile && other_profile && profile == other_profile;
}
} // namespace webrtc

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_AV1_PROFILE_H_
#define API_VIDEO_CODECS_AV1_PROFILE_H_
#include <string>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Profiles can be found at:
// https://aomedia.org/av1/specification/annex-a/#profiles
// The enum values match the number specified in the SDP.
enum class AV1Profile {
kProfile0 = 0,
kProfile1 = 1,
kProfile2 = 2,
};
// Helper function which converts an AV1Profile to std::string. Returns "0" if
// an unknown value is passed in.
RTC_EXPORT absl::string_view AV1ProfileToString(AV1Profile profile);
// Helper function which converts a std::string to AV1Profile. Returns null if
// |profile| is not a valid profile string.
absl::optional<AV1Profile> StringToAV1Profile(absl::string_view profile);
// Parses an SDP key-value map of format parameters to retrieve an AV1 profile.
// Returns an AV1Profile if one has been specified, `kProfile0` if no profile is
// specified, and an empty value if the profile key is present but contains an
// invalid value.
RTC_EXPORT absl::optional<AV1Profile> ParseSdpForAV1Profile(
const CodecParameterMap& params);
// Returns true if the parameters have the same AV1 profile or neither contains
// an AV1 profile, otherwise false.
bool AV1IsSameProfile(const CodecParameterMap& params1,
const CodecParameterMap& params2);
} // namespace webrtc
#endif // API_VIDEO_CODECS_AV1_PROFILE_H_

View file

@ -0,0 +1,37 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_BITSTREAM_PARSER_H_
#define API_VIDEO_CODECS_BITSTREAM_PARSER_H_
#include <stddef.h>
#include <stdint.h>
#include "absl/types/optional.h"
#include "api/array_view.h"
namespace webrtc {
// This class is an interface for bitstream parsers.
// NOTE(review): implementations presumably accumulate parsing state across
// successive ParseBitstream() calls — confirm against a concrete parser.
class BitstreamParser {
 public:
  virtual ~BitstreamParser() = default;
  // Parse an additional chunk of the bitstream.
  virtual void ParseBitstream(rtc::ArrayView<const uint8_t> bitstream) = 0;
  // Get the last extracted QP value from the parsed bitstream. If no QP
  // value could be parsed, returns absl::nullopt.
  virtual absl::optional<int> GetLastSliceQp() const = 0;
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_BITSTREAM_PARSER_H_

View file

@ -0,0 +1,23 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/builtin_video_decoder_factory.h"
#include <memory>
#include "media/engine/internal_decoder_factory.h"
namespace webrtc {
// Constructs the factory that provides WebRTC's built-in software decoders.
std::unique_ptr<VideoDecoderFactory> CreateBuiltinVideoDecoderFactory() {
  std::unique_ptr<VideoDecoderFactory> factory =
      std::make_unique<InternalDecoderFactory>();
  return factory;
}
} // namespace webrtc

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_BUILTIN_VIDEO_DECODER_FACTORY_H_
#define API_VIDEO_CODECS_BUILTIN_VIDEO_DECODER_FACTORY_H_
#include <memory>
#include "api/video_codecs/video_decoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Creates a new factory that can create the built-in types of video decoders.
RTC_EXPORT std::unique_ptr<VideoDecoderFactory>
CreateBuiltinVideoDecoderFactory();
} // namespace webrtc
#endif // API_VIDEO_CODECS_BUILTIN_VIDEO_DECODER_FACTORY_H_

View file

@ -0,0 +1,73 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/builtin_video_encoder_factory.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/internal_encoder_factory.h"
#include "media/engine/simulcast_encoder_adapter.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
// This class wraps the internal factory and adds simulcast.
class BuiltinVideoEncoderFactory : public VideoEncoderFactory {
public:
BuiltinVideoEncoderFactory()
: internal_encoder_factory_(new InternalEncoderFactory()) {}
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
const SdpVideoFormat& format) override {
// Try creating an InternalEncoderFactory-backed SimulcastEncoderAdapter.
// The adapter has a passthrough mode for the case that simulcast is not
// used, so all responsibility can be delegated to it.
std::unique_ptr<VideoEncoder> encoder;
if (format.IsCodecInList(
internal_encoder_factory_->GetSupportedFormats())) {
encoder = std::make_unique<SimulcastEncoderAdapter>(
internal_encoder_factory_.get(), format);
}
return encoder;
}
std::vector<SdpVideoFormat> GetSupportedFormats() const override {
return internal_encoder_factory_->GetSupportedFormats();
}
CodecSupport QueryCodecSupport(
const SdpVideoFormat& format,
absl::optional<std::string> scalability_mode) const override {
return internal_encoder_factory_->QueryCodecSupport(format,
scalability_mode);
}
private:
const std::unique_ptr<VideoEncoderFactory> internal_encoder_factory_;
};
} // namespace
// Constructs the factory that provides WebRTC's built-in encoders with
// simulcast support layered on top.
std::unique_ptr<VideoEncoderFactory> CreateBuiltinVideoEncoderFactory() {
  std::unique_ptr<VideoEncoderFactory> factory =
      std::make_unique<BuiltinVideoEncoderFactory>();
  return factory;
}
} // namespace webrtc

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_BUILTIN_VIDEO_ENCODER_FACTORY_H_
#define API_VIDEO_CODECS_BUILTIN_VIDEO_ENCODER_FACTORY_H_
#include <memory>
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Creates a new factory that can create the built-in types of video encoders.
// The factory has simulcast support for VP8.
RTC_EXPORT std::unique_ptr<VideoEncoderFactory>
CreateBuiltinVideoEncoderFactory();
} // namespace webrtc
#endif // API_VIDEO_CODECS_BUILTIN_VIDEO_ENCODER_FACTORY_H_

View file

@ -0,0 +1,257 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/h264_profile_level_id.h"
#include <cstdio>
#include <cstdlib>
#include <string>
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
const char kProfileLevelId[] = "profile-level-id";
// For level_idc=11 and profile_idc=0x42, 0x4D, or 0x58, the constraint set3
// flag specifies if level 1b or level 1.1 is used.
const uint8_t kConstraintSet3Flag = 0x10;
// Convert a string of 8 characters into a byte where the positions containing
// character c will have their bit set (MSB first). For example, c = 'x',
// str = "x1xx0000" will return 0b10110000. Kept constexpr so the pattern
// table in kProfilePatterns is statically initialized.
constexpr uint8_t ByteMaskString(char c, const char (&str)[9]) {
  uint8_t mask = 0;
  for (int i = 0; i < 8; ++i) {
    // Shift previous bits up and append 1 when str[i] matches c.
    mask = static_cast<uint8_t>((mask << 1) | (str[i] == c ? 1 : 0));
  }
  return mask;
}
// Class for matching bit patterns such as "x1xx0000" where 'x' is allowed to be
// either 0 or 1.
class BitPattern {
 public:
  // `str` is exactly 8 pattern characters (plus the implicit NUL).
  explicit constexpr BitPattern(const char (&str)[9])
      : mask_(~ByteMaskString('x', str)),
        masked_value_(ByteMaskString('1', str)) {}
  // True iff every non-'x' position of the pattern equals the corresponding
  // bit of `value`.
  bool IsMatch(uint8_t value) const { return masked_value_ == (value & mask_); }

 private:
  // Bits fixed by the pattern (the '0'/'1' positions).
  const uint8_t mask_;
  // Required values of those fixed bits.
  const uint8_t masked_value_;
};
// Table for converting between profile_idc/profile_iop to H264Profile.
struct ProfilePattern {
  const uint8_t profile_idc;
  const BitPattern profile_iop;
  const H264Profile profile;
};

// This is from https://tools.ietf.org/html/rfc6184#section-8.1.
constexpr ProfilePattern kProfilePatterns[] = {
    {0x42, BitPattern("x1xx0000"), H264Profile::kProfileConstrainedBaseline},
    {0x4D, BitPattern("1xxx0000"), H264Profile::kProfileConstrainedBaseline},
    {0x58, BitPattern("11xx0000"), H264Profile::kProfileConstrainedBaseline},
    {0x42, BitPattern("x0xx0000"), H264Profile::kProfileBaseline},
    {0x58, BitPattern("10xx0000"), H264Profile::kProfileBaseline},
    {0x4D, BitPattern("0x0x0000"), H264Profile::kProfileMain},
    {0x64, BitPattern("00000000"), H264Profile::kProfileHigh},
    {0x64, BitPattern("00001100"), H264Profile::kProfileConstrainedHigh},
    {0xF4, BitPattern("00000000"), H264Profile::kProfilePredictiveHigh444}};

// Per-level throughput/frame-size limits.
struct LevelConstraint {
  const int max_macroblocks_per_second;
  const int max_macroblock_frame_size;
  const H264Level level;
};

// This is from ITU-T H.264 (02/2016) Table A-1 Level limits.
// Note: kLevel1 and kLevel1_b share identical limits; H264SupportedLevel
// scans this table from the end, so the later entry (kLevel1_b) wins for
// the smallest inputs.
static constexpr LevelConstraint kLevelConstraints[] = {
    {1485, 99, H264Level::kLevel1},
    {1485, 99, H264Level::kLevel1_b},
    {3000, 396, H264Level::kLevel1_1},
    {6000, 396, H264Level::kLevel1_2},
    {11880, 396, H264Level::kLevel1_3},
    {11880, 396, H264Level::kLevel2},
    {19800, 792, H264Level::kLevel2_1},
    {20250, 1620, H264Level::kLevel2_2},
    {40500, 1620, H264Level::kLevel3},
    {108000, 3600, H264Level::kLevel3_1},
    {216000, 5120, H264Level::kLevel3_2},
    {245760, 8192, H264Level::kLevel4},
    {245760, 8192, H264Level::kLevel4_1},
    {522240, 8704, H264Level::kLevel4_2},
    {589824, 22080, H264Level::kLevel5},
    {983040, 36864, H264Level::kLevel5_1},
    {2073600, 36864, H264Level::kLevel5_2},
};
} // anonymous namespace
// Parses a 6-hex-character profile-level-id (e.g. "42e01f") into profile and
// level. Returns absl::nullopt when the string cannot be decoded or the
// decoded bytes do not match any known profile/level combination.
absl::optional<H264ProfileLevelId> ParseH264ProfileLevelId(const char* str) {
  // The string should consist of 3 bytes in hexadecimal format.
  if (strlen(str) != 6u)
    return absl::nullopt;
  // NOTE(review): strtol stops at the first non-hex character and also
  // accepts a sign or "0x" prefix, so some malformed 6-character inputs are
  // folded into a numeric value (usually rejected below) instead of being
  // refused outright — confirm this leniency is intended.
  const uint32_t profile_level_id_numeric = strtol(str, nullptr, 16);
  if (profile_level_id_numeric == 0)
    return absl::nullopt;
  // Separate into three bytes.
  const uint8_t level_idc =
      static_cast<uint8_t>(profile_level_id_numeric & 0xFF);
  const uint8_t profile_iop =
      static_cast<uint8_t>((profile_level_id_numeric >> 8) & 0xFF);
  const uint8_t profile_idc =
      static_cast<uint8_t>((profile_level_id_numeric >> 16) & 0xFF);
  // Parse level based on level_idc and constraint set 3 flag.
  H264Level level_casted = static_cast<H264Level>(level_idc);
  H264Level level;
  switch (level_casted) {
    // level_idc == 11 denotes level 1b when the constraint set 3 flag is
    // set, level 1.1 otherwise (see kConstraintSet3Flag above).
    case H264Level::kLevel1_1:
      level = (profile_iop & kConstraintSet3Flag) != 0 ? H264Level::kLevel1_b
                                                       : H264Level::kLevel1_1;
      break;
    case H264Level::kLevel1:
    case H264Level::kLevel1_2:
    case H264Level::kLevel1_3:
    case H264Level::kLevel2:
    case H264Level::kLevel2_1:
    case H264Level::kLevel2_2:
    case H264Level::kLevel3:
    case H264Level::kLevel3_1:
    case H264Level::kLevel3_2:
    case H264Level::kLevel4:
    case H264Level::kLevel4_1:
    case H264Level::kLevel4_2:
    case H264Level::kLevel5:
    case H264Level::kLevel5_1:
    case H264Level::kLevel5_2:
      level = level_casted;
      break;
    default:
      // Unrecognized level_idc.
      return absl::nullopt;
  }
  // Parse profile_idc/profile_iop into a Profile enum.
  for (const ProfilePattern& pattern : kProfilePatterns) {
    if (profile_idc == pattern.profile_idc &&
        pattern.profile_iop.IsMatch(profile_iop)) {
      return H264ProfileLevelId(pattern.profile, level);
    }
  }
  // Unrecognized profile_idc/profile_iop combination.
  return absl::nullopt;
}
// Returns the highest level from kLevelConstraints whose frame-size and
// macroblock-rate limits fit within `max_frame_pixel_count` pixels at
// `max_fps`, or absl::nullopt when even the lowest level does not fit.
absl::optional<H264Level> H264SupportedLevel(int max_frame_pixel_count,
                                             float max_fps) {
  static const int kPixelsPerMacroblock = 16 * 16;
  // Walk the table from the highest level downwards; first fit wins.
  for (size_t i = arraysize(kLevelConstraints); i-- > 0;) {
    const LevelConstraint& constraint = kLevelConstraints[i];
    const bool frame_size_ok =
        constraint.max_macroblock_frame_size * kPixelsPerMacroblock <=
        max_frame_pixel_count;
    const bool rate_ok =
        constraint.max_macroblocks_per_second <=
        max_fps * constraint.max_macroblock_frame_size;
    if (frame_size_ok && rate_ok) {
      return constraint.level;
    }
  }
  // No level supported.
  return absl::nullopt;
}
// Reads profile-level-id from `params`, applying the legacy default when the
// key is absent.
absl::optional<H264ProfileLevelId> ParseSdpForH264ProfileLevelId(
    const CodecParameterMap& params) {
  // TODO(magjed): The default should really be kProfileBaseline and kLevel1
  // according to the spec: https://tools.ietf.org/html/rfc6184#section-8.1. In
  // order to not break backwards compatibility with older versions of WebRTC
  // where external codecs don't have any parameters, use
  // kProfileConstrainedBaseline kLevel3_1 instead. This workaround will only be
  // done in an interim period to allow external clients to update their code.
  // http://crbug/webrtc/6337.
  static const H264ProfileLevelId kDefaultProfileLevelId(
      H264Profile::kProfileConstrainedBaseline, H264Level::kLevel3_1);
  const auto it = params.find(kProfileLevelId);
  if (it == params.end())
    return kDefaultProfileLevelId;
  return ParseH264ProfileLevelId(it->second.c_str());
}
// Renders `profile_level_id` as the canonical 6-hex-character SDP string
// (profile_idc, profile_iop, level_idc), or absl::nullopt for combinations
// that have no defined encoding.
absl::optional<std::string> H264ProfileLevelIdToString(
    const H264ProfileLevelId& profile_level_id) {
  // Handle special case level == 1b: it shares level_idc 11 with level 1.1
  // and is distinguished via the constraint set 3 flag, so fixed strings are
  // used per profile.
  if (profile_level_id.level == H264Level::kLevel1_b) {
    switch (profile_level_id.profile) {
      case H264Profile::kProfileConstrainedBaseline:
        return {"42f00b"};
      case H264Profile::kProfileBaseline:
        return {"42100b"};
      case H264Profile::kProfileMain:
        return {"4d100b"};
      // Level 1b is not allowed for other profiles.
      default:
        return absl::nullopt;
    }
  }
  // First four hex characters: profile_idc followed by profile_iop.
  const char* profile_idc_iop_string;
  switch (profile_level_id.profile) {
    case H264Profile::kProfileConstrainedBaseline:
      profile_idc_iop_string = "42e0";
      break;
    case H264Profile::kProfileBaseline:
      profile_idc_iop_string = "4200";
      break;
    case H264Profile::kProfileMain:
      profile_idc_iop_string = "4d00";
      break;
    case H264Profile::kProfileConstrainedHigh:
      profile_idc_iop_string = "640c";
      break;
    case H264Profile::kProfileHigh:
      profile_idc_iop_string = "6400";
      break;
    case H264Profile::kProfilePredictiveHigh444:
      profile_idc_iop_string = "f400";
      break;
    // Unrecognized profile.
    default:
      return absl::nullopt;
  }
  // level_idc is the enum's numeric value printed in hex, e.g. kLevel3_1
  // (31) becomes "1f". Buffer: 4 prefix chars + 2 hex chars + NUL.
  char str[7];
  snprintf(str, 7u, "%s%02x", profile_idc_iop_string,
           static_cast<unsigned>(profile_level_id.level));
  return {str};
}
bool H264IsSameProfile(const CodecParameterMap& params1,
const CodecParameterMap& params2) {
const absl::optional<H264ProfileLevelId> profile_level_id =
ParseSdpForH264ProfileLevelId(params1);
const absl::optional<H264ProfileLevelId> other_profile_level_id =
ParseSdpForH264ProfileLevelId(params2);
// Compare H264 profiles, but not levels.
return profile_level_id && other_profile_level_id &&
profile_level_id->profile == other_profile_level_id->profile;
}
} // namespace webrtc

View file

@ -0,0 +1,92 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_H264_PROFILE_LEVEL_ID_H_
#define API_VIDEO_CODECS_H264_PROFILE_LEVEL_ID_H_
#include <string>
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
enum class H264Profile {
kProfileConstrainedBaseline,
kProfileBaseline,
kProfileMain,
kProfileConstrainedHigh,
kProfileHigh,
kProfilePredictiveHigh444,
};
// All values are equal to ten times the level number, except level 1b which is
// special.
enum class H264Level {
kLevel1_b = 0,
kLevel1 = 10,
kLevel1_1 = 11,
kLevel1_2 = 12,
kLevel1_3 = 13,
kLevel2 = 20,
kLevel2_1 = 21,
kLevel2_2 = 22,
kLevel3 = 30,
kLevel3_1 = 31,
kLevel3_2 = 32,
kLevel4 = 40,
kLevel4_1 = 41,
kLevel4_2 = 42,
kLevel5 = 50,
kLevel5_1 = 51,
kLevel5_2 = 52
};
struct H264ProfileLevelId {
constexpr H264ProfileLevelId(H264Profile profile, H264Level level)
: profile(profile), level(level) {}
H264Profile profile;
H264Level level;
};
// Parse profile level id that is represented as a string of 3 hex bytes.
// Nothing will be returned if the string is not a recognized H264
// profile level id.
absl::optional<H264ProfileLevelId> ParseH264ProfileLevelId(const char* str);
// Parse profile level id that is represented as a string of 3 hex bytes
// contained in an SDP key-value map. A default profile level id will be
// returned if the profile-level-id key is missing. Nothing will be returned if
// the key is present but the string is invalid.
RTC_EXPORT absl::optional<H264ProfileLevelId> ParseSdpForH264ProfileLevelId(
const CodecParameterMap& params);
// Given that a decoder supports up to a given frame size (in pixels) at up to a
// given number of frames per second, return the highest H.264 level where it
// can guarantee that it will be able to support all valid encoded streams that
// are within that level.
RTC_EXPORT absl::optional<H264Level> H264SupportedLevel(
int max_frame_pixel_count,
float max_fps);
// Returns canonical string representation as three hex bytes of the profile
// level id, or returns nothing for invalid profile level ids.
RTC_EXPORT absl::optional<std::string> H264ProfileLevelIdToString(
const H264ProfileLevelId& profile_level_id);
// Returns true if the parameters have the same H264 profile (Baseline, High,
// etc).
RTC_EXPORT bool H264IsSameProfile(const CodecParameterMap& params1,
const CodecParameterMap& params2);
} // namespace webrtc
#endif // API_VIDEO_CODECS_H264_PROFILE_LEVEL_ID_H_

View file

@ -0,0 +1,248 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/h265_profile_tier_level.h"
#include <string>
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace {
const char kH265FmtpProfile[] = "profile-id";
const char kH265FmtpTier[] = "tier-flag";
const char kH265FmtpLevel[] = "level-id";
} // anonymous namespace
// Annex A of https://www.itu.int/rec/T-REC-H.265 (08/21), section A.3.
absl::optional<H265Profile> StringToH265Profile(const std::string& profile) {
  const absl::optional<int> value = rtc::StringToNumber<int>(profile);
  if (!value.has_value()) {
    return absl::nullopt;
  }
  // The H265Profile enumerators are declared with contiguous values 1..11
  // matching the SDP profile-id numbers, so a range-checked cast is
  // equivalent to enumerating every case individually.
  if (*value < 1 || *value > 11) {
    return absl::nullopt;
  }
  return static_cast<H265Profile>(*value);
}
// Annex A of https://www.itu.int/rec/T-REC-H.265 (08/21), section A.4,
// tiers and levels.
absl::optional<H265Tier> StringToH265Tier(const std::string& tier) {
  const absl::optional<int> value = rtc::StringToNumber<int>(tier);
  if (!value.has_value()) {
    return absl::nullopt;
  }
  if (*value == 0) {
    return H265Tier::kTier0;
  }
  if (*value == 1) {
    return H265Tier::kTier1;
  }
  return absl::nullopt;
}
// Converts an SDP level-id string to an H265Level. level-id values are 30
// times the level number (e.g. level 3.1 -> 93); anything outside the fixed
// set below yields absl::nullopt.
absl::optional<H265Level> StringToH265Level(const std::string& level) {
  const absl::optional<int> i = rtc::StringToNumber<int>(level);
  if (!i.has_value())
    return absl::nullopt;
  switch (i.value()) {
    case 30:
      return H265Level::kLevel1;
    case 60:
      return H265Level::kLevel2;
    case 63:
      return H265Level::kLevel2_1;
    case 90:
      return H265Level::kLevel3;
    case 93:
      return H265Level::kLevel3_1;
    case 120:
      return H265Level::kLevel4;
    case 123:
      return H265Level::kLevel4_1;
    case 150:
      return H265Level::kLevel5;
    case 153:
      return H265Level::kLevel5_1;
    case 156:
      return H265Level::kLevel5_2;
    case 180:
      return H265Level::kLevel6;
    case 183:
      return H265Level::kLevel6_1;
    case 186:
      return H265Level::kLevel6_2;
    default:
      return absl::nullopt;
  }
}
// Renders `profile` as its SDP profile-id string. Each H265Profile
// enumerator's numeric value is the SDP number it represents (kProfileMain
// == 1 ... kProfileHighThroughputScreenContentCoding == 11), so the decimal
// rendering of the value reproduces the per-case mapping exactly.
std::string H265ProfileToString(H265Profile profile) {
  return std::to_string(static_cast<int>(profile));
}
// Renders `tier` as its SDP tier-flag string. kTier0 and kTier1 have the
// numeric values 0 and 1, matching the strings "0" and "1" exactly.
std::string H265TierToString(H265Tier tier) {
  return std::to_string(static_cast<int>(tier));
}
// Renders `level` as its SDP level-id string. Each H265Level enumerator's
// numeric value is the level-id it represents (kLevel1 == 30 ... kLevel6_2
// == 186), so the decimal rendering of the value reproduces the per-case
// mapping exactly.
std::string H265LevelToString(H265Level level) {
  return std::to_string(static_cast<int>(level));
}
// Reads profile-id/tier-flag/level-id from `params`. Each absent key falls
// back to its default (Main / tier 0 / level 3.1); any present-but-invalid
// value, or a tier/level combination that the spec forbids, yields
// absl::nullopt.
absl::optional<H265ProfileTierLevel> ParseSdpForH265ProfileTierLevel(
    const CodecParameterMap& params) {
  // Used when none of the three keys is present. Its members equal the
  // per-key defaults assigned below.
  static const H265ProfileTierLevel kDefaultProfileTierLevel(
      H265Profile::kProfileMain, H265Tier::kTier0, H265Level::kLevel3_1);
  bool profile_tier_level_specified = false;

  absl::optional<H265Profile> profile;
  const auto profile_it = params.find(kH265FmtpProfile);
  if (profile_it != params.end()) {
    profile_tier_level_specified = true;
    const std::string& profile_str = profile_it->second;
    profile = StringToH265Profile(profile_str);
    if (!profile) {
      // profile-id present but malformed or unknown.
      return absl::nullopt;
    }
  } else {
    profile = H265Profile::kProfileMain;
  }
  absl::optional<H265Tier> tier;
  const auto tier_it = params.find(kH265FmtpTier);
  if (tier_it != params.end()) {
    profile_tier_level_specified = true;
    const std::string& tier_str = tier_it->second;
    tier = StringToH265Tier(tier_str);
    if (!tier) {
      // tier-flag present but malformed or unknown.
      return absl::nullopt;
    }
  } else {
    tier = H265Tier::kTier0;
  }
  absl::optional<H265Level> level;
  const auto level_it = params.find(kH265FmtpLevel);
  if (level_it != params.end()) {
    profile_tier_level_specified = true;
    const std::string& level_str = level_it->second;
    level = StringToH265Level(level_str);
    if (!level) {
      // level-id present but malformed or unknown.
      return absl::nullopt;
    }
  } else {
    level = H265Level::kLevel3_1;
  }
  // Spec Table A.9, level 1 to level 3.1 does not allow high tiers.
  if (level <= H265Level::kLevel3_1 && tier == H265Tier::kTier1) {
    return absl::nullopt;
  }
  return !profile_tier_level_specified
             ? kDefaultProfileTierLevel
             : H265ProfileTierLevel(profile.value(), tier.value(),
                                    level.value());
}
bool H265IsSameProfileTierLevel(const CodecParameterMap& params1,
const CodecParameterMap& params2) {
const absl::optional<H265ProfileTierLevel> ptl1 =
ParseSdpForH265ProfileTierLevel(params1);
const absl::optional<H265ProfileTierLevel> ptl2 =
ParseSdpForH265ProfileTierLevel(params2);
return ptl1 && ptl2 && ptl1->profile == ptl2->profile &&
ptl1->tier == ptl2->tier && ptl1->level == ptl2->level;
}
} // namespace webrtc

View file

@ -0,0 +1,109 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_H265_PROFILE_TIER_LEVEL_H_
#define API_VIDEO_CODECS_H265_PROFILE_TIER_LEVEL_H_
#include <string>
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Profiles can be found at:
// https://www.itu.int/rec/T-REC-H.265
// The enum values match the number specified in the SDP.
enum class H265Profile {
kProfileMain = 1,
kProfileMain10 = 2,
kProfileMainStill = 3,
kProfileRangeExtensions = 4,
kProfileHighThroughput = 5,
kProfileMultiviewMain = 6,
kProfileScalableMain = 7,
kProfile3dMain = 8,
kProfileScreenContentCoding = 9,
kProfileScalableRangeExtensions = 10,
kProfileHighThroughputScreenContentCoding = 11,
};
// Tiers can be found at https://www.itu.int/rec/T-REC-H.265
enum class H265Tier {
kTier0,
kTier1,
};
// All values are equal to 30 times the level number.
enum class H265Level {
kLevel1 = 30,
kLevel2 = 60,
kLevel2_1 = 63,
kLevel3 = 90,
kLevel3_1 = 93,
kLevel4 = 120,
kLevel4_1 = 123,
kLevel5 = 150,
kLevel5_1 = 153,
kLevel5_2 = 156,
kLevel6 = 180,
kLevel6_1 = 183,
kLevel6_2 = 186,
};
struct H265ProfileTierLevel {
constexpr H265ProfileTierLevel(H265Profile profile,
H265Tier tier,
H265Level level)
: profile(profile), tier(tier), level(level) {}
H265Profile profile;
H265Tier tier;
H265Level level;
};
// Helper function to convert H265Profile to std::string.
RTC_EXPORT std::string H265ProfileToString(H265Profile profile);
// Helper function to convert H265Tier to std::string.
RTC_EXPORT std::string H265TierToString(H265Tier tier);
// Helper function to convert H265Level to std::string.
RTC_EXPORT std::string H265LevelToString(H265Level level);
// Helper function to get H265Profile from profile string.
RTC_EXPORT absl::optional<H265Profile> StringToH265Profile(
const std::string& profile);
// Helper function to get H265Tier from tier string.
RTC_EXPORT absl::optional<H265Tier> StringToH265Tier(const std::string& tier);
// Helper function to get H265Level from level string.
RTC_EXPORT absl::optional<H265Level> StringToH265Level(
const std::string& level);
// Parses an SDP key-value map of format parameters to retrieve an H265
// profile/tier/level. Returns an H265ProfileTierLevel with its members set:
// profile defaults to `kProfileMain` if no profile-id is specified,
// tier defaults to `kTier0` if no tier-flag is specified, and
// level defaults to `kLevel3_1` if no level-id is specified.
// Returns an empty value if any of the profile/tier/level keys is present but
// contains an invalid value.
RTC_EXPORT absl::optional<H265ProfileTierLevel> ParseSdpForH265ProfileTierLevel(
const CodecParameterMap& params);
// Returns true if the parameters have the same H265 profile or neither contains
// an H265 profile, otherwise false.
RTC_EXPORT bool H265IsSameProfileTierLevel(const CodecParameterMap& params1,
const CodecParameterMap& params2);
} // namespace webrtc
#endif // API_VIDEO_CODECS_H265_PROFILE_TIER_LEVEL_H_

View file

@ -0,0 +1,91 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/scalability_mode.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Returns the canonical name for `scalability_mode`, e.g. "L2T3_KEY".
absl::string_view ScalabilityModeToString(ScalabilityMode scalability_mode) {
  // Names indexed by the enumerator's underlying value. The ScalabilityMode
  // enumerators are declared without explicit values, so they are contiguous
  // starting at 0 and the cast below is a valid table index.
  static constexpr absl::string_view kModeNames[] = {
      "L1T1",
      "L1T2",
      "L1T3",
      "L2T1",
      "L2T1h",
      "L2T1_KEY",
      "L2T2",
      "L2T2h",
      "L2T2_KEY",
      "L2T2_KEY_SHIFT",
      "L2T3",
      "L2T3h",
      "L2T3_KEY",
      "L3T1",
      "L3T1h",
      "L3T1_KEY",
      "L3T2",
      "L3T2h",
      "L3T2_KEY",
      "L3T3",
      "L3T3h",
      "L3T3_KEY",
      "S2T1",
      "S2T1h",
      "S2T2",
      "S2T2h",
      "S2T3",
      "S2T3h",
      "S3T1",
      "S3T1h",
      "S3T2",
      "S3T2h",
      "S3T3",
      "S3T3h",
  };
  static_assert(sizeof(kModeNames) / sizeof(kModeNames[0]) ==
                    kScalabilityModeCount,
                "kModeNames must cover every ScalabilityMode enumerator");
  const size_t index = static_cast<size_t>(scalability_mode);
  // Crash on out-of-range values (as the switch-based version's
  // RTC_CHECK_NOTREACHED() would) instead of reading past the table.
  RTC_CHECK_LT(index, kScalabilityModeCount);
  return kModeNames[index];
}
} // namespace webrtc

View file

@ -0,0 +1,111 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_SCALABILITY_MODE_H_
#define API_VIDEO_CODECS_SCALABILITY_MODE_H_
#include <stddef.h>
#include <stdint.h>
#include "absl/strings/string_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Supported scalability modes. Most applications should use the
// PeerConnection-level apis where scalability mode is represented as a string.
// This list of currently recognized modes is intended for the api boundary
// between webrtc and injected encoders. Any application usage outside of
// injected encoders is strongly discouraged.
// Naming appears to encode layering: "L<s>T<t>" for s spatial / t temporal
// layers with inter-layer dependency, "S<s>T<t>" for independent simulcast
// spatial layers; "_KEY"/"_KEY_SHIFT" and the "h" suffix select prediction /
// resolution-ratio variants — see the W3C webrtc-svc registry for the
// authoritative definitions (TODO confirm against that spec).
// NOTE: enumerators are deliberately unnumbered and contiguous from 0; code
// elsewhere may rely on the declaration order (e.g. table lookups).
enum class ScalabilityMode : uint8_t {
  kL1T1,
  kL1T2,
  kL1T3,
  kL2T1,
  kL2T1h,
  kL2T1_KEY,
  kL2T2,
  kL2T2h,
  kL2T2_KEY,
  kL2T2_KEY_SHIFT,
  kL2T3,
  kL2T3h,
  kL2T3_KEY,
  kL3T1,
  kL3T1h,
  kL3T1_KEY,
  kL3T2,
  kL3T2h,
  kL3T2_KEY,
  kL3T3,
  kL3T3h,
  kL3T3_KEY,
  kS2T1,
  kS2T1h,
  kS2T2,
  kS2T2h,
  kS2T3,
  kS2T3h,
  kS3T1,
  kS3T1h,
  kS3T2,
  kS3T2h,
  kS3T3,
  kS3T3h,
};
// Exhaustive list of every ScalabilityMode enumerator, kept in declaration
// order; convenient for iterating over all modes.
inline constexpr ScalabilityMode kAllScalabilityModes[] = {
    // clang-format off
    ScalabilityMode::kL1T1,
    ScalabilityMode::kL1T2,
    ScalabilityMode::kL1T3,
    ScalabilityMode::kL2T1,
    ScalabilityMode::kL2T1h,
    ScalabilityMode::kL2T1_KEY,
    ScalabilityMode::kL2T2,
    ScalabilityMode::kL2T2h,
    ScalabilityMode::kL2T2_KEY,
    ScalabilityMode::kL2T2_KEY_SHIFT,
    ScalabilityMode::kL2T3,
    ScalabilityMode::kL2T3h,
    ScalabilityMode::kL2T3_KEY,
    ScalabilityMode::kL3T1,
    ScalabilityMode::kL3T1h,
    ScalabilityMode::kL3T1_KEY,
    ScalabilityMode::kL3T2,
    ScalabilityMode::kL3T2h,
    ScalabilityMode::kL3T2_KEY,
    ScalabilityMode::kL3T3,
    ScalabilityMode::kL3T3h,
    ScalabilityMode::kL3T3_KEY,
    ScalabilityMode::kS2T1,
    ScalabilityMode::kS2T1h,
    ScalabilityMode::kS2T2,
    ScalabilityMode::kS2T2h,
    ScalabilityMode::kS2T3,
    ScalabilityMode::kS2T3h,
    ScalabilityMode::kS3T1,
    ScalabilityMode::kS3T1h,
    ScalabilityMode::kS3T2,
    ScalabilityMode::kS3T2h,
    ScalabilityMode::kS3T3,
    ScalabilityMode::kS3T3h,
    // clang-format on
};
// Total number of scalability modes (size of the list above).
inline constexpr size_t kScalabilityModeCount =
    sizeof(kAllScalabilityModes) / sizeof(ScalabilityMode);
// Returns the canonical string form of `scalability_mode`, e.g. "L3T3_KEY".
RTC_EXPORT
absl::string_view ScalabilityModeToString(ScalabilityMode scalability_mode);
} // namespace webrtc
#endif // API_VIDEO_CODECS_SCALABILITY_MODE_H_

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/scalability_mode_helper.h"
#include "modules/video_coding/svc/scalability_mode_util.h"
namespace webrtc {
// Returns the number of spatial layers encoded in `scalability_mode_string`,
// or absl::nullopt when the string names no known scalability mode.
absl::optional<int> ScalabilityModeStringToNumSpatialLayers(
    absl::string_view scalability_mode_string) {
  // Resolve the string to an enum value first; unknown names yield nullopt.
  if (const absl::optional<ScalabilityMode> mode =
          ScalabilityModeFromString(scalability_mode_string)) {
    return ScalabilityModeToNumSpatialLayers(*mode);
  }
  return absl::nullopt;
}
// Returns the number of temporal layers encoded in `scalability_mode_string`,
// or absl::nullopt when the string names no known scalability mode.
absl::optional<int> ScalabilityModeStringToNumTemporalLayers(
    absl::string_view scalability_mode_string) {
  // Resolve the string to an enum value first; unknown names yield nullopt.
  if (const absl::optional<ScalabilityMode> mode =
          ScalabilityModeFromString(scalability_mode_string)) {
    return ScalabilityModeToNumTemporalLayers(*mode);
  }
  return absl::nullopt;
}
// Thin public wrapper over ScalabilityModeFromString(); returns absl::nullopt
// for unrecognized mode strings.
absl::optional<ScalabilityMode> ScalabilityModeStringToEnum(
    absl::string_view scalability_mode_string) {
  return ScalabilityModeFromString(scalability_mode_string);
}
} // namespace webrtc

View file

@ -0,0 +1,37 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_SCALABILITY_MODE_HELPER_H_
#define API_VIDEO_CODECS_SCALABILITY_MODE_HELPER_H_
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/video_codecs/scalability_mode.h"
namespace webrtc {
// Returns the number of spatial layers from the `scalability_mode_string`
// or nullopt if the given mode is unknown.
absl::optional<int> ScalabilityModeStringToNumSpatialLayers(
absl::string_view scalability_mode_string);
// Returns the number of temporal layers from the `scalability_mode_string`
// or nullopt if the given mode is unknown.
absl::optional<int> ScalabilityModeStringToNumTemporalLayers(
absl::string_view scalability_mode_string);
// Convert the `scalability_mode_string` to the scalability mode enum value
// or nullopt if the given mode is unknown.
absl::optional<ScalabilityMode> ScalabilityModeStringToEnum(
absl::string_view scalability_mode_string);
} // namespace webrtc
#endif // API_VIDEO_CODECS_SCALABILITY_MODE_HELPER_H_

View file

@ -0,0 +1,209 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/sdp_video_format.h"
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/video_codecs/av1_profile.h"
#include "api/video_codecs/h264_profile_level_id.h"
#ifdef RTC_ENABLE_H265
#include "api/video_codecs/h265_profile_tier_level.h"
#endif
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/vp9_profile.h"
#include "media/base/media_constants.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Returns the value mapped to `name` in `params`, or `default_value` when the
// key is absent.
std::string GetFmtpParameterOrDefault(const CodecParameterMap& params,
                                      const std::string& name,
                                      const std::string& default_value) {
  const auto found = params.find(name);
  return found == params.end() ? default_value : found->second;
}
// Returns the H.264 packetization-mode fmtp value from `params`.
std::string H264GetPacketizationModeOrDefault(const CodecParameterMap& params) {
  // If packetization-mode is not present, default to "0".
  // https://tools.ietf.org/html/rfc6184#section-6.2
  return GetFmtpParameterOrDefault(params, cricket::kH264FmtpPacketizationMode,
                                   "0");
}
// True when both parameter maps specify (or default to) the same H.264
// packetization-mode.
bool H264IsSamePacketizationMode(const CodecParameterMap& left,
                                 const CodecParameterMap& right) {
  const std::string left_mode = H264GetPacketizationModeOrDefault(left);
  const std::string right_mode = H264GetPacketizationModeOrDefault(right);
  return left_mode == right_mode;
}
// Returns the AV1 tier fmtp value from `params`.
std::string AV1GetTierOrDefault(const CodecParameterMap& params) {
  // If the parameter is not present, the tier MUST be inferred to be 0.
  // https://aomediacodec.github.io/av1-rtp-spec/#72-sdp-parameters
  return GetFmtpParameterOrDefault(params, cricket::kAv1FmtpTier, "0");
}
// True when both parameter maps specify (or default to) the same AV1 tier.
bool AV1IsSameTier(const CodecParameterMap& left,
                   const CodecParameterMap& right) {
  const std::string left_tier = AV1GetTierOrDefault(left);
  const std::string right_tier = AV1GetTierOrDefault(right);
  return left_tier == right_tier;
}
// Returns the AV1 level-idx fmtp value from `params`.
std::string AV1GetLevelIdxOrDefault(const CodecParameterMap& params) {
  // If the parameter is not present, it MUST be inferred to be 5 (level 3.1).
  // https://aomediacodec.github.io/av1-rtp-spec/#72-sdp-parameters
  return GetFmtpParameterOrDefault(params, cricket::kAv1FmtpLevelIdx, "5");
}
// True when both parameter maps specify (or default to) the same AV1 level.
bool AV1IsSameLevelIdx(const CodecParameterMap& left,
                       const CodecParameterMap& right) {
  const std::string left_level = AV1GetLevelIdxOrDefault(left);
  const std::string right_level = AV1GetLevelIdxOrDefault(right);
  return left_level == right_level;
}
// Some (video) codecs are actually families of codecs and rely on parameters
// to distinguish different incompatible family members.
// Returns true when `format1` and `format2` (which must already share the
// same codec name) agree on every parameter that defines compatibility for
// that codec family; codecs without such parameters always compare true.
bool IsSameCodecSpecific(const SdpVideoFormat& format1,
                         const SdpVideoFormat& format2) {
  // The assumption when calling this function is that the two formats have the
  // same name.
  RTC_DCHECK(absl::EqualsIgnoreCase(format1.name, format2.name));
  VideoCodecType codec_type = PayloadStringToCodecType(format1.name);
  switch (codec_type) {
    case kVideoCodecH264:
      // H.264 compatibility requires matching profile AND packetization-mode.
      return H264IsSameProfile(format1.parameters, format2.parameters) &&
             H264IsSamePacketizationMode(format1.parameters,
                                         format2.parameters);
    case kVideoCodecVP9:
      return VP9IsSameProfile(format1.parameters, format2.parameters);
    case kVideoCodecAV1:
      // AV1 compatibility requires matching profile, tier and level-idx.
      return AV1IsSameProfile(format1.parameters, format2.parameters) &&
             AV1IsSameTier(format1.parameters, format2.parameters) &&
             AV1IsSameLevelIdx(format1.parameters, format2.parameters);
#ifdef RTC_ENABLE_H265
    case kVideoCodecH265:
      return H265IsSameProfileTierLevel(format1.parameters, format2.parameters);
#endif
    default:
      // Codecs without codec-specific compatibility parameters (e.g. VP8)
      // compare equal by name alone.
      return true;
  }
}
} // namespace
// Constructs a format with the given codec name and no parameters.
SdpVideoFormat::SdpVideoFormat(const std::string& name) : name(name) {}
// Constructs a format with the given codec name and fmtp parameters.
SdpVideoFormat::SdpVideoFormat(const std::string& name,
                               const CodecParameterMap& parameters)
    : name(name), parameters(parameters) {}
// Additionally records the scalability modes supported by the codec
// implementation.
SdpVideoFormat::SdpVideoFormat(
    const std::string& name,
    const CodecParameterMap& parameters,
    const absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>&
        scalability_modes)
    : name(name),
      parameters(parameters),
      scalability_modes(scalability_modes) {}
// Copy/move operations and the destructor are the compiler-generated defaults
// (the struct owns only value-semantic members).
SdpVideoFormat::SdpVideoFormat(const SdpVideoFormat&) = default;
SdpVideoFormat::SdpVideoFormat(SdpVideoFormat&&) = default;
SdpVideoFormat& SdpVideoFormat::operator=(const SdpVideoFormat&) = default;
SdpVideoFormat& SdpVideoFormat::operator=(SdpVideoFormat&&) = default;
SdpVideoFormat::~SdpVideoFormat() = default;
std::string SdpVideoFormat::ToString() const {
rtc::StringBuilder builder;
builder << "Codec name: " << name << ", parameters: {";
for (const auto& kv : parameters) {
builder << " " << kv.first << "=" << kv.second;
}
builder << " }";
if (!scalability_modes.empty()) {
builder << ", scalability_modes: [";
bool first = true;
for (const auto scalability_mode : scalability_modes) {
if (first) {
first = false;
} else {
builder << ", ";
}
builder << ScalabilityModeToString(scalability_mode);
}
builder << "]";
}
return builder.str();
}
// Two formats represent the same codec when their names match
// (case-insensitively) and their codec-specific parameters are compatible.
// Note that two formats can represent the same codec even when not every
// parameter value is identical.
bool SdpVideoFormat::IsSameCodec(const SdpVideoFormat& other) const {
  if (!absl::EqualsIgnoreCase(name, other.name)) {
    return false;
  }
  return IsSameCodecSpecific(*this, other);
}
// True when `formats` contains at least one entry that represents the same
// codec as *this (per IsSameCodec()).
bool SdpVideoFormat::IsCodecInList(
    rtc::ArrayView<const webrtc::SdpVideoFormat> formats) const {
  for (const webrtc::SdpVideoFormat& candidate : formats) {
    if (IsSameCodec(candidate))
      return true;
  }
  return false;
}
// Strict equality: name, parameters and scalability modes must all match
// exactly (contrast with the fuzzier IsSameCodec()).
bool operator==(const SdpVideoFormat& a, const SdpVideoFormat& b) {
  if (a.name != b.name)
    return false;
  if (a.parameters != b.parameters)
    return false;
  return a.scalability_modes == b.scalability_modes;
}
// Among `supported_formats` whose name matches `format` (case-insensitively),
// returns the one sharing the most identical fmtp parameters with `format`;
// returns absl::nullopt when no name matches. Logs the outcome.
absl::optional<SdpVideoFormat> FuzzyMatchSdpVideoFormat(
    rtc::ArrayView<const SdpVideoFormat> supported_formats,
    const SdpVideoFormat& format) {
  absl::optional<SdpVideoFormat> best_match;
  int best_score = 0;
  // Score = number of (key, value) pairs the candidate shares with `format`.
  auto count_matching_parameters = [&format](const SdpVideoFormat& candidate) {
    int score = 0;
    for (const auto& kv : candidate.parameters) {
      const auto it = format.parameters.find(kv.first);
      if (it != format.parameters.end() && it->second == kv.second) {
        ++score;
      }
    }
    return score;
  };
  for (const SdpVideoFormat& candidate : supported_formats) {
    if (!absl::EqualsIgnoreCase(candidate.name, format.name)) {
      continue;
    }
    const int score = count_matching_parameters(candidate);
    // The first name match is always kept; later ones must strictly improve.
    if (!best_match || score > best_score) {
      best_match = candidate;
      best_score = score;
    }
  }
  if (!best_match) {
    RTC_LOG(LS_INFO) << "Failed to match SdpVideoFormat " << format.ToString();
  } else if (*best_match != format) {
    RTC_LOG(LS_INFO) << "Matched SdpVideoFormat " << format.ToString()
                     << " with " << best_match->ToString();
  }
  return best_match;
}
} // namespace webrtc

View file

@ -0,0 +1,77 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_SDP_VIDEO_FORMAT_H_
#define API_VIDEO_CODECS_SDP_VIDEO_FORMAT_H_
#include <map>
#include <string>
#include "absl/container/inlined_vector.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/rtp_parameters.h"
#include "api/video_codecs/scalability_mode.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// SDP specification for a single video codec.
// NOTE: This class is still under development and may change without notice.
struct RTC_EXPORT SdpVideoFormat {
  // Deprecated alias for the fmtp parameter map type.
  using Parameters [[deprecated(("Use webrtc::CodecParameterMap"))]] =
      std::map<std::string, std::string>;
  // Constructs a format with `name` and no parameters.
  explicit SdpVideoFormat(const std::string& name);
  // Constructs a format with `name` and the given fmtp parameters.
  SdpVideoFormat(const std::string& name, const CodecParameterMap& parameters);
  // Additionally records the scalability modes the codec implementation
  // supports.
  SdpVideoFormat(
      const std::string& name,
      const CodecParameterMap& parameters,
      const absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>&
          scalability_modes);
  SdpVideoFormat(const SdpVideoFormat&);
  SdpVideoFormat(SdpVideoFormat&&);
  SdpVideoFormat& operator=(const SdpVideoFormat&);
  SdpVideoFormat& operator=(SdpVideoFormat&&);
  ~SdpVideoFormat();
  // Returns true if the SdpVideoFormats have the same names as well as codec
  // specific parameters. Please note that two SdpVideoFormats can represent the
  // same codec even though not all parameters are the same.
  bool IsSameCodec(const SdpVideoFormat& other) const;
  // Returns true if `formats` contains at least one entry that IsSameCodec()
  // as *this.
  bool IsCodecInList(
      rtc::ArrayView<const webrtc::SdpVideoFormat> formats) const;
  // Human-readable representation (name, parameters, scalability modes).
  std::string ToString() const;
  // Strict equality: every member must match exactly.
  friend RTC_EXPORT bool operator==(const SdpVideoFormat& a,
                                    const SdpVideoFormat& b);
  friend RTC_EXPORT bool operator!=(const SdpVideoFormat& a,
                                    const SdpVideoFormat& b) {
    return !(a == b);
  }
  // Codec name as it appears in SDP, e.g. "VP9".
  std::string name;
  // SDP fmtp key/value parameters.
  CodecParameterMap parameters;
  // Scalability modes supported by the codec implementation, if advertised.
  absl::InlinedVector<ScalabilityMode, kScalabilityModeCount> scalability_modes;
};
// For not so good reasons sometimes additional parameters are added to an
// SdpVideoFormat, which makes instances that should compare equal to not match
// anymore. Until we stop misusing SdpVideoFormats provide this convenience
// function to perform fuzzy matching.
absl::optional<SdpVideoFormat> FuzzyMatchSdpVideoFormat(
    rtc::ArrayView<const SdpVideoFormat> supported_formats,
    const SdpVideoFormat& format);
} // namespace webrtc
#endif // API_VIDEO_CODECS_SDP_VIDEO_FORMAT_H_

View file

@ -0,0 +1,37 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/simulcast_stream.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Returns the configured number of temporal layers.
unsigned char SimulcastStream::GetNumberOfTemporalLayers() const {
  return numberOfTemporalLayers;
}
// Sets the number of temporal layers; only 1-3 are supported (debug-checked).
void SimulcastStream::SetNumberOfTemporalLayers(unsigned char n) {
  RTC_DCHECK_GE(n, 1);
  RTC_DCHECK_LE(n, 3);
  numberOfTemporalLayers = n;
}
// Maps the configured number of temporal layers (1-3, fatally checked) onto
// the corresponding single-spatial-layer scalability mode.
ScalabilityMode SimulcastStream::GetScalabilityMode() const {
  RTC_CHECK_GE(numberOfTemporalLayers, 1);
  RTC_CHECK_LE(numberOfTemporalLayers, 3);
  switch (numberOfTemporalLayers) {
    case 2:
      return ScalabilityMode::kL1T2;
    case 3:
      return ScalabilityMode::kL1T3;
    default:  // 1, guaranteed by the checks above.
      return ScalabilityMode::kL1T1;
  }
}
} // namespace webrtc

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_SIMULCAST_STREAM_H_
#define API_VIDEO_CODECS_SIMULCAST_STREAM_H_
#include "api/video_codecs/scalability_mode.h"
namespace webrtc {
// TODO(bugs.webrtc.org/6883): Unify with struct VideoStream, part of
// VideoEncoderConfig.
// Configuration for one simulcast stream (resolution, frame rate, bitrates
// and layering). All members have value-initialized defaults.
struct SimulcastStream {
  // Temporary utility methods for transition from numberOfTemporalLayers
  // setting to ScalabilityMode.
  unsigned char GetNumberOfTemporalLayers() const;
  ScalabilityMode GetScalabilityMode() const;
  void SetNumberOfTemporalLayers(unsigned char n);
  int width = 0;   // Stream width in pixels.
  int height = 0;  // Stream height in pixels.
  float maxFramerate = 0;  // fps.
  unsigned char numberOfTemporalLayers = 1;  // Valid range is 1-3.
  unsigned int maxBitrate = 0;     // kilobits/sec.
  unsigned int targetBitrate = 0;  // kilobits/sec.
  unsigned int minBitrate = 0;     // kilobits/sec.
  unsigned int qpMax = 0;          // Upper bound on QP (caps quality loss).
  bool active = false;             // encoded and sent.
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_SIMULCAST_STREAM_H_

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/spatial_layer.h"
namespace webrtc {
// Member-wise equality; bails out on the first mismatching field.
bool SpatialLayer::operator==(const SpatialLayer& other) const {
  if (width != other.width || height != other.height)
    return false;
  if (maxFramerate != other.maxFramerate)
    return false;
  if (numberOfTemporalLayers != other.numberOfTemporalLayers)
    return false;
  if (maxBitrate != other.maxBitrate || targetBitrate != other.targetBitrate ||
      minBitrate != other.minBitrate)
    return false;
  return qpMax == other.qpMax && active == other.active;
}
} // namespace webrtc

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_SPATIAL_LAYER_H_
#define API_VIDEO_CODECS_SPATIAL_LAYER_H_
namespace webrtc {
// Configuration for one spatial layer of an SVC encoding.
// NOTE(review): unlike SimulcastStream, members here have no default
// initializers — callers are expected to set every field before use.
struct SpatialLayer {
  bool operator==(const SpatialLayer& other) const;
  bool operator!=(const SpatialLayer& other) const { return !(*this == other); }
  unsigned short width;   // NOLINT(runtime/int)
  unsigned short height;  // NOLINT(runtime/int)
  float maxFramerate;     // fps.
  unsigned char numberOfTemporalLayers;
  unsigned int maxBitrate;     // kilobits/sec.
  unsigned int targetBitrate;  // kilobits/sec.
  unsigned int minBitrate;     // kilobits/sec.
  unsigned int qpMax;          // Upper bound on QP (caps quality loss).
  bool active;                 // encoded and sent.
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_SPATIAL_LAYER_H_

View file

@ -0,0 +1,89 @@
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
# Test-only targets for the api/video_codecs surface; built only when the
# GN arg rtc_include_tests is set.
if (rtc_include_tests) {
  # Main unit-test bundle for the video_codecs API (profile parsing, SDP
  # format matching, software-fallback wrappers).
  rtc_library("video_codecs_api_unittests") {
    testonly = true
    sources = [
      "builtin_video_encoder_factory_unittest.cc",
      "h264_profile_level_id_unittest.cc",
      "sdp_video_format_unittest.cc",
      "video_decoder_software_fallback_wrapper_unittest.cc",
      "video_encoder_software_fallback_wrapper_unittest.cc",
    ]
    # H.265 tests are conditional on the rtc_use_h265 build flag.
    if (rtc_use_h265) {
      sources += [ "h265_profile_tier_level_unittest.cc" ]
    }
    deps = [
      ":video_decoder_factory_template_tests",
      ":video_encoder_factory_template_tests",
      "..:builtin_video_encoder_factory",
      "..:rtc_software_fallback_wrappers",
      "..:video_codecs_api",
      "../..:fec_controller_api",
      "../..:mock_video_encoder",
      "../../../api:scoped_refptr",
      "../../../media:media_constants",
      "../../../media:rtc_media_base",
      "../../../modules/video_coding:video_codec_interface",
      "../../../modules/video_coding:video_coding_utility",
      "../../../modules/video_coding:webrtc_vp8",
      "../../../rtc_base:checks",
      "../../../rtc_base:rtc_base_tests_utils",
      "../../../test:explicit_key_value_config",
      "../../../test:fake_video_codecs",
      "../../../test:field_trial",
      "../../../test:test_support",
      "../../../test:video_test_common",
      "../../environment",
      "../../environment:environment_factory",
      "../../video:encoded_image",
      "../../video:video_bitrate_allocation",
      "../../video:video_frame",
      "../../video:video_rtp_headers",
      "//testing/gtest",
    ]
    absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
  }
  # Tests for the template-based encoder factory and its codec adapters.
  rtc_library("video_encoder_factory_template_tests") {
    testonly = true
    sources = [ "video_encoder_factory_template_tests.cc" ]
    deps = [
      "..:video_encoder_factory_template",
      "..:video_encoder_factory_template_libaom_av1_adapter",
      "..:video_encoder_factory_template_libvpx_vp8_adapter",
      "..:video_encoder_factory_template_libvpx_vp9_adapter",
      "..:video_encoder_factory_template_open_h264_adapter",
      "../../:mock_video_encoder",
      "../../../test:test_support",
      "//testing/gtest",
    ]
  }
  # Tests for the template-based decoder factory and its codec adapters.
  rtc_library("video_decoder_factory_template_tests") {
    testonly = true
    sources = [ "video_decoder_factory_template_tests.cc" ]
    deps = [
      "..:video_decoder_factory_template",
      "..:video_decoder_factory_template_dav1d_adapter",
      "..:video_decoder_factory_template_libvpx_vp8_adapter",
      "..:video_decoder_factory_template_libvpx_vp9_adapter",
      "..:video_decoder_factory_template_open_h264_adapter",
      "../../:mock_video_decoder",
      "../../../test:test_support",
      "//testing/gtest",
    ]
  }
}

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/builtin_video_encoder_factory.h"
#include <memory>
#include <string>
#include <vector>
#include "api/video_codecs/sdp_video_format.h"
#include "test/gtest.h"
namespace webrtc {
// The builtin encoder factory must advertise VP9 support if and only if the
// build was configured with RTC_ENABLE_VP9.
TEST(BuiltinVideoEncoderFactoryTest, AnnouncesVp9AccordingToBuildFlags) {
  std::unique_ptr<VideoEncoderFactory> factory =
      CreateBuiltinVideoEncoderFactory();
  bool found_vp9 = false;
  for (const SdpVideoFormat& format : factory->GetSupportedFormats()) {
    if (format.name == "VP9") {
      found_vp9 = true;
      break;
    }
  }
#if defined(RTC_ENABLE_VP9)
  EXPECT_TRUE(found_vp9);
#else
  EXPECT_FALSE(found_vp9);
#endif  // defined(RTC_ENABLE_VP9)
}
} // namespace webrtc

View file

@ -0,0 +1,171 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/h264_profile_level_id.h"
#include <map>
#include <string>
#include "absl/types/optional.h"
#include "test/gtest.h"
namespace webrtc {
// Parsing must reject malformed ids as well as syntactically well-formed ids
// whose level or profile bits are unknown.
TEST(H264ProfileLevelId, TestParsingInvalid) {
  // Malformed strings.
  EXPECT_FALSE(ParseH264ProfileLevelId(""));
  EXPECT_FALSE(ParseH264ProfileLevelId(" 42e01f"));
  EXPECT_FALSE(ParseH264ProfileLevelId("4242e01f"));
  EXPECT_FALSE(ParseH264ProfileLevelId("e01f"));
  EXPECT_FALSE(ParseH264ProfileLevelId("gggggg"));
  // Invalid level.
  EXPECT_FALSE(ParseH264ProfileLevelId("42e000"));
  EXPECT_FALSE(ParseH264ProfileLevelId("42e00f"));
  EXPECT_FALSE(ParseH264ProfileLevelId("42e0ff"));
  // Invalid profile.
  EXPECT_FALSE(ParseH264ProfileLevelId("42e11f"));
  EXPECT_FALSE(ParseH264ProfileLevelId("58601f"));
  EXPECT_FALSE(ParseH264ProfileLevelId("64e01f"));
}
// The level is decoded from the last byte of the id; parsing is
// case-insensitive, and level 1b ("42f00b") is distinguished from 1.1.
TEST(H264ProfileLevelId, TestParsingLevel) {
  EXPECT_EQ(H264Level::kLevel3_1, ParseH264ProfileLevelId("42e01f")->level);
  EXPECT_EQ(H264Level::kLevel1_1, ParseH264ProfileLevelId("42e00b")->level);
  EXPECT_EQ(H264Level::kLevel1_b, ParseH264ProfileLevelId("42f00b")->level);
  EXPECT_EQ(H264Level::kLevel4_2, ParseH264ProfileLevelId("42C02A")->level);
  EXPECT_EQ(H264Level::kLevel5_2, ParseH264ProfileLevelId("640c34")->level);
}
// Several distinct profile_idc/constraint-flag encodings map to constrained
// baseline.
TEST(H264ProfileLevelId, TestParsingConstrainedBaseline) {
  EXPECT_EQ(H264Profile::kProfileConstrainedBaseline,
            ParseH264ProfileLevelId("42e01f")->profile);
  EXPECT_EQ(H264Profile::kProfileConstrainedBaseline,
            ParseH264ProfileLevelId("42C02A")->profile);
  EXPECT_EQ(H264Profile::kProfileConstrainedBaseline,
            ParseH264ProfileLevelId("4de01f")->profile);
  EXPECT_EQ(H264Profile::kProfileConstrainedBaseline,
            ParseH264ProfileLevelId("58f01f")->profile);
}
// Plain (non-constrained) baseline.
TEST(H264ProfileLevelId, TestParsingBaseline) {
  EXPECT_EQ(H264Profile::kProfileBaseline,
            ParseH264ProfileLevelId("42a01f")->profile);
  EXPECT_EQ(H264Profile::kProfileBaseline,
            ParseH264ProfileLevelId("58A01F")->profile);
}
// Main profile.
TEST(H264ProfileLevelId, TestParsingMain) {
  EXPECT_EQ(H264Profile::kProfileMain,
            ParseH264ProfileLevelId("4D401f")->profile);
}
// High profile.
TEST(H264ProfileLevelId, TestParsingHigh) {
  EXPECT_EQ(H264Profile::kProfileHigh,
            ParseH264ProfileLevelId("64001f")->profile);
}
// Constrained high profile.
TEST(H264ProfileLevelId, TestParsingConstrainedHigh) {
  EXPECT_EQ(H264Profile::kProfileConstrainedHigh,
            ParseH264ProfileLevelId("640c1f")->profile);
}
// H264SupportedLevel() picks the minimum level able to carry the given
// resolution at the given frame rate.
TEST(H264ProfileLevelId, TestSupportedLevel) {
  EXPECT_EQ(H264Level::kLevel2_1, *H264SupportedLevel(640 * 480, 25));
  EXPECT_EQ(H264Level::kLevel3_1, *H264SupportedLevel(1280 * 720, 30));
  EXPECT_EQ(H264Level::kLevel4_2, *H264SupportedLevel(1920 * 1280, 60));
}
// Test supported level below level 1 requirements.
TEST(H264ProfileLevelId, TestSupportedLevelInvalid) {
  EXPECT_FALSE(H264SupportedLevel(0, 0));
  // All levels support fps > 5.
  EXPECT_FALSE(H264SupportedLevel(1280 * 720, 5));
  // All levels support frame sizes > 183 * 137.
  EXPECT_FALSE(H264SupportedLevel(183 * 137, 30));
}
// Known profile/level combinations serialize to their canonical
// profile-level-id strings.
TEST(H264ProfileLevelId, TestToString) {
  EXPECT_EQ("42e01f", *H264ProfileLevelIdToString(H264ProfileLevelId(
                          H264Profile::kProfileConstrainedBaseline,
                          H264Level::kLevel3_1)));
  EXPECT_EQ("42000a", *H264ProfileLevelIdToString(H264ProfileLevelId(
                          H264Profile::kProfileBaseline, H264Level::kLevel1)));
  // Dereference the optional like the sibling expectations do; comparing the
  // optional directly would also pass but silently tolerates nullopt-vs-string
  // asymmetry in the failure message.
  EXPECT_EQ("4d001f", *H264ProfileLevelIdToString(H264ProfileLevelId(
                          H264Profile::kProfileMain, H264Level::kLevel3_1)));
  EXPECT_EQ("640c2a",
            *H264ProfileLevelIdToString(H264ProfileLevelId(
                H264Profile::kProfileConstrainedHigh, H264Level::kLevel4_2)));
  EXPECT_EQ("64002a", *H264ProfileLevelIdToString(H264ProfileLevelId(
                          H264Profile::kProfileHigh, H264Level::kLevel4_2)));
}
// Level 1b has a special encoding that differs per profile.
TEST(H264ProfileLevelId, TestToStringLevel1b) {
  EXPECT_EQ("42f00b", *H264ProfileLevelIdToString(H264ProfileLevelId(
                          H264Profile::kProfileConstrainedBaseline,
                          H264Level::kLevel1_b)));
  EXPECT_EQ("42100b",
            *H264ProfileLevelIdToString(H264ProfileLevelId(
                H264Profile::kProfileBaseline, H264Level::kLevel1_b)));
  EXPECT_EQ("4d100b", *H264ProfileLevelIdToString(H264ProfileLevelId(
                          H264Profile::kProfileMain, H264Level::kLevel1_b)));
}
// parse -> serialize round-trips to the lower-case canonical form regardless
// of the input's case.
TEST(H264ProfileLevelId, TestToStringRoundTrip) {
  EXPECT_EQ("42e01f",
            *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("42e01f")));
  EXPECT_EQ("42e01f",
            *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("42E01F")));
  EXPECT_EQ("4d100b",
            *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("4d100b")));
  EXPECT_EQ("4d100b",
            *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("4D100B")));
  EXPECT_EQ("640c2a",
            *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("640c2a")));
  EXPECT_EQ("640c2a",
            *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("640C2A")));
}
// Combinations with no valid encoding (e.g. high profiles with level 1b, or
// an out-of-range profile enum) serialize to nullopt.
TEST(H264ProfileLevelId, TestToStringInvalid) {
  EXPECT_FALSE(H264ProfileLevelIdToString(
      H264ProfileLevelId(H264Profile::kProfileHigh, H264Level::kLevel1_b)));
  EXPECT_FALSE(H264ProfileLevelIdToString(H264ProfileLevelId(
      H264Profile::kProfileConstrainedHigh, H264Level::kLevel1_b)));
  EXPECT_FALSE(H264ProfileLevelIdToString(
      H264ProfileLevelId(static_cast<H264Profile>(255), H264Level::kLevel3_1)));
}
// With no profile-level-id fmtp entry, SDP parsing falls back to constrained
// baseline level 3.1.
TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdEmpty) {
  const absl::optional<H264ProfileLevelId> profile_level_id =
      ParseSdpForH264ProfileLevelId(CodecParameterMap());
  EXPECT_TRUE(profile_level_id);
  EXPECT_EQ(H264Profile::kProfileConstrainedBaseline,
            profile_level_id->profile);
  EXPECT_EQ(H264Level::kLevel3_1, profile_level_id->level);
}
// An explicit fmtp entry overrides the default.
TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdConstrainedHigh) {
  CodecParameterMap params;
  params["profile-level-id"] = "640c2a";
  const absl::optional<H264ProfileLevelId> profile_level_id =
      ParseSdpForH264ProfileLevelId(params);
  EXPECT_TRUE(profile_level_id);
  EXPECT_EQ(H264Profile::kProfileConstrainedHigh, profile_level_id->profile);
  EXPECT_EQ(H264Level::kLevel4_2, profile_level_id->level);
}
// A present-but-unparsable entry yields nullopt rather than the default.
TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdInvalid) {
  CodecParameterMap params;
  params["profile-level-id"] = "foobar";
  EXPECT_FALSE(ParseSdpForH264ProfileLevelId(params));
}
} // namespace webrtc

View file

@ -0,0 +1,248 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/h265_profile_tier_level.h"
#include <string>
#include "absl/types/optional.h"
#include "test/gtest.h"
namespace webrtc {
TEST(H265ProfileTierLevel, TestLevelToString) {
  // The serialized level-id is the level number times 30 (visible in the
  // expected values below: 1 -> "30", 3.1 -> "93", 6.2 -> "186").
  struct LevelCase {
    H265Level level;
    const char* str;
  };
  const LevelCase kLevels[] = {
      {H265Level::kLevel1, "30"},    {H265Level::kLevel2, "60"},
      {H265Level::kLevel2_1, "63"},  {H265Level::kLevel3, "90"},
      {H265Level::kLevel3_1, "93"},  {H265Level::kLevel4, "120"},
      {H265Level::kLevel4_1, "123"}, {H265Level::kLevel5, "150"},
      {H265Level::kLevel5_1, "153"}, {H265Level::kLevel5_2, "156"},
      {H265Level::kLevel6, "180"},   {H265Level::kLevel6_1, "183"},
      {H265Level::kLevel6_2, "186"},
  };
  for (const LevelCase& c : kLevels) {
    EXPECT_EQ(H265LevelToString(c.level), c.str);
  }
}
TEST(H265ProfileTierLevel, TestProfileToString) {
  // Profiles serialize to their numeric profile-id.
  struct ProfileCase {
    H265Profile profile;
    const char* str;
  };
  const ProfileCase kProfiles[] = {
      {H265Profile::kProfileMain, "1"},
      {H265Profile::kProfileMain10, "2"},
      {H265Profile::kProfileMainStill, "3"},
      {H265Profile::kProfileRangeExtensions, "4"},
      {H265Profile::kProfileHighThroughput, "5"},
      {H265Profile::kProfileMultiviewMain, "6"},
      {H265Profile::kProfileScalableMain, "7"},
      {H265Profile::kProfile3dMain, "8"},
      {H265Profile::kProfileScreenContentCoding, "9"},
      {H265Profile::kProfileScalableRangeExtensions, "10"},
      {H265Profile::kProfileHighThroughputScreenContentCoding, "11"},
  };
  for (const ProfileCase& c : kProfiles) {
    EXPECT_EQ(H265ProfileToString(c.profile), c.str);
  }
}
TEST(H265ProfileTierLevel, TestTierToString) {
  // The tier flag serializes as its numeric value.
  EXPECT_EQ(H265TierToString(H265Tier::kTier0), "0");
  EXPECT_EQ(H265TierToString(H265Tier::kTier1), "1");
}
TEST(H265ProfileTierLevel, TestStringToProfile) {
  // Out-of-range numeric values are rejected.
  for (const char* bad : {"0", "12"}) {
    EXPECT_FALSE(StringToH265Profile(bad)) << "input: " << bad;
  }
  // Malformed (non-numeric or padded) strings are rejected.
  for (const char* bad : {"", " 1", "12x", "x12", "gggg"}) {
    EXPECT_FALSE(StringToH265Profile(bad)) << "input: " << bad;
  }
  // Spot-check a few valid profile ids.
  EXPECT_EQ(StringToH265Profile("1"), H265Profile::kProfileMain);
  EXPECT_EQ(StringToH265Profile("2"), H265Profile::kProfileMain10);
  EXPECT_EQ(StringToH265Profile("4"), H265Profile::kProfileRangeExtensions);
}
TEST(H265ProfileTierLevel, TestStringToLevel) {
  // Out-of-range numeric values are rejected.
  for (const char* bad : {"0", "200"}) {
    EXPECT_FALSE(StringToH265Level(bad)) << "input: " << bad;
  }
  // Malformed (non-numeric or padded) strings are rejected.
  for (const char* bad : {"", " 30", "30x", "x30", "ggggg"}) {
    EXPECT_FALSE(StringToH265Level(bad)) << "input: " << bad;
  }
  // Spot-check a few valid level ids.
  EXPECT_EQ(StringToH265Level("30"), H265Level::kLevel1);
  EXPECT_EQ(StringToH265Level("93"), H265Level::kLevel3_1);
  EXPECT_EQ(StringToH265Level("183"), H265Level::kLevel6_1);
}
TEST(H265ProfileTierLevel, TestStringToTier) {
  // Anything other than "0" or "1" is rejected, including padded or
  // non-numeric forms.
  for (const char* bad : {"4", "-1", "", " 1", "t1"}) {
    EXPECT_FALSE(StringToH265Tier(bad)) << "input: " << bad;
  }
  EXPECT_EQ(StringToH265Tier("0"), H265Tier::kTier0);
  EXPECT_EQ(StringToH265Tier("1"), H265Tier::kTier1);
}
TEST(H265ProfileTierLevel, TestParseSdpProfileTierLevelAllEmpty) {
  // No fmtp parameters: parsing yields the defaults (Main, level 3.1, tier 0).
  const absl::optional<H265ProfileTierLevel> ptl =
      ParseSdpForH265ProfileTierLevel(CodecParameterMap());
  EXPECT_TRUE(ptl);
  EXPECT_EQ(H265Profile::kProfileMain, ptl->profile);
  EXPECT_EQ(H265Level::kLevel3_1, ptl->level);
  EXPECT_EQ(H265Tier::kTier0, ptl->tier);
}
TEST(H265ProfileTierLevel, TestParseSdpProfileTierLevelPartialEmpty) {
  // Each missing parameter individually falls back to its default.
  {
    CodecParameterMap fmtp;
    fmtp["profile-id"] = "1";
    fmtp["tier-flag"] = "0";
    const absl::optional<H265ProfileTierLevel> ptl =
        ParseSdpForH265ProfileTierLevel(fmtp);
    EXPECT_TRUE(ptl);
    EXPECT_EQ(H265Profile::kProfileMain, ptl->profile);
    EXPECT_EQ(H265Level::kLevel3_1, ptl->level);
    EXPECT_EQ(H265Tier::kTier0, ptl->tier);
  }
  {
    CodecParameterMap fmtp;
    fmtp["profile-id"] = "2";
    const absl::optional<H265ProfileTierLevel> ptl =
        ParseSdpForH265ProfileTierLevel(fmtp);
    EXPECT_TRUE(ptl);
    EXPECT_EQ(H265Profile::kProfileMain10, ptl->profile);
    EXPECT_EQ(H265Level::kLevel3_1, ptl->level);
    EXPECT_EQ(H265Tier::kTier0, ptl->tier);
  }
  {
    CodecParameterMap fmtp;
    fmtp["level-id"] = "180";
    const absl::optional<H265ProfileTierLevel> ptl =
        ParseSdpForH265ProfileTierLevel(fmtp);
    EXPECT_TRUE(ptl);
    EXPECT_EQ(H265Profile::kProfileMain, ptl->profile);
    EXPECT_EQ(H265Level::kLevel6, ptl->level);
    EXPECT_EQ(H265Tier::kTier0, ptl->tier);
  }
}
TEST(H265ProfileTierLevel, TestParseSdpProfileTierLevelInvalid) {
  {
    // High tier combined with a low level is rejected as an invalid
    // combination.
    CodecParameterMap fmtp;
    fmtp["profile-id"] = "1";
    fmtp["tier-flag"] = "1";
    fmtp["level-id"] = "93";
    EXPECT_FALSE(ParseSdpForH265ProfileTierLevel(fmtp));
  }
  {
    // A tier flag outside {0, 1} is rejected.
    CodecParameterMap fmtp;
    fmtp["profile-id"] = "1";
    fmtp["tier-flag"] = "4";
    fmtp["level-id"] = "180";
    EXPECT_FALSE(ParseSdpForH265ProfileTierLevel(fmtp));
  }
  {
    // A consistent profile-tier-level combination parses successfully.
    CodecParameterMap fmtp;
    fmtp["profile-id"] = "1";
    fmtp["tier-flag"] = "0";
    fmtp["level-id"] = "153";
    EXPECT_TRUE(ParseSdpForH265ProfileTierLevel(fmtp));
  }
}
// Serializing a parsed PTL must reproduce the original fmtp values.
TEST(H265ProfileTierLevel, TestToStringRoundTrip) {
  CodecParameterMap params;
  params["profile-id"] = "1";
  params["tier-flag"] = "0";
  params["level-id"] = "93";
  absl::optional<H265ProfileTierLevel> profile_tier_level =
      ParseSdpForH265ProfileTierLevel(params);
  EXPECT_TRUE(profile_tier_level);
  EXPECT_EQ("1", H265ProfileToString(profile_tier_level->profile));
  EXPECT_EQ("0", H265TierToString(profile_tier_level->tier));
  EXPECT_EQ("93", H265LevelToString(profile_tier_level->level));
  // Second round trip with a different profile/tier/level combination.
  params.clear();
  params["profile-id"] = "2";
  params["tier-flag"] = "1";
  params["level-id"] = "180";
  profile_tier_level = ParseSdpForH265ProfileTierLevel(params);
  EXPECT_TRUE(profile_tier_level);
  EXPECT_EQ("2", H265ProfileToString(profile_tier_level->profile));
  EXPECT_EQ("1", H265TierToString(profile_tier_level->tier));
  EXPECT_EQ("180", H265LevelToString(profile_tier_level->level));
}
// Exercises H265IsSameProfileTierLevel() with equal, differing and invalid
// parameter sets. The two maps are cleared and repopulated between scenarios.
TEST(H265ProfileTierLevel, TestProfileTierLevelCompare) {
  CodecParameterMap params1;
  CodecParameterMap params2;
  // None of profile-id/tier-flag/level-id is specified; both sides resolve to
  // the defaults and therefore compare equal.
  EXPECT_TRUE(H265IsSameProfileTierLevel(params1, params2));
  // Same non-empty PTL.
  params1["profile-id"] = "1";
  params1["tier-flag"] = "0";
  params1["level-id"] = "120";
  params2["profile-id"] = "1";
  params2["tier-flag"] = "0";
  params2["level-id"] = "120";
  EXPECT_TRUE(H265IsSameProfileTierLevel(params1, params2));
  // Different profiles.
  params1.clear();
  params2.clear();
  params1["profile-id"] = "1";
  params2["profile-id"] = "2";
  EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
  // Different levels.
  params1.clear();
  params2.clear();
  params1["profile-id"] = "1";
  params2["profile-id"] = "1";
  params1["level-id"] = "93";
  params2["level-id"] = "183";
  EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
  // Different tiers.
  params1.clear();
  params2.clear();
  params1["profile-id"] = "1";
  params2["profile-id"] = "1";
  params1["level-id"] = "93";
  params2["level-id"] = "93";
  params1["tier-flag"] = "0";
  params2["tier-flag"] = "1";
  EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
  // One of the CodecParameterMap is invalid (tier-flag out of range), so the
  // comparison fails even though the profiles match.
  params1.clear();
  params2.clear();
  params1["profile-id"] = "1";
  params2["profile-id"] = "1";
  params1["tier-flag"] = "0";
  params2["tier-flag"] = "4";
  EXPECT_FALSE(H265IsSameProfileTierLevel(params1, params2));
}
} // namespace webrtc

View file

@ -0,0 +1,155 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/sdp_video_format.h"
#include <stdint.h>
#include "media/base/media_constants.h"
#include "test/gtest.h"
namespace webrtc {
// Short aliases to keep the test expressions below compact.
// `using` aliases are the idiomatic modern-C++ form of `typedef`.
using Sdp = SdpVideoFormat;
using Params = CodecParameterMap;
TEST(SdpVideoFormatTest, SameCodecNameNoParameters) {
  // Codec-name matching ignores case.
  struct NamePair {
    const char* lhs;
    const char* rhs;
  };
  const NamePair kSameName[] = {
      {"H264", "h264"}, {"VP8", "vp8"}, {"VP9", "vp9"}, {"AV1", "Av1"}};
  for (const NamePair& pair : kSameName) {
    EXPECT_TRUE(Sdp(pair.lhs).IsSameCodec(Sdp(pair.rhs)))
        << pair.lhs << " vs " << pair.rhs;
  }
#ifdef RTC_ENABLE_H265
  EXPECT_TRUE(Sdp("H265").IsSameCodec(Sdp("h265")));
#endif
}
TEST(SdpVideoFormatTest, DifferentCodecNameNoParameters) {
  // Distinct codec names never compare as the same codec.
  struct NamePair {
    const char* lhs;
    const char* rhs;
  };
  const NamePair kDifferentName[] = {
      {"H264", "VP8"}, {"VP8", "VP9"}, {"AV1", "VP8"}};
  for (const NamePair& pair : kDifferentName) {
    EXPECT_FALSE(Sdp(pair.lhs).IsSameCodec(Sdp(pair.rhs)))
        << pair.lhs << " vs " << pair.rhs;
  }
#ifdef RTC_ENABLE_H265
  EXPECT_FALSE(Sdp("H265").IsSameCodec(Sdp("VP8")));
#endif
}
// Formats with equal codec names compare equal when their codec-specific
// parameters (VP9 profile-id, H264 profile-level-id, AV1 profile, H265 PTL)
// are equivalent — including when one side relies on the default value.
TEST(SdpVideoFormatTest, SameCodecNameSameParameters) {
  // VP9: absent profile-id matches the default "0".
  EXPECT_TRUE(Sdp("VP9").IsSameCodec(Sdp("VP9", Params{{"profile-id", "0"}})));
  EXPECT_TRUE(Sdp("VP9", Params{{"profile-id", "0"}})
                  .IsSameCodec(Sdp("VP9", Params{{"profile-id", "0"}})));
  EXPECT_TRUE(Sdp("VP9", Params{{"profile-id", "2"}})
                  .IsSameCodec(Sdp("VP9", Params{{"profile-id", "2"}})));
  // H264: identical profile-level-id strings.
  EXPECT_TRUE(
      Sdp("H264", Params{{"profile-level-id", "42e01f"}})
          .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "42e01f"}})));
  EXPECT_TRUE(
      Sdp("H264", Params{{"profile-level-id", "640c34"}})
          .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "640c34"}})));
  // AV1: absent profile matches the default "0".
  EXPECT_TRUE(Sdp("AV1").IsSameCodec(Sdp("AV1", Params{{"profile", "0"}})));
  EXPECT_TRUE(Sdp("AV1", Params{{"profile", "0"}})
                  .IsSameCodec(Sdp("AV1", Params{{"profile", "0"}})));
  EXPECT_TRUE(Sdp("AV1", Params{{"profile", "2"}})
                  .IsSameCodec(Sdp("AV1", Params{{"profile", "2"}})));
#ifdef RTC_ENABLE_H265
  // H265: absent parameters match the defaults (profile 1, tier 0, level 93).
  EXPECT_TRUE(Sdp("H265").IsSameCodec(Sdp(
      "H265",
      Params{{"profile-id", "1"}, {"tier-flag", "0"}, {"level-id", "93"}})));
  EXPECT_TRUE(
      Sdp("H265",
          Params{{"profile-id", "2"}, {"tier-flag", "0"}, {"level-id", "93"}})
          .IsSameCodec(Sdp("H265", Params{{"profile-id", "2"},
                                          {"tier-flag", "0"},
                                          {"level-id", "93"}})));
#endif
}
// Equal codec names with differing (or differently-defaulted) parameters must
// not compare as the same codec.
TEST(SdpVideoFormatTest, SameCodecNameDifferentParameters) {
  EXPECT_FALSE(Sdp("VP9").IsSameCodec(Sdp("VP9", Params{{"profile-id", "2"}})));
  EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "0"}})
                   .IsSameCodec(Sdp("VP9", Params{{"profile-id", "1"}})));
  EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "2"}})
                   .IsSameCodec(Sdp("VP9", Params{{"profile-id", "0"}})));
  EXPECT_FALSE(
      Sdp("H264", Params{{"profile-level-id", "42e01f"}})
          .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "640c34"}})));
  EXPECT_FALSE(
      Sdp("H264", Params{{"profile-level-id", "640c34"}})
          .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "42f00b"}})));
  EXPECT_FALSE(Sdp("AV1").IsSameCodec(Sdp("AV1", Params{{"profile", "1"}})));
  EXPECT_FALSE(Sdp("AV1", Params{{"profile", "0"}})
                   .IsSameCodec(Sdp("AV1", Params{{"profile", "1"}})));
  EXPECT_FALSE(Sdp("AV1", Params{{"profile", "1"}})
                   .IsSameCodec(Sdp("AV1", Params{{"profile", "2"}})));
#ifdef RTC_ENABLE_H265
  // Each of profile-id, tier-flag and level-id alone makes the formats differ.
  EXPECT_FALSE(Sdp("H265").IsSameCodec(Sdp(
      "H265",
      Params{{"profile-id", "0"}, {"tier-flag", "0"}, {"level-id", "93"}})));
  EXPECT_FALSE(Sdp("H265").IsSameCodec(Sdp(
      "H265",
      Params{{"profile-id", "1"}, {"tier-flag", "1"}, {"level-id", "93"}})));
  EXPECT_FALSE(Sdp("H265").IsSameCodec(Sdp(
      "H265",
      Params{{"profile-id", "1"}, {"tier-flag", "0"}, {"level-id", "90"}})));
  EXPECT_FALSE(
      Sdp("H265",
          Params{{"profile-id", "2"}, {"tier-flag", "0"}, {"level-id", "93"}})
          .IsSameCodec(Sdp("H265", Params{{"profile-id", "1"},
                                          {"tier-flag", "0"},
                                          {"level-id", "93"}})));
  EXPECT_FALSE(
      Sdp("H265",
          Params{{"profile-id", "1"}, {"tier-flag", "1"}, {"level-id", "120"}})
          .IsSameCodec(Sdp("H265", Params{{"profile-id", "1"},
                                          {"tier-flag", "0"},
                                          {"level-id", "120"}})));
  EXPECT_FALSE(
      Sdp("H265",
          Params{{"profile-id", "1"}, {"tier-flag", "0"}, {"level-id", "93"}})
          .IsSameCodec(Sdp("H265", Params{{"profile-id", "1"},
                                          {"tier-flag", "0"},
                                          {"level-id", "90"}})));
#endif
}
// Parameters are irrelevant when the codec names differ: even byte-identical
// parameter maps never make two different codecs equal.
TEST(SdpVideoFormatTest, DifferentCodecNameSameParameters) {
  EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "0"}})
                   .IsSameCodec(Sdp("H264", Params{{"profile-id", "0"}})));
  EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "2"}})
                   .IsSameCodec(Sdp("VP8", Params{{"profile-id", "2"}})));
  EXPECT_FALSE(
      Sdp("H264", Params{{"profile-level-id", "42e01f"}})
          .IsSameCodec(Sdp("VP9", Params{{"profile-level-id", "42e01f"}})));
  EXPECT_FALSE(
      Sdp("H264", Params{{"profile-level-id", "640c34"}})
          .IsSameCodec(Sdp("VP8", Params{{"profile-level-id", "640c34"}})));
  EXPECT_FALSE(Sdp("AV1", Params{{"profile", "0"}})
                   .IsSameCodec(Sdp("H264", Params{{"profile", "0"}})));
  EXPECT_FALSE(Sdp("AV1", Params{{"profile", "2"}})
                   .IsSameCodec(Sdp("VP9", Params{{"profile", "2"}})));
#ifdef RTC_ENABLE_H265
  EXPECT_FALSE(Sdp("H265", Params{{"profile-id", "0"}})
                   .IsSameCodec(Sdp("H264", Params{{"profile-id", "0"}})));
  EXPECT_FALSE(Sdp("H265", Params{{"profile-id", "2"}})
                   .IsSameCodec(Sdp("VP9", Params{{"profile-id", "2"}})));
#endif
}
// H264 packetization-mode participates in the comparison; an absent parameter
// is treated as mode 0.
TEST(SdpVideoFormatTest, H264PacketizationMode) {
  // The default packetization mode is 0.
  EXPECT_TRUE(Sdp("H264", Params{{cricket::kH264FmtpPacketizationMode, "0"}})
                  .IsSameCodec(Sdp("H264")));
  EXPECT_FALSE(Sdp("H264", Params{{cricket::kH264FmtpPacketizationMode, "1"}})
                   .IsSameCodec(Sdp("H264")));
  EXPECT_TRUE(
      Sdp("H264", Params{{cricket::kH264FmtpPacketizationMode, "1"}})
          .IsSameCodec(
              Sdp("H264", Params{{cricket::kH264FmtpPacketizationMode, "1"}})));
}
} // namespace webrtc

View file

@ -0,0 +1,123 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/test/mock_video_decoder.h"
#include "api/video_codecs/video_decoder_factory_template.h"
#include "api/video_codecs/video_decoder_factory_template_dav1d_adapter.h"
#include "api/video_codecs/video_decoder_factory_template_libvpx_vp8_adapter.h"
#include "api/video_codecs/video_decoder_factory_template_libvpx_vp9_adapter.h"
#include "api/video_codecs/video_decoder_factory_template_open_h264_adapter.h"
#include "test/gmock.h"
#include "test/gtest.h"
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Field;
using ::testing::IsEmpty;
using ::testing::Ne;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
namespace webrtc {
namespace {
// Formats advertised by the fake template adapters below.
const SdpVideoFormat kFooSdp("Foo");
const SdpVideoFormat kBarLowSdp("Bar", {{"profile", "low"}});
const SdpVideoFormat kBarHighSdp("Bar", {{"profile", "high"}});
// Adapter advertising a single format. The returned decoder is a StrictMock
// that only expects to be destroyed — any other call fails the test.
struct FooDecoderTemplateAdapter {
  static std::vector<SdpVideoFormat> SupportedFormats() { return {kFooSdp}; }
  static std::unique_ptr<VideoDecoder> CreateDecoder(
      const SdpVideoFormat& format) {
    auto decoder = std::make_unique<testing::StrictMock<MockVideoDecoder>>();
    EXPECT_CALL(*decoder, Destruct);
    return decoder;
  }
};
// Adapter advertising two formats that share the codec name "Bar" but differ
// in their "profile" parameter.
struct BarDecoderTemplateAdapter {
  static std::vector<SdpVideoFormat> SupportedFormats() {
    return {kBarLowSdp, kBarHighSdp};
  }
  static std::unique_ptr<VideoDecoder> CreateDecoder(
      const SdpVideoFormat& format) {
    auto decoder = std::make_unique<testing::StrictMock<MockVideoDecoder>>();
    EXPECT_CALL(*decoder, Destruct);
    return decoder;
  }
};
TEST(VideoDecoderFactoryTemplate, OneTemplateAdapterCreateDecoder) {
  // A factory with one adapter advertises exactly that adapter's format.
  VideoDecoderFactoryTemplate<FooDecoderTemplateAdapter> factory;
  const std::vector<SdpVideoFormat> formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, UnorderedElementsAre(kFooSdp));
  // A known format yields a decoder; an unknown codec name yields nullptr.
  EXPECT_THAT(factory.CreateVideoDecoder(kFooSdp), Ne(nullptr));
  EXPECT_THAT(factory.CreateVideoDecoder(SdpVideoFormat("FooX")), Eq(nullptr));
}
TEST(VideoDecoderFactoryTemplate, TwoTemplateAdaptersNoDuplicates) {
  // Listing the same adapter twice must not duplicate its formats.
  VideoDecoderFactoryTemplate<FooDecoderTemplateAdapter,
                              FooDecoderTemplateAdapter>
      factory;
  const std::vector<SdpVideoFormat> formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, UnorderedElementsAre(kFooSdp));
}
TEST(VideoDecoderFactoryTemplate, TwoTemplateAdaptersCreateDecoders) {
  VideoDecoderFactoryTemplate<FooDecoderTemplateAdapter,
                              BarDecoderTemplateAdapter>
      factory;
  // Both adapters' formats are advertised and all are instantiable.
  EXPECT_THAT(factory.GetSupportedFormats(),
              UnorderedElementsAre(kFooSdp, kBarLowSdp, kBarHighSdp));
  for (const SdpVideoFormat& format : {kFooSdp, kBarLowSdp, kBarHighSdp}) {
    EXPECT_THAT(factory.CreateVideoDecoder(format), Ne(nullptr));
  }
  // An unknown name, or "Bar" without its distinguishing parameters, fails.
  EXPECT_THAT(factory.CreateVideoDecoder(SdpVideoFormat("FooX")), Eq(nullptr));
  EXPECT_THAT(factory.CreateVideoDecoder(SdpVideoFormat("Bar")), Eq(nullptr));
}
TEST(VideoDecoderFactoryTemplate, LibvpxVp8) {
  VideoDecoderFactoryTemplate<LibvpxVp8DecoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  // The libvpx VP8 adapter exposes exactly one format, named "VP8". Use
  // SizeIs/Each matchers (instead of passing a bare int as a matcher for
  // formats.size()) to match the sibling LibvpxVp9/Dav1d tests.
  EXPECT_THAT(formats, ::testing::SizeIs(1));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::name, "VP8")));
  EXPECT_THAT(factory.CreateVideoDecoder(formats[0]), Ne(nullptr));
}
TEST(VideoDecoderFactoryTemplate, LibvpxVp9) {
  // The VP9 adapter may expose several profiles; every format must be named
  // "VP9" and the first must be instantiable.
  VideoDecoderFactoryTemplate<LibvpxVp9DecoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, Not(IsEmpty()));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::name, "VP9")));
  EXPECT_THAT(factory.CreateVideoDecoder(formats[0]), Ne(nullptr));
}
// TODO(bugs.webrtc.org/13573): When OpenH264 is no longer a conditional build
// target remove this #ifdef.
#if defined(WEBRTC_USE_H264)
TEST(VideoDecoderFactoryTemplate, OpenH264) {
  // All formats from the OpenH264 adapter must be named "H264".
  VideoDecoderFactoryTemplate<OpenH264DecoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, Not(IsEmpty()));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::name, "H264")));
  EXPECT_THAT(factory.CreateVideoDecoder(formats[0]), Ne(nullptr));
}
#endif  // defined(WEBRTC_USE_H264)
TEST(VideoDecoderFactoryTemplate, Dav1d) {
  // All formats from the dav1d adapter must be named "AV1".
  VideoDecoderFactoryTemplate<Dav1dDecoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, Not(IsEmpty()));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::name, "AV1")));
  EXPECT_THAT(factory.CreateVideoDecoder(formats[0]), Ne(nullptr));
}
} // namespace
} // namespace webrtc

View file

@ -0,0 +1,309 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_decoder_software_fallback_wrapper.h"
#include <stdint.h>
#include "absl/types/optional.h"
#include "api/environment/environment.h"
#include "api/environment/environment_factory.h"
#include "api/video/encoded_image.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/video_decoder.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "test/explicit_key_value_config.h"
#include "test/gtest.h"
namespace webrtc {
// Fixture wrapping a counting fake decoder (playing the role of the primary,
// e.g. hardware, decoder) in a software-fallback wrapper whose fallback is a
// real libvpx VP8 decoder.
class VideoDecoderSoftwareFallbackWrapperTest : public ::testing::Test {
 protected:
  VideoDecoderSoftwareFallbackWrapperTest()
      : VideoDecoderSoftwareFallbackWrapperTest("") {}
  // `field_trials` lets subclasses enable behaviors such as forced software
  // fallback (see ForcedSoftwareDecoderFallbackTest below in this file).
  explicit VideoDecoderSoftwareFallbackWrapperTest(
      const std::string& field_trials)
      : field_trials_(field_trials),
        env_(CreateEnvironment(&field_trials_)),
        fake_decoder_(new CountingFakeDecoder()),
        fallback_wrapper_(CreateVideoDecoderSoftwareFallbackWrapper(
            env_,
            CreateVp8Decoder(env_),
            std::unique_ptr<VideoDecoder>(fake_decoder_))) {}
  // Fake primary decoder that counts API calls and returns configurable
  // result codes, letting tests steer the wrapper's fallback logic.
  class CountingFakeDecoder : public VideoDecoder {
   public:
    bool Configure(const Settings& settings) override {
      ++configure_count_;
      return configure_return_value_;
    }
    int32_t Decode(const EncodedImage& input_image,
                   int64_t render_time_ms) override {
      ++decode_count_;
      return decode_return_code_;
    }
    int32_t RegisterDecodeCompleteCallback(
        DecodedImageCallback* callback) override {
      decode_complete_callback_ = callback;
      return WEBRTC_VIDEO_CODEC_OK;
    }
    int32_t Release() override {
      ++release_count_;
      return WEBRTC_VIDEO_CODEC_OK;
    }
    const char* ImplementationName() const override { return "fake-decoder"; }
    // Call counters and configurable return values, read directly by tests.
    int configure_count_ = 0;
    int decode_count_ = 0;
    bool configure_return_value_ = true;
    int32_t decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
    DecodedImageCallback* decode_complete_callback_ = nullptr;
    int release_count_ = 0;
    // NOTE(review): never incremented anywhere in this fake.
    int reset_count_ = 0;
  };
  test::ExplicitKeyValueConfig field_trials_;
  const Environment env_;
  // `fake_decoder_` is owned and released by `fallback_wrapper_`.
  CountingFakeDecoder* fake_decoder_;
  std::unique_ptr<VideoDecoder> fallback_wrapper_;
};
// Configure() reaches the primary decoder once; a subsequent keyframe decode
// must not reinitialize it.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest, InitializesDecoder) {
  fallback_wrapper_->Configure({});
  EXPECT_EQ(1, fake_decoder_->configure_count_);
  EncodedImage encoded_image;
  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(1, fake_decoder_->configure_count_)
      << "Initialized decoder should not be reinitialized.";
  EXPECT_EQ(1, fake_decoder_->decode_count_);
}
// If configuring the primary decoder fails, all decoding goes to the software
// fallback and the primary is never asked to decode.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
       UsesFallbackDecoderAfterAnyInitDecodeFailure) {
  fake_decoder_->configure_return_value_ = false;
  fallback_wrapper_->Configure({});
  EXPECT_EQ(1, fake_decoder_->configure_count_);
  EncodedImage encoded_image;
  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(1, fake_decoder_->configure_count_)
      << "Should not have attempted reinitializing the fallback decoder on "
         "keyframe.";
  // Unfortunately faking a VP8 frame is hard. Rely on no Decode -> using SW
  // decoder.
  EXPECT_EQ(0, fake_decoder_->decode_count_)
      << "Decoder used even though no InitDecode had succeeded.";
}
// Once the primary requests software fallback, it stays bypassed even for
// later keyframes.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest, IsSoftwareFallbackSticky) {
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
  EncodedImage encoded_image;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(1, fake_decoder_->decode_count_);
  // Software fallback should be sticky, fake_decoder_ shouldn't be used.
  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(1, fake_decoder_->decode_count_)
      << "Decoder shouldn't be used after failure.";
  // fake_decoder_ should have only been initialized once during the test.
  EXPECT_EQ(1, fake_decoder_->configure_count_);
}
// A plain WEBRTC_VIDEO_CODEC_ERROR does not trigger fallback by itself.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest, DoesNotFallbackOnEveryError) {
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
  EncodedImage encoded_image;
  EXPECT_EQ(fake_decoder_->decode_return_code_,
            fallback_wrapper_->Decode(encoded_image, -1));
  EXPECT_EQ(1, fake_decoder_->decode_count_);
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(2, fake_decoder_->decode_count_)
      << "Decoder should be active even though previous decode failed.";
}
// Release() + Configure() resets the sticky fallback state back to the
// primary decoder.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest, UsesHwDecoderAfterReinit) {
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
  EncodedImage encoded_image;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(1, fake_decoder_->decode_count_);
  fallback_wrapper_->Release();
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(2, fake_decoder_->decode_count_)
      << "Should not be using fallback after reinit.";
}
// The wrapper forwards Release() to the primary, and also releases the
// primary when switching to the software fallback.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest, ForwardsReleaseCall) {
  fallback_wrapper_->Configure({});
  fallback_wrapper_->Release();
  EXPECT_EQ(1, fake_decoder_->release_count_);
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
  EncodedImage encoded_image;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(2, fake_decoder_->release_count_)
      << "Decoder should be released during fallback.";
  fallback_wrapper_->Release();
  EXPECT_EQ(2, fake_decoder_->release_count_);
}
// TODO(pbos): Fake a VP8 frame well enough to actually receive a callback from
// the software decoder.
// The registered DecodedImageCallback must be forwarded to the active
// (primary) decoder.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
       ForwardsRegisterDecodeCompleteCallback) {
  // Minimal callback stub; only the single-argument Decoded() overload is
  // expected to be reachable in this test.
  class FakeDecodedImageCallback : public DecodedImageCallback {
    int32_t Decoded(VideoFrame& decodedImage) override { return 0; }
    int32_t Decoded(webrtc::VideoFrame& decodedImage,
                    int64_t decode_time_ms) override {
      RTC_DCHECK_NOTREACHED();
      return -1;
    }
    void Decoded(webrtc::VideoFrame& decodedImage,
                 absl::optional<int32_t> decode_time_ms,
                 absl::optional<uint8_t> qp) override {
      RTC_DCHECK_NOTREACHED();
    }
  } callback;
  fallback_wrapper_->Configure({});
  fallback_wrapper_->RegisterDecodeCompleteCallback(&callback);
  EXPECT_EQ(&callback, fake_decoder_->decode_complete_callback_);
}
// After falling back, ImplementationName() reports both the fallback and the
// original decoder names.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
       ReportsFallbackImplementationName) {
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
  EncodedImage encoded_image;
  fallback_wrapper_->Decode(encoded_image, -1);
  // Hard coded expected value since libvpx is the software implementation name
  // for VP8. Change accordingly if the underlying implementation does.
  EXPECT_STREQ("libvpx (fallback from: fake-decoder)",
               fallback_wrapper_->ImplementationName());
  fallback_wrapper_->Release();
}
// Repeated consecutive keyframe errors eventually trigger software fallback.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest, FallbacksOnTooManyErrors) {
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
  EncodedImage encoded_image;
  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
  // Doesn't fallback from a single error.
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
  // However, many frames with the same error, fallback should happen.
  const int kNumFramesToEncode = 10;
  for (int i = 0; i < kNumFramesToEncode; ++i) {
    fallback_wrapper_->Decode(encoded_image, -1);
  }
  // Hard coded expected value since libvpx is the software implementation name
  // for VP8. Change accordingly if the underlying implementation does.
  EXPECT_STREQ("libvpx (fallback from: fake-decoder)",
               fallback_wrapper_->ImplementationName());
  fallback_wrapper_->Release();
}
// Repeated errors on delta frames alone do not trigger fallback.
TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
       DoesNotFallbackOnDeltaFramesErrors) {
  fallback_wrapper_->Configure({});
  fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
  EncodedImage encoded_image;
  encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
  // Many decoded frames with the same error
  const int kNumFramesToEncode = 10;
  for (int i = 0; i < kNumFramesToEncode; ++i) {
    fallback_wrapper_->Decode(encoded_image, -1);
  }
  EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
  fallback_wrapper_->Release();
}
// Errors interleaved with successful decodes never accumulate enough to
// trigger fallback. (NOTE(review): "Consequtive" in the test name is a typo
// for "Consecutive"; kept as-is since renaming would affect test filters.)
TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
       DoesNotFallbacksOnNonConsequtiveErrors) {
  fallback_wrapper_->Configure({});
  EncodedImage encoded_image;
  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
  const int kNumFramesToEncode = 10;
  for (int i = 0; i < kNumFramesToEncode; ++i) {
    // Interleaved errors and successful decodes.
    fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_ERROR;
    fallback_wrapper_->Decode(encoded_image, -1);
    fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
    fallback_wrapper_->Decode(encoded_image, -1);
  }
  EXPECT_STREQ("fake-decoder", fallback_wrapper_->ImplementationName());
  fallback_wrapper_->Release();
}
// Variant of the fixture with the forced-software-fallback field trial
// enabled; both the "primary" and the fallback are counting fakes here.
class ForcedSoftwareDecoderFallbackTest
    : public VideoDecoderSoftwareFallbackWrapperTest {
 public:
  ForcedSoftwareDecoderFallbackTest()
      : VideoDecoderSoftwareFallbackWrapperTest(
            "WebRTC-Video-ForcedSwDecoderFallback/Enabled/") {
    fake_decoder_ = new CountingFakeDecoder();
    sw_fallback_decoder_ = new CountingFakeDecoder();
    fallback_wrapper_ = CreateVideoDecoderSoftwareFallbackWrapper(
        env_, std::unique_ptr<VideoDecoder>(sw_fallback_decoder_),
        std::unique_ptr<VideoDecoder>(fake_decoder_));
  }
  // Owned and released by `fallback_wrapper_`, like `fake_decoder_`.
  CountingFakeDecoder* sw_fallback_decoder_;
};
// With the field trial enabled, the fallback decoder is used from the start
// and the primary decoder is never touched.
TEST_F(ForcedSoftwareDecoderFallbackTest, UsesForcedFallback) {
  fallback_wrapper_->Configure({});
  EXPECT_EQ(1, sw_fallback_decoder_->configure_count_);
  EncodedImage encoded_image;
  encoded_image._frameType = VideoFrameType::kVideoFrameKey;
  fallback_wrapper_->Decode(encoded_image, -1);
  EXPECT_EQ(1, sw_fallback_decoder_->configure_count_);
  EXPECT_EQ(1, sw_fallback_decoder_->decode_count_);
  fallback_wrapper_->Release();
  EXPECT_EQ(1, sw_fallback_decoder_->release_count_);
  // Only fallback decoder should have been used.
  EXPECT_EQ(0, fake_decoder_->configure_count_);
  EXPECT_EQ(0, fake_decoder_->decode_count_);
  EXPECT_EQ(0, fake_decoder_->release_count_);
}
} // namespace webrtc

View file

@ -0,0 +1,173 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/test/mock_video_encoder.h"
#include "api/video_codecs/video_encoder_factory_template.h"
#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h"
#include "test/gmock.h"
#include "test/gtest.h"
using ::testing::Contains;
using ::testing::Each;
using ::testing::Eq;
using ::testing::Field;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::Not;
using ::testing::NotNull;
using ::testing::UnorderedElementsAre;
namespace webrtc {
namespace {
using CodecSupport = VideoEncoderFactory::CodecSupport;
// Formats advertised by the fake template adapters below.
const SdpVideoFormat kFooSdp("Foo");
const SdpVideoFormat kBarLowSdp("Bar", {{"profile", "low"}});
const SdpVideoFormat kBarHighSdp("Bar", {{"profile", "high"}});
// Adapter advertising a single format; supports only the L1T2/L1T3
// scalability modes. The created encoder is a StrictMock, so any unexpected
// call on it fails the test.
struct FooEncoderTemplateAdapter {
  static std::vector<SdpVideoFormat> SupportedFormats() { return {kFooSdp}; }
  static std::unique_ptr<VideoEncoder> CreateEncoder(
      const SdpVideoFormat& format) {
    return std::make_unique<testing::StrictMock<MockVideoEncoder>>();
  }
  static bool IsScalabilityModeSupported(ScalabilityMode scalability_mode) {
    return scalability_mode == ScalabilityMode::kL1T2 ||
           scalability_mode == ScalabilityMode::kL1T3;
  }
};
// Adapter advertising two "Bar" formats (differing in the "profile"
// parameter); supports a wider set of scalability modes than Foo.
struct BarEncoderTemplateAdapter {
  static std::vector<SdpVideoFormat> SupportedFormats() {
    return {kBarLowSdp, kBarHighSdp};
  }
  static std::unique_ptr<VideoEncoder> CreateEncoder(
      const SdpVideoFormat& format) {
    return std::make_unique<testing::StrictMock<MockVideoEncoder>>();
  }
  static bool IsScalabilityModeSupported(ScalabilityMode scalability_mode) {
    return scalability_mode == ScalabilityMode::kL1T2 ||
           scalability_mode == ScalabilityMode::kL1T3 ||
           scalability_mode == ScalabilityMode::kS2T1 ||
           scalability_mode == ScalabilityMode::kS3T3;
  }
};
// A factory built from a single adapter must expose exactly that adapter's
// format and only create encoders for it.
TEST(VideoEncoderFactoryTemplate, OneTemplateAdapterCreateEncoder) {
  VideoEncoderFactoryTemplate<FooEncoderTemplateAdapter> factory;
  EXPECT_THAT(factory.GetSupportedFormats(), UnorderedElementsAre(kFooSdp));
  EXPECT_THAT(factory.CreateVideoEncoder(kFooSdp), NotNull());
  EXPECT_THAT(factory.CreateVideoEncoder(SdpVideoFormat("FooX")), IsNull());
}
// QueryCodecSupport must reflect the adapter's scalability-mode support and
// report unknown formats as unsupported.
TEST(VideoEncoderFactoryTemplate, OneTemplateAdapterCodecSupport) {
  VideoEncoderFactoryTemplate<FooEncoderTemplateAdapter> factory;
  EXPECT_THAT(factory.QueryCodecSupport(kFooSdp, absl::nullopt),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kFooSdp, "L1T2"),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kFooSdp, "S3T3"),
              Field(&CodecSupport::is_supported, false));
  EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat("FooX"), absl::nullopt),
              Field(&CodecSupport::is_supported, false));
}
// Registering the same adapter twice must not yield duplicate formats.
TEST(VideoEncoderFactoryTemplate, TwoTemplateAdaptersNoDuplicates) {
  VideoEncoderFactoryTemplate<FooEncoderTemplateAdapter,
                              FooEncoderTemplateAdapter>
      factory;
  EXPECT_THAT(factory.GetSupportedFormats(), UnorderedElementsAre(kFooSdp));
}
// With two distinct adapters the factory merges their formats and can create
// an encoder for each advertised format.
TEST(VideoEncoderFactoryTemplate, TwoTemplateAdaptersCreateEncoders) {
  VideoEncoderFactoryTemplate<FooEncoderTemplateAdapter,
                              BarEncoderTemplateAdapter>
      factory;
  EXPECT_THAT(factory.GetSupportedFormats(),
              UnorderedElementsAre(kFooSdp, kBarLowSdp, kBarHighSdp));
  EXPECT_THAT(factory.CreateVideoEncoder(kFooSdp), NotNull());
  EXPECT_THAT(factory.CreateVideoEncoder(kBarLowSdp), NotNull());
  EXPECT_THAT(factory.CreateVideoEncoder(kBarHighSdp), NotNull());
  EXPECT_THAT(factory.CreateVideoEncoder(SdpVideoFormat("FooX")), IsNull());
  // Note: a parameterless "Bar" still creates an encoder.
  EXPECT_THAT(factory.CreateVideoEncoder(SdpVideoFormat("Bar")), NotNull());
}
// Codec support queries are routed to whichever adapter owns the format.
TEST(VideoEncoderFactoryTemplate, TwoTemplateAdaptersCodecSupport) {
  VideoEncoderFactoryTemplate<FooEncoderTemplateAdapter,
                              BarEncoderTemplateAdapter>
      factory;
  EXPECT_THAT(factory.QueryCodecSupport(kFooSdp, absl::nullopt),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kFooSdp, "L1T2"),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kFooSdp, "S3T3"),
              Field(&CodecSupport::is_supported, false));
  EXPECT_THAT(factory.QueryCodecSupport(kBarLowSdp, absl::nullopt),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kBarHighSdp, absl::nullopt),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kBarLowSdp, "S2T1"),
              Field(&CodecSupport::is_supported, true));
  EXPECT_THAT(factory.QueryCodecSupport(kBarHighSdp, "S3T2"),
              Field(&CodecSupport::is_supported, false));
}
// Smoke test for the libvpx VP8 adapter: exactly one format, named "VP8",
// supporting L1T3, and an encoder can be created for it.
TEST(VideoEncoderFactoryTemplate, LibvpxVp8) {
  VideoEncoderFactoryTemplate<LibvpxVp8EncoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats.size(), 1);
  EXPECT_THAT(formats[0], Field(&SdpVideoFormat::name, "VP8"));
  EXPECT_THAT(formats[0], Field(&SdpVideoFormat::scalability_modes,
                                Contains(ScalabilityMode::kL1T3)));
  EXPECT_THAT(factory.CreateVideoEncoder(formats[0]), NotNull());
}
// Smoke test for the libvpx VP9 adapter: all advertised formats are "VP9"
// and support the L3T3_KEY SVC mode.
TEST(VideoEncoderFactoryTemplate, LibvpxVp9) {
  VideoEncoderFactoryTemplate<LibvpxVp9EncoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, Not(IsEmpty()));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::name, "VP9")));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::scalability_modes,
                                  Contains(ScalabilityMode::kL3T3_KEY))));
  EXPECT_THAT(factory.CreateVideoEncoder(formats[0]), NotNull());
}
// TODO(bugs.webrtc.org/13573): When OpenH264 is no longer a conditional build
// target remove this #ifdef.
#if defined(WEBRTC_USE_H264)
// Smoke test for the OpenH264 adapter: every advertised format is "H264"
// with L1T3 support, and an encoder can be created.
TEST(VideoEncoderFactoryTemplate, OpenH264) {
  VideoEncoderFactoryTemplate<OpenH264EncoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats, Not(IsEmpty()));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::name, "H264")));
  EXPECT_THAT(formats, Each(Field(&SdpVideoFormat::scalability_modes,
                                  Contains(ScalabilityMode::kL1T3))));
  EXPECT_THAT(factory.CreateVideoEncoder(formats[0]), NotNull());
}
#endif // defined(WEBRTC_USE_H264)
// Smoke test for the libaom AV1 adapter: exactly one "AV1" format with
// L3T3_KEY support, and an encoder can be created.
TEST(VideoEncoderFactoryTemplate, LibaomAv1) {
  VideoEncoderFactoryTemplate<LibaomAv1EncoderTemplateAdapter> factory;
  auto formats = factory.GetSupportedFormats();
  EXPECT_THAT(formats.size(), 1);
  EXPECT_THAT(formats[0], Field(&SdpVideoFormat::name, "AV1"));
  EXPECT_THAT(formats[0], Field(&SdpVideoFormat::scalability_modes,
                                Contains(ScalabilityMode::kL3T3_KEY)));
  EXPECT_THAT(factory.CreateVideoEncoder(formats[0]), NotNull());
}
} // namespace
} // namespace webrtc

View file

@ -0,0 +1,219 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_codec.h"
#include <string.h>
#include <string>
#include "absl/strings/match.h"
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Canonical payload-name strings used by the CodecType <-> string mappings
// defined at the bottom of this file.
constexpr char kPayloadNameVp8[] = "VP8";
constexpr char kPayloadNameVp9[] = "VP9";
constexpr char kPayloadNameAv1[] = "AV1";
// TODO(bugs.webrtc.org/13166): Remove AV1X when backwards compatibility is not
// needed.
constexpr char kPayloadNameAv1x[] = "AV1X";
constexpr char kPayloadNameH264[] = "H264";
constexpr char kPayloadNameGeneric[] = "Generic";
constexpr char kPayloadNameMultiplex[] = "Multiplex";
constexpr char kPayloadNameH265[] = "H265";
} // namespace
// Member-wise equality for the VP8-specific settings.
bool VideoCodecVP8::operator==(const VideoCodecVP8& other) const {
  if (numberOfTemporalLayers != other.numberOfTemporalLayers)
    return false;
  if (denoisingOn != other.denoisingOn)
    return false;
  if (automaticResizeOn != other.automaticResizeOn)
    return false;
  return keyFrameInterval == other.keyFrameInterval;
}
// Member-wise equality for the VP9-specific settings.
// Fix: also compare `interLayerPred`. Previously two configurations that
// differed only in inter-layer prediction mode compared equal, even though
// the struct declares the field.
bool VideoCodecVP9::operator==(const VideoCodecVP9& other) const {
  return (numberOfTemporalLayers == other.numberOfTemporalLayers &&
          denoisingOn == other.denoisingOn &&
          keyFrameInterval == other.keyFrameInterval &&
          adaptiveQpMode == other.adaptiveQpMode &&
          automaticResizeOn == other.automaticResizeOn &&
          numberOfSpatialLayers == other.numberOfSpatialLayers &&
          flexibleMode == other.flexibleMode &&
          interLayerPred == other.interLayerPred);
}
// Member-wise equality for the H264-specific settings.
bool VideoCodecH264::operator==(const VideoCodecH264& other) const {
  const bool same_interval = keyFrameInterval == other.keyFrameInterval;
  const bool same_layers =
      numberOfTemporalLayers == other.numberOfTemporalLayers;
  return same_interval && same_layers;
}
// Member-wise equality for the H265-specific settings, including the contents
// of the parameter-set buffers.
// Fix: the VPS payload bytes are now compared. Previously only `vpsLen` was
// checked, so two configs with different VPS data of equal length compared
// equal, unlike the SPS/PPS buffers which were already memcmp'd.
bool VideoCodecH265::operator==(const VideoCodecH265& other) const {
  return (frameDroppingOn == other.frameDroppingOn &&
          keyFrameInterval == other.keyFrameInterval &&
          vpsLen == other.vpsLen && spsLen == other.spsLen &&
          ppsLen == other.ppsLen &&
          (vpsLen == 0 || memcmp(vpsData, other.vpsData, vpsLen) == 0) &&
          (spsLen == 0 || memcmp(spsData, other.spsData, spsLen) == 0) &&
          (ppsLen == 0 || memcmp(ppsData, other.ppsData, ppsLen) == 0));
}
// Default configuration: generic codec type, zeroed dimensions/bitrates,
// active, real-time mode, normal encoder complexity.
VideoCodec::VideoCodec()
    : codecType(kVideoCodecGeneric),
      width(0),
      height(0),
      startBitrate(0),
      maxBitrate(0),
      minBitrate(0),
      maxFramerate(0),
      active(true),
      qpMax(0),
      numberOfSimulcastStreams(0),
      simulcastStream(),
      spatialLayers(),
      mode(VideoCodecMode::kRealtimeVideo),
      expect_encode_from_texture(false),
      timing_frame_thresholds({0, 0}),
      legacy_conference_mode(false),
      codec_specific_(),
      complexity_(VideoCodecComplexity::kComplexityNormal) {}
// Produces a human-readable summary of the configuration: the codec type and
// mode, then either the single stream (resolution, scalability mode, active
// state) or every simulcast stream in brackets.
std::string VideoCodec::ToString() const {
  char string_buf[2048];
  rtc::SimpleStringBuilder ss(string_buf);
  ss << "VideoCodec {" << "type: " << CodecTypeToPayloadString(codecType)
     << ", mode: "
     << (mode == VideoCodecMode::kRealtimeVideo ? "RealtimeVideo"
                                                : "Screensharing");
  if (IsSinglecast()) {
    absl::optional<ScalabilityMode> scalability_mode = GetScalabilityMode();
    // Nothing is printed for a singlecast config without a scalability mode.
    if (scalability_mode.has_value()) {
      ss << ", Singlecast: {" << width << "x" << height << " "
         << ScalabilityModeToString(*scalability_mode)
         << (active ? ", active" : ", inactive") << "}";
    }
  } else {
    ss << ", Simulcast: {";
    for (size_t i = 0; i < numberOfSimulcastStreams; ++i) {
      // Bind by reference; copying a SimulcastStream per iteration is wasteful.
      const SimulcastStream& stream = simulcastStream[i];
      ss << "[" << stream.width << "x" << stream.height << " "
         << ScalabilityModeToString(stream.GetScalabilityMode())
         << (stream.active ? ", active" : ", inactive") << "]";
    }
    ss << "}";
  }
  ss << "}";
  return ss.str();
}
// Codec-specific accessors. Each RTC_DCHECKs (debug builds) that the stored
// `codecType` matches the requested union member before exposing it.
VideoCodecVP8* VideoCodec::VP8() {
  RTC_DCHECK_EQ(codecType, kVideoCodecVP8);
  return &codec_specific_.VP8;
}
const VideoCodecVP8& VideoCodec::VP8() const {
  RTC_DCHECK_EQ(codecType, kVideoCodecVP8);
  return codec_specific_.VP8;
}
VideoCodecVP9* VideoCodec::VP9() {
  RTC_DCHECK_EQ(codecType, kVideoCodecVP9);
  return &codec_specific_.VP9;
}
const VideoCodecVP9& VideoCodec::VP9() const {
  RTC_DCHECK_EQ(codecType, kVideoCodecVP9);
  return codec_specific_.VP9;
}
VideoCodecH264* VideoCodec::H264() {
  RTC_DCHECK_EQ(codecType, kVideoCodecH264);
  return &codec_specific_.H264;
}
const VideoCodecH264& VideoCodec::H264() const {
  RTC_DCHECK_EQ(codecType, kVideoCodecH264);
  return codec_specific_.H264;
}
// Mutable accessor for the H265-specific settings.
// Fix: the DCHECK compared against kVideoCodecH264 (copy-paste error), so
// accessing H265 settings on a correctly configured H265 codec tripped the
// check in debug builds. It now matches the const overload, which checks H265.
VideoCodecH265* VideoCodec::H265() {
  RTC_DCHECK_EQ(codecType, kVideoCodecH265);
  return &codec_specific_.H265;
}
// Const accessor for the H265-specific settings; requires codecType == H265.
const VideoCodecH265& VideoCodec::H265() const {
  RTC_DCHECK_EQ(codecType, kVideoCodecH265);
  return codec_specific_.H265;
}
// Accessors for the AV1-specific settings; require codecType == AV1.
VideoCodecAV1* VideoCodec::AV1() {
  RTC_DCHECK_EQ(codecType, kVideoCodecAV1);
  return &codec_specific_.AV1;
}
const VideoCodecAV1& VideoCodec::AV1() const {
  RTC_DCHECK_EQ(codecType, kVideoCodecAV1);
  return codec_specific_.AV1;
}
// Maps a VideoCodecType to its canonical payload-name string. The switch is
// exhaustive over the enum; falling out of it is a programming error.
const char* CodecTypeToPayloadString(VideoCodecType type) {
  switch (type) {
    case kVideoCodecVP8:
      return kPayloadNameVp8;
    case kVideoCodecVP9:
      return kPayloadNameVp9;
    case kVideoCodecAV1:
      return kPayloadNameAv1;
    case kVideoCodecH264:
      return kPayloadNameH264;
    case kVideoCodecMultiplex:
      return kPayloadNameMultiplex;
    case kVideoCodecGeneric:
      return kPayloadNameGeneric;
    case kVideoCodecH265:
      return kPayloadNameH265;
  }
  RTC_CHECK_NOTREACHED();
}
// Maps a payload name (case-insensitive) back to its VideoCodecType.
// "AV1X" is accepted as a legacy alias for AV1 (see the TODO above), and any
// unrecognized name maps to kVideoCodecGeneric rather than failing.
VideoCodecType PayloadStringToCodecType(const std::string& name) {
  if (absl::EqualsIgnoreCase(name, kPayloadNameVp8))
    return kVideoCodecVP8;
  if (absl::EqualsIgnoreCase(name, kPayloadNameVp9))
    return kVideoCodecVP9;
  if (absl::EqualsIgnoreCase(name, kPayloadNameAv1) ||
      absl::EqualsIgnoreCase(name, kPayloadNameAv1x))
    return kVideoCodecAV1;
  if (absl::EqualsIgnoreCase(name, kPayloadNameH264))
    return kVideoCodecH264;
  if (absl::EqualsIgnoreCase(name, kPayloadNameMultiplex))
    return kVideoCodecMultiplex;
  if (absl::EqualsIgnoreCase(name, kPayloadNameH265))
    return kVideoCodecH265;
  return kVideoCodecGeneric;
}
// Returns the configured encoder complexity (CPU capability hint).
VideoCodecComplexity VideoCodec::GetVideoEncoderComplexity() const {
  return complexity_;
}
void VideoCodec::SetVideoEncoderComplexity(
    VideoCodecComplexity complexity_setting) {
  complexity_ = complexity_setting;
}
// Frame-drop toggle; the precise semantics are defined by the encoders.
bool VideoCodec::GetFrameDropEnabled() const {
  return frame_drop_enabled_;
}
void VideoCodec::SetFrameDropEnabled(bool enabled) {
  frame_drop_enabled_ = enabled;
}
} // namespace webrtc

View file

@ -0,0 +1,246 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_CODEC_H_
#define API_VIDEO_CODECS_VIDEO_CODEC_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include "absl/strings/string_view.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_codec_type.h"
#include "api/video_codecs/scalability_mode.h"
#include "api/video_codecs/simulcast_stream.h"
#include "api/video_codecs/spatial_layer.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// The VideoCodec class represents an old defacto-apis, which we're migrating
// away from slowly.
// Video codec
// Encoder effort / CPU usage hint. kComplexityNormal (0) is the constructor
// default of VideoCodec.
enum class VideoCodecComplexity {
  kComplexityLow = -1,
  kComplexityNormal = 0,
  kComplexityHigh = 1,
  kComplexityHigher = 2,
  kComplexityMax = 3
};
// VP8 specific
struct VideoCodecVP8 {
  bool operator==(const VideoCodecVP8& other) const;
  bool operator!=(const VideoCodecVP8& other) const {
    return !(*this == other);
  }
  // Temporary utility method for transition deleting numberOfTemporalLayers
  // setting (replaced by ScalabilityMode).
  void SetNumberOfTemporalLayers(unsigned char n) {
    numberOfTemporalLayers = n;
  }
  // See the VP8 encoder implementation for the precise semantics of the
  // knobs below.
  unsigned char numberOfTemporalLayers;
  bool denoisingOn;
  bool automaticResizeOn;
  int keyFrameInterval;
};
// Controls prediction between SVC spatial layers.
enum class InterLayerPredMode : int {
  kOff = 0,  // Inter-layer prediction is disabled.
  kOn = 1,   // Inter-layer prediction is enabled.
  kOnKeyPic = 2  // Inter-layer prediction is enabled but limited to key frames.
};
// VP9 specific.
struct VideoCodecVP9 {
  bool operator==(const VideoCodecVP9& other) const;
  bool operator!=(const VideoCodecVP9& other) const {
    return !(*this == other);
  }
  // Temporary utility method for transition deleting numberOfTemporalLayers
  // setting (replaced by ScalabilityMode).
  void SetNumberOfTemporalLayers(unsigned char n) {
    numberOfTemporalLayers = n;
  }
  unsigned char numberOfTemporalLayers;
  bool denoisingOn;
  int keyFrameInterval;
  bool adaptiveQpMode;
  bool automaticResizeOn;
  unsigned char numberOfSpatialLayers;
  bool flexibleMode;
  InterLayerPredMode interLayerPred;
};
// H264 specific.
struct VideoCodecH264 {
  bool operator==(const VideoCodecH264& other) const;
  bool operator!=(const VideoCodecH264& other) const {
    return !(*this == other);
  }
  // Temporary utility method for transition deleting numberOfTemporalLayers
  // setting (replaced by ScalabilityMode).
  void SetNumberOfTemporalLayers(unsigned char n) {
    numberOfTemporalLayers = n;
  }
  int keyFrameInterval;
  uint8_t numberOfTemporalLayers;
};
// H265 specific.
struct VideoCodecH265 {
  bool operator==(const VideoCodecH265& other) const;
  bool operator!=(const VideoCodecH265& other) const {
    return !(*this == other);
  }
  bool frameDroppingOn;
  int keyFrameInterval;
  // Parameter-set payloads (VPS/SPS/PPS). Only pointers and lengths are
  // stored here — presumably non-owning; confirm the lifetime contract with
  // the code that populates these fields.
  const uint8_t* vpsData;
  size_t vpsLen;
  const uint8_t* spsData;
  size_t spsLen;
  const uint8_t* ppsData;
  size_t ppsLen;
};
// AV1 specific.
struct VideoCodecAV1 {
  bool operator==(const VideoCodecAV1& other) const {
    return automatic_resize_on == other.automatic_resize_on;
  }
  bool operator!=(const VideoCodecAV1& other) const {
    return !(*this == other);
  }
  bool automatic_resize_on;
};
// Translates from name of codec to codec type and vice versa.
RTC_EXPORT const char* CodecTypeToPayloadString(VideoCodecType type);
RTC_EXPORT VideoCodecType PayloadStringToCodecType(const std::string& name);
// Discriminated by VideoCodec::codecType; prefer the checked accessors on
// VideoCodec (VP8()/VP9()/H264()/H265()/AV1()) over direct access.
union VideoCodecUnion {
  VideoCodecVP8 VP8;
  VideoCodecVP9 VP9;
  VideoCodecH264 H264;
  VideoCodecH265 H265;
  VideoCodecAV1 AV1;
};
enum class VideoCodecMode { kRealtimeVideo, kScreensharing };
// Common video codec properties
class RTC_EXPORT VideoCodec {
 public:
  VideoCodec();
  // Scalability mode as described in
  // https://www.w3.org/TR/webrtc-svc/#scalabilitymodes*
  absl::optional<ScalabilityMode> GetScalabilityMode() const {
    return scalability_mode_;
  }
  void SetScalabilityMode(ScalabilityMode scalability_mode) {
    scalability_mode_ = scalability_mode;
  }
  void UnsetScalabilityMode() { scalability_mode_ = absl::nullopt; }
  // Encoder effort / CPU usage hint; see VideoCodecComplexity.
  VideoCodecComplexity GetVideoEncoderComplexity() const;
  void SetVideoEncoderComplexity(VideoCodecComplexity complexity_setting);
  // Frame-drop toggle; semantics are defined by the encoder implementations.
  bool GetFrameDropEnabled() const;
  void SetFrameDropEnabled(bool enabled);
  // Singlecast means at most one simulcast stream is configured.
  bool IsSinglecast() const { return numberOfSimulcastStreams <= 1; }
  bool IsSimulcast() const { return !IsSinglecast(); }
  // Public variables. TODO(hta): Make them private with accessors.
  VideoCodecType codecType;
  // TODO(nisse): Change to int, for consistency.
  uint16_t width;
  uint16_t height;
  unsigned int startBitrate;  // kilobits/sec.
  unsigned int maxBitrate;    // kilobits/sec.
  unsigned int minBitrate;    // kilobits/sec.
  uint32_t maxFramerate;
  // This enables/disables encoding and sending when there aren't multiple
  // simulcast streams, by allocating 0 bitrate if inactive.
  bool active;
  unsigned int qpMax;
  // The actual number of simulcast streams. This is <= 1 in singlecast (it can
  // be 0 in old code paths), but it is also 1 in the {active,inactive,inactive}
  // "single RTP simulcast" use case and the legacy kSVC use case. In all other
  // cases this is the same as the number of encodings (which may include
  // inactive encodings). In other words:
  // - `numberOfSimulcastStreams <= 1` in singlecast and singlecast-like setups
  //   including legacy kSVC (encodings interpreted as spatial layers) or
  //   standard kSVC (1 active encoding).
  // - `numberOfSimulcastStreams > 1` in simulcast of 2+ active encodings.
  unsigned char numberOfSimulcastStreams;
  SimulcastStream simulcastStream[kMaxSimulcastStreams];
  SpatialLayer spatialLayers[kMaxSpatialLayers];
  VideoCodecMode mode;
  bool expect_encode_from_texture;
  // Timing frames configuration. There is delay of delay_ms between two
  // consequent timing frames, excluding outliers. Frame is always made a
  // timing frame if it's at least outlier_ratio in percent of "ideal" average
  // frame given bitrate and framerate, i.e. if it's bigger than
  // |outlier_ratio / 100.0 * bitrate_bps / fps| in bits. This way, timing
  // frames will not be sent too often usually. Yet large frames will always
  // have timing information for debug purposes because they are more likely to
  // cause extra delays.
  struct TimingFrameTriggerThresholds {
    int64_t delay_ms;
    uint16_t outlier_ratio_percent;
  } timing_frame_thresholds;
  // Legacy Google conference mode flag for simulcast screenshare
  bool legacy_conference_mode;
  // Whole-object equality is intentionally deleted; compare the
  // codec-specific structs or individual fields instead.
  bool operator==(const VideoCodec& other) const = delete;
  bool operator!=(const VideoCodec& other) const = delete;
  // Human-readable summary of the configuration (see video_codec.cc).
  std::string ToString() const;
  // Accessors for codec specific information.
  // There is a const version of each that returns a reference,
  // and a non-const version that returns a pointer, in order
  // to allow modification of the parameters.
  VideoCodecVP8* VP8();
  const VideoCodecVP8& VP8() const;
  VideoCodecVP9* VP9();
  const VideoCodecVP9& VP9() const;
  VideoCodecH264* H264();
  const VideoCodecH264& H264() const;
  VideoCodecH265* H265();
  const VideoCodecH265& H265() const;
  VideoCodecAV1* AV1();
  const VideoCodecAV1& AV1() const;
 private:
  // TODO(hta): Consider replacing the union with a pointer type.
  // This will allow removing the VideoCodec* types from this file.
  VideoCodecUnion codec_specific_;
  absl::optional<ScalabilityMode> scalability_mode_;
  // 'complexity_' indicates the CPU capability of the client. It's used to
  // determine encoder CPU complexity (e.g., cpu_used for VP8, VP9. and AV1).
  VideoCodecComplexity complexity_;
  bool frame_drop_enabled_ = false;
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_CODEC_H_

View file

@ -0,0 +1,65 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_decoder.h"
#include "absl/types/optional.h"
#include "api/video/render_resolution.h"
#include "api/video/video_codec_type.h"
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
// Default adapter: forwards to the single-argument overload, discarding the
// decode time.
int32_t DecodedImageCallback::Decoded(VideoFrame& decodedImage,
                                      int64_t decode_time_ms) {
  // The default implementation ignores custom decode time value.
  return Decoded(decodedImage);
}
// Default adapter: forwards to the int64_t overload, mapping a missing decode
// time to -1 and dropping the QP value.
void DecodedImageCallback::Decoded(VideoFrame& decodedImage,
                                   absl::optional<int32_t> decode_time_ms,
                                   absl::optional<uint8_t> qp) {
  Decoded(decodedImage, decode_time_ms.value_or(-1));
}
// Builds a DecoderInfo whose name comes from the (possibly overridden)
// ImplementationName() accessor; all other fields keep their defaults.
VideoDecoder::DecoderInfo VideoDecoder::GetDecoderInfo() const {
  DecoderInfo decoder_info;
  decoder_info.implementation_name = ImplementationName();
  return decoder_info;
}

// Fallback name for decoders that do not override this accessor.
const char* VideoDecoder::ImplementationName() const {
  return "unknown";
}
// Debug representation of the decoder info.
// Fix: dropped the stray "prefers_late_decoding = " fragment — apparently a
// leftover from a removed field — which printed a label with no value and ran
// straight into the next key, corrupting the debug output.
std::string VideoDecoder::DecoderInfo::ToString() const {
  char string_buf[2048];
  rtc::SimpleStringBuilder oss(string_buf);
  oss << "DecoderInfo { "
      << "implementation_name = '" << implementation_name << "', "
      << "is_hardware_accelerated = "
      << (is_hardware_accelerated ? "true" : "false") << " }";
  return oss.str();
}
// Two DecoderInfos are equal iff both the implementation name and the
// hardware-acceleration flag match.
bool VideoDecoder::DecoderInfo::operator==(const DecoderInfo& rhs) const {
  return is_hardware_accelerated == rhs.is_hardware_accelerated &&
         implementation_name == rhs.implementation_name;
}
// The core count must be strictly positive; enforced in debug builds only.
void VideoDecoder::Settings::set_number_of_cores(int value) {
  RTC_DCHECK_GT(value, 0);
  number_of_cores_ = value;
}
} // namespace webrtc

View file

@ -0,0 +1,147 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/video/encoded_image.h"
#include "api/video/render_resolution.h"
#include "api/video/video_codec_type.h"
#include "api/video/video_frame.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Sink interface for frames produced by a VideoDecoder. Subclasses must
// implement the single-argument Decoded(); the other overloads have
// forwarding default implementations.
class RTC_EXPORT DecodedImageCallback {
 public:
  virtual ~DecodedImageCallback() {}
  virtual int32_t Decoded(VideoFrame& decodedImage) = 0;
  // Provides an alternative interface that allows the decoder to specify the
  // decode time excluding waiting time for any previous pending frame to
  // return. This is necessary for breaking positive feedback in the delay
  // estimation when the decoder has a single output buffer.
  virtual int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms);
  // TODO(sakal): Remove other implementations when upstream projects have been
  // updated.
  virtual void Decoded(VideoFrame& decodedImage,
                       absl::optional<int32_t> decode_time_ms,
                       absl::optional<uint8_t> qp);
};
// Decoder interface: Configure() prepares the decoder, Decode() feeds it
// encoded images, and decoded frames are delivered through the registered
// DecodedImageCallback.
class RTC_EXPORT VideoDecoder {
 public:
  struct DecoderInfo {
    // Descriptive name of the decoder implementation.
    std::string implementation_name;
    // True if the decoder is backed by hardware acceleration.
    bool is_hardware_accelerated = false;
    std::string ToString() const;
    bool operator==(const DecoderInfo& rhs) const;
    bool operator!=(const DecoderInfo& rhs) const { return !(*this == rhs); }
  };
  // Decoding parameters passed to Configure().
  class Settings {
   public:
    Settings() = default;
    Settings(const Settings&) = default;
    Settings& operator=(const Settings&) = default;
    ~Settings() = default;
    // The size of pool which is used to store video frame buffers inside
    // decoder. If value isn't present some codec-default value will be used. If
    // value is present and decoder doesn't have buffer pool the value will be
    // ignored.
    absl::optional<int> buffer_pool_size() const;
    void set_buffer_pool_size(absl::optional<int> value);
    // When valid, user of the VideoDecoder interface shouldn't `Decode`
    // encoded images with render resolution larger than width and height
    // specified here.
    RenderResolution max_render_resolution() const;
    void set_max_render_resolution(RenderResolution value);
    // Maximum number of cpu cores the decoder is allowed to use in parallel.
    // Must be positive.
    int number_of_cores() const { return number_of_cores_; }
    void set_number_of_cores(int value);
    // Codec of encoded images user of the VideoDecoder interface will `Decode`.
    VideoCodecType codec_type() const { return codec_type_; }
    void set_codec_type(VideoCodecType value) { codec_type_ = value; }
   private:
    absl::optional<int> buffer_pool_size_;
    RenderResolution max_resolution_;
    int number_of_cores_ = 1;
    VideoCodecType codec_type_ = kVideoCodecGeneric;
  };
  virtual ~VideoDecoder() = default;
  // Prepares decoder to handle incoming encoded frames. Can be called multiple
  // times, in such case only latest `settings` are in effect.
  virtual bool Configure(const Settings& settings) = 0;
  // NOTE: the two Decode() overloads delegate to each other; a subclass must
  // override at least one of them or calls will recurse indefinitely.
  // TODO(bugs.webrtc.org/15444): Make pure virtual once all subclasses have
  // migrated to implementing this class.
  virtual int32_t Decode(const EncodedImage& input_image,
                         int64_t render_time_ms) {
    return Decode(input_image, /*missing_frame=*/false, render_time_ms);
  }
  // TODO(bugs.webrtc.org/15444): Migrate all subclasses to Decode() without
  // missing_frame and delete this.
  virtual int32_t Decode(const EncodedImage& input_image,
                         bool missing_frames,
                         int64_t render_time_ms) {
    return Decode(input_image, render_time_ms);
  }
  virtual int32_t RegisterDecodeCompleteCallback(
      DecodedImageCallback* callback) = 0;
  virtual int32_t Release() = 0;
  virtual DecoderInfo GetDecoderInfo() const;
  // Deprecated, use GetDecoderInfo().implementation_name instead.
  virtual const char* ImplementationName() const;
};
// Inline definitions of the trivial Settings accessors declared above.
inline absl::optional<int> VideoDecoder::Settings::buffer_pool_size() const {
  return buffer_pool_size_;
}
inline void VideoDecoder::Settings::set_buffer_pool_size(
    absl::optional<int> value) {
  buffer_pool_size_ = value;
}
inline RenderResolution VideoDecoder::Settings::max_render_resolution() const {
  return max_resolution_;
}
inline void VideoDecoder::Settings::set_max_render_resolution(
    RenderResolution value) {
  max_resolution_ = value;
}
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_H_

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_decoder_factory.h"
#include <memory>
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Default support query: a format is supported iff it appears in
// GetSupportedFormats() and no reference scaling is requested.
// `is_power_efficient` keeps its default (false).
VideoDecoderFactory::CodecSupport VideoDecoderFactory::QueryCodecSupport(
    const SdpVideoFormat& format,
    bool reference_scaling) const {
  CodecSupport support;
  if (!reference_scaling) {
    support.is_supported = format.IsCodecInList(GetSupportedFormats());
  }
  return support;
}
// Transitional default: ignores `env` and forwards to the legacy
// CreateVideoDecoder() until all factories implement Create() directly.
std::unique_ptr<VideoDecoder> VideoDecoderFactory::Create(
    const Environment& env,
    const SdpVideoFormat& format) {
  return CreateVideoDecoder(format);
}
std::unique_ptr<VideoDecoder> VideoDecoderFactory::CreateVideoDecoder(
    const SdpVideoFormat& format) {
  // Newer code shouldn't call this function,
  // Older code should implement it in derived classes.
  RTC_CHECK_NOTREACHED();
  // Unreachable; presumably kept for compilers that require a return here.
  return nullptr;
}
} // namespace webrtc

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_H_
#include <memory>
#include <vector>
#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// A factory that creates VideoDecoders.
// NOTE: This class is still under development and may change without notice.
class RTC_EXPORT VideoDecoderFactory {
 public:
  // Result of a QueryCodecSupport() call.
  struct CodecSupport {
    bool is_supported = false;
    bool is_power_efficient = false;
  };
  virtual ~VideoDecoderFactory() = default;
  // Returns a list of supported video formats in order of preference, to use
  // for signaling etc.
  virtual std::vector<SdpVideoFormat> GetSupportedFormats() const = 0;
  // Query whether the specified format is supported or not and if it will be
  // power efficient, which is currently interpreted as if there is support for
  // hardware acceleration.
  // The parameter `reference_scaling` is used to query support for prediction
  // across spatial layers. An example where support for reference scaling is
  // needed is if the video stream is produced with a scalability mode that has
  // a dependency between the spatial layers. See
  // https://w3c.github.io/webrtc-svc/#scalabilitymodes* for a specification of
  // different scalability modes. NOTE: QueryCodecSupport is currently an
  // experimental feature that is subject to change without notice.
  virtual CodecSupport QueryCodecSupport(const SdpVideoFormat& format,
                                         bool reference_scaling) const;
  // Creates a VideoDecoder for the specified `format`.
  // TODO: bugs.webrtc.org/15791 - Make pure virtual when implemented in all
  // derived classes.
  virtual std::unique_ptr<VideoDecoder> Create(const Environment& env,
                                               const SdpVideoFormat& format);
  // TODO: bugs.webrtc.org/15791 - Make private or delete when all callers are
  // migrated to `Create`.
  virtual std::unique_ptr<VideoDecoder> CreateVideoDecoder(
      const SdpVideoFormat& format);
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_H_

View file

@ -0,0 +1,95 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_H_
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "api/array_view.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
namespace webrtc {
// The VideoDecoderFactoryTemplate supports decoder implementations given as
// template arguments.
//
// To include a decoder in the factory it requires two static members
// functions to be defined:
//
// // Returns the supported SdpVideoFormats this decoder can decode.
// static std::vector<SdpVideoFormat> SupportedFormats();
//
// // Creates a decoder instance for the given format.
// static std::unique_ptr<VideoDecoder>
// CreateDecoder(const SdpVideoFormat& format);
//
// Note that the order of the template arguments matter as the factory will
// return the first decoder implementation supporting the given SdpVideoFormat.
template <typename... Ts>
class VideoDecoderFactoryTemplate : public VideoDecoderFactory {
 public:
  // Union of all adapters' formats, in template-argument order, with
  // duplicates removed.
  std::vector<SdpVideoFormat> GetSupportedFormats() const override {
    return GetSupportedFormatsInternal<Ts...>();
  }
  // Creates a decoder from the first adapter supporting `format`, or nullptr
  // when no adapter matches.
  std::unique_ptr<VideoDecoder> CreateVideoDecoder(
      const SdpVideoFormat& format) override {
    return CreateVideoDecoderInternal<Ts...>(format);
  }
 private:
  // True when `format` (matched on name AND parameters) appears in
  // `supported_formats`.
  bool IsFormatInList(
      const SdpVideoFormat& format,
      rtc::ArrayView<const SdpVideoFormat> supported_formats) const {
    return absl::c_any_of(
        supported_formats, [&](const SdpVideoFormat& supported_format) {
          return supported_format.name == format.name &&
                 supported_format.parameters == format.parameters;
        });
  }
  // Compile-time recursion over the adapter pack, folding format lists.
  template <typename V, typename... Vs>
  std::vector<SdpVideoFormat> GetSupportedFormatsInternal() const {
    auto supported_formats = V::SupportedFormats();
    if constexpr (sizeof...(Vs) > 0) {
      // Supported formats may overlap between implementations, so duplicates
      // should be filtered out.
      for (const auto& other_format : GetSupportedFormatsInternal<Vs...>()) {
        if (!IsFormatInList(other_format, supported_formats)) {
          supported_formats.push_back(other_format);
        }
      }
    }
    return supported_formats;
  }
  // Walks the adapter pack in declaration order; the first adapter whose
  // format list contains `format` wins.
  template <typename V, typename... Vs>
  std::unique_ptr<VideoDecoder> CreateVideoDecoderInternal(
      const SdpVideoFormat& format) {
    if (IsFormatInList(format, V::SupportedFormats())) {
      return V::CreateDecoder(format);
    }
    if constexpr (sizeof...(Vs) > 0) {
      return CreateVideoDecoderInternal<Vs...>(format);
    }
    return nullptr;
  }
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_H_

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_DAV1D_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_DAV1D_ADAPTER_H_
#include <memory>
#include <string>
#include <vector>
#include "api/video_codecs/av1_profile.h"
#include "api/video_codecs/sdp_video_format.h"
#include "modules/video_coding/codecs/av1/dav1d_decoder.h"
namespace webrtc {
struct Dav1dDecoderTemplateAdapter {
static std::vector<SdpVideoFormat> SupportedFormats() {
return {SdpVideoFormat("AV1"),
SdpVideoFormat(
"AV1", {{"profile",
AV1ProfileToString(AV1Profile::kProfile1).data()}})};
}
static std::unique_ptr<VideoDecoder> CreateDecoder(
const SdpVideoFormat& format) {
return CreateDav1dDecoder();
}
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_DAV1D_ADAPTER_H_

View file

@ -0,0 +1,33 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_LIBVPX_VP8_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_LIBVPX_VP8_ADAPTER_H_
#include <memory>
#include <vector>
#include "api/video_codecs/sdp_video_format.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
namespace webrtc {
struct LibvpxVp8DecoderTemplateAdapter {
static std::vector<SdpVideoFormat> SupportedFormats() {
return {SdpVideoFormat("VP8")};
}
static std::unique_ptr<VideoDecoder> CreateDecoder(
const SdpVideoFormat& format) {
return VP8Decoder::Create();
}
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_LIBVPX_VP8_ADAPTER_H_

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_LIBVPX_VP9_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_LIBVPX_VP9_ADAPTER_H_
#include <memory>
#include <vector>
#include "modules/video_coding/codecs/vp9/include/vp9.h"
namespace webrtc {
// Template-factory adapter for the libvpx VP9 decoder.
struct LibvpxVp9DecoderTemplateAdapter {
  // All VP9 formats the bundled libvpx decoder reports as decodable.
  static std::vector<SdpVideoFormat> SupportedFormats() {
    return SupportedVP9DecoderCodecs();
  }

  // Instantiates the default VP9 decoder. The decoder handles every format
  // listed by SupportedVP9DecoderCodecs(), so `format` is not inspected.
  static std::unique_ptr<VideoDecoder> CreateDecoder(
      const SdpVideoFormat& /* format */) {
    return VP9Decoder::Create();
  }
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_LIBVPX_VP9_ADAPTER_H_

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_OPEN_H264_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_OPEN_H264_ADAPTER_H_
#include <memory>
#include <vector>
#include "modules/video_coding/codecs/h264/include/h264.h"
namespace webrtc {
// TODO(bugs.webrtc.org/13573): When OpenH264 is no longer a conditional build
// target remove #ifdefs.
// Template-factory adapter for OpenH264. When the build excludes OpenH264
// (WEBRTC_USE_H264 undefined) it advertises no formats and creates no
// decoders, so the factory template effectively skips it.
struct OpenH264DecoderTemplateAdapter {
  // Returns the H264 formats the bundled decoder supports, or an empty list
  // when H264 support is compiled out.
  static std::vector<SdpVideoFormat> SupportedFormats() {
#if defined(WEBRTC_USE_H264)
    return SupportedH264DecoderCodecs();
#else
    return {};
#endif
  }
  // Creates an OpenH264-backed decoder; returns nullptr when H264 support is
  // compiled out. `format` carries no extra configuration here.
  static std::unique_ptr<VideoDecoder> CreateDecoder(
      const SdpVideoFormat& format) {
#if defined(WEBRTC_USE_H264)
    return H264Decoder::Create();
#else
    return nullptr;
#endif
  }
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_FACTORY_TEMPLATE_OPEN_H264_ADAPTER_H_

View file

@ -0,0 +1,300 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_decoder_software_fallback_wrapper.h"
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "api/field_trials_view.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_decoder.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
namespace webrtc {
namespace {
// Consecutive generic decode errors on key-frames tolerated from the hardware
// decoder before switching to the software fallback.
constexpr size_t kMaxConsequtiveHwErrors = 4;

// Wraps a preferred (typically hardware) decoder and a software decoder and
// forwards calls to whichever is currently active. Decoding starts on the
// hardware decoder and switches to the software fallback when the hardware
// decoder fails to configure, explicitly requests fallback, or keeps failing
// on key-frames.
class VideoDecoderSoftwareFallbackWrapper final : public VideoDecoder {
 public:
  VideoDecoderSoftwareFallbackWrapper(
      std::unique_ptr<VideoDecoder> sw_fallback_decoder,
      std::unique_ptr<VideoDecoder> hw_decoder,
      bool force_sw_decoder_fallback);
  ~VideoDecoderSoftwareFallbackWrapper() override;
  bool Configure(const Settings& settings) override;
  int32_t Decode(const EncodedImage& input_image,
                 int64_t render_time_ms) override;
  int32_t RegisterDecodeCompleteCallback(
      DecodedImageCallback* callback) override;
  int32_t Release() override;
  DecoderInfo GetDecoderInfo() const override;
  const char* ImplementationName() const override;

 private:
  // Configures `fallback_decoder_` and makes it the active decoder.
  bool InitFallbackDecoder();
  // Records how many frames the HW decoder produced before falling back.
  void UpdateFallbackDecoderHistograms();
  // Configures `hw_decoder_` and makes it the active decoder.
  bool InitHwDecoder();
  // The decoder calls are currently routed to.
  VideoDecoder& active_decoder() const;

  // Determines if we are trying to use the HW or SW decoder.
  enum class DecoderType {
    kNone,
    kHardware,
    kFallback,
  } decoder_type_;
  std::unique_ptr<VideoDecoder> hw_decoder_;

  // When true, Configure() never attempts the hardware decoder.
  const bool force_sw_decoder_fallback_;
  // Cached so a later fallback can be configured like the primary decoder.
  Settings decoder_settings_;
  const std::unique_ptr<VideoDecoder> fallback_decoder_;
  // Pre-built "A (fallback from: B)" implementation name.
  const std::string fallback_implementation_name_;
  DecodedImageCallback* callback_;
  int32_t hw_decoded_frames_since_last_fallback_;
  size_t hw_consequtive_generic_errors_;
};
// Both decoders must be non-null: their DecoderInfo is queried here to
// pre-compute the "A (fallback from: B)" implementation name.
VideoDecoderSoftwareFallbackWrapper::VideoDecoderSoftwareFallbackWrapper(
    std::unique_ptr<VideoDecoder> sw_fallback_decoder,
    std::unique_ptr<VideoDecoder> hw_decoder,
    bool force_sw_decoder_fallback)
    : decoder_type_(DecoderType::kNone),
      hw_decoder_(std::move(hw_decoder)),
      force_sw_decoder_fallback_(force_sw_decoder_fallback),
      fallback_decoder_(std::move(sw_fallback_decoder)),
      fallback_implementation_name_(
          fallback_decoder_->GetDecoderInfo().implementation_name +
          " (fallback from: " +
          hw_decoder_->GetDecoderInfo().implementation_name + ")"),
      callback_(nullptr),
      hw_decoded_frames_since_last_fallback_(0),
      hw_consequtive_generic_errors_(0) {}
// Both owned decoders are released by their unique_ptr members.
VideoDecoderSoftwareFallbackWrapper::~VideoDecoderSoftwareFallbackWrapper() =
    default;
// Configures the wrapper: tries the hardware decoder first unless fallback is
// forced, and otherwise (or on failure) configures the software decoder.
bool VideoDecoderSoftwareFallbackWrapper::Configure(const Settings& settings) {
  // Cache the settings so a later fallback can be configured identically.
  decoder_settings_ = settings;
  if (!force_sw_decoder_fallback_ && InitHwDecoder()) {
    return true;
  }
  if (force_sw_decoder_fallback_) {
    RTC_LOG(LS_INFO) << "Forced software decoder fallback enabled.";
  }
  RTC_DCHECK(decoder_type_ == DecoderType::kNone);
  return InitFallbackDecoder();
}
// Attempts to configure the hardware decoder; on success it becomes the
// active decoder and any previously registered callback is re-attached.
bool VideoDecoderSoftwareFallbackWrapper::InitHwDecoder() {
  // Only valid from the uninitialized state.
  RTC_DCHECK(decoder_type_ == DecoderType::kNone);
  const bool configured = hw_decoder_->Configure(decoder_settings_);
  if (configured) {
    decoder_type_ = DecoderType::kHardware;
    if (callback_ != nullptr) {
      hw_decoder_->RegisterDecodeCompleteCallback(callback_);
    }
  }
  return configured;
}
// Switches decoding to the software decoder. Callable before any decoder is
// active (kNone) or to replace a failing hardware decoder (kHardware).
bool VideoDecoderSoftwareFallbackWrapper::InitFallbackDecoder() {
  RTC_DCHECK(decoder_type_ == DecoderType::kNone ||
             decoder_type_ == DecoderType::kHardware);
  RTC_LOG(LS_WARNING) << "Decoder falling back to software decoding.";
  if (!fallback_decoder_->Configure(decoder_settings_)) {
    RTC_LOG(LS_ERROR) << "Failed to initialize software-decoder fallback.";
    return false;
  }
  // Record how many frames the hardware decoder produced before the switch.
  UpdateFallbackDecoderHistograms();
  if (decoder_type_ == DecoderType::kHardware) {
    hw_decoder_->Release();
  }
  decoder_type_ = DecoderType::kFallback;
  // Re-attach any callback registered before the switch.
  if (callback_)
    fallback_decoder_->RegisterDecodeCompleteCallback(callback_);
  return true;
}
// Reports, per codec type, how many frames the hardware decoder produced
// since the previous fallback (or stream start) at the moment of switching
// to software decoding.
void VideoDecoderSoftwareFallbackWrapper::UpdateFallbackDecoderHistograms() {
  const std::string kFallbackHistogramsUmaPrefix =
      "WebRTC.Video.HardwareDecodedFramesBetweenSoftwareFallbacks.";
  // Each histogram needs its own code path for this to work otherwise the
  // histogram names will be mixed up by the optimization that takes place.
  switch (decoder_settings_.codec_type()) {
    case kVideoCodecGeneric:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "Generic",
                                  hw_decoded_frames_since_last_fallback_);
      break;
    case kVideoCodecVP8:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "Vp8",
                                  hw_decoded_frames_since_last_fallback_);
      break;
    case kVideoCodecVP9:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "Vp9",
                                  hw_decoded_frames_since_last_fallback_);
      break;
    case kVideoCodecAV1:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "Av1",
                                  hw_decoded_frames_since_last_fallback_);
      break;
    case kVideoCodecH264:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "H264",
                                  hw_decoded_frames_since_last_fallback_);
      break;
    case kVideoCodecMultiplex:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "Multiplex",
                                  hw_decoded_frames_since_last_fallback_);
      break;
    case kVideoCodecH265:
      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "H265",
                                  hw_decoded_frames_since_last_fallback_);
      break;
  }
}
// Decodes `input_image` with the active decoder. While the hardware decoder
// is active, a switch to the software fallback is triggered either by an
// explicit WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE return or by
// kMaxConsequtiveHwErrors consecutive generic errors on key-frames.
int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
    const EncodedImage& input_image,
    int64_t render_time_ms) {
  TRACE_EVENT0("webrtc", "VideoDecoderSoftwareFallbackWrapper::Decode");
  switch (decoder_type_) {
    case DecoderType::kNone:
      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    case DecoderType::kHardware: {
      // Initialize `ret` directly from the decode call; the previous
      // placeholder initialization was a dead store.
      int32_t ret = hw_decoder_->Decode(input_image, render_time_ms);
      if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
        if (ret != WEBRTC_VIDEO_CODEC_ERROR) {
          // Successful hardware decode: reset the error streak.
          ++hw_decoded_frames_since_last_fallback_;
          hw_consequtive_generic_errors_ = 0;
          return ret;
        }
        if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
          // Only count errors on key-frames, since generic errors can happen
          // with hw decoder due to many arbitrary reasons.
          // However, requesting a key-frame is supposed to fix the issue.
          ++hw_consequtive_generic_errors_;
        }
        if (hw_consequtive_generic_errors_ < kMaxConsequtiveHwErrors) {
          return ret;
        }
      }
      // HW decoder returned WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE or
      // too many generic errors on key-frames encountered.
      if (!InitFallbackDecoder()) {
        return ret;
      }
      // Fallback decoder initialized, fall-through.
      [[fallthrough]];
    }
    case DecoderType::kFallback:
      return fallback_decoder_->Decode(input_image, render_time_ms);
    default:
      RTC_DCHECK_NOTREACHED();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }
}
// Stores `callback` so it can be re-registered after a fallback switch, then
// registers it on the currently active decoder.
int32_t VideoDecoderSoftwareFallbackWrapper::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  callback_ = callback;
  return active_decoder().RegisterDecodeCompleteCallback(callback_);
}
// Releases whichever decoder is active and resets the wrapper to the
// uninitialized state.
int32_t VideoDecoderSoftwareFallbackWrapper::Release() {
  int32_t status;
  if (decoder_type_ == DecoderType::kHardware) {
    status = hw_decoder_->Release();
  } else if (decoder_type_ == DecoderType::kFallback) {
    RTC_LOG(LS_INFO) << "Releasing software fallback decoder.";
    status = fallback_decoder_->Release();
  } else if (decoder_type_ == DecoderType::kNone) {
    // Nothing active; releasing is a no-op.
    status = WEBRTC_VIDEO_CODEC_OK;
  } else {
    RTC_DCHECK_NOTREACHED();
    status = WEBRTC_VIDEO_CODEC_ERROR;
  }
  decoder_type_ = DecoderType::kNone;
  return status;
}
// Returns the active decoder's info. While the fallback is active, the
// implementation name is replaced by the cached "A (fallback from: B)" string
// built in the constructor.
VideoDecoder::DecoderInfo VideoDecoderSoftwareFallbackWrapper::GetDecoderInfo()
    const {
  DecoderInfo info = active_decoder().GetDecoderInfo();
  const bool using_fallback = decoder_type_ == DecoderType::kFallback;
  if (using_fallback) {
    info.implementation_name = fallback_implementation_name_;
  }
  return info;
}
// Returns the cached "A (fallback from: B)" name while the fallback is
// active, otherwise whatever the hardware decoder reports.
const char* VideoDecoderSoftwareFallbackWrapper::ImplementationName() const {
  return decoder_type_ == DecoderType::kFallback
             ? fallback_implementation_name_.c_str()
             : hw_decoder_->ImplementationName();
}
// The decoder all calls are currently routed to; the hardware decoder is the
// default until a fallback happens.
VideoDecoder& VideoDecoderSoftwareFallbackWrapper::active_decoder() const {
  if (decoder_type_ == DecoderType::kFallback) {
    return *fallback_decoder_;
  }
  return *hw_decoder_;
}
} // namespace
std::unique_ptr<VideoDecoder> CreateVideoDecoderSoftwareFallbackWrapper(
const Environment& env,
std::unique_ptr<VideoDecoder> sw_fallback_decoder,
std::unique_ptr<VideoDecoder> hw_decoder) {
return std::make_unique<VideoDecoderSoftwareFallbackWrapper>(
std::move(sw_fallback_decoder), std::move(hw_decoder),
env.field_trials().IsEnabled("WebRTC-Video-ForcedSwDecoderFallback"));
}
std::unique_ptr<VideoDecoder> CreateVideoDecoderSoftwareFallbackWrapper(
std::unique_ptr<VideoDecoder> sw_fallback_decoder,
std::unique_ptr<VideoDecoder> hw_decoder) {
return std::make_unique<VideoDecoderSoftwareFallbackWrapper>(
std::move(sw_fallback_decoder), std::move(hw_decoder),
webrtc::field_trial::IsEnabled("WebRTC-Video-ForcedSwDecoderFallback"));
}
} // namespace webrtc

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_DECODER_SOFTWARE_FALLBACK_WRAPPER_H_
#define API_VIDEO_CODECS_VIDEO_DECODER_SOFTWARE_FALLBACK_WRAPPER_H_
#include <memory>
#include "api/environment/environment.h"
#include "api/video_codecs/video_decoder.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Used to wrap external VideoDecoders to provide a fallback option on
// software decoding when a hardware decoder fails to decode a stream due to
// hardware restrictions, such as max resolution.
// `env` supplies field trials; enabling "WebRTC-Video-ForcedSwDecoderFallback"
// forces the software path from the start.
RTC_EXPORT std::unique_ptr<VideoDecoder>
CreateVideoDecoderSoftwareFallbackWrapper(
    const Environment& env,
    std::unique_ptr<VideoDecoder> sw_fallback_decoder,
    std::unique_ptr<VideoDecoder> hw_decoder);

// Same as above, but reads field trials from the global registry.
// TODO: bugs.webrtc.org/15791 - Deprecated, remove when not used by chromium.
RTC_EXPORT std::unique_ptr<VideoDecoder>
CreateVideoDecoderSoftwareFallbackWrapper(
    std::unique_ptr<VideoDecoder> sw_fallback_decoder,
    std::unique_ptr<VideoDecoder> hw_decoder);
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_DECODER_SOFTWARE_FALLBACK_WRAPPER_H_

View file

@ -0,0 +1,348 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_encoder.h"
#include <string.h>
#include <algorithm>
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
// TODO(mflodman): Add default complexity for VP8 and VP9.
// Baseline VP8 configuration: zero-initialized, one temporal layer, denoising
// on, automatic resize off.
VideoCodecVP8 VideoEncoder::GetDefaultVp8Settings() {
  VideoCodecVP8 settings;
  memset(&settings, 0, sizeof(settings));
  settings.numberOfTemporalLayers = 1;
  settings.denoisingOn = true;
  settings.automaticResizeOn = false;
  settings.keyFrameInterval = 3000;
  return settings;
}
// Baseline VP9 configuration: zero-initialized, single spatial and temporal
// layer, denoising and automatic resize enabled, inter-layer prediction on.
VideoCodecVP9 VideoEncoder::GetDefaultVp9Settings() {
  VideoCodecVP9 settings;
  memset(&settings, 0, sizeof(settings));
  settings.numberOfTemporalLayers = 1;
  settings.denoisingOn = true;
  settings.keyFrameInterval = 3000;
  settings.adaptiveQpMode = true;
  settings.automaticResizeOn = true;
  settings.numberOfSpatialLayers = 1;
  settings.flexibleMode = false;
  settings.interLayerPred = InterLayerPredMode::kOn;
  return settings;
}
// Baseline H.264 configuration: zero-initialized with a single temporal
// layer.
VideoCodecH264 VideoEncoder::GetDefaultH264Settings() {
  VideoCodecH264 settings;
  memset(&settings, 0, sizeof(settings));
  settings.keyFrameInterval = 3000;
  settings.numberOfTemporalLayers = 1;
  return settings;
}
// Baseline H.265 configuration: zero-initialized, frame dropping on, and no
// out-of-band SPS/PPS data.
VideoCodecH265 VideoEncoder::GetDefaultH265Settings() {
  VideoCodecH265 settings;
  memset(&settings, 0, sizeof(settings));
  // settings.profile = kProfileBase;
  settings.frameDroppingOn = true;
  settings.keyFrameInterval = 3000;
  settings.spsData = nullptr;
  settings.spsLen = 0;
  settings.ppsData = nullptr;
  settings.ppsLen = 0;
  return settings;
}
// Private default constructor: produces the "off" state (no thresholds);
// reachable only through the kOff magic constant.
VideoEncoder::ScalingSettings::ScalingSettings() = default;
// Conversion from the kOff tag type to the thresholds-less state.
VideoEncoder::ScalingSettings::ScalingSettings(KOff) : ScalingSettings() {}
// Enables quality scaling with the given low/high QP thresholds.
VideoEncoder::ScalingSettings::ScalingSettings(int low, int high)
    : thresholds(QpThresholds(low, high)) {}
// Enables quality scaling and additionally overrides the minimum frame size
// the scaler may downscale to.
VideoEncoder::ScalingSettings::ScalingSettings(int low,
                                               int high,
                                               int min_pixels)
    : thresholds(QpThresholds(low, high)), min_pixels_per_frame(min_pixels) {}
VideoEncoder::ScalingSettings::ScalingSettings(const ScalingSettings&) =
    default;
VideoEncoder::ScalingSettings::~ScalingSettings() {}
// Out-of-line definitions for the in-class constexpr declarations (required
// for pre-C++17-style ODR use).
// static
constexpr VideoEncoder::ScalingSettings::KOff
    VideoEncoder::ScalingSettings::kOff;
// static
constexpr uint8_t VideoEncoder::EncoderInfo::kMaxFramerateFraction;
// Member-wise equality over all four limit fields.
bool VideoEncoder::ResolutionBitrateLimits::operator==(
    const ResolutionBitrateLimits& rhs) const {
  return std::tie(frame_size_pixels, min_start_bitrate_bps, min_bitrate_bps,
                  max_bitrate_bps) ==
         std::tie(rhs.frame_size_pixels, rhs.min_start_bitrate_bps,
                  rhs.min_bitrate_bps, rhs.max_bitrate_bps);
}
// Defaults: quality scaling off, no alignment requirement, software I420
// input, full framerate on one temporal layer of every spatial layer.
VideoEncoder::EncoderInfo::EncoderInfo()
    : scaling_settings(VideoEncoder::ScalingSettings::kOff),
      requested_resolution_alignment(1),
      apply_alignment_to_all_simulcast_layers(false),
      supports_native_handle(false),
      implementation_name("unknown"),
      has_trusted_rate_controller(false),
      is_hardware_accelerated(true),
      fps_allocation{absl::InlinedVector<uint8_t, kMaxTemporalStreams>(
          1,
          kMaxFramerateFraction)},
      supports_simulcast(false),
      preferred_pixel_formats{VideoFrameBuffer::Type::kI420} {}
VideoEncoder::EncoderInfo::EncoderInfo(const EncoderInfo&) = default;
VideoEncoder::EncoderInfo::~EncoderInfo() = default;
// Renders a human-readable, single-line summary of this EncoderInfo for
// logging. Output is built into a fixed 2 KiB buffer.
std::string VideoEncoder::EncoderInfo::ToString() const {
  char string_buf[2048];
  rtc::SimpleStringBuilder oss(string_buf);
  oss << "EncoderInfo { "
         "ScalingSettings { ";
  if (scaling_settings.thresholds) {
    oss << "Thresholds { "
           "low = "
        << scaling_settings.thresholds->low
        << ", high = " << scaling_settings.thresholds->high << "}, ";
  }
  oss << "min_pixels_per_frame = " << scaling_settings.min_pixels_per_frame
      << " }";
  oss << ", requested_resolution_alignment = " << requested_resolution_alignment
      << ", apply_alignment_to_all_simulcast_layers = "
      << apply_alignment_to_all_simulcast_layers
      << ", supports_native_handle = " << supports_native_handle
      << ", implementation_name = '" << implementation_name
      << "'"
         ", has_trusted_rate_controller = "
      << has_trusted_rate_controller
      << ", is_hardware_accelerated = " << is_hardware_accelerated
      << ", fps_allocation = [";
  // Print only up to the highest spatial layer with a non-empty allocation.
  size_t num_spatial_layer_with_fps_allocation = 0;
  for (size_t i = 0; i < kMaxSpatialLayers; ++i) {
    if (!fps_allocation[i].empty()) {
      num_spatial_layer_with_fps_allocation = i + 1;
    }
  }
  bool first = true;
  for (size_t i = 0; i < num_spatial_layer_with_fps_allocation; ++i) {
    if (fps_allocation[i].empty()) {
      break;
    }
    if (!first) {
      oss << ", ";
    }
    const absl::InlinedVector<uint8_t, kMaxTemporalStreams>& fractions =
        fps_allocation[i];
    if (!fractions.empty()) {
      first = false;
      oss << "[ ";
      // Use a distinct index (`j`) so the outer spatial-layer index `i` is
      // not shadowed, as it was previously.
      for (size_t j = 0; j < fractions.size(); ++j) {
        if (j > 0) {
          oss << ", ";
        }
        // Fractions are stored as numerator over kMaxFramerateFraction.
        oss << (static_cast<double>(fractions[j]) / kMaxFramerateFraction);
      }
      oss << "] ";
    }
  }
  oss << "]";
  oss << ", resolution_bitrate_limits = [";
  for (size_t i = 0; i < resolution_bitrate_limits.size(); ++i) {
    if (i > 0) {
      oss << ", ";
    }
    ResolutionBitrateLimits l = resolution_bitrate_limits[i];
    oss << "Limits { "
           "frame_size_pixels = "
        << l.frame_size_pixels
        << ", min_start_bitrate_bps = " << l.min_start_bitrate_bps
        << ", min_bitrate_bps = " << l.min_bitrate_bps
        << ", max_bitrate_bps = " << l.max_bitrate_bps << "} ";
  }
  oss << "] "
         ", supports_simulcast = "
      << supports_simulcast;
  oss << ", preferred_pixel_formats = [";
  for (size_t i = 0; i < preferred_pixel_formats.size(); ++i) {
    if (i > 0)
      oss << ", ";
    oss << VideoFrameBufferTypeToString(preferred_pixel_formats.at(i));
  }
  oss << "]";
  if (is_qp_trusted.has_value()) {
    oss << ", is_qp_trusted = " << is_qp_trusted.value();
  }
  oss << "}";
  return oss.str();
}
// Field-wise equality of EncoderInfo.
// NOTE(review): several fields are intentionally or accidentally excluded
// from this comparison (e.g. requested_resolution_alignment,
// apply_alignment_to_all_simulcast_layers, preferred_pixel_formats,
// is_qp_trusted) — confirm before relying on this as strict equality.
bool VideoEncoder::EncoderInfo::operator==(const EncoderInfo& rhs) const {
  if (scaling_settings.thresholds.has_value() !=
      rhs.scaling_settings.thresholds.has_value()) {
    return false;
  }
  if (scaling_settings.thresholds.has_value()) {
    QpThresholds l = *scaling_settings.thresholds;
    QpThresholds r = *rhs.scaling_settings.thresholds;
    if (l.low != r.low || l.high != r.high) {
      return false;
    }
  }
  if (scaling_settings.min_pixels_per_frame !=
      rhs.scaling_settings.min_pixels_per_frame) {
    return false;
  }
  if (supports_native_handle != rhs.supports_native_handle ||
      implementation_name != rhs.implementation_name ||
      has_trusted_rate_controller != rhs.has_trusted_rate_controller ||
      is_hardware_accelerated != rhs.is_hardware_accelerated) {
    return false;
  }
  // Compare per-spatial-layer framerate allocations element-wise.
  for (size_t i = 0; i < kMaxSpatialLayers; ++i) {
    if (fps_allocation[i] != rhs.fps_allocation[i]) {
      return false;
    }
  }
  if (resolution_bitrate_limits != rhs.resolution_bitrate_limits ||
      supports_simulcast != rhs.supports_simulcast) {
    return false;
  }
  return true;
}
// Returns the bitrate limits of the smallest configured resolution that is at
// least `frame_size_pixels`, or nullopt when no configured resolution is
// large enough.
absl::optional<VideoEncoder::ResolutionBitrateLimits>
VideoEncoder::EncoderInfo::GetEncoderBitrateLimitsForResolution(
    int frame_size_pixels) const {
  std::vector<ResolutionBitrateLimits> bitrate_limits =
      resolution_bitrate_limits;
  // Sort the list of bitrate limits by resolution. Use an explicitly
  // qualified std::sort; the previous unqualified call only resolved via ADL.
  std::sort(bitrate_limits.begin(), bitrate_limits.end(),
            [](const ResolutionBitrateLimits& lhs,
               const ResolutionBitrateLimits& rhs) {
              return lhs.frame_size_pixels < rhs.frame_size_pixels;
            });
  for (size_t i = 0; i < bitrate_limits.size(); ++i) {
    // Sanity-check each entry; limits are expected to be non-negative and
    // internally consistent.
    RTC_DCHECK_GE(bitrate_limits[i].min_bitrate_bps, 0);
    RTC_DCHECK_GE(bitrate_limits[i].min_start_bitrate_bps, 0);
    RTC_DCHECK_GE(bitrate_limits[i].max_bitrate_bps,
                  bitrate_limits[i].min_bitrate_bps);
    if (i > 0) {
      // The bitrate limits aren't expected to decrease with resolution.
      RTC_DCHECK_GE(bitrate_limits[i].min_bitrate_bps,
                    bitrate_limits[i - 1].min_bitrate_bps);
      RTC_DCHECK_GE(bitrate_limits[i].min_start_bitrate_bps,
                    bitrate_limits[i - 1].min_start_bitrate_bps);
      RTC_DCHECK_GE(bitrate_limits[i].max_bitrate_bps,
                    bitrate_limits[i - 1].max_bitrate_bps);
    }
    if (bitrate_limits[i].frame_size_pixels >= frame_size_pixels) {
      return absl::optional<ResolutionBitrateLimits>(bitrate_limits[i]);
    }
  }
  return absl::nullopt;
}
// Default: zero bitrate, zero framerate, zero bandwidth allocation.
VideoEncoder::RateControlParameters::RateControlParameters()
    : bitrate(VideoBitrateAllocation()),
      framerate_fps(0.0),
      bandwidth_allocation(DataRate::Zero()) {}
// Derives the bandwidth allocation from the sum of the bitrate allocation.
VideoEncoder::RateControlParameters::RateControlParameters(
    const VideoBitrateAllocation& bitrate,
    double framerate_fps)
    : bitrate(bitrate),
      framerate_fps(framerate_fps),
      bandwidth_allocation(DataRate::BitsPerSec(bitrate.get_sum_bps())) {}
// Takes an explicit bandwidth allocation, which may differ from the sum of
// the bitrate allocation.
VideoEncoder::RateControlParameters::RateControlParameters(
    const VideoBitrateAllocation& bitrate,
    double framerate_fps,
    DataRate bandwidth_allocation)
    : bitrate(bitrate),
      framerate_fps(framerate_fps),
      bandwidth_allocation(bandwidth_allocation) {}
// Equality compares bitrate allocation, framerate and bandwidth allocation.
bool VideoEncoder::RateControlParameters::operator==(
    const VideoEncoder::RateControlParameters& rhs) const {
  return std::tie(bitrate, framerate_fps, bandwidth_allocation) ==
         std::tie(rhs.bitrate, rhs.framerate_fps, rhs.bandwidth_allocation);
}
bool VideoEncoder::RateControlParameters::operator!=(
    const VideoEncoder::RateControlParameters& rhs) const {
  return !(rhs == *this);
}
VideoEncoder::RateControlParameters::~RateControlParameters() = default;
// Default no-op; encoders that honor an external FEC controller override it.
void VideoEncoder::SetFecControllerOverride(
    FecControllerOverride* fec_controller_override) {}
int32_t VideoEncoder::InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t max_payload_size) {
const VideoEncoder::Capabilities capabilities(/* loss_notification= */ false);
const VideoEncoder::Settings settings(capabilities, number_of_cores,
max_payload_size);
// In theory, this and the other version of InitEncode() could end up calling
// each other in a loop until we get a stack overflow.
// In practice, any subclass of VideoEncoder would overload at least one
// of these, and we have a TODO in the header file to make this pure virtual.
return InitEncode(codec_settings, settings);
}
// Settings-based initialization; the default implementation delegates to the
// legacy three-argument overload.
int VideoEncoder::InitEncode(const VideoCodec* codec_settings,
                             const VideoEncoder::Settings& settings) {
  // In theory, this and the other version of InitEncode() could end up calling
  // each other in a loop until we get a stack overflow.
  // In practice, any subclass of VideoEncoder would overload at least one
  // of these, and we have a TODO in the header file to make this pure virtual.
  return InitEncode(codec_settings, settings.number_of_cores,
                    settings.max_payload_size);
}
// Default no-op implementations of the optional network-feedback hooks;
// encoders that adapt to loss/RTT override these.
void VideoEncoder::OnPacketLossRateUpdate(float packet_loss_rate) {}
void VideoEncoder::OnRttUpdate(int64_t rtt_ms) {}
void VideoEncoder::OnLossNotification(
    const LossNotification& loss_notification) {}
} // namespace webrtc

View file

@ -0,0 +1,430 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_H_
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/units/data_rate.h"
#include "api/video/encoded_image.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_codec_constants.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/video_codec.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// TODO(pbos): Expose these through a public (root) header or change these APIs.
struct CodecSpecificInfo;
constexpr int kDefaultMinPixelsPerFrame = 320 * 180;
// Interface implemented by the consumer of encoded frames (e.g. the RTP
// sender); the encoder calls it for every produced or dropped frame.
class RTC_EXPORT EncodedImageCallback {
 public:
  virtual ~EncodedImageCallback() {}
  // Outcome reported back to the encoder for a delivered frame.
  struct Result {
    enum Error {
      OK,
      // Failed to send the packet.
      ERROR_SEND_FAILED,
    };
    explicit Result(Error error) : error(error) {}
    Result(Error error, uint32_t frame_id) : error(error), frame_id(frame_id) {}
    Error error;
    // Frame ID assigned to the frame. The frame ID should be the same as the ID
    // seen by the receiver for this frame. RTP timestamp of the frame is used
    // as frame ID when RTP is used to send video. Must be used only when
    // error=OK.
    uint32_t frame_id = 0;
    // Tells the encoder that the next frame should be dropped.
    bool drop_next_frame = false;
  };
  // Used to signal the encoder about reason a frame is dropped.
  // kDroppedByMediaOptimizations - dropped by MediaOptimizations (for rate
  // limiting purposes).
  // kDroppedByEncoder - dropped by encoder's internal rate limiter.
  // TODO(bugs.webrtc.org/10164): Delete this enum? It duplicates the more
  // general VideoStreamEncoderObserver::DropReason. Also,
  // kDroppedByMediaOptimizations is not produced by any encoder, but by
  // VideoStreamEncoder.
  enum class DropReason : uint8_t {
    kDroppedByMediaOptimizations,
    kDroppedByEncoder
  };
  // Callback function which is called when an image has been encoded.
  virtual Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info) = 0;
  // Called instead of OnEncodedImage() when a frame was dropped; default is a
  // no-op.
  virtual void OnDroppedFrame(DropReason reason) {}
};
class RTC_EXPORT VideoEncoder {
public:
struct QpThresholds {
QpThresholds(int l, int h) : low(l), high(h) {}
QpThresholds() : low(-1), high(-1) {}
int low;
int high;
};
// Quality scaling is enabled if thresholds are provided.
struct RTC_EXPORT ScalingSettings {
private:
// Private magic type for kOff, implicitly convertible to
// ScalingSettings.
struct KOff {};
public:
// TODO(bugs.webrtc.org/9078): Since absl::optional should be trivially copy
// constructible, this magic value can likely be replaced by a constexpr
// ScalingSettings value.
static constexpr KOff kOff = {};
ScalingSettings(int low, int high);
ScalingSettings(int low, int high, int min_pixels);
ScalingSettings(const ScalingSettings&);
ScalingSettings(KOff); // NOLINT(runtime/explicit)
~ScalingSettings();
absl::optional<QpThresholds> thresholds;
// We will never ask for a resolution lower than this.
// TODO(kthelgason): Lower this limit when better testing
// on MediaCodec and fallback implementations are in place.
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=7206
int min_pixels_per_frame = kDefaultMinPixelsPerFrame;
private:
// Private constructor; to get an object without thresholds, use
// the magic constant ScalingSettings::kOff.
ScalingSettings();
};
// Bitrate limits for resolution.
struct ResolutionBitrateLimits {
ResolutionBitrateLimits(int frame_size_pixels,
int min_start_bitrate_bps,
int min_bitrate_bps,
int max_bitrate_bps)
: frame_size_pixels(frame_size_pixels),
min_start_bitrate_bps(min_start_bitrate_bps),
min_bitrate_bps(min_bitrate_bps),
max_bitrate_bps(max_bitrate_bps) {}
// Size of video frame, in pixels, the bitrate thresholds are intended for.
int frame_size_pixels = 0;
// Recommended minimum bitrate to start encoding.
int min_start_bitrate_bps = 0;
// Recommended minimum bitrate.
int min_bitrate_bps = 0;
// Recommended maximum bitrate.
int max_bitrate_bps = 0;
bool operator==(const ResolutionBitrateLimits& rhs) const;
bool operator!=(const ResolutionBitrateLimits& rhs) const {
return !(*this == rhs);
}
};
// Struct containing metadata about the encoder implementing this interface.
struct RTC_EXPORT EncoderInfo {
  // Frame rate fractions in `fps_allocation` are expressed relative to this
  // constant: 255 == 100% of the input frame rate.
  static constexpr uint8_t kMaxFramerateFraction =
      std::numeric_limits<uint8_t>::max();
  EncoderInfo();
  EncoderInfo(const EncoderInfo&);
  ~EncoderInfo();
  // Human-readable dump of all fields, intended for logging/debugging.
  std::string ToString() const;
  bool operator==(const EncoderInfo& rhs) const;
  bool operator!=(const EncoderInfo& rhs) const { return !(*this == rhs); }
  // Any encoder implementation wishing to use the WebRTC provided
  // quality scaler must populate this field.
  ScalingSettings scaling_settings;
  // The width and height of the incoming video frames should be divisible
  // by `requested_resolution_alignment`. If they are not, the encoder may
  // drop the incoming frame.
  // For example: With I420, this value would be a multiple of 2.
  // Note that this field is unrelated to any horizontal or vertical stride
  // requirements the encoder has on the incoming video frame buffers.
  uint32_t requested_resolution_alignment;
  // Same as above but if true, each simulcast layer should also be divisible
  // by `requested_resolution_alignment`.
  // Note that scale factors `scale_resolution_down_by` may be adjusted so a
  // common multiple is not too large to avoid largely cropped frames and
  // possibly with an aspect ratio far from the original.
  // Warning: large values of scale_resolution_down_by could be changed
  // considerably, especially if `requested_resolution_alignment` is large.
  bool apply_alignment_to_all_simulcast_layers;
  // If true, encoder supports working with a native handle (e.g. texture
  // handle for hw codecs) rather than requiring a raw I420 buffer.
  bool supports_native_handle;
  // The name of this particular encoder implementation, e.g. "libvpx".
  std::string implementation_name;
  // If this field is true, the encoder rate controller must perform
  // well even in difficult situations, and produce close to the specified
  // target bitrate seen over a reasonable time window, drop frames if
  // necessary in order to keep the rate correct, and react quickly to
  // changing bitrate targets. If this field is true, we disable the
  // frame dropper in the media optimization module and rely entirely on the
  // encoder to produce media at a bitrate that closely matches the target.
  // Any overshooting may result in delay buildup. If this field is
  // false (default behavior), the media opt frame dropper will drop input
  // frames if it suspects encoder misbehavior. Misbehavior is common,
  // especially in hardware codecs. Disable media opt at your own risk.
  bool has_trusted_rate_controller;
  // If this field is true, the encoder uses hardware support and different
  // thresholds will be used in CPU adaptation.
  bool is_hardware_accelerated;
  // For each spatial layer (simulcast stream or SVC layer), represented as an
  // element in `fps_allocation`, a vector indicates how many temporal layers
  // the encoder is using for that spatial layer.
  // For each spatial/temporal layer pair, the frame rate fraction is given as
  // an 8bit unsigned integer where 0 = 0% and 255 = 100%.
  //
  // If the vector is empty for a given spatial layer, it indicates that frame
  // rates are not defined and we can't count on any specific frame rate to be
  // generated. Likely this indicates Vp8TemporalLayersType::kBitrateDynamic.
  //
  // The encoder may update this on a per-frame basis in response to both
  // internal and external signals.
  //
  // Spatial layers are treated independently, but temporal layers are
  // cumulative. For instance, if:
  //   fps_allocation[0][0] = kMaxFramerateFraction / 2;
  //   fps_allocation[0][1] = kMaxFramerateFraction;
  // Then half of the frames are in the base layer and half is in TL1, but
  // since TL1 is assumed to depend on the base layer, the frame rate is
  // indicated as the full 100% for the top layer.
  //
  // Defaults to a single spatial layer containing a single temporal layer
  // with a 100% frame rate fraction.
  absl::InlinedVector<uint8_t, kMaxTemporalStreams>
      fps_allocation[kMaxSpatialLayers];
  // Recommended bitrate limits for different resolutions.
  std::vector<ResolutionBitrateLimits> resolution_bitrate_limits;
  // Obtains the limits from `resolution_bitrate_limits` that best matches the
  // `frame_size_pixels`.
  absl::optional<ResolutionBitrateLimits>
  GetEncoderBitrateLimitsForResolution(int frame_size_pixels) const;
  // If true, this encoder has internal support for generating simulcast
  // streams. Otherwise, an adapter class will be needed.
  // Even if true, the config provided to InitEncode() might not be supported,
  // in such case the encoder should return
  // WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED.
  bool supports_simulcast;
  // The list of pixel formats preferred by the encoder. It is assumed that if
  // the list is empty and supports_native_handle is false, then {I420} is the
  // preferred pixel format. The order of the formats does not matter.
  absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
      preferred_pixel_formats;
  // Indicates whether or not QP value encoder writes into frame/slice/tile
  // header can be interpreted as average frame/slice/tile QP.
  absl::optional<bool> is_qp_trusted;
};
// Rate control parameters passed to VideoEncoder::SetRates(). Groups the
// per-layer bitrate allocation, the target frame rate and the total network
// bandwidth available for video.
struct RTC_EXPORT RateControlParameters {
  RateControlParameters();
  RateControlParameters(const VideoBitrateAllocation& bitrate,
                        double framerate_fps);
  RateControlParameters(const VideoBitrateAllocation& bitrate,
                        double framerate_fps,
                        DataRate bandwidth_allocation);
  virtual ~RateControlParameters();
  // Target bitrate, per spatial/temporal layer.
  // A target bitrate of 0bps indicates a layer should not be encoded at all.
  VideoBitrateAllocation target_bitrate;
  // Adjusted target bitrate, per spatial/temporal layer. May be lower or
  // higher than the target depending on encoder behaviour.
  VideoBitrateAllocation bitrate;
  // Target framerate, in fps. A value <= 0.0 is invalid and should be
  // interpreted as framerate target not available. In this case the encoder
  // should fall back to the max framerate specified in `codec_settings` of
  // the last InitEncode() call.
  double framerate_fps;
  // The network bandwidth available for video. This is at least
  // `bitrate.get_sum_bps()`, but may be higher if the application is not
  // network constrained.
  DataRate bandwidth_allocation;
  bool operator==(const RateControlParameters& rhs) const;
  bool operator!=(const RateControlParameters& rhs) const;
};
// Loss notification message from the receive side, forwarded to the encoder
// via VideoEncoder::OnLossNotification(). All timestamps are RTP timestamps.
struct LossNotification {
  // The timestamp of the last decodable frame *prior* to the last received.
  // (The last received - described below - might itself be decodable or not.)
  uint32_t timestamp_of_last_decodable;
  // The timestamp of the last received frame.
  uint32_t timestamp_of_last_received;
  // Describes whether the dependencies of the last received frame were
  // all decodable.
  // `false` if some dependencies were undecodable, `true` if all dependencies
  // were decodable, and `nullopt` if the dependencies are unknown.
  absl::optional<bool> dependencies_of_last_received_decodable;
  // Describes whether the received frame was decodable.
  // `false` if some dependency was undecodable or if some packet belonging
  // to the last received frame was missed.
  // `true` if all dependencies were decodable and all packets belonging
  // to the last received frame were received.
  // `nullopt` if no packet belonging to the last frame was missed, but the
  // last packet in the frame was not yet received.
  absl::optional<bool> last_received_decodable;
};
// Negotiated capabilities which the VideoEncoder may expect the other
// side to use.
struct Capabilities {
  explicit Capabilities(bool loss_notification)
      : loss_notification(loss_notification) {}
  // True if the remote side supports receiving loss notifications.
  bool loss_notification;
};
// Settings affecting the encoding itself, passed to InitEncode().
struct Settings {
  Settings(const Capabilities& capabilities,
           int number_of_cores,
           size_t max_payload_size)
      : capabilities(capabilities),
        number_of_cores(number_of_cores),
        max_payload_size(max_payload_size) {}
  // Negotiated capabilities of the remote side.
  Capabilities capabilities;
  // Number of CPU cores available for the encoder.
  int number_of_cores;
  // The maximum size each payload is allowed to have. Usually MTU - overhead.
  size_t max_payload_size;
  // Experimental API - currently only supported by LibvpxVp8Encoder and
  // the OpenH264 encoder. If set, limits the number of encoder threads.
  absl::optional<int> encoder_thread_limit;
};
// Per-codec helpers returning sensible default codec-specific settings.
static VideoCodecVP8 GetDefaultVp8Settings();
static VideoCodecVP9 GetDefaultVp9Settings();
static VideoCodecH264 GetDefaultH264Settings();
static VideoCodecH265 GetDefaultH265Settings();
virtual ~VideoEncoder() {}
// Set a FecControllerOverride, through which the encoder may override
// decisions made by FecController.
// TODO(bugs.webrtc.org/10769): Update downstream, then make pure-virtual.
virtual void SetFecControllerOverride(
    FecControllerOverride* fec_controller_override);
// Initialize the encoder with the information from the codecSettings
//
// Input:
//   - codec_settings    : Codec settings
//   - settings          : Settings affecting the encoding itself.
// Input for deprecated version:
//   - number_of_cores   : Number of cores available for the encoder
//   - max_payload_size  : The maximum size each payload is allowed
//                         to have. Usually MTU - overhead.
//
// Return value : Set bit rate if OK
//                <0 - Errors:
//                  WEBRTC_VIDEO_CODEC_ERR_PARAMETER
//                  WEBRTC_VIDEO_CODEC_ERR_SIZE
//                  WEBRTC_VIDEO_CODEC_MEMORY
//                  WEBRTC_VIDEO_CODEC_ERROR
// TODO(bugs.webrtc.org/10720): After updating downstream projects and posting
// an announcement to discuss-webrtc, remove the three-parameters variant
// and make the two-parameters variant pure-virtual.
/* ABSL_DEPRECATED("bugs.webrtc.org/10720") */ virtual int32_t InitEncode(
    const VideoCodec* codec_settings,
    int32_t number_of_cores,
    size_t max_payload_size);
virtual int InitEncode(const VideoCodec* codec_settings,
                       const VideoEncoder::Settings& settings);
// Register an encode complete callback object.
//
// Input:
//   - callback : Callback object which handles encoded images.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
virtual int32_t RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) = 0;
// Free encoder memory.
// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
virtual int32_t Release() = 0;
// Encode an image (as a part of a video stream). The encoded image
// will be returned to the user through the encode complete callback.
//
// Input:
//   - frame       : Image to be encoded
//   - frame_types : Frame type to be generated by the encoder.
//
// Return value : WEBRTC_VIDEO_CODEC_OK if OK
//                <0 - Errors:
//                  WEBRTC_VIDEO_CODEC_ERR_PARAMETER
//                  WEBRTC_VIDEO_CODEC_MEMORY
//                  WEBRTC_VIDEO_CODEC_ERROR
virtual int32_t Encode(const VideoFrame& frame,
                       const std::vector<VideoFrameType>* frame_types) = 0;
// Sets rate control parameters: bitrate, framerate, etc. These settings are
// instantaneous (i.e. not moving averages) and should apply from now until
// the next call to SetRates().
virtual void SetRates(const RateControlParameters& parameters) = 0;
// Inform the encoder when the packet loss rate changes.
//
// Input: - packet_loss_rate : The packet loss rate (0.0 to 1.0).
virtual void OnPacketLossRateUpdate(float packet_loss_rate);
// Inform the encoder when the round trip time changes.
//
// Input: - rtt_ms : The new RTT, in milliseconds.
virtual void OnRttUpdate(int64_t rtt_ms);
// Called when a loss notification is received.
virtual void OnLossNotification(const LossNotification& loss_notification);
// Returns meta-data about the encoder, such as implementation name.
// The output of this method may change during runtime. For instance if a
// hardware encoder fails, it may fall back to doing software encoding using
// an implementation with different characteristics.
virtual EncoderInfo GetEncoderInfo() const = 0;
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_H_

View file

@ -0,0 +1,127 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/units/data_rate.h"
#include "api/video/render_resolution.h"
#include "api/video_codecs/sdp_video_format.h"
namespace webrtc {
class VideoEncoder;
// A factory that creates VideoEncoders.
// NOTE: This class is still under development and may change without notice.
class VideoEncoderFactory {
 public:
  // Result of a QueryCodecSupport() call.
  struct CodecSupport {
    bool is_supported = false;
    bool is_power_efficient = false;
  };
  // An injectable class that is continuously updated with encoding conditions
  // and selects the best encoder given those conditions. An implementation is
  // typically stateful to avoid toggling between different encoders, which is
  // costly due to recreation of objects; a new codec will always start with a
  // key-frame.
  class EncoderSelectorInterface {
   public:
    virtual ~EncoderSelectorInterface() {}
    // Informs the encoder selector about which encoder is currently being
    // used.
    virtual void OnCurrentEncoder(const SdpVideoFormat& format) = 0;
    // Called every time the available bitrate is updated. Should return a
    // non-empty value if an encoder switch should be performed.
    virtual absl::optional<SdpVideoFormat> OnAvailableBitrate(
        const DataRate& rate) = 0;
    // Called every time the encoder input resolution changes. Should return a
    // non-empty value if an encoder switch should be performed.
    virtual absl::optional<SdpVideoFormat> OnResolutionChange(
        const RenderResolution& resolution) {
      return absl::nullopt;
    }
    // Called if the currently used encoder reports itself as broken. Should
    // return a non-empty value if an encoder switch should be performed.
    virtual absl::optional<SdpVideoFormat> OnEncoderBroken() = 0;
  };
  // Returns a list of supported video formats in order of preference, to use
  // for signaling etc.
  virtual std::vector<SdpVideoFormat> GetSupportedFormats() const = 0;
  // Returns a list of supported video formats in order of preference, that can
  // also be tagged with additional information to allow the VideoEncoderFactory
  // to separate between different implementations when CreateVideoEncoder is
  // called.
  virtual std::vector<SdpVideoFormat> GetImplementations() const {
    return GetSupportedFormats();
  }
  // Query whether the specified format is supported or not and if it will be
  // power efficient, which is currently interpreted as if there is support for
  // hardware acceleration.
  // See https://w3c.github.io/webrtc-svc/#scalabilitymodes* for a specification
  // of valid values for `scalability_mode`.
  // NOTE: QueryCodecSupport is currently an experimental feature that is
  // subject to change without notice.
  virtual CodecSupport QueryCodecSupport(
      const SdpVideoFormat& format,
      absl::optional<std::string> scalability_mode) const {
    // Default implementation, query for supported formats and check if the
    // specified format is supported. Returns false if scalability_mode is
    // specified.
    CodecSupport codec_support;
    if (!scalability_mode) {
      codec_support.is_supported = format.IsCodecInList(GetSupportedFormats());
    }
    return codec_support;
  }
  // Creates a VideoEncoder for the specified format.
  virtual std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      const SdpVideoFormat& format) = 0;
  // This method creates an EncoderSelector to use for a VideoSendStream
  // (and hence should probably have been called CreateEncoderSelector()).
  //
  // Note: This method is unsuitable if encoding several streams that
  // are using same VideoEncoderFactory (either by several streams in one
  // PeerConnection or streams with different PeerConnection but same
  // PeerConnectionFactory). This is due to the fact that the method is not
  // given any stream identifier, nor is the EncoderSelectorInterface given any
  // stream identifiers, i.e. one does not know which stream is being encoded
  // with help of the selector.
  //
  // In such a scenario, the `RtpSenderInterface::SetEncoderSelector` is
  // recommended.
  //
  // TODO(bugs.webrtc.org:14122): Deprecate and remove in favor of
  // `RtpSenderInterface::SetEncoderSelector`.
  virtual std::unique_ptr<EncoderSelectorInterface> GetEncoderSelector() const {
    return nullptr;
  }
  virtual ~VideoEncoderFactory() {}
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_H_

View file

@ -0,0 +1,145 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "api/array_view.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/video_coding/svc/scalability_mode_util.h"
namespace webrtc {
// The VideoEncoderFactoryTemplate supports encoders implementations given as
// template arguments.
//
// To include an encoder in the factory it requires three static members
// functions to be defined:
//
// // Returns the supported SdpVideoFormats this encoder can produce.
// static std::vector<SdpVideoFormat> SupportedFormats();
//
// // Creates an encoder instance for the given format.
// static std::unique_ptr<VideoEncoder>
// CreateEncoder(const SdpVideoFormat& format);
//
// // Returns true if the encoder supports the given scalability mode.
// static bool
// IsScalabilityModeSupported(ScalabilityMode scalability_mode);
//
// Note that the order of the template arguments matter as the factory will
// query/return the first encoder implementation supporting the given
// SdpVideoFormat.
template <typename... Ts>
class VideoEncoderFactoryTemplate : public VideoEncoderFactory {
public:
std::vector<SdpVideoFormat> GetSupportedFormats() const override {
return GetSupportedFormatsInternal<Ts...>();
}
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
const SdpVideoFormat& format) override {
// We fuzzy match the specified format for both valid and not so valid
// reasons. The valid reason is that there are many standardized codec
// specific fmtp parameters that have not been implemented, and in those
// cases we should not fail to instantiate an encoder just because we don't
// recognize the parameter. The not so valid reason is that we have started
// adding parameters completely unrelated to the SDP to the SdpVideoFormat.
// TODO(bugs.webrtc.org/13868): Remove FuzzyMatchSdpVideoFormat
absl::optional<SdpVideoFormat> matched =
FuzzyMatchSdpVideoFormat(GetSupportedFormats(), format);
return CreateVideoEncoderInternal<Ts...>(matched.value_or(format));
}
CodecSupport QueryCodecSupport(
const SdpVideoFormat& format,
absl::optional<std::string> scalability_mode) const override {
return QueryCodecSupportInternal<Ts...>(format, scalability_mode);
}
private:
bool IsFormatInList(
const SdpVideoFormat& format,
rtc::ArrayView<const SdpVideoFormat> supported_formats) const {
return absl::c_any_of(
supported_formats, [&](const SdpVideoFormat& supported_format) {
return supported_format.name == format.name &&
supported_format.parameters == format.parameters;
});
}
template <typename V>
bool IsScalabilityModeSupported(
const absl::optional<std::string>& scalability_mode_string) const {
if (!scalability_mode_string.has_value()) {
return true;
}
absl::optional<ScalabilityMode> scalability_mode =
ScalabilityModeFromString(*scalability_mode_string);
return scalability_mode.has_value() &&
V::IsScalabilityModeSupported(*scalability_mode);
}
template <typename V, typename... Vs>
std::vector<SdpVideoFormat> GetSupportedFormatsInternal() const {
auto supported_formats = V::SupportedFormats();
if constexpr (sizeof...(Vs) > 0) {
// Supported formats may overlap between implementations, so duplicates
// should be filtered out.
for (const auto& other_format : GetSupportedFormatsInternal<Vs...>()) {
if (!IsFormatInList(other_format, supported_formats)) {
supported_formats.push_back(other_format);
}
}
}
return supported_formats;
}
template <typename V, typename... Vs>
std::unique_ptr<VideoEncoder> CreateVideoEncoderInternal(
const SdpVideoFormat& format) {
if (IsFormatInList(format, V::SupportedFormats())) {
return V::CreateEncoder(format);
}
if constexpr (sizeof...(Vs) > 0) {
return CreateVideoEncoderInternal<Vs...>(format);
}
return nullptr;
}
template <typename V, typename... Vs>
CodecSupport QueryCodecSupportInternal(
const SdpVideoFormat& format,
const absl::optional<std::string>& scalability_mode) const {
if (IsFormatInList(format, V::SupportedFormats())) {
return {.is_supported = IsScalabilityModeSupported<V>(scalability_mode)};
}
if constexpr (sizeof...(Vs) > 0) {
return QueryCodecSupportInternal<Vs...>(format, scalability_mode);
}
return {.is_supported = false};
}
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_H_

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBAOM_AV1_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBAOM_AV1_ADAPTER_H_
#include <memory>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "api/video_codecs/sdp_video_format.h"
#include "modules/video_coding/codecs/av1/av1_svc_config.h"
#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
namespace webrtc {
struct LibaomAv1EncoderTemplateAdapter {
static std::vector<SdpVideoFormat> SupportedFormats() {
absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
scalability_modes = LibaomAv1EncoderSupportedScalabilityModes();
return {SdpVideoFormat("AV1", CodecParameterMap(), scalability_modes)};
}
static std::unique_ptr<VideoEncoder> CreateEncoder(
const SdpVideoFormat& format) {
return CreateLibaomAv1Encoder();
}
static bool IsScalabilityModeSupported(ScalabilityMode scalability_mode) {
return LibaomAv1EncoderSupportsScalabilityMode(scalability_mode);
}
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBAOM_AV1_ADAPTER_H_

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBVPX_VP8_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBVPX_VP8_ADAPTER_H_
#include <memory>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "api/video_codecs/sdp_video_format.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp8/vp8_scalability.h"
namespace webrtc {
struct LibvpxVp8EncoderTemplateAdapter {
static std::vector<SdpVideoFormat> SupportedFormats() {
absl::InlinedVector<ScalabilityMode, kScalabilityModeCount>
scalability_modes;
for (const auto scalability_mode : kVP8SupportedScalabilityModes) {
scalability_modes.push_back(scalability_mode);
}
return {SdpVideoFormat("VP8", CodecParameterMap(), scalability_modes)};
}
static std::unique_ptr<VideoEncoder> CreateEncoder(
const SdpVideoFormat& format) {
return VP8Encoder::Create();
}
static bool IsScalabilityModeSupported(ScalabilityMode scalability_mode) {
return VP8SupportsScalabilityMode(scalability_mode);
}
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBVPX_VP8_ADAPTER_H_

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBVPX_VP9_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBVPX_VP9_ADAPTER_H_
#include <memory>
#include <vector>
#include "modules/video_coding/codecs/vp9/include/vp9.h"
namespace webrtc {
// Adapter exposing the libvpx VP9 encoder to VideoEncoderFactoryTemplate.
struct LibvpxVp9EncoderTemplateAdapter {
  // Returns all supported VP9 formats, annotated with scalability modes.
  static std::vector<SdpVideoFormat> SupportedFormats() {
    return SupportedVP9Codecs(/*add_scalability_modes=*/true);
  }
  // VP9 settings (e.g. profile) are derived from `format`.
  static std::unique_ptr<VideoEncoder> CreateEncoder(
      const SdpVideoFormat& format) {
    return VP9Encoder::Create(cricket::CreateVideoCodec(format));
  }
  static bool IsScalabilityModeSupported(ScalabilityMode scalability_mode) {
    return VP9Encoder::SupportsScalabilityMode(scalability_mode);
  }
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_LIBVPX_VP9_ADAPTER_H_

View file

@ -0,0 +1,50 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_OPEN_H264_ADAPTER_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_OPEN_H264_ADAPTER_H_
#include <memory>
#include <vector>
#include "modules/video_coding/codecs/h264/include/h264.h"
namespace webrtc {
// TODO(bugs.webrtc.org/13573): When OpenH264 is no longer a conditional build
// target remove #ifdefs.
// Adapter exposing the OpenH264 encoder to VideoEncoderFactoryTemplate.
// When WEBRTC_USE_H264 is not defined, this adapter advertises no formats
// and creates no encoders.
struct OpenH264EncoderTemplateAdapter {
  static std::vector<SdpVideoFormat> SupportedFormats() {
#if defined(WEBRTC_USE_H264)
    return SupportedH264Codecs(/*add_scalability_modes=*/true);
#else
    return {};
#endif
  }
  // H264 settings (e.g. profile/level) are derived from `format`.
  static std::unique_ptr<VideoEncoder> CreateEncoder(
      const SdpVideoFormat& format) {
#if defined(WEBRTC_USE_H264)
    return H264Encoder::Create(cricket::CreateVideoCodec(format));
#else
    return nullptr;
#endif
  }
  static bool IsScalabilityModeSupported(ScalabilityMode scalability_mode) {
#if defined(WEBRTC_USE_H264)
    return H264Encoder::SupportsScalabilityMode(scalability_mode);
#else
    return false;
#endif
  }
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_TEMPLATE_OPEN_H264_ADAPTER_H_

View file

@ -0,0 +1,548 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/video_encoder_software_fallback_wrapper.h"
#include <stdint.h>
#include <cstdio>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "media/base/video_common.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/include/video_error_codes_utils.h"
#include "modules/video_coding/utility/simulcast_utility.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"
namespace webrtc {
namespace {
// If forced fallback is allowed, either:
//
// 1) The forced fallback is requested if the resolution is less than or equal
// to `max_pixels_`. The resolution is allowed to be scaled down to
// `min_pixels_`.
//
// 2) The forced fallback is requested if temporal support is preferred and the
// SW fallback supports temporal layers while the HW encoder does not.
struct ForcedFallbackParams {
 public:
  // True when `codec` is small enough to trigger a resolution-based switch
  // to the software encoder. When the legacy VP8-specific field trial is
  // active, only single-stream VP8 configurations qualify.
  bool SupportsResolutionBasedSwitch(const VideoCodec& codec) const {
    if (!enable_resolution_based_switch) {
      return false;
    }
    if (codec.width * codec.height > max_pixels) {
      return false;
    }
    if (!vp8_specific_resolution_switch) {
      return true;
    }
    return codec.codecType == kVideoCodecVP8 &&
           codec.numberOfSimulcastStreams <= 1;
  }

  // True when a switch for temporal-layer support may be made: only
  // configurations that actually use more than one temporal layer qualify.
  bool SupportsTemporalBasedSwitch(const VideoCodec& codec) const {
    if (!enable_temporal_based_switch) {
      return false;
    }
    return SimulcastUtility::NumberOfTemporalLayers(codec, 0) != 1;
  }

  bool enable_temporal_based_switch = false;
  bool enable_resolution_based_switch = false;
  bool vp8_specific_resolution_switch = false;
  int min_pixels = kDefaultMinPixelsPerFrame;
  int max_pixels = 320 * 240;
};
const char kVp8ForceFallbackEncoderFieldTrial[] =
    "WebRTC-VP8-Forced-Fallback-Encoder-v2";
// Reads the forced-fallback configuration from field trials.
// "WebRTC-Video-EncoderFallbackSettings" (resolution_threshold_px) takes
// precedence; otherwise the legacy VP8-specific trial
// "WebRTC-VP8-Forced-Fallback-Encoder-v2" is parsed. Returns nullopt when
// neither trial provides a valid configuration.
absl::optional<ForcedFallbackParams> ParseFallbackParamsFromFieldTrials(
    const VideoEncoder& main_encoder) {
  // Ignore WebRTC-VP8-Forced-Fallback-Encoder-v2 if
  // WebRTC-Video-EncoderFallbackSettings is present.
  FieldTrialOptional<int> resolution_threshold_px("resolution_threshold_px");
  ParseFieldTrial(
      {&resolution_threshold_px},
      FieldTrialBasedConfig().Lookup("WebRTC-Video-EncoderFallbackSettings"));
  if (resolution_threshold_px) {
    ForcedFallbackParams params;
    params.enable_resolution_based_switch = true;
    params.max_pixels = resolution_threshold_px.Value();
    return params;
  }
  const std::string field_trial =
      webrtc::field_trial::FindFullName(kVp8ForceFallbackEncoderFieldTrial);
  if (!absl::StartsWith(field_trial, "Enabled")) {
    return absl::nullopt;
  }
  // The main encoder's reported minimum resolution bounds the accepted
  // `max_pixels` value parsed below.
  int max_pixels_lower_bound =
      main_encoder.GetEncoderInfo().scaling_settings.min_pixels_per_frame - 1;
  ForcedFallbackParams params;
  params.enable_resolution_based_switch = true;
  int min_bps = 0;
  // Expected trial format: "Enabled-<min_pixels>,<max_pixels>,<min_bps>".
  if (sscanf(field_trial.c_str(), "Enabled-%d,%d,%d", &params.min_pixels,
             &params.max_pixels, &min_bps) != 3) {
    RTC_LOG(LS_WARNING)
        << "Invalid number of forced fallback parameters provided.";
    return absl::nullopt;
  } else if (params.min_pixels <= 0 ||
             params.max_pixels < max_pixels_lower_bound ||
             params.max_pixels < params.min_pixels || min_bps <= 0) {
    RTC_LOG(LS_WARNING) << "Invalid forced fallback parameter value provided.";
    return absl::nullopt;
  }
  params.vp8_specific_resolution_switch = true;
  return params;
}
// Combines the field-trial configuration with the caller's preference for
// temporal-layer support. Returns nullopt only when no field trial is active
// and temporal support is not preferred.
absl::optional<ForcedFallbackParams> GetForcedFallbackParams(
    bool prefer_temporal_support,
    const VideoEncoder& main_encoder) {
  absl::optional<ForcedFallbackParams> params =
      ParseFallbackParamsFromFieldTrials(main_encoder);
  if (!prefer_temporal_support) {
    return params;
  }
  // Temporal preference forces params into existence even without trials.
  if (!params.has_value()) {
    params.emplace();
  }
  params->enable_temporal_based_switch = true;
  return params;
}
// Wraps a main (typically hardware) encoder and a software encoder, and
// routes VideoEncoder calls to whichever is active. Falls back to software
// on failure, or proactively ("forced fallback") when field trials request
// it for small resolutions or temporal-layer support.
class VideoEncoderSoftwareFallbackWrapper final : public VideoEncoder {
 public:
  VideoEncoderSoftwareFallbackWrapper(
      std::unique_ptr<webrtc::VideoEncoder> sw_encoder,
      std::unique_ptr<webrtc::VideoEncoder> hw_encoder,
      bool prefer_temporal_support);
  ~VideoEncoderSoftwareFallbackWrapper() override;
  void SetFecControllerOverride(
      FecControllerOverride* fec_controller_override) override;
  int32_t InitEncode(const VideoCodec* codec_settings,
                     const VideoEncoder::Settings& settings) override;
  int32_t RegisterEncodeCompleteCallback(
      EncodedImageCallback* callback) override;
  int32_t Release() override;
  int32_t Encode(const VideoFrame& frame,
                 const std::vector<VideoFrameType>* frame_types) override;
  void OnPacketLossRateUpdate(float packet_loss_rate) override;
  void OnRttUpdate(int64_t rtt_ms) override;
  void OnLossNotification(const LossNotification& loss_notification) override;
  void SetRates(const RateControlParameters& parameters) override;
  EncoderInfo GetEncoderInfo() const override;

 private:
  bool InitFallbackEncoder(bool is_forced);
  bool TryInitForcedFallbackEncoder();
  bool IsFallbackActive() const;
  // Returns the encoder calls should currently be routed to, based on
  // `encoder_state_`.
  VideoEncoder* current_encoder() {
    switch (encoder_state_) {
      case EncoderState::kUninitialized:
        RTC_LOG(LS_WARNING)
            << "Trying to access encoder in uninitialized fallback wrapper.";
        // Return main encoder to preserve previous behavior.
        [[fallthrough]];
      case EncoderState::kMainEncoderUsed:
        return encoder_.get();
      case EncoderState::kFallbackDueToFailure:
      case EncoderState::kForcedFallback:
        return fallback_encoder_.get();
    }
    RTC_CHECK_NOTREACHED();
  }
  // Updates encoder with last observed parameters, such as callbacks, rates,
  // etc.
  void PrimeEncoder(VideoEncoder* encoder) const;
  // Settings used in the last InitEncode call and used if a dynamic fallback to
  // software is required.
  VideoCodec codec_settings_;
  absl::optional<VideoEncoder::Settings> encoder_settings_;
  // The last rate control settings, if set.
  absl::optional<RateControlParameters> rate_control_parameters_;
  // The last channel parameters set.
  absl::optional<float> packet_loss_;
  absl::optional<int64_t> rtt_;
  absl::optional<LossNotification> loss_notification_;
  enum class EncoderState {
    kUninitialized,
    kMainEncoderUsed,
    kFallbackDueToFailure,
    kForcedFallback
  };
  EncoderState encoder_state_;
  // Main (hw) encoder; declared before `fallback_params_`, which is
  // initialized from it in the constructor.
  const std::unique_ptr<webrtc::VideoEncoder> encoder_;
  const std::unique_ptr<webrtc::VideoEncoder> fallback_encoder_;
  EncodedImageCallback* callback_;
  // Forced-fallback configuration parsed from field trials at construction;
  // nullopt when forced fallback is disabled.
  const absl::optional<ForcedFallbackParams> fallback_params_;
  int32_t EncodeWithMainEncoder(const VideoFrame& frame,
                                const std::vector<VideoFrameType>* frame_types);
};
// Takes ownership of both encoders. Forced-fallback parameters are resolved
// once here from field trials and `prefer_temporal_support`.
VideoEncoderSoftwareFallbackWrapper::VideoEncoderSoftwareFallbackWrapper(
    std::unique_ptr<webrtc::VideoEncoder> sw_encoder,
    std::unique_ptr<webrtc::VideoEncoder> hw_encoder,
    bool prefer_temporal_support)
    : encoder_state_(EncoderState::kUninitialized),
      encoder_(std::move(hw_encoder)),
      fallback_encoder_(std::move(sw_encoder)),
      callback_(nullptr),
      fallback_params_(
          GetForcedFallbackParams(prefer_temporal_support, *encoder_)) {
  // A fallback encoder is mandatory; the main encoder may still fail later.
  RTC_DCHECK(fallback_encoder_);
}
// Both owned encoders are released by their unique_ptr members.
VideoEncoderSoftwareFallbackWrapper::~VideoEncoderSoftwareFallbackWrapper() =
    default;
// Brings `encoder` up to date with everything observed so far: the encode
// callback, rate control state and channel parameters. Called whenever the
// active encoder changes (or is first initialized).
void VideoEncoderSoftwareFallbackWrapper::PrimeEncoder(
    VideoEncoder* encoder) const {
  RTC_DCHECK(encoder);
  if (callback_ != nullptr) {
    encoder->RegisterEncodeCompleteCallback(callback_);
  }
  if (rate_control_parameters_.has_value()) {
    encoder->SetRates(*rate_control_parameters_);
  }
  if (rtt_) {
    encoder->OnRttUpdate(*rtt_);
  }
  if (packet_loss_) {
    encoder->OnPacketLossRateUpdate(*packet_loss_);
  }
  if (loss_notification_) {
    encoder->OnLossNotification(*loss_notification_);
  }
}
bool VideoEncoderSoftwareFallbackWrapper::InitFallbackEncoder(bool is_forced) {
RTC_LOG(LS_WARNING) << "[VESFW] " << __func__
<< "(is_forced=" << (is_forced ? "true" : "false") << ")";
RTC_DCHECK(encoder_settings_.has_value());
const int ret = fallback_encoder_->InitEncode(&codec_settings_,
encoder_settings_.value());
if (ret != WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_ERROR)
<< "[VESFW] software-encoder fallback initialization failed with"
<< " error code: " << WebRtcVideoCodecErrorToString(ret);
fallback_encoder_->Release();
return false;
}
if (encoder_state_ == EncoderState::kMainEncoderUsed) {
// Since we're switching to the fallback encoder, Release the real encoder.
// It may be re-initialized via InitEncode later, and it will continue to
// get Set calls for rates and channel parameters in the meantime.
encoder_->Release();
}
if (is_forced) {
encoder_state_ = EncoderState::kForcedFallback;
} else {
encoder_state_ = EncoderState::kFallbackDueToFailure;
}
return true;
}
// Forwards the override to both encoders. It is the responsibility of `this`
// to ensure only one of them interacts with `fec_controller_override` at a
// given time.
void VideoEncoderSoftwareFallbackWrapper::SetFecControllerOverride(
    FecControllerOverride* fec_controller_override) {
  VideoEncoder* const encoders[] = {encoder_.get(), fallback_encoder_.get()};
  for (VideoEncoder* enc : encoders) {
    enc->SetFecControllerOverride(fec_controller_override);
  }
}
// Initializes the wrapper. Preference order: forced software fallback (if
// configured), then the main encoder, then the software fallback as a
// last resort. Returns the main encoder's error code if both fail.
int32_t VideoEncoderSoftwareFallbackWrapper::InitEncode(
    const VideoCodec* codec_settings,
    const VideoEncoder::Settings& settings) {
  RTC_LOG(LS_INFO) << "[VESFW] " << __func__
                   << "(codec=" << codec_settings->ToString()
                   << ", settings={number_of_cores: "
                   << settings.number_of_cores
                   << ", max_payload_size: " << settings.max_payload_size
                   << "})";
  // Store settings, in case we need to dynamically switch to the fallback
  // encoder after a failed Encode call.
  codec_settings_ = *codec_settings;
  encoder_settings_ = settings;
  // Clear stored rate/channel parameters.
  rate_control_parameters_ = absl::nullopt;
  RTC_DCHECK_EQ(encoder_state_, EncoderState::kUninitialized)
      << "InitEncode() should never be called on an active instance!";
  // Try to init forced software codec if it should be used.
  if (TryInitForcedFallbackEncoder()) {
    PrimeEncoder(current_encoder());
    return WEBRTC_VIDEO_CODEC_OK;
  }
  int32_t ret = encoder_->InitEncode(codec_settings, settings);
  if (ret == WEBRTC_VIDEO_CODEC_OK) {
    encoder_state_ = EncoderState::kMainEncoderUsed;
    PrimeEncoder(current_encoder());
    return ret;
  }
  RTC_LOG(LS_WARNING) << "[VESFW] Hardware encoder initialization failed with"
                      << " error code: " << WebRtcVideoCodecErrorToString(ret);
  // Try to instantiate software codec.
  if (InitFallbackEncoder(/*is_forced=*/false)) {
    PrimeEncoder(current_encoder());
    return WEBRTC_VIDEO_CODEC_OK;
  }
  // Software encoder failed too, use original return code.
  RTC_LOG(LS_WARNING)
      << "[VESFW] Software fallback encoder initialization also failed.";
  encoder_state_ = EncoderState::kUninitialized;
  return ret;
}
// Remembers the callback so it can be replayed if the active encoder is
// switched later, then forwards it to the currently active encoder.
int32_t VideoEncoderSoftwareFallbackWrapper::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  callback_ = callback;
  VideoEncoder* const active = current_encoder();
  return active->RegisterEncodeCompleteCallback(callback);
}
// Releases the active encoder (a no-op when uninitialized) and returns the
// wrapper to the uninitialized state.
int32_t VideoEncoderSoftwareFallbackWrapper::Release() {
  if (encoder_state_ == EncoderState::kUninitialized) {
    return WEBRTC_VIDEO_CODEC_OK;
  }
  const int32_t result = current_encoder()->Release();
  encoder_state_ = EncoderState::kUninitialized;
  return result;
}
// Routes `frame` to whichever encoder is active. An uninitialized wrapper
// rejects the frame; the main-encoder path may dynamically switch to the
// fallback (see EncodeWithMainEncoder()).
int32_t VideoEncoderSoftwareFallbackWrapper::Encode(
    const VideoFrame& frame,
    const std::vector<VideoFrameType>* frame_types) {
  if (encoder_state_ == EncoderState::kUninitialized) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  if (encoder_state_ == EncoderState::kMainEncoderUsed) {
    return EncodeWithMainEncoder(frame, frame_types);
  }
  // kFallbackDueToFailure or kForcedFallback.
  return fallback_encoder_->Encode(frame, frame_types);
}
// Encodes `frame` with the main encoder. If the encoder requests a software
// fallback and the fallback initializes successfully, the frame is re-encoded
// with the fallback (converted/scaled to I420 if the fallback cannot take a
// native buffer). Otherwise the main encoder's return code is propagated.
int32_t VideoEncoderSoftwareFallbackWrapper::EncodeWithMainEncoder(
    const VideoFrame& frame,
    const std::vector<VideoFrameType>* frame_types) {
  int32_t ret = encoder_->Encode(frame, frame_types);
  // If requested, try a software fallback.
  bool fallback_requested = (ret == WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
  if (fallback_requested && InitFallbackEncoder(/*is_forced=*/false)) {
    // Start using the fallback with this frame.
    PrimeEncoder(current_encoder());
    if (frame.video_frame_buffer()->type() == VideoFrameBuffer::Type::kNative &&
        fallback_encoder_->GetEncoderInfo().supports_native_handle) {
      return fallback_encoder_->Encode(frame, frame_types);
    } else {
      RTC_LOG(LS_INFO) << "Fallback encoder does not support native handle - "
                          "converting frame to I420";
      rtc::scoped_refptr<I420BufferInterface> src_buffer =
          frame.video_frame_buffer()->ToI420();
      if (!src_buffer) {
        // Fixed log text: previously read "Failed to convert from to I420".
        RTC_LOG(LS_ERROR) << "Failed to convert frame to I420";
        return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
      }
      // Scale to the resolution the fallback encoder was initialized with.
      rtc::scoped_refptr<VideoFrameBuffer> dst_buffer =
          src_buffer->Scale(codec_settings_.width, codec_settings_.height);
      if (!dst_buffer) {
        RTC_LOG(LS_ERROR) << "Failed to scale video frame.";
        return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
      }
      VideoFrame scaled_frame = frame;
      scaled_frame.set_video_frame_buffer(dst_buffer);
      // The whole frame changed after conversion/scaling.
      scaled_frame.set_update_rect(VideoFrame::UpdateRect{
          0, 0, scaled_frame.width(), scaled_frame.height()});
      return fallback_encoder_->Encode(scaled_frame, frame_types);
    }
  }
  // Fallback encoder failed too, return original error code.
  return ret;
}
void VideoEncoderSoftwareFallbackWrapper::SetRates(
const RateControlParameters& parameters) {
rate_control_parameters_ = parameters;
return current_encoder()->SetRates(parameters);
}
void VideoEncoderSoftwareFallbackWrapper::OnPacketLossRateUpdate(
float packet_loss_rate) {
packet_loss_ = packet_loss_rate;
current_encoder()->OnPacketLossRateUpdate(packet_loss_rate);
}
void VideoEncoderSoftwareFallbackWrapper::OnRttUpdate(int64_t rtt_ms) {
rtt_ = rtt_ms;
current_encoder()->OnRttUpdate(rtt_ms);
}
void VideoEncoderSoftwareFallbackWrapper::OnLossNotification(
const LossNotification& loss_notification) {
loss_notification_ = loss_notification;
current_encoder()->OnLossNotification(loss_notification);
}
// Reports the active encoder's info. Alignment requirements are merged so
// that frames sized for the wrapper satisfy both encoders — a later switch
// to the fallback then never invalidates the frame geometry.
VideoEncoder::EncoderInfo VideoEncoderSoftwareFallbackWrapper::GetEncoderInfo()
    const {
  const EncoderInfo fallback_info = fallback_encoder_->GetEncoderInfo();
  const EncoderInfo main_info = encoder_->GetEncoderInfo();
  EncoderInfo info = IsFallbackActive() ? fallback_info : main_info;
  info.requested_resolution_alignment = cricket::LeastCommonMultiple(
      fallback_info.requested_resolution_alignment,
      main_info.requested_resolution_alignment);
  info.apply_alignment_to_all_simulcast_layers =
      fallback_info.apply_alignment_to_all_simulcast_layers ||
      main_info.apply_alignment_to_all_simulcast_layers;
  if (fallback_params_ && fallback_params_->vp8_specific_resolution_switch) {
    // Keep the resolution within the range the forced fallback can handle.
    info.scaling_settings.min_pixels_per_frame = fallback_params_->min_pixels;
  }
  return info;
}
// True when the software fallback encoder currently receives the calls.
bool VideoEncoderSoftwareFallbackWrapper::IsFallbackActive() const {
  switch (encoder_state_) {
    case EncoderState::kForcedFallback:
    case EncoderState::kFallbackDueToFailure:
      return true;
    default:
      return false;
  }
}
// Attempts a forced software fallback at InitEncode() time. Two modes:
//  * resolution-based: fall back unconditionally for small resolutions;
//  * temporal-based: fall back only if the fallback encoder supports
//    temporal layers while the main encoder does not.
// Returns true if an encoder (main or fallback) was initialized here.
bool VideoEncoderSoftwareFallbackWrapper::TryInitForcedFallbackEncoder() {
  if (!fallback_params_) {
    return false;
  }
  RTC_DCHECK_EQ(encoder_state_, EncoderState::kUninitialized);
  if (fallback_params_->SupportsResolutionBasedSwitch(codec_settings_)) {
    // Settings valid, try to instantiate software codec.
    RTC_LOG(LS_INFO) << "Request forced SW encoder fallback: "
                     << codec_settings_.width << "x" << codec_settings_.height;
    return InitFallbackEncoder(/*is_forced=*/true);
  }
  if (fallback_params_->SupportsTemporalBasedSwitch(codec_settings_)) {
    // First init main encoder to see if that supports temporal layers.
    if (encoder_->InitEncode(&codec_settings_, encoder_settings_.value()) ==
        WEBRTC_VIDEO_CODEC_OK) {
      encoder_state_ = EncoderState::kMainEncoderUsed;
    }
    // fps_allocation[0].size() > 1 indicates temporal-layer support.
    if (encoder_state_ == EncoderState::kMainEncoderUsed &&
        encoder_->GetEncoderInfo().fps_allocation[0].size() != 1) {
      // Primary encoder already supports temporal layers, use that instead.
      return true;
    }
    // Try to initialize fallback and check if it supports temporal layers.
    if (fallback_encoder_->InitEncode(&codec_settings_,
                                      encoder_settings_.value()) ==
        WEBRTC_VIDEO_CODEC_OK) {
      if (fallback_encoder_->GetEncoderInfo().fps_allocation[0].size() != 1) {
        // Fallback encoder available and supports temporal layers, use it!
        if (encoder_state_ == EncoderState::kMainEncoderUsed) {
          // Main encoder initialized but does not support temporal layers,
          // release it again.
          encoder_->Release();
        }
        encoder_state_ = EncoderState::kForcedFallback;
        RTC_LOG(LS_INFO)
            << "Forced switch to SW encoder due to temporal support.";
        return true;
      } else {
        // Fallback encoder initialization succeeded, but it does not support
        // temporal layers either - release it.
        fallback_encoder_->Release();
      }
    }
    if (encoder_state_ == EncoderState::kMainEncoderUsed) {
      // Main encoder already initialized - make use of it.
      RTC_LOG(LS_INFO)
          << "Cannot fall back for temporal support since fallback that "
             "supports is not available. Using main encoder instead.";
      return true;
    }
  }
  // Neither forced fallback mode supported.
  return false;
}
} // namespace
// Factory for the fallback wrapper; takes ownership of both encoders.
std::unique_ptr<VideoEncoder> CreateVideoEncoderSoftwareFallbackWrapper(
    std::unique_ptr<VideoEncoder> sw_fallback_encoder,
    std::unique_ptr<VideoEncoder> hw_encoder,
    bool prefer_temporal_support) {
  std::unique_ptr<VideoEncoder> wrapper =
      std::make_unique<VideoEncoderSoftwareFallbackWrapper>(
          std::move(sw_fallback_encoder), std::move(hw_encoder),
          prefer_temporal_support);
  return wrapper;
}
} // namespace webrtc

View file

@ -0,0 +1,47 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VIDEO_ENCODER_SOFTWARE_FALLBACK_WRAPPER_H_
#define API_VIDEO_CODECS_VIDEO_ENCODER_SOFTWARE_FALLBACK_WRAPPER_H_
#include <memory>
#include <utility>
#include "api/video_codecs/video_encoder.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Used to wrap external VideoEncoders to provide a fallback option on
// software encoding when a hardware encoder fails to encode a stream due to
// hardware restrictions, such as max resolution.
// `prefer_temporal_support` (bool) indicates that if the software fallback
// encoder supports temporal layers but the hardware encoder does not, a
// fallback should be forced even if the encoder otherwise works.
RTC_EXPORT std::unique_ptr<VideoEncoder>
CreateVideoEncoderSoftwareFallbackWrapper(
std::unique_ptr<VideoEncoder> sw_fallback_encoder,
std::unique_ptr<VideoEncoder> hw_encoder,
bool prefer_temporal_support);
// Default fallback for call-sites not yet updated with
// `prefer_temporal_support`.
// TODO(sprang): Remove when usage is gone.
RTC_EXPORT inline std::unique_ptr<VideoEncoder>
CreateVideoEncoderSoftwareFallbackWrapper(
    std::unique_ptr<VideoEncoder> sw_fallback_encoder,
    std::unique_ptr<VideoEncoder> hw_encoder) {
  // Delegates with prefer_temporal_support = false.
  return CreateVideoEncoderSoftwareFallbackWrapper(
      std::move(sw_fallback_encoder), std::move(hw_encoder), false);
}
} // namespace webrtc
#endif // API_VIDEO_CODECS_VIDEO_ENCODER_SOFTWARE_FALLBACK_WRAPPER_H_

View file

@ -0,0 +1,192 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VP8_FRAME_BUFFER_CONTROLLER_H_
#define API_VIDEO_CODECS_VP8_FRAME_BUFFER_CONTROLLER_H_
#include <array>
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/fec_controller_override.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/vp8_frame_config.h"
namespace webrtc {
// Some notes on the prerequisites of the TemporalLayers interface.
// * Vp8FrameBufferController is not thread safe, synchronization is the
// caller's responsibility.
// * The encoder is assumed to encode all frames in order, and callbacks to
// PopulateCodecSpecific() / OnEncodeDone() must happen in the same order.
//
// This means that in the case of pipelining encoders, it is OK to have a chain
// of calls such as this:
// - NextFrameConfig(timestampA)
// - NextFrameConfig(timestampB)
// - PopulateCodecSpecific(timestampA, ...)
// - NextFrameConfig(timestampC)
// - OnEncodeDone(timestampA, 1234, ...)
// - NextFrameConfig(timestampD)
// - OnEncodeDone(timestampB, 0, ...)
// - OnEncodeDone(timestampC, 1234, ...)
// Note that NextFrameConfig() for a new frame can happen before
// OnEncodeDone() for a previous one, but calls themselves must be both
// synchronized (e.g. run on a task queue) and in order (per type).
//
// TODO(eladalon): Revise comment (referring to PopulateCodecSpecific in this
// context is not very meaningful).
struct CodecSpecificInfo;
// Each member represents an override of the VPX configuration if the optional
// value is set.
struct Vp8EncoderConfig {
  struct TemporalLayerConfig {
    // Field-wise inequality; used to detect configuration changes.
    bool operator!=(const TemporalLayerConfig& other) const {
      return ts_number_layers != other.ts_number_layers ||
             ts_target_bitrate != other.ts_target_bitrate ||
             ts_rate_decimator != other.ts_rate_decimator ||
             ts_periodicity != other.ts_periodicity ||
             ts_layer_id != other.ts_layer_id;
    }
    // Upper bounds for the fixed-size arrays below.
    static constexpr size_t kMaxPeriodicity = 16;
    static constexpr size_t kMaxLayers = 5;
    // Number of active temporal layers. Set to 0 if not used.
    uint32_t ts_number_layers;
    // Arrays of length `ts_number_layers`, indicating (cumulative) target
    // bitrate and rate decimator (e.g. 4 if every 4th frame is in the given
    // layer) for each active temporal layer, starting with temporal id 0.
    std::array<uint32_t, kMaxLayers> ts_target_bitrate;
    std::array<uint32_t, kMaxLayers> ts_rate_decimator;
    // The periodicity of the temporal pattern. Set to 0 if not used.
    uint32_t ts_periodicity;
    // Array of length `ts_periodicity` indicating the sequence of temporal id's
    // to assign to incoming frames.
    std::array<uint32_t, kMaxPeriodicity> ts_layer_id;
  };
  // Unset members leave the corresponding encoder setting untouched.
  absl::optional<TemporalLayerConfig> temporal_layer_config;
  // Target bitrate, in bps.
  absl::optional<uint32_t> rc_target_bitrate;
  // Clamp QP to max. Use 0 to disable clamping.
  absl::optional<uint32_t> rc_max_quantizer;
  // Error resilience mode.
  absl::optional<uint32_t> g_error_resilient;
  // If set to true, all previous configuration overrides should be reset.
  bool reset_previous_configuration_overrides = false;
};
// This interface defines a way of delegating the logic of buffer management.
// Multiple streams may be controlled by a single controller, demuxing between
// them using stream_index.
class Vp8FrameBufferController {
 public:
  virtual ~Vp8FrameBufferController() = default;
  // Set limits on QP.
  // The limits are suggestion-only; the controller is allowed to exceed them.
  virtual void SetQpLimits(size_t stream_index, int min_qp, int max_qp) = 0;
  // Number of streams controlled by `this`.
  virtual size_t StreamCount() const = 0;
  // If this method returns true, the encoder is free to drop frames for
  // instance in an effort to uphold encoding bitrate.
  // If this return false, the encoder must not drop any frames unless:
  //  1. Requested to do so via Vp8FrameConfig.drop_frame
  //  2. The frame to be encoded is requested to be a keyframe
  //  3. The encoder detected a large overshoot and decided to drop and then
  //     re-encode the image at a low bitrate. In this case the encoder should
  //     call OnFrameDropped() once to indicate drop, and then call
  //     OnEncodeDone() again when the frame has actually been encoded.
  virtual bool SupportsEncoderFrameDropping(size_t stream_index) const = 0;
  // New target bitrate for a stream (each entry in
  // `bitrates_bps` is for another temporal layer).
  virtual void OnRatesUpdated(size_t stream_index,
                              const std::vector<uint32_t>& bitrates_bps,
                              int framerate_fps) = 0;
  // Called by the encoder before encoding a frame. Returns a set of overrides
  // the controller wishes to enact in the encoder's configuration.
  // If a value is not overridden, previous overrides are still in effect.
  // However, if `Vp8EncoderConfig::reset_previous_configuration_overrides`
  // is set to `true`, all previous overrides are reset.
  virtual Vp8EncoderConfig UpdateConfiguration(size_t stream_index) = 0;
  // Returns the recommended VP8 encode flags needed.
  // The timestamp may be used as both a time and a unique identifier, and so
  // the caller must make sure no two frames use the same timestamp.
  // The timestamp uses a 90kHz RTP clock.
  // After calling this method, first call the actual encoder with the provided
  // frame configuration, and then OnEncodeDone() below.
  virtual Vp8FrameConfig NextFrameConfig(size_t stream_index,
                                         uint32_t rtp_timestamp) = 0;
  // Called after the encode step is done. `rtp_timestamp` must match the
  // parameter use in the NextFrameConfig() call.
  // `is_keyframe` must be true iff the encoder decided to encode this frame as
  // a keyframe.
  // If `info` is not null, the encoder may update `info` with codec specific
  // data such as temporal id. `qp` should indicate the frame-level QP this
  // frame was encoded at. If the encoder does not support extracting this, `qp`
  // should be set to 0.
  virtual void OnEncodeDone(size_t stream_index,
                            uint32_t rtp_timestamp,
                            size_t size_bytes,
                            bool is_keyframe,
                            int qp,
                            CodecSpecificInfo* info) = 0;
  // Called when a frame is dropped by the encoder.
  virtual void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) = 0;
  // Called by the encoder when the packet loss rate changes.
  // `packet_loss_rate` runs between 0.0 (no loss) and 1.0 (everything lost).
  virtual void OnPacketLossRateUpdate(float packet_loss_rate) = 0;
  // Called by the encoder when the round trip time changes.
  virtual void OnRttUpdate(int64_t rtt_ms) = 0;
  // Called when a loss notification is received.
  virtual void OnLossNotification(
      const VideoEncoder::LossNotification& loss_notification) = 0;
};
// Interface for a factory of Vp8FrameBufferController instances.
class Vp8FrameBufferControllerFactory {
 public:
  virtual ~Vp8FrameBufferControllerFactory() = default;
  // Clones oneself. (Avoids Vp8FrameBufferControllerFactoryFactory.)
  virtual std::unique_ptr<Vp8FrameBufferControllerFactory> Clone() const = 0;
  // Create a Vp8FrameBufferController instance.
  // `fec_controller_override` may be used by the controller to enable/disable
  // FEC; ownership stays with the caller.
  virtual std::unique_ptr<Vp8FrameBufferController> Create(
      const VideoCodec& codec,
      const VideoEncoder::Settings& settings,
      FecControllerOverride* fec_controller_override) = 0;
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VP8_FRAME_BUFFER_CONTROLLER_H_

View file

@ -0,0 +1,78 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/vp8_frame_config.h"
#include "modules/video_coding/codecs/interface/common_constants.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Default config: no buffers referenced or updated, i.e. a drop-frame.
Vp8FrameConfig::Vp8FrameConfig() : Vp8FrameConfig(kNone, kNone, kNone, false) {}
// Convenience constructor without entropy freezing.
Vp8FrameConfig::Vp8FrameConfig(BufferFlags last,
                               BufferFlags golden,
                               BufferFlags arf)
    : Vp8FrameConfig(last, golden, arf, false) {}
// Tag-dispatch overload: passing kFreezeEntropy enables entropy freezing.
Vp8FrameConfig::Vp8FrameConfig(BufferFlags last,
                               BufferFlags golden,
                               BufferFlags arf,
                               FreezeEntropy)
    : Vp8FrameConfig(last, golden, arf, true) {}
// Master constructor. A frame that neither references nor updates any
// buffer is interpreted as a drop-frame.
Vp8FrameConfig::Vp8FrameConfig(BufferFlags last,
                               BufferFlags golden,
                               BufferFlags arf,
                               bool freeze_entropy)
    : drop_frame(last == BufferFlags::kNone && golden == BufferFlags::kNone &&
                 arf == BufferFlags::kNone),
      last_buffer_flags(last),
      golden_buffer_flags(golden),
      arf_buffer_flags(arf),
      encoder_layer_id(0),
      // kNoTemporalIdx means no temporal index is signaled to the packetizer.
      packetizer_temporal_idx(kNoTemporalIdx),
      layer_sync(false),
      freeze_entropy(freeze_entropy),
      first_reference(Vp8BufferReference::kNone),
      second_reference(Vp8BufferReference::kNone),
      retransmission_allowed(true) {}
// Returns true if this frame reads from `buffer` (kReference bit set on the
// corresponding flag field).
bool Vp8FrameConfig::References(Buffer buffer) const {
  if (buffer == Buffer::kLast) {
    return (last_buffer_flags & kReference) != 0;
  }
  if (buffer == Buffer::kGolden) {
    return (golden_buffer_flags & kReference) != 0;
  }
  if (buffer == Buffer::kArf) {
    return (arf_buffer_flags & kReference) != 0;
  }
  // Buffer::kCount (or any invalid value) is not a real buffer.
  RTC_DCHECK_NOTREACHED();
  return false;
}
// Returns true if this frame writes to `buffer` (kUpdate bit set on the
// corresponding flag field).
bool Vp8FrameConfig::Updates(Buffer buffer) const {
  if (buffer == Buffer::kLast) {
    return (last_buffer_flags & kUpdate) != 0;
  }
  if (buffer == Buffer::kGolden) {
    return (golden_buffer_flags & kUpdate) != 0;
  }
  if (buffer == Buffer::kArf) {
    return (arf_buffer_flags & kUpdate) != 0;
  }
  // Buffer::kCount (or any invalid value) is not a real buffer.
  RTC_DCHECK_NOTREACHED();
  return false;
}
} // namespace webrtc

View file

@ -0,0 +1,110 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VP8_FRAME_CONFIG_H_
#define API_VIDEO_CODECS_VP8_FRAME_CONFIG_H_
#include <stdint.h>
namespace webrtc {
// Configuration of a VP8 frame - which buffers are to be referenced
// by it, which buffers should be updated, etc.
struct Vp8FrameConfig {
  // A keyframe-style config: updates all three buffers, references none,
  // and is packetized as temporal layer 0.
  static Vp8FrameConfig GetIntraFrameConfig() {
    Vp8FrameConfig frame_config = Vp8FrameConfig(
        BufferFlags::kUpdate, BufferFlags::kUpdate, BufferFlags::kUpdate);
    frame_config.packetizer_temporal_idx = 0;
    return frame_config;
  }
  // Bit flags describing how a frame interacts with one buffer.
  enum BufferFlags : int {
    kNone = 0,
    kReference = 1,
    kUpdate = 2,
    kReferenceAndUpdate = kReference | kUpdate,
  };
  // Tag type selecting the entropy-freezing constructor overload.
  enum FreezeEntropy { kFreezeEntropy };
  // Defined bit-maskable reference to the three buffers available in VP8.
  enum class Vp8BufferReference : uint8_t {
    kNone = 0,
    kLast = 1,
    kGolden = 2,
    kAltref = 4
  };
  Vp8FrameConfig();
  Vp8FrameConfig(BufferFlags last, BufferFlags golden, BufferFlags arf);
  Vp8FrameConfig(BufferFlags last,
                 BufferFlags golden,
                 BufferFlags arf,
                 FreezeEntropy);
  // Enumerates the buffers for References()/Updates() queries below.
  enum class Buffer : int { kLast = 0, kGolden = 1, kArf = 2, kCount };
  bool References(Buffer buffer) const;
  bool Updates(Buffer buffer) const;
  bool IntraFrame() const {
    // Intra frames do not reference any buffers, and update all buffers.
    return last_buffer_flags == kUpdate && golden_buffer_flags == kUpdate &&
           arf_buffer_flags == kUpdate;
  }
  // True if the encoder should drop this frame entirely.
  bool drop_frame;
  BufferFlags last_buffer_flags;
  BufferFlags golden_buffer_flags;
  BufferFlags arf_buffer_flags;
  // The encoder layer ID is used to utilize the correct bitrate allocator
  // inside the encoder. It does not control references nor determine which
  // "actual" temporal layer this is. The packetizer temporal index determines
  // which layer the encoded frame should be packetized into.
  // Normally these are the same, but current temporal-layer strategies for
  // screenshare use one bitrate allocator for all layers, but attempt to
  // packetize / utilize references to split a stream into multiple layers,
  // with different quantizer settings, to hit target bitrate.
  // TODO(sprang): Screenshare layers are being reconsidered at the time of
  // writing, we might be able to remove this distinction, and have a temporal
  // layer imply both (the normal case).
  int encoder_layer_id;
  // TODO(eladalon/sprang): Move out of this class.
  int packetizer_temporal_idx;
  // TODO(eladalon/sprang): Move out of this class.
  bool layer_sync;
  // If true, the entropy coder state is not updated by this frame.
  bool freeze_entropy;
  // Indicates in which order the encoder should search the reference buffers
  // when doing motion prediction. Set to kNone to use unspecified order. Any
  // buffer indicated here must not have the corresponding no_ref bit set.
  // If all three buffers can be reference, the one not listed here should be
  // searched last.
  Vp8BufferReference first_reference;
  Vp8BufferReference second_reference;
  // Whether this frame is eligible for retransmission.
  bool retransmission_allowed;
 private:
  Vp8FrameConfig(BufferFlags last,
                 BufferFlags golden,
                 BufferFlags arf,
                 bool freeze_entropy);
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VP8_FRAME_CONFIG_H_

View file

@ -0,0 +1,108 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/vp8_temporal_layers.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Takes ownership of one controller per simulcast stream. All controllers
// must be non-null. If an FEC controller override is supplied, FEC is
// allowed up front.
Vp8TemporalLayers::Vp8TemporalLayers(
    std::vector<std::unique_ptr<Vp8FrameBufferController>>&& controllers,
    FecControllerOverride* fec_controller_override)
    : controllers_(std::move(controllers)) {
  RTC_DCHECK(!controllers_.empty());
  RTC_DCHECK(absl::c_none_of(
      controllers_,
      [](const std::unique_ptr<Vp8FrameBufferController>& ctrl) {
        return ctrl == nullptr;
      }));
  if (fec_controller_override != nullptr) {
    fec_controller_override->SetFecAllowed(true);
  }
}
void Vp8TemporalLayers::SetQpLimits(size_t stream_index,
int min_qp,
int max_qp) {
RTC_DCHECK_LT(stream_index, controllers_.size());
return controllers_[stream_index]->SetQpLimits(0, min_qp, max_qp);
}
// One controller per simulcast stream.
size_t Vp8TemporalLayers::StreamCount() const {
  const size_t stream_count = controllers_.size();
  return stream_count;
}
bool Vp8TemporalLayers::SupportsEncoderFrameDropping(
size_t stream_index) const {
RTC_DCHECK_LT(stream_index, controllers_.size());
return controllers_[stream_index]->SupportsEncoderFrameDropping(0);
}
// Demuxes the rate update to the per-stream controller (internal index 0).
void Vp8TemporalLayers::OnRatesUpdated(
    size_t stream_index,
    const std::vector<uint32_t>& bitrates_bps,
    int framerate_fps) {
  RTC_DCHECK_LT(stream_index, controllers_.size());
  Vp8FrameBufferController& controller = *controllers_[stream_index];
  controller.OnRatesUpdated(0, bitrates_bps, framerate_fps);
}
// Demuxes to the per-stream controller (internal stream index 0).
Vp8EncoderConfig Vp8TemporalLayers::UpdateConfiguration(size_t stream_index) {
  RTC_DCHECK_LT(stream_index, controllers_.size());
  Vp8FrameBufferController& controller = *controllers_[stream_index];
  return controller.UpdateConfiguration(0);
}
// Demuxes to the per-stream controller (internal stream index 0).
Vp8FrameConfig Vp8TemporalLayers::NextFrameConfig(size_t stream_index,
                                                  uint32_t rtp_timestamp) {
  RTC_DCHECK_LT(stream_index, controllers_.size());
  Vp8FrameBufferController& controller = *controllers_[stream_index];
  return controller.NextFrameConfig(0, rtp_timestamp);
}
// Demuxes the encode-done notification to the per-stream controller
// (internal stream index 0).
void Vp8TemporalLayers::OnEncodeDone(size_t stream_index,
                                     uint32_t rtp_timestamp,
                                     size_t size_bytes,
                                     bool is_keyframe,
                                     int qp,
                                     CodecSpecificInfo* info) {
  RTC_DCHECK_LT(stream_index, controllers_.size());
  Vp8FrameBufferController& controller = *controllers_[stream_index];
  controller.OnEncodeDone(0, rtp_timestamp, size_bytes, is_keyframe, qp, info);
}
// Demuxes the frame-drop notification to the per-stream controller.
// Each controller manages exactly one stream, addressed internally as stream
// 0 — consistent with every other delegating method in this class (the
// previous code incorrectly forwarded the outer `stream_index`).
void Vp8TemporalLayers::OnFrameDropped(size_t stream_index,
                                       uint32_t rtp_timestamp) {
  RTC_DCHECK_LT(stream_index, controllers_.size());
  controllers_[stream_index]->OnFrameDropped(0, rtp_timestamp);
}
void Vp8TemporalLayers::OnPacketLossRateUpdate(float packet_loss_rate) {
for (auto& controller : controllers_) {
controller->OnPacketLossRateUpdate(packet_loss_rate);
}
}
void Vp8TemporalLayers::OnRttUpdate(int64_t rtt_ms) {
for (auto& controller : controllers_) {
controller->OnRttUpdate(rtt_ms);
}
}
// Broadcasts the loss notification to every per-stream controller.
void Vp8TemporalLayers::OnLossNotification(
    const VideoEncoder::LossNotification& loss_notification) {
  for (size_t i = 0; i < controllers_.size(); ++i) {
    controllers_[i]->OnLossNotification(loss_notification);
  }
}
} // namespace webrtc

View file

@ -0,0 +1,77 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VP8_TEMPORAL_LAYERS_H_
#define API_VIDEO_CODECS_VP8_TEMPORAL_LAYERS_H_
#include <memory>
#include <vector>
#include "api/fec_controller_override.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/vp8_frame_buffer_controller.h"
#include "api/video_codecs/vp8_frame_config.h"
namespace webrtc {
// Two different flavors of temporal layers are currently available:
// kFixedPattern uses a fixed repeating pattern of 1-4 layers.
// kBitrateDynamic can allocate frames dynamically to 1 or 2 layers, based on
// the bitrate produced.
// TODO(eladalon): Remove this enum.
enum class Vp8TemporalLayersType { kFixedPattern, kBitrateDynamic };
// This interface defines a way of getting the encoder settings needed to
// realize a temporal layer structure.
// Vp8TemporalLayers aggregates one Vp8FrameBufferController per simulcast
// stream and demuxes calls by `stream_index`.
class Vp8TemporalLayers final : public Vp8FrameBufferController {
 public:
  // `controllers` must be non-empty and contain no null entries.
  Vp8TemporalLayers(
      std::vector<std::unique_ptr<Vp8FrameBufferController>>&& controllers,
      FecControllerOverride* fec_controller_override);
  ~Vp8TemporalLayers() override = default;
  // Vp8FrameBufferController implementation; see base class for semantics.
  void SetQpLimits(size_t stream_index, int min_qp, int max_qp) override;
  size_t StreamCount() const override;
  bool SupportsEncoderFrameDropping(size_t stream_index) const override;
  void OnRatesUpdated(size_t stream_index,
                      const std::vector<uint32_t>& bitrates_bps,
                      int framerate_fps) override;
  Vp8EncoderConfig UpdateConfiguration(size_t stream_index) override;
  Vp8FrameConfig NextFrameConfig(size_t stream_index,
                                 uint32_t rtp_timestamp) override;
  void OnEncodeDone(size_t stream_index,
                    uint32_t rtp_timestamp,
                    size_t size_bytes,
                    bool is_keyframe,
                    int qp,
                    CodecSpecificInfo* info) override;
  void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) override;
  void OnPacketLossRateUpdate(float packet_loss_rate) override;
  void OnRttUpdate(int64_t rtt_ms) override;
  void OnLossNotification(
      const VideoEncoder::LossNotification& loss_notification) override;
 private:
  // One controller per simulcast stream, indexed by `stream_index`.
  std::vector<std::unique_ptr<Vp8FrameBufferController>> controllers_;
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VP8_TEMPORAL_LAYERS_H_

View file

@ -0,0 +1,59 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/vp8_temporal_layers_factory.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "api/fec_controller_override.h"
#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "modules/video_coding/utility/simulcast_utility.h"
#include "rtc_base/checks.h"
namespace webrtc {
std::unique_ptr<Vp8FrameBufferController> Vp8TemporalLayersFactory::Create(
    const VideoCodec& codec,
    const VideoEncoder::Settings& settings,
    FecControllerOverride* fec_controller_override) {
  // Builds one frame-buffer controller per simulcast stream and bundles them
  // into a single Vp8TemporalLayers wrapper.
  std::vector<std::unique_ptr<Vp8FrameBufferController>> controllers;
  const int num_streams = SimulcastUtility::NumberOfSimulcastStreams(codec);
  RTC_DCHECK_GE(num_streams, 1);
  controllers.reserve(num_streams);
  for (int i = 0; i < num_streams; ++i) {
    int num_temporal_layers =
        SimulcastUtility::NumberOfTemporalLayers(codec, i);
    RTC_DCHECK_GE(num_temporal_layers, 1);
    if (SimulcastUtility::IsConferenceModeScreenshare(codec) && i == 0) {
      // Legacy screenshare layers supports max 2 layers, so cap the requested
      // count. (Previously `std::max` was used here, which forced a *minimum*
      // of 2 layers and could exceed what ScreenshareLayers supports.)
      num_temporal_layers = std::min(2, num_temporal_layers);
      controllers.push_back(
          std::make_unique<ScreenshareLayers>(num_temporal_layers));
    } else {
      controllers.push_back(
          std::make_unique<DefaultTemporalLayers>(num_temporal_layers));
    }
  }
  return std::make_unique<Vp8TemporalLayers>(std::move(controllers),
                                             fec_controller_override);
}
std::unique_ptr<Vp8FrameBufferControllerFactory>
Vp8TemporalLayersFactory::Clone() const {
  // The factory holds no state, so a clone is simply a fresh instance.
  std::unique_ptr<Vp8FrameBufferControllerFactory> clone =
      std::make_unique<Vp8TemporalLayersFactory>();
  return clone;
}
} // namespace webrtc

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VP8_TEMPORAL_LAYERS_FACTORY_H_
#define API_VIDEO_CODECS_VP8_TEMPORAL_LAYERS_FACTORY_H_
#include <memory>
#include "api/video_codecs/vp8_temporal_layers.h"
namespace webrtc {
// Factory producing Vp8TemporalLayers frame-buffer controllers (one wrapped
// controller per simulcast stream).
class Vp8TemporalLayersFactory : public Vp8FrameBufferControllerFactory {
 public:
  ~Vp8TemporalLayersFactory() override = default;
  // Returns a new factory instance of the same type.
  std::unique_ptr<Vp8FrameBufferControllerFactory> Clone() const override;
  // Creates a controller configured for `codec`. `fec_controller_override`
  // is forwarded as a non-owning pointer to the created controller.
  std::unique_ptr<Vp8FrameBufferController> Create(
      const VideoCodec& codec,
      const VideoEncoder::Settings& settings,
      FecControllerOverride* fec_controller_override) override;
};
} // namespace webrtc
#endif // API_VIDEO_CODECS_VP8_TEMPORAL_LAYERS_FACTORY_H_

View file

@ -0,0 +1,73 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video_codecs/vp9_profile.h"
#include <map>
#include <utility>
#include "rtc_base/string_to_number.h"
namespace webrtc {
// Profile information for VP9 video.
// SDP fmtp parameter key under which the VP9 profile is signaled; matches
// the extern declaration in vp9_profile.h.
const char kVP9FmtpProfileId[] = "profile-id";
std::string VP9ProfileToString(VP9Profile profile) {
  // Map each known profile to its SDP digit; anything else falls back to "0".
  if (profile == VP9Profile::kProfile1)
    return "1";
  if (profile == VP9Profile::kProfile2)
    return "2";
  if (profile == VP9Profile::kProfile3)
    return "3";
  return "0";
}
absl::optional<VP9Profile> StringToVP9Profile(const std::string& str) {
  // Reject anything that does not parse as a plain decimal integer.
  const absl::optional<int> parsed = rtc::StringToNumber<int>(str);
  if (!parsed.has_value())
    return absl::nullopt;
  if (*parsed == 0)
    return VP9Profile::kProfile0;
  if (*parsed == 1)
    return VP9Profile::kProfile1;
  if (*parsed == 2)
    return VP9Profile::kProfile2;
  if (*parsed == 3)
    return VP9Profile::kProfile3;
  // Parsed, but outside the set of defined profiles.
  return absl::nullopt;
}
absl::optional<VP9Profile> ParseSdpForVP9Profile(
    const CodecParameterMap& params) {
  // A missing "profile-id" entry means the implicit default, profile 0; a
  // present-but-invalid entry yields nullopt via StringToVP9Profile().
  const auto it = params.find(kVP9FmtpProfileId);
  return it == params.end()
             ? absl::optional<VP9Profile>(VP9Profile::kProfile0)
             : StringToVP9Profile(it->second);
}
bool VP9IsSameProfile(const CodecParameterMap& params1,
const CodecParameterMap& params2) {
const absl::optional<VP9Profile> profile = ParseSdpForVP9Profile(params1);
const absl::optional<VP9Profile> other_profile =
ParseSdpForVP9Profile(params2);
return profile && other_profile && profile == other_profile;
}
} // namespace webrtc

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_CODECS_VP9_PROFILE_H_
#define API_VIDEO_CODECS_VP9_PROFILE_H_
#include <string>
#include "absl/types/optional.h"
#include "api/video_codecs/sdp_video_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Profile information for VP9 video.
// SDP fmtp parameter key under which the VP9 profile is signaled
// (defined in vp9_profile.cc as "profile-id").
extern RTC_EXPORT const char kVP9FmtpProfileId[];
// The four profiles defined for VP9; ordinals match the SDP digits "0"-"3".
enum class VP9Profile {
  kProfile0,
  kProfile1,
  kProfile2,
  kProfile3,
};
// Helper function to convert a VP9Profile to std::string. Returns "0" by
// default.
RTC_EXPORT std::string VP9ProfileToString(VP9Profile profile);
// Helper function to convert a std::string to VP9Profile. Returns null if
// given an invalid profile string.
absl::optional<VP9Profile> StringToVP9Profile(const std::string& str);
// Parse profile that is represented as a string of a single digit contained in
// an SDP key-value map. A default profile (kProfile0) will be returned if the
// profile key is missing. Nothing will be returned if the key is present but
// the string is invalid.
RTC_EXPORT absl::optional<VP9Profile> ParseSdpForVP9Profile(
    const CodecParameterMap& params);
// Returns true if the parameters have the same VP9 profile, or neither
// contains a VP9 profile.
bool VP9IsSameProfile(const CodecParameterMap& params1,
                      const CodecParameterMap& params2);
} // namespace webrtc
#endif // API_VIDEO_CODECS_VP9_PROFILE_H_