Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@@ -0,0 +1,147 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_library("audio_codecs_api") {
visibility = [ "*" ]
sources = [
"audio_codec_pair_id.cc",
"audio_codec_pair_id.h",
"audio_decoder.cc",
"audio_decoder.h",
"audio_decoder_factory.h",
"audio_decoder_factory_template.h",
"audio_encoder.cc",
"audio_encoder.h",
"audio_encoder_factory.h",
"audio_encoder_factory_template.h",
"audio_format.cc",
"audio_format.h",
]
deps = [
"..:array_view",
"..:bitrate_allocation",
"..:make_ref_counted",
"..:ref_count",
"..:scoped_refptr",
"../../api:field_trials_view",
"../../api:rtp_parameters",
"../../rtc_base:buffer",
"../../rtc_base:checks",
"../../rtc_base:event_tracer",
"../../rtc_base:refcount",
"../../rtc_base:sanitizer",
"../../rtc_base/system:rtc_export",
"../units:data_rate",
"../units:time_delta",
]
absl_deps = [
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("builtin_audio_decoder_factory") {
visibility = [ "*" ]
allow_poison = [ "audio_codecs" ]
sources = [
"builtin_audio_decoder_factory.cc",
"builtin_audio_decoder_factory.h",
]
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"L16:audio_decoder_L16",
"g711:audio_decoder_g711",
"g722:audio_decoder_g722",
]
defines = []
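# Illustrative note: a build configured with, for example,
#   gn gen out/Default --args="rtc_include_opus=false"
# compiles this target with WEBRTC_USE_BUILTIN_OPUS=0 and without the Opus
# decoder dependencies below; rtc_include_ilbc works the same way for iLBC.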
if (rtc_include_ilbc) {
deps += [ "ilbc:audio_decoder_ilbc" ]
defines += [ "WEBRTC_USE_BUILTIN_ILBC=1" ]
} else {
defines += [ "WEBRTC_USE_BUILTIN_ILBC=0" ]
}
if (rtc_include_opus) {
deps += [
"opus:audio_decoder_multiopus",
"opus:audio_decoder_opus",
]
defines += [ "WEBRTC_USE_BUILTIN_OPUS=1" ]
} else {
defines += [ "WEBRTC_USE_BUILTIN_OPUS=0" ]
}
}
rtc_library("builtin_audio_encoder_factory") {
visibility = [ "*" ]
allow_poison = [ "audio_codecs" ]
sources = [
"builtin_audio_encoder_factory.cc",
"builtin_audio_encoder_factory.h",
]
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"L16:audio_encoder_L16",
"g711:audio_encoder_g711",
"g722:audio_encoder_g722",
]
defines = []
if (rtc_include_ilbc) {
deps += [ "ilbc:audio_encoder_ilbc" ]
defines += [ "WEBRTC_USE_BUILTIN_ILBC=1" ]
} else {
defines += [ "WEBRTC_USE_BUILTIN_ILBC=0" ]
}
if (rtc_include_opus) {
deps += [
"opus:audio_encoder_multiopus",
"opus:audio_encoder_opus",
]
defines += [ "WEBRTC_USE_BUILTIN_OPUS=1" ]
} else {
defines += [ "WEBRTC_USE_BUILTIN_OPUS=0" ]
}
}
rtc_library("opus_audio_decoder_factory") {
visibility = [ "*" ]
allow_poison = [ "audio_codecs" ]
sources = [
"opus_audio_decoder_factory.cc",
"opus_audio_decoder_factory.h",
]
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"opus:audio_decoder_multiopus",
"opus:audio_decoder_opus",
]
}
rtc_library("opus_audio_encoder_factory") {
visibility = [ "*" ]
allow_poison = [ "audio_codecs" ]
sources = [
"opus_audio_encoder_factory.cc",
"opus_audio_encoder_factory.h",
]
deps = [
":audio_codecs_api",
"..:scoped_refptr",
"opus:audio_encoder_multiopus",
"opus:audio_encoder_opus",
]
}

View file

@@ -0,0 +1,55 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_library("audio_encoder_L16") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_L16.cc",
"audio_encoder_L16.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:pcm16b",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_L16") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_L16.cc",
"audio_decoder_L16.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:pcm16b",
"../../../rtc_base:safe_conversions",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

View file

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/L16/audio_decoder_L16.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
absl::optional<AudioDecoderL16::Config> AudioDecoderL16::SdpToConfig(
const SdpAudioFormat& format) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
return config;
}
return absl::nullopt;
}
void AudioDecoderL16::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
Pcm16BAppendSupportedCodecSpecs(specs);
}
std::unique_ptr<AudioDecoder> AudioDecoderL16::MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
return nullptr;
}
return std::make_unique<AudioDecoderPcm16B>(config.sample_rate_hz,
config.num_channels);
}
} // namespace webrtc

View file

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
#define API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// L16 decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderL16 {
struct Config {
bool IsOk() const {
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
(num_channels >= 1 &&
num_channels <= AudioDecoder::kMaxNumberOfChannels);
}
int sample_rate_hz = 8000;
int num_channels = 1;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_

View file

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/L16/audio_encoder_L16.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderL16::Config> AudioEncoderL16::SdpToConfig(
const SdpAudioFormat& format) {
if (!rtc::IsValueInRangeForNumericType<int>(format.num_channels)) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
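// The optional SDP "ptime" parameter, when present and positive, is rounded
// down to a multiple of 10 ms and clamped to [10, 60] ms. For example
// (illustrative): ptime=25 yields a 20 ms frame, ptime=120 yields 60 ms, and
// ptime=7 falls back to the 10 ms minimum.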
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime > 0) {
config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
}
}
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
return config;
}
return absl::nullopt;
}
void AudioEncoderL16::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
Pcm16BAppendSupportedCodecSpecs(specs);
}
AudioCodecInfo AudioEncoderL16::QueryAudioEncoder(
const AudioEncoderL16::Config& config) {
RTC_DCHECK(config.IsOk());
return {config.sample_rate_hz,
rtc::dchecked_cast<size_t>(config.num_channels),
config.sample_rate_hz * config.num_channels * 16};
}
std::unique_ptr<AudioEncoder> AudioEncoderL16::MakeAudioEncoder(
const AudioEncoderL16::Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
AudioEncoderPcm16B::Config c;
c.sample_rate_hz = config.sample_rate_hz;
c.num_channels = config.num_channels;
c.frame_size_ms = config.frame_size_ms;
c.payload_type = payload_type;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderPcm16B>(c);
}
} // namespace webrtc

View file

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_L16_AUDIO_ENCODER_L16_H_
#define API_AUDIO_CODECS_L16_AUDIO_ENCODER_L16_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// L16 encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderL16 {
struct Config {
bool IsOk() const {
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
num_channels >= 1 &&
num_channels <= AudioEncoder::kMaxNumberOfChannels &&
frame_size_ms > 0 && frame_size_ms <= 120 &&
frame_size_ms % 10 == 0;
}
int sample_rate_hz = 8000;
int num_channels = 1;
int frame_size_ms = 10;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_L16_AUDIO_ENCODER_L16_H_

View file

@@ -0,0 +1,3 @@
alessiob@webrtc.org
henrik.lundin@webrtc.org
jakobi@webrtc.org

View file

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_codec_pair_id.h"
#include <atomic>
#include <cstdint>
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
// Returns a new value that it has never returned before. You may call it at
// most 2^63 times in the lifetime of the program. Note: The returned values
// may be easily predictable.
uint64_t GetNextId() {
static std::atomic<uint64_t> next_id(0);
// Atomically increment `next_id`, and return the previous value. Relaxed
// memory order is sufficient, since all we care about is that different
// callers return different values.
const uint64_t new_id = next_id.fetch_add(1, std::memory_order_relaxed);
// This check isn't atomic with the increment, so if we start 2^63 + 1
// invocations of GetNextId() in parallel, the last one to do the atomic
// increment could return the ID 0 before any of the others had time to
// trigger this DCHECK. We blithely assume that this won't happen.
RTC_DCHECK_LT(new_id, uint64_t{1} << 63) << "Used up all ID values";
return new_id;
}
// Make an integer ID more unpredictable. This is a 1:1 mapping, so you can
// feed it any value, but the idea is that you can feed it a sequence such as
// 0, 1, 2, ... and get a new sequence that isn't as trivially predictable, so
// that users won't rely on it being consecutive or increasing or anything like
// that.
constexpr uint64_t ObfuscateId(uint64_t id) {
// Any nonzero coefficient that's relatively prime to 2^64 (that is, any odd
// number) and any constant will give a 1:1 mapping. These high-entropy
// values will prevent the sequence from being trivially predictable.
//
// Both the multiplication and the addition are going to overflow almost
// always, but that's fine---we *want* arithmetic mod 2^64.
return uint64_t{0x85fdb20e1294309a} + uint64_t{0xc516ef5c37462469} * id;
}
// The first ten values. Verified against the Python function
//
// def f(n):
// return (0x85fdb20e1294309a + 0xc516ef5c37462469 * n) % 2**64
//
// Callers should obviously not depend on these exact values...
//
// (On Visual C++, we have to disable warning C4307 (integral constant
// overflow), even though unsigned integers have perfectly well-defined
// overflow behavior.)
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4307)
#endif
static_assert(ObfuscateId(0) == uint64_t{0x85fdb20e1294309a}, "");
static_assert(ObfuscateId(1) == uint64_t{0x4b14a16a49da5503}, "");
static_assert(ObfuscateId(2) == uint64_t{0x102b90c68120796c}, "");
static_assert(ObfuscateId(3) == uint64_t{0xd5428022b8669dd5}, "");
static_assert(ObfuscateId(4) == uint64_t{0x9a596f7eefacc23e}, "");
static_assert(ObfuscateId(5) == uint64_t{0x5f705edb26f2e6a7}, "");
static_assert(ObfuscateId(6) == uint64_t{0x24874e375e390b10}, "");
static_assert(ObfuscateId(7) == uint64_t{0xe99e3d93957f2f79}, "");
static_assert(ObfuscateId(8) == uint64_t{0xaeb52cefccc553e2}, "");
static_assert(ObfuscateId(9) == uint64_t{0x73cc1c4c040b784b}, "");
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace
AudioCodecPairId AudioCodecPairId::Create() {
return AudioCodecPairId(ObfuscateId(GetNextId()));
}
} // namespace webrtc

View file

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
#define API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
#include <stdint.h>
#include <utility>
namespace webrtc {
class AudioCodecPairId final {
public:
// Copyable, but not default constructible.
AudioCodecPairId() = delete;
AudioCodecPairId(const AudioCodecPairId&) = default;
AudioCodecPairId(AudioCodecPairId&&) = default;
AudioCodecPairId& operator=(const AudioCodecPairId&) = default;
AudioCodecPairId& operator=(AudioCodecPairId&&) = default;
friend void swap(AudioCodecPairId& a, AudioCodecPairId& b) {
using std::swap;
swap(a.id_, b.id_);
}
// Creates a new ID, unequal to any previously created ID.
static AudioCodecPairId Create();
// IDs can be tested for equality.
friend bool operator==(AudioCodecPairId a, AudioCodecPairId b) {
return a.id_ == b.id_;
}
friend bool operator!=(AudioCodecPairId a, AudioCodecPairId b) {
return a.id_ != b.id_;
}
// Comparisons. The ordering of ID values is completely arbitrary, but
// stable, so it's useful e.g. if you want to use IDs as keys in an ordered
// map.
friend bool operator<(AudioCodecPairId a, AudioCodecPairId b) {
return a.id_ < b.id_;
}
friend bool operator<=(AudioCodecPairId a, AudioCodecPairId b) {
return a.id_ <= b.id_;
}
friend bool operator>=(AudioCodecPairId a, AudioCodecPairId b) {
return a.id_ >= b.id_;
}
friend bool operator>(AudioCodecPairId a, AudioCodecPairId b) {
return a.id_ > b.id_;
}
// Returns a numeric representation of the ID. The numeric values are
// completely arbitrary, but stable, collision-free, and reasonably evenly
// distributed, so they are e.g. useful as hash values in unordered maps.
uint64_t NumericRepresentation() const { return id_; }
private:
explicit AudioCodecPairId(uint64_t id) : id_(id) {}
uint64_t id_;
};
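// A minimal usage sketch (illustrative; `send_factory` and `recv_factory`
// stand for an AudioEncoderFactory and an AudioDecoderFactory, and `format`
// for an SdpAudioFormat):
//
//   auto codec_pair_id = AudioCodecPairId::Create();
//   auto encoder =
//       send_factory->MakeAudioEncoder(payload_type, format, codec_pair_id);
//   auto decoder = recv_factory->MakeAudioDecoder(format, codec_pair_id);
//
// Passing the same ID to both calls tells the factories that the two
// instances form a pair, e.g. for codecs with built-in bandwidth adaptation.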
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_

View file

@@ -0,0 +1,169 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_decoder.h"
#include <memory>
#include <utility>
#include "api/array_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/sanitizer.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
namespace {
class OldStyleEncodedFrame final : public AudioDecoder::EncodedAudioFrame {
public:
OldStyleEncodedFrame(AudioDecoder* decoder, rtc::Buffer&& payload)
: decoder_(decoder), payload_(std::move(payload)) {}
size_t Duration() const override {
const int ret = decoder_->PacketDuration(payload_.data(), payload_.size());
return ret < 0 ? 0 : static_cast<size_t>(ret);
}
absl::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const override {
auto speech_type = AudioDecoder::kSpeech;
const int ret = decoder_->Decode(
payload_.data(), payload_.size(), decoder_->SampleRateHz(),
decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
return ret < 0 ? absl::nullopt
: absl::optional<DecodeResult>(
{static_cast<size_t>(ret), speech_type});
}
private:
AudioDecoder* const decoder_;
const rtc::Buffer payload_;
};
} // namespace
bool AudioDecoder::EncodedAudioFrame::IsDtxPacket() const {
return false;
}
AudioDecoder::ParseResult::ParseResult() = default;
AudioDecoder::ParseResult::ParseResult(ParseResult&& b) = default;
AudioDecoder::ParseResult::ParseResult(uint32_t timestamp,
int priority,
std::unique_ptr<EncodedAudioFrame> frame)
: timestamp(timestamp), priority(priority), frame(std::move(frame)) {
RTC_DCHECK_GE(priority, 0);
}
AudioDecoder::ParseResult::~ParseResult() = default;
AudioDecoder::ParseResult& AudioDecoder::ParseResult::operator=(
ParseResult&& b) = default;
std::vector<AudioDecoder::ParseResult> AudioDecoder::ParsePayload(
rtc::Buffer&& payload,
uint32_t timestamp) {
std::vector<ParseResult> results;
std::unique_ptr<EncodedAudioFrame> frame(
new OldStyleEncodedFrame(this, std::move(payload)));
results.emplace_back(timestamp, 0, std::move(frame));
return results;
}
int AudioDecoder::Decode(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) {
TRACE_EVENT0("webrtc", "AudioDecoder::Decode");
rtc::MsanCheckInitialized(rtc::MakeArrayView(encoded, encoded_len));
int duration = PacketDuration(encoded, encoded_len);
if (duration >= 0 &&
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
return -1;
}
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
int AudioDecoder::DecodeRedundant(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type) {
TRACE_EVENT0("webrtc", "AudioDecoder::DecodeRedundant");
rtc::MsanCheckInitialized(rtc::MakeArrayView(encoded, encoded_len));
int duration = PacketDurationRedundant(encoded, encoded_len);
if (duration >= 0 &&
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
return -1;
}
return DecodeRedundantInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) {
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
bool AudioDecoder::HasDecodePlc() const {
return false;
}
size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
return 0;
}
// TODO(bugs.webrtc.org/9676): Remove default implementation.
void AudioDecoder::GeneratePlc(size_t /*requested_samples_per_channel*/,
rtc::BufferT<int16_t>* /*concealment_audio*/) {}
int AudioDecoder::ErrorCode() {
return 0;
}
int AudioDecoder::PacketDuration(const uint8_t* encoded,
size_t encoded_len) const {
return kNotImplemented;
}
int AudioDecoder::PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const {
return kNotImplemented;
}
bool AudioDecoder::PacketHasFec(const uint8_t* encoded,
size_t encoded_len) const {
return false;
}
AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) {
switch (type) {
case 0: // TODO(hlundin): Both iSAC and Opus return 0 for speech.
case 1:
return kSpeech;
case 2:
return kComfortNoise;
default:
RTC_DCHECK_NOTREACHED();
return kSpeech;
}
}
constexpr int AudioDecoder::kMaxNumberOfChannels;
} // namespace webrtc

View file

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_H_
#define API_AUDIO_CODECS_AUDIO_DECODER_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"
namespace webrtc {
class AudioDecoder {
public:
enum SpeechType {
kSpeech = 1,
kComfortNoise = 2,
};
// Used by PacketDuration below. The value -1 is reserved for errors.
enum { kNotImplemented = -2 };
AudioDecoder() = default;
virtual ~AudioDecoder() = default;
AudioDecoder(const AudioDecoder&) = delete;
AudioDecoder& operator=(const AudioDecoder&) = delete;
class EncodedAudioFrame {
public:
struct DecodeResult {
size_t num_decoded_samples;
SpeechType speech_type;
};
virtual ~EncodedAudioFrame() = default;
// Returns the duration in samples-per-channel of this audio frame.
// If no duration can be ascertained, returns zero.
virtual size_t Duration() const = 0;
// Returns true if this packet contains DTX.
virtual bool IsDtxPacket() const;
// Decodes this frame of audio and writes the result in `decoded`.
// `decoded` must be large enough to store as many samples as indicated by a
// call to Duration(). On success, returns an absl::optional containing the
// total number of samples across all channels, as well as whether the
// decoder produced comfort noise or speech. On failure, returns an empty
// absl::optional. Decode may be called at most once per frame object.
virtual absl::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const = 0;
};
struct ParseResult {
ParseResult();
ParseResult(uint32_t timestamp,
int priority,
std::unique_ptr<EncodedAudioFrame> frame);
ParseResult(ParseResult&& b);
~ParseResult();
ParseResult& operator=(ParseResult&& b);
// The timestamp of the frame is in samples per channel.
uint32_t timestamp;
// The relative priority of the frame compared to other frames of the same
// payload and the same timeframe. A higher value means a lower priority.
// The highest priority is zero - negative values are not allowed.
int priority;
std::unique_ptr<EncodedAudioFrame> frame;
};
// Let the decoder parse this payload and prepare zero or more decodable
// frames. Each frame must be between 10 ms and 120 ms long. The caller must
// ensure that the AudioDecoder object outlives any frame objects returned by
// this call. The decoder is free to swap or move the data from the `payload`
// buffer. `timestamp` is the input timestamp, in samples, corresponding to
// the start of the payload.
virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp);
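// A sketch of the intended usage (illustrative; `decoder`, `payload` and
// `rtp_timestamp` are assumed to come from the caller):
//
//   std::vector<AudioDecoder::ParseResult> results =
//       decoder->ParsePayload(std::move(payload), rtp_timestamp);
//   for (auto& result : results) {
//     std::vector<int16_t> pcm(result.frame->Duration() * decoder->Channels());
//     absl::optional<AudioDecoder::EncodedAudioFrame::DecodeResult> decoded =
//         result.frame->Decode(pcm);
//   }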
// TODO(bugs.webrtc.org/10098): The Decode and DecodeRedundant methods are
// obsolete; callers should call ParsePayload instead. For now, subclasses
// must still implement DecodeInternal.
// Decodes `encoded_len` bytes from `encoded` and writes the result in
// `decoded`. The maximum bytes allowed to be written into `decoded` is
// `max_decoded_bytes`. Returns the total number of samples across all
// channels. If the decoder produced comfort noise, `speech_type`
// is set to kComfortNoise, otherwise it is kSpeech. The desired output
// sample rate is provided in `sample_rate_hz`, which must be valid for the
// codec at hand.
int Decode(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type);
// Same as Decode(), but interfaces to the decoder's redundant decode function.
// The default implementation simply calls the regular Decode() method.
int DecodeRedundant(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type);
// Indicates if the decoder implements the DecodePlc method.
virtual bool HasDecodePlc() const;
// Calls the packet-loss concealment of the decoder to update the state after
// one or several lost packets. The caller has to make sure that the memory
// allocated in `decoded` can accommodate `num_frames` frames.
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Asks the decoder to generate packet-loss concealment and append it to the
// end of `concealment_audio`. The concealment audio should be in
// channel-interleaved format, with as many channels as the last decoded
// packet produced. The implementation must produce at least
// `requested_samples_per_channel` samples, or nothing at all; producing
// nothing signals the caller to conceal the loss by other means. If the
// implementation provides
// concealment samples, it is also responsible for "stitching" it together
// with the decoded audio on either side of the concealment.
// Note: The default implementation of GeneratePlc will be deleted soon. All
// implementations must provide their own, which can be as simple as a no-op.
// TODO(bugs.webrtc.org/9676): Remove default implementation.
virtual void GeneratePlc(size_t requested_samples_per_channel,
rtc::BufferT<int16_t>* concealment_audio);
// Resets the decoder state (empty buffers etc.).
virtual void Reset() = 0;
// Returns the last error code from the decoder.
virtual int ErrorCode();
// Returns the duration in samples-per-channel of the payload in `encoded`
// which is `encoded_len` bytes long. Returns kNotImplemented if no duration
// estimate is available, or -1 in case of an error.
virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const;
// Returns the duration in samples-per-channel of the redundant payload in
// `encoded` which is `encoded_len` bytes long. Returns kNotImplemented if no
// duration estimate is available, or -1 in case of an error.
virtual int PacketDurationRedundant(const uint8_t* encoded,
size_t encoded_len) const;
// Detects whether a packet has forward error correction. The packet is
// comprised of the samples in `encoded` which is `encoded_len` bytes long.
// Returns true if the packet has FEC and false otherwise.
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
// Returns the actual sample rate of the decoder's output. This value may not
// change during the lifetime of the decoder.
virtual int SampleRateHz() const = 0;
// The number of channels in the decoder's output. This value may not change
// during the lifetime of the decoder.
virtual size_t Channels() const = 0;
// The maximum number of audio channels supported by WebRTC decoders.
static constexpr int kMaxNumberOfChannels = 24;
protected:
static SpeechType ConvertSpeechType(int16_t type);
virtual int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) = 0;
virtual int DecodeRedundantInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_DECODER_H_

View file

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
#define API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/ref_count.h"
namespace webrtc {
// A factory that creates AudioDecoders.
class AudioDecoderFactory : public RefCountInterface {
public:
virtual std::vector<AudioCodecSpec> GetSupportedDecoders() = 0;
virtual bool IsSupportedDecoder(const SdpAudioFormat& format) = 0;
// Creates a new decoder instance. The `codec_pair_id` argument is used to
// link encoders and decoders that talk to the same remote entity: if an
// AudioEncoderFactory::MakeAudioEncoder() and an
// AudioDecoderFactory::MakeAudioDecoder() call receive non-null IDs that
// compare equal, the factory implementations may assume that the encoder and
// decoder form a pair. (The intended use case for this is to set up
// communication between the AudioEncoder and AudioDecoder instances, which is
// needed for some codecs with built-in bandwidth adaptation.)
//
// Returns null if the format isn't supported.
//
// Note: Implementations need to be robust against combinations other than
// one encoder, one decoder getting the same ID; such decoders must still
// work.
virtual std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) = 0;
};
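// A minimal usage sketch (illustrative; CreateBuiltinAudioDecoderFactory()
// is declared in builtin_audio_decoder_factory.h):
//
//   rtc::scoped_refptr<AudioDecoderFactory> factory =
//       CreateBuiltinAudioDecoderFactory();
//   SdpAudioFormat format("L16", 48000, 2);
//   std::unique_ptr<AudioDecoder> decoder;
//   if (factory->IsSupportedDecoder(format)) {
//     decoder = factory->MakeAudioDecoder(format, absl::nullopt);
//   }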
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_

View file

@@ -0,0 +1,145 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_TEMPLATE_H_
#define API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_TEMPLATE_H_
#include <memory>
#include <vector>
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/field_trials_view.h"
#include "api/make_ref_counted.h"
#include "api/scoped_refptr.h"
namespace webrtc {
namespace audio_decoder_factory_template_impl {
template <typename... Ts>
struct Helper;
// Base case: 0 template parameters.
template <>
struct Helper<> {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {}
static bool IsSupportedDecoder(const SdpAudioFormat& format) { return false; }
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
return nullptr;
}
};
// Inductive case: Called with n + 1 template parameters; calls subroutines
// with n template parameters.
template <typename T, typename... Ts>
struct Helper<T, Ts...> {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
T::AppendSupportedDecoders(specs);
Helper<Ts...>::AppendSupportedDecoders(specs);
}
static bool IsSupportedDecoder(const SdpAudioFormat& format) {
auto opt_config = T::SdpToConfig(format);
static_assert(std::is_same<decltype(opt_config),
absl::optional<typename T::Config>>::value,
"T::SdpToConfig() must return a value of type "
"absl::optional<T::Config>");
return opt_config ? true : Helper<Ts...>::IsSupportedDecoder(format);
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
auto opt_config = T::SdpToConfig(format);
return opt_config ? T::MakeAudioDecoder(*opt_config, codec_pair_id)
: Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id,
field_trials);
}
};
template <typename... Ts>
class AudioDecoderFactoryT : public AudioDecoderFactory {
public:
explicit AudioDecoderFactoryT(const FieldTrialsView* field_trials) {
field_trials_ = field_trials;
}
std::vector<AudioCodecSpec> GetSupportedDecoders() override {
std::vector<AudioCodecSpec> specs;
Helper<Ts...>::AppendSupportedDecoders(&specs);
return specs;
}
bool IsSupportedDecoder(const SdpAudioFormat& format) override {
return Helper<Ts...>::IsSupportedDecoder(format);
}
std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) override {
return Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id,
field_trials_);
}
const FieldTrialsView* field_trials_;
};
} // namespace audio_decoder_factory_template_impl
// Make an AudioDecoderFactory that can create instances of the given decoders.
//
// Each decoder type is given as a template argument to the function; it should
// be a struct with the following static member functions:
//
// // Converts `audio_format` to a ConfigType instance. Returns an empty
// // optional if `audio_format` doesn't correctly specify a decoder of our
// // type.
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
//
// // Appends zero or more AudioCodecSpecs to the list that will be returned
// // by AudioDecoderFactory::GetSupportedDecoders().
// void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
//
// // Creates an AudioDecoder for the specified format. Used to implement
// // AudioDecoderFactory::MakeAudioDecoder().
// std::unique_ptr<AudioDecoder> MakeAudioDecoder(
// const ConfigType& config,
// absl::optional<AudioCodecPairId> codec_pair_id);
//
// ConfigType should be a type that encapsulates all the settings needed to
// create an AudioDecoder. T::Config (where T is the decoder struct) should
// either be the config type, or an alias for it.
//
// Whenever it tries to do something, the new factory will try each of the
// decoder types in the order they were specified in the template argument
// list, stopping at the first one that claims to be able to do the job.
//
// TODO(kwiberg): Point at CreateBuiltinAudioDecoderFactory() for an example of
// how it is used.
template <typename... Ts>
rtc::scoped_refptr<AudioDecoderFactory> CreateAudioDecoderFactory(
const FieldTrialsView* field_trials = nullptr) {
// There's no technical reason we couldn't allow zero template parameters,
// but such a factory couldn't create any decoders, and callers can do this
// by mistake by simply forgetting the <> altogether. So we forbid it in
// order to prevent caller foot-shooting.
static_assert(sizeof...(Ts) >= 1,
"Caller must give at least one template parameter");
return rtc::make_ref_counted<
audio_decoder_factory_template_impl::AudioDecoderFactoryT<Ts...>>(
field_trials);
}
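// A minimal usage sketch (illustrative; AudioDecoderL16 from
// api/audio_codecs/L16/audio_decoder_L16.h is used here only as an example of
// a struct that satisfies the contract described above):
//
//   rtc::scoped_refptr<AudioDecoderFactory> factory =
//       CreateAudioDecoderFactory<AudioDecoderL16>();
//   std::unique_ptr<AudioDecoder> decoder = factory->MakeAudioDecoder(
//       SdpAudioFormat("L16", 48000, 2), absl::nullopt);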
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_TEMPLATE_H_

View file

@@ -0,0 +1,114 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_encoder.h"
#include "rtc_base/checks.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
ANAStats::ANAStats() = default;
ANAStats::~ANAStats() = default;
ANAStats::ANAStats(const ANAStats&) = default;
AudioEncoder::EncodedInfo::EncodedInfo() = default;
AudioEncoder::EncodedInfo::EncodedInfo(const EncodedInfo&) = default;
AudioEncoder::EncodedInfo::EncodedInfo(EncodedInfo&&) = default;
AudioEncoder::EncodedInfo::~EncodedInfo() = default;
AudioEncoder::EncodedInfo& AudioEncoder::EncodedInfo::operator=(
const EncodedInfo&) = default;
AudioEncoder::EncodedInfo& AudioEncoder::EncodedInfo::operator=(EncodedInfo&&) =
default;
int AudioEncoder::RtpTimestampRateHz() const {
return SampleRateHz();
}
AudioEncoder::EncodedInfo AudioEncoder::Encode(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
TRACE_EVENT0("webrtc", "AudioEncoder::Encode");
RTC_CHECK_EQ(audio.size(),
static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
const size_t old_size = encoded->size();
EncodedInfo info = EncodeImpl(rtp_timestamp, audio, encoded);
RTC_CHECK_EQ(encoded->size() - old_size, info.encoded_bytes);
return info;
}
bool AudioEncoder::SetFec(bool enable) {
return !enable;
}
bool AudioEncoder::SetDtx(bool enable) {
return !enable;
}
bool AudioEncoder::GetDtx() const {
return false;
}
bool AudioEncoder::SetApplication(Application application) {
return false;
}
void AudioEncoder::SetMaxPlaybackRate(int frequency_hz) {}
void AudioEncoder::SetTargetBitrate(int target_bps) {}
rtc::ArrayView<std::unique_ptr<AudioEncoder>>
AudioEncoder::ReclaimContainedEncoders() {
return nullptr;
}
bool AudioEncoder::EnableAudioNetworkAdaptor(const std::string& config_string,
RtcEventLog* event_log) {
return false;
}
void AudioEncoder::DisableAudioNetworkAdaptor() {}
void AudioEncoder::OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction) {}
void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction) {
RTC_DCHECK_NOTREACHED();
}
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, absl::nullopt);
}
void AudioEncoder::OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms) {}
void AudioEncoder::OnReceivedUplinkAllocation(BitrateAllocationUpdate update) {
OnReceivedUplinkBandwidth(update.target_bitrate.bps(),
update.bwe_period.ms());
}
void AudioEncoder::OnReceivedRtt(int rtt_ms) {}
void AudioEncoder::OnReceivedOverhead(size_t overhead_bytes_per_packet) {}
void AudioEncoder::SetReceiverFrameLengthRange(int min_frame_length_ms,
int max_frame_length_ms) {}
ANAStats AudioEncoder::GetANAStats() const {
return ANAStats();
}
constexpr int AudioEncoder::kMaxNumberOfChannels;
} // namespace webrtc

View file

@@ -0,0 +1,269 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/call/bitrate_allocation.h"
#include "api/units/data_rate.h"
#include "api/units/time_delta.h"
#include "rtc_base/buffer.h"
namespace webrtc {
class RtcEventLog;
// Statistics related to Audio Network Adaptation.
struct ANAStats {
ANAStats();
ANAStats(const ANAStats&);
~ANAStats();
// Number of actions taken by the ANA bitrate controller since the start of
// the call. If this value is not set, it indicates that the bitrate
// controller is disabled.
absl::optional<uint32_t> bitrate_action_counter;
// Number of actions taken by the ANA channel controller since the start of
// the call. If this value is not set, it indicates that the channel
// controller is disabled.
absl::optional<uint32_t> channel_action_counter;
// Number of actions taken by the ANA DTX controller since the start of the
// call. If this value is not set, it indicates that the DTX controller is
// disabled.
absl::optional<uint32_t> dtx_action_counter;
// Number of actions taken by the ANA FEC controller since the start of the
// call. If this value is not set, it indicates that the FEC controller is
// disabled.
absl::optional<uint32_t> fec_action_counter;
// Number of times the ANA frame length controller decided to increase the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
absl::optional<uint32_t> frame_length_increase_counter;
// Number of times the ANA frame length controller decided to decrease the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
absl::optional<uint32_t> frame_length_decrease_counter;
// The uplink packet loss fractions as set by the ANA FEC controller. If this
// value is not set, it indicates that the ANA FEC controller is not active.
absl::optional<float> uplink_packet_loss_fraction;
};
// This is the interface class for encoders in the AudioCoding module. Each
// codec type must have an implementation of this class.
class AudioEncoder {
public:
// Used for UMA logging of codec usage. The same codecs, with the
// same values, must be listed in
// src/tools/metrics/histograms/histograms.xml in chromium to log
// correct values.
enum class CodecType {
kOther = 0, // Codec not specified, and/or not listed in this enum
kOpus = 1,
kIsac = 2,
kPcmA = 3,
kPcmU = 4,
kG722 = 5,
kIlbc = 6,
// Number of histogram bins in the UMA logging of codec types. The
// total number of different codecs that are logged cannot exceed this
// number.
kMaxLoggedAudioCodecTypes
};
struct EncodedInfoLeaf {
size_t encoded_bytes = 0;
uint32_t encoded_timestamp = 0;
int payload_type = 0;
bool send_even_if_empty = false;
bool speech = true;
CodecType encoder_type = CodecType::kOther;
};
// This is the main struct for auxiliary encoding information. Each encoded
// packet should be accompanied by one EncodedInfo struct, containing the
// total number of `encoded_bytes`, the `encoded_timestamp` and the
// `payload_type`. If the packet contains redundant encodings, the `redundant`
// vector will be populated with EncodedInfoLeaf structs. Each struct in the
// vector represents one encoding; the order of structs in the vector is the
// same as the order in which the actual payloads are written to the byte
// stream. When EncodedInfoLeaf structs are present in the vector, the main
// struct's `encoded_bytes` will be the sum of all the `encoded_bytes` in the
// vector.
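// For example (illustrative): a packet carrying two redundant encodings of 60
// and 40 bytes would have two entries in `redundant` and report
// `encoded_bytes` == 100 in the main struct.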
struct EncodedInfo : public EncodedInfoLeaf {
EncodedInfo();
EncodedInfo(const EncodedInfo&);
EncodedInfo(EncodedInfo&&);
~EncodedInfo();
EncodedInfo& operator=(const EncodedInfo&);
EncodedInfo& operator=(EncodedInfo&&);
std::vector<EncodedInfoLeaf> redundant;
};
virtual ~AudioEncoder() = default;
// Returns the input sample rate in Hz and the number of input channels.
// These are constants set at instantiation time.
virtual int SampleRateHz() const = 0;
virtual size_t NumChannels() const = 0;
// Returns the rate at which the RTP timestamps are updated. The default
// implementation returns SampleRateHz().
virtual int RtpTimestampRateHz() const;
// Returns the number of 10 ms frames the encoder will put in the next
// packet. This value may only change when Encode() outputs a packet; i.e.,
// the encoder may vary the number of 10 ms frames from packet to packet, but
// it must decide the length of the next packet no later than when outputting
// the preceding packet.
virtual size_t Num10MsFramesInNextPacket() const = 0;
// Returns the maximum value that can be returned by
// Num10MsFramesInNextPacket().
virtual size_t Max10MsFramesInAPacket() const = 0;
// Returns the current target bitrate in bits/s. The value -1 means that the
// codec adapts the target automatically, and a current target cannot be
// provided.
virtual int GetTargetBitrate() const = 0;
// Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
// NumChannels() samples). Multi-channel audio must be sample-interleaved.
// The encoder appends zero or more bytes of output to `encoded` and returns
// additional encoding information. Encode() checks some preconditions, calls
// EncodeImpl() which does the actual work, and then checks some
// postconditions.
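// For example (illustrative): at 48000 Hz with 2 channels, each Encode() call
// consumes one 10 ms block of 48000 / 100 * 2 = 960 interleaved samples.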
EncodedInfo Encode(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded);
// Resets the encoder to its starting state, discarding any input that has
// been fed to the encoder but not yet emitted in a packet.
virtual void Reset() = 0;
// Enables or disables codec-internal FEC (forward error correction). Returns
// true if the codec was able to comply. The default implementation returns
// true when asked to disable FEC and false when asked to enable it (meaning
// that FEC isn't supported).
virtual bool SetFec(bool enable);
// Enables or disables codec-internal VAD/DTX. Returns true if the codec was
// able to comply. The default implementation returns true when asked to
// disable DTX and false when asked to enable it (meaning that DTX isn't
// supported).
virtual bool SetDtx(bool enable);
// Returns the status of codec-internal DTX. The default implementation always
// returns false.
virtual bool GetDtx() const;
// Sets the application mode. Returns true if the codec was able to comply.
// The default implementation just returns false.
enum class Application { kSpeech, kAudio };
virtual bool SetApplication(Application application);
// Tells the encoder about the highest sample rate the decoder is expected to
// use when decoding the bitstream. The encoder would typically use this
// information to adjust the quality of the encoding. The default
// implementation does nothing.
virtual void SetMaxPlaybackRate(int frequency_hz);
// Tells the encoder what average bitrate we'd like it to produce. The
// encoder is free to adjust or disregard the given bitrate (the default
// implementation does the latter).
ABSL_DEPRECATED("Use OnReceivedTargetAudioBitrate instead")
virtual void SetTargetBitrate(int target_bps);
// Causes this encoder to let go of any other encoders it contains, and
// returns a pointer to an array where they are stored (which is required to
// live as long as this encoder). Unless the returned array is empty, you may
// not call any methods on this encoder afterwards, except for the
// destructor. The default implementation just returns an empty array.
// NOTE: This method is subject to change. Do not call or override it.
virtual rtc::ArrayView<std::unique_ptr<AudioEncoder>>
ReclaimContainedEncoders();
// Enables audio network adaptor. Returns true if successful.
virtual bool EnableAudioNetworkAdaptor(const std::string& config_string,
RtcEventLog* event_log);
// Disables audio network adaptor.
virtual void DisableAudioNetworkAdaptor();
// Provides uplink packet loss fraction to this encoder to allow it to adapt.
// `uplink_packet_loss_fraction` is in the range [0.0, 1.0].
virtual void OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction);
ABSL_DEPRECATED("")
virtual void OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction);
// Provides target audio bitrate to this encoder to allow it to adapt.
virtual void OnReceivedTargetAudioBitrate(int target_bps);
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkBandwidth(int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms);
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkAllocation(BitrateAllocationUpdate update);
// Provides RTT to this encoder to allow it to adapt.
virtual void OnReceivedRtt(int rtt_ms);
// Provides overhead to this encoder to adapt. The overhead is the number of
// bytes that will be added to each packet the encoder generates.
virtual void OnReceivedOverhead(size_t overhead_bytes_per_packet);
// To allow the encoder to adapt its frame length, it must be provided the
// frame length range that receivers can accept.
virtual void SetReceiverFrameLengthRange(int min_frame_length_ms,
int max_frame_length_ms);
// Get statistics related to audio network adaptation.
virtual ANAStats GetANAStats() const;
// The range of frame lengths that are supported, or nullopt if there's no such
// information. This is used together with the bitrate range to calculate the
// full bitrate range, including overhead.
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const = 0;
// The range of payload bitrates that are supported. This is used together
// with the frame length range to calculate the full bitrate range, including
// overhead.
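// For example (illustrative): with a 20 ms frame length and 50 bytes of
// per-packet overhead, the overhead alone adds 50 * 8 * (1000 / 20) = 20000
// bps on top of the payload bitrate.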
virtual absl::optional<std::pair<DataRate, DataRate>> GetBitrateRange()
const {
return absl::nullopt;
}
// The maximum number of audio channels supported by WebRTC encoders.
static constexpr int kMaxNumberOfChannels = 24;
protected:
// Subclasses implement this to perform the actual encoding. Called by
// Encode().
virtual EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) = 0;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_H_

View file

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/ref_count.h"
namespace webrtc {
// A factory that creates AudioEncoders.
class AudioEncoderFactory : public rtc::RefCountInterface {
public:
// Returns a prioritized list of audio codecs, to use for signaling etc.
virtual std::vector<AudioCodecSpec> GetSupportedEncoders() = 0;
// Returns information about how this format would be encoded, provided it's
// supported. More formats and format variations may be supported than those
// returned by GetSupportedEncoders().
virtual absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) = 0;
// Creates an AudioEncoder for the specified format. The encoder will tag its
// payloads with the specified payload type. The `codec_pair_id` argument is
// used to link encoders and decoders that talk to the same remote entity: if
// an AudioEncoderFactory::MakeAudioEncoder() and an
// AudioDecoderFactory::MakeAudioDecoder() call receive non-null IDs that
// compare equal, the factory implementations may assume that the encoder and
// decoder form a pair. (The intended use case for this is to set up
// communication between the AudioEncoder and AudioDecoder instances, which is
// needed for some codecs with built-in bandwidth adaptation.)
//
// Returns null if the format isn't supported.
//
// Note: Implementations need to be robust against combinations other than
// one encoder, one decoder getting the same ID; such encoders must still
// work.
//
// TODO(ossu): Try to avoid audio encoders having to know their payload type.
virtual std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) = 0;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_

View file

@@ -0,0 +1,163 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_TEMPLATE_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_TEMPLATE_H_
#include <memory>
#include <vector>
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/field_trials_view.h"
#include "api/make_ref_counted.h"
#include "api/scoped_refptr.h"
namespace webrtc {
namespace audio_encoder_factory_template_impl {
template <typename... Ts>
struct Helper;
// Base case: 0 template parameters.
template <>
struct Helper<> {
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {}
static absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) {
return absl::nullopt;
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
return nullptr;
}
};
// Inductive case: Called with n + 1 template parameters; calls subroutines
// with n template parameters.
template <typename T, typename... Ts>
struct Helper<T, Ts...> {
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
T::AppendSupportedEncoders(specs);
Helper<Ts...>::AppendSupportedEncoders(specs);
}
static absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) {
auto opt_config = T::SdpToConfig(format);
static_assert(std::is_same<decltype(opt_config),
absl::optional<typename T::Config>>::value,
"T::SdpToConfig() must return a value of type "
"absl::optional<T::Config>");
return opt_config ? absl::optional<AudioCodecInfo>(
T::QueryAudioEncoder(*opt_config))
: Helper<Ts...>::QueryAudioEncoder(format);
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id,
const FieldTrialsView* field_trials) {
auto opt_config = T::SdpToConfig(format);
if (opt_config) {
return T::MakeAudioEncoder(*opt_config, payload_type, codec_pair_id);
} else {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format,
codec_pair_id, field_trials);
}
}
};
template <typename... Ts>
class AudioEncoderFactoryT : public AudioEncoderFactory {
public:
explicit AudioEncoderFactoryT(const FieldTrialsView* field_trials) {
field_trials_ = field_trials;
}
std::vector<AudioCodecSpec> GetSupportedEncoders() override {
std::vector<AudioCodecSpec> specs;
Helper<Ts...>::AppendSupportedEncoders(&specs);
return specs;
}
absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) override {
return Helper<Ts...>::QueryAudioEncoder(format);
}
std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) override {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format, codec_pair_id,
field_trials_);
}
const FieldTrialsView* field_trials_;
};
} // namespace audio_encoder_factory_template_impl
// Make an AudioEncoderFactory that can create instances of the given encoders.
//
// Each encoder type is given as a template argument to the function; it should
// be a struct with the following static member functions:
//
// // Converts `audio_format` to a ConfigType instance. Returns an empty
// // optional if `audio_format` doesn't correctly specify an encoder of our
// // type.
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
//
// // Appends zero or more AudioCodecSpecs to the list that will be returned
// // by AudioEncoderFactory::GetSupportedEncoders().
// void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
//
// // Returns information about how this format would be encoded. Used to
// // implement AudioEncoderFactory::QueryAudioEncoder().
// AudioCodecInfo QueryAudioEncoder(const ConfigType& config);
//
// // Creates an AudioEncoder for the specified format. Used to implement
// // AudioEncoderFactory::MakeAudioEncoder().
//   std::unique_ptr<AudioEncoder> MakeAudioEncoder(
// const ConfigType& config,
// int payload_type,
// absl::optional<AudioCodecPairId> codec_pair_id);
//
// ConfigType should be a type that encapsulates all the settings needed to
// create an AudioEncoder. T::Config (where T is the encoder struct) should
// either be the config type, or an alias for it.
//
// Whenever it tries to do something, the new factory will try each of the
// encoders in the order they were specified in the template argument list,
// stopping at the first one that claims to be able to do the job.
//
// TODO(kwiberg): Point at CreateBuiltinAudioEncoderFactory() for an example of
// how it is used.
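//
// A minimal usage sketch (assuming the AudioEncoderG711 and AudioEncoderG722
// traits from this directory are available and linked in):
//
//   rtc::scoped_refptr<AudioEncoderFactory> factory =
//       CreateAudioEncoderFactory<AudioEncoderG711, AudioEncoderG722>();
//   std::unique_ptr<AudioEncoder> encoder = factory->MakeAudioEncoder(
//       /*payload_type=*/0, SdpAudioFormat("PCMU", 8000, 1), absl::nullopt);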
template <typename... Ts>
rtc::scoped_refptr<AudioEncoderFactory> CreateAudioEncoderFactory(
const FieldTrialsView* field_trials = nullptr) {
// There's no technical reason we couldn't allow zero template parameters,
// but such a factory couldn't create any encoders, and callers can do this
// by mistake by simply forgetting the <> altogether. So we forbid it in
// order to prevent caller foot-shooting.
static_assert(sizeof...(Ts) >= 1,
"Caller must give at least one template parameter");
return rtc::make_ref_counted<
audio_encoder_factory_template_impl::AudioEncoderFactoryT<Ts...>>(
field_trials);
}
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_TEMPLATE_H_

View file

@ -0,0 +1,86 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_format.h"
#include <utility>
#include "absl/strings/match.h"
namespace webrtc {
SdpAudioFormat::SdpAudioFormat(const SdpAudioFormat&) = default;
SdpAudioFormat::SdpAudioFormat(SdpAudioFormat&&) = default;
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels)
: name(name), clockrate_hz(clockrate_hz), num_channels(num_channels) {}
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
const CodecParameterMap& param)
: name(name),
clockrate_hz(clockrate_hz),
num_channels(num_channels),
parameters(param) {}
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
CodecParameterMap&& param)
: name(name),
clockrate_hz(clockrate_hz),
num_channels(num_channels),
parameters(std::move(param)) {}
bool SdpAudioFormat::Matches(const SdpAudioFormat& o) const {
return absl::EqualsIgnoreCase(name, o.name) &&
clockrate_hz == o.clockrate_hz && num_channels == o.num_channels;
}
SdpAudioFormat::~SdpAudioFormat() = default;
SdpAudioFormat& SdpAudioFormat::operator=(const SdpAudioFormat&) = default;
SdpAudioFormat& SdpAudioFormat::operator=(SdpAudioFormat&&) = default;
bool operator==(const SdpAudioFormat& a, const SdpAudioFormat& b) {
return absl::EqualsIgnoreCase(a.name, b.name) &&
a.clockrate_hz == b.clockrate_hz && a.num_channels == b.num_channels &&
a.parameters == b.parameters;
}
AudioCodecInfo::AudioCodecInfo(int sample_rate_hz,
size_t num_channels,
int bitrate_bps)
: AudioCodecInfo(sample_rate_hz,
num_channels,
bitrate_bps,
bitrate_bps,
bitrate_bps) {}
AudioCodecInfo::AudioCodecInfo(int sample_rate_hz,
size_t num_channels,
int default_bitrate_bps,
int min_bitrate_bps,
int max_bitrate_bps)
: sample_rate_hz(sample_rate_hz),
num_channels(num_channels),
default_bitrate_bps(default_bitrate_bps),
min_bitrate_bps(min_bitrate_bps),
max_bitrate_bps(max_bitrate_bps) {
RTC_DCHECK_GT(sample_rate_hz, 0);
RTC_DCHECK_GT(num_channels, 0);
RTC_DCHECK_GE(min_bitrate_bps, 0);
RTC_DCHECK_LE(min_bitrate_bps, default_bitrate_bps);
RTC_DCHECK_GE(max_bitrate_bps, default_bitrate_bps);
}
} // namespace webrtc

View file

@ -0,0 +1,135 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_FORMAT_H_
#define API_AUDIO_CODECS_AUDIO_FORMAT_H_
#include <stddef.h>
#include <map>
#include <string>
#include "absl/strings/string_view.h"
#include "api/rtp_parameters.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// SDP specification for a single audio codec.
struct RTC_EXPORT SdpAudioFormat {
using Parameters [[deprecated(("Use webrtc::CodecParameterMap"))]] =
std::map<std::string, std::string>;
SdpAudioFormat(const SdpAudioFormat&);
SdpAudioFormat(SdpAudioFormat&&);
SdpAudioFormat(absl::string_view name, int clockrate_hz, size_t num_channels);
SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
const CodecParameterMap& param);
SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
CodecParameterMap&& param);
~SdpAudioFormat();
// Returns true if this format is compatible with `o`. In SDP terminology:
// would it represent the same codec between an offer and an answer? As
// opposed to operator==, this method disregards codec parameters.
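// For example, {"PCMU", 8000, 1} matches {"pcmu", 8000, 1}, and two opus
// formats that differ only in their fmtp parameters also match.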
bool Matches(const SdpAudioFormat& o) const;
SdpAudioFormat& operator=(const SdpAudioFormat&);
SdpAudioFormat& operator=(SdpAudioFormat&&);
friend bool operator==(const SdpAudioFormat& a, const SdpAudioFormat& b);
friend bool operator!=(const SdpAudioFormat& a, const SdpAudioFormat& b) {
return !(a == b);
}
std::string name;
int clockrate_hz;
size_t num_channels;
CodecParameterMap parameters;
};
// Information about how an audio format is treated by the codec implementation.
// Contains basic information, such as sample rate and number of channels, which
// isn't uniformly presented by SDP. Also contains flags indicating support for
// integrating with other parts of WebRTC, like external VAD and comfort noise
// level calculation.
//
// To avoid API breakage, and make the code clearer, AudioCodecInfo should not
// be directly initializable with any flags indicating optional support. If it
// were, these initializers would break any time a new flag was added. It's also
// more difficult to understand:
// AudioCodecInfo info{16000, 1, 32000, true, false, false, true, true};
// than
// AudioCodecInfo info(16000, 1, 32000);
// info.allow_comfort_noise = true;
// info.future_flag_b = true;
// info.future_flag_c = true;
struct AudioCodecInfo {
AudioCodecInfo(int sample_rate_hz, size_t num_channels, int bitrate_bps);
AudioCodecInfo(int sample_rate_hz,
size_t num_channels,
int default_bitrate_bps,
int min_bitrate_bps,
int max_bitrate_bps);
AudioCodecInfo(const AudioCodecInfo& b) = default;
~AudioCodecInfo() = default;
bool operator==(const AudioCodecInfo& b) const {
return sample_rate_hz == b.sample_rate_hz &&
num_channels == b.num_channels &&
default_bitrate_bps == b.default_bitrate_bps &&
min_bitrate_bps == b.min_bitrate_bps &&
max_bitrate_bps == b.max_bitrate_bps &&
allow_comfort_noise == b.allow_comfort_noise &&
supports_network_adaption == b.supports_network_adaption;
}
bool operator!=(const AudioCodecInfo& b) const { return !(*this == b); }
bool HasFixedBitrate() const {
RTC_DCHECK_GE(min_bitrate_bps, 0);
RTC_DCHECK_LE(min_bitrate_bps, default_bitrate_bps);
RTC_DCHECK_GE(max_bitrate_bps, default_bitrate_bps);
return min_bitrate_bps == max_bitrate_bps;
}
int sample_rate_hz;
size_t num_channels;
int default_bitrate_bps;
int min_bitrate_bps;
int max_bitrate_bps;
bool allow_comfort_noise = true; // This codec can be used with an external
// comfort noise generator.
bool supports_network_adaption = false; // This codec can adapt to varying
// network conditions.
};
// AudioCodecSpec ties an audio format to specific information about the codec
// and its implementation.
struct AudioCodecSpec {
bool operator==(const AudioCodecSpec& b) const {
return format == b.format && info == b.info;
}
bool operator!=(const AudioCodecSpec& b) const { return !(*this == b); }
SdpAudioFormat format;
AudioCodecInfo info;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_FORMAT_H_

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include <memory>
#include <vector>
#include "api/audio_codecs/L16/audio_decoder_L16.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/g711/audio_decoder_g711.h"
#include "api/audio_codecs/g722/audio_decoder_g722.h"
#if WEBRTC_USE_BUILTIN_ILBC
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h" // nogncheck
#endif
#if WEBRTC_USE_BUILTIN_OPUS
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h" // nogncheck
#endif
namespace webrtc {
namespace {
// Modify an audio decoder to not advertise support for anything.
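// (Used below so that a decoder can still be created when explicitly asked
// for, without being listed by GetSupportedDecoders().)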
template <typename T>
struct NotAdvertised {
using Config = typename T::Config;
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
return T::SdpToConfig(audio_format);
}
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
// Don't advertise support for anything.
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
return T::MakeAudioDecoder(config, codec_pair_id);
}
};
} // namespace
rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory() {
return CreateAudioDecoderFactory<
#if WEBRTC_USE_BUILTIN_OPUS
AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>,
#endif
AudioDecoderG722,
#if WEBRTC_USE_BUILTIN_ILBC
AudioDecoderIlbc,
#endif
AudioDecoderG711, NotAdvertised<AudioDecoderL16>>();
}
} // namespace webrtc

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
#define API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/scoped_refptr.h"
namespace webrtc {
// Creates a new factory that can create the built-in types of audio decoders.
// Note: This will link with all the code implementing those codecs, so if you
// only need a subset of the codecs, consider using
// CreateAudioDecoderFactory<...codecs listed here...>() or
// CreateOpusAudioDecoderFactory() instead.
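//
// For example, a factory limited to Opus and G.711 (a sketch; the
// corresponding codec headers and build targets must be pulled in):
//
//   CreateAudioDecoderFactory<AudioDecoderOpus, AudioDecoderG711>();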
rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory();
} // namespace webrtc
#endif // API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_

View file

@ -0,0 +1,74 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/builtin_audio_encoder_factory.h"
#include <memory>
#include <vector>
#include "api/audio_codecs/L16/audio_encoder_L16.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/g711/audio_encoder_g711.h"
#include "api/audio_codecs/g722/audio_encoder_g722.h"
#if WEBRTC_USE_BUILTIN_ILBC
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h" // nogncheck
#endif
#if WEBRTC_USE_BUILTIN_OPUS
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h" // nogncheck
#endif
namespace webrtc {
namespace {
// Modify an audio encoder to not advertise support for anything.
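// (Used below so that an encoder can still be created when explicitly asked
// for, without being listed by GetSupportedEncoders().)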
template <typename T>
struct NotAdvertised {
using Config = typename T::Config;
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
return T::SdpToConfig(audio_format);
}
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
// Don't advertise support for anything.
}
static AudioCodecInfo QueryAudioEncoder(const Config& config) {
return T::QueryAudioEncoder(config);
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr) {
return T::MakeAudioEncoder(config, payload_type, codec_pair_id,
field_trials);
}
};
} // namespace
rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory() {
return CreateAudioEncoderFactory<
#if WEBRTC_USE_BUILTIN_OPUS
AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>,
#endif
AudioEncoderG722,
#if WEBRTC_USE_BUILTIN_ILBC
AudioEncoderIlbc,
#endif
AudioEncoderG711, NotAdvertised<AudioEncoderL16>>();
}
} // namespace webrtc

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
#define API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/scoped_refptr.h"
namespace webrtc {
// Creates a new factory that can create the built-in types of audio encoders.
// Note: This will link with all the code implementing those codecs, so if you
// only need a subset of the codecs, consider using
// CreateAudioEncoderFactory<...codecs listed here...>() or
// CreateOpusAudioEncoderFactory() instead.
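//
// For example, a factory limited to Opus and G.711 (a sketch; the
// corresponding codec headers and build targets must be pulled in):
//
//   CreateAudioEncoderFactory<AudioEncoderOpus, AudioEncoderG711>();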
rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory();
} // namespace webrtc
#endif // API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_

View file

@ -0,0 +1,55 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_library("audio_encoder_g711") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_g711.cc",
"audio_encoder_g711.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g711",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_g711") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_g711.cc",
"audio_decoder_g711.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g711",
"../../../rtc_base:safe_conversions",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g711/audio_decoder_g711.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
absl::optional<AudioDecoderG711::Config> AudioDecoderG711::SdpToConfig(
const SdpAudioFormat& format) {
const bool is_pcmu = absl::EqualsIgnoreCase(format.name, "PCMU");
const bool is_pcma = absl::EqualsIgnoreCase(format.name, "PCMA");
if (format.clockrate_hz == 8000 && format.num_channels >= 1 &&
(is_pcmu || is_pcma)) {
Config config;
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
}
}
void AudioDecoderG711::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
for (const char* type : {"PCMU", "PCMA"}) {
specs->push_back({{type, 8000, 1}, {8000, 1, 64000}});
}
}
std::unique_ptr<AudioDecoder> AudioDecoderG711::MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
switch (config.type) {
case Config::Type::kPcmU:
return std::make_unique<AudioDecoderPcmU>(config.num_channels);
case Config::Type::kPcmA:
return std::make_unique<AudioDecoderPcmA>(config.num_channels);
default:
RTC_DCHECK_NOTREACHED();
return nullptr;
}
}
} // namespace webrtc

View file

@ -0,0 +1,49 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
#define API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G711 decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderG711 {
struct Config {
enum class Type { kPcmU, kPcmA };
bool IsOk() const {
return (type == Type::kPcmU || type == Type::kPcmA) &&
num_channels >= 1 &&
num_channels <= AudioDecoder::kMaxNumberOfChannels;
}
Type type;
int num_channels;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_

View file

@ -0,0 +1,95 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g711/audio_encoder_g711.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
const SdpAudioFormat& format) {
const bool is_pcmu = absl::EqualsIgnoreCase(format.name, "PCMU");
const bool is_pcma = absl::EqualsIgnoreCase(format.name, "PCMA");
if (format.clockrate_hz == 8000 && format.num_channels >= 1 &&
(is_pcmu || is_pcma)) {
Config config;
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
config.frame_size_ms = 20;
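// An SDP "ptime" parameter, if present and positive, overrides the default:
// it is rounded down to a multiple of 10 ms and clamped to [10, 60] ms
// (e.g. ptime=25 yields a 20 ms frame size).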
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime > 0) {
config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
}
}
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
}
}
void AudioEncoderG711::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
for (const char* type : {"PCMU", "PCMA"}) {
specs->push_back({{type, 8000, 1}, {8000, 1, 64000}});
}
}
AudioCodecInfo AudioEncoderG711::QueryAudioEncoder(const Config& config) {
RTC_DCHECK(config.IsOk());
return {8000, rtc::dchecked_cast<size_t>(config.num_channels),
64000 * config.num_channels};
}
std::unique_ptr<AudioEncoder> AudioEncoderG711::MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
switch (config.type) {
case Config::Type::kPcmU: {
AudioEncoderPcmU::Config impl_config;
impl_config.num_channels = config.num_channels;
impl_config.frame_size_ms = config.frame_size_ms;
impl_config.payload_type = payload_type;
return std::make_unique<AudioEncoderPcmU>(impl_config);
}
case Config::Type::kPcmA: {
AudioEncoderPcmA::Config impl_config;
impl_config.num_channels = config.num_channels;
impl_config.frame_size_ms = config.frame_size_ms;
impl_config.payload_type = payload_type;
return std::make_unique<AudioEncoderPcmA>(impl_config);
}
default: {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
}
}
} // namespace webrtc

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G711_AUDIO_ENCODER_G711_H_
#define API_AUDIO_CODECS_G711_AUDIO_ENCODER_G711_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G711 encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderG711 {
struct Config {
enum class Type { kPcmU, kPcmA };
bool IsOk() const {
return (type == Type::kPcmU || type == Type::kPcmA) &&
frame_size_ms > 0 && frame_size_ms % 10 == 0 &&
num_channels >= 1 &&
num_channels <= AudioEncoder::kMaxNumberOfChannels;
}
Type type = Type::kPcmU;
int num_channels = 1;
int frame_size_ms = 20;
};
static absl::optional<AudioEncoderG711::Config> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G711_AUDIO_ENCODER_G711_H_

View file

@ -0,0 +1,62 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_source_set("audio_encoder_g722_config") {
visibility = [ "*" ]
sources = [ "audio_encoder_g722_config.h" ]
deps = [ "..:audio_codecs_api" ]
}
rtc_library("audio_encoder_g722") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_g722.cc",
"audio_encoder_g722.h",
]
deps = [
":audio_encoder_g722_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g722",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_g722") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_g722.cc",
"audio_decoder_g722.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:g722",
"../../../rtc_base:safe_conversions",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

View file

@ -0,0 +1,56 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g722/audio_decoder_g722.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
absl::optional<AudioDecoderG722::Config> AudioDecoderG722::SdpToConfig(
const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "G722") &&
format.clockrate_hz == 8000 &&
(format.num_channels == 1 || format.num_channels == 2)) {
return Config{rtc::dchecked_cast<int>(format.num_channels)};
}
return absl::nullopt;
}
void AudioDecoderG722::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"G722", 8000, 1}, {16000, 1, 64000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderG722::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
switch (config.num_channels) {
case 1:
return std::make_unique<AudioDecoderG722Impl>();
case 2:
return std::make_unique<AudioDecoderG722StereoImpl>();
default:
RTC_DCHECK_NOTREACHED();
return nullptr;
}
}
} // namespace webrtc

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
#define API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G722 decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderG722 {
struct Config {
bool IsOk() const { return num_channels == 1 || num_channels == 2; }
int num_channels;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_

View file

@ -0,0 +1,74 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g722/audio_encoder_g722.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderG722Config> AudioEncoderG722::SdpToConfig(
const SdpAudioFormat& format) {
if (!absl::EqualsIgnoreCase(format.name, "g722") ||
format.clockrate_hz != 8000) {
return absl::nullopt;
}
AudioEncoderG722Config config;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime > 0) {
const int whole_packets = *ptime / 10;
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 10, 60);
}
}
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
}
void AudioEncoderG722::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
const SdpAudioFormat fmt = {"G722", 8000, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
AudioCodecInfo AudioEncoderG722::QueryAudioEncoder(
const AudioEncoderG722Config& config) {
RTC_DCHECK(config.IsOk());
return {16000, rtc::dchecked_cast<size_t>(config.num_channels),
64000 * config.num_channels};
}
std::unique_ptr<AudioEncoder> AudioEncoderG722::MakeAudioEncoder(
const AudioEncoderG722Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderG722Impl>(config, payload_type);
}
} // namespace webrtc

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
#define API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/g722/audio_encoder_g722_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G722 encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderG722 {
using Config = AudioEncoderG722Config;
static absl::optional<AudioEncoderG722Config> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderG722Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderG722Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_

View file

@ -0,0 +1,29 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
#define API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
#include "api/audio_codecs/audio_encoder.h"
namespace webrtc {
struct AudioEncoderG722Config {
bool IsOk() const {
return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1 &&
num_channels <= AudioEncoder::kMaxNumberOfChannels;
}
int frame_size_ms = 20;
int num_channels = 1;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_

View file

@ -0,0 +1,58 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_source_set("audio_encoder_ilbc_config") {
visibility = [ "*" ]
sources = [ "audio_encoder_ilbc_config.h" ]
}
rtc_library("audio_encoder_ilbc") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_ilbc.cc",
"audio_encoder_ilbc.h",
]
deps = [
":audio_encoder_ilbc_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:ilbc",
"../../../rtc_base:safe_conversions",
"../../../rtc_base:safe_minmax",
"../../../rtc_base:stringutils",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_ilbc") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_ilbc.cc",
"audio_decoder_ilbc.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:ilbc",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
namespace webrtc {
absl::optional<AudioDecoderIlbc::Config> AudioDecoderIlbc::SdpToConfig(
const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ILBC") &&
format.clockrate_hz == 8000 && format.num_channels == 1) {
return Config();
}
return absl::nullopt;
}
void AudioDecoderIlbc::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ILBC", 8000, 1}, {8000, 1, 13300}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIlbc::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
return std::make_unique<AudioDecoderIlbcImpl>();
}
} // namespace webrtc

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
#define API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
namespace webrtc {
// ILBC decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct AudioDecoderIlbc {
struct Config {}; // Empty---no config values needed!
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace {
int GetIlbcBitrate(int ptime) {
switch (ptime) {
case 20:
case 40:
// 38 bytes per frame of 20 ms => 15200 bits/s.
return 15200;
case 30:
case 60:
// 50 bytes per frame of 30 ms => (approx) 13333 bits/s.
return 13333;
default:
RTC_CHECK_NOTREACHED();
}
}
} // namespace
absl::optional<AudioEncoderIlbcConfig> AudioEncoderIlbc::SdpToConfig(
const SdpAudioFormat& format) {
if (!absl::EqualsIgnoreCase(format.name, "ILBC") ||
format.clockrate_hz != 8000 || format.num_channels != 1) {
return absl::nullopt;
}
AudioEncoderIlbcConfig config;
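// An SDP "ptime" parameter, if present and positive, selects the frame size:
// it is rounded down to a multiple of 10 ms and clamped to [20, 60] ms. A
// resulting 50 ms frame size fails config.IsOk() below, since iLBC only
// supports 20, 30, 40 and 60 ms frames.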
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime > 0) {
const int whole_packets = *ptime / 10;
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 20, 60);
}
}
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
}
void AudioEncoderIlbc::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
const SdpAudioFormat fmt = {"ILBC", 8000, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
AudioCodecInfo AudioEncoderIlbc::QueryAudioEncoder(
const AudioEncoderIlbcConfig& config) {
RTC_DCHECK(config.IsOk());
return {8000, 1, GetIlbcBitrate(config.frame_size_ms)};
}
std::unique_ptr<AudioEncoder> AudioEncoderIlbc::MakeAudioEncoder(
const AudioEncoderIlbcConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioEncoderIlbcImpl>(config, payload_type);
}
} // namespace webrtc

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
#define API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h"
#include "api/field_trials_view.h"
namespace webrtc {
// ILBC encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct AudioEncoderIlbc {
using Config = AudioEncoderIlbcConfig;
static absl::optional<AudioEncoderIlbcConfig> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderIlbcConfig& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderIlbcConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_CONFIG_H_
#define API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_CONFIG_H_
namespace webrtc {
struct AudioEncoderIlbcConfig {
bool IsOk() const {
return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||
frame_size_ms == 60);
}
int frame_size_ms = 30; // Valid values are 20, 30, 40, and 60 ms.
// Note that frame size 40 ms produces encodings with two 20 ms frames in
// them, and frame size 60 ms consists of two 30 ms frames.
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_CONFIG_H_

View file

@ -0,0 +1,110 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_library("audio_encoder_opus_config") {
visibility = [ "*" ]
sources = [
"audio_encoder_multi_channel_opus_config.cc",
"audio_encoder_multi_channel_opus_config.h",
"audio_encoder_opus_config.cc",
"audio_encoder_opus_config.h",
]
deps = [ "../../../rtc_base/system:rtc_export" ]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
defines = []
if (rtc_opus_variable_complexity) {
defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=1" ]
} else {
defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=0" ]
}
}
rtc_source_set("audio_decoder_opus_config") {
visibility = [ "*" ]
sources = [ "audio_decoder_multi_channel_opus_config.h" ]
deps = [ "..:audio_codecs_api" ]
}
rtc_library("audio_encoder_opus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_opus.h" ]
sources = [ "audio_encoder_opus.cc" ]
deps = [
":audio_encoder_opus_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_opus",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_opus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_opus.cc",
"audio_decoder_opus.h",
]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_opus",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_encoder_multiopus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_multi_channel_opus.h" ]
sources = [ "audio_encoder_multi_channel_opus.cc" ]
deps = [
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_multiopus",
"../../../rtc_base/system:rtc_export",
"../opus:audio_encoder_opus_config",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("audio_decoder_multiopus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_multi_channel_opus.cc",
"audio_decoder_multi_channel_opus.h",
]
deps = [
":audio_decoder_opus_config",
"..:audio_codecs_api",
"../../../api:field_trials_view",
"../../../modules/audio_coding:webrtc_multiopus",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h"
namespace webrtc {
absl::optional<AudioDecoderMultiChannelOpusConfig>
AudioDecoderMultiChannelOpus::SdpToConfig(const SdpAudioFormat& format) {
return AudioDecoderMultiChannelOpusImpl::SdpToConfig(format);
}
void AudioDecoderMultiChannelOpus::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
// To get full use of the surround support in the Opus library, we could mark
// which channel carries the low-frequency effects (LFE), but that is not done
// at the moment.
{
AudioCodecInfo surround_5_1_opus_info{48000, 6,
/* default_bitrate_bps= */ 128000};
surround_5_1_opus_info.allow_comfort_noise = false;
surround_5_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
6,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,4,1,2,3,5"},
{"num_streams", "4"},
{"coupled_streams", "2"}}});
specs->push_back({std::move(opus_format), surround_5_1_opus_info});
}
{
AudioCodecInfo surround_7_1_opus_info{48000, 8,
/* default_bitrate_bps= */ 200000};
surround_7_1_opus_info.allow_comfort_noise = false;
surround_7_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
8,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,6,1,2,3,4,5,7"},
{"num_streams", "5"},
{"coupled_streams", "3"}}});
specs->push_back({std::move(opus_format), surround_7_1_opus_info});
}
}
std::unique_ptr<AudioDecoder> AudioDecoderMultiChannelOpus::MakeAudioDecoder(
AudioDecoderMultiChannelOpusConfig config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
return AudioDecoderMultiChannelOpusImpl::MakeAudioDecoder(config);
}
} // namespace webrtc

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderMultiChannelOpus {
using Config = AudioDecoderMultiChannelOpusConfig;
static absl::optional<AudioDecoderMultiChannelOpusConfig> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
AudioDecoderMultiChannelOpusConfig config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#include <vector>
#include "api/audio_codecs/audio_decoder.h"
namespace webrtc {
struct AudioDecoderMultiChannelOpusConfig {
// The number of channels that the decoder will output.
int num_channels;
// Number of mono or stereo encoded Opus streams.
int num_streams;
// Number of channel pairs coupled together, see RFC 7845 section
// 5.1.1. Has to be less than the number of streams.
int coupled_streams;
// Channel mapping table, defines the mapping from encoded streams to output
// channels. See RFC 7845 section 5.1.1.
std::vector<unsigned char> channel_mapping;
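// Example (the 5.1 layout advertised by AudioDecoderMultiChannelOpus):
//   num_channels = 6, num_streams = 4, coupled_streams = 2,
//   channel_mapping = {0, 4, 1, 2, 3, 5}.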
bool IsOk() const {
if (num_channels < 1 || num_channels > AudioDecoder::kMaxNumberOfChannels ||
num_streams < 0 || coupled_streams < 0) {
return false;
}
if (num_streams < coupled_streams) {
return false;
}
if (channel_mapping.size() != static_cast<size_t>(num_channels)) {
return false;
}
// Every mono stream codes one channel, every coupled stream codes two. This
// is the total coded channel count:
const int max_coded_channel = num_streams + coupled_streams;
for (const auto& x : channel_mapping) {
// Coded channels >= max_coded_channel don't exist. Except for 255, which
// tells Opus to put silence in output channel x.
if (x >= max_coded_channel && x != 255) {
return false;
}
}
if (num_channels > 255 || max_coded_channel >= 255) {
return false;
}
return true;
}
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_

View file

@ -0,0 +1,86 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
namespace webrtc {
bool AudioDecoderOpus::Config::IsOk() const {
if (sample_rate_hz != 16000 && sample_rate_hz != 48000) {
// Unsupported sample rate. (libopus supports a few other rates as
// well; we can add support for them when needed.)
return false;
}
if (num_channels != 1 && num_channels != 2) {
return false;
}
return true;
}
absl::optional<AudioDecoderOpus::Config> AudioDecoderOpus::SdpToConfig(
const SdpAudioFormat& format) {
const auto num_channels = [&]() -> absl::optional<int> {
auto stereo = format.parameters.find("stereo");
if (stereo != format.parameters.end()) {
if (stereo->second == "0") {
return 1;
} else if (stereo->second == "1") {
return 2;
} else {
return absl::nullopt; // Bad stereo parameter.
}
}
return 1; // Default to mono.
}();
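// Per RFC 7587, the "opus" SDP format is always declared as opus/48000/2;
// the actual channel count is signaled with the "stereo" parameter handled
// above.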
if (absl::EqualsIgnoreCase(format.name, "opus") &&
format.clockrate_hz == 48000 && format.num_channels == 2 &&
num_channels) {
Config config;
config.num_channels = *num_channels;
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return absl::nullopt;
}
return config;
} else {
return absl::nullopt;
}
}
void AudioDecoderOpus::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
AudioCodecInfo opus_info{48000, 1, 64000, 6000, 510000};
opus_info.allow_comfort_noise = false;
opus_info.supports_network_adaption = true;
SdpAudioFormat opus_format(
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}});
specs->push_back({std::move(opus_format), opus_info});
}
std::unique_ptr<AudioDecoder> AudioDecoderOpus::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return std::make_unique<AudioDecoderOpusImpl>(config.num_channels,
config.sample_rate_hz);
}
} // namespace webrtc

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderOpus {
struct Config {
bool IsOk() const; // Checks if the values are currently OK.
int sample_rate_hz = 48000;
int num_channels = 1;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
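
Editorial sketch (not part of this commit): using this struct as a template parameter to CreateAudioDecoderFactory<...>(), as described in the comment above; mirrors the factory-template unit tests later in this commit.

#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"

// Build a decoder factory that only knows about Opus, then create a decoder:
auto factory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();
std::unique_ptr<webrtc::AudioDecoder> decoder =
    factory->MakeAudioDecoder({"opus", 48000, 2}, absl::nullopt);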

View file

@ -0,0 +1,75 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include <utility>
#include "modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h"
namespace webrtc {
absl::optional<AudioEncoderMultiChannelOpusConfig>
AudioEncoderMultiChannelOpus::SdpToConfig(const SdpAudioFormat& format) {
return AudioEncoderMultiChannelOpusImpl::SdpToConfig(format);
}
void AudioEncoderMultiChannelOpus::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
// To get full utilization of the surround support of the Opus lib, we could
// mark which channel carries the low-frequency effects (LFE), but that is not
// done at the moment.
{
AudioCodecInfo surround_5_1_opus_info{48000, 6,
/* default_bitrate_bps= */ 128000};
surround_5_1_opus_info.allow_comfort_noise = false;
surround_5_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
6,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,4,1,2,3,5"},
{"num_streams", "4"},
{"coupled_streams", "2"}}});
specs->push_back({std::move(opus_format), surround_5_1_opus_info});
}
{
AudioCodecInfo surround_7_1_opus_info{48000, 8,
/* default_bitrate_bps= */ 200000};
surround_7_1_opus_info.allow_comfort_noise = false;
surround_7_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
8,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,6,1,2,3,4,5,7"},
{"num_streams", "5"},
{"coupled_streams", "3"}}});
specs->push_back({std::move(opus_format), surround_7_1_opus_info});
}
}
AudioCodecInfo AudioEncoderMultiChannelOpus::QueryAudioEncoder(
const AudioEncoderMultiChannelOpusConfig& config) {
return AudioEncoderMultiChannelOpusImpl::QueryAudioEncoder(config);
}
std::unique_ptr<AudioEncoder> AudioEncoderMultiChannelOpus::MakeAudioEncoder(
const AudioEncoderMultiChannelOpusConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
return AudioEncoderMultiChannelOpusImpl::MakeAudioEncoder(config,
payload_type);
}
} // namespace webrtc
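
Editorial sketch (not part of this commit): feeding the advertised 5.1 SDP format above back through SdpToConfig. The exact acceptance rules live in AudioEncoderMultiChannelOpusImpl, so the comment about rejection is an assumption about that implementation.

webrtc::SdpAudioFormat surround_5_1("multiopus", 48000, 6,
                                    {{"channel_mapping", "0,4,1,2,3,5"},
                                     {"num_streams", "4"},
                                     {"coupled_streams", "2"}});
auto config = webrtc::AudioEncoderMultiChannelOpus::SdpToConfig(surround_5_1);
// With a complete parameter set this presumably yields a populated config;
// missing or malformed multichannel parameters would yield absl::nullopt.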

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderMultiChannelOpus {
using Config = AudioEncoderMultiChannelOpusConfig;
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_

View file

@ -0,0 +1,107 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
namespace webrtc {
namespace {
constexpr int kDefaultComplexity = 9;
} // namespace
AudioEncoderMultiChannelOpusConfig::AudioEncoderMultiChannelOpusConfig()
: frame_size_ms(kDefaultFrameSizeMs),
num_channels(1),
application(ApplicationMode::kVoip),
bitrate_bps(32000),
fec_enabled(false),
cbr_enabled(false),
dtx_enabled(false),
max_playback_rate_hz(48000),
complexity(kDefaultComplexity),
num_streams(-1),
coupled_streams(-1) {}
AudioEncoderMultiChannelOpusConfig::AudioEncoderMultiChannelOpusConfig(
const AudioEncoderMultiChannelOpusConfig&) = default;
AudioEncoderMultiChannelOpusConfig::~AudioEncoderMultiChannelOpusConfig() =
default;
AudioEncoderMultiChannelOpusConfig&
AudioEncoderMultiChannelOpusConfig::operator=(
const AudioEncoderMultiChannelOpusConfig&) = default;
bool AudioEncoderMultiChannelOpusConfig::IsOk() const {
if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
return false;
if (num_channels >= 255) {
return false;
}
if (bitrate_bps < kMinBitrateBps || bitrate_bps > kMaxBitrateBps)
return false;
if (complexity < 0 || complexity > 10)
return false;
// Check the lengths:
if (num_streams < 0 || coupled_streams < 0) {
return false;
}
if (num_streams < coupled_streams) {
return false;
}
if (channel_mapping.size() != static_cast<size_t>(num_channels)) {
return false;
}
// Every mono stream codes one channel, every coupled stream codes two. This
// is the total coded channel count:
const int max_coded_channel = num_streams + coupled_streams;
for (const auto& x : channel_mapping) {
// Coded channels >= max_coded_channel don't exist. Except for 255, which
// tells Opus to ignore input channel x.
if (x >= max_coded_channel && x != 255) {
return false;
}
}
// Inverse mapping.
constexpr int kNotSet = -1;
std::vector<int> coded_channels_to_input_channels(max_coded_channel, kNotSet);
for (size_t i = 0; i < num_channels; ++i) {
if (channel_mapping[i] == 255) {
continue;
}
// If it's not ignored, put it in the inverted mapping. But first check if
// we've told Opus to use another input channel for this coded channel:
const int coded_channel = channel_mapping[i];
if (coded_channels_to_input_channels[coded_channel] != kNotSet) {
// Coded channel `coded_channel` comes from both input channels
// `coded_channels_to_input_channels[coded_channel]` and `i`.
return false;
}
coded_channels_to_input_channels[coded_channel] = i;
}
// Check that we specified what input the encoder should use to produce
// every coded channel.
for (int i = 0; i < max_coded_channel; ++i) {
if (coded_channels_to_input_channels[i] == kNotSet) {
// Coded channel `i` has unspecified input channel.
return false;
}
}
if (num_channels > 255 || max_coded_channel >= 255) {
return false;
}
return true;
}
} // namespace webrtc
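
Editorial sketch (not part of this commit): a configuration that the inverse-mapping checks above reject, because one coded channel is fed by two inputs and another by none.

webrtc::AudioEncoderMultiChannelOpusConfig config;  // defaults: 20 ms frames, 32 kbps
config.num_channels = 2;
config.num_streams = 2;
config.coupled_streams = 0;
config.channel_mapping = {0, 0};  // both inputs feed coded channel 0...
// ...so coded channel 0 has two inputs and coded channel 1 has none; the
// inverse-mapping loop above returns false.
bool ok = config.IsOk();  // false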

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#include <stddef.h>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
struct RTC_EXPORT AudioEncoderMultiChannelOpusConfig {
static constexpr int kDefaultFrameSizeMs = 20;
// Opus API allows a min bitrate of 500bps, but Opus documentation suggests
// bitrate should be in the range of 6000 to 510000, inclusive.
static constexpr int kMinBitrateBps = 6000;
static constexpr int kMaxBitrateBps = 510000;
AudioEncoderMultiChannelOpusConfig();
AudioEncoderMultiChannelOpusConfig(const AudioEncoderMultiChannelOpusConfig&);
~AudioEncoderMultiChannelOpusConfig();
AudioEncoderMultiChannelOpusConfig& operator=(
const AudioEncoderMultiChannelOpusConfig&);
int frame_size_ms;
size_t num_channels;
enum class ApplicationMode { kVoip, kAudio };
ApplicationMode application;
int bitrate_bps;
bool fec_enabled;
bool cbr_enabled;
bool dtx_enabled;
int max_playback_rate_hz;
std::vector<int> supported_frame_lengths_ms;
int complexity;
// Number of mono/stereo Opus streams.
int num_streams;
// Number of channel pairs coupled together; see RFC 7845 section 5.1.1. Must
// be no greater than the number of streams.
int coupled_streams;
// Channel mapping table, defines the mapping from encoded streams to input
// channels. See RFC 7845 section 5.1.1.
std::vector<unsigned char> channel_mapping;
bool IsOk() const;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
namespace webrtc {
absl::optional<AudioEncoderOpusConfig> AudioEncoderOpus::SdpToConfig(
const SdpAudioFormat& format) {
return AudioEncoderOpusImpl::SdpToConfig(format);
}
void AudioEncoderOpus::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
AudioEncoderOpusImpl::AppendSupportedEncoders(specs);
}
AudioCodecInfo AudioEncoderOpus::QueryAudioEncoder(
const AudioEncoderOpusConfig& config) {
return AudioEncoderOpusImpl::QueryAudioEncoder(config);
}
std::unique_ptr<AudioEncoder> AudioEncoderOpus::MakeAudioEncoder(
const AudioEncoderOpusConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
const FieldTrialsView* field_trials) {
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return nullptr;
}
return AudioEncoderOpusImpl::MakeAudioEncoder(config, payload_type);
}
} // namespace webrtc

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
#include "api/field_trials_view.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderOpus {
using Config = AudioEncoderOpusConfig;
static absl::optional<AudioEncoderOpusConfig> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderOpusConfig& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderOpusConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
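
Editorial sketch (not part of this commit): using this struct as a template parameter to CreateAudioEncoderFactory<...>(), matching the factory-template unit tests later in this commit. The payload type 111 is an arbitrary dynamic payload type chosen for illustration.

#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"

auto factory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
// Query the codec capabilities for an SDP format, then create an encoder:
absl::optional<webrtc::AudioCodecInfo> info =
    factory->QueryAudioEncoder({"opus", 48000, 2});
std::unique_ptr<webrtc::AudioEncoder> encoder =
    factory->MakeAudioEncoder(/*payload_type=*/111, {"opus", 48000, 2},
                              absl::nullopt);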

View file

@ -0,0 +1,75 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
namespace webrtc {
namespace {
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
constexpr int kDefaultComplexity = 5;
#else
constexpr int kDefaultComplexity = 9;
#endif
constexpr int kDefaultLowRateComplexity =
WEBRTC_OPUS_VARIABLE_COMPLEXITY ? 9 : kDefaultComplexity;
} // namespace
constexpr int AudioEncoderOpusConfig::kDefaultFrameSizeMs;
constexpr int AudioEncoderOpusConfig::kMinBitrateBps;
constexpr int AudioEncoderOpusConfig::kMaxBitrateBps;
AudioEncoderOpusConfig::AudioEncoderOpusConfig()
: frame_size_ms(kDefaultFrameSizeMs),
sample_rate_hz(48000),
num_channels(1),
application(ApplicationMode::kVoip),
bitrate_bps(32000),
fec_enabled(false),
cbr_enabled(false),
max_playback_rate_hz(48000),
complexity(kDefaultComplexity),
low_rate_complexity(kDefaultLowRateComplexity),
complexity_threshold_bps(12500),
complexity_threshold_window_bps(1500),
dtx_enabled(false),
uplink_bandwidth_update_interval_ms(200),
payload_type(-1) {}
AudioEncoderOpusConfig::AudioEncoderOpusConfig(const AudioEncoderOpusConfig&) =
default;
AudioEncoderOpusConfig::~AudioEncoderOpusConfig() = default;
AudioEncoderOpusConfig& AudioEncoderOpusConfig::operator=(
const AudioEncoderOpusConfig&) = default;
bool AudioEncoderOpusConfig::IsOk() const {
if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
return false;
if (sample_rate_hz != 16000 && sample_rate_hz != 48000) {
// Unsupported input sample rate. (libopus supports a few other rates as
// well; we can add support for them when needed.)
return false;
}
if (num_channels >= 255) {
return false;
}
if (!bitrate_bps)
return false;
if (*bitrate_bps < kMinBitrateBps || *bitrate_bps > kMaxBitrateBps)
return false;
if (complexity < 0 || complexity > 10)
return false;
if (low_rate_complexity < 0 || low_rate_complexity > 10)
return false;
return true;
}
} // namespace webrtc
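
Editorial sketch (not part of this commit): the IsOk() behaviour above for the optional bitrate member, using only the defaults set by the constructor.

webrtc::AudioEncoderOpusConfig config;  // defaults: 48 kHz, mono, 32 kbps, 20 ms
bool ok_default = config.IsOk();        // true: all defaults pass the checks
config.bitrate_bps = absl::nullopt;     // the member that "must always be set"...
bool ok_unset = config.IsOk();          // ...false: a missing bitrate is rejected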

View file

@ -0,0 +1,74 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_CONFIG_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_CONFIG_H_
#include <stddef.h>
#include <vector>
#include "absl/types/optional.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
struct RTC_EXPORT AudioEncoderOpusConfig {
static constexpr int kDefaultFrameSizeMs = 20;
// Opus API allows a min bitrate of 500bps, but Opus documentation suggests
// bitrate should be in the range of 6000 to 510000, inclusive.
static constexpr int kMinBitrateBps = 6000;
static constexpr int kMaxBitrateBps = 510000;
AudioEncoderOpusConfig();
AudioEncoderOpusConfig(const AudioEncoderOpusConfig&);
~AudioEncoderOpusConfig();
AudioEncoderOpusConfig& operator=(const AudioEncoderOpusConfig&);
bool IsOk() const; // Checks if the values are currently OK.
int frame_size_ms;
int sample_rate_hz;
size_t num_channels;
enum class ApplicationMode { kVoip, kAudio };
ApplicationMode application;
// NOTE: This member must always be set.
// TODO(kwiberg): Turn it into just an int.
absl::optional<int> bitrate_bps;
bool fec_enabled;
bool cbr_enabled;
int max_playback_rate_hz;
// `complexity` is used when the bitrate goes above
// `complexity_threshold_bps` + `complexity_threshold_window_bps`;
// `low_rate_complexity` is used when the bitrate falls below
// `complexity_threshold_bps` - `complexity_threshold_window_bps`. In the
// interval in the middle, we keep using the most recent of the two
// complexity settings.
int complexity;
int low_rate_complexity;
int complexity_threshold_bps;
int complexity_threshold_window_bps;
bool dtx_enabled;
std::vector<int> supported_frame_lengths_ms;
int uplink_bandwidth_update_interval_ms;
// NOTE: This member isn't necessary, and will soon go away. See
// https://bugs.chromium.org/p/webrtc/issues/detail?id=7847
int payload_type;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_CONFIG_H_
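
Editorial sketch (not part of this commit): a hypothetical helper, not part of the WebRTC API, illustrating the complexity hysteresis described in the comments above. With the default thresholds (12500 +/- 1500 bps), rates above 14000 bps use `complexity`, rates below 11000 bps use `low_rate_complexity`, and rates in between keep the most recent choice.

#include "api/audio_codecs/opus/audio_encoder_opus_config.h"

// Hypothetical illustration of the hysteresis; the real logic lives in the
// encoder implementation, not in this header.
int ChooseComplexity(const webrtc::AudioEncoderOpusConfig& c,
                     int bitrate_bps,
                     int last_complexity) {
  if (bitrate_bps >
      c.complexity_threshold_bps + c.complexity_threshold_window_bps) {
    return c.complexity;            // clearly above the window
  }
  if (bitrate_bps <
      c.complexity_threshold_bps - c.complexity_threshold_window_bps) {
    return c.low_rate_complexity;   // clearly below the window
  }
  return last_complexity;           // inside the window: keep the last setting
}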

View file

@ -0,0 +1,49 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus_audio_decoder_factory.h"
#include <memory>
#include <vector>
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
namespace webrtc {
namespace {
// Modify an audio decoder to not advertise support for anything.
template <typename T>
struct NotAdvertised {
using Config = typename T::Config;
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
return T::SdpToConfig(audio_format);
}
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
// Don't advertise support for anything.
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
return T::MakeAudioDecoder(config, codec_pair_id);
}
};
} // namespace
rtc::scoped_refptr<AudioDecoderFactory> CreateOpusAudioDecoderFactory() {
return CreateAudioDecoderFactory<
AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>>();
}
} // namespace webrtc
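
Editorial sketch (not part of this commit): the effect of the NotAdvertised wrapper above. Only plain Opus appears in the advertised decoder list, but a multi-channel Opus decoder can still be created on request, assuming the multichannel fmtp parameters are complete and consistent.

rtc::scoped_refptr<webrtc::AudioDecoderFactory> factory =
    webrtc::CreateOpusAudioDecoderFactory();
// Advertised specs contain only the plain "opus" entry:
std::vector<webrtc::AudioCodecSpec> specs = factory->GetSupportedDecoders();
// ...yet an unadvertised multi-channel decoder can still be instantiated:
auto dec = factory->MakeAudioDecoder(
    {"multiopus", 48000, 6,
     {{"channel_mapping", "0,4,1,2,3,5"},
      {"num_streams", "4"},
      {"coupled_streams", "2"}}},
    absl::nullopt);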

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_FACTORY_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_FACTORY_H_
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/scoped_refptr.h"
namespace webrtc {
// Creates a new factory that can create only Opus audio decoders. Works like
// CreateAudioDecoderFactory<AudioDecoderOpus>(), but is easier to use and is
// not inline because it isn't a template.
rtc::scoped_refptr<AudioDecoderFactory> CreateOpusAudioDecoderFactory();
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_FACTORY_H_

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus_audio_encoder_factory.h"
#include <memory>
#include <vector>
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
namespace webrtc {
namespace {
// Modify an audio encoder to not advertise support for anything.
template <typename T>
struct NotAdvertised {
using Config = typename T::Config;
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
return T::SdpToConfig(audio_format);
}
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
// Don't advertise support for anything.
}
static AudioCodecInfo QueryAudioEncoder(const Config& config) {
return T::QueryAudioEncoder(config);
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
const FieldTrialsView* field_trials = nullptr) {
return T::MakeAudioEncoder(config, payload_type, codec_pair_id,
field_trials);
}
};
} // namespace
rtc::scoped_refptr<AudioEncoderFactory> CreateOpusAudioEncoderFactory() {
return CreateAudioEncoderFactory<
AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>>();
}
} // namespace webrtc

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_FACTORY_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_FACTORY_H_
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/scoped_refptr.h"
namespace webrtc {
// Creates a new factory that can create only Opus audio encoders. Works like
// CreateAudioEncoderFactory<AudioEncoderOpus>(), but is easier to use and is
// not inline because it isn't a template.
rtc::scoped_refptr<AudioEncoderFactory> CreateOpusAudioEncoderFactory();
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_FACTORY_H_

View file

@ -0,0 +1,39 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
if (rtc_include_tests) {
rtc_library("audio_codecs_api_unittests") {
testonly = true
sources = [
"audio_decoder_factory_template_unittest.cc",
"audio_encoder_factory_template_unittest.cc",
]
deps = [
"..:audio_codecs_api",
"../../../test:audio_codec_mocks",
"../../../test:scoped_key_value_config",
"../../../test:test_support",
"../L16:audio_decoder_L16",
"../L16:audio_encoder_L16",
"../g711:audio_decoder_g711",
"../g711:audio_encoder_g711",
"../g722:audio_decoder_g722",
"../g722:audio_encoder_g722",
"../ilbc:audio_decoder_ilbc",
"../ilbc:audio_encoder_ilbc",
"../opus:audio_decoder_opus",
"../opus:audio_encoder_opus",
]
}
}

View file

@ -0,0 +1,222 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include <memory>
#include "api/audio_codecs/L16/audio_decoder_L16.h"
#include "api/audio_codecs/g711/audio_decoder_g711.h"
#include "api/audio_codecs/g722/audio_decoder_g722.h"
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/mock_audio_decoder.h"
#include "test/scoped_key_value_config.h"
namespace webrtc {
namespace {
struct BogusParams {
static SdpAudioFormat AudioFormat() { return {"bogus", 8000, 1}; }
static AudioCodecInfo CodecInfo() { return {8000, 1, 12345}; }
};
struct ShamParams {
static SdpAudioFormat AudioFormat() {
return {"sham", 16000, 2, {{"param", "value"}}};
}
static AudioCodecInfo CodecInfo() { return {16000, 2, 23456}; }
};
template <typename Params>
struct AudioDecoderFakeApi {
struct Config {
SdpAudioFormat audio_format;
};
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
if (Params::AudioFormat() == audio_format) {
Config config = {audio_format};
return config;
} else {
return absl::nullopt;
}
}
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
specs->push_back({Params::AudioFormat(), Params::CodecInfo()});
}
static AudioCodecInfo QueryAudioDecoder(const Config&) {
return Params::CodecInfo();
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config&,
absl::optional<AudioCodecPairId> /*codec_pair_id*/ = absl::nullopt) {
auto dec = std::make_unique<testing::StrictMock<MockAudioDecoder>>();
EXPECT_CALL(*dec, SampleRateHz())
.WillOnce(::testing::Return(Params::CodecInfo().sample_rate_hz));
EXPECT_CALL(*dec, Die());
return std::move(dec);
}
};
} // namespace
TEST(AudioDecoderFactoryTemplateTest, NoDecoderTypes) {
test::ScopedKeyValueConfig field_trials;
rtc::scoped_refptr<AudioDecoderFactory> factory(
rtc::make_ref_counted<
audio_decoder_factory_template_impl::AudioDecoderFactoryT<>>(
&field_trials));
EXPECT_THAT(factory->GetSupportedDecoders(), ::testing::IsEmpty());
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
}
TEST(AudioDecoderFactoryTemplateTest, OneDecoderType) {
auto factory = CreateAudioDecoderFactory<AudioDecoderFakeApi<BogusParams>>();
EXPECT_THAT(factory->GetSupportedDecoders(),
::testing::ElementsAre(
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}}));
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"bogus", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
auto dec = factory->MakeAudioDecoder({"bogus", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, dec);
EXPECT_EQ(8000, dec->SampleRateHz());
}
TEST(AudioDecoderFactoryTemplateTest, TwoDecoderTypes) {
auto factory = CreateAudioDecoderFactory<AudioDecoderFakeApi<BogusParams>,
AudioDecoderFakeApi<ShamParams>>();
EXPECT_THAT(factory->GetSupportedDecoders(),
::testing::ElementsAre(
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}},
AudioCodecSpec{{"sham", 16000, 2, {{"param", "value"}}},
{16000, 2, 23456}}));
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"bogus", 8000, 1}));
EXPECT_TRUE(
factory->IsSupportedDecoder({"sham", 16000, 2, {{"param", "value"}}}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
auto dec1 = factory->MakeAudioDecoder({"bogus", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, dec1);
EXPECT_EQ(8000, dec1->SampleRateHz());
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"sham", 16000, 2}, absl::nullopt));
auto dec2 = factory->MakeAudioDecoder(
{"sham", 16000, 2, {{"param", "value"}}}, absl::nullopt);
ASSERT_NE(nullptr, dec2);
EXPECT_EQ(16000, dec2->SampleRateHz());
}
TEST(AudioDecoderFactoryTemplateTest, G711) {
auto factory = CreateAudioDecoderFactory<AudioDecoderG711>();
EXPECT_THAT(factory->GetSupportedDecoders(),
::testing::ElementsAre(
AudioCodecSpec{{"PCMU", 8000, 1}, {8000, 1, 64000}},
AudioCodecSpec{{"PCMA", 8000, 1}, {8000, 1, 64000}}));
EXPECT_FALSE(factory->IsSupportedDecoder({"G711", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"PCMU", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"pcma", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"pcmu", 16000, 1}, absl::nullopt));
auto dec1 = factory->MakeAudioDecoder({"pcmu", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, dec1);
EXPECT_EQ(8000, dec1->SampleRateHz());
auto dec2 = factory->MakeAudioDecoder({"PCMA", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, dec2);
EXPECT_EQ(8000, dec2->SampleRateHz());
}
TEST(AudioDecoderFactoryTemplateTest, G722) {
auto factory = CreateAudioDecoderFactory<AudioDecoderG722>();
EXPECT_THAT(factory->GetSupportedDecoders(),
::testing::ElementsAre(
AudioCodecSpec{{"G722", 8000, 1}, {16000, 1, 64000}}));
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"G722", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
auto dec1 = factory->MakeAudioDecoder({"G722", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, dec1);
EXPECT_EQ(16000, dec1->SampleRateHz());
EXPECT_EQ(1u, dec1->Channels());
auto dec2 = factory->MakeAudioDecoder({"G722", 8000, 2}, absl::nullopt);
ASSERT_NE(nullptr, dec2);
EXPECT_EQ(16000, dec2->SampleRateHz());
EXPECT_EQ(2u, dec2->Channels());
auto dec3 = factory->MakeAudioDecoder({"G722", 8000, 3}, absl::nullopt);
ASSERT_EQ(nullptr, dec3);
}
TEST(AudioDecoderFactoryTemplateTest, Ilbc) {
auto factory = CreateAudioDecoderFactory<AudioDecoderIlbc>();
EXPECT_THAT(factory->GetSupportedDecoders(),
::testing::ElementsAre(
AudioCodecSpec{{"ILBC", 8000, 1}, {8000, 1, 13300}}));
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"ilbc", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"bar", 8000, 1}, absl::nullopt));
auto dec = factory->MakeAudioDecoder({"ilbc", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, dec);
EXPECT_EQ(8000, dec->SampleRateHz());
}
TEST(AudioDecoderFactoryTemplateTest, L16) {
auto factory = CreateAudioDecoderFactory<AudioDecoderL16>();
EXPECT_THAT(
factory->GetSupportedDecoders(),
::testing::ElementsAre(
AudioCodecSpec{{"L16", 8000, 1}, {8000, 1, 8000 * 16}},
AudioCodecSpec{{"L16", 16000, 1}, {16000, 1, 16000 * 16}},
AudioCodecSpec{{"L16", 32000, 1}, {32000, 1, 32000 * 16}},
AudioCodecSpec{{"L16", 8000, 2}, {8000, 2, 8000 * 16 * 2}},
AudioCodecSpec{{"L16", 16000, 2}, {16000, 2, 16000 * 16 * 2}},
AudioCodecSpec{{"L16", 32000, 2}, {32000, 2, 32000 * 16 * 2}}));
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"L16", 48000, 1}));
EXPECT_FALSE(factory->IsSupportedDecoder({"L16", 96000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"L16", 8000, 0}, absl::nullopt));
auto dec = factory->MakeAudioDecoder({"L16", 48000, 2}, absl::nullopt);
ASSERT_NE(nullptr, dec);
EXPECT_EQ(48000, dec->SampleRateHz());
}
TEST(AudioDecoderFactoryTemplateTest, Opus) {
auto factory = CreateAudioDecoderFactory<AudioDecoderOpus>();
AudioCodecInfo opus_info{48000, 1, 64000, 6000, 510000};
opus_info.allow_comfort_noise = false;
opus_info.supports_network_adaption = true;
const SdpAudioFormat opus_format(
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}});
EXPECT_THAT(factory->GetSupportedDecoders(),
::testing::ElementsAre(AudioCodecSpec{opus_format, opus_info}));
EXPECT_FALSE(factory->IsSupportedDecoder({"opus", 48000, 1}));
EXPECT_TRUE(factory->IsSupportedDecoder({"opus", 48000, 2}));
EXPECT_EQ(nullptr,
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
auto dec = factory->MakeAudioDecoder({"opus", 48000, 2}, absl::nullopt);
ASSERT_NE(nullptr, dec);
EXPECT_EQ(48000, dec->SampleRateHz());
}
} // namespace webrtc

View file

@ -0,0 +1,224 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include <memory>
#include "api/audio_codecs/L16/audio_encoder_L16.h"
#include "api/audio_codecs/g711/audio_encoder_g711.h"
#include "api/audio_codecs/g722/audio_encoder_g722.h"
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/mock_audio_encoder.h"
#include "test/scoped_key_value_config.h"
namespace webrtc {
namespace {
struct BogusParams {
static SdpAudioFormat AudioFormat() { return {"bogus", 8000, 1}; }
static AudioCodecInfo CodecInfo() { return {8000, 1, 12345}; }
};
struct ShamParams {
static SdpAudioFormat AudioFormat() {
return {"sham", 16000, 2, {{"param", "value"}}};
}
static AudioCodecInfo CodecInfo() { return {16000, 2, 23456}; }
};
template <typename Params>
struct AudioEncoderFakeApi {
struct Config {
SdpAudioFormat audio_format;
};
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
if (Params::AudioFormat() == audio_format) {
Config config = {audio_format};
return config;
} else {
return absl::nullopt;
}
}
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
specs->push_back({Params::AudioFormat(), Params::CodecInfo()});
}
static AudioCodecInfo QueryAudioEncoder(const Config&) {
return Params::CodecInfo();
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config&,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/ = absl::nullopt) {
auto enc = std::make_unique<testing::StrictMock<MockAudioEncoder>>();
EXPECT_CALL(*enc, SampleRateHz())
.WillOnce(::testing::Return(Params::CodecInfo().sample_rate_hz));
return std::move(enc);
}
};
} // namespace
TEST(AudioEncoderFactoryTemplateTest, NoEncoderTypes) {
test::ScopedKeyValueConfig field_trials;
rtc::scoped_refptr<AudioEncoderFactory> factory(
rtc::make_ref_counted<
audio_encoder_factory_template_impl::AudioEncoderFactoryT<>>(
&field_trials));
EXPECT_THAT(factory->GetSupportedEncoders(), ::testing::IsEmpty());
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
}
TEST(AudioEncoderFactoryTemplateTest, OneEncoderType) {
auto factory = CreateAudioEncoderFactory<AudioEncoderFakeApi<BogusParams>>();
EXPECT_THAT(factory->GetSupportedEncoders(),
::testing::ElementsAre(
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
EXPECT_EQ(AudioCodecInfo(8000, 1, 12345),
factory->QueryAudioEncoder({"bogus", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
auto enc = factory->MakeAudioEncoder(17, {"bogus", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, enc);
EXPECT_EQ(8000, enc->SampleRateHz());
}
TEST(AudioEncoderFactoryTemplateTest, TwoEncoderTypes) {
auto factory = CreateAudioEncoderFactory<AudioEncoderFakeApi<BogusParams>,
AudioEncoderFakeApi<ShamParams>>();
EXPECT_THAT(factory->GetSupportedEncoders(),
::testing::ElementsAre(
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}},
AudioCodecSpec{{"sham", 16000, 2, {{"param", "value"}}},
{16000, 2, 23456}}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
EXPECT_EQ(AudioCodecInfo(8000, 1, 12345),
factory->QueryAudioEncoder({"bogus", 8000, 1}));
EXPECT_EQ(
AudioCodecInfo(16000, 2, 23456),
factory->QueryAudioEncoder({"sham", 16000, 2, {{"param", "value"}}}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
auto enc1 = factory->MakeAudioEncoder(17, {"bogus", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, enc1);
EXPECT_EQ(8000, enc1->SampleRateHz());
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"sham", 16000, 2}, absl::nullopt));
auto enc2 = factory->MakeAudioEncoder(
17, {"sham", 16000, 2, {{"param", "value"}}}, absl::nullopt);
ASSERT_NE(nullptr, enc2);
EXPECT_EQ(16000, enc2->SampleRateHz());
}
TEST(AudioEncoderFactoryTemplateTest, G711) {
auto factory = CreateAudioEncoderFactory<AudioEncoderG711>();
EXPECT_THAT(factory->GetSupportedEncoders(),
::testing::ElementsAre(
AudioCodecSpec{{"PCMU", 8000, 1}, {8000, 1, 64000}},
AudioCodecSpec{{"PCMA", 8000, 1}, {8000, 1, 64000}}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"PCMA", 16000, 1}));
EXPECT_EQ(AudioCodecInfo(8000, 1, 64000),
factory->QueryAudioEncoder({"PCMA", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"PCMU", 16000, 1}, absl::nullopt));
auto enc1 = factory->MakeAudioEncoder(17, {"PCMU", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, enc1);
EXPECT_EQ(8000, enc1->SampleRateHz());
auto enc2 = factory->MakeAudioEncoder(17, {"PCMA", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, enc2);
EXPECT_EQ(8000, enc2->SampleRateHz());
}
TEST(AudioEncoderFactoryTemplateTest, G722) {
auto factory = CreateAudioEncoderFactory<AudioEncoderG722>();
EXPECT_THAT(factory->GetSupportedEncoders(),
::testing::ElementsAre(
AudioCodecSpec{{"G722", 8000, 1}, {16000, 1, 64000}}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
EXPECT_EQ(AudioCodecInfo(16000, 1, 64000),
factory->QueryAudioEncoder({"G722", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
auto enc = factory->MakeAudioEncoder(17, {"G722", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, enc);
EXPECT_EQ(16000, enc->SampleRateHz());
}
TEST(AudioEncoderFactoryTemplateTest, Ilbc) {
auto factory = CreateAudioEncoderFactory<AudioEncoderIlbc>();
EXPECT_THAT(factory->GetSupportedEncoders(),
::testing::ElementsAre(
AudioCodecSpec{{"ILBC", 8000, 1}, {8000, 1, 13333}}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
EXPECT_EQ(AudioCodecInfo(8000, 1, 13333),
factory->QueryAudioEncoder({"ilbc", 8000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"bar", 8000, 1}, absl::nullopt));
auto enc = factory->MakeAudioEncoder(17, {"ilbc", 8000, 1}, absl::nullopt);
ASSERT_NE(nullptr, enc);
EXPECT_EQ(8000, enc->SampleRateHz());
}
TEST(AudioEncoderFactoryTemplateTest, L16) {
auto factory = CreateAudioEncoderFactory<AudioEncoderL16>();
EXPECT_THAT(
factory->GetSupportedEncoders(),
::testing::ElementsAre(
AudioCodecSpec{{"L16", 8000, 1}, {8000, 1, 8000 * 16}},
AudioCodecSpec{{"L16", 16000, 1}, {16000, 1, 16000 * 16}},
AudioCodecSpec{{"L16", 32000, 1}, {32000, 1, 32000 * 16}},
AudioCodecSpec{{"L16", 8000, 2}, {8000, 2, 8000 * 16 * 2}},
AudioCodecSpec{{"L16", 16000, 2}, {16000, 2, 16000 * 16 * 2}},
AudioCodecSpec{{"L16", 32000, 2}, {32000, 2, 32000 * 16 * 2}}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"L16", 8000, 0}));
EXPECT_EQ(AudioCodecInfo(48000, 1, 48000 * 16),
factory->QueryAudioEncoder({"L16", 48000, 1}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"L16", 8000, 0}, absl::nullopt));
auto enc = factory->MakeAudioEncoder(17, {"L16", 48000, 2}, absl::nullopt);
ASSERT_NE(nullptr, enc);
EXPECT_EQ(48000, enc->SampleRateHz());
}
TEST(AudioEncoderFactoryTemplateTest, Opus) {
auto factory = CreateAudioEncoderFactory<AudioEncoderOpus>();
AudioCodecInfo info = {48000, 1, 32000, 6000, 510000};
info.allow_comfort_noise = false;
info.supports_network_adaption = true;
EXPECT_THAT(
factory->GetSupportedEncoders(),
::testing::ElementsAre(AudioCodecSpec{
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}},
info}));
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
EXPECT_EQ(
info,
factory->QueryAudioEncoder(
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}}));
EXPECT_EQ(nullptr,
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
auto enc = factory->MakeAudioEncoder(17, {"opus", 48000, 2}, absl::nullopt);
ASSERT_NE(nullptr, enc);
EXPECT_EQ(48000, enc->SampleRateHz());
}
} // namespace webrtc