Repo created

commit f8c34fa5ee (parent 81b91f4139)
22732 changed files with 4815320 additions and 2 deletions
TMessagesProj/jni/voip/webrtc/api/OWNERS (new file, 14 lines)
@@ -0,0 +1,14 @@
hta@webrtc.org
magjed@webrtc.org
perkj@webrtc.org
tommi@webrtc.org

# For approvals that absolutely must be done on US Pacific time
deadbeef@webrtc.org
tkchin@webrtc.org

per-file peer_connection*=hbos@webrtc.org

per-file DEPS=mbonadei@webrtc.org

per-file uma_metrics.h=kron@webrtc.org
TMessagesProj/jni/voip/webrtc/api/README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
<!-- go/cmark -->
<!--* freshness: {owner: 'hta' reviewed: '2021-01-01'} *-->

# How to write code in the `api/` directory

Mostly, just follow the regular [style guide](/g3doc/style-guide.md), but:

* Note that `api/` code is not exempt from the “`.h` and `.cc` files come in
  pairs” rule, so if you declare something in `api/path/to/foo.h`, it should be
  defined in `api/path/to/foo.cc`.
* Headers in `api/` should, if possible, not `#include` headers outside `api/`.
  It’s not always possible to avoid this, but be aware that it adds to a small
  mountain of technical debt that we’re trying to shrink.
* `.cc` files in `api/`, on the other hand, are free to `#include` headers
  outside `api/`.
* Avoid structs in `api/`; prefer classes.

The preferred way for `api/` code to access non-`api/` code is to call
it from a `.cc` file, so that users of our API headers won’t transitively
`#include` non-public headers.

For headers in `api/` that need to refer to non-public types, forward
declarations are often a lesser evil than including non-public header files. The
usual [rules](/g3doc/style-guide.md#forward-declarations) still apply, though.

`.cc` files in `api/` should preferably be kept reasonably small. If a
substantial implementation is needed, consider putting it with our non-public
code, and just call it from the `api/` `.cc` file.

Avoid defining the API with structs, as that makes it harder for the API to
evolve. Your struct may gain an invariant, or change how it represents its
data. Evolving a struct that is part of the API is particularly challenging,
because the API is designed to be used in other code bases and thus needs to be
updated independently from its usage. A class with accessors and setters makes
such a migration safer.
See [Google C++ style guide](https://google.github.io/styleguide/cppguide.html#Structs_vs._Classes) for more.

If you need to evolve an existing struct in the API, prefer first converting it
into a class.
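As an illustration of the struct-to-class migration the README recommends, a minimal sketch follows. The `VideoLimits` type and its fields are hypothetical examples, not part of the committed files; the point is that accessors keep the representation private so it can later change without breaking callers.

// Before: every field is public API surface, so the representation can
// never change without breaking users.
struct VideoLimits {
  int max_width;
  int max_height;
};

// After: the same data behind accessors and setters; the internal
// representation (and any new invariants) can now evolve independently.
class VideoLimits {
 public:
  int max_width() const { return max_width_; }
  int max_height() const { return max_height_; }
  void set_max_width(int width) { max_width_ = width; }
  void set_max_height(int height) { max_height_ = height; }

 private:
  int max_width_ = 0;
  int max_height_ = 0;
};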
TMessagesProj/jni/voip/webrtc/api/adaptation/BUILD.gn (new file, 23 lines)
@@ -0,0 +1,23 @@
# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

import("../../webrtc.gni")

rtc_source_set("resource_adaptation_api") {
  visibility = [ "*" ]
  sources = [
    "resource.cc",
    "resource.h",
  ]
  deps = [
    "../../api:scoped_refptr",
    "../../rtc_base:checks",
    "../../rtc_base:refcount",
    "../../rtc_base/system:rtc_export",
  ]
}
TMessagesProj/jni/voip/webrtc/api/adaptation/DEPS (new file, 7 lines)
@@ -0,0 +1,7 @@
specific_include_rules = {
  "resource\.h": [
    # ref_count.h is a public_deps of rtc_base:refcount. Necessary because of
    # rtc::RefCountInterface.
    "+rtc_base/ref_count.h",
  ],
}
TMessagesProj/jni/voip/webrtc/api/adaptation/resource.cc (new file, 33 lines)
@@ -0,0 +1,33 @@
/*
 * Copyright 2019 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "api/adaptation/resource.h"

#include "rtc_base/checks.h"

namespace webrtc {

const char* ResourceUsageStateToString(ResourceUsageState usage_state) {
  switch (usage_state) {
    case ResourceUsageState::kOveruse:
      return "kOveruse";
    case ResourceUsageState::kUnderuse:
      return "kUnderuse";
  }
  RTC_CHECK_NOTREACHED();
}

ResourceListener::~ResourceListener() {}

Resource::Resource() {}

Resource::~Resource() {}

}  // namespace webrtc
TMessagesProj/jni/voip/webrtc/api/adaptation/resource.h (new file, 67 lines)
@@ -0,0 +1,67 @@
/*
 * Copyright 2019 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_ADAPTATION_RESOURCE_H_
#define API_ADAPTATION_RESOURCE_H_

#include <string>

#include "api/scoped_refptr.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {

class Resource;

enum class ResourceUsageState {
  // Action is needed to minimize the load on this resource.
  kOveruse,
  // Increasing the load on this resource is desired, if possible.
  kUnderuse,
};

RTC_EXPORT const char* ResourceUsageStateToString(
    ResourceUsageState usage_state);

class RTC_EXPORT ResourceListener {
 public:
  virtual ~ResourceListener();

  virtual void OnResourceUsageStateMeasured(
      rtc::scoped_refptr<Resource> resource,
      ResourceUsageState usage_state) = 0;
};

// A Resource monitors an implementation-specific resource. It may report
// kOveruse or kUnderuse when resource usage is high or low enough that we
// should perform some sort of mitigation to fulfil the resource's constraints.
//
// The methods on this interface are invoked on the adaptation task queue.
// Resource usage measurements may be performed on any task queue.
//
// The Resource is reference counted to prevent use-after-free when posting
// between task queues. As such, the implementation MUST NOT make any
// assumptions about which task queue Resource is destructed on.
class RTC_EXPORT Resource : public rtc::RefCountInterface {
 public:
  Resource();
  // Destruction may happen on any task queue.
  ~Resource() override;

  virtual std::string Name() const = 0;
  // The `listener` may be informed of resource usage measurements on any task
  // queue, but not after this method is invoked with the null argument.
  virtual void SetResourceListener(ResourceListener* listener) = 0;
};

}  // namespace webrtc

#endif  // API_ADAPTATION_RESOURCE_H_
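To make the listener contract above concrete, here is a minimal sketch of a possible Resource implementation. The class name, namespace, load source, and the 0.9 threshold are assumptions made for illustration only; they are not part of this commit.

#include <string>

#include "api/adaptation/resource.h"
#include "api/scoped_refptr.h"

namespace example {

// Hypothetical resource that the embedder feeds with load samples in [0, 1].
class CpuLoadResource : public webrtc::Resource {
 public:
  std::string Name() const override { return "CpuLoadResource"; }

  void SetResourceListener(webrtc::ResourceListener* listener) override {
    listener_ = listener;  // Passing null unregisters the listener.
  }

  // Called by the embedder whenever a new load sample is available.
  void OnLoadSample(double load) {
    if (!listener_)
      return;
    listener_->OnResourceUsageStateMeasured(
        rtc::scoped_refptr<webrtc::Resource>(this),
        load > 0.9 ? webrtc::ResourceUsageState::kOveruse
                   : webrtc::ResourceUsageState::kUnderuse);
  }

 private:
  webrtc::ResourceListener* listener_ = nullptr;
};

}  // namespace example

Instances would typically be created through a ref-counting wrapper such as rtc::make_ref_counted (the api:make_ref_counted target referenced later in this commit), which supplies the AddRef()/Release() overrides required by rtc::RefCountInterface.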
TMessagesProj/jni/voip/webrtc/api/array_view.h (new file, 334 lines)
@@ -0,0 +1,334 @@
/*
 * Copyright 2015 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_ARRAY_VIEW_H_
#define API_ARRAY_VIEW_H_

#include <algorithm>
#include <array>
#include <iterator>
#include <type_traits>

#include "rtc_base/checks.h"
#include "rtc_base/type_traits.h"

namespace rtc {

// tl;dr: rtc::ArrayView is the same thing as gsl::span from the Guideline
// Support Library.
//
// Many functions read from or write to arrays. The obvious way to do this is
// to use two arguments, a pointer to the first element and an element count:
//
//   bool Contains17(const int* arr, size_t size) {
//     for (size_t i = 0; i < size; ++i) {
//       if (arr[i] == 17)
//         return true;
//     }
//     return false;
//   }
//
// This is flexible, since it doesn't matter how the array is stored (C array,
// std::vector, rtc::Buffer, ...), but it's error-prone because the caller has
// to correctly specify the array length:
//
//   Contains17(arr, arraysize(arr));     // C array
//   Contains17(arr.data(), arr.size());  // std::vector
//   Contains17(arr, size);               // pointer + size
//   ...
//
// It's also kind of messy to have two separate arguments for what is
// conceptually a single thing.
//
// Enter rtc::ArrayView<T>. It contains a T pointer (to an array it doesn't
// own) and a count, and supports the basic things you'd expect, such as
// indexing and iteration. It allows us to write our function like this:
//
//   bool Contains17(rtc::ArrayView<const int> arr) {
//     for (auto e : arr) {
//       if (e == 17)
//         return true;
//     }
//     return false;
//   }
//
// And even better, because a bunch of things will implicitly convert to
// ArrayView, we can call it like this:
//
//   Contains17(arr);                             // C array
//   Contains17(arr);                             // std::vector
//   Contains17(rtc::ArrayView<int>(arr, size));  // pointer + size
//   Contains17(nullptr);                         // nullptr -> empty ArrayView
//   ...
//
// ArrayView<T> stores both a pointer and a size, but you may also use
// ArrayView<T, N>, which has a size that's fixed at compile time (which means
// it only has to store the pointer).
//
// One important point is that ArrayView<T> and ArrayView<const T> are
// different types, which allow and don't allow mutation of the array elements,
// respectively. The implicit conversions work just like you'd hope, so that
// e.g. vector<int> will convert to either ArrayView<int> or ArrayView<const
// int>, but const vector<int> will convert only to ArrayView<const int>.
// (ArrayView itself can be the source type in such conversions, so
// ArrayView<int> will convert to ArrayView<const int>.)
//
// Note: ArrayView is tiny (just a pointer and a count if variable-sized, just
// a pointer if fixed-size) and trivially copyable, so it's probably cheaper to
// pass it by value than by const reference.

namespace array_view_internal {

// Magic constant for indicating that the size of an ArrayView is variable
// instead of fixed.
enum : std::ptrdiff_t { kArrayViewVarSize = -4711 };

// Base class for ArrayViews of fixed nonzero size.
template <typename T, std::ptrdiff_t Size>
class ArrayViewBase {
  static_assert(Size > 0, "ArrayView size must be variable or non-negative");

 public:
  ArrayViewBase(T* data, size_t size) : data_(data) {}

  static constexpr size_t size() { return Size; }
  static constexpr bool empty() { return false; }
  T* data() const { return data_; }

 protected:
  static constexpr bool fixed_size() { return true; }

 private:
  T* data_;
};

// Specialized base class for ArrayViews of fixed zero size.
template <typename T>
class ArrayViewBase<T, 0> {
 public:
  explicit ArrayViewBase(T* data, size_t size) {}

  static constexpr size_t size() { return 0; }
  static constexpr bool empty() { return true; }
  T* data() const { return nullptr; }

 protected:
  static constexpr bool fixed_size() { return true; }
};

// Specialized base class for ArrayViews of variable size.
template <typename T>
class ArrayViewBase<T, array_view_internal::kArrayViewVarSize> {
 public:
  ArrayViewBase(T* data, size_t size)
      : data_(size == 0 ? nullptr : data), size_(size) {}

  size_t size() const { return size_; }
  bool empty() const { return size_ == 0; }
  T* data() const { return data_; }

 protected:
  static constexpr bool fixed_size() { return false; }

 private:
  T* data_;
  size_t size_;
};

}  // namespace array_view_internal

template <typename T,
          std::ptrdiff_t Size = array_view_internal::kArrayViewVarSize>
class ArrayView final : public array_view_internal::ArrayViewBase<T, Size> {
 public:
  using value_type = T;
  using reference = value_type&;
  using const_reference = const value_type&;
  using pointer = value_type*;
  using const_pointer = const value_type*;
  using const_iterator = const T*;

  // Construct an ArrayView from a pointer and a length.
  template <typename U>
  ArrayView(U* data, size_t size)
      : array_view_internal::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
    RTC_DCHECK_EQ(size == 0 ? nullptr : data, this->data());
    RTC_DCHECK_EQ(size, this->size());
    RTC_DCHECK_EQ(!this->data(),
                  this->size() == 0);  // data is null iff size == 0.
  }

  // Construct an empty ArrayView. Note that fixed-size ArrayViews of size > 0
  // cannot be empty.
  ArrayView() : ArrayView(nullptr, 0) {}
  ArrayView(std::nullptr_t)  // NOLINT
      : ArrayView() {}
  ArrayView(std::nullptr_t, size_t size)
      : ArrayView(static_cast<T*>(nullptr), size) {
    static_assert(Size == 0 || Size == array_view_internal::kArrayViewVarSize,
                  "");
    RTC_DCHECK_EQ(0, size);
  }

  // Construct an ArrayView from a C-style array.
  template <typename U, size_t N>
  ArrayView(U (&array)[N])  // NOLINT
      : ArrayView(array, N) {
    static_assert(Size == N || Size == array_view_internal::kArrayViewVarSize,
                  "Array size must match ArrayView size");
  }

  // (Only if size is fixed.) Construct a fixed size ArrayView<T, N> from a
  // non-const std::array instance. For an ArrayView with variable size, the
  // used ctor is ArrayView(U& u) instead.
  template <typename U,
            size_t N,
            typename std::enable_if<
                Size == static_cast<std::ptrdiff_t>(N)>::type* = nullptr>
  ArrayView(std::array<U, N>& u)  // NOLINT
      : ArrayView(u.data(), u.size()) {}

  // (Only if size is fixed.) Construct a fixed size ArrayView<T, N> where T is
  // const from a const(expr) std::array instance. For an ArrayView with
  // variable size, the used ctor is ArrayView(U& u) instead.
  template <typename U,
            size_t N,
            typename std::enable_if<
                Size == static_cast<std::ptrdiff_t>(N)>::type* = nullptr>
  ArrayView(const std::array<U, N>& u)  // NOLINT
      : ArrayView(u.data(), u.size()) {}

  // (Only if size is fixed.) Construct an ArrayView from any type U that has a
  // static constexpr size() method whose return value is equal to Size, and a
  // data() method whose return value converts implicitly to T*. In particular,
  // this means we allow conversion from ArrayView<T, N> to ArrayView<const T,
  // N>, but not the other way around. We also don't allow conversion from
  // ArrayView<T> to ArrayView<T, N>, or from ArrayView<T, M> to ArrayView<T,
  // N> when M != N.
  template <
      typename U,
      typename std::enable_if<Size != array_view_internal::kArrayViewVarSize &&
                              HasDataAndSize<U, T>::value>::type* = nullptr>
  ArrayView(U& u)  // NOLINT
      : ArrayView(u.data(), u.size()) {
    static_assert(U::size() == Size, "Sizes must match exactly");
  }
  template <
      typename U,
      typename std::enable_if<Size != array_view_internal::kArrayViewVarSize &&
                              HasDataAndSize<U, T>::value>::type* = nullptr>
  ArrayView(const U& u)  // NOLINT(runtime/explicit)
      : ArrayView(u.data(), u.size()) {
    static_assert(U::size() == Size, "Sizes must match exactly");
  }

  // (Only if size is variable.) Construct an ArrayView from any type U that
  // has a size() method whose return value converts implicitly to size_t, and
  // a data() method whose return value converts implicitly to T*. In
  // particular, this means we allow conversion from ArrayView<T> to
  // ArrayView<const T>, but not the other way around. Other allowed
  // conversions include
  // ArrayView<T, N> to ArrayView<T> or ArrayView<const T>,
  // std::vector<T> to ArrayView<T> or ArrayView<const T>,
  // const std::vector<T> to ArrayView<const T>,
  // rtc::Buffer to ArrayView<uint8_t> or ArrayView<const uint8_t>, and
  // const rtc::Buffer to ArrayView<const uint8_t>.
  template <
      typename U,
      typename std::enable_if<Size == array_view_internal::kArrayViewVarSize &&
                              HasDataAndSize<U, T>::value>::type* = nullptr>
  ArrayView(U& u)  // NOLINT
      : ArrayView(u.data(), u.size()) {}
  template <
      typename U,
      typename std::enable_if<Size == array_view_internal::kArrayViewVarSize &&
                              HasDataAndSize<U, T>::value>::type* = nullptr>
  ArrayView(const U& u)  // NOLINT(runtime/explicit)
      : ArrayView(u.data(), u.size()) {}

  // Indexing and iteration. These allow mutation even if the ArrayView is
  // const, because the ArrayView doesn't own the array. (To prevent mutation,
  // use a const element type.)
  T& operator[](size_t idx) const {
    RTC_DCHECK_LT(idx, this->size());
    RTC_DCHECK(this->data());
    return this->data()[idx];
  }
  T* begin() const { return this->data(); }
  T* end() const { return this->data() + this->size(); }
  const T* cbegin() const { return this->data(); }
  const T* cend() const { return this->data() + this->size(); }
  std::reverse_iterator<T*> rbegin() const {
    return std::make_reverse_iterator(end());
  }
  std::reverse_iterator<T*> rend() const {
    return std::make_reverse_iterator(begin());
  }
  std::reverse_iterator<const T*> crbegin() const {
    return std::make_reverse_iterator(cend());
  }
  std::reverse_iterator<const T*> crend() const {
    return std::make_reverse_iterator(cbegin());
  }

  ArrayView<T> subview(size_t offset, size_t size) const {
    return offset < this->size()
               ? ArrayView<T>(this->data() + offset,
                              std::min(size, this->size() - offset))
               : ArrayView<T>();
  }
  ArrayView<T> subview(size_t offset) const {
    return subview(offset, this->size());
  }
};

// Comparing two ArrayViews compares their (pointer,size) pairs; it does *not*
// dereference the pointers.
template <typename T, std::ptrdiff_t Size1, std::ptrdiff_t Size2>
bool operator==(const ArrayView<T, Size1>& a, const ArrayView<T, Size2>& b) {
  return a.data() == b.data() && a.size() == b.size();
}
template <typename T, std::ptrdiff_t Size1, std::ptrdiff_t Size2>
bool operator!=(const ArrayView<T, Size1>& a, const ArrayView<T, Size2>& b) {
  return !(a == b);
}

// Variable-size ArrayViews are the size of two pointers; fixed-size ArrayViews
// are the size of one pointer. (And as a special case, fixed-size ArrayViews
// of size 0 require no storage.)
static_assert(sizeof(ArrayView<int>) == 2 * sizeof(int*), "");
static_assert(sizeof(ArrayView<int, 17>) == sizeof(int*), "");
static_assert(std::is_empty<ArrayView<int, 0>>::value, "");

template <typename T>
inline ArrayView<T> MakeArrayView(T* data, size_t size) {
  return ArrayView<T>(data, size);
}

// Only for primitive types that have the same size and alignment.
// Allow reinterpret cast of the array view to another primitive type of the
// same size.
// Template arguments order is (U, T, Size) to allow deduction of the template
// arguments in client calls: reinterpret_array_view<target_type>(array_view).
template <typename U, typename T, std::ptrdiff_t Size>
inline ArrayView<U, Size> reinterpret_array_view(ArrayView<T, Size> view) {
  static_assert(sizeof(U) == sizeof(T) && alignof(U) == alignof(T),
                "ArrayView reinterpret_cast is only supported for casting "
                "between views that represent the same chunk of memory.");
  static_assert(
      std::is_fundamental<T>::value && std::is_fundamental<U>::value,
      "ArrayView reinterpret_cast is only supported for casting between "
      "fundamental types.");
  return ArrayView<U, Size>(reinterpret_cast<U*>(view.data()), view.size());
}

}  // namespace rtc

#endif  // API_ARRAY_VIEW_H_
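A short illustrative snippet of the conversions and the `subview()` helper described in the header comment; the caller code and names below are assumptions for illustration and are not part of the committed files (the unit test file that follows exercises the same behavior exhaustively).

#include <vector>

#include "api/array_view.h"

int SumTail(rtc::ArrayView<const int> view) {
  int sum = 0;
  // Drop the first element; subview clamps if the offset is out of range.
  for (int v : view.subview(1))
    sum += v;
  return sum;
}

void Caller() {
  std::vector<int> samples = {1, 2, 3, 4};
  int tail_sum = SumTail(samples);  // std::vector converts implicitly.
  int arr[] = {5, 6, 7};
  int tail_sum2 = SumTail(arr);     // So does a C array.
  (void)tail_sum;
  (void)tail_sum2;
}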
TMessagesProj/jni/voip/webrtc/api/array_view_unittest.cc (new file, 630 lines)
@@ -0,0 +1,630 @@
/*
 * Copyright 2015 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "api/array_view.h"

#include <algorithm>
#include <array>
#include <string>
#include <utility>
#include <vector>

#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/gunit.h"
#include "test/gmock.h"

namespace rtc {

namespace {

using ::testing::ElementsAre;
using ::testing::IsEmpty;

template <typename T>
size_t Call(ArrayView<T> av) {
  return av.size();
}

template <typename T, size_t N>
void CallFixed(ArrayView<T, N> av) {}

}  // namespace

TEST(ArrayViewDeathTest, TestConstructFromPtrAndArray) {
  char arr[] = "Arrr!";
  const char carr[] = "Carrr!";
  EXPECT_EQ(6u, Call<const char>(arr));
  EXPECT_EQ(7u, Call<const char>(carr));
  EXPECT_EQ(6u, Call<char>(arr));
  // Call<char>(carr);  // Compile error, because can't drop const.
  // Call<int>(arr);  // Compile error, because incompatible types.
  ArrayView<int*> x;
  EXPECT_EQ(0u, x.size());
  EXPECT_EQ(nullptr, x.data());
  ArrayView<char> y = arr;
  EXPECT_EQ(6u, y.size());
  EXPECT_EQ(arr, y.data());
  ArrayView<char, 6> yf = arr;
  static_assert(yf.size() == 6, "");
  EXPECT_EQ(arr, yf.data());
  ArrayView<const char> z(arr + 1, 3);
  EXPECT_EQ(3u, z.size());
  EXPECT_EQ(arr + 1, z.data());
  ArrayView<const char, 3> zf(arr + 1, 3);
  static_assert(zf.size() == 3, "");
  EXPECT_EQ(arr + 1, zf.data());
  ArrayView<const char> w(arr, 2);
  EXPECT_EQ(2u, w.size());
  EXPECT_EQ(arr, w.data());
  ArrayView<const char, 2> wf(arr, 2);
  static_assert(wf.size() == 2, "");
  EXPECT_EQ(arr, wf.data());
  ArrayView<char> q(arr, 0);
  EXPECT_EQ(0u, q.size());
  EXPECT_EQ(nullptr, q.data());
  ArrayView<char, 0> qf(arr, 0);
  static_assert(qf.size() == 0, "");
  EXPECT_EQ(nullptr, qf.data());
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
  // DCHECK error (nullptr with nonzero size).
  EXPECT_DEATH(ArrayView<int>(static_cast<int*>(nullptr), 5), "");
#endif
  // These are compile errors, because incompatible types.
  // ArrayView<int> m = arr;
  // ArrayView<float> n(arr + 2, 2);
}

TEST(ArrayViewTest, TestCopyConstructorVariableLvalue) {
  char arr[] = "Arrr!";
  ArrayView<char> x = arr;
  EXPECT_EQ(6u, x.size());
  EXPECT_EQ(arr, x.data());
  ArrayView<char> y = x;  // Copy non-const -> non-const.
  EXPECT_EQ(6u, y.size());
  EXPECT_EQ(arr, y.data());
  ArrayView<const char> z = x;  // Copy non-const -> const.
  EXPECT_EQ(6u, z.size());
  EXPECT_EQ(arr, z.data());
  ArrayView<const char> w = z;  // Copy const -> const.
  EXPECT_EQ(6u, w.size());
  EXPECT_EQ(arr, w.data());
  // ArrayView<char> v = z;  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyConstructorVariableRvalue) {
  char arr[] = "Arrr!";
  ArrayView<char> x = arr;
  EXPECT_EQ(6u, x.size());
  EXPECT_EQ(arr, x.data());
  ArrayView<char> y = std::move(x);  // Copy non-const -> non-const.
  EXPECT_EQ(6u, y.size());
  EXPECT_EQ(arr, y.data());
  ArrayView<const char> z = std::move(x);  // Copy non-const -> const.
  EXPECT_EQ(6u, z.size());
  EXPECT_EQ(arr, z.data());
  ArrayView<const char> w = std::move(z);  // Copy const -> const.
  EXPECT_EQ(6u, w.size());
  EXPECT_EQ(arr, w.data());
  // ArrayView<char> v = std::move(z);  // Error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyConstructorFixedLvalue) {
  char arr[] = "Arrr!";
  ArrayView<char, 6> x = arr;
  static_assert(x.size() == 6, "");
  EXPECT_EQ(arr, x.data());

  // Copy fixed -> fixed.
  ArrayView<char, 6> y = x;  // Copy non-const -> non-const.
  static_assert(y.size() == 6, "");
  EXPECT_EQ(arr, y.data());
  ArrayView<const char, 6> z = x;  // Copy non-const -> const.
  static_assert(z.size() == 6, "");
  EXPECT_EQ(arr, z.data());
  ArrayView<const char, 6> w = z;  // Copy const -> const.
  static_assert(w.size() == 6, "");
  EXPECT_EQ(arr, w.data());
  // ArrayView<char, 6> v = z;  // Compile error, because can't drop const.

  // Copy fixed -> variable.
  ArrayView<char> yv = x;  // Copy non-const -> non-const.
  EXPECT_EQ(6u, yv.size());
  EXPECT_EQ(arr, yv.data());
  ArrayView<const char> zv = x;  // Copy non-const -> const.
  EXPECT_EQ(6u, zv.size());
  EXPECT_EQ(arr, zv.data());
  ArrayView<const char> wv = z;  // Copy const -> const.
  EXPECT_EQ(6u, wv.size());
  EXPECT_EQ(arr, wv.data());
  // ArrayView<char> vv = z;  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyConstructorFixedRvalue) {
  char arr[] = "Arrr!";
  ArrayView<char, 6> x = arr;
  static_assert(x.size() == 6, "");
  EXPECT_EQ(arr, x.data());

  // Copy fixed -> fixed.
  ArrayView<char, 6> y = std::move(x);  // Copy non-const -> non-const.
  static_assert(y.size() == 6, "");
  EXPECT_EQ(arr, y.data());
  ArrayView<const char, 6> z = std::move(x);  // Copy non-const -> const.
  static_assert(z.size() == 6, "");
  EXPECT_EQ(arr, z.data());
  ArrayView<const char, 6> w = std::move(z);  // Copy const -> const.
  static_assert(w.size() == 6, "");
  EXPECT_EQ(arr, w.data());
  // ArrayView<char, 6> v = std::move(z);  // Error, because can't drop const.

  // Copy fixed -> variable.
  ArrayView<char> yv = std::move(x);  // Copy non-const -> non-const.
  EXPECT_EQ(6u, yv.size());
  EXPECT_EQ(arr, yv.data());
  ArrayView<const char> zv = std::move(x);  // Copy non-const -> const.
  EXPECT_EQ(6u, zv.size());
  EXPECT_EQ(arr, zv.data());
  ArrayView<const char> wv = std::move(z);  // Copy const -> const.
  EXPECT_EQ(6u, wv.size());
  EXPECT_EQ(arr, wv.data());
  // ArrayView<char> vv = std::move(z);  // Error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyAssignmentVariableLvalue) {
  char arr[] = "Arrr!";
  ArrayView<char> x(arr);
  EXPECT_EQ(6u, x.size());
  EXPECT_EQ(arr, x.data());
  ArrayView<char> y;
  y = x;  // Copy non-const -> non-const.
  EXPECT_EQ(6u, y.size());
  EXPECT_EQ(arr, y.data());
  ArrayView<const char> z;
  z = x;  // Copy non-const -> const.
  EXPECT_EQ(6u, z.size());
  EXPECT_EQ(arr, z.data());
  ArrayView<const char> w;
  w = z;  // Copy const -> const.
  EXPECT_EQ(6u, w.size());
  EXPECT_EQ(arr, w.data());
  // ArrayView<char> v;
  // v = z;  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyAssignmentVariableRvalue) {
  char arr[] = "Arrr!";
  ArrayView<char> x(arr);
  EXPECT_EQ(6u, x.size());
  EXPECT_EQ(arr, x.data());
  ArrayView<char> y;
  y = std::move(x);  // Copy non-const -> non-const.
  EXPECT_EQ(6u, y.size());
  EXPECT_EQ(arr, y.data());
  ArrayView<const char> z;
  z = std::move(x);  // Copy non-const -> const.
  EXPECT_EQ(6u, z.size());
  EXPECT_EQ(arr, z.data());
  ArrayView<const char> w;
  w = std::move(z);  // Copy const -> const.
  EXPECT_EQ(6u, w.size());
  EXPECT_EQ(arr, w.data());
  // ArrayView<char> v;
  // v = std::move(z);  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyAssignmentFixedLvalue) {
  char arr[] = "Arrr!";
  char init[] = "Init!";
  ArrayView<char, 6> x(arr);
  EXPECT_EQ(arr, x.data());

  // Copy fixed -> fixed.
  ArrayView<char, 6> y(init);
  y = x;  // Copy non-const -> non-const.
  EXPECT_EQ(arr, y.data());
  ArrayView<const char, 6> z(init);
  z = x;  // Copy non-const -> const.
  EXPECT_EQ(arr, z.data());
  ArrayView<const char, 6> w(init);
  w = z;  // Copy const -> const.
  EXPECT_EQ(arr, w.data());
  // ArrayView<char, 6> v(init);
  // v = z;  // Compile error, because can't drop const.

  // Copy fixed -> variable.
  ArrayView<char> yv;
  yv = x;  // Copy non-const -> non-const.
  EXPECT_EQ(6u, yv.size());
  EXPECT_EQ(arr, yv.data());
  ArrayView<const char> zv;
  zv = x;  // Copy non-const -> const.
  EXPECT_EQ(6u, zv.size());
  EXPECT_EQ(arr, zv.data());
  ArrayView<const char> wv;
  wv = z;  // Copy const -> const.
  EXPECT_EQ(6u, wv.size());
  EXPECT_EQ(arr, wv.data());
  // ArrayView<char> v;
  // v = z;  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestCopyAssignmentFixedRvalue) {
  char arr[] = "Arrr!";
  char init[] = "Init!";
  ArrayView<char, 6> x(arr);
  EXPECT_EQ(arr, x.data());

  // Copy fixed -> fixed.
  ArrayView<char, 6> y(init);
  y = std::move(x);  // Copy non-const -> non-const.
  EXPECT_EQ(arr, y.data());
  ArrayView<const char, 6> z(init);
  z = std::move(x);  // Copy non-const -> const.
  EXPECT_EQ(arr, z.data());
  ArrayView<const char, 6> w(init);
  w = std::move(z);  // Copy const -> const.
  EXPECT_EQ(arr, w.data());
  // ArrayView<char, 6> v(init);
  // v = std::move(z);  // Compile error, because can't drop const.

  // Copy fixed -> variable.
  ArrayView<char> yv;
  yv = std::move(x);  // Copy non-const -> non-const.
  EXPECT_EQ(6u, yv.size());
  EXPECT_EQ(arr, yv.data());
  ArrayView<const char> zv;
  zv = std::move(x);  // Copy non-const -> const.
  EXPECT_EQ(6u, zv.size());
  EXPECT_EQ(arr, zv.data());
  ArrayView<const char> wv;
  wv = std::move(z);  // Copy const -> const.
  EXPECT_EQ(6u, wv.size());
  EXPECT_EQ(arr, wv.data());
  // ArrayView<char> v;
  // v = std::move(z);  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestStdArray) {
  EXPECT_EQ(4u, Call<const int>(std::array<int, 4>{1, 2, 3, 4}));
  CallFixed<const int, 3>(std::array<int, 3>{2, 3, 4});
  constexpr size_t size = 5;
  std::array<float, size> arr{};
  // Fixed size view.
  rtc::ArrayView<float, size> arr_view_fixed(arr);
  EXPECT_EQ(arr.data(), arr_view_fixed.data());
  static_assert(size == arr_view_fixed.size(), "");
  // Variable size view.
  rtc::ArrayView<float> arr_view(arr);
  EXPECT_EQ(arr.data(), arr_view.data());
  EXPECT_EQ(size, arr_view.size());
}

TEST(ArrayViewTest, TestConstStdArray) {
  constexpr size_t size = 5;

  constexpr std::array<float, size> constexpr_arr{};
  rtc::ArrayView<const float, size> constexpr_arr_view(constexpr_arr);
  EXPECT_EQ(constexpr_arr.data(), constexpr_arr_view.data());
  static_assert(constexpr_arr.size() == constexpr_arr_view.size(), "");

  const std::array<float, size> const_arr{};
  rtc::ArrayView<const float, size> const_arr_view(const_arr);
  EXPECT_EQ(const_arr.data(), const_arr_view.data());
  static_assert(const_arr.size() == const_arr_view.size(), "");

  std::array<float, size> non_const_arr{};
  rtc::ArrayView<const float, size> non_const_arr_view(non_const_arr);
  EXPECT_EQ(non_const_arr.data(), non_const_arr_view.data());
  static_assert(non_const_arr.size() == non_const_arr_view.size(), "");
}

TEST(ArrayViewTest, TestStdVector) {
  EXPECT_EQ(3u, Call<const int>(std::vector<int>{4, 5, 6}));
  std::vector<int> v;
  v.push_back(3);
  v.push_back(11);
  EXPECT_EQ(2u, Call<const int>(v));
  EXPECT_EQ(2u, Call<int>(v));
  // Call<unsigned int>(v);  // Compile error, because incompatible types.
  ArrayView<int> x = v;
  EXPECT_EQ(2u, x.size());
  EXPECT_EQ(v.data(), x.data());
  ArrayView<const int> y;
  y = v;
  EXPECT_EQ(2u, y.size());
  EXPECT_EQ(v.data(), y.data());
  // ArrayView<double> d = v;  // Compile error, because incompatible types.
  const std::vector<int> cv;
  EXPECT_EQ(0u, Call<const int>(cv));
  // Call<int>(cv);  // Compile error, because can't drop const.
  ArrayView<const int> z = cv;
  EXPECT_EQ(0u, z.size());
  EXPECT_EQ(nullptr, z.data());
  // ArrayView<int> w = cv;  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestRtcBuffer) {
  rtc::Buffer b = "so buffer";
  EXPECT_EQ(10u, Call<const uint8_t>(b));
  EXPECT_EQ(10u, Call<uint8_t>(b));
  // Call<int8_t>(b);  // Compile error, because incompatible types.
  ArrayView<uint8_t> x = b;
  EXPECT_EQ(10u, x.size());
  EXPECT_EQ(b.data(), x.data());
  ArrayView<const uint8_t> y;
  y = b;
  EXPECT_EQ(10u, y.size());
  EXPECT_EQ(b.data(), y.data());
  // ArrayView<char> d = b;  // Compile error, because incompatible types.
  const rtc::Buffer cb = "very const";
  EXPECT_EQ(11u, Call<const uint8_t>(cb));
  // Call<uint8_t>(cb);  // Compile error, because can't drop const.
  ArrayView<const uint8_t> z = cb;
  EXPECT_EQ(11u, z.size());
  EXPECT_EQ(cb.data(), z.data());
  // ArrayView<uint8_t> w = cb;  // Compile error, because can't drop const.
}

TEST(ArrayViewTest, TestSwapVariable) {
  const char arr[] = "Arrr!";
  const char aye[] = "Aye, Cap'n!";
  ArrayView<const char> x(arr);
  EXPECT_EQ(6u, x.size());
  EXPECT_EQ(arr, x.data());
  ArrayView<const char> y(aye);
  EXPECT_EQ(12u, y.size());
  EXPECT_EQ(aye, y.data());
  using std::swap;
  swap(x, y);
  EXPECT_EQ(12u, x.size());
  EXPECT_EQ(aye, x.data());
  EXPECT_EQ(6u, y.size());
  EXPECT_EQ(arr, y.data());
  // ArrayView<char> z;
  // swap(x, z);  // Compile error, because can't drop const.
}

TEST(FixArrayViewTest, TestSwapFixed) {
  const char arr[] = "Arr!";
  char aye[] = "Aye!";
  ArrayView<const char, 5> x(arr);
  EXPECT_EQ(arr, x.data());
  ArrayView<const char, 5> y(aye);
  EXPECT_EQ(aye, y.data());
  using std::swap;
  swap(x, y);
  EXPECT_EQ(aye, x.data());
  EXPECT_EQ(arr, y.data());
  // ArrayView<char, 5> z(aye);
  // swap(x, z);  // Compile error, because can't drop const.
  // ArrayView<const char, 4> w(aye, 4);
  // swap(x, w);  // Compile error, because different sizes.
}

TEST(ArrayViewDeathTest, TestIndexing) {
  char arr[] = "abcdefg";
  ArrayView<char> x(arr);
  const ArrayView<char> y(arr);
  ArrayView<const char, 8> z(arr);
  EXPECT_EQ(8u, x.size());
  EXPECT_EQ(8u, y.size());
  EXPECT_EQ(8u, z.size());
  EXPECT_EQ('b', x[1]);
  EXPECT_EQ('c', y[2]);
  EXPECT_EQ('d', z[3]);
  x[3] = 'X';
  y[2] = 'Y';
  // z[1] = 'Z';  // Compile error, because z's element type is const char.
  EXPECT_EQ('b', x[1]);
  EXPECT_EQ('Y', y[2]);
  EXPECT_EQ('X', z[3]);
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
  EXPECT_DEATH(z[8], "");  // DCHECK error (index out of bounds).
#endif
}

TEST(ArrayViewTest, TestIterationEmpty) {
  // Variable-size.
  ArrayView<std::vector<std::vector<std::vector<std::string>>>> av;
  EXPECT_EQ(av.begin(), av.end());
  EXPECT_EQ(av.cbegin(), av.cend());
  for (auto& e : av) {
    EXPECT_TRUE(false);
    EXPECT_EQ(42u, e.size());  // Dummy use of e to prevent unused var warning.
  }

  // Fixed-size.
  ArrayView<std::vector<std::vector<std::vector<std::string>>>, 0> af;
  EXPECT_EQ(af.begin(), af.end());
  EXPECT_EQ(af.cbegin(), af.cend());
  for (auto& e : af) {
    EXPECT_TRUE(false);
    EXPECT_EQ(42u, e.size());  // Dummy use of e to prevent unused var warning.
  }
}

TEST(ArrayViewTest, TestReverseIterationEmpty) {
  // Variable-size.
  ArrayView<std::vector<std::vector<std::vector<std::string>>>> av;
  EXPECT_EQ(av.rbegin(), av.rend());
  EXPECT_EQ(av.crbegin(), av.crend());
  EXPECT_TRUE(av.empty());

  // Fixed-size.
  ArrayView<std::vector<std::vector<std::vector<std::string>>>, 0> af;
  EXPECT_EQ(af.begin(), af.end());
  EXPECT_EQ(af.cbegin(), af.cend());
  EXPECT_TRUE(af.empty());
}

TEST(ArrayViewTest, TestIterationVariable) {
  char arr[] = "Arrr!";
  ArrayView<char> av(arr);
  EXPECT_EQ('A', *av.begin());
  EXPECT_EQ('A', *av.cbegin());
  EXPECT_EQ('\0', *(av.end() - 1));
  EXPECT_EQ('\0', *(av.cend() - 1));
  char i = 0;
  for (auto& e : av) {
    EXPECT_EQ(arr + i, &e);
    e = 's' + i;
    ++i;
  }
  i = 0;
  for (auto& e : ArrayView<const char>(av)) {
    EXPECT_EQ(arr + i, &e);
    // e = 'q' + i;  // Compile error, because e is a const char&.
    ++i;
  }
}

TEST(ArrayViewTest, TestReverseIterationVariable) {
  char arr[] = "Arrr!";
  ArrayView<char> av(arr);
  EXPECT_EQ('\0', *av.rbegin());
  EXPECT_EQ('\0', *av.crbegin());
  EXPECT_EQ('A', *(av.rend() - 1));
  EXPECT_EQ('A', *(av.crend() - 1));

  const char* cit = av.cend() - 1;
  for (auto crit = av.crbegin(); crit != av.crend(); ++crit, --cit) {
    EXPECT_EQ(*cit, *crit);
  }

  char* it = av.end() - 1;
  for (auto rit = av.rbegin(); rit != av.rend(); ++rit, --it) {
    EXPECT_EQ(*it, *rit);
  }
}

TEST(ArrayViewTest, TestIterationFixed) {
  char arr[] = "Arrr!";
  ArrayView<char, 6> av(arr);
  EXPECT_EQ('A', *av.begin());
  EXPECT_EQ('A', *av.cbegin());
  EXPECT_EQ('\0', *(av.end() - 1));
  EXPECT_EQ('\0', *(av.cend() - 1));
  char i = 0;
  for (auto& e : av) {
    EXPECT_EQ(arr + i, &e);
    e = 's' + i;
    ++i;
  }
  i = 0;
  for (auto& e : ArrayView<const char, 6>(av)) {
    EXPECT_EQ(arr + i, &e);
    // e = 'q' + i;  // Compile error, because e is a const char&.
    ++i;
  }
}

TEST(ArrayViewTest, TestReverseIterationFixed) {
  char arr[] = "Arrr!";
  ArrayView<char, 6> av(arr);
  EXPECT_EQ('\0', *av.rbegin());
  EXPECT_EQ('\0', *av.crbegin());
  EXPECT_EQ('A', *(av.rend() - 1));
  EXPECT_EQ('A', *(av.crend() - 1));

  const char* cit = av.cend() - 1;
  for (auto crit = av.crbegin(); crit != av.crend(); ++crit, --cit) {
    EXPECT_EQ(*cit, *crit);
  }

  char* it = av.end() - 1;
  for (auto rit = av.rbegin(); rit != av.rend(); ++rit, --it) {
    EXPECT_EQ(*it, *rit);
  }
}

TEST(ArrayViewTest, TestEmpty) {
  EXPECT_TRUE(ArrayView<int>().empty());
  const int a[] = {1, 2, 3};
  EXPECT_FALSE(ArrayView<const int>(a).empty());

  static_assert(ArrayView<int, 0>::empty(), "");
  static_assert(!ArrayView<int, 3>::empty(), "");
}

TEST(ArrayViewTest, TestCompare) {
  int a[] = {1, 2, 3};
  int b[] = {1, 2, 3};

  EXPECT_EQ(ArrayView<int>(a), ArrayView<int>(a));
  EXPECT_EQ((ArrayView<int, 3>(a)), (ArrayView<int, 3>(a)));
  EXPECT_EQ(ArrayView<int>(a), (ArrayView<int, 3>(a)));
  EXPECT_EQ(ArrayView<int>(), ArrayView<int>());
  EXPECT_EQ(ArrayView<int>(), ArrayView<int>(a, 0));
  EXPECT_EQ(ArrayView<int>(a, 0), ArrayView<int>(b, 0));
  EXPECT_EQ((ArrayView<int, 0>(a, 0)), ArrayView<int>());

  EXPECT_NE(ArrayView<int>(a), ArrayView<int>(b));
  EXPECT_NE((ArrayView<int, 3>(a)), (ArrayView<int, 3>(b)));
  EXPECT_NE((ArrayView<int, 3>(a)), ArrayView<int>(b));
  EXPECT_NE(ArrayView<int>(a), ArrayView<int>());
  EXPECT_NE(ArrayView<int>(a), ArrayView<int>(a, 2));
  EXPECT_NE((ArrayView<int, 3>(a)), (ArrayView<int, 2>(a, 2)));
}

TEST(ArrayViewTest, TestSubViewVariable) {
  int a[] = {1, 2, 3};
  ArrayView<int> av(a);

  EXPECT_EQ(av.subview(0), av);

  EXPECT_THAT(av.subview(1), ElementsAre(2, 3));
  EXPECT_THAT(av.subview(2), ElementsAre(3));
  EXPECT_THAT(av.subview(3), IsEmpty());
  EXPECT_THAT(av.subview(4), IsEmpty());

  EXPECT_THAT(av.subview(1, 0), IsEmpty());
  EXPECT_THAT(av.subview(1, 1), ElementsAre(2));
  EXPECT_THAT(av.subview(1, 2), ElementsAre(2, 3));
  EXPECT_THAT(av.subview(1, 3), ElementsAre(2, 3));
}

TEST(ArrayViewTest, TestSubViewFixed) {
  int a[] = {1, 2, 3};
  ArrayView<int, 3> av(a);

  EXPECT_EQ(av.subview(0), av);

  EXPECT_THAT(av.subview(1), ElementsAre(2, 3));
  EXPECT_THAT(av.subview(2), ElementsAre(3));
  EXPECT_THAT(av.subview(3), IsEmpty());
  EXPECT_THAT(av.subview(4), IsEmpty());

  EXPECT_THAT(av.subview(1, 0), IsEmpty());
  EXPECT_THAT(av.subview(1, 1), ElementsAre(2));
  EXPECT_THAT(av.subview(1, 2), ElementsAre(2, 3));
  EXPECT_THAT(av.subview(1, 3), ElementsAre(2, 3));
}

TEST(ArrayViewTest, TestReinterpretCastFixedSize) {
  uint8_t bytes[] = {1, 2, 3};
  ArrayView<uint8_t, 3> uint8_av(bytes);
  ArrayView<int8_t, 3> int8_av = reinterpret_array_view<int8_t>(uint8_av);
  EXPECT_EQ(int8_av.size(), uint8_av.size());
  EXPECT_EQ(int8_av[0], 1);
  EXPECT_EQ(int8_av[1], 2);
  EXPECT_EQ(int8_av[2], 3);
}

TEST(ArrayViewTest, TestReinterpretCastVariableSize) {
  std::vector<int8_t> v = {1, 2, 3};
  ArrayView<int8_t> int8_av(v);
  ArrayView<uint8_t> uint8_av = reinterpret_array_view<uint8_t>(int8_av);
  EXPECT_EQ(int8_av.size(), uint8_av.size());
  EXPECT_EQ(uint8_av[0], 1);
  EXPECT_EQ(uint8_av[1], 2);
  EXPECT_EQ(uint8_av[2], 3);
}

}  // namespace rtc
TMessagesProj/jni/voip/webrtc/api/async_dns_resolver.h (new file, 105 lines)
@@ -0,0 +1,105 @@
/*
 * Copyright 2021 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_ASYNC_DNS_RESOLVER_H_
#define API_ASYNC_DNS_RESOLVER_H_

#include <functional>
#include <memory>

#include "absl/functional/any_invocable.h"
#include "rtc_base/checks.h"
#include "rtc_base/socket_address.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {

// This interface defines the methods to resolve a hostname asynchronously.
// The AsyncDnsResolverInterface class encapsulates a single name query.
//
// Usage:
//   std::unique_ptr<AsyncDnsResolverInterface> resolver =
//       factory->Create(address-to-be-resolved, [r = resolver.get()]() {
//         if (r->result().GetResolvedAddress(AF_INET, &addr)) {
//           // success
//         } else {
//           // failure
//           error = r->result().GetError();
//         }
//         // Release resolver.
//         resolver_list.erase(std::remove_if(resolver_list.begin(),
//                                            resolver_list.end(),
//                                            [](refptr) { refptr.get() == r; });
//       });
//   resolver_list.push_back(std::move(resolver));

class AsyncDnsResolverResult {
 public:
  virtual ~AsyncDnsResolverResult() = default;
  // Returns true iff the address from `Start` was successfully resolved.
  // If the address was successfully resolved, sets `addr` to a copy of the
  // address from `Start` with the IP address set to the top most resolved
  // address of `family` (`addr` will have both hostname and the resolved ip).
  virtual bool GetResolvedAddress(int family,
                                  rtc::SocketAddress* addr) const = 0;
  // Returns error from resolver.
  virtual int GetError() const = 0;
};

// The API for a single name query.
// The constructor, destructor and all functions must be called from
// the same sequence, and the callback will also be called on that sequence.
// The class guarantees that the callback will not be called if the
// resolver's destructor has been called.
class RTC_EXPORT AsyncDnsResolverInterface {
 public:
  virtual ~AsyncDnsResolverInterface() = default;

  // Start address resolution of the hostname in `addr`.
  virtual void Start(const rtc::SocketAddress& addr,
                     absl::AnyInvocable<void()> callback) = 0;
  // Start address resolution of the hostname in `addr` matching `family`.
  virtual void Start(const rtc::SocketAddress& addr,
                     int family,
                     absl::AnyInvocable<void()> callback) = 0;
  virtual const AsyncDnsResolverResult& result() const = 0;
};

// An abstract factory for creating AsyncDnsResolverInterfaces. This allows
// client applications to provide WebRTC with their own mechanism for
// performing DNS resolution.
class AsyncDnsResolverFactoryInterface {
 public:
  virtual ~AsyncDnsResolverFactoryInterface() = default;

  // Creates an AsyncDnsResolver and starts resolving the name. The callback
  // will be called when resolution is finished.
  // The callback will be called on the sequence that the caller runs on.
  virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAndResolve(
      const rtc::SocketAddress& addr,
      absl::AnyInvocable<void()> callback) = 0;
  // Creates an AsyncDnsResolver and starts resolving the name to an address
  // matching the specified family. The callback will be called when resolution
  // is finished. The callback will be called on the sequence that the caller
  // runs on.
  virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAndResolve(
      const rtc::SocketAddress& addr,
      int family,
      absl::AnyInvocable<void()> callback) = 0;
  // Creates an AsyncDnsResolver and does not start it.
  // For backwards compatibility, will be deprecated and removed.
  // One has to do a separate Start() call on the
  // resolver to start name resolution.
  virtual std::unique_ptr<webrtc::AsyncDnsResolverInterface> Create() = 0;
};

}  // namespace webrtc

#endif  // API_ASYNC_DNS_RESOLVER_H_
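A hedged sketch of how caller code might drive the factory interface above; the `StartResolve` function, the hostname, and the port are assumptions for illustration, and the caller is responsible for keeping the returned resolver alive until the callback has run.

#include <memory>

#include "api/async_dns_resolver.h"
#include "rtc_base/socket_address.h"

// Hypothetical caller code, not part of this commit.
std::unique_ptr<webrtc::AsyncDnsResolverInterface> StartResolve(
    webrtc::AsyncDnsResolverFactoryInterface& factory,
    const rtc::SocketAddress& address) {
  std::unique_ptr<webrtc::AsyncDnsResolverInterface> resolver =
      factory.Create();
  auto* raw = resolver.get();
  raw->Start(address, [raw]() {
    rtc::SocketAddress resolved;
    if (raw->result().GetResolvedAddress(AF_INET, &resolved)) {
      // Success: use `resolved`.
    } else {
      // Failure: inspect raw->result().GetError().
    }
  });
  // The caller must keep the returned resolver alive until the callback runs,
  // or destroy it early to guarantee the callback is never invoked.
  return resolver;
}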
TMessagesProj/jni/voip/webrtc/api/audio/BUILD.gn (new file, 93 lines)
@@ -0,0 +1,93 @@
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

import("../../webrtc.gni")

rtc_library("audio_frame_api") {
  visibility = [ "*" ]
  sources = [
    "audio_frame.cc",
    "audio_frame.h",
    "channel_layout.cc",
    "channel_layout.h",
  ]

  deps = [
    "..:rtp_packet_info",
    "../../rtc_base:checks",
    "../../rtc_base:logging",
    "../../rtc_base:macromagic",
    "../../rtc_base:timeutils",
  ]
}

rtc_source_set("audio_frame_processor") {
  visibility = [ "*" ]
  sources = [ "audio_frame_processor.h" ]
}

rtc_source_set("audio_mixer_api") {
  visibility = [ "*" ]
  sources = [ "audio_mixer.h" ]

  deps = [
    ":audio_frame_api",
    "..:make_ref_counted",
    "../../rtc_base:refcount",
  ]
}

rtc_library("aec3_config") {
  visibility = [ "*" ]
  sources = [
    "echo_canceller3_config.cc",
    "echo_canceller3_config.h",
  ]
  deps = [
    "../../rtc_base:checks",
    "../../rtc_base:safe_minmax",
    "../../rtc_base/system:rtc_export",
  ]
}

rtc_library("aec3_factory") {
  visibility = [ "*" ]
  configs += [ "../../modules/audio_processing:apm_debug_dump" ]
  sources = [
    "echo_canceller3_factory.cc",
    "echo_canceller3_factory.h",
  ]

  deps = [
    ":aec3_config",
    ":echo_control",
    "../../modules/audio_processing/aec3",
    "../../rtc_base/system:rtc_export",
  ]
}

rtc_source_set("echo_control") {
  visibility = [ "*" ]
  sources = [ "echo_control.h" ]
  deps = [ "../../rtc_base:checks" ]
}

rtc_source_set("echo_detector_creator") {
  visibility = [ "*" ]
  allow_poison = [ "default_echo_detector" ]
  sources = [
    "echo_detector_creator.cc",
    "echo_detector_creator.h",
  ]
  deps = [
    "..:make_ref_counted",
    "../../api:scoped_refptr",
    "../../modules/audio_processing:api",
    "../../modules/audio_processing:residual_echo_detector",
  ]
}
TMessagesProj/jni/voip/webrtc/api/audio/OWNERS (new file, 2 lines)
@@ -0,0 +1,2 @@
gustaf@webrtc.org
peah@webrtc.org
TMessagesProj/jni/voip/webrtc/api/audio/audio_frame.cc (new file, 140 lines)
@@ -0,0 +1,140 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio/audio_frame.h"

#include <string.h>

#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

AudioFrame::AudioFrame() {
  // Visual Studio doesn't like this in the class definition.
  static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}

void AudioFrame::Reset() {
  ResetWithoutMuting();
  muted_ = true;
}

void AudioFrame::ResetWithoutMuting() {
  // TODO(wu): Zero is a valid value for `timestamp_`. We should initialize
  // to an invalid value, or add a new member to indicate invalidity.
  timestamp_ = 0;
  elapsed_time_ms_ = -1;
  ntp_time_ms_ = -1;
  samples_per_channel_ = 0;
  sample_rate_hz_ = 0;
  num_channels_ = 0;
  channel_layout_ = CHANNEL_LAYOUT_NONE;
  speech_type_ = kUndefined;
  vad_activity_ = kVadUnknown;
  profile_timestamp_ms_ = 0;
  packet_infos_ = RtpPacketInfos();
  absolute_capture_timestamp_ms_ = absl::nullopt;
}

void AudioFrame::UpdateFrame(uint32_t timestamp,
                             const int16_t* data,
                             size_t samples_per_channel,
                             int sample_rate_hz,
                             SpeechType speech_type,
                             VADActivity vad_activity,
                             size_t num_channels) {
  timestamp_ = timestamp;
  samples_per_channel_ = samples_per_channel;
  sample_rate_hz_ = sample_rate_hz;
  speech_type_ = speech_type;
  vad_activity_ = vad_activity;
  num_channels_ = num_channels;
  channel_layout_ = GuessChannelLayout(num_channels);
  if (channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED) {
    RTC_DCHECK_EQ(num_channels, ChannelLayoutToChannelCount(channel_layout_));
  }

  const size_t length = samples_per_channel * num_channels;
  RTC_CHECK_LE(length, kMaxDataSizeSamples);
  if (data != nullptr) {
    memcpy(data_, data, sizeof(int16_t) * length);
    muted_ = false;
  } else {
    muted_ = true;
  }
}

void AudioFrame::CopyFrom(const AudioFrame& src) {
  if (this == &src)
    return;

  timestamp_ = src.timestamp_;
  elapsed_time_ms_ = src.elapsed_time_ms_;
  ntp_time_ms_ = src.ntp_time_ms_;
  packet_infos_ = src.packet_infos_;
  muted_ = src.muted();
  samples_per_channel_ = src.samples_per_channel_;
  sample_rate_hz_ = src.sample_rate_hz_;
  speech_type_ = src.speech_type_;
  vad_activity_ = src.vad_activity_;
  num_channels_ = src.num_channels_;
  channel_layout_ = src.channel_layout_;
  absolute_capture_timestamp_ms_ = src.absolute_capture_timestamp_ms();

  const size_t length = samples_per_channel_ * num_channels_;
  RTC_CHECK_LE(length, kMaxDataSizeSamples);
  if (!src.muted()) {
    memcpy(data_, src.data(), sizeof(int16_t) * length);
    muted_ = false;
  }
}

void AudioFrame::UpdateProfileTimeStamp() {
  profile_timestamp_ms_ = rtc::TimeMillis();
}

int64_t AudioFrame::ElapsedProfileTimeMs() const {
  if (profile_timestamp_ms_ == 0) {
    // Profiling has not been activated.
    return -1;
  }
  return rtc::TimeSince(profile_timestamp_ms_);
}

const int16_t* AudioFrame::data() const {
  return muted_ ? empty_data() : data_;
}

// TODO(henrik.lundin) Can we skip zeroing the buffer?
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
int16_t* AudioFrame::mutable_data() {
  if (muted_) {
    memset(data_, 0, kMaxDataSizeBytes);
    muted_ = false;
  }
  return data_;
}

void AudioFrame::Mute() {
  muted_ = true;
}

bool AudioFrame::muted() const {
  return muted_;
}

// static
const int16_t* AudioFrame::empty_data() {
  static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
  return &null_data[0];
}

}  // namespace webrtc
173
TMessagesProj/jni/voip/webrtc/api/audio/audio_frame.h
Normal file
@@ -0,0 +1,173 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_AUDIO_FRAME_H_
#define API_AUDIO_AUDIO_FRAME_H_

#include <stddef.h>
#include <stdint.h>

#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"

namespace webrtc {

/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
 * allows for adding and subtracting frames while keeping track of the
 * resulting states.
 *
 * Notes
 * - This is a de-facto api, not designed for external use. The AudioFrame
 *   class is in need of overhaul or even replacement, and anyone depending
 *   on it should be prepared for that.
 * - The total number of samples is samples_per_channel_ * num_channels_.
 * - Stereo data is interleaved starting with the left channel.
 */
class AudioFrame {
 public:
  // Using constexpr here causes linker errors unless the variable also has an
  // out-of-class definition, which is impractical in this header-only class.
  // (This makes no sense because it compiles as an enum value, which we most
  // certainly cannot take the address of, just fine.) C++17 introduces inline
  // variables which should allow us to switch to constexpr and keep this a
  // header-only class.
  enum : size_t {
    // Stereo, 32 kHz, 120 ms (2 * 32 * 120)
    // Stereo, 192 kHz, 20 ms (2 * 192 * 20)
    kMaxDataSizeSamples = 7680,
    kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t),
  };

  enum VADActivity { kVadActive = 0, kVadPassive = 1, kVadUnknown = 2 };
  enum SpeechType {
    kNormalSpeech = 0,
    kPLC = 1,
    kCNG = 2,
    kPLCCNG = 3,
    kCodecPLC = 5,
    kUndefined = 4
  };

  AudioFrame();

  AudioFrame(const AudioFrame&) = delete;
  AudioFrame& operator=(const AudioFrame&) = delete;

  // Resets all members to their default state.
  void Reset();
  // Same as Reset(), but leaves mute state unchanged. Muting a frame requires
  // the buffer to be zeroed on the next call to mutable_data(). Callers
  // intending to write to the buffer immediately after Reset() can instead use
  // ResetWithoutMuting() to skip this wasteful zeroing.
  void ResetWithoutMuting();

  void UpdateFrame(uint32_t timestamp,
                   const int16_t* data,
                   size_t samples_per_channel,
                   int sample_rate_hz,
                   SpeechType speech_type,
                   VADActivity vad_activity,
                   size_t num_channels = 1);

  void CopyFrom(const AudioFrame& src);

  // Sets a wall-time clock timestamp in milliseconds to be used for profiling
  // of time between two points in the audio chain.
  // Example:
  //   t0: UpdateProfileTimeStamp()
  //   t1: ElapsedProfileTimeMs() => t1 - t0 [msec]
  void UpdateProfileTimeStamp();
  // Returns the time difference between now and when UpdateProfileTimeStamp()
  // was last called. Returns -1 if UpdateProfileTimeStamp() has not yet been
  // called.
  int64_t ElapsedProfileTimeMs() const;

  // data() returns a zeroed static buffer if the frame is muted.
  // mutable_data() always returns a non-static buffer; the first call to
  // mutable_data() zeros the non-static buffer and marks the frame unmuted.
  const int16_t* data() const;
  int16_t* mutable_data();

  // Prefer to mute frames using AudioFrameOperations::Mute.
  void Mute();
  // Frame is muted by default.
  bool muted() const;

  size_t max_16bit_samples() const { return kMaxDataSizeSamples; }
  size_t samples_per_channel() const { return samples_per_channel_; }
  size_t num_channels() const { return num_channels_; }
  ChannelLayout channel_layout() const { return channel_layout_; }
  int sample_rate_hz() const { return sample_rate_hz_; }

  void set_absolute_capture_timestamp_ms(
      int64_t absolute_capture_time_stamp_ms) {
    absolute_capture_timestamp_ms_ = absolute_capture_time_stamp_ms;
  }

  absl::optional<int64_t> absolute_capture_timestamp_ms() const {
    return absolute_capture_timestamp_ms_;
  }

  // RTP timestamp of the first sample in the AudioFrame.
  uint32_t timestamp_ = 0;
  // Time since the first frame in milliseconds.
  // -1 represents an uninitialized value.
  int64_t elapsed_time_ms_ = -1;
  // NTP time of the estimated capture time in local timebase in milliseconds.
  // -1 represents an uninitialized value.
  int64_t ntp_time_ms_ = -1;
  size_t samples_per_channel_ = 0;
  int sample_rate_hz_ = 0;
  size_t num_channels_ = 0;
  ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE;
  SpeechType speech_type_ = kUndefined;
  VADActivity vad_activity_ = kVadUnknown;
  // Monotonically increasing timestamp intended for profiling of audio frames.
  // Typically used for measuring elapsed time between two different points in
  // the audio path. No lock is used to save resources and we are thread safe
  // by design.
  // TODO(nisse@webrtc.org): consider using absl::optional.
  int64_t profile_timestamp_ms_ = 0;

  // Information about packets used to assemble this audio frame. This is
  // needed by `SourceTracker` when the frame is delivered to the
  // RTCRtpReceiver's MediaStreamTrack, in order to implement
  // getContributingSources(). See:
  // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
  //
  // TODO(bugs.webrtc.org/10757):
  //   Note that this information might not be fully accurate since we
  //   currently don't have a proper way to track it across the audio sync
  //   buffer. The sync buffer is the small sample-holding buffer located
  //   after the audio decoder and before where samples are assembled into
  //   output frames.
  //
  // `RtpPacketInfos` may also be empty if the audio samples did not come from
  // RTP packets. E.g. if the audio were locally generated by packet loss
  // concealment, comfort noise generation, etc.
  RtpPacketInfos packet_infos_;

 private:
  // A permanently zeroed out buffer to represent muted frames. This is a
  // header-only class, so the only way to avoid creating a separate empty
  // buffer per translation unit is to wrap a static in an inline function.
  static const int16_t* empty_data();

  int16_t data_[kMaxDataSizeSamples];
  bool muted_ = true;

  // Absolute capture timestamp when this audio frame was originally captured.
  // This is only valid for audio frames captured on this machine. The absolute
  // capture timestamp of a received frame is found in `packet_infos_`.
  // This timestamp MUST be based on the same clock as rtc::TimeMillis().
  absl::optional<int64_t> absolute_capture_timestamp_ms_;
};

}  // namespace webrtc

#endif  // API_AUDIO_AUDIO_FRAME_H_
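A minimal usage sketch of the AudioFrame API above (not part of the original sources): UpdateFrame() copies interleaved PCM into the frame, passing a null data pointer marks it muted, and the profiling helpers bracket a section of the audio path. The 48 kHz / 10 ms buffer sizes are illustrative assumptions.

#include <cstdint>
#include <vector>

#include "api/audio/audio_frame.h"

void ExampleAudioFrameUsage() {
  // 10 ms of 48 kHz stereo: 480 samples per channel, interleaved L/R.
  constexpr int kSampleRateHz = 48000;
  constexpr size_t kSamplesPerChannel = kSampleRateHz / 100;
  std::vector<int16_t> pcm(kSamplesPerChannel * 2, 0);

  webrtc::AudioFrame frame;
  frame.UpdateProfileTimeStamp();  // t0 for profiling.
  frame.UpdateFrame(/*timestamp=*/0, pcm.data(), kSamplesPerChannel,
                    kSampleRateHz, webrtc::AudioFrame::kNormalSpeech,
                    webrtc::AudioFrame::kVadActive, /*num_channels=*/2);

  // Passing nullptr as data marks the frame muted; data() then returns the
  // zeroed static buffer instead of the member buffer.
  webrtc::AudioFrame silent;
  silent.UpdateFrame(/*timestamp=*/480, nullptr, kSamplesPerChannel,
                     kSampleRateHz, webrtc::AudioFrame::kCNG,
                     webrtc::AudioFrame::kVadPassive, /*num_channels=*/2);

  const int64_t elapsed_ms = frame.ElapsedProfileTimeMs();  // t1 - t0 [msec].
  (void)elapsed_ms;
}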
43
TMessagesProj/jni/voip/webrtc/api/audio/audio_frame_processor.h
Normal file
@@ -0,0 +1,43 @@
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_AUDIO_FRAME_PROCESSOR_H_
#define API_AUDIO_AUDIO_FRAME_PROCESSOR_H_

#include <functional>
#include <memory>

namespace webrtc {

class AudioFrame;

// If passed into PeerConnectionFactory, will be used for additional
// processing of captured audio frames, performed before encoding.
// Implementations must be thread-safe.
class AudioFrameProcessor {
 public:
  using OnAudioFrameCallback = std::function<void(std::unique_ptr<AudioFrame>)>;
  virtual ~AudioFrameProcessor() = default;

  // Processes the frame received from WebRTC, is called by WebRTC off the
  // realtime audio capturing path. AudioFrameProcessor must reply with
  // processed frames by calling `sink_callback` if it was provided in
  // SetSink() call. `sink_callback` can be called in the context of Process().
  virtual void Process(std::unique_ptr<AudioFrame> frame) = 0;

  // Atomically replaces the current sink with the new one. Before the first
  // call to this function, or if the provided `sink_callback` is nullptr,
  // processed frames are simply discarded.
  virtual void SetSink(OnAudioFrameCallback sink_callback) = 0;
};

}  // namespace webrtc

#endif  // API_AUDIO_AUDIO_FRAME_PROCESSOR_H_
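A sketch (not part of the original sources) of a pass-through AudioFrameProcessor. The class name is hypothetical, and a production implementation would also guard the sink with a lock or atomic to honor the thread-safety and atomic-replacement requirements stated in the interface.

#include <memory>
#include <utility>

#include "api/audio/audio_frame.h"
#include "api/audio/audio_frame_processor.h"

namespace example {

// Hypothetical processor that forwards every captured frame to the sink
// unchanged. A real implementation would modify the samples in Process().
class PassThroughProcessor : public webrtc::AudioFrameProcessor {
 public:
  void Process(std::unique_ptr<webrtc::AudioFrame> frame) override {
    if (sink_) {
      sink_(std::move(frame));  // Hand the (possibly modified) frame back.
    }
    // If no sink has been set yet, the frame is dropped, matching the
    // contract documented in the header above.
  }

  void SetSink(OnAudioFrameCallback sink_callback) override {
    sink_ = std::move(sink_callback);  // Needs synchronization in real code.
  }

 private:
  OnAudioFrameCallback sink_;
};

}  // namespace example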
80
TMessagesProj/jni/voip/webrtc/api/audio/audio_mixer.h
Normal file
@@ -0,0 +1,80 @@
/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_AUDIO_MIXER_H_
#define API_AUDIO_AUDIO_MIXER_H_

#include <memory>

#include "api/audio/audio_frame.h"
#include "rtc_base/ref_count.h"

namespace webrtc {

// WORK IN PROGRESS
// This class is under development and is not yet intended for use outside
// of WebRtc/Libjingle.
class AudioMixer : public rtc::RefCountInterface {
 public:
  // A callback class that all mixer participants must inherit from/implement.
  class Source {
   public:
    enum class AudioFrameInfo {
      kNormal,  // The samples in audio_frame are valid and should be used.
      kMuted,   // The samples in audio_frame should not be used, but
                // should be implicitly interpreted as zero. Other
                // fields in audio_frame may be read and should
                // contain meaningful values.
      kError,   // The audio_frame will not be used.
    };

    // Overwrites `audio_frame`. The data_ field is overwritten with
    // 10 ms of new audio (either 1 or 2 interleaved channels) at
    // `sample_rate_hz`. All fields in `audio_frame` must be updated.
    virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
                                                 AudioFrame* audio_frame) = 0;

    // A way for a mixer implementation to distinguish participants.
    virtual int Ssrc() const = 0;

    // A way for this source to say that GetAudioFrameWithInfo called
    // with this sample rate or higher will not cause quality loss.
    virtual int PreferredSampleRate() const = 0;

    virtual ~Source() {}
  };

  // Returns true if adding was successful. A source is never added
  // twice. Addition and removal can happen on different threads.
  virtual bool AddSource(Source* audio_source) = 0;

  // Removal is never attempted if a source has not been successfully
  // added to the mixer.
  virtual void RemoveSource(Source* audio_source) = 0;

  // Performs mixing by asking registered audio sources for audio. The
  // mixed result is placed in the provided AudioFrame. This method
  // will only be called from a single thread. The channels argument
  // specifies the number of channels of the mix result. The mixer
  // should mix at a rate that doesn't cause quality loss of the
  // sources' audio. The mixing rate is one of the rates listed in
  // AudioProcessing::NativeRate. All fields in
  // `audio_frame_for_mixing` must be updated.
  virtual void Mix(size_t number_of_channels,
                   AudioFrame* audio_frame_for_mixing) = 0;

 protected:
  // Since the mixer is reference counted, the destructor may be
  // called from any thread.
  ~AudioMixer() override {}
};
}  // namespace webrtc

#endif  // API_AUDIO_AUDIO_MIXER_H_
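A sketch (not from the original sources) of the minimum a mixer participant has to provide, against the Source interface above: 10 ms of audio at the requested rate, an SSRC, and a preferred sample rate. The class name and SSRC value are hypothetical.

#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"

namespace example {

// Hypothetical mixer participant that always contributes silence.
class SilentSource : public webrtc::AudioMixer::Source {
 public:
  AudioFrameInfo GetAudioFrameWithInfo(
      int sample_rate_hz, webrtc::AudioFrame* audio_frame) override {
    // 10 ms of mono at the requested rate; nullptr data leaves it muted.
    audio_frame->UpdateFrame(/*timestamp=*/0, /*data=*/nullptr,
                             /*samples_per_channel=*/sample_rate_hz / 100,
                             sample_rate_hz, webrtc::AudioFrame::kCNG,
                             webrtc::AudioFrame::kVadPassive,
                             /*num_channels=*/1);
    return AudioFrameInfo::kMuted;  // Samples should be treated as zero.
  }

  int Ssrc() const override { return 12345; }  // Illustrative SSRC only.
  int PreferredSampleRate() const override { return 48000; }
};

}  // namespace example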
282
TMessagesProj/jni/voip/webrtc/api/audio/channel_layout.cc
Normal file
@@ -0,0 +1,282 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio/channel_layout.h"

#include <stddef.h>

#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"

namespace webrtc {

static const int kLayoutToChannels[] = {
    0,  // CHANNEL_LAYOUT_NONE
    0,  // CHANNEL_LAYOUT_UNSUPPORTED
    1,  // CHANNEL_LAYOUT_MONO
    2,  // CHANNEL_LAYOUT_STEREO
    3,  // CHANNEL_LAYOUT_2_1
    3,  // CHANNEL_LAYOUT_SURROUND
    4,  // CHANNEL_LAYOUT_4_0
    4,  // CHANNEL_LAYOUT_2_2
    4,  // CHANNEL_LAYOUT_QUAD
    5,  // CHANNEL_LAYOUT_5_0
    6,  // CHANNEL_LAYOUT_5_1
    5,  // CHANNEL_LAYOUT_5_0_BACK
    6,  // CHANNEL_LAYOUT_5_1_BACK
    7,  // CHANNEL_LAYOUT_7_0
    8,  // CHANNEL_LAYOUT_7_1
    8,  // CHANNEL_LAYOUT_7_1_WIDE
    2,  // CHANNEL_LAYOUT_STEREO_DOWNMIX
    3,  // CHANNEL_LAYOUT_2POINT1
    4,  // CHANNEL_LAYOUT_3_1
    5,  // CHANNEL_LAYOUT_4_1
    6,  // CHANNEL_LAYOUT_6_0
    6,  // CHANNEL_LAYOUT_6_0_FRONT
    6,  // CHANNEL_LAYOUT_HEXAGONAL
    7,  // CHANNEL_LAYOUT_6_1
    7,  // CHANNEL_LAYOUT_6_1_BACK
    7,  // CHANNEL_LAYOUT_6_1_FRONT
    7,  // CHANNEL_LAYOUT_7_0_FRONT
    8,  // CHANNEL_LAYOUT_7_1_WIDE_BACK
    8,  // CHANNEL_LAYOUT_OCTAGONAL
    0,  // CHANNEL_LAYOUT_DISCRETE
    3,  // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
    5,  // CHANNEL_LAYOUT_4_1_QUAD_SIDE
    0,  // CHANNEL_LAYOUT_BITSTREAM
};

// The channel orderings for each layout as specified by FFmpeg. Each value
// represents the index of each channel in each layout. Values of -1 mean the
// channel at that index is not used for that layout. For example, the left
// side surround sound channel in FFmpeg's 5.1 layout is in the 5th position
// (because the order is L, R, C, LFE, LS, RS), so
// kChannelOrderings[CHANNEL_LAYOUT_5_1][SIDE_LEFT] = 4;
static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = {
    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR

    // CHANNEL_LAYOUT_NONE
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_UNSUPPORTED
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_MONO
    {-1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_STEREO
    {0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_2_1
    {0, 1, -1, -1, -1, -1, -1, -1, 2, -1, -1},

    // CHANNEL_LAYOUT_SURROUND
    {0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_4_0
    {0, 1, 2, -1, -1, -1, -1, -1, 3, -1, -1},

    // CHANNEL_LAYOUT_2_2
    {0, 1, -1, -1, -1, -1, -1, -1, -1, 2, 3},

    // CHANNEL_LAYOUT_QUAD
    {0, 1, -1, -1, 2, 3, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_5_0
    {0, 1, 2, -1, -1, -1, -1, -1, -1, 3, 4},

    // CHANNEL_LAYOUT_5_1
    {0, 1, 2, 3, -1, -1, -1, -1, -1, 4, 5},

    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR

    // CHANNEL_LAYOUT_5_0_BACK
    {0, 1, 2, -1, 3, 4, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_5_1_BACK
    {0, 1, 2, 3, 4, 5, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_7_0
    {0, 1, 2, -1, 5, 6, -1, -1, -1, 3, 4},

    // CHANNEL_LAYOUT_7_1
    {0, 1, 2, 3, 6, 7, -1, -1, -1, 4, 5},

    // CHANNEL_LAYOUT_7_1_WIDE
    {0, 1, 2, 3, -1, -1, 6, 7, -1, 4, 5},

    // CHANNEL_LAYOUT_STEREO_DOWNMIX
    {0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_2POINT1
    {0, 1, -1, 2, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_3_1
    {0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_4_1
    {0, 1, 2, 4, -1, -1, -1, -1, 3, -1, -1},

    // CHANNEL_LAYOUT_6_0
    {0, 1, 2, -1, -1, -1, -1, -1, 5, 3, 4},

    // CHANNEL_LAYOUT_6_0_FRONT
    {0, 1, -1, -1, -1, -1, 4, 5, -1, 2, 3},

    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR

    // CHANNEL_LAYOUT_HEXAGONAL
    {0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1},

    // CHANNEL_LAYOUT_6_1
    {0, 1, 2, 3, -1, -1, -1, -1, 6, 4, 5},

    // CHANNEL_LAYOUT_6_1_BACK
    {0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1},

    // CHANNEL_LAYOUT_6_1_FRONT
    {0, 1, -1, 6, -1, -1, 4, 5, -1, 2, 3},

    // CHANNEL_LAYOUT_7_0_FRONT
    {0, 1, 2, -1, -1, -1, 5, 6, -1, 3, 4},

    // CHANNEL_LAYOUT_7_1_WIDE_BACK
    {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1},

    // CHANNEL_LAYOUT_OCTAGONAL
    {0, 1, 2, -1, 5, 6, -1, -1, 7, 3, 4},

    // CHANNEL_LAYOUT_DISCRETE
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
    {0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1},

    // CHANNEL_LAYOUT_4_1_QUAD_SIDE
    {0, 1, -1, 4, -1, -1, -1, -1, -1, 2, 3},

    // CHANNEL_LAYOUT_BITSTREAM
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},

    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
};

int ChannelLayoutToChannelCount(ChannelLayout layout) {
  RTC_DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
  RTC_DCHECK_LE(kLayoutToChannels[layout], kMaxConcurrentChannels);
  return kLayoutToChannels[layout];
}

// Converts a channel count into a channel layout.
ChannelLayout GuessChannelLayout(int channels) {
  switch (channels) {
    case 1:
      return CHANNEL_LAYOUT_MONO;
    case 2:
      return CHANNEL_LAYOUT_STEREO;
    case 3:
      return CHANNEL_LAYOUT_SURROUND;
    case 4:
      return CHANNEL_LAYOUT_QUAD;
    case 5:
      return CHANNEL_LAYOUT_5_0;
    case 6:
      return CHANNEL_LAYOUT_5_1;
    case 7:
      return CHANNEL_LAYOUT_6_1;
    case 8:
      return CHANNEL_LAYOUT_7_1;
    default:
      RTC_DLOG(LS_WARNING) << "Unsupported channel count: " << channels;
  }
  return CHANNEL_LAYOUT_UNSUPPORTED;
}

int ChannelOrder(ChannelLayout layout, Channels channel) {
  RTC_DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
  RTC_DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
  return kChannelOrderings[layout][channel];
}

const char* ChannelLayoutToString(ChannelLayout layout) {
  switch (layout) {
    case CHANNEL_LAYOUT_NONE:
      return "NONE";
    case CHANNEL_LAYOUT_UNSUPPORTED:
      return "UNSUPPORTED";
    case CHANNEL_LAYOUT_MONO:
      return "MONO";
    case CHANNEL_LAYOUT_STEREO:
      return "STEREO";
    case CHANNEL_LAYOUT_2_1:
      return "2.1";
    case CHANNEL_LAYOUT_SURROUND:
      return "SURROUND";
    case CHANNEL_LAYOUT_4_0:
      return "4.0";
    case CHANNEL_LAYOUT_2_2:
      return "QUAD_SIDE";
    case CHANNEL_LAYOUT_QUAD:
      return "QUAD";
    case CHANNEL_LAYOUT_5_0:
      return "5.0";
    case CHANNEL_LAYOUT_5_1:
      return "5.1";
    case CHANNEL_LAYOUT_5_0_BACK:
      return "5.0_BACK";
    case CHANNEL_LAYOUT_5_1_BACK:
      return "5.1_BACK";
    case CHANNEL_LAYOUT_7_0:
      return "7.0";
    case CHANNEL_LAYOUT_7_1:
      return "7.1";
    case CHANNEL_LAYOUT_7_1_WIDE:
      return "7.1_WIDE";
    case CHANNEL_LAYOUT_STEREO_DOWNMIX:
      return "STEREO_DOWNMIX";
    case CHANNEL_LAYOUT_2POINT1:
      return "2POINT1";
    case CHANNEL_LAYOUT_3_1:
      return "3.1";
    case CHANNEL_LAYOUT_4_1:
      return "4.1";
    case CHANNEL_LAYOUT_6_0:
      return "6.0";
    case CHANNEL_LAYOUT_6_0_FRONT:
      return "6.0_FRONT";
    case CHANNEL_LAYOUT_HEXAGONAL:
      return "HEXAGONAL";
    case CHANNEL_LAYOUT_6_1:
      return "6.1";
    case CHANNEL_LAYOUT_6_1_BACK:
      return "6.1_BACK";
    case CHANNEL_LAYOUT_6_1_FRONT:
      return "6.1_FRONT";
    case CHANNEL_LAYOUT_7_0_FRONT:
      return "7.0_FRONT";
    case CHANNEL_LAYOUT_7_1_WIDE_BACK:
      return "7.1_WIDE_BACK";
    case CHANNEL_LAYOUT_OCTAGONAL:
      return "OCTAGONAL";
    case CHANNEL_LAYOUT_DISCRETE:
      return "DISCRETE";
    case CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
      return "STEREO_AND_KEYBOARD_MIC";
    case CHANNEL_LAYOUT_4_1_QUAD_SIDE:
      return "4.1_QUAD_SIDE";
    case CHANNEL_LAYOUT_BITSTREAM:
      return "BITSTREAM";
  }
  RTC_DCHECK_NOTREACHED() << "Invalid channel layout provided: " << layout;
  return "";
}

}  // namespace webrtc
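A short sketch (not from the original sources) of how the lookup helpers above are combined to find a channel in an interleaved buffer; the function name and buffer contents are illustrative only.

#include <cstdint>
#include <vector>

#include "api/audio/channel_layout.h"

// Reads the first front-left sample from an interleaved 5.1 buffer.
int16_t FirstFrontLeftSample(const std::vector<int16_t>& interleaved) {
  const webrtc::ChannelLayout layout = webrtc::GuessChannelLayout(6);  // 5.1
  const int num_channels = webrtc::ChannelLayoutToChannelCount(layout);
  const int fl_index = webrtc::ChannelOrder(layout, webrtc::LEFT);
  if (fl_index < 0 || interleaved.size() < static_cast<size_t>(num_channels)) {
    return 0;  // No front-left channel in this layout, or not enough data.
  }
  // In an interleaved stream, frame n of channel c lives at
  // index n * num_channels + c; here n = 0.
  return interleaved[fl_index];
}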
165
TMessagesProj/jni/voip/webrtc/api/audio/channel_layout.h
Normal file
@@ -0,0 +1,165 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_CHANNEL_LAYOUT_H_
#define API_AUDIO_CHANNEL_LAYOUT_H_

namespace webrtc {

// This file is derived from Chromium's base/channel_layout.h.

// Enumerates the various representations of the ordering of audio channels.
// Logged to UMA, so never reuse a value, always add new/greater ones!
enum ChannelLayout {
  CHANNEL_LAYOUT_NONE = 0,
  CHANNEL_LAYOUT_UNSUPPORTED = 1,

  // Front C
  CHANNEL_LAYOUT_MONO = 2,

  // Front L, Front R
  CHANNEL_LAYOUT_STEREO = 3,

  // Front L, Front R, Back C
  CHANNEL_LAYOUT_2_1 = 4,

  // Front L, Front R, Front C
  CHANNEL_LAYOUT_SURROUND = 5,

  // Front L, Front R, Front C, Back C
  CHANNEL_LAYOUT_4_0 = 6,

  // Front L, Front R, Side L, Side R
  CHANNEL_LAYOUT_2_2 = 7,

  // Front L, Front R, Back L, Back R
  CHANNEL_LAYOUT_QUAD = 8,

  // Front L, Front R, Front C, Side L, Side R
  CHANNEL_LAYOUT_5_0 = 9,

  // Front L, Front R, Front C, LFE, Side L, Side R
  CHANNEL_LAYOUT_5_1 = 10,

  // Front L, Front R, Front C, Back L, Back R
  CHANNEL_LAYOUT_5_0_BACK = 11,

  // Front L, Front R, Front C, LFE, Back L, Back R
  CHANNEL_LAYOUT_5_1_BACK = 12,

  // Front L, Front R, Front C, Side L, Side R, Back L, Back R
  CHANNEL_LAYOUT_7_0 = 13,

  // Front L, Front R, Front C, LFE, Side L, Side R, Back L, Back R
  CHANNEL_LAYOUT_7_1 = 14,

  // Front L, Front R, Front C, LFE, Side L, Side R, Front LofC, Front RofC
  CHANNEL_LAYOUT_7_1_WIDE = 15,

  // Stereo L, Stereo R
  CHANNEL_LAYOUT_STEREO_DOWNMIX = 16,

  // Stereo L, Stereo R, LFE
  CHANNEL_LAYOUT_2POINT1 = 17,

  // Stereo L, Stereo R, Front C, LFE
  CHANNEL_LAYOUT_3_1 = 18,

  // Stereo L, Stereo R, Front C, Rear C, LFE
  CHANNEL_LAYOUT_4_1 = 19,

  // Stereo L, Stereo R, Front C, Side L, Side R, Back C
  CHANNEL_LAYOUT_6_0 = 20,

  // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
  CHANNEL_LAYOUT_6_0_FRONT = 21,

  // Stereo L, Stereo R, Front C, Rear L, Rear R, Rear C
  CHANNEL_LAYOUT_HEXAGONAL = 22,

  // Stereo L, Stereo R, Front C, LFE, Side L, Side R, Rear Center
  CHANNEL_LAYOUT_6_1 = 23,

  // Stereo L, Stereo R, Front C, LFE, Back L, Back R, Rear Center
  CHANNEL_LAYOUT_6_1_BACK = 24,

  // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
  CHANNEL_LAYOUT_6_1_FRONT = 25,

  // Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
  CHANNEL_LAYOUT_7_0_FRONT = 26,

  // Front L, Front R, Front C, LFE, Back L, Back R, Front LofC, Front RofC
  CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,

  // Front L, Front R, Front C, Side L, Side R, Rear L, Back R, Back C.
  CHANNEL_LAYOUT_OCTAGONAL = 28,

  // Channels are not explicitly mapped to speakers.
  CHANNEL_LAYOUT_DISCRETE = 29,

  // Front L, Front R, Front C. Front C contains the keyboard mic audio. This
  // layout is only intended for input for WebRTC. The Front C channel
  // is stripped away in the WebRTC audio input pipeline and never seen outside
  // of that.
  CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC = 30,

  // Front L, Front R, Side L, Side R, LFE
  CHANNEL_LAYOUT_4_1_QUAD_SIDE = 31,

  // Actual channel layout is specified in the bitstream and the actual channel
  // count is unknown at Chromium media pipeline level (useful for audio
  // pass-through mode).
  CHANNEL_LAYOUT_BITSTREAM = 32,

  // Max value, must always equal the largest entry ever logged.
  CHANNEL_LAYOUT_MAX = CHANNEL_LAYOUT_BITSTREAM
};

// Note: Do not reorder or reassign these values; other code depends on their
// ordering to operate correctly. E.g., CoreAudio channel layout computations.
enum Channels {
  LEFT = 0,
  RIGHT,
  CENTER,
  LFE,
  BACK_LEFT,
  BACK_RIGHT,
  LEFT_OF_CENTER,
  RIGHT_OF_CENTER,
  BACK_CENTER,
  SIDE_LEFT,
  SIDE_RIGHT,
  CHANNELS_MAX =
      SIDE_RIGHT,  // Must always equal the largest value ever logged.
};

// The maximum number of concurrently active channels for all possible layouts.
// ChannelLayoutToChannelCount() will never return a value higher than this.
constexpr int kMaxConcurrentChannels = 8;

// Returns the expected channel position in an interleaved stream. Values of -1
// mean the channel at that index is not used for that layout. Values range
// from 0 to ChannelLayoutToChannelCount(layout) - 1.
int ChannelOrder(ChannelLayout layout, Channels channel);

// Returns the number of channels in a given ChannelLayout.
int ChannelLayoutToChannelCount(ChannelLayout layout);

// Given the number of channels, return the best layout,
// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
ChannelLayout GuessChannelLayout(int channels);

// Returns a string representation of the channel layout.
const char* ChannelLayoutToString(ChannelLayout layout);

}  // namespace webrtc

#endif  // API_AUDIO_CHANNEL_LAYOUT_H_
278
TMessagesProj/jni/voip/webrtc/api/audio/echo_canceller3_config.cc
Normal file
@@ -0,0 +1,278 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "api/audio/echo_canceller3_config.h"

#include <algorithm>
#include <cmath>

#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_minmax.h"

namespace webrtc {
namespace {
bool Limit(float* value, float min, float max) {
  float clamped = rtc::SafeClamp(*value, min, max);
  clamped = std::isfinite(clamped) ? clamped : min;
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

bool Limit(size_t* value, size_t min, size_t max) {
  size_t clamped = rtc::SafeClamp(*value, min, max);
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

bool Limit(int* value, int min, int max) {
  int clamped = rtc::SafeClamp(*value, min, max);
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

bool FloorLimit(size_t* value, size_t min) {
  size_t clamped = *value >= min ? *value : min;
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

}  // namespace

EchoCanceller3Config::EchoCanceller3Config() = default;
EchoCanceller3Config::EchoCanceller3Config(const EchoCanceller3Config& e) =
    default;
EchoCanceller3Config& EchoCanceller3Config::operator=(
    const EchoCanceller3Config& e) = default;
EchoCanceller3Config::Delay::Delay() = default;
EchoCanceller3Config::Delay::Delay(const EchoCanceller3Config::Delay& e) =
    default;
EchoCanceller3Config::Delay& EchoCanceller3Config::Delay::operator=(
    const Delay& e) = default;

EchoCanceller3Config::EchoModel::EchoModel() = default;
EchoCanceller3Config::EchoModel::EchoModel(
    const EchoCanceller3Config::EchoModel& e) = default;
EchoCanceller3Config::EchoModel& EchoCanceller3Config::EchoModel::operator=(
    const EchoModel& e) = default;

EchoCanceller3Config::Suppressor::Suppressor() = default;
EchoCanceller3Config::Suppressor::Suppressor(
    const EchoCanceller3Config::Suppressor& e) = default;
EchoCanceller3Config::Suppressor& EchoCanceller3Config::Suppressor::operator=(
    const Suppressor& e) = default;

EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
    float enr_transparent,
    float enr_suppress,
    float emr_transparent)
    : enr_transparent(enr_transparent),
      enr_suppress(enr_suppress),
      emr_transparent(emr_transparent) {}
EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
    const EchoCanceller3Config::Suppressor::MaskingThresholds& e) = default;
EchoCanceller3Config::Suppressor::MaskingThresholds&
EchoCanceller3Config::Suppressor::MaskingThresholds::operator=(
    const MaskingThresholds& e) = default;

EchoCanceller3Config::Suppressor::Tuning::Tuning(MaskingThresholds mask_lf,
                                                 MaskingThresholds mask_hf,
                                                 float max_inc_factor,
                                                 float max_dec_factor_lf)
    : mask_lf(mask_lf),
      mask_hf(mask_hf),
      max_inc_factor(max_inc_factor),
      max_dec_factor_lf(max_dec_factor_lf) {}
EchoCanceller3Config::Suppressor::Tuning::Tuning(
    const EchoCanceller3Config::Suppressor::Tuning& e) = default;
EchoCanceller3Config::Suppressor::Tuning&
EchoCanceller3Config::Suppressor::Tuning::operator=(const Tuning& e) = default;

bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
  RTC_DCHECK(config);
  EchoCanceller3Config* c = config;
  bool res = true;

  if (c->delay.down_sampling_factor != 4 &&
      c->delay.down_sampling_factor != 8) {
    c->delay.down_sampling_factor = 4;
    res = false;
  }

  res = res & Limit(&c->delay.default_delay, 0, 5000);
  res = res & Limit(&c->delay.num_filters, 0, 5000);
  res = res & Limit(&c->delay.delay_headroom_samples, 0, 5000);
  res = res & Limit(&c->delay.hysteresis_limit_blocks, 0, 5000);
  res = res & Limit(&c->delay.fixed_capture_delay_samples, 0, 5000);
  res = res & Limit(&c->delay.delay_estimate_smoothing, 0.f, 1.f);
  res = res & Limit(&c->delay.delay_candidate_detection_threshold, 0.f, 1.f);
  res = res & Limit(&c->delay.delay_selection_thresholds.initial, 1, 250);
  res = res & Limit(&c->delay.delay_selection_thresholds.converged, 1, 250);

  res = res & FloorLimit(&c->filter.refined.length_blocks, 1);
  res = res & Limit(&c->filter.refined.leakage_converged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined.leakage_diverged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined.error_floor, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined.error_ceil, 0.f, 100000000.f);
  res = res & Limit(&c->filter.refined.noise_gate, 0.f, 100000000.f);

  res = res & FloorLimit(&c->filter.refined_initial.length_blocks, 1);
  res = res & Limit(&c->filter.refined_initial.leakage_converged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined_initial.leakage_diverged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined_initial.error_floor, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined_initial.error_ceil, 0.f, 100000000.f);
  res = res & Limit(&c->filter.refined_initial.noise_gate, 0.f, 100000000.f);

  if (c->filter.refined.length_blocks <
      c->filter.refined_initial.length_blocks) {
    c->filter.refined_initial.length_blocks = c->filter.refined.length_blocks;
    res = false;
  }

  res = res & FloorLimit(&c->filter.coarse.length_blocks, 1);
  res = res & Limit(&c->filter.coarse.rate, 0.f, 1.f);
  res = res & Limit(&c->filter.coarse.noise_gate, 0.f, 100000000.f);

  res = res & FloorLimit(&c->filter.coarse_initial.length_blocks, 1);
  res = res & Limit(&c->filter.coarse_initial.rate, 0.f, 1.f);
  res = res & Limit(&c->filter.coarse_initial.noise_gate, 0.f, 100000000.f);

  if (c->filter.coarse.length_blocks < c->filter.coarse_initial.length_blocks) {
    c->filter.coarse_initial.length_blocks = c->filter.coarse.length_blocks;
    res = false;
  }

  res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000);
  res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f);
  res = res & Limit(&c->filter.coarse_reset_hangover_blocks, 0, 250000);

  res = res & Limit(&c->erle.min, 1.f, 100000.f);
  res = res & Limit(&c->erle.max_l, 1.f, 100000.f);
  res = res & Limit(&c->erle.max_h, 1.f, 100000.f);
  if (c->erle.min > c->erle.max_l || c->erle.min > c->erle.max_h) {
    c->erle.min = std::min(c->erle.max_l, c->erle.max_h);
    res = false;
  }
  res = res & Limit(&c->erle.num_sections, 1, c->filter.refined.length_blocks);

  res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f);
  res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
  res = res & Limit(&c->ep_strength.nearend_len, -1.0f, 1.0f);

  res =
      res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
  res = res &
        Limit(&c->echo_audibility.normal_render_limit, 0.f, 32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.floor_power, 0.f, 32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.audibility_threshold_lf, 0.f,
                    32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.audibility_threshold_mf, 0.f,
                    32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.audibility_threshold_hf, 0.f,
                    32768.f * 32768.f);

  res = res &
        Limit(&c->render_levels.active_render_limit, 0.f, 32768.f * 32768.f);
  res = res & Limit(&c->render_levels.poor_excitation_render_limit, 0.f,
                    32768.f * 32768.f);
  res = res & Limit(&c->render_levels.poor_excitation_render_limit_ds8, 0.f,
                    32768.f * 32768.f);

  res = res & Limit(&c->echo_model.noise_floor_hold, 0, 1000);
  res = res & Limit(&c->echo_model.min_noise_floor_power, 0, 2000000.f);
  res = res & Limit(&c->echo_model.stationary_gate_slope, 0, 1000000.f);
  res = res & Limit(&c->echo_model.noise_gate_power, 0, 1000000.f);
  res = res & Limit(&c->echo_model.noise_gate_slope, 0, 1000000.f);
  res = res & Limit(&c->echo_model.render_pre_window_size, 0, 100);
  res = res & Limit(&c->echo_model.render_post_window_size, 0, 100);

  res = res & Limit(&c->comfort_noise.noise_floor_dbfs, -200.f, 0.f);

  res = res & Limit(&c->suppressor.nearend_average_blocks, 1, 5000);

  res = res &
        Limit(&c->suppressor.normal_tuning.mask_lf.enr_transparent, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_lf.enr_suppress, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_lf.emr_transparent, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_hf.enr_transparent, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_hf.enr_suppress, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_hf.emr_transparent, 0.f, 100.f);
  res = res & Limit(&c->suppressor.normal_tuning.max_inc_factor, 0.f, 100.f);
  res = res & Limit(&c->suppressor.normal_tuning.max_dec_factor_lf, 0.f, 100.f);

  res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.enr_transparent, 0.f,
                    100.f);
  res = res &
        Limit(&c->suppressor.nearend_tuning.mask_lf.enr_suppress, 0.f, 100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.emr_transparent, 0.f,
                    100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.enr_transparent, 0.f,
                    100.f);
  res = res &
        Limit(&c->suppressor.nearend_tuning.mask_hf.enr_suppress, 0.f, 100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.emr_transparent, 0.f,
                    100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.max_inc_factor, 0.f, 100.f);
  res =
      res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f);

  res = res & Limit(&c->suppressor.last_permanent_lf_smoothing_band, 0, 64);
  res = res & Limit(&c->suppressor.last_lf_smoothing_band, 0, 64);
  res = res & Limit(&c->suppressor.last_lf_band, 0, 63);
  res = res &
        Limit(&c->suppressor.first_hf_band, c->suppressor.last_lf_band + 1, 64);

  res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold,
                    0.f, 1000000.f);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold,
                    0.f, 1000000.f);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.hold_duration, 0,
                    10000);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.trigger_threshold,
                    0, 10000);

  res = res &
        Limit(&c->suppressor.subband_nearend_detection.nearend_average_blocks,
              1, 1024);
  res =
      res & Limit(&c->suppressor.subband_nearend_detection.subband1.low, 0, 65);
  res = res & Limit(&c->suppressor.subband_nearend_detection.subband1.high,
                    c->suppressor.subband_nearend_detection.subband1.low, 65);
  res =
      res & Limit(&c->suppressor.subband_nearend_detection.subband2.low, 0, 65);
  res = res & Limit(&c->suppressor.subband_nearend_detection.subband2.high,
                    c->suppressor.subband_nearend_detection.subband2.low, 65);
  res = res & Limit(&c->suppressor.subband_nearend_detection.nearend_threshold,
                    0.f, 1.e24f);
  res = res & Limit(&c->suppressor.subband_nearend_detection.snr_threshold, 0.f,
                    1.e24f);

  res = res & Limit(&c->suppressor.high_bands_suppression.enr_threshold, 0.f,
                    1000000.f);
  res = res & Limit(&c->suppressor.high_bands_suppression.max_gain_during_echo,
                    0.f, 1.f);
  res = res & Limit(&c->suppressor.high_bands_suppression
                         .anti_howling_activation_threshold,
                    0.f, 32768.f * 32768.f);
  res = res & Limit(&c->suppressor.high_bands_suppression.anti_howling_gain,
                    0.f, 1.f);

  res = res & Limit(&c->suppressor.floor_first_increase, 0.f, 1000000.f);

  return res;
}
}  // namespace webrtc
250
TMessagesProj/jni/voip/webrtc/api/audio/echo_canceller3_config.h
Normal file
@@ -0,0 +1,250 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
#define API_AUDIO_ECHO_CANCELLER3_CONFIG_H_

#include <stddef.h>  // size_t

#include "rtc_base/system/rtc_export.h"

namespace webrtc {

// Configuration struct for EchoCanceller3
struct RTC_EXPORT EchoCanceller3Config {
  // Checks and updates the config parameters to lie within (mostly) reasonable
  // ranges. Returns true if and only if the config did not need to be changed.
  static bool Validate(EchoCanceller3Config* config);

  EchoCanceller3Config();
  EchoCanceller3Config(const EchoCanceller3Config& e);
  EchoCanceller3Config& operator=(const EchoCanceller3Config& other);

  struct Buffering {
    size_t excess_render_detection_interval_blocks = 250;
    size_t max_allowed_excess_render_blocks = 8;
  } buffering;

  struct Delay {
    Delay();
    Delay(const Delay& e);
    Delay& operator=(const Delay& e);
    size_t default_delay = 5;
    size_t down_sampling_factor = 4;
    size_t num_filters = 5;
    size_t delay_headroom_samples = 32;
    size_t hysteresis_limit_blocks = 1;
    size_t fixed_capture_delay_samples = 0;
    float delay_estimate_smoothing = 0.7f;
    float delay_estimate_smoothing_delay_found = 0.7f;
    float delay_candidate_detection_threshold = 0.2f;
    struct DelaySelectionThresholds {
      int initial;
      int converged;
    } delay_selection_thresholds = {5, 20};
    bool use_external_delay_estimator = false;
    bool log_warning_on_delay_changes = false;
    struct AlignmentMixing {
      bool downmix;
      bool adaptive_selection;
      float activity_power_threshold;
      bool prefer_first_two_channels;
    };
    AlignmentMixing render_alignment_mixing = {false, true, 10000.f, true};
    AlignmentMixing capture_alignment_mixing = {false, true, 10000.f, false};
    bool detect_pre_echo = true;
  } delay;

  struct Filter {
    struct RefinedConfiguration {
      size_t length_blocks;
      float leakage_converged;
      float leakage_diverged;
      float error_floor;
      float error_ceil;
      float noise_gate;
    };

    struct CoarseConfiguration {
      size_t length_blocks;
      float rate;
      float noise_gate;
    };

    RefinedConfiguration refined = {13,     0.00005f, 0.05f,
                                    0.001f, 2.f,      20075344.f};
    CoarseConfiguration coarse = {13, 0.7f, 20075344.f};

    RefinedConfiguration refined_initial = {12,     0.005f, 0.5f,
                                            0.001f, 2.f,    20075344.f};
    CoarseConfiguration coarse_initial = {12, 0.9f, 20075344.f};

    size_t config_change_duration_blocks = 250;
    float initial_state_seconds = 2.5f;
    int coarse_reset_hangover_blocks = 25;
    bool conservative_initial_phase = false;
    bool enable_coarse_filter_output_usage = true;
    bool use_linear_filter = true;
    bool high_pass_filter_echo_reference = false;
    bool export_linear_aec_output = false;
  } filter;

  struct Erle {
    float min = 1.f;
    float max_l = 4.f;
    float max_h = 1.5f;
    bool onset_detection = true;
    size_t num_sections = 1;
    bool clamp_quality_estimate_to_zero = true;
    bool clamp_quality_estimate_to_one = true;
  } erle;

  struct EpStrength {
    float default_gain = 1.f;
    float default_len = 0.83f;
    float nearend_len = 0.83f;
    bool echo_can_saturate = true;
    bool bounded_erl = false;
    bool erle_onset_compensation_in_dominant_nearend = false;
    bool use_conservative_tail_frequency_response = true;
  } ep_strength;

  struct EchoAudibility {
    float low_render_limit = 4 * 64.f;
    float normal_render_limit = 64.f;
    float floor_power = 2 * 64.f;
    float audibility_threshold_lf = 10;
    float audibility_threshold_mf = 10;
    float audibility_threshold_hf = 10;
    bool use_stationarity_properties = false;
    bool use_stationarity_properties_at_init = false;
  } echo_audibility;

  struct RenderLevels {
    float active_render_limit = 100.f;
    float poor_excitation_render_limit = 150.f;
    float poor_excitation_render_limit_ds8 = 20.f;
    float render_power_gain_db = 0.f;
  } render_levels;

  struct EchoRemovalControl {
    bool has_clock_drift = false;
    bool linear_and_stable_echo_path = false;
  } echo_removal_control;

  struct EchoModel {
    EchoModel();
    EchoModel(const EchoModel& e);
    EchoModel& operator=(const EchoModel& e);
    size_t noise_floor_hold = 50;
    float min_noise_floor_power = 1638400.f;
    float stationary_gate_slope = 10.f;
    float noise_gate_power = 27509.42f;
    float noise_gate_slope = 0.3f;
    size_t render_pre_window_size = 1;
    size_t render_post_window_size = 1;
    bool model_reverb_in_nonlinear_mode = true;
  } echo_model;

  struct ComfortNoise {
    float noise_floor_dbfs = -96.03406f;
  } comfort_noise;

  struct Suppressor {
    Suppressor();
    Suppressor(const Suppressor& e);
    Suppressor& operator=(const Suppressor& e);

    size_t nearend_average_blocks = 4;

    struct MaskingThresholds {
      MaskingThresholds(float enr_transparent,
                        float enr_suppress,
                        float emr_transparent);
      MaskingThresholds(const MaskingThresholds& e);
      MaskingThresholds& operator=(const MaskingThresholds& e);
      float enr_transparent;
      float enr_suppress;
      float emr_transparent;
    };

    struct Tuning {
      Tuning(MaskingThresholds mask_lf,
             MaskingThresholds mask_hf,
             float max_inc_factor,
             float max_dec_factor_lf);
      Tuning(const Tuning& e);
      Tuning& operator=(const Tuning& e);
      MaskingThresholds mask_lf;
      MaskingThresholds mask_hf;
      float max_inc_factor;
      float max_dec_factor_lf;
    };

    Tuning normal_tuning = Tuning(MaskingThresholds(.3f, .4f, .3f),
                                  MaskingThresholds(.07f, .1f, .3f),
                                  2.0f,
                                  0.25f);
    Tuning nearend_tuning = Tuning(MaskingThresholds(1.09f, 1.1f, .3f),
                                   MaskingThresholds(.1f, .3f, .3f),
                                   2.0f,
                                   0.25f);

    bool lf_smoothing_during_initial_phase = true;
    int last_permanent_lf_smoothing_band = 0;
    int last_lf_smoothing_band = 5;
    int last_lf_band = 5;
    int first_hf_band = 8;

    struct DominantNearendDetection {
      float enr_threshold = .25f;
      float enr_exit_threshold = 10.f;
      float snr_threshold = 30.f;
      int hold_duration = 50;
      int trigger_threshold = 12;
      bool use_during_initial_phase = true;
      bool use_unbounded_echo_spectrum = true;
    } dominant_nearend_detection;

    struct SubbandNearendDetection {
      size_t nearend_average_blocks = 1;
      struct SubbandRegion {
        size_t low;
        size_t high;
      };
      SubbandRegion subband1 = {1, 1};
      SubbandRegion subband2 = {1, 1};
      float nearend_threshold = 1.f;
      float snr_threshold = 1.f;
    } subband_nearend_detection;

    bool use_subband_nearend_detection = false;

    struct HighBandsSuppression {
      float enr_threshold = 1.f;
      float max_gain_during_echo = 1.f;
      float anti_howling_activation_threshold = 400.f;
      float anti_howling_gain = 1.f;
    } high_bands_suppression;

    float floor_first_increase = 0.00001f;
    bool conservative_hf_suppression = false;
  } suppressor;

  struct MultiChannel {
    bool detect_stereo_content = true;
    float stereo_detection_threshold = 0.0f;
    int stereo_detection_timeout_threshold_seconds = 300;
    float stereo_detection_hysteresis_seconds = 2.0f;
  } multi_channel;
};
}  // namespace webrtc

#endif  // API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
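A sketch (not from the original sources) of building a non-default AEC3 config and letting Validate() clamp it, as the struct above documents. The specific field values are illustrative assumptions, not recommended tunings.

#include "api/audio/echo_canceller3_config.h"
#include "rtc_base/logging.h"

// Builds an AEC3 config with a longer refined filter and extra default delay,
// then clamps it into the supported ranges.
webrtc::EchoCanceller3Config MakeTunedAec3Config() {
  webrtc::EchoCanceller3Config config;
  config.filter.refined.length_blocks = 20;  // Assumed longer echo path.
  config.delay.default_delay = 10;           // Assumed extra device latency.
  if (!webrtc::EchoCanceller3Config::Validate(&config)) {
    // Validate() has already fixed the out-of-range fields in place; the
    // return value only signals that something had to be adjusted.
    RTC_LOG(LS_WARNING) << "AEC3 config contained out-of-range values.";
  }
  return config;
}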
32
TMessagesProj/jni/voip/webrtc/api/audio/echo_canceller3_factory.cc
Normal file
@@ -0,0 +1,32 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "api/audio/echo_canceller3_factory.h"

#include <memory>

#include "modules/audio_processing/aec3/echo_canceller3.h"

namespace webrtc {

EchoCanceller3Factory::EchoCanceller3Factory() {}

EchoCanceller3Factory::EchoCanceller3Factory(const EchoCanceller3Config& config)
    : config_(config) {}

std::unique_ptr<EchoControl> EchoCanceller3Factory::Create(
    int sample_rate_hz,
    int num_render_channels,
    int num_capture_channels) {
  return std::make_unique<EchoCanceller3>(
      config_, /*multichannel_config=*/absl::nullopt, sample_rate_hz,
      num_render_channels, num_capture_channels);
}

}  // namespace webrtc
41
TMessagesProj/jni/voip/webrtc/api/audio/echo_canceller3_factory.h
Normal file
@@ -0,0 +1,41 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
#define API_AUDIO_ECHO_CANCELLER3_FACTORY_H_

#include <memory>

#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {

class RTC_EXPORT EchoCanceller3Factory : public EchoControlFactory {
 public:
  // Factory producing EchoCanceller3 instances with the default configuration.
  EchoCanceller3Factory();

  // Factory producing EchoCanceller3 instances with the specified
  // configuration.
  explicit EchoCanceller3Factory(const EchoCanceller3Config& config);

  // Creates an EchoCanceller3 with a specified channel count and sampling
  // rate.
  std::unique_ptr<EchoControl> Create(int sample_rate_hz,
                                      int num_render_channels,
                                      int num_capture_channels) override;

 private:
  const EchoCanceller3Config config_;
};
}  // namespace webrtc

#endif  // API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
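A sketch (not from the original sources) of wiring the factory above into an AudioProcessing instance so that AEC3 is used as the echo controller. It assumes AudioProcessingBuilder::SetEchoControlFactory() from modules/audio_processing is available, as in upstream WebRTC; the function name here is hypothetical.

#include <memory>
#include <utility>

#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_canceller3_factory.h"
#include "modules/audio_processing/include/audio_processing.h"

void ConfigureApmWithAec3() {
  webrtc::EchoCanceller3Config config;
  auto factory = std::make_unique<webrtc::EchoCanceller3Factory>(config);
  auto apm = webrtc::AudioProcessingBuilder()
                 .SetEchoControlFactory(std::move(factory))
                 .Create();
  // `apm` now creates its echo controller through EchoCanceller3Factory.
  (void)apm;
}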
75
TMessagesProj/jni/voip/webrtc/api/audio/echo_control.h
Normal file
75
TMessagesProj/jni/voip/webrtc/api/audio/echo_control.h
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_ECHO_CONTROL_H_
|
||||
#define API_AUDIO_ECHO_CONTROL_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "rtc_base/checks.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class AudioBuffer;
|
||||
|
||||
// Interface for an acoustic echo cancellation (AEC) submodule.
|
||||
class EchoControl {
|
||||
public:
|
||||
// Analysis (not changing) of the render signal.
|
||||
virtual void AnalyzeRender(AudioBuffer* render) = 0;
|
||||
|
||||
// Analysis (not changing) of the capture signal.
|
||||
virtual void AnalyzeCapture(AudioBuffer* capture) = 0;
|
||||
|
||||
// Processes the capture signal in order to remove the echo.
|
||||
virtual void ProcessCapture(AudioBuffer* capture, bool level_change) = 0;
|
||||
|
||||
// As above, but also returns the linear filter output.
|
||||
virtual void ProcessCapture(AudioBuffer* capture,
|
||||
AudioBuffer* linear_output,
|
||||
bool level_change) = 0;
|
||||
|
||||
struct Metrics {
|
||||
double echo_return_loss;
|
||||
double echo_return_loss_enhancement;
|
||||
int delay_ms;
|
||||
};
|
||||
|
||||
// Collect current metrics from the echo controller.
|
||||
virtual Metrics GetMetrics() const = 0;
|
||||
|
||||
// Provides an optional external estimate of the audio buffer delay.
|
||||
virtual void SetAudioBufferDelay(int delay_ms) = 0;
|
||||
|
||||
// Specifies whether the capture output will be used. The purpose of this is
|
||||
// to allow the echo controller to deactivate some of the processing when the
|
||||
// resulting output is not used anyway, for instance when the endpoint is
|
||||
// muted.
|
||||
// TODO(b/177830919): Make pure virtual.
|
||||
virtual void SetCaptureOutputUsage(bool capture_output_used) {}
|
||||
|
||||
// Returns whether the signal is altered.
|
||||
virtual bool ActiveProcessing() const = 0;
|
||||
|
||||
virtual ~EchoControl() {}
|
||||
};
|
||||
|
||||
// Interface for a factory that creates EchoControllers.
|
||||
class EchoControlFactory {
|
||||
public:
|
||||
virtual std::unique_ptr<EchoControl> Create(int sample_rate_hz,
|
||||
int num_render_channels,
|
||||
int num_capture_channels) = 0;
|
||||
|
||||
virtual ~EchoControlFactory() = default;
|
||||
};
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_ECHO_CONTROL_H_
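A minimal sketch (not part of the original file) of the call order an owner of an EchoControl instance is expected to follow for each 10 ms frame, based purely on the interface above; AudioBuffer is the audio_processing-internal buffer type and is only forward-declared here.
// Sketch: per-frame driving of an EchoControl instance.
#include "api/audio/echo_control.h"

void CancelEchoForOneFrame(webrtc::EchoControl& echo_control,
                           webrtc::AudioBuffer& render,
                           webrtc::AudioBuffer& capture,
                           bool capture_level_change) {
  // Feed the far-end (render) audio so the canceller can model the echo path,
  // then analyze and process the near-end (capture) audio in place.
  echo_control.AnalyzeRender(&render);
  echo_control.AnalyzeCapture(&capture);
  echo_control.ProcessCapture(&capture, capture_level_change);
}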
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
#include "api/audio/echo_detector_creator.h"
|
||||
|
||||
#include "api/make_ref_counted.h"
|
||||
#include "modules/audio_processing/residual_echo_detector.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
rtc::scoped_refptr<EchoDetector> CreateEchoDetector() {
|
||||
return rtc::make_ref_counted<ResidualEchoDetector>();
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_ECHO_DETECTOR_CREATOR_H_
|
||||
#define API_AUDIO_ECHO_DETECTOR_CREATOR_H_
|
||||
|
||||
#include "api/scoped_refptr.h"
|
||||
#include "modules/audio_processing/include/audio_processing.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Returns an instance of the WebRTC implementation of a residual echo detector.
|
||||
// It can be provided to the webrtc::AudioProcessingBuilder to obtain the
|
||||
// usual residual echo metrics.
|
||||
rtc::scoped_refptr<EchoDetector> CreateEchoDetector();
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_ECHO_DETECTOR_CREATOR_H_
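A hedged sketch (not part of the original file) of the usage the comment above describes: handing the detector to webrtc::AudioProcessingBuilder. It assumes the builder exposes a SetEchoDetector() setter; verify against the audio_processing.h revision in use.
// Sketch: providing the residual echo detector to AudioProcessingBuilder.
#include "api/audio/echo_detector_creator.h"
#include "modules/audio_processing/include/audio_processing.h"

auto BuildAudioProcessingWithEchoDetector() {
  webrtc::AudioProcessingBuilder builder;
  builder.SetEchoDetector(webrtc::CreateEchoDetector());  // Assumed setter.
  return builder.Create();
}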
|
||||
29
TMessagesProj/jni/voip/webrtc/api/audio/test/BUILD.gn
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
if (rtc_include_tests) {
|
||||
rtc_library("audio_api_unittests") {
|
||||
testonly = true
|
||||
sources = [
|
||||
"audio_frame_unittest.cc",
|
||||
"echo_canceller3_config_unittest.cc",
|
||||
]
|
||||
deps = [
|
||||
"..:aec3_config",
|
||||
"..:audio_frame_api",
|
||||
"../../../modules/audio_processing:aec3_config_json",
|
||||
"../../../test:test_support",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
/*
|
||||
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio/audio_frame.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <string.h> // memcmp
|
||||
|
||||
#include "test/gtest.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
|
||||
const int16_t* frame_data = frame.data();
|
||||
for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
|
||||
if (frame_data[i] != sample) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
constexpr uint32_t kTimestamp = 27;
|
||||
constexpr int kSampleRateHz = 16000;
|
||||
constexpr size_t kNumChannelsMono = 1;
|
||||
constexpr size_t kNumChannelsStereo = 2;
|
||||
constexpr size_t kNumChannels5_1 = 6;
|
||||
constexpr size_t kSamplesPerChannel = kSampleRateHz / 100;
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(AudioFrameTest, FrameStartsMuted) {
|
||||
AudioFrame frame;
|
||||
EXPECT_TRUE(frame.muted());
|
||||
EXPECT_TRUE(AllSamplesAre(0, frame));
|
||||
}
|
||||
|
||||
TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroed) {
|
||||
AudioFrame frame;
|
||||
frame.mutable_data();
|
||||
EXPECT_FALSE(frame.muted());
|
||||
EXPECT_TRUE(AllSamplesAre(0, frame));
|
||||
}
|
||||
|
||||
TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
|
||||
AudioFrame frame;
|
||||
int16_t* frame_data = frame.mutable_data();
|
||||
for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
|
||||
frame_data[i] = 17;
|
||||
}
|
||||
ASSERT_TRUE(AllSamplesAre(17, frame));
|
||||
frame.Mute();
|
||||
EXPECT_TRUE(frame.muted());
|
||||
EXPECT_TRUE(AllSamplesAre(0, frame));
|
||||
}
|
||||
|
||||
TEST(AudioFrameTest, UpdateFrameMono) {
|
||||
AudioFrame frame;
|
||||
int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
|
||||
frame.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
|
||||
AudioFrame::kPLC, AudioFrame::kVadActive, kNumChannelsMono);
|
||||
|
||||
EXPECT_EQ(kTimestamp, frame.timestamp_);
|
||||
EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
|
||||
EXPECT_EQ(kSampleRateHz, frame.sample_rate_hz());
|
||||
EXPECT_EQ(AudioFrame::kPLC, frame.speech_type_);
|
||||
EXPECT_EQ(AudioFrame::kVadActive, frame.vad_activity_);
|
||||
EXPECT_EQ(kNumChannelsMono, frame.num_channels());
|
||||
EXPECT_EQ(CHANNEL_LAYOUT_MONO, frame.channel_layout());
|
||||
|
||||
EXPECT_FALSE(frame.muted());
|
||||
EXPECT_EQ(0, memcmp(samples, frame.data(), sizeof(samples)));
|
||||
|
||||
frame.UpdateFrame(kTimestamp, nullptr /* data*/, kSamplesPerChannel,
|
||||
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
|
||||
kNumChannelsMono);
|
||||
EXPECT_TRUE(frame.muted());
|
||||
EXPECT_TRUE(AllSamplesAre(0, frame));
|
||||
}
|
||||
|
||||
TEST(AudioFrameTest, UpdateFrameMultiChannel) {
|
||||
AudioFrame frame;
|
||||
frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
|
||||
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
|
||||
kNumChannelsStereo);
|
||||
EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
|
||||
EXPECT_EQ(kNumChannelsStereo, frame.num_channels());
|
||||
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, frame.channel_layout());
|
||||
|
||||
frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
|
||||
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
|
||||
kNumChannels5_1);
|
||||
EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
|
||||
EXPECT_EQ(kNumChannels5_1, frame.num_channels());
|
||||
EXPECT_EQ(CHANNEL_LAYOUT_5_1, frame.channel_layout());
|
||||
}
|
||||
|
||||
TEST(AudioFrameTest, CopyFrom) {
|
||||
AudioFrame frame1;
|
||||
AudioFrame frame2;
|
||||
|
||||
int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
|
||||
frame2.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
|
||||
AudioFrame::kPLC, AudioFrame::kVadActive,
|
||||
kNumChannelsMono);
|
||||
frame1.CopyFrom(frame2);
|
||||
|
||||
EXPECT_EQ(frame2.timestamp_, frame1.timestamp_);
|
||||
EXPECT_EQ(frame2.samples_per_channel_, frame1.samples_per_channel_);
|
||||
EXPECT_EQ(frame2.sample_rate_hz_, frame1.sample_rate_hz_);
|
||||
EXPECT_EQ(frame2.speech_type_, frame1.speech_type_);
|
||||
EXPECT_EQ(frame2.vad_activity_, frame1.vad_activity_);
|
||||
EXPECT_EQ(frame2.num_channels_, frame1.num_channels_);
|
||||
|
||||
EXPECT_EQ(frame2.muted(), frame1.muted());
|
||||
EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
|
||||
|
||||
frame2.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
|
||||
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
|
||||
kNumChannelsMono);
|
||||
frame1.CopyFrom(frame2);
|
||||
|
||||
EXPECT_EQ(frame2.muted(), frame1.muted());
|
||||
EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio/echo_canceller3_config.h"
|
||||
|
||||
#include "modules/audio_processing/test/echo_canceller3_config_json.h"
|
||||
#include "test/gtest.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
TEST(EchoCanceller3Config, ValidConfigIsNotModified) {
|
||||
EchoCanceller3Config config;
|
||||
EXPECT_TRUE(EchoCanceller3Config::Validate(&config));
|
||||
EchoCanceller3Config default_config;
|
||||
EXPECT_EQ(Aec3ConfigToJsonString(config),
|
||||
Aec3ConfigToJsonString(default_config));
|
||||
}
|
||||
|
||||
TEST(EchoCanceller3Config, InvalidConfigIsCorrected) {
|
||||
// Change a parameter and validate.
|
||||
EchoCanceller3Config config;
|
||||
config.echo_model.min_noise_floor_power = -1600000.f;
|
||||
EXPECT_FALSE(EchoCanceller3Config::Validate(&config));
|
||||
EXPECT_GE(config.echo_model.min_noise_floor_power, 0.f);
|
||||
// Verify remaining parameters are unchanged.
|
||||
EchoCanceller3Config default_config;
|
||||
config.echo_model.min_noise_floor_power =
|
||||
default_config.echo_model.min_noise_floor_power;
|
||||
EXPECT_EQ(Aec3ConfigToJsonString(config),
|
||||
Aec3ConfigToJsonString(default_config));
|
||||
}
|
||||
|
||||
TEST(EchoCanceller3Config, ValidatedConfigsAreValid) {
|
||||
EchoCanceller3Config config;
|
||||
config.delay.down_sampling_factor = 983;
|
||||
EXPECT_FALSE(EchoCanceller3Config::Validate(&config));
|
||||
EXPECT_TRUE(EchoCanceller3Config::Validate(&config));
|
||||
}
|
||||
} // namespace webrtc
|
||||
147
TMessagesProj/jni/voip/webrtc/api/audio_codecs/BUILD.gn
Normal file
|
|
@ -0,0 +1,147 @@
|
|||
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
rtc_library("audio_codecs_api") {
|
||||
visibility = [ "*" ]
|
||||
sources = [
|
||||
"audio_codec_pair_id.cc",
|
||||
"audio_codec_pair_id.h",
|
||||
"audio_decoder.cc",
|
||||
"audio_decoder.h",
|
||||
"audio_decoder_factory.h",
|
||||
"audio_decoder_factory_template.h",
|
||||
"audio_encoder.cc",
|
||||
"audio_encoder.h",
|
||||
"audio_encoder_factory.h",
|
||||
"audio_encoder_factory_template.h",
|
||||
"audio_format.cc",
|
||||
"audio_format.h",
|
||||
]
|
||||
deps = [
|
||||
"..:array_view",
|
||||
"..:bitrate_allocation",
|
||||
"..:make_ref_counted",
|
||||
"..:ref_count",
|
||||
"..:scoped_refptr",
|
||||
"../../api:field_trials_view",
|
||||
"../../api:rtp_parameters",
|
||||
"../../rtc_base:buffer",
|
||||
"../../rtc_base:checks",
|
||||
"../../rtc_base:event_tracer",
|
||||
"../../rtc_base:refcount",
|
||||
"../../rtc_base:sanitizer",
|
||||
"../../rtc_base/system:rtc_export",
|
||||
"../units:data_rate",
|
||||
"../units:time_delta",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/base:core_headers",
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("builtin_audio_decoder_factory") {
|
||||
visibility = [ "*" ]
|
||||
allow_poison = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"builtin_audio_decoder_factory.cc",
|
||||
"builtin_audio_decoder_factory.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_codecs_api",
|
||||
"..:scoped_refptr",
|
||||
"L16:audio_decoder_L16",
|
||||
"g711:audio_decoder_g711",
|
||||
"g722:audio_decoder_g722",
|
||||
]
|
||||
defines = []
|
||||
if (rtc_include_ilbc) {
|
||||
deps += [ "ilbc:audio_decoder_ilbc" ]
|
||||
defines += [ "WEBRTC_USE_BUILTIN_ILBC=1" ]
|
||||
} else {
|
||||
defines += [ "WEBRTC_USE_BUILTIN_ILBC=0" ]
|
||||
}
|
||||
if (rtc_include_opus) {
|
||||
deps += [
|
||||
"opus:audio_decoder_multiopus",
|
||||
"opus:audio_decoder_opus",
|
||||
]
|
||||
defines += [ "WEBRTC_USE_BUILTIN_OPUS=1" ]
|
||||
} else {
|
||||
defines += [ "WEBRTC_USE_BUILTIN_OPUS=0" ]
|
||||
}
|
||||
}
|
||||
|
||||
rtc_library("builtin_audio_encoder_factory") {
|
||||
visibility = [ "*" ]
|
||||
allow_poison = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"builtin_audio_encoder_factory.cc",
|
||||
"builtin_audio_encoder_factory.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_codecs_api",
|
||||
"..:scoped_refptr",
|
||||
"L16:audio_encoder_L16",
|
||||
"g711:audio_encoder_g711",
|
||||
"g722:audio_encoder_g722",
|
||||
]
|
||||
defines = []
|
||||
if (rtc_include_ilbc) {
|
||||
deps += [ "ilbc:audio_encoder_ilbc" ]
|
||||
defines += [ "WEBRTC_USE_BUILTIN_ILBC=1" ]
|
||||
} else {
|
||||
defines += [ "WEBRTC_USE_BUILTIN_ILBC=0" ]
|
||||
}
|
||||
if (rtc_include_opus) {
|
||||
deps += [
|
||||
"opus:audio_encoder_multiopus",
|
||||
"opus:audio_encoder_opus",
|
||||
]
|
||||
defines += [ "WEBRTC_USE_BUILTIN_OPUS=1" ]
|
||||
} else {
|
||||
defines += [ "WEBRTC_USE_BUILTIN_OPUS=0" ]
|
||||
}
|
||||
}
|
||||
|
||||
rtc_library("opus_audio_decoder_factory") {
|
||||
visibility = [ "*" ]
|
||||
allow_poison = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"opus_audio_decoder_factory.cc",
|
||||
"opus_audio_decoder_factory.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_codecs_api",
|
||||
"..:scoped_refptr",
|
||||
"opus:audio_decoder_multiopus",
|
||||
"opus:audio_decoder_opus",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("opus_audio_encoder_factory") {
|
||||
visibility = [ "*" ]
|
||||
allow_poison = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"opus_audio_encoder_factory.cc",
|
||||
"opus_audio_encoder_factory.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_codecs_api",
|
||||
"..:scoped_refptr",
|
||||
"opus:audio_encoder_multiopus",
|
||||
"opus:audio_encoder_opus",
|
||||
]
|
||||
}
|
||||
55
TMessagesProj/jni/voip/webrtc/api/audio_codecs/L16/BUILD.gn
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_L16") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_encoder_L16.cc",
|
||||
"audio_encoder_L16.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:pcm16b",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base:safe_minmax",
|
||||
"../../../rtc_base:stringutils",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("audio_decoder_L16") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_decoder_L16.cc",
|
||||
"audio_decoder_L16.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:pcm16b",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/L16/audio_decoder_L16.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
|
||||
#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioDecoderL16::Config> AudioDecoderL16::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
Config config;
|
||||
config.sample_rate_hz = format.clockrate_hz;
|
||||
config.num_channels = rtc::checked_cast<int>(format.num_channels);
|
||||
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
|
||||
return config;
|
||||
}
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
void AudioDecoderL16::AppendSupportedDecoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
Pcm16BAppendSupportedCodecSpecs(specs);
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> AudioDecoderL16::MakeAudioDecoder(
|
||||
const Config& config,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
return nullptr;
|
||||
}
|
||||
return std::make_unique<AudioDecoderPcm16B>(config.sample_rate_hz,
|
||||
config.num_channels);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
|
||||
#define API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// L16 decoder API for use as a template parameter to
|
||||
// CreateAudioDecoderFactory<...>().
|
||||
struct RTC_EXPORT AudioDecoderL16 {
|
||||
struct Config {
|
||||
bool IsOk() const {
|
||||
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
|
||||
sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
|
||||
(num_channels >= 1 &&
|
||||
num_channels <= AudioDecoder::kMaxNumberOfChannels);
|
||||
}
|
||||
int sample_rate_hz = 8000;
|
||||
int num_channels = 1;
|
||||
};
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const Config& config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
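A short sketch (not part of the original file) of the intended use named in the comment above: passing AudioDecoderL16 as a template parameter to CreateAudioDecoderFactory<>() from api/audio_codecs/audio_decoder_factory_template.h.
// Sketch: an AudioDecoderFactory that only knows L16.
#include "api/audio_codecs/L16/audio_decoder_L16.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"

rtc::scoped_refptr<webrtc::AudioDecoderFactory> CreateL16OnlyDecoderFactory() {
  return webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderL16>();
}

// A 16 kHz mono decoder can then be requested from an SDP format, e.g.:
//   factory->MakeAudioDecoder(webrtc::SdpAudioFormat("L16", 16000, 1),
//                             absl::nullopt);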
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/L16/audio_encoder_L16.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
|
||||
#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
#include "rtc_base/numerics/safe_minmax.h"
|
||||
#include "rtc_base/string_to_number.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioEncoderL16::Config> AudioEncoderL16::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
if (!rtc::IsValueInRangeForNumericType<int>(format.num_channels)) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return absl::nullopt;
|
||||
}
|
||||
Config config;
|
||||
config.sample_rate_hz = format.clockrate_hz;
|
||||
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
|
||||
auto ptime_iter = format.parameters.find("ptime");
|
||||
if (ptime_iter != format.parameters.end()) {
|
||||
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
|
||||
if (ptime && *ptime > 0) {
|
||||
config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
|
||||
}
|
||||
}
|
||||
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
|
||||
return config;
|
||||
}
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
void AudioEncoderL16::AppendSupportedEncoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
Pcm16BAppendSupportedCodecSpecs(specs);
|
||||
}
|
||||
|
||||
AudioCodecInfo AudioEncoderL16::QueryAudioEncoder(
|
||||
const AudioEncoderL16::Config& config) {
|
||||
RTC_DCHECK(config.IsOk());
|
||||
return {config.sample_rate_hz,
|
||||
rtc::dchecked_cast<size_t>(config.num_channels),
|
||||
config.sample_rate_hz * config.num_channels * 16};
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> AudioEncoderL16::MakeAudioEncoder(
|
||||
const AudioEncoderL16::Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
AudioEncoderPcm16B::Config c;
|
||||
c.sample_rate_hz = config.sample_rate_hz;
|
||||
c.num_channels = config.num_channels;
|
||||
c.frame_size_ms = config.frame_size_ms;
|
||||
c.payload_type = payload_type;
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
return std::make_unique<AudioEncoderPcm16B>(c);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_L16_AUDIO_ENCODER_L16_H_
|
||||
#define API_AUDIO_CODECS_L16_AUDIO_ENCODER_L16_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// L16 encoder API for use as a template parameter to
|
||||
// CreateAudioEncoderFactory<...>().
|
||||
struct RTC_EXPORT AudioEncoderL16 {
|
||||
struct Config {
|
||||
bool IsOk() const {
|
||||
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
|
||||
sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
|
||||
num_channels >= 1 &&
|
||||
num_channels <= AudioEncoder::kMaxNumberOfChannels &&
|
||||
frame_size_ms > 0 && frame_size_ms <= 120 &&
|
||||
frame_size_ms % 10 == 0;
|
||||
}
|
||||
int sample_rate_hz = 8000;
|
||||
int num_channels = 1;
|
||||
int frame_size_ms = 10;
|
||||
};
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
static AudioCodecInfo QueryAudioEncoder(const Config& config);
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_L16_AUDIO_ENCODER_L16_H_
|
||||
3
TMessagesProj/jni/voip/webrtc/api/audio_codecs/OWNERS
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
alessiob@webrtc.org
|
||||
henrik.lundin@webrtc.org
|
||||
jakobi@webrtc.org
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
/*
|
||||
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
|
||||
#include "rtc_base/checks.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
// Returns a new value that it has never returned before. You may call it at
|
||||
// most 2^63 times in the lifetime of the program. Note: The returned values
|
||||
// may be easily predictable.
|
||||
uint64_t GetNextId() {
|
||||
static std::atomic<uint64_t> next_id(0);
|
||||
|
||||
// Atomically increment `next_id`, and return the previous value. Relaxed
|
||||
// memory order is sufficient, since all we care about is that different
|
||||
// callers return different values.
|
||||
const uint64_t new_id = next_id.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
// This check isn't atomic with the increment, so if we start 2^63 + 1
|
||||
// invocations of GetNextId() in parallel, the last one to do the atomic
|
||||
// increment could return the ID 0 before any of the others had time to
|
||||
// trigger this DCHECK. We blithely assume that this won't happen.
|
||||
RTC_DCHECK_LT(new_id, uint64_t{1} << 63) << "Used up all ID values";
|
||||
|
||||
return new_id;
|
||||
}
|
||||
|
||||
// Make an integer ID more unpredictable. This is a 1:1 mapping, so you can
|
||||
// feed it any value, but the idea is that you can feed it a sequence such as
|
||||
// 0, 1, 2, ... and get a new sequence that isn't as trivially predictable, so
|
||||
// that users won't rely on it being consecutive or increasing or anything like
|
||||
// that.
|
||||
constexpr uint64_t ObfuscateId(uint64_t id) {
|
||||
// Any nonzero coefficient that's relatively prime to 2^64 (that is, any odd
|
||||
// number) and any constant will give a 1:1 mapping. These high-entropy
|
||||
// values will prevent the sequence from being trivially predictable.
|
||||
//
|
||||
// Both the multiplication and the addition are going to overflow almost always,
|
||||
// but that's fine---we *want* arithmetic mod 2^64.
|
||||
return uint64_t{0x85fdb20e1294309a} + uint64_t{0xc516ef5c37462469} * id;
|
||||
}
|
||||
|
||||
// The first ten values. Verified against the Python function
|
||||
//
|
||||
// def f(n):
|
||||
// return (0x85fdb20e1294309a + 0xc516ef5c37462469 * n) % 2**64
|
||||
//
|
||||
// Callers should obviously not depend on these exact values...
|
||||
//
|
||||
// (On Visual C++, we have to disable warning C4307 (integral constant
|
||||
// overflow), even though unsigned integers have perfectly well-defined
|
||||
// overflow behavior.)
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4307)
|
||||
#endif
|
||||
static_assert(ObfuscateId(0) == uint64_t{0x85fdb20e1294309a}, "");
|
||||
static_assert(ObfuscateId(1) == uint64_t{0x4b14a16a49da5503}, "");
|
||||
static_assert(ObfuscateId(2) == uint64_t{0x102b90c68120796c}, "");
|
||||
static_assert(ObfuscateId(3) == uint64_t{0xd5428022b8669dd5}, "");
|
||||
static_assert(ObfuscateId(4) == uint64_t{0x9a596f7eefacc23e}, "");
|
||||
static_assert(ObfuscateId(5) == uint64_t{0x5f705edb26f2e6a7}, "");
|
||||
static_assert(ObfuscateId(6) == uint64_t{0x24874e375e390b10}, "");
|
||||
static_assert(ObfuscateId(7) == uint64_t{0xe99e3d93957f2f79}, "");
|
||||
static_assert(ObfuscateId(8) == uint64_t{0xaeb52cefccc553e2}, "");
|
||||
static_assert(ObfuscateId(9) == uint64_t{0x73cc1c4c040b784b}, "");
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
|
||||
} // namespace
|
||||
|
||||
AudioCodecPairId AudioCodecPairId::Create() {
|
||||
return AudioCodecPairId(ObfuscateId(GetNextId()));
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <utility>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class AudioCodecPairId final {
|
||||
public:
|
||||
// Copyable, but not default constructible.
|
||||
AudioCodecPairId() = delete;
|
||||
AudioCodecPairId(const AudioCodecPairId&) = default;
|
||||
AudioCodecPairId(AudioCodecPairId&&) = default;
|
||||
AudioCodecPairId& operator=(const AudioCodecPairId&) = default;
|
||||
AudioCodecPairId& operator=(AudioCodecPairId&&) = default;
|
||||
|
||||
friend void swap(AudioCodecPairId& a, AudioCodecPairId& b) {
|
||||
using std::swap;
|
||||
swap(a.id_, b.id_);
|
||||
}
|
||||
|
||||
// Creates a new ID, unequal to any previously created ID.
|
||||
static AudioCodecPairId Create();
|
||||
|
||||
// IDs can be tested for equality.
|
||||
friend bool operator==(AudioCodecPairId a, AudioCodecPairId b) {
|
||||
return a.id_ == b.id_;
|
||||
}
|
||||
friend bool operator!=(AudioCodecPairId a, AudioCodecPairId b) {
|
||||
return a.id_ != b.id_;
|
||||
}
|
||||
|
||||
// Comparisons. The ordering of ID values is completely arbitrary, but
|
||||
// stable, so it's useful e.g. if you want to use IDs as keys in an ordered
|
||||
// map.
|
||||
friend bool operator<(AudioCodecPairId a, AudioCodecPairId b) {
|
||||
return a.id_ < b.id_;
|
||||
}
|
||||
friend bool operator<=(AudioCodecPairId a, AudioCodecPairId b) {
|
||||
return a.id_ <= b.id_;
|
||||
}
|
||||
friend bool operator>=(AudioCodecPairId a, AudioCodecPairId b) {
|
||||
return a.id_ >= b.id_;
|
||||
}
|
||||
friend bool operator>(AudioCodecPairId a, AudioCodecPairId b) {
|
||||
return a.id_ > b.id_;
|
||||
}
|
||||
|
||||
// Returns a numeric representation of the ID. The numeric values are
|
||||
// completely arbitrary, but stable, collision-free, and reasonably evenly
|
||||
// distributed, so they are e.g. useful as hash values in unordered maps.
|
||||
uint64_t NumericRepresentation() const { return id_; }
|
||||
|
||||
private:
|
||||
explicit AudioCodecPairId(uint64_t id) : id_(id) {}
|
||||
|
||||
uint64_t id_;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
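A hedged sketch (not part of the original file) showing one ID being created per encoder/decoder pair and handed to both factories, which is how the linking described in audio_decoder_factory.h is set up. The AudioEncoderFactory::MakeAudioEncoder() signature shown here is an assumption, since that header is outside this section of the diff.
// Sketch: sharing one AudioCodecPairId between an encoder and a decoder.
#include <memory>

#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"

void CreateLinkedCodecPair(webrtc::AudioEncoderFactory& encoder_factory,
                           webrtc::AudioDecoderFactory& decoder_factory,
                           const webrtc::SdpAudioFormat& format,
                           int payload_type) {
  const webrtc::AudioCodecPairId pair_id = webrtc::AudioCodecPairId::Create();
  // Both factory calls receive the same ID, so implementations may treat the
  // two instances as a pair (e.g. for codecs with built-in adaptation).
  std::unique_ptr<webrtc::AudioEncoder> encoder =
      encoder_factory.MakeAudioEncoder(payload_type, format, pair_id);
  std::unique_ptr<webrtc::AudioDecoder> decoder =
      decoder_factory.MakeAudioDecoder(format, pair_id);
  // ... hand `encoder` and `decoder` to the send and receive streams.
}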
|
||||
169
TMessagesProj/jni/voip/webrtc/api/audio_codecs/audio_decoder.cc
Normal file
|
|
@ -0,0 +1,169 @@
|
|||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "api/array_view.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/sanitizer.h"
|
||||
#include "rtc_base/trace_event.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
class OldStyleEncodedFrame final : public AudioDecoder::EncodedAudioFrame {
|
||||
public:
|
||||
OldStyleEncodedFrame(AudioDecoder* decoder, rtc::Buffer&& payload)
|
||||
: decoder_(decoder), payload_(std::move(payload)) {}
|
||||
|
||||
size_t Duration() const override {
|
||||
const int ret = decoder_->PacketDuration(payload_.data(), payload_.size());
|
||||
return ret < 0 ? 0 : static_cast<size_t>(ret);
|
||||
}
|
||||
|
||||
absl::optional<DecodeResult> Decode(
|
||||
rtc::ArrayView<int16_t> decoded) const override {
|
||||
auto speech_type = AudioDecoder::kSpeech;
|
||||
const int ret = decoder_->Decode(
|
||||
payload_.data(), payload_.size(), decoder_->SampleRateHz(),
|
||||
decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
|
||||
return ret < 0 ? absl::nullopt
|
||||
: absl::optional<DecodeResult>(
|
||||
{static_cast<size_t>(ret), speech_type});
|
||||
}
|
||||
|
||||
private:
|
||||
AudioDecoder* const decoder_;
|
||||
const rtc::Buffer payload_;
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
bool AudioDecoder::EncodedAudioFrame::IsDtxPacket() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
AudioDecoder::ParseResult::ParseResult() = default;
|
||||
AudioDecoder::ParseResult::ParseResult(ParseResult&& b) = default;
|
||||
AudioDecoder::ParseResult::ParseResult(uint32_t timestamp,
|
||||
int priority,
|
||||
std::unique_ptr<EncodedAudioFrame> frame)
|
||||
: timestamp(timestamp), priority(priority), frame(std::move(frame)) {
|
||||
RTC_DCHECK_GE(priority, 0);
|
||||
}
|
||||
|
||||
AudioDecoder::ParseResult::~ParseResult() = default;
|
||||
|
||||
AudioDecoder::ParseResult& AudioDecoder::ParseResult::operator=(
|
||||
ParseResult&& b) = default;
|
||||
|
||||
std::vector<AudioDecoder::ParseResult> AudioDecoder::ParsePayload(
|
||||
rtc::Buffer&& payload,
|
||||
uint32_t timestamp) {
|
||||
std::vector<ParseResult> results;
|
||||
std::unique_ptr<EncodedAudioFrame> frame(
|
||||
new OldStyleEncodedFrame(this, std::move(payload)));
|
||||
results.emplace_back(timestamp, 0, std::move(frame));
|
||||
return results;
|
||||
}
|
||||
|
||||
int AudioDecoder::Decode(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
size_t max_decoded_bytes,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type) {
|
||||
TRACE_EVENT0("webrtc", "AudioDecoder::Decode");
|
||||
rtc::MsanCheckInitialized(rtc::MakeArrayView(encoded, encoded_len));
|
||||
int duration = PacketDuration(encoded, encoded_len);
|
||||
if (duration >= 0 &&
|
||||
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
|
||||
return -1;
|
||||
}
|
||||
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
|
||||
speech_type);
|
||||
}
|
||||
|
||||
int AudioDecoder::DecodeRedundant(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
size_t max_decoded_bytes,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type) {
|
||||
TRACE_EVENT0("webrtc", "AudioDecoder::DecodeRedundant");
|
||||
rtc::MsanCheckInitialized(rtc::MakeArrayView(encoded, encoded_len));
|
||||
int duration = PacketDurationRedundant(encoded, encoded_len);
|
||||
if (duration >= 0 &&
|
||||
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
|
||||
return -1;
|
||||
}
|
||||
return DecodeRedundantInternal(encoded, encoded_len, sample_rate_hz, decoded,
|
||||
speech_type);
|
||||
}
|
||||
|
||||
int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type) {
|
||||
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
|
||||
speech_type);
|
||||
}
|
||||
|
||||
bool AudioDecoder::HasDecodePlc() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// TODO(bugs.webrtc.org/9676): Remove default implementation.
|
||||
void AudioDecoder::GeneratePlc(size_t /*requested_samples_per_channel*/,
|
||||
rtc::BufferT<int16_t>* /*concealment_audio*/) {}
|
||||
|
||||
int AudioDecoder::ErrorCode() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int AudioDecoder::PacketDuration(const uint8_t* encoded,
|
||||
size_t encoded_len) const {
|
||||
return kNotImplemented;
|
||||
}
|
||||
|
||||
int AudioDecoder::PacketDurationRedundant(const uint8_t* encoded,
|
||||
size_t encoded_len) const {
|
||||
return kNotImplemented;
|
||||
}
|
||||
|
||||
bool AudioDecoder::PacketHasFec(const uint8_t* encoded,
|
||||
size_t encoded_len) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) {
|
||||
switch (type) {
|
||||
case 0: // TODO(hlundin): Both iSAC and Opus return 0 for speech.
|
||||
case 1:
|
||||
return kSpeech;
|
||||
case 2:
|
||||
return kComfortNoise;
|
||||
default:
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return kSpeech;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr int AudioDecoder::kMaxNumberOfChannels;
|
||||
} // namespace webrtc
|
||||
195
TMessagesProj/jni/voip/webrtc/api/audio_codecs/audio_decoder.h
Normal file
|
|
@ -0,0 +1,195 @@
|
|||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_DECODER_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/array_view.h"
|
||||
#include "rtc_base/buffer.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class AudioDecoder {
|
||||
public:
|
||||
enum SpeechType {
|
||||
kSpeech = 1,
|
||||
kComfortNoise = 2,
|
||||
};
|
||||
|
||||
// Used by PacketDuration below. The value -1 is reserved for errors.
|
||||
enum { kNotImplemented = -2 };
|
||||
|
||||
AudioDecoder() = default;
|
||||
virtual ~AudioDecoder() = default;
|
||||
|
||||
AudioDecoder(const AudioDecoder&) = delete;
|
||||
AudioDecoder& operator=(const AudioDecoder&) = delete;
|
||||
|
||||
class EncodedAudioFrame {
|
||||
public:
|
||||
struct DecodeResult {
|
||||
size_t num_decoded_samples;
|
||||
SpeechType speech_type;
|
||||
};
|
||||
|
||||
virtual ~EncodedAudioFrame() = default;
|
||||
|
||||
// Returns the duration in samples-per-channel of this audio frame.
|
||||
// If no duration can be ascertained, returns zero.
|
||||
virtual size_t Duration() const = 0;
|
||||
|
||||
// Returns true if this packet contains DTX.
|
||||
virtual bool IsDtxPacket() const;
|
||||
|
||||
// Decodes this frame of audio and writes the result in `decoded`.
|
||||
// `decoded` must be large enough to store as many samples as indicated by a
|
||||
// call to Duration(). On success, returns an absl::optional containing the
|
||||
// total number of samples across all channels, as well as whether the
|
||||
// decoder produced comfort noise or speech. On failure, returns an empty
|
||||
// absl::optional. Decode may be called at most once per frame object.
|
||||
virtual absl::optional<DecodeResult> Decode(
|
||||
rtc::ArrayView<int16_t> decoded) const = 0;
|
||||
};
|
||||
|
||||
struct ParseResult {
|
||||
ParseResult();
|
||||
ParseResult(uint32_t timestamp,
|
||||
int priority,
|
||||
std::unique_ptr<EncodedAudioFrame> frame);
|
||||
ParseResult(ParseResult&& b);
|
||||
~ParseResult();
|
||||
|
||||
ParseResult& operator=(ParseResult&& b);
|
||||
|
||||
// The timestamp of the frame is in samples per channel.
|
||||
uint32_t timestamp;
|
||||
// The relative priority of the frame compared to other frames of the same
|
||||
// payload and the same timeframe. A higher value means a lower priority.
|
||||
// The highest priority is zero - negative values are not allowed.
|
||||
int priority;
|
||||
std::unique_ptr<EncodedAudioFrame> frame;
|
||||
};
|
||||
|
||||
// Let the decoder parse this payload and prepare zero or more decodable
|
||||
// frames. Each frame must be between 10 ms and 120 ms long. The caller must
|
||||
// ensure that the AudioDecoder object outlives any frame objects returned by
|
||||
// this call. The decoder is free to swap or move the data from the `payload`
|
||||
// buffer. `timestamp` is the input timestamp, in samples, corresponding to
|
||||
// the start of the payload.
|
||||
virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
|
||||
uint32_t timestamp);
|
||||
|
||||
// TODO(bugs.webrtc.org/10098): The Decode and DecodeRedundant methods are
|
||||
// obsolete; callers should call ParsePayload instead. For now, subclasses
|
||||
// must still implement DecodeInternal.
|
||||
|
||||
// Decodes `encoded_len` bytes from `encoded` and writes the result in
|
||||
// `decoded`. The maximum bytes allowed to be written into `decoded` is
|
||||
// `max_decoded_bytes`. Returns the total number of samples across all
|
||||
// channels. If the decoder produced comfort noise, `speech_type`
|
||||
// is set to kComfortNoise, otherwise it is kSpeech. The desired output
|
||||
// sample rate is provided in `sample_rate_hz`, which must be valid for the
|
||||
// codec at hand.
|
||||
int Decode(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
size_t max_decoded_bytes,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type);
|
||||
|
||||
// Same as Decode(), but interfaces to the decoder's redundant decode function.
|
||||
// The default implementation simply calls the regular Decode() method.
|
||||
int DecodeRedundant(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
size_t max_decoded_bytes,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type);
|
||||
|
||||
// Indicates if the decoder implements the DecodePlc method.
|
||||
virtual bool HasDecodePlc() const;
|
||||
|
||||
// Calls the packet-loss concealment of the decoder to update the state after
|
||||
// one or several lost packets. The caller has to make sure that the
|
||||
// memory allocated in `decoded` accommodates `num_frames` frames.
|
||||
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
|
||||
|
||||
// Asks the decoder to generate packet-loss concealment and append it to the
|
||||
// end of `concealment_audio`. The concealment audio should be in
|
||||
// channel-interleaved format, with as many channels as the last decoded
|
||||
// packet produced. The implementation must produce at least
|
||||
// requested_samples_per_channel, or nothing at all. This is a signal to the
|
||||
// caller to conceal the loss with other means. If the implementation provides
|
||||
// concealment samples, it is also responsible for "stitching" it together
|
||||
// with the decoded audio on either side of the concealment.
|
||||
// Note: The default implementation of GeneratePlc will be deleted soon. All
|
||||
// implementations must provide their own, which can be as simple as a no-op.
|
||||
// TODO(bugs.webrtc.org/9676): Remove default implementation.
|
||||
virtual void GeneratePlc(size_t requested_samples_per_channel,
|
||||
rtc::BufferT<int16_t>* concealment_audio);
|
||||
|
||||
// Resets the decoder state (empty buffers etc.).
|
||||
virtual void Reset() = 0;
|
||||
|
||||
// Returns the last error code from the decoder.
|
||||
virtual int ErrorCode();
|
||||
|
||||
// Returns the duration in samples-per-channel of the payload in `encoded`
|
||||
// which is `encoded_len` bytes long. Returns kNotImplemented if no duration
|
||||
// estimate is available, or -1 in case of an error.
|
||||
virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const;
|
||||
|
||||
// Returns the duration in samples-per-channel of the redundant payload in
|
||||
// `encoded` which is `encoded_len` bytes long. Returns kNotImplemented if no
|
||||
// duration estimate is available, or -1 in case of an error.
|
||||
virtual int PacketDurationRedundant(const uint8_t* encoded,
|
||||
size_t encoded_len) const;
|
||||
|
||||
// Detects whether a packet has forward error correction. The packet is
|
||||
// comprised of the samples in `encoded` which is `encoded_len` bytes long.
|
||||
// Returns true if the packet has FEC and false otherwise.
|
||||
virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const;
|
||||
|
||||
// Returns the actual sample rate of the decoder's output. This value may not
|
||||
// change during the lifetime of the decoder.
|
||||
virtual int SampleRateHz() const = 0;
|
||||
|
||||
// The number of channels in the decoder's output. This value may not change
|
||||
// during the lifetime of the decoder.
|
||||
virtual size_t Channels() const = 0;
|
||||
|
||||
// The maximum number of audio channels supported by WebRTC decoders.
|
||||
static constexpr int kMaxNumberOfChannels = 24;
|
||||
|
||||
protected:
|
||||
static SpeechType ConvertSpeechType(int16_t type);
|
||||
|
||||
virtual int DecodeInternal(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type) = 0;
|
||||
|
||||
virtual int DecodeRedundantInternal(const uint8_t* encoded,
|
||||
size_t encoded_len,
|
||||
int sample_rate_hz,
|
||||
int16_t* decoded,
|
||||
SpeechType* speech_type);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
#endif // API_AUDIO_CODECS_AUDIO_DECODER_H_
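A minimal sketch (not part of the original file) of a concrete subclass implementing only the pure-virtual methods declared above; it treats the payload as already-decoded 16-bit PCM, so it is illustrative rather than a real codec (wire-format L16 is big-endian, which this plain copy ignores).
// Sketch: the smallest AudioDecoder subclass that satisfies the interface.
#include <cstring>

#include "api/audio_codecs/audio_decoder.h"

class PassthroughPcmDecoder : public webrtc::AudioDecoder {
 public:
  void Reset() override {}
  int SampleRateHz() const override { return 16000; }
  size_t Channels() const override { return 1; }

 protected:
  int DecodeInternal(const uint8_t* encoded,
                     size_t encoded_len,
                     int /*sample_rate_hz*/,
                     int16_t* decoded,
                     SpeechType* speech_type) override {
    // The payload is assumed to be host-order 16-bit PCM, so "decoding" is a
    // plain copy. Returns the number of samples written.
    std::memcpy(decoded, encoded, encoded_len);
    *speech_type = kSpeech;
    return static_cast<int>(encoded_len / sizeof(int16_t));
  }
};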
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/ref_count.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// A factory that creates AudioDecoders.
|
||||
class AudioDecoderFactory : public RefCountInterface {
|
||||
public:
|
||||
virtual std::vector<AudioCodecSpec> GetSupportedDecoders() = 0;
|
||||
|
||||
virtual bool IsSupportedDecoder(const SdpAudioFormat& format) = 0;
|
||||
|
||||
// Create a new decoder instance. The `codec_pair_id` argument is used to link
|
||||
// encoders and decoders that talk to the same remote entity: if an
|
||||
// AudioEncoderFactory::MakeAudioEncoder() and an
|
||||
// AudioDecoderFactory::MakeAudioDecoder() call receive non-null IDs that
|
||||
// compare equal, the factory implementations may assume that the encoder and
|
||||
// decoder form a pair. (The intended use case for this is to set up
|
||||
// communication between the AudioEncoder and AudioDecoder instances, which is
|
||||
// needed for some codecs with built-in bandwidth adaptation.)
|
||||
//
|
||||
// Returns null if the format isn't supported.
|
||||
//
|
||||
// Note: Implementations need to be robust against combinations other than
|
||||
// one encoder, one decoder getting the same ID; such decoders must still
|
||||
// work.
|
||||
virtual std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id) = 0;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
|
||||
|
|
@ -0,0 +1,145 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_TEMPLATE_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_TEMPLATE_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "api/audio_codecs/audio_decoder_factory.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "api/make_ref_counted.h"
|
||||
#include "api/scoped_refptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace audio_decoder_factory_template_impl {
|
||||
|
||||
template <typename... Ts>
|
||||
struct Helper;
|
||||
|
||||
// Base case: 0 template parameters.
|
||||
template <>
|
||||
struct Helper<> {
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {}
|
||||
static bool IsSupportedDecoder(const SdpAudioFormat& format) { return false; }
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id,
|
||||
const FieldTrialsView* field_trials) {
|
||||
return nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
// Inductive case: Called with n + 1 template parameters; calls subroutines
|
||||
// with n template parameters.
|
||||
template <typename T, typename... Ts>
|
||||
struct Helper<T, Ts...> {
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
|
||||
T::AppendSupportedDecoders(specs);
|
||||
Helper<Ts...>::AppendSupportedDecoders(specs);
|
||||
}
|
||||
static bool IsSupportedDecoder(const SdpAudioFormat& format) {
|
||||
auto opt_config = T::SdpToConfig(format);
|
||||
static_assert(std::is_same<decltype(opt_config),
|
||||
absl::optional<typename T::Config>>::value,
|
||||
"T::SdpToConfig() must return a value of type "
|
||||
"absl::optional<T::Config>");
|
||||
return opt_config ? true : Helper<Ts...>::IsSupportedDecoder(format);
|
||||
}
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id,
|
||||
const FieldTrialsView* field_trials) {
|
||||
auto opt_config = T::SdpToConfig(format);
|
||||
return opt_config ? T::MakeAudioDecoder(*opt_config, codec_pair_id)
|
||||
: Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id,
|
||||
field_trials);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
class AudioDecoderFactoryT : public AudioDecoderFactory {
|
||||
public:
|
||||
explicit AudioDecoderFactoryT(const FieldTrialsView* field_trials) {
|
||||
field_trials_ = field_trials;
|
||||
}
|
||||
|
||||
std::vector<AudioCodecSpec> GetSupportedDecoders() override {
|
||||
std::vector<AudioCodecSpec> specs;
|
||||
Helper<Ts...>::AppendSupportedDecoders(&specs);
|
||||
return specs;
|
||||
}
|
||||
|
||||
bool IsSupportedDecoder(const SdpAudioFormat& format) override {
|
||||
return Helper<Ts...>::IsSupportedDecoder(format);
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id) override {
|
||||
return Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id,
|
||||
field_trials_);
|
||||
}
|
||||
|
||||
const FieldTrialsView* field_trials_;
|
||||
};
|
||||
|
||||
} // namespace audio_decoder_factory_template_impl
|
||||
|
||||
// Make an AudioDecoderFactory that can create instances of the given decoders.
|
||||
//
|
||||
// Each decoder type is given as a template argument to the function; it should
|
||||
// be a struct with the following static member functions:
|
||||
//
|
||||
// // Converts `audio_format` to a ConfigType instance. Returns an empty
|
||||
// // optional if `audio_format` doesn't correctly specify a decoder of our
|
||||
// // type.
|
||||
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
//
|
||||
// // Appends zero or more AudioCodecSpecs to the list that will be returned
|
||||
// // by AudioDecoderFactory::GetSupportedDecoders().
|
||||
// void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
//
|
||||
// // Creates an AudioDecoder for the specified format. Used to implement
|
||||
// // AudioDecoderFactory::MakeAudioDecoder().
|
||||
// std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
// const ConfigType& config,
|
||||
// absl::optional<AudioCodecPairId> codec_pair_id);
|
||||
//
|
||||
// ConfigType should be a type that encapsulates all the settings needed to
|
||||
// create an AudioDecoder. T::Config (where T is the decoder struct) should
|
||||
// either be the config type, or an alias for it.
|
||||
//
|
||||
// Whenever it tries to do something, the new factory will try each of the
|
||||
// decoder types in the order they were specified in the template argument
|
||||
// list, stopping at the first one that claims to be able to do the job.
|
||||
//
|
||||
// TODO(kwiberg): Point at CreateBuiltinAudioDecoderFactory() for an example of
|
||||
// how it is used.
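//
// A minimal usage sketch, using the built-in G.711 decoder entry point
// declared in api/audio_codecs/g711/audio_decoder_g711.h:
//
//   rtc::scoped_refptr<AudioDecoderFactory> factory =
//       CreateAudioDecoderFactory<AudioDecoderG711>();
//   std::unique_ptr<AudioDecoder> decoder = factory->MakeAudioDecoder(
//       SdpAudioFormat("PCMU", 8000, 1), absl::nullopt);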
|
||||
template <typename... Ts>
|
||||
rtc::scoped_refptr<AudioDecoderFactory> CreateAudioDecoderFactory(
|
||||
const FieldTrialsView* field_trials = nullptr) {
|
||||
// There's no technical reason we couldn't allow zero template parameters,
|
||||
// but such a factory couldn't create any decoders, and callers can do this
|
||||
// by mistake by simply forgetting the <> altogether. So we forbid it in
|
||||
// order to prevent caller foot-shooting.
|
||||
static_assert(sizeof...(Ts) >= 1,
|
||||
"Caller must give at least one template parameter");
|
||||
|
||||
return rtc::make_ref_counted<
|
||||
audio_decoder_factory_template_impl::AudioDecoderFactoryT<Ts...>>(
|
||||
field_trials);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_TEMPLATE_H_
|
||||
114
TMessagesProj/jni/voip/webrtc/api/audio_codecs/audio_encoder.cc
Normal file
|
|
@@ -0,0 +1,114 @@
|
|||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/trace_event.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
ANAStats::ANAStats() = default;
|
||||
ANAStats::~ANAStats() = default;
|
||||
ANAStats::ANAStats(const ANAStats&) = default;
|
||||
|
||||
AudioEncoder::EncodedInfo::EncodedInfo() = default;
|
||||
AudioEncoder::EncodedInfo::EncodedInfo(const EncodedInfo&) = default;
|
||||
AudioEncoder::EncodedInfo::EncodedInfo(EncodedInfo&&) = default;
|
||||
AudioEncoder::EncodedInfo::~EncodedInfo() = default;
|
||||
AudioEncoder::EncodedInfo& AudioEncoder::EncodedInfo::operator=(
|
||||
const EncodedInfo&) = default;
|
||||
AudioEncoder::EncodedInfo& AudioEncoder::EncodedInfo::operator=(EncodedInfo&&) =
|
||||
default;
|
||||
|
||||
int AudioEncoder::RtpTimestampRateHz() const {
|
||||
return SampleRateHz();
|
||||
}
|
||||
|
||||
AudioEncoder::EncodedInfo AudioEncoder::Encode(
|
||||
uint32_t rtp_timestamp,
|
||||
rtc::ArrayView<const int16_t> audio,
|
||||
rtc::Buffer* encoded) {
|
||||
TRACE_EVENT0("webrtc", "AudioEncoder::Encode");
|
||||
RTC_CHECK_EQ(audio.size(),
|
||||
static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
|
||||
|
||||
const size_t old_size = encoded->size();
|
||||
EncodedInfo info = EncodeImpl(rtp_timestamp, audio, encoded);
|
||||
RTC_CHECK_EQ(encoded->size() - old_size, info.encoded_bytes);
|
||||
return info;
|
||||
}
|
||||
|
||||
bool AudioEncoder::SetFec(bool enable) {
|
||||
return !enable;
|
||||
}
|
||||
|
||||
bool AudioEncoder::SetDtx(bool enable) {
|
||||
return !enable;
|
||||
}
|
||||
|
||||
bool AudioEncoder::GetDtx() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool AudioEncoder::SetApplication(Application application) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void AudioEncoder::SetMaxPlaybackRate(int frequency_hz) {}
|
||||
|
||||
void AudioEncoder::SetTargetBitrate(int target_bps) {}
|
||||
|
||||
rtc::ArrayView<std::unique_ptr<AudioEncoder>>
|
||||
AudioEncoder::ReclaimContainedEncoders() {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool AudioEncoder::EnableAudioNetworkAdaptor(const std::string& config_string,
|
||||
RtcEventLog* event_log) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void AudioEncoder::DisableAudioNetworkAdaptor() {}
|
||||
|
||||
void AudioEncoder::OnReceivedUplinkPacketLossFraction(
|
||||
float uplink_packet_loss_fraction) {}
|
||||
|
||||
void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
|
||||
float uplink_recoverable_packet_loss_fraction) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
}
|
||||
|
||||
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
|
||||
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, absl::nullopt);
|
||||
}
|
||||
|
||||
void AudioEncoder::OnReceivedUplinkBandwidth(
|
||||
int target_audio_bitrate_bps,
|
||||
absl::optional<int64_t> bwe_period_ms) {}
|
||||
|
||||
void AudioEncoder::OnReceivedUplinkAllocation(BitrateAllocationUpdate update) {
|
||||
OnReceivedUplinkBandwidth(update.target_bitrate.bps(),
|
||||
update.bwe_period.ms());
|
||||
}
|
||||
|
||||
void AudioEncoder::OnReceivedRtt(int rtt_ms) {}
|
||||
|
||||
void AudioEncoder::OnReceivedOverhead(size_t overhead_bytes_per_packet) {}
|
||||
|
||||
void AudioEncoder::SetReceiverFrameLengthRange(int min_frame_length_ms,
|
||||
int max_frame_length_ms) {}
|
||||
|
||||
ANAStats AudioEncoder::GetANAStats() const {
|
||||
return ANAStats();
|
||||
}
|
||||
|
||||
constexpr int AudioEncoder::kMaxNumberOfChannels;
|
||||
} // namespace webrtc
|
||||
269
TMessagesProj/jni/voip/webrtc/api/audio_codecs/audio_encoder.h
Normal file
|
|
@@ -0,0 +1,269 @@
|
|||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_ENCODER_H_
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/array_view.h"
|
||||
#include "api/call/bitrate_allocation.h"
|
||||
#include "api/units/data_rate.h"
|
||||
#include "api/units/time_delta.h"
|
||||
#include "rtc_base/buffer.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class RtcEventLog;
|
||||
|
||||
// Statistics related to Audio Network Adaptation.
|
||||
struct ANAStats {
|
||||
ANAStats();
|
||||
ANAStats(const ANAStats&);
|
||||
~ANAStats();
|
||||
// Number of actions taken by the ANA bitrate controller since the start of
|
||||
// the call. If this value is not set, it indicates that the bitrate
|
||||
// controller is disabled.
|
||||
absl::optional<uint32_t> bitrate_action_counter;
|
||||
// Number of actions taken by the ANA channel controller since the start of
|
||||
// the call. If this value is not set, it indicates that the channel
|
||||
// controller is disabled.
|
||||
absl::optional<uint32_t> channel_action_counter;
|
||||
// Number of actions taken by the ANA DTX controller since the start of the
|
||||
// call. If this value is not set, it indicates that the DTX controller is
|
||||
// disabled.
|
||||
absl::optional<uint32_t> dtx_action_counter;
|
||||
// Number of actions taken by the ANA FEC controller since the start of the
|
||||
// call. If this value is not set, it indicates that the FEC controller is
|
||||
// disabled.
|
||||
absl::optional<uint32_t> fec_action_counter;
|
||||
// Number of times the ANA frame length controller decided to increase the
|
||||
// frame length since the start of the call. If this value is not set, it
|
||||
// indicates that the frame length controller is disabled.
|
||||
absl::optional<uint32_t> frame_length_increase_counter;
|
||||
// Number of times the ANA frame length controller decided to decrease the
|
||||
// frame length since the start of the call. If this value is not set, it
|
||||
// indicates that the frame length controller is disabled.
|
||||
absl::optional<uint32_t> frame_length_decrease_counter;
|
||||
// The uplink packet loss fractions as set by the ANA FEC controller. If this
|
||||
// value is not set, it indicates that the ANA FEC controller is not active.
|
||||
absl::optional<float> uplink_packet_loss_fraction;
|
||||
};
|
||||
|
||||
// This is the interface class for encoders in AudioCoding module. Each codec
|
||||
// type must have an implementation of this class.
|
||||
class AudioEncoder {
|
||||
public:
|
||||
// Used for UMA logging of codec usage. The same codecs, with the
|
||||
// same values, must be listed in
|
||||
// src/tools/metrics/histograms/histograms.xml in chromium to log
|
||||
// correct values.
|
||||
enum class CodecType {
|
||||
kOther = 0, // Codec not specified, and/or not listed in this enum
|
||||
kOpus = 1,
|
||||
kIsac = 2,
|
||||
kPcmA = 3,
|
||||
kPcmU = 4,
|
||||
kG722 = 5,
|
||||
kIlbc = 6,
|
||||
|
||||
// Number of histogram bins in the UMA logging of codec types. The
|
||||
// total number of different codecs that are logged cannot exceed this
|
||||
// number.
|
||||
kMaxLoggedAudioCodecTypes
|
||||
};
|
||||
|
||||
struct EncodedInfoLeaf {
|
||||
size_t encoded_bytes = 0;
|
||||
uint32_t encoded_timestamp = 0;
|
||||
int payload_type = 0;
|
||||
bool send_even_if_empty = false;
|
||||
bool speech = true;
|
||||
CodecType encoder_type = CodecType::kOther;
|
||||
};
|
||||
|
||||
// This is the main struct for auxiliary encoding information. Each encoded
|
||||
// packet should be accompanied by one EncodedInfo struct, containing the
|
||||
// total number of `encoded_bytes`, the `encoded_timestamp` and the
|
||||
// `payload_type`. If the packet contains redundant encodings, the `redundant`
|
||||
// vector will be populated with EncodedInfoLeaf structs. Each struct in the
|
||||
// vector represents one encoding; the order of structs in the vector is the
|
||||
// same as the order in which the actual payloads are written to the byte
|
||||
// stream. When EncodedInfoLeaf structs are present in the vector, the main
|
||||
// struct's `encoded_bytes` will be the sum of all the `encoded_bytes` in the
|
||||
// vector.
|
||||
struct EncodedInfo : public EncodedInfoLeaf {
|
||||
EncodedInfo();
|
||||
EncodedInfo(const EncodedInfo&);
|
||||
EncodedInfo(EncodedInfo&&);
|
||||
~EncodedInfo();
|
||||
EncodedInfo& operator=(const EncodedInfo&);
|
||||
EncodedInfo& operator=(EncodedInfo&&);
|
||||
|
||||
std::vector<EncodedInfoLeaf> redundant;
|
||||
};
|
||||
|
||||
virtual ~AudioEncoder() = default;
|
||||
|
||||
// Returns the input sample rate in Hz and the number of input channels.
|
||||
// These are constants set at instantiation time.
|
||||
virtual int SampleRateHz() const = 0;
|
||||
virtual size_t NumChannels() const = 0;
|
||||
|
||||
// Returns the rate at which the RTP timestamps are updated. The default
|
||||
// implementation returns SampleRateHz().
|
||||
virtual int RtpTimestampRateHz() const;
|
||||
|
||||
// Returns the number of 10 ms frames the encoder will put in the next
|
||||
// packet. This value may only change when Encode() outputs a packet; i.e.,
|
||||
// the encoder may vary the number of 10 ms frames from packet to packet, but
|
||||
// it must decide the length of the next packet no later than when outputting
|
||||
// the preceding packet.
|
||||
virtual size_t Num10MsFramesInNextPacket() const = 0;
|
||||
|
||||
// Returns the maximum value that can be returned by
|
||||
// Num10MsFramesInNextPacket().
|
||||
virtual size_t Max10MsFramesInAPacket() const = 0;
|
||||
|
||||
// Returns the current target bitrate in bits/s. The value -1 means that the
|
||||
// codec adapts the target automatically, and a current target cannot be
|
||||
// provided.
|
||||
virtual int GetTargetBitrate() const = 0;
|
||||
|
||||
// Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
|
||||
// NumChannels() samples). Multi-channel audio must be sample-interleaved.
|
||||
// The encoder appends zero or more bytes of output to `encoded` and returns
|
||||
// additional encoding information. Encode() checks some preconditions, calls
|
||||
// EncodeImpl() which does the actual work, and then checks some
|
||||
// postconditions.
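// For example, at 48000 Hz with two channels each call consumes
// 48000 / 100 * 2 = 960 interleaved samples.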
|
||||
EncodedInfo Encode(uint32_t rtp_timestamp,
|
||||
rtc::ArrayView<const int16_t> audio,
|
||||
rtc::Buffer* encoded);
|
||||
|
||||
// Resets the encoder to its starting state, discarding any input that has
|
||||
// been fed to the encoder but not yet emitted in a packet.
|
||||
virtual void Reset() = 0;
|
||||
|
||||
// Enables or disables codec-internal FEC (forward error correction). Returns
|
||||
// true if the codec was able to comply. The default implementation returns
|
||||
// true when asked to disable FEC and false when asked to enable it (meaning
|
||||
// that FEC isn't supported).
|
||||
virtual bool SetFec(bool enable);
|
||||
|
||||
// Enables or disables codec-internal VAD/DTX. Returns true if the codec was
|
||||
// able to comply. The default implementation returns true when asked to
|
||||
// disable DTX and false when asked to enable it (meaning that DTX isn't
|
||||
// supported).
|
||||
virtual bool SetDtx(bool enable);
|
||||
|
||||
// Returns the status of codec-internal DTX. The default implementation always
|
||||
// returns false.
|
||||
virtual bool GetDtx() const;
|
||||
|
||||
// Sets the application mode. Returns true if the codec was able to comply.
|
||||
// The default implementation just returns false.
|
||||
enum class Application { kSpeech, kAudio };
|
||||
virtual bool SetApplication(Application application);
|
||||
|
||||
// Tells the encoder about the highest sample rate the decoder is expected to
|
||||
// use when decoding the bitstream. The encoder would typically use this
|
||||
// information to adjust the quality of the encoding. The default
|
||||
// implementation does nothing.
|
||||
virtual void SetMaxPlaybackRate(int frequency_hz);
|
||||
|
||||
// Tells the encoder what average bitrate we'd like it to produce. The
|
||||
// encoder is free to adjust or disregard the given bitrate (the default
|
||||
// implementation does the latter).
|
||||
ABSL_DEPRECATED("Use OnReceivedTargetAudioBitrate instead")
|
||||
virtual void SetTargetBitrate(int target_bps);
|
||||
|
||||
// Causes this encoder to let go of any other encoders it contains, and
|
||||
// returns a pointer to an array where they are stored (which is required to
|
||||
// live as long as this encoder). Unless the returned array is empty, you may
|
||||
// not call any methods on this encoder afterwards, except for the
|
||||
// destructor. The default implementation just returns an empty array.
|
||||
// NOTE: This method is subject to change. Do not call or override it.
|
||||
virtual rtc::ArrayView<std::unique_ptr<AudioEncoder>>
|
||||
ReclaimContainedEncoders();
|
||||
|
||||
// Enables audio network adaptor. Returns true if successful.
|
||||
virtual bool EnableAudioNetworkAdaptor(const std::string& config_string,
|
||||
RtcEventLog* event_log);
|
||||
|
||||
// Disables audio network adaptor.
|
||||
virtual void DisableAudioNetworkAdaptor();
|
||||
|
||||
// Provides uplink packet loss fraction to this encoder to allow it to adapt.
|
||||
// `uplink_packet_loss_fraction` is in the range [0.0, 1.0].
|
||||
virtual void OnReceivedUplinkPacketLossFraction(
|
||||
float uplink_packet_loss_fraction);
|
||||
|
||||
ABSL_DEPRECATED("")
|
||||
virtual void OnReceivedUplinkRecoverablePacketLossFraction(
|
||||
float uplink_recoverable_packet_loss_fraction);
|
||||
|
||||
// Provides target audio bitrate to this encoder to allow it to adapt.
|
||||
virtual void OnReceivedTargetAudioBitrate(int target_bps);
|
||||
|
||||
// Provides target audio bitrate and corresponding probing interval of
|
||||
// the bandwidth estimator to this encoder to allow it to adapt.
|
||||
virtual void OnReceivedUplinkBandwidth(int target_audio_bitrate_bps,
|
||||
absl::optional<int64_t> bwe_period_ms);
|
||||
|
||||
// Provides target audio bitrate and corresponding probing interval of
|
||||
// the bandwidth estimator to this encoder to allow it to adapt.
|
||||
virtual void OnReceivedUplinkAllocation(BitrateAllocationUpdate update);
|
||||
|
||||
// Provides RTT to this encoder to allow it to adapt.
|
||||
virtual void OnReceivedRtt(int rtt_ms);
|
||||
|
||||
// Provides overhead to this encoder to adapt. The overhead is the number of
|
||||
// bytes that will be added to each packet the encoder generates.
|
||||
virtual void OnReceivedOverhead(size_t overhead_bytes_per_packet);
|
||||
|
||||
// To allow encoder to adapt its frame length, it must be provided the frame
|
||||
// length range that receivers can accept.
|
||||
virtual void SetReceiverFrameLengthRange(int min_frame_length_ms,
|
||||
int max_frame_length_ms);
|
||||
|
||||
// Get statistics related to audio network adaptation.
|
||||
virtual ANAStats GetANAStats() const;
|
||||
|
||||
// The range of frame lengths that are supported, or nullopt if there's no such
|
||||
// information. This is used together with the bitrate range to calculate the
|
||||
// full bitrate range, including overhead.
|
||||
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
|
||||
const = 0;
|
||||
|
||||
// The range of payload bitrates that are supported. This is used together
|
||||
// with the frame length range to calculate the full bitrate range, including
|
||||
// overhead.
|
||||
virtual absl::optional<std::pair<DataRate, DataRate>> GetBitrateRange()
|
||||
const {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
// The maximum number of audio channels supported by WebRTC encoders.
|
||||
static constexpr int kMaxNumberOfChannels = 24;
|
||||
|
||||
protected:
|
||||
// Subclasses implement this to perform the actual encoding. Called by
|
||||
// Encode().
|
||||
virtual EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
|
||||
rtc::ArrayView<const int16_t> audio,
|
||||
rtc::Buffer* encoded) = 0;
|
||||
};
|
||||
} // namespace webrtc
|
||||
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_H_
|
||||
|
|
@@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "rtc_base/ref_count.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// A factory that creates AudioEncoders.
|
||||
class AudioEncoderFactory : public rtc::RefCountInterface {
|
||||
public:
|
||||
// Returns a prioritized list of audio codecs, to use for signaling etc.
|
||||
virtual std::vector<AudioCodecSpec> GetSupportedEncoders() = 0;
|
||||
|
||||
// Returns information about how this format would be encoded, provided it's
|
||||
// supported. More formats and format variations may be supported than those
|
||||
// returned by GetSupportedEncoders().
|
||||
virtual absl::optional<AudioCodecInfo> QueryAudioEncoder(
|
||||
const SdpAudioFormat& format) = 0;
|
||||
|
||||
// Creates an AudioEncoder for the specified format. The encoder will tag its
|
||||
// payloads with the specified payload type. The `codec_pair_id` argument is
|
||||
// used to link encoders and decoders that talk to the same remote entity: if
|
||||
// an AudioEncoderFactory::MakeAudioEncoder() and an
|
||||
// AudioDecoderFactory::MakeAudioDecoder() call receive non-null IDs that
|
||||
// compare equal, the factory implementations may assume that the encoder and
|
||||
// decoder form a pair. (The intended use case for this is to set up
|
||||
// communication between the AudioEncoder and AudioDecoder instances, which is
|
||||
// needed for some codecs with built-in bandwidth adaptation.)
|
||||
//
|
||||
// Returns null if the format isn't supported.
|
||||
//
|
||||
// Note: Implementations need to be robust against combinations other than
|
||||
// one encoder, one decoder getting the same ID; such encoders must still
|
||||
// work.
|
||||
//
|
||||
// TODO(ossu): Try to avoid audio encoders having to know their payload type.
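//
// A minimal usage sketch, assuming `factory` is an AudioEncoderFactory
// created elsewhere (0 is the static RTP payload type for PCMU):
//
//   std::unique_ptr<AudioEncoder> encoder = factory->MakeAudioEncoder(
//       /*payload_type=*/0, SdpAudioFormat("PCMU", 8000, 1),
//       /*codec_pair_id=*/absl::nullopt);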
|
||||
virtual std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
int payload_type,
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id) = 0;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
|
||||
|
|
@@ -0,0 +1,163 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_TEMPLATE_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_TEMPLATE_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "api/audio_codecs/audio_encoder_factory.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "api/make_ref_counted.h"
|
||||
#include "api/scoped_refptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace audio_encoder_factory_template_impl {
|
||||
|
||||
template <typename... Ts>
|
||||
struct Helper;
|
||||
|
||||
// Base case: 0 template parameters.
|
||||
template <>
|
||||
struct Helper<> {
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {}
|
||||
static absl::optional<AudioCodecInfo> QueryAudioEncoder(
|
||||
const SdpAudioFormat& format) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
int payload_type,
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id,
|
||||
const FieldTrialsView* field_trials) {
|
||||
return nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
// Inductive case: Called with n + 1 template parameters; calls subroutines
|
||||
// with n template parameters.
|
||||
template <typename T, typename... Ts>
|
||||
struct Helper<T, Ts...> {
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
|
||||
T::AppendSupportedEncoders(specs);
|
||||
Helper<Ts...>::AppendSupportedEncoders(specs);
|
||||
}
|
||||
static absl::optional<AudioCodecInfo> QueryAudioEncoder(
|
||||
const SdpAudioFormat& format) {
|
||||
auto opt_config = T::SdpToConfig(format);
|
||||
static_assert(std::is_same<decltype(opt_config),
|
||||
absl::optional<typename T::Config>>::value,
|
||||
"T::SdpToConfig() must return a value of type "
|
||||
"absl::optional<T::Config>");
|
||||
return opt_config ? absl::optional<AudioCodecInfo>(
|
||||
T::QueryAudioEncoder(*opt_config))
|
||||
: Helper<Ts...>::QueryAudioEncoder(format);
|
||||
}
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
int payload_type,
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id,
|
||||
const FieldTrialsView* field_trials) {
|
||||
auto opt_config = T::SdpToConfig(format);
|
||||
if (opt_config) {
|
||||
return T::MakeAudioEncoder(*opt_config, payload_type, codec_pair_id);
|
||||
} else {
|
||||
return Helper<Ts...>::MakeAudioEncoder(payload_type, format,
|
||||
codec_pair_id, field_trials);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
class AudioEncoderFactoryT : public AudioEncoderFactory {
|
||||
public:
|
||||
explicit AudioEncoderFactoryT(const FieldTrialsView* field_trials) {
|
||||
field_trials_ = field_trials;
|
||||
}
|
||||
|
||||
std::vector<AudioCodecSpec> GetSupportedEncoders() override {
|
||||
std::vector<AudioCodecSpec> specs;
|
||||
Helper<Ts...>::AppendSupportedEncoders(&specs);
|
||||
return specs;
|
||||
}
|
||||
|
||||
absl::optional<AudioCodecInfo> QueryAudioEncoder(
|
||||
const SdpAudioFormat& format) override {
|
||||
return Helper<Ts...>::QueryAudioEncoder(format);
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
int payload_type,
|
||||
const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id) override {
|
||||
return Helper<Ts...>::MakeAudioEncoder(payload_type, format, codec_pair_id,
|
||||
field_trials_);
|
||||
}
|
||||
|
||||
const FieldTrialsView* field_trials_;
|
||||
};
|
||||
|
||||
} // namespace audio_encoder_factory_template_impl
|
||||
|
||||
// Make an AudioEncoderFactory that can create instances of the given encoders.
|
||||
//
|
||||
// Each encoder type is given as a template argument to the function; it should
|
||||
// be a struct with the following static member functions:
|
||||
//
|
||||
// // Converts `audio_format` to a ConfigType instance. Returns an empty
|
||||
// // optional if `audio_format` doesn't correctly specify an encoder of our
|
||||
// // type.
|
||||
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
//
|
||||
// // Appends zero or more AudioCodecSpecs to the list that will be returned
|
||||
// // by AudioEncoderFactory::GetSupportedEncoders().
|
||||
// void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
//
|
||||
// // Returns information about how this format would be encoded. Used to
|
||||
// // implement AudioEncoderFactory::QueryAudioEncoder().
|
||||
// AudioCodecInfo QueryAudioEncoder(const ConfigType& config);
|
||||
//
|
||||
// // Creates an AudioEncoder for the specified format. Used to implement
|
||||
// // AudioEncoderFactory::MakeAudioEncoder().
|
||||
// std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
// const ConfigType& config,
|
||||
// int payload_type,
|
||||
// absl::optional<AudioCodecPairId> codec_pair_id);
|
||||
//
|
||||
// ConfigType should be a type that encapsulates all the settings needed to
|
||||
// create an AudioEncoder. T::Config (where T is the encoder struct) should
|
||||
// either be the config type, or an alias for it.
|
||||
//
|
||||
// Whenever it tries to do something, the new factory will try each of the
|
||||
// encoders in the order they were specified in the template argument list,
|
||||
// stopping at the first one that claims to be able to do the job.
|
||||
//
|
||||
// TODO(kwiberg): Point at CreateBuiltinAudioEncoderFactory() for an example of
|
||||
// how it is used.
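//
// A minimal usage sketch, using the built-in G.711 encoder entry point
// declared in api/audio_codecs/g711/audio_encoder_g711.h:
//
//   rtc::scoped_refptr<AudioEncoderFactory> factory =
//       CreateAudioEncoderFactory<AudioEncoderG711>();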
|
||||
template <typename... Ts>
|
||||
rtc::scoped_refptr<AudioEncoderFactory> CreateAudioEncoderFactory(
|
||||
const FieldTrialsView* field_trials = nullptr) {
|
||||
// There's no technical reason we couldn't allow zero template parameters,
|
||||
// but such a factory couldn't create any encoders, and callers can do this
|
||||
// by mistake by simply forgetting the <> altogether. So we forbid it in
|
||||
// order to prevent caller foot-shooting.
|
||||
static_assert(sizeof...(Ts) >= 1,
|
||||
"Caller must give at least one template parameter");
|
||||
|
||||
return rtc::make_ref_counted<
|
||||
audio_encoder_factory_template_impl::AudioEncoderFactoryT<Ts...>>(
|
||||
field_trials);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_TEMPLATE_H_
|
||||
|
|
@@ -0,0 +1,86 @@
|
|||
/*
|
||||
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
SdpAudioFormat::SdpAudioFormat(const SdpAudioFormat&) = default;
|
||||
SdpAudioFormat::SdpAudioFormat(SdpAudioFormat&&) = default;
|
||||
|
||||
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
|
||||
int clockrate_hz,
|
||||
size_t num_channels)
|
||||
: name(name), clockrate_hz(clockrate_hz), num_channels(num_channels) {}
|
||||
|
||||
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
|
||||
int clockrate_hz,
|
||||
size_t num_channels,
|
||||
const CodecParameterMap& param)
|
||||
: name(name),
|
||||
clockrate_hz(clockrate_hz),
|
||||
num_channels(num_channels),
|
||||
parameters(param) {}
|
||||
|
||||
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
|
||||
int clockrate_hz,
|
||||
size_t num_channels,
|
||||
CodecParameterMap&& param)
|
||||
: name(name),
|
||||
clockrate_hz(clockrate_hz),
|
||||
num_channels(num_channels),
|
||||
parameters(std::move(param)) {}
|
||||
|
||||
bool SdpAudioFormat::Matches(const SdpAudioFormat& o) const {
|
||||
return absl::EqualsIgnoreCase(name, o.name) &&
|
||||
clockrate_hz == o.clockrate_hz && num_channels == o.num_channels;
|
||||
}
|
||||
|
||||
SdpAudioFormat::~SdpAudioFormat() = default;
|
||||
SdpAudioFormat& SdpAudioFormat::operator=(const SdpAudioFormat&) = default;
|
||||
SdpAudioFormat& SdpAudioFormat::operator=(SdpAudioFormat&&) = default;
|
||||
|
||||
bool operator==(const SdpAudioFormat& a, const SdpAudioFormat& b) {
|
||||
return absl::EqualsIgnoreCase(a.name, b.name) &&
|
||||
a.clockrate_hz == b.clockrate_hz && a.num_channels == b.num_channels &&
|
||||
a.parameters == b.parameters;
|
||||
}
|
||||
|
||||
AudioCodecInfo::AudioCodecInfo(int sample_rate_hz,
|
||||
size_t num_channels,
|
||||
int bitrate_bps)
|
||||
: AudioCodecInfo(sample_rate_hz,
|
||||
num_channels,
|
||||
bitrate_bps,
|
||||
bitrate_bps,
|
||||
bitrate_bps) {}
|
||||
|
||||
AudioCodecInfo::AudioCodecInfo(int sample_rate_hz,
|
||||
size_t num_channels,
|
||||
int default_bitrate_bps,
|
||||
int min_bitrate_bps,
|
||||
int max_bitrate_bps)
|
||||
: sample_rate_hz(sample_rate_hz),
|
||||
num_channels(num_channels),
|
||||
default_bitrate_bps(default_bitrate_bps),
|
||||
min_bitrate_bps(min_bitrate_bps),
|
||||
max_bitrate_bps(max_bitrate_bps) {
|
||||
RTC_DCHECK_GT(sample_rate_hz, 0);
|
||||
RTC_DCHECK_GT(num_channels, 0);
|
||||
RTC_DCHECK_GE(min_bitrate_bps, 0);
|
||||
RTC_DCHECK_LE(min_bitrate_bps, default_bitrate_bps);
|
||||
RTC_DCHECK_GE(max_bitrate_bps, default_bitrate_bps);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
135
TMessagesProj/jni/voip/webrtc/api/audio_codecs/audio_format.h
Normal file
|
|
@@ -0,0 +1,135 @@
|
|||
/*
|
||||
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_AUDIO_FORMAT_H_
|
||||
#define API_AUDIO_CODECS_AUDIO_FORMAT_H_
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
|
||||
#include "absl/strings/string_view.h"
|
||||
#include "api/rtp_parameters.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// SDP specification for a single audio codec.
|
||||
struct RTC_EXPORT SdpAudioFormat {
|
||||
using Parameters [[deprecated(("Use webrtc::CodecParameterMap"))]] =
|
||||
std::map<std::string, std::string>;
|
||||
|
||||
SdpAudioFormat(const SdpAudioFormat&);
|
||||
SdpAudioFormat(SdpAudioFormat&&);
|
||||
SdpAudioFormat(absl::string_view name, int clockrate_hz, size_t num_channels);
|
||||
SdpAudioFormat(absl::string_view name,
|
||||
int clockrate_hz,
|
||||
size_t num_channels,
|
||||
const CodecParameterMap& param);
|
||||
SdpAudioFormat(absl::string_view name,
|
||||
int clockrate_hz,
|
||||
size_t num_channels,
|
||||
CodecParameterMap&& param);
|
||||
~SdpAudioFormat();
|
||||
|
||||
// Returns true if this format is compatible with `o`. In SDP terminology:
|
||||
// would it represent the same codec between an offer and an answer? As
|
||||
// opposed to operator==, this method disregards codec parameters.
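// For example, {"PCMU", 8000, 1} matches {"pcmu", 8000, 1} even if only one
// of them carries a "ptime" parameter, since the name comparison is
// case-insensitive and `parameters` is ignored.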
|
||||
bool Matches(const SdpAudioFormat& o) const;
|
||||
|
||||
SdpAudioFormat& operator=(const SdpAudioFormat&);
|
||||
SdpAudioFormat& operator=(SdpAudioFormat&&);
|
||||
|
||||
friend bool operator==(const SdpAudioFormat& a, const SdpAudioFormat& b);
|
||||
friend bool operator!=(const SdpAudioFormat& a, const SdpAudioFormat& b) {
|
||||
return !(a == b);
|
||||
}
|
||||
|
||||
std::string name;
|
||||
int clockrate_hz;
|
||||
size_t num_channels;
|
||||
CodecParameterMap parameters;
|
||||
};
|
||||
|
||||
// Information about how an audio format is treated by the codec implementation.
|
||||
// Contains basic information, such as sample rate and number of channels, which
|
||||
// isn't uniformly presented by SDP. Also contains flags indicating support for
|
||||
// integrating with other parts of WebRTC, like external VAD and comfort noise
|
||||
// level calculation.
|
||||
//
|
||||
// To avoid API breakage, and make the code clearer, AudioCodecInfo should not
|
||||
// be directly initializable with any flags indicating optional support. If it
|
||||
// were, these initializers would break any time a new flag was added. It's also
|
||||
// more difficult to understand:
|
||||
// AudioCodecInfo info{16000, 1, 32000, true, false, false, true, true};
|
||||
// than
|
||||
// AudioCodecInfo info(16000, 1, 32000);
|
||||
// info.allow_comfort_noise = true;
|
||||
// info.future_flag_b = true;
|
||||
// info.future_flag_c = true;
|
||||
struct AudioCodecInfo {
|
||||
AudioCodecInfo(int sample_rate_hz, size_t num_channels, int bitrate_bps);
|
||||
AudioCodecInfo(int sample_rate_hz,
|
||||
size_t num_channels,
|
||||
int default_bitrate_bps,
|
||||
int min_bitrate_bps,
|
||||
int max_bitrate_bps);
|
||||
AudioCodecInfo(const AudioCodecInfo& b) = default;
|
||||
~AudioCodecInfo() = default;
|
||||
|
||||
bool operator==(const AudioCodecInfo& b) const {
|
||||
return sample_rate_hz == b.sample_rate_hz &&
|
||||
num_channels == b.num_channels &&
|
||||
default_bitrate_bps == b.default_bitrate_bps &&
|
||||
min_bitrate_bps == b.min_bitrate_bps &&
|
||||
max_bitrate_bps == b.max_bitrate_bps &&
|
||||
allow_comfort_noise == b.allow_comfort_noise &&
|
||||
supports_network_adaption == b.supports_network_adaption;
|
||||
}
|
||||
|
||||
bool operator!=(const AudioCodecInfo& b) const { return !(*this == b); }
|
||||
|
||||
bool HasFixedBitrate() const {
|
||||
RTC_DCHECK_GE(min_bitrate_bps, 0);
|
||||
RTC_DCHECK_LE(min_bitrate_bps, default_bitrate_bps);
|
||||
RTC_DCHECK_GE(max_bitrate_bps, default_bitrate_bps);
|
||||
return min_bitrate_bps == max_bitrate_bps;
|
||||
}
|
||||
|
||||
int sample_rate_hz;
|
||||
size_t num_channels;
|
||||
int default_bitrate_bps;
|
||||
int min_bitrate_bps;
|
||||
int max_bitrate_bps;
|
||||
|
||||
bool allow_comfort_noise = true; // This codec can be used with an external
|
||||
// comfort noise generator.
|
||||
bool supports_network_adaption = false; // This codec can adapt to varying
|
||||
// network conditions.
|
||||
};
|
||||
|
||||
// AudioCodecSpec ties an audio format to specific information about the codec
|
||||
// and its implementation.
|
||||
struct AudioCodecSpec {
|
||||
bool operator==(const AudioCodecSpec& b) const {
|
||||
return format == b.format && info == b.info;
|
||||
}
|
||||
|
||||
bool operator!=(const AudioCodecSpec& b) const { return !(*this == b); }
|
||||
|
||||
SdpAudioFormat format;
|
||||
AudioCodecInfo info;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_AUDIO_FORMAT_H_
|
||||
|
|
@@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "api/audio_codecs/L16/audio_decoder_L16.h"
|
||||
#include "api/audio_codecs/audio_decoder_factory_template.h"
|
||||
#include "api/audio_codecs/g711/audio_decoder_g711.h"
|
||||
#include "api/audio_codecs/g722/audio_decoder_g722.h"
|
||||
#if WEBRTC_USE_BUILTIN_ILBC
|
||||
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h" // nogncheck
|
||||
#endif
|
||||
#if WEBRTC_USE_BUILTIN_OPUS
|
||||
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
|
||||
#include "api/audio_codecs/opus/audio_decoder_opus.h" // nogncheck
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
// Modify an audio decoder to not advertise support for anything.
|
||||
template <typename T>
|
||||
struct NotAdvertised {
|
||||
using Config = typename T::Config;
|
||||
static absl::optional<Config> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format) {
|
||||
return T::SdpToConfig(audio_format);
|
||||
}
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
|
||||
// Don't advertise support for anything.
|
||||
}
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const Config& config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
|
||||
return T::MakeAudioDecoder(config, codec_pair_id);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory() {
|
||||
return CreateAudioDecoderFactory<
|
||||
|
||||
#if WEBRTC_USE_BUILTIN_OPUS
|
||||
AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>,
|
||||
#endif
|
||||
|
||||
AudioDecoderG722,
|
||||
|
||||
#if WEBRTC_USE_BUILTIN_ILBC
|
||||
AudioDecoderIlbc,
|
||||
#endif
|
||||
|
||||
AudioDecoderG711, NotAdvertised<AudioDecoderL16>>();
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
|
||||
#define API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
|
||||
|
||||
#include "api/audio_codecs/audio_decoder_factory.h"
|
||||
#include "api/scoped_refptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Creates a new factory that can create the built-in types of audio decoders.
|
||||
// Note: This will link with all the code implementing those codecs, so if you
|
||||
// only need a subset of the codecs, consider using
|
||||
// CreateAudioDecoderFactory<...codecs listed here...>() or
|
||||
// CreateOpusAudioDecoderFactory() instead.
|
||||
rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory();
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
|
||||
|
|
@@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/builtin_audio_encoder_factory.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "api/audio_codecs/L16/audio_encoder_L16.h"
|
||||
#include "api/audio_codecs/audio_encoder_factory_template.h"
|
||||
#include "api/audio_codecs/g711/audio_encoder_g711.h"
|
||||
#include "api/audio_codecs/g722/audio_encoder_g722.h"
|
||||
#if WEBRTC_USE_BUILTIN_ILBC
|
||||
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h" // nogncheck
|
||||
#endif
|
||||
#if WEBRTC_USE_BUILTIN_OPUS
|
||||
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
|
||||
#include "api/audio_codecs/opus/audio_encoder_opus.h" // nogncheck
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
// Modify an audio encoder to not advertise support for anything.
|
||||
template <typename T>
|
||||
struct NotAdvertised {
|
||||
using Config = typename T::Config;
|
||||
static absl::optional<Config> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format) {
|
||||
return T::SdpToConfig(audio_format);
|
||||
}
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
|
||||
// Don't advertise support for anything.
|
||||
}
|
||||
static AudioCodecInfo QueryAudioEncoder(const Config& config) {
|
||||
return T::QueryAudioEncoder(config);
|
||||
}
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr) {
|
||||
return T::MakeAudioEncoder(config, payload_type, codec_pair_id,
|
||||
field_trials);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory() {
|
||||
return CreateAudioEncoderFactory<
|
||||
|
||||
#if WEBRTC_USE_BUILTIN_OPUS
|
||||
AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>,
|
||||
#endif
|
||||
|
||||
AudioEncoderG722,
|
||||
|
||||
#if WEBRTC_USE_BUILTIN_ILBC
|
||||
AudioEncoderIlbc,
|
||||
#endif
|
||||
|
||||
AudioEncoderG711, NotAdvertised<AudioEncoderL16>>();
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
|
||||
#define API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
|
||||
|
||||
#include "api/audio_codecs/audio_encoder_factory.h"
|
||||
#include "api/scoped_refptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Creates a new factory that can create the built-in types of audio encoders.
|
||||
// Note: This will link with all the code implementing those codecs, so if you
|
||||
// only need a subset of the codecs, consider using
|
||||
// CreateAudioEncoderFactory<...codecs listed here...>() or
|
||||
// CreateOpusAudioEncoderFactory() instead.
|
||||
rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory();
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
|
||||
55
TMessagesProj/jni/voip/webrtc/api/audio_codecs/g711/BUILD.gn
Normal file
|
|
@@ -0,0 +1,55 @@
|
|||
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_g711") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_encoder_g711.cc",
|
||||
"audio_encoder_g711.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:g711",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base:safe_minmax",
|
||||
"../../../rtc_base:stringutils",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("audio_decoder_g711") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_decoder_g711.cc",
|
||||
"audio_decoder_g711.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:g711",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
|
@@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/g711/audio_decoder_g711.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioDecoderG711::Config> AudioDecoderG711::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
const bool is_pcmu = absl::EqualsIgnoreCase(format.name, "PCMU");
|
||||
const bool is_pcma = absl::EqualsIgnoreCase(format.name, "PCMA");
|
||||
if (format.clockrate_hz == 8000 && format.num_channels >= 1 &&
|
||||
(is_pcmu || is_pcma)) {
|
||||
Config config;
|
||||
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
|
||||
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return absl::nullopt;
|
||||
}
|
||||
return config;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioDecoderG711::AppendSupportedDecoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
for (const char* type : {"PCMU", "PCMA"}) {
|
||||
specs->push_back({{type, 8000, 1}, {8000, 1, 64000}});
|
||||
}
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> AudioDecoderG711::MakeAudioDecoder(
|
||||
const Config& config,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
switch (config.type) {
|
||||
case Config::Type::kPcmU:
|
||||
return std::make_unique<AudioDecoderPcmU>(config.num_channels);
|
||||
case Config::Type::kPcmA:
|
||||
return std::make_unique<AudioDecoderPcmA>(config.num_channels);
|
||||
default:
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
|
||||
#define API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// G711 decoder API for use as a template parameter to
|
||||
// CreateAudioDecoderFactory<...>().
|
||||
struct RTC_EXPORT AudioDecoderG711 {
|
||||
struct Config {
|
||||
enum class Type { kPcmU, kPcmA };
|
||||
bool IsOk() const {
|
||||
return (type == Type::kPcmU || type == Type::kPcmA) &&
|
||||
num_channels >= 1 &&
|
||||
num_channels <= AudioDecoder::kMaxNumberOfChannels;
|
||||
}
|
||||
Type type;
|
||||
int num_channels;
|
||||
};
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const Config& config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
|
||||
|
|
@@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/g711/audio_encoder_g711.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
#include "rtc_base/numerics/safe_minmax.h"
|
||||
#include "rtc_base/string_to_number.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
const bool is_pcmu = absl::EqualsIgnoreCase(format.name, "PCMU");
|
||||
const bool is_pcma = absl::EqualsIgnoreCase(format.name, "PCMA");
|
||||
if (format.clockrate_hz == 8000 && format.num_channels >= 1 &&
|
||||
(is_pcmu || is_pcma)) {
|
||||
Config config;
|
||||
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
|
||||
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
|
||||
config.frame_size_ms = 20;
|
||||
auto ptime_iter = format.parameters.find("ptime");
|
||||
if (ptime_iter != format.parameters.end()) {
|
||||
const auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
|
||||
if (ptime && *ptime > 0) {
|
||||
config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
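// Rounded down to a multiple of 10 ms, then clamped to [10, 60] ms;
// e.g. ptime=25 yields a 20 ms frame size.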
|
||||
}
|
||||
}
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return absl::nullopt;
|
||||
}
|
||||
return config;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioEncoderG711::AppendSupportedEncoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
for (const char* type : {"PCMU", "PCMA"}) {
|
||||
specs->push_back({{type, 8000, 1}, {8000, 1, 64000}});
|
||||
}
|
||||
}
|
||||
|
||||
AudioCodecInfo AudioEncoderG711::QueryAudioEncoder(const Config& config) {
|
||||
RTC_DCHECK(config.IsOk());
|
||||
return {8000, rtc::dchecked_cast<size_t>(config.num_channels),
|
||||
64000 * config.num_channels};
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> AudioEncoderG711::MakeAudioEncoder(
|
||||
const Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
switch (config.type) {
|
||||
case Config::Type::kPcmU: {
|
||||
AudioEncoderPcmU::Config impl_config;
|
||||
impl_config.num_channels = config.num_channels;
|
||||
impl_config.frame_size_ms = config.frame_size_ms;
|
||||
impl_config.payload_type = payload_type;
|
||||
return std::make_unique<AudioEncoderPcmU>(impl_config);
|
||||
}
|
||||
case Config::Type::kPcmA: {
|
||||
AudioEncoderPcmA::Config impl_config;
|
||||
impl_config.num_channels = config.num_channels;
|
||||
impl_config.frame_size_ms = config.frame_size_ms;
|
||||
impl_config.payload_type = payload_type;
|
||||
return std::make_unique<AudioEncoderPcmA>(impl_config);
|
||||
}
|
||||
default: {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_G711_AUDIO_ENCODER_G711_H_
|
||||
#define API_AUDIO_CODECS_G711_AUDIO_ENCODER_G711_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// G711 encoder API for use as a template parameter to
|
||||
// CreateAudioEncoderFactory<...>().
|
||||
struct RTC_EXPORT AudioEncoderG711 {
|
||||
struct Config {
|
||||
enum class Type { kPcmU, kPcmA };
|
||||
bool IsOk() const {
|
||||
return (type == Type::kPcmU || type == Type::kPcmA) &&
|
||||
frame_size_ms > 0 && frame_size_ms % 10 == 0 &&
|
||||
num_channels >= 1 &&
|
||||
num_channels <= AudioEncoder::kMaxNumberOfChannels;
|
||||
}
|
||||
Type type = Type::kPcmU;
|
||||
int num_channels = 1;
|
||||
int frame_size_ms = 20;
|
||||
};
|
||||
static absl::optional<AudioEncoderG711::Config> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
static AudioCodecInfo QueryAudioEncoder(const Config& config);
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_G711_AUDIO_ENCODER_G711_H_
|
||||
62
TMessagesProj/jni/voip/webrtc/api/audio_codecs/g722/BUILD.gn
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
rtc_source_set("audio_encoder_g722_config") {
|
||||
visibility = [ "*" ]
|
||||
sources = [ "audio_encoder_g722_config.h" ]
|
||||
deps = [ "..:audio_codecs_api" ]
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_g722") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_encoder_g722.cc",
|
||||
"audio_encoder_g722.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_encoder_g722_config",
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:g722",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base:safe_minmax",
|
||||
"../../../rtc_base:stringutils",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("audio_decoder_g722") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_decoder_g722.cc",
|
||||
"audio_decoder_g722.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:g722",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/g722/audio_decoder_g722.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioDecoderG722::Config> AudioDecoderG722::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
if (absl::EqualsIgnoreCase(format.name, "G722") &&
|
||||
format.clockrate_hz == 8000 &&
|
||||
(format.num_channels == 1 || format.num_channels == 2)) {
|
||||
return Config{rtc::dchecked_cast<int>(format.num_channels)};
|
||||
}
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
void AudioDecoderG722::AppendSupportedDecoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
specs->push_back({{"G722", 8000, 1}, {16000, 1, 64000}});
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> AudioDecoderG722::MakeAudioDecoder(
|
||||
Config config,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
switch (config.num_channels) {
|
||||
case 1:
|
||||
return std::make_unique<AudioDecoderG722Impl>();
|
||||
case 2:
|
||||
return std::make_unique<AudioDecoderG722StereoImpl>();
|
||||
default:
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
|
||||
#define API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// G722 decoder API for use as a template parameter to
|
||||
// CreateAudioDecoderFactory<...>().
|
||||
struct RTC_EXPORT AudioDecoderG722 {
|
||||
struct Config {
|
||||
bool IsOk() const { return num_channels == 1 || num_channels == 2; }
|
||||
int num_channels;
|
||||
};
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
Config config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/g722/audio_encoder_g722.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
#include "rtc_base/numerics/safe_minmax.h"
|
||||
#include "rtc_base/string_to_number.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioEncoderG722Config> AudioEncoderG722::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
if (!absl::EqualsIgnoreCase(format.name, "g722") ||
|
||||
format.clockrate_hz != 8000) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
AudioEncoderG722Config config;
|
||||
config.num_channels = rtc::checked_cast<int>(format.num_channels);
|
||||
auto ptime_iter = format.parameters.find("ptime");
|
||||
if (ptime_iter != format.parameters.end()) {
|
||||
auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
|
||||
if (ptime && *ptime > 0) {
|
||||
const int whole_packets = *ptime / 10;
|
||||
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 10, 60);
|
||||
}
|
||||
}
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return absl::nullopt;
|
||||
}
|
||||
return config;
|
||||
}
|
||||
|
||||
void AudioEncoderG722::AppendSupportedEncoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
const SdpAudioFormat fmt = {"G722", 8000, 1};
|
||||
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
|
||||
specs->push_back({fmt, info});
|
||||
}
|
||||
|
||||
AudioCodecInfo AudioEncoderG722::QueryAudioEncoder(
|
||||
const AudioEncoderG722Config& config) {
|
||||
RTC_DCHECK(config.IsOk());
|
||||
return {16000, rtc::dchecked_cast<size_t>(config.num_channels),
|
||||
64000 * config.num_channels};
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> AudioEncoderG722::MakeAudioEncoder(
|
||||
const AudioEncoderG722Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
return std::make_unique<AudioEncoderG722Impl>(config, payload_type);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
|
||||
#define API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/audio_codecs/g722/audio_encoder_g722_config.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// G722 encoder API for use as a template parameter to
|
||||
// CreateAudioEncoderFactory<...>().
|
||||
struct RTC_EXPORT AudioEncoderG722 {
|
||||
using Config = AudioEncoderG722Config;
|
||||
static absl::optional<AudioEncoderG722Config> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderG722Config& config);
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const AudioEncoderG722Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
|
||||
#define API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
|
||||
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
struct AudioEncoderG722Config {
|
||||
bool IsOk() const {
|
||||
return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1 &&
|
||||
num_channels <= AudioEncoder::kMaxNumberOfChannels;
|
||||
}
|
||||
int frame_size_ms = 20;
|
||||
int num_channels = 1;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_CONFIG_H_
|
||||
58
TMessagesProj/jni/voip/webrtc/api/audio_codecs/ilbc/BUILD.gn
Normal file
58
TMessagesProj/jni/voip/webrtc/api/audio_codecs/ilbc/BUILD.gn
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
rtc_source_set("audio_encoder_ilbc_config") {
|
||||
visibility = [ "*" ]
|
||||
sources = [ "audio_encoder_ilbc_config.h" ]
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_ilbc") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_encoder_ilbc.cc",
|
||||
"audio_encoder_ilbc.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_encoder_ilbc_config",
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:ilbc",
|
||||
"../../../rtc_base:safe_conversions",
|
||||
"../../../rtc_base:safe_minmax",
|
||||
"../../../rtc_base:stringutils",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("audio_decoder_ilbc") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_decoder_ilbc.cc",
|
||||
"audio_decoder_ilbc.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:ilbc",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioDecoderIlbc::Config> AudioDecoderIlbc::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
if (absl::EqualsIgnoreCase(format.name, "ILBC") &&
|
||||
format.clockrate_hz == 8000 && format.num_channels == 1) {
|
||||
return Config();
|
||||
}
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
void AudioDecoderIlbc::AppendSupportedDecoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
specs->push_back({{"ILBC", 8000, 1}, {8000, 1, 13300}});
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> AudioDecoderIlbc::MakeAudioDecoder(
|
||||
Config config,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
return std::make_unique<AudioDecoderIlbcImpl>();
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
|
||||
#define API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// ILBC decoder API for use as a template parameter to
|
||||
// CreateAudioDecoderFactory<...>().
|
||||
struct AudioDecoderIlbc {
|
||||
struct Config {}; // Empty---no config values needed!
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
Config config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
|
||||
#include "rtc_base/numerics/safe_conversions.h"
|
||||
#include "rtc_base/numerics/safe_minmax.h"
|
||||
#include "rtc_base/string_to_number.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace {
|
||||
int GetIlbcBitrate(int ptime) {
|
||||
switch (ptime) {
|
||||
case 20:
|
||||
case 40:
|
||||
// 38 bytes per frame of 20 ms => 15200 bits/s.
|
||||
return 15200;
|
||||
case 30:
|
||||
case 60:
|
||||
// 50 bytes per frame of 30 ms => (approx) 13333 bits/s.
|
||||
return 13333;
|
||||
default:
|
||||
RTC_CHECK_NOTREACHED();
|
||||
}
|
||||
}
|
||||
} // namespace
|
||||
|
||||
absl::optional<AudioEncoderIlbcConfig> AudioEncoderIlbc::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
if (!absl::EqualsIgnoreCase(format.name.c_str(), "ILBC") ||
|
||||
format.clockrate_hz != 8000 || format.num_channels != 1) {
|
||||
return absl::nullopt;
|
||||
}
|
||||
|
||||
AudioEncoderIlbcConfig config;
|
||||
auto ptime_iter = format.parameters.find("ptime");
|
||||
if (ptime_iter != format.parameters.end()) {
|
||||
auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
|
||||
if (ptime && *ptime > 0) {
|
||||
const int whole_packets = *ptime / 10;
|
||||
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 20, 60);
|
||||
}
|
||||
}
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return absl::nullopt;
|
||||
}
|
||||
return config;
|
||||
}
|
||||
|
||||
void AudioEncoderIlbc::AppendSupportedEncoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
const SdpAudioFormat fmt = {"ILBC", 8000, 1};
|
||||
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
|
||||
specs->push_back({fmt, info});
|
||||
}
|
||||
|
||||
AudioCodecInfo AudioEncoderIlbc::QueryAudioEncoder(
|
||||
const AudioEncoderIlbcConfig& config) {
|
||||
RTC_DCHECK(config.IsOk());
|
||||
return {8000, 1, GetIlbcBitrate(config.frame_size_ms)};
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> AudioEncoderIlbc::MakeAudioEncoder(
|
||||
const AudioEncoderIlbcConfig& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
return std::make_unique<AudioEncoderIlbcImpl>(config, payload_type);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
|
||||
#define API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h"
|
||||
#include "api/field_trials_view.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// ILBC encoder API for use as a template parameter to
|
||||
// CreateAudioEncoderFactory<...>().
|
||||
struct AudioEncoderIlbc {
|
||||
using Config = AudioEncoderIlbcConfig;
|
||||
static absl::optional<AudioEncoderIlbcConfig> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderIlbcConfig& config);
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const AudioEncoderIlbcConfig& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_CONFIG_H_
|
||||
#define API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_CONFIG_H_
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
struct AudioEncoderIlbcConfig {
|
||||
bool IsOk() const {
|
||||
return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||
|
||||
frame_size_ms == 60);
|
||||
}
|
||||
int frame_size_ms = 30; // Valid values are 20, 30, 40, and 60 ms.
|
||||
// Note that frame size 40 ms produces encodings with two 20 ms frames in
|
||||
// them, and frame size 60 ms consists of two 30 ms frames.
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_CONFIG_H_
|
||||
110
TMessagesProj/jni/voip/webrtc/api/audio_codecs/opus/BUILD.gn
Normal file
110
TMessagesProj/jni/voip/webrtc/api/audio_codecs/opus/BUILD.gn
Normal file
|
|
@ -0,0 +1,110 @@
|
|||
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
import("../../../webrtc.gni")
|
||||
if (is_android) {
|
||||
import("//build/config/android/config.gni")
|
||||
import("//build/config/android/rules.gni")
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_opus_config") {
|
||||
visibility = [ "*" ]
|
||||
sources = [
|
||||
"audio_encoder_multi_channel_opus_config.cc",
|
||||
"audio_encoder_multi_channel_opus_config.h",
|
||||
"audio_encoder_opus_config.cc",
|
||||
"audio_encoder_opus_config.h",
|
||||
]
|
||||
deps = [ "../../../rtc_base/system:rtc_export" ]
|
||||
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
|
||||
defines = []
|
||||
if (rtc_opus_variable_complexity) {
|
||||
defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=1" ]
|
||||
} else {
|
||||
defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=0" ]
|
||||
}
|
||||
}
|
||||
|
||||
rtc_source_set("audio_decoder_opus_config") {
|
||||
visibility = [ "*" ]
|
||||
sources = [ "audio_decoder_multi_channel_opus_config.h" ]
|
||||
deps = [ "..:audio_codecs_api" ]
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_opus") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
public = [ "audio_encoder_opus.h" ]
|
||||
sources = [ "audio_encoder_opus.cc" ]
|
||||
deps = [
|
||||
":audio_encoder_opus_config",
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:webrtc_opus",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("audio_decoder_opus") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_decoder_opus.cc",
|
||||
"audio_decoder_opus.h",
|
||||
]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:webrtc_opus",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("audio_encoder_multiopus") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
public = [ "audio_encoder_multi_channel_opus.h" ]
|
||||
sources = [ "audio_encoder_multi_channel_opus.cc" ]
|
||||
deps = [
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:webrtc_multiopus",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
"../opus:audio_encoder_opus_config",
|
||||
]
|
||||
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
|
||||
}
|
||||
|
||||
rtc_library("audio_decoder_multiopus") {
|
||||
visibility = [ "*" ]
|
||||
poisonous = [ "audio_codecs" ]
|
||||
sources = [
|
||||
"audio_decoder_multi_channel_opus.cc",
|
||||
"audio_decoder_multi_channel_opus.h",
|
||||
]
|
||||
deps = [
|
||||
":audio_decoder_opus_config",
|
||||
"..:audio_codecs_api",
|
||||
"../../../api:field_trials_view",
|
||||
"../../../modules/audio_coding:webrtc_multiopus",
|
||||
"../../../rtc_base/system:rtc_export",
|
||||
]
|
||||
absl_deps = [
|
||||
"//third_party/abseil-cpp/absl/memory",
|
||||
"//third_party/abseil-cpp/absl/strings",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/memory/memory.h"
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioDecoderMultiChannelOpusConfig>
|
||||
AudioDecoderMultiChannelOpus::SdpToConfig(const SdpAudioFormat& format) {
|
||||
return AudioDecoderMultiChannelOpusImpl::SdpToConfig(format);
|
||||
}
|
||||
|
||||
void AudioDecoderMultiChannelOpus::AppendSupportedDecoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
// To get full utilization of the surround support of the Opus lib, we can
|
||||
// mark which channel is the low frequency effects (LFE). But that is not done
|
||||
// ATM.
|
||||
{
|
||||
AudioCodecInfo surround_5_1_opus_info{48000, 6,
|
||||
/* default_bitrate_bps= */ 128000};
|
||||
surround_5_1_opus_info.allow_comfort_noise = false;
|
||||
surround_5_1_opus_info.supports_network_adaption = false;
|
||||
SdpAudioFormat opus_format({"multiopus",
|
||||
48000,
|
||||
6,
|
||||
{{"minptime", "10"},
|
||||
{"useinbandfec", "1"},
|
||||
{"channel_mapping", "0,4,1,2,3,5"},
|
||||
{"num_streams", "4"},
|
||||
{"coupled_streams", "2"}}});
|
||||
specs->push_back({std::move(opus_format), surround_5_1_opus_info});
|
||||
}
|
||||
{
|
||||
AudioCodecInfo surround_7_1_opus_info{48000, 8,
|
||||
/* default_bitrate_bps= */ 200000};
|
||||
surround_7_1_opus_info.allow_comfort_noise = false;
|
||||
surround_7_1_opus_info.supports_network_adaption = false;
|
||||
SdpAudioFormat opus_format({"multiopus",
|
||||
48000,
|
||||
8,
|
||||
{{"minptime", "10"},
|
||||
{"useinbandfec", "1"},
|
||||
{"channel_mapping", "0,6,1,2,3,4,5,7"},
|
||||
{"num_streams", "5"},
|
||||
{"coupled_streams", "3"}}});
|
||||
specs->push_back({std::move(opus_format), surround_7_1_opus_info});
|
||||
}
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> AudioDecoderMultiChannelOpus::MakeAudioDecoder(
|
||||
AudioDecoderMultiChannelOpusConfig config,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
return AudioDecoderMultiChannelOpusImpl::MakeAudioDecoder(config);
|
||||
}
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
|
||||
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Opus decoder API for use as a template parameter to
|
||||
// CreateAudioDecoderFactory<...>().
|
||||
struct RTC_EXPORT AudioDecoderMultiChannelOpus {
|
||||
using Config = AudioDecoderMultiChannelOpusConfig;
|
||||
static absl::optional<AudioDecoderMultiChannelOpusConfig> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
AudioDecoderMultiChannelOpusConfig config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
|
||||
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
|
||||
namespace webrtc {
|
||||
struct AudioDecoderMultiChannelOpusConfig {
|
||||
// The number of channels that the decoder will output.
|
||||
int num_channels;
|
||||
|
||||
// Number of mono or stereo encoded Opus streams.
|
||||
int num_streams;
|
||||
|
||||
// Number of channel pairs coupled together, see RFC 7845 section
|
||||
// 5.1.1. Has to be less than the number of streams.
|
||||
int coupled_streams;
|
||||
|
||||
// Channel mapping table, defines the mapping from encoded streams to output
|
||||
// channels. See RFC 7845 section 5.1.1.
|
||||
std::vector<unsigned char> channel_mapping;
|
||||
|
||||
bool IsOk() const {
|
||||
if (num_channels < 1 || num_channels > AudioDecoder::kMaxNumberOfChannels ||
|
||||
num_streams < 0 || coupled_streams < 0) {
|
||||
return false;
|
||||
}
|
||||
if (num_streams < coupled_streams) {
|
||||
return false;
|
||||
}
|
||||
if (channel_mapping.size() != static_cast<size_t>(num_channels)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Every mono stream codes one channel, every coupled stream codes two. This
|
||||
// is the total coded channel count:
|
||||
const int max_coded_channel = num_streams + coupled_streams;
|
||||
for (const auto& x : channel_mapping) {
|
||||
// Coded channels >= max_coded_channel don't exist. Except for 255, which
|
||||
// tells Opus to put silence in output channel x.
|
||||
if (x >= max_coded_channel && x != 255) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (num_channels > 255 || max_coded_channel >= 255) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/opus/audio_decoder_opus.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/strings/match.h"
|
||||
#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
bool AudioDecoderOpus::Config::IsOk() const {
|
||||
if (sample_rate_hz != 16000 && sample_rate_hz != 48000) {
|
||||
// Unsupported sample rate. (libopus supports a few other rates as
|
||||
// well; we can add support for them when needed.)
|
||||
return false;
|
||||
}
|
||||
if (num_channels != 1 && num_channels != 2) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
absl::optional<AudioDecoderOpus::Config> AudioDecoderOpus::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
const auto num_channels = [&]() -> absl::optional<int> {
|
||||
auto stereo = format.parameters.find("stereo");
|
||||
if (stereo != format.parameters.end()) {
|
||||
if (stereo->second == "0") {
|
||||
return 1;
|
||||
} else if (stereo->second == "1") {
|
||||
return 2;
|
||||
} else {
|
||||
return absl::nullopt; // Bad stereo parameter.
|
||||
}
|
||||
}
|
||||
return 1; // Default to mono.
|
||||
}();
|
||||
if (absl::EqualsIgnoreCase(format.name, "opus") &&
|
||||
format.clockrate_hz == 48000 && format.num_channels == 2 &&
|
||||
num_channels) {
|
||||
Config config;
|
||||
config.num_channels = *num_channels;
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return absl::nullopt;
|
||||
}
|
||||
return config;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioDecoderOpus::AppendSupportedDecoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
AudioCodecInfo opus_info{48000, 1, 64000, 6000, 510000};
|
||||
opus_info.allow_comfort_noise = false;
|
||||
opus_info.supports_network_adaption = true;
|
||||
SdpAudioFormat opus_format(
|
||||
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}});
|
||||
specs->push_back({std::move(opus_format), opus_info});
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioDecoder> AudioDecoderOpus::MakeAudioDecoder(
|
||||
Config config,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
return std::make_unique<AudioDecoderOpusImpl>(config.num_channels,
|
||||
config.sample_rate_hz);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
|
||||
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_decoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Opus decoder API for use as a template parameter to
|
||||
// CreateAudioDecoderFactory<...>().
|
||||
struct RTC_EXPORT AudioDecoderOpus {
|
||||
struct Config {
|
||||
bool IsOk() const; // Checks if the values are currently OK.
|
||||
int sample_rate_hz = 48000;
|
||||
int num_channels = 1;
|
||||
};
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
Config config,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioEncoderMultiChannelOpusConfig>
|
||||
AudioEncoderMultiChannelOpus::SdpToConfig(const SdpAudioFormat& format) {
|
||||
return AudioEncoderMultiChannelOpusImpl::SdpToConfig(format);
|
||||
}
|
||||
|
||||
void AudioEncoderMultiChannelOpus::AppendSupportedEncoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
// To get full utilization of the surround support of the Opus lib, we can
|
||||
// mark which channel is the low frequency effects (LFE). But that is not done
|
||||
// ATM.
|
||||
{
|
||||
AudioCodecInfo surround_5_1_opus_info{48000, 6,
|
||||
/* default_bitrate_bps= */ 128000};
|
||||
surround_5_1_opus_info.allow_comfort_noise = false;
|
||||
surround_5_1_opus_info.supports_network_adaption = false;
|
||||
SdpAudioFormat opus_format({"multiopus",
|
||||
48000,
|
||||
6,
|
||||
{{"minptime", "10"},
|
||||
{"useinbandfec", "1"},
|
||||
{"channel_mapping", "0,4,1,2,3,5"},
|
||||
{"num_streams", "4"},
|
||||
{"coupled_streams", "2"}}});
|
||||
specs->push_back({std::move(opus_format), surround_5_1_opus_info});
|
||||
}
|
||||
{
|
||||
AudioCodecInfo surround_7_1_opus_info{48000, 8,
|
||||
/* default_bitrate_bps= */ 200000};
|
||||
surround_7_1_opus_info.allow_comfort_noise = false;
|
||||
surround_7_1_opus_info.supports_network_adaption = false;
|
||||
SdpAudioFormat opus_format({"multiopus",
|
||||
48000,
|
||||
8,
|
||||
{{"minptime", "10"},
|
||||
{"useinbandfec", "1"},
|
||||
{"channel_mapping", "0,6,1,2,3,4,5,7"},
|
||||
{"num_streams", "5"},
|
||||
{"coupled_streams", "3"}}});
|
||||
specs->push_back({std::move(opus_format), surround_7_1_opus_info});
|
||||
}
|
||||
}
|
||||
|
||||
AudioCodecInfo AudioEncoderMultiChannelOpus::QueryAudioEncoder(
|
||||
const AudioEncoderMultiChannelOpusConfig& config) {
|
||||
return AudioEncoderMultiChannelOpusImpl::QueryAudioEncoder(config);
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> AudioEncoderMultiChannelOpus::MakeAudioEncoder(
|
||||
const AudioEncoderMultiChannelOpusConfig& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
return AudioEncoderMultiChannelOpusImpl::MakeAudioEncoder(config,
|
||||
payload_type);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
|
||||
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Opus encoder API for use as a template parameter to
|
||||
// CreateAudioEncoderFactory<...>().
|
||||
struct RTC_EXPORT AudioEncoderMultiChannelOpus {
|
||||
using Config = AudioEncoderMultiChannelOpusConfig;
|
||||
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
static AudioCodecInfo QueryAudioEncoder(const Config& config);
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const Config& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
|
||||
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
constexpr int kDefaultComplexity = 9;
|
||||
} // namespace
|
||||
|
||||
AudioEncoderMultiChannelOpusConfig::AudioEncoderMultiChannelOpusConfig()
|
||||
: frame_size_ms(kDefaultFrameSizeMs),
|
||||
num_channels(1),
|
||||
application(ApplicationMode::kVoip),
|
||||
bitrate_bps(32000),
|
||||
fec_enabled(false),
|
||||
cbr_enabled(false),
|
||||
dtx_enabled(false),
|
||||
max_playback_rate_hz(48000),
|
||||
complexity(kDefaultComplexity),
|
||||
num_streams(-1),
|
||||
coupled_streams(-1) {}
|
||||
AudioEncoderMultiChannelOpusConfig::AudioEncoderMultiChannelOpusConfig(
|
||||
const AudioEncoderMultiChannelOpusConfig&) = default;
|
||||
AudioEncoderMultiChannelOpusConfig::~AudioEncoderMultiChannelOpusConfig() =
|
||||
default;
|
||||
AudioEncoderMultiChannelOpusConfig&
|
||||
AudioEncoderMultiChannelOpusConfig::operator=(
|
||||
const AudioEncoderMultiChannelOpusConfig&) = default;
|
||||
|
||||
bool AudioEncoderMultiChannelOpusConfig::IsOk() const {
|
||||
if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
|
||||
return false;
|
||||
if (num_channels >= 255) {
|
||||
return false;
|
||||
}
|
||||
if (bitrate_bps < kMinBitrateBps || bitrate_bps > kMaxBitrateBps)
|
||||
return false;
|
||||
if (complexity < 0 || complexity > 10)
|
||||
return false;
|
||||
|
||||
// Check the lengths:
|
||||
if (num_streams < 0 || coupled_streams < 0) {
|
||||
return false;
|
||||
}
|
||||
if (num_streams < coupled_streams) {
|
||||
return false;
|
||||
}
|
||||
if (channel_mapping.size() != static_cast<size_t>(num_channels)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Every mono stream codes one channel, every coupled stream codes two. This
|
||||
// is the total coded channel count:
|
||||
const int max_coded_channel = num_streams + coupled_streams;
|
||||
for (const auto& x : channel_mapping) {
|
||||
// Coded channels >= max_coded_channel don't exist. Except for 255, which
|
||||
// tells Opus to ignore input channel x.
|
||||
if (x >= max_coded_channel && x != 255) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Inverse mapping.
|
||||
constexpr int kNotSet = -1;
|
||||
std::vector<int> coded_channels_to_input_channels(max_coded_channel, kNotSet);
|
||||
for (size_t i = 0; i < num_channels; ++i) {
|
||||
if (channel_mapping[i] == 255) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If it's not ignored, put it in the inverted mapping. But first check if
|
||||
// we've told Opus to use another input channel for this coded channel:
|
||||
const int coded_channel = channel_mapping[i];
|
||||
if (coded_channels_to_input_channels[coded_channel] != kNotSet) {
|
||||
// Coded channel `coded_channel` comes from both input channels
|
||||
// `coded_channels_to_input_channels[coded_channel]` and `i`.
|
||||
return false;
|
||||
}
|
||||
|
||||
coded_channels_to_input_channels[coded_channel] = i;
|
||||
}
|
||||
|
||||
// Check that we specified what input the encoder should use to produce
|
||||
// every coded channel.
|
||||
for (int i = 0; i < max_coded_channel; ++i) {
|
||||
if (coded_channels_to_input_channels[i] == kNotSet) {
|
||||
// Coded channel `i` has unspecified input channel.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (num_channels > 255 || max_coded_channel >= 255) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
|
||||
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
struct RTC_EXPORT AudioEncoderMultiChannelOpusConfig {
|
||||
static constexpr int kDefaultFrameSizeMs = 20;
|
||||
|
||||
// Opus API allows a min bitrate of 500bps, but Opus documentation suggests
|
||||
// bitrate should be in the range of 6000 to 510000, inclusive.
|
||||
static constexpr int kMinBitrateBps = 6000;
|
||||
static constexpr int kMaxBitrateBps = 510000;
|
||||
|
||||
AudioEncoderMultiChannelOpusConfig();
|
||||
AudioEncoderMultiChannelOpusConfig(const AudioEncoderMultiChannelOpusConfig&);
|
||||
~AudioEncoderMultiChannelOpusConfig();
|
||||
AudioEncoderMultiChannelOpusConfig& operator=(
|
||||
const AudioEncoderMultiChannelOpusConfig&);
|
||||
|
||||
int frame_size_ms;
|
||||
size_t num_channels;
|
||||
enum class ApplicationMode { kVoip, kAudio };
|
||||
ApplicationMode application;
|
||||
int bitrate_bps;
|
||||
bool fec_enabled;
|
||||
bool cbr_enabled;
|
||||
bool dtx_enabled;
|
||||
int max_playback_rate_hz;
|
||||
std::vector<int> supported_frame_lengths_ms;
|
||||
|
||||
int complexity;
|
||||
|
||||
// Number of mono/stereo Opus streams.
|
||||
int num_streams;
|
||||
|
||||
// Number of channel pairs coupled together, see RFC 7845 section
|
||||
// 5.1.1. Has to be less than the number of streams
|
||||
int coupled_streams;
|
||||
|
||||
// Channel mapping table, defines the mapping from encoded streams to input
|
||||
// channels. See RFC 7845 section 5.1.1.
|
||||
std::vector<unsigned char> channel_mapping;
|
||||
|
||||
bool IsOk() const;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/opus/audio_encoder_opus.h"
|
||||
|
||||
#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
absl::optional<AudioEncoderOpusConfig> AudioEncoderOpus::SdpToConfig(
|
||||
const SdpAudioFormat& format) {
|
||||
return AudioEncoderOpusImpl::SdpToConfig(format);
|
||||
}
|
||||
|
||||
void AudioEncoderOpus::AppendSupportedEncoders(
|
||||
std::vector<AudioCodecSpec>* specs) {
|
||||
AudioEncoderOpusImpl::AppendSupportedEncoders(specs);
|
||||
}
|
||||
|
||||
AudioCodecInfo AudioEncoderOpus::QueryAudioEncoder(
|
||||
const AudioEncoderOpusConfig& config) {
|
||||
return AudioEncoderOpusImpl::QueryAudioEncoder(config);
|
||||
}
|
||||
|
||||
std::unique_ptr<AudioEncoder> AudioEncoderOpus::MakeAudioEncoder(
|
||||
const AudioEncoderOpusConfig& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/,
|
||||
const FieldTrialsView* field_trials) {
|
||||
if (!config.IsOk()) {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
return AudioEncoderOpusImpl::MakeAudioEncoder(config, payload_type);
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
|
||||
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/audio_codecs/audio_codec_pair_id.h"
|
||||
#include "api/audio_codecs/audio_encoder.h"
|
||||
#include "api/audio_codecs/audio_format.h"
|
||||
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
|
||||
#include "api/field_trials_view.h"
|
||||
#include "rtc_base/system/rtc_export.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Opus encoder API for use as a template parameter to
|
||||
// CreateAudioEncoderFactory<...>().
|
||||
struct RTC_EXPORT AudioEncoderOpus {
|
||||
using Config = AudioEncoderOpusConfig;
|
||||
static absl::optional<AudioEncoderOpusConfig> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format);
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
|
||||
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderOpusConfig& config);
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const AudioEncoderOpusConfig& config,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
|
||||
const FieldTrialsView* field_trials = nullptr);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
|
||||
|
|
@@ -0,0 +1,75 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio_codecs/opus/audio_encoder_opus_config.h"

namespace webrtc {

namespace {

#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
constexpr int kDefaultComplexity = 5;
#else
constexpr int kDefaultComplexity = 9;
#endif

constexpr int kDefaultLowRateComplexity =
    WEBRTC_OPUS_VARIABLE_COMPLEXITY ? 9 : kDefaultComplexity;

}  // namespace

constexpr int AudioEncoderOpusConfig::kDefaultFrameSizeMs;
constexpr int AudioEncoderOpusConfig::kMinBitrateBps;
constexpr int AudioEncoderOpusConfig::kMaxBitrateBps;

AudioEncoderOpusConfig::AudioEncoderOpusConfig()
    : frame_size_ms(kDefaultFrameSizeMs),
      sample_rate_hz(48000),
      num_channels(1),
      application(ApplicationMode::kVoip),
      bitrate_bps(32000),
      fec_enabled(false),
      cbr_enabled(false),
      max_playback_rate_hz(48000),
      complexity(kDefaultComplexity),
      low_rate_complexity(kDefaultLowRateComplexity),
      complexity_threshold_bps(12500),
      complexity_threshold_window_bps(1500),
      dtx_enabled(false),
      uplink_bandwidth_update_interval_ms(200),
      payload_type(-1) {}
AudioEncoderOpusConfig::AudioEncoderOpusConfig(const AudioEncoderOpusConfig&) =
    default;
AudioEncoderOpusConfig::~AudioEncoderOpusConfig() = default;
AudioEncoderOpusConfig& AudioEncoderOpusConfig::operator=(
    const AudioEncoderOpusConfig&) = default;

bool AudioEncoderOpusConfig::IsOk() const {
  if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
    return false;
  if (sample_rate_hz != 16000 && sample_rate_hz != 48000) {
    // Unsupported input sample rate. (libopus supports a few other rates as
    // well; we can add support for them when needed.)
    return false;
  }
  if (num_channels >= 255) {
    return false;
  }
  if (!bitrate_bps)
    return false;
  if (*bitrate_bps < kMinBitrateBps || *bitrate_bps > kMaxBitrateBps)
    return false;
  if (complexity < 0 || complexity > 10)
    return false;
  if (low_rate_complexity < 0 || low_rate_complexity > 10)
    return false;
  return true;
}
}  // namespace webrtc
@@ -0,0 +1,74 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_CONFIG_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_CONFIG_H_

#include <stddef.h>

#include <vector>

#include "absl/types/optional.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {

struct RTC_EXPORT AudioEncoderOpusConfig {
  static constexpr int kDefaultFrameSizeMs = 20;

  // Opus API allows a min bitrate of 500bps, but Opus documentation suggests
  // bitrate should be in the range of 6000 to 510000, inclusive.
  static constexpr int kMinBitrateBps = 6000;
  static constexpr int kMaxBitrateBps = 510000;

  AudioEncoderOpusConfig();
  AudioEncoderOpusConfig(const AudioEncoderOpusConfig&);
  ~AudioEncoderOpusConfig();
  AudioEncoderOpusConfig& operator=(const AudioEncoderOpusConfig&);

  bool IsOk() const;  // Checks if the values are currently OK.

  int frame_size_ms;
  int sample_rate_hz;
  size_t num_channels;
  enum class ApplicationMode { kVoip, kAudio };
  ApplicationMode application;

  // NOTE: This member must always be set.
  // TODO(kwiberg): Turn it into just an int.
  absl::optional<int> bitrate_bps;

  bool fec_enabled;
  bool cbr_enabled;
  int max_playback_rate_hz;

  // `complexity` is used when the bitrate goes above
  // `complexity_threshold_bps` + `complexity_threshold_window_bps`;
  // `low_rate_complexity` is used when the bitrate falls below
  // `complexity_threshold_bps` - `complexity_threshold_window_bps`. In the
  // interval in the middle, we keep using the most recent of the two
  // complexity settings.
  int complexity;
  int low_rate_complexity;
  int complexity_threshold_bps;
  int complexity_threshold_window_bps;

  bool dtx_enabled;
  std::vector<int> supported_frame_lengths_ms;
  int uplink_bandwidth_update_interval_ms;

  // NOTE: This member isn't necessary, and will soon go away. See
  // https://bugs.chromium.org/p/webrtc/issues/detail?id=7847
  int payload_type;
};

}  // namespace webrtc

#endif  // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_OPUS_CONFIG_H_
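A short, hedged sketch of how this config struct is typically filled in before it is handed to `AudioEncoderOpus::MakeAudioEncoder`. The override values are illustrative only; the `IsOk()` check mirrors the validation rules in audio_encoder_opus_config.cc above.

// Sketch: populate an AudioEncoderOpusConfig and validate it. The defaults
// already satisfy IsOk(); the overrides below are example values only.
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
#include "rtc_base/checks.h"

webrtc::AudioEncoderOpusConfig MakeExampleConfig() {
  webrtc::AudioEncoderOpusConfig config;
  config.frame_size_ms = 20;      // Must be a positive multiple of 10.
  config.sample_rate_hz = 48000;  // Only 16000 or 48000 pass IsOk().
  config.num_channels = 2;
  config.bitrate_bps = 64000;     // Must stay within [6000, 510000].
  config.fec_enabled = true;
  config.application =
      webrtc::AudioEncoderOpusConfig::ApplicationMode::kVoip;
  RTC_DCHECK(config.IsOk());
  return config;
}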
@@ -0,0 +1,49 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio_codecs/opus_audio_decoder_factory.h"

#include <memory>
#include <vector>

#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"

namespace webrtc {

namespace {

// Modify an audio decoder to not advertise support for anything.
template <typename T>
struct NotAdvertised {
  using Config = typename T::Config;
  static absl::optional<Config> SdpToConfig(
      const SdpAudioFormat& audio_format) {
    return T::SdpToConfig(audio_format);
  }
  static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
    // Don't advertise support for anything.
  }
  static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
      const Config& config,
      absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
    return T::MakeAudioDecoder(config, codec_pair_id);
  }
};

}  // namespace

rtc::scoped_refptr<AudioDecoderFactory> CreateOpusAudioDecoderFactory() {
  return CreateAudioDecoderFactory<
      AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>>();
}

}  // namespace webrtc
@@ -0,0 +1,26 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_FACTORY_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_FACTORY_H_

#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/scoped_refptr.h"

namespace webrtc {

// Creates a new factory that can create only Opus audio decoders. Works like
// CreateAudioDecoderFactory<AudioDecoderOpus>(), but is easier to use and is
// not inline because it isn't a template.
rtc::scoped_refptr<AudioDecoderFactory> CreateOpusAudioDecoderFactory();

}  // namespace webrtc

#endif  // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_FACTORY_H_
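A minimal usage sketch, assuming the corresponding WebRTC build targets are linked: create the Opus-only decoder factory declared above and instantiate a decoder for stereo Opus at 48 kHz, following the same call shape as the decoder-factory unit tests in this directory.

// Sketch: create an Opus-only decoder factory and make a decoder.
#include <memory>

#include "api/audio_codecs/opus_audio_decoder_factory.h"

std::unique_ptr<webrtc::AudioDecoder> MakeExampleOpusDecoder() {
  rtc::scoped_refptr<webrtc::AudioDecoderFactory> factory =
      webrtc::CreateOpusAudioDecoderFactory();
  // The multi-channel Opus decoder is usable but not advertised, so only the
  // standard {"opus", 48000, 2} format shows up in GetSupportedDecoders().
  return factory->MakeAudioDecoder({"opus", 48000, 2}, absl::nullopt);
}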
@@ -0,0 +1,54 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio_codecs/opus_audio_encoder_factory.h"

#include <memory>
#include <vector>

#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"

namespace webrtc {
namespace {

// Modify an audio encoder to not advertise support for anything.
template <typename T>
struct NotAdvertised {
  using Config = typename T::Config;
  static absl::optional<Config> SdpToConfig(
      const SdpAudioFormat& audio_format) {
    return T::SdpToConfig(audio_format);
  }
  static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
    // Don't advertise support for anything.
  }
  static AudioCodecInfo QueryAudioEncoder(const Config& config) {
    return T::QueryAudioEncoder(config);
  }
  static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
      const Config& config,
      int payload_type,
      absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt,
      const FieldTrialsView* field_trials = nullptr) {
    return T::MakeAudioEncoder(config, payload_type, codec_pair_id,
                               field_trials);
  }
};

}  // namespace

rtc::scoped_refptr<AudioEncoderFactory> CreateOpusAudioEncoderFactory() {
  return CreateAudioEncoderFactory<
      AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>>();
}

}  // namespace webrtc
@@ -0,0 +1,26 @@
/*
 *  Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_FACTORY_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_FACTORY_H_

#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/scoped_refptr.h"

namespace webrtc {

// Creates a new factory that can create only Opus audio encoders. Works like
// CreateAudioEncoderFactory<AudioEncoderOpus>(), but is easier to use and is
// not inline because it isn't a template.
rtc::scoped_refptr<AudioEncoderFactory> CreateOpusAudioEncoderFactory();

}  // namespace webrtc

#endif  // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_FACTORY_H_
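As with the decoder factory, a brief, hedged sketch of the intended call pattern. The field names used for logging (`format.name`, `format.clockrate_hz`, `format.num_channels`) are assumptions about SdpAudioFormat that are not spelled out in this file; the logging itself is illustrative only.

// Sketch: enumerate what the Opus-only encoder factory advertises. Per the
// factory's implementation above, only the standard Opus spec is listed,
// even though the multi-channel variant can still be created on request.
#include <vector>

#include "api/audio_codecs/opus_audio_encoder_factory.h"
#include "rtc_base/logging.h"

void LogOpusEncoderSpecs() {
  auto factory = webrtc::CreateOpusAudioEncoderFactory();
  std::vector<webrtc::AudioCodecSpec> specs = factory->GetSupportedEncoders();
  for (const webrtc::AudioCodecSpec& spec : specs) {
    RTC_LOG(LS_INFO) << "Supported encoder: " << spec.format.name << "/"
                     << spec.format.clockrate_hz << "/"
                     << spec.format.num_channels;
  }
}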
39
TMessagesProj/jni/voip/webrtc/api/audio_codecs/test/BUILD.gn
Normal file

@@ -0,0 +1,39 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

import("../../../webrtc.gni")
if (is_android) {
  import("//build/config/android/config.gni")
  import("//build/config/android/rules.gni")
}

if (rtc_include_tests) {
  rtc_library("audio_codecs_api_unittests") {
    testonly = true
    sources = [
      "audio_decoder_factory_template_unittest.cc",
      "audio_encoder_factory_template_unittest.cc",
    ]
    deps = [
      "..:audio_codecs_api",
      "../../../test:audio_codec_mocks",
      "../../../test:scoped_key_value_config",
      "../../../test:test_support",
      "../L16:audio_decoder_L16",
      "../L16:audio_encoder_L16",
      "../g711:audio_decoder_g711",
      "../g711:audio_encoder_g711",
      "../g722:audio_decoder_g722",
      "../g722:audio_encoder_g722",
      "../ilbc:audio_decoder_ilbc",
      "../ilbc:audio_encoder_ilbc",
      "../opus:audio_decoder_opus",
      "../opus:audio_encoder_opus",
    ]
  }
}
@ -0,0 +1,222 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/audio_decoder_factory_template.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "api/audio_codecs/L16/audio_decoder_L16.h"
|
||||
#include "api/audio_codecs/g711/audio_decoder_g711.h"
|
||||
#include "api/audio_codecs/g722/audio_decoder_g722.h"
|
||||
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
|
||||
#include "api/audio_codecs/opus/audio_decoder_opus.h"
|
||||
#include "test/gmock.h"
|
||||
#include "test/gtest.h"
|
||||
#include "test/mock_audio_decoder.h"
|
||||
#include "test/scoped_key_value_config.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
struct BogusParams {
|
||||
static SdpAudioFormat AudioFormat() { return {"bogus", 8000, 1}; }
|
||||
static AudioCodecInfo CodecInfo() { return {8000, 1, 12345}; }
|
||||
};
|
||||
|
||||
struct ShamParams {
|
||||
static SdpAudioFormat AudioFormat() {
|
||||
return {"sham", 16000, 2, {{"param", "value"}}};
|
||||
}
|
||||
static AudioCodecInfo CodecInfo() { return {16000, 2, 23456}; }
|
||||
};
|
||||
|
||||
template <typename Params>
|
||||
struct AudioDecoderFakeApi {
|
||||
struct Config {
|
||||
SdpAudioFormat audio_format;
|
||||
};
|
||||
|
||||
static absl::optional<Config> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format) {
|
||||
if (Params::AudioFormat() == audio_format) {
|
||||
Config config = {audio_format};
|
||||
return config;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
|
||||
specs->push_back({Params::AudioFormat(), Params::CodecInfo()});
|
||||
}
|
||||
|
||||
static AudioCodecInfo QueryAudioDecoder(const Config&) {
|
||||
return Params::CodecInfo();
|
||||
}
|
||||
|
||||
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
|
||||
const Config&,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/ = absl::nullopt) {
|
||||
auto dec = std::make_unique<testing::StrictMock<MockAudioDecoder>>();
|
||||
EXPECT_CALL(*dec, SampleRateHz())
|
||||
.WillOnce(::testing::Return(Params::CodecInfo().sample_rate_hz));
|
||||
EXPECT_CALL(*dec, Die());
|
||||
return std::move(dec);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, NoDecoderTypes) {
|
||||
test::ScopedKeyValueConfig field_trials;
|
||||
rtc::scoped_refptr<AudioDecoderFactory> factory(
|
||||
rtc::make_ref_counted<
|
||||
audio_decoder_factory_template_impl::AudioDecoderFactoryT<>>(
|
||||
&field_trials));
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(), ::testing::IsEmpty());
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, OneDecoderType) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderFakeApi<BogusParams>>();
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"bogus", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
|
||||
auto dec = factory->MakeAudioDecoder({"bogus", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec);
|
||||
EXPECT_EQ(8000, dec->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, TwoDecoderTypes) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderFakeApi<BogusParams>,
|
||||
AudioDecoderFakeApi<ShamParams>>();
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}},
|
||||
AudioCodecSpec{{"sham", 16000, 2, {{"param", "value"}}},
|
||||
{16000, 2, 23456}}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"bogus", 8000, 1}));
|
||||
EXPECT_TRUE(
|
||||
factory->IsSupportedDecoder({"sham", 16000, 2, {{"param", "value"}}}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
|
||||
auto dec1 = factory->MakeAudioDecoder({"bogus", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec1);
|
||||
EXPECT_EQ(8000, dec1->SampleRateHz());
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"sham", 16000, 2}, absl::nullopt));
|
||||
auto dec2 = factory->MakeAudioDecoder(
|
||||
{"sham", 16000, 2, {{"param", "value"}}}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec2);
|
||||
EXPECT_EQ(16000, dec2->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, G711) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderG711>();
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"PCMU", 8000, 1}, {8000, 1, 64000}},
|
||||
AudioCodecSpec{{"PCMA", 8000, 1}, {8000, 1, 64000}}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"G711", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"PCMU", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"pcma", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"pcmu", 16000, 1}, absl::nullopt));
|
||||
auto dec1 = factory->MakeAudioDecoder({"pcmu", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec1);
|
||||
EXPECT_EQ(8000, dec1->SampleRateHz());
|
||||
auto dec2 = factory->MakeAudioDecoder({"PCMA", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec2);
|
||||
EXPECT_EQ(8000, dec2->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, G722) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderG722>();
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"G722", 8000, 1}, {16000, 1, 64000}}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"G722", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
|
||||
auto dec1 = factory->MakeAudioDecoder({"G722", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec1);
|
||||
EXPECT_EQ(16000, dec1->SampleRateHz());
|
||||
EXPECT_EQ(1u, dec1->Channels());
|
||||
auto dec2 = factory->MakeAudioDecoder({"G722", 8000, 2}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec2);
|
||||
EXPECT_EQ(16000, dec2->SampleRateHz());
|
||||
EXPECT_EQ(2u, dec2->Channels());
|
||||
auto dec3 = factory->MakeAudioDecoder({"G722", 8000, 3}, absl::nullopt);
|
||||
ASSERT_EQ(nullptr, dec3);
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, Ilbc) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderIlbc>();
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"ILBC", 8000, 1}, {8000, 1, 13300}}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"ilbc", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"bar", 8000, 1}, absl::nullopt));
|
||||
auto dec = factory->MakeAudioDecoder({"ilbc", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec);
|
||||
EXPECT_EQ(8000, dec->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, L16) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderL16>();
|
||||
EXPECT_THAT(
|
||||
factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"L16", 8000, 1}, {8000, 1, 8000 * 16}},
|
||||
AudioCodecSpec{{"L16", 16000, 1}, {16000, 1, 16000 * 16}},
|
||||
AudioCodecSpec{{"L16", 32000, 1}, {32000, 1, 32000 * 16}},
|
||||
AudioCodecSpec{{"L16", 8000, 2}, {8000, 2, 8000 * 16 * 2}},
|
||||
AudioCodecSpec{{"L16", 16000, 2}, {16000, 2, 16000 * 16 * 2}},
|
||||
AudioCodecSpec{{"L16", 32000, 2}, {32000, 2, 32000 * 16 * 2}}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"L16", 48000, 1}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"L16", 96000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"L16", 8000, 0}, absl::nullopt));
|
||||
auto dec = factory->MakeAudioDecoder({"L16", 48000, 2}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec);
|
||||
EXPECT_EQ(48000, dec->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioDecoderFactoryTemplateTest, Opus) {
|
||||
auto factory = CreateAudioDecoderFactory<AudioDecoderOpus>();
|
||||
AudioCodecInfo opus_info{48000, 1, 64000, 6000, 510000};
|
||||
opus_info.allow_comfort_noise = false;
|
||||
opus_info.supports_network_adaption = true;
|
||||
const SdpAudioFormat opus_format(
|
||||
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}});
|
||||
EXPECT_THAT(factory->GetSupportedDecoders(),
|
||||
::testing::ElementsAre(AudioCodecSpec{opus_format, opus_info}));
|
||||
EXPECT_FALSE(factory->IsSupportedDecoder({"opus", 48000, 1}));
|
||||
EXPECT_TRUE(factory->IsSupportedDecoder({"opus", 48000, 2}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioDecoder({"bar", 16000, 1}, absl::nullopt));
|
||||
auto dec = factory->MakeAudioDecoder({"opus", 48000, 2}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, dec);
|
||||
EXPECT_EQ(48000, dec->SampleRateHz());
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,224 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/audio_codecs/audio_encoder_factory_template.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "api/audio_codecs/L16/audio_encoder_L16.h"
|
||||
#include "api/audio_codecs/g711/audio_encoder_g711.h"
|
||||
#include "api/audio_codecs/g722/audio_encoder_g722.h"
|
||||
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
|
||||
#include "api/audio_codecs/opus/audio_encoder_opus.h"
|
||||
#include "test/gmock.h"
|
||||
#include "test/gtest.h"
|
||||
#include "test/mock_audio_encoder.h"
|
||||
#include "test/scoped_key_value_config.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace {
|
||||
|
||||
struct BogusParams {
|
||||
static SdpAudioFormat AudioFormat() { return {"bogus", 8000, 1}; }
|
||||
static AudioCodecInfo CodecInfo() { return {8000, 1, 12345}; }
|
||||
};
|
||||
|
||||
struct ShamParams {
|
||||
static SdpAudioFormat AudioFormat() {
|
||||
return {"sham", 16000, 2, {{"param", "value"}}};
|
||||
}
|
||||
static AudioCodecInfo CodecInfo() { return {16000, 2, 23456}; }
|
||||
};
|
||||
|
||||
template <typename Params>
|
||||
struct AudioEncoderFakeApi {
|
||||
struct Config {
|
||||
SdpAudioFormat audio_format;
|
||||
};
|
||||
|
||||
static absl::optional<Config> SdpToConfig(
|
||||
const SdpAudioFormat& audio_format) {
|
||||
if (Params::AudioFormat() == audio_format) {
|
||||
Config config = {audio_format};
|
||||
return config;
|
||||
} else {
|
||||
return absl::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
|
||||
specs->push_back({Params::AudioFormat(), Params::CodecInfo()});
|
||||
}
|
||||
|
||||
static AudioCodecInfo QueryAudioEncoder(const Config&) {
|
||||
return Params::CodecInfo();
|
||||
}
|
||||
|
||||
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
|
||||
const Config&,
|
||||
int payload_type,
|
||||
absl::optional<AudioCodecPairId> /*codec_pair_id*/ = absl::nullopt) {
|
||||
auto enc = std::make_unique<testing::StrictMock<MockAudioEncoder>>();
|
||||
EXPECT_CALL(*enc, SampleRateHz())
|
||||
.WillOnce(::testing::Return(Params::CodecInfo().sample_rate_hz));
|
||||
return std::move(enc);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, NoEncoderTypes) {
|
||||
test::ScopedKeyValueConfig field_trials;
|
||||
rtc::scoped_refptr<AudioEncoderFactory> factory(
|
||||
rtc::make_ref_counted<
|
||||
audio_encoder_factory_template_impl::AudioEncoderFactoryT<>>(
|
||||
&field_trials));
|
||||
EXPECT_THAT(factory->GetSupportedEncoders(), ::testing::IsEmpty());
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, OneEncoderType) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderFakeApi<BogusParams>>();
|
||||
EXPECT_THAT(factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(AudioCodecInfo(8000, 1, 12345),
|
||||
factory->QueryAudioEncoder({"bogus", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
|
||||
auto enc = factory->MakeAudioEncoder(17, {"bogus", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc);
|
||||
EXPECT_EQ(8000, enc->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, TwoEncoderTypes) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderFakeApi<BogusParams>,
|
||||
AudioEncoderFakeApi<ShamParams>>();
|
||||
EXPECT_THAT(factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"bogus", 8000, 1}, {8000, 1, 12345}},
|
||||
AudioCodecSpec{{"sham", 16000, 2, {{"param", "value"}}},
|
||||
{16000, 2, 23456}}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(AudioCodecInfo(8000, 1, 12345),
|
||||
factory->QueryAudioEncoder({"bogus", 8000, 1}));
|
||||
EXPECT_EQ(
|
||||
AudioCodecInfo(16000, 2, 23456),
|
||||
factory->QueryAudioEncoder({"sham", 16000, 2, {{"param", "value"}}}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
|
||||
auto enc1 = factory->MakeAudioEncoder(17, {"bogus", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc1);
|
||||
EXPECT_EQ(8000, enc1->SampleRateHz());
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"sham", 16000, 2}, absl::nullopt));
|
||||
auto enc2 = factory->MakeAudioEncoder(
|
||||
17, {"sham", 16000, 2, {{"param", "value"}}}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc2);
|
||||
EXPECT_EQ(16000, enc2->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, G711) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderG711>();
|
||||
EXPECT_THAT(factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"PCMU", 8000, 1}, {8000, 1, 64000}},
|
||||
AudioCodecSpec{{"PCMA", 8000, 1}, {8000, 1, 64000}}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"PCMA", 16000, 1}));
|
||||
EXPECT_EQ(AudioCodecInfo(8000, 1, 64000),
|
||||
factory->QueryAudioEncoder({"PCMA", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"PCMU", 16000, 1}, absl::nullopt));
|
||||
auto enc1 = factory->MakeAudioEncoder(17, {"PCMU", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc1);
|
||||
EXPECT_EQ(8000, enc1->SampleRateHz());
|
||||
auto enc2 = factory->MakeAudioEncoder(17, {"PCMA", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc2);
|
||||
EXPECT_EQ(8000, enc2->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, G722) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderG722>();
|
||||
EXPECT_THAT(factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"G722", 8000, 1}, {16000, 1, 64000}}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(AudioCodecInfo(16000, 1, 64000),
|
||||
factory->QueryAudioEncoder({"G722", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
|
||||
auto enc = factory->MakeAudioEncoder(17, {"G722", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc);
|
||||
EXPECT_EQ(16000, enc->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, Ilbc) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderIlbc>();
|
||||
EXPECT_THAT(factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"ILBC", 8000, 1}, {8000, 1, 13333}}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(AudioCodecInfo(8000, 1, 13333),
|
||||
factory->QueryAudioEncoder({"ilbc", 8000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"bar", 8000, 1}, absl::nullopt));
|
||||
auto enc = factory->MakeAudioEncoder(17, {"ilbc", 8000, 1}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc);
|
||||
EXPECT_EQ(8000, enc->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, L16) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderL16>();
|
||||
EXPECT_THAT(
|
||||
factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(
|
||||
AudioCodecSpec{{"L16", 8000, 1}, {8000, 1, 8000 * 16}},
|
||||
AudioCodecSpec{{"L16", 16000, 1}, {16000, 1, 16000 * 16}},
|
||||
AudioCodecSpec{{"L16", 32000, 1}, {32000, 1, 32000 * 16}},
|
||||
AudioCodecSpec{{"L16", 8000, 2}, {8000, 2, 8000 * 16 * 2}},
|
||||
AudioCodecSpec{{"L16", 16000, 2}, {16000, 2, 16000 * 16 * 2}},
|
||||
AudioCodecSpec{{"L16", 32000, 2}, {32000, 2, 32000 * 16 * 2}}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"L16", 8000, 0}));
|
||||
EXPECT_EQ(AudioCodecInfo(48000, 1, 48000 * 16),
|
||||
factory->QueryAudioEncoder({"L16", 48000, 1}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"L16", 8000, 0}, absl::nullopt));
|
||||
auto enc = factory->MakeAudioEncoder(17, {"L16", 48000, 2}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc);
|
||||
EXPECT_EQ(48000, enc->SampleRateHz());
|
||||
}
|
||||
|
||||
TEST(AudioEncoderFactoryTemplateTest, Opus) {
|
||||
auto factory = CreateAudioEncoderFactory<AudioEncoderOpus>();
|
||||
AudioCodecInfo info = {48000, 1, 32000, 6000, 510000};
|
||||
info.allow_comfort_noise = false;
|
||||
info.supports_network_adaption = true;
|
||||
EXPECT_THAT(
|
||||
factory->GetSupportedEncoders(),
|
||||
::testing::ElementsAre(AudioCodecSpec{
|
||||
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}},
|
||||
info}));
|
||||
EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1}));
|
||||
EXPECT_EQ(
|
||||
info,
|
||||
factory->QueryAudioEncoder(
|
||||
{"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}}));
|
||||
EXPECT_EQ(nullptr,
|
||||
factory->MakeAudioEncoder(17, {"bar", 16000, 1}, absl::nullopt));
|
||||
auto enc = factory->MakeAudioEncoder(17, {"opus", 48000, 2}, absl::nullopt);
|
||||
ASSERT_NE(nullptr, enc);
|
||||
EXPECT_EQ(48000, enc->SampleRateHz());
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
104
TMessagesProj/jni/voip/webrtc/api/audio_options.cc
Normal file

@@ -0,0 +1,104 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio_options.h"

#include "api/array_view.h"
#include "rtc_base/strings/string_builder.h"

namespace cricket {
namespace {

template <class T>
void ToStringIfSet(rtc::SimpleStringBuilder* result,
                   const char* key,
                   const absl::optional<T>& val) {
  if (val) {
    (*result) << key << ": " << *val << ", ";
  }
}

template <typename T>
void SetFrom(absl::optional<T>* s, const absl::optional<T>& o) {
  if (o) {
    *s = o;
  }
}

}  // namespace

AudioOptions::AudioOptions() = default;
AudioOptions::~AudioOptions() = default;

void AudioOptions::SetAll(const AudioOptions& change) {
  SetFrom(&echo_cancellation, change.echo_cancellation);
#if defined(WEBRTC_IOS)
  SetFrom(&ios_force_software_aec_HACK, change.ios_force_software_aec_HACK);
#endif
  SetFrom(&auto_gain_control, change.auto_gain_control);
  SetFrom(&noise_suppression, change.noise_suppression);
  SetFrom(&highpass_filter, change.highpass_filter);
  SetFrom(&stereo_swapping, change.stereo_swapping);
  SetFrom(&audio_jitter_buffer_max_packets,
          change.audio_jitter_buffer_max_packets);
  SetFrom(&audio_jitter_buffer_fast_accelerate,
          change.audio_jitter_buffer_fast_accelerate);
  SetFrom(&audio_jitter_buffer_min_delay_ms,
          change.audio_jitter_buffer_min_delay_ms);
  SetFrom(&audio_network_adaptor, change.audio_network_adaptor);
  SetFrom(&audio_network_adaptor_config, change.audio_network_adaptor_config);
  SetFrom(&init_recording_on_send, change.init_recording_on_send);
}

bool AudioOptions::operator==(const AudioOptions& o) const {
  return echo_cancellation == o.echo_cancellation &&
#if defined(WEBRTC_IOS)
         ios_force_software_aec_HACK == o.ios_force_software_aec_HACK &&
#endif
         auto_gain_control == o.auto_gain_control &&
         noise_suppression == o.noise_suppression &&
         highpass_filter == o.highpass_filter &&
         stereo_swapping == o.stereo_swapping &&
         audio_jitter_buffer_max_packets == o.audio_jitter_buffer_max_packets &&
         audio_jitter_buffer_fast_accelerate ==
             o.audio_jitter_buffer_fast_accelerate &&
         audio_jitter_buffer_min_delay_ms ==
             o.audio_jitter_buffer_min_delay_ms &&
         audio_network_adaptor == o.audio_network_adaptor &&
         audio_network_adaptor_config == o.audio_network_adaptor_config &&
         init_recording_on_send == o.init_recording_on_send;
}

std::string AudioOptions::ToString() const {
  char buffer[1024];
  rtc::SimpleStringBuilder result(buffer);
  result << "AudioOptions {";
  ToStringIfSet(&result, "aec", echo_cancellation);
#if defined(WEBRTC_IOS)
  ToStringIfSet(&result, "ios_force_software_aec_HACK",
                ios_force_software_aec_HACK);
#endif
  ToStringIfSet(&result, "agc", auto_gain_control);
  ToStringIfSet(&result, "ns", noise_suppression);
  ToStringIfSet(&result, "hf", highpass_filter);
  ToStringIfSet(&result, "swap", stereo_swapping);
  ToStringIfSet(&result, "audio_jitter_buffer_max_packets",
                audio_jitter_buffer_max_packets);
  ToStringIfSet(&result, "audio_jitter_buffer_fast_accelerate",
                audio_jitter_buffer_fast_accelerate);
  ToStringIfSet(&result, "audio_jitter_buffer_min_delay_ms",
                audio_jitter_buffer_min_delay_ms);
  ToStringIfSet(&result, "audio_network_adaptor", audio_network_adaptor);
  ToStringIfSet(&result, "init_recording_on_send", init_recording_on_send);
  result << "}";
  return result.str();
}

}  // namespace cricket
75
TMessagesProj/jni/voip/webrtc/api/audio_options.h
Normal file

@@ -0,0 +1,75 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_AUDIO_OPTIONS_H_
#define API_AUDIO_OPTIONS_H_

#include <stdint.h>

#include <string>

#include "absl/types/optional.h"
#include "rtc_base/system/rtc_export.h"

namespace cricket {

// Options that can be applied to a VoiceMediaChannel or a VoiceMediaEngine.
// Used to be flags, but that makes it hard to selectively apply options.
// We are moving all of the setting of options to structs like this,
// but some things currently still use flags.
struct RTC_EXPORT AudioOptions {
  AudioOptions();
  ~AudioOptions();
  void SetAll(const AudioOptions& change);

  bool operator==(const AudioOptions& o) const;
  bool operator!=(const AudioOptions& o) const { return !(*this == o); }

  std::string ToString() const;

  // Audio processing that attempts to filter away the output signal from
  // later inbound pickup.
  absl::optional<bool> echo_cancellation;
#if defined(WEBRTC_IOS)
  // Forces software echo cancellation on iOS. This is a temporary workaround
  // (until Apple fixes the bug) for a device with non-functioning AEC. May
  // improve performance on that particular device, but will cause unpredictable
  // behavior in all other cases. See http://bugs.webrtc.org/8682.
  absl::optional<bool> ios_force_software_aec_HACK;
#endif
  // Audio processing to adjust the sensitivity of the local mic dynamically.
  absl::optional<bool> auto_gain_control;
  // Audio processing to filter out background noise.
  absl::optional<bool> noise_suppression;
  // Audio processing to remove background noise of lower frequencies.
  absl::optional<bool> highpass_filter;
  // Audio processing to swap the left and right channels.
  absl::optional<bool> stereo_swapping;
  // Audio receiver jitter buffer (NetEq) max capacity in number of packets.
  absl::optional<int> audio_jitter_buffer_max_packets;
  // Audio receiver jitter buffer (NetEq) fast accelerate mode.
  absl::optional<bool> audio_jitter_buffer_fast_accelerate;
  // Audio receiver jitter buffer (NetEq) minimum target delay in milliseconds.
  absl::optional<int> audio_jitter_buffer_min_delay_ms;
  // Enable audio network adaptor.
  // TODO(webrtc:11717): Remove this API in favor of adaptivePtime in
  // RtpEncodingParameters.
  absl::optional<bool> audio_network_adaptor;
  // Config string for audio network adaptor.
  absl::optional<std::string> audio_network_adaptor_config;
  // Pre-initialize the ADM for recording when starting to send. Default to
  // true.
  // TODO(webrtc:13566): Remove this option. See issue for details.
  absl::optional<bool> init_recording_on_send;
};

}  // namespace cricket

#endif  // API_AUDIO_OPTIONS_H_
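A small sketch of how these options are typically combined: a caller builds a partial set of overrides and merges it into existing options with SetAll(), which per the implementation above only copies fields that are actually set. The particular field values are example choices.

// Sketch: merge a partial AudioOptions override into existing options.
// Only fields that are set in `changes` are copied by SetAll().
#include "api/audio_options.h"
#include "rtc_base/logging.h"

void ApplyExampleAudioOptions(cricket::AudioOptions& options) {
  cricket::AudioOptions changes;
  changes.echo_cancellation = true;
  changes.noise_suppression = true;
  changes.audio_jitter_buffer_max_packets = 200;  // Example value.
  options.SetAll(changes);
  RTC_LOG(LS_INFO) << options.ToString();
}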
48
TMessagesProj/jni/voip/webrtc/api/call/audio_sink.h
Normal file

@@ -0,0 +1,48 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_CALL_AUDIO_SINK_H_
#define API_CALL_AUDIO_SINK_H_

#include <stddef.h>
#include <stdint.h>

namespace webrtc {

// Represents a simple push audio sink.
class AudioSinkInterface {
 public:
  virtual ~AudioSinkInterface() {}

  struct Data {
    Data(const int16_t* data,
         size_t samples_per_channel,
         int sample_rate,
         size_t channels,
         uint32_t timestamp)
        : data(data),
          samples_per_channel(samples_per_channel),
          sample_rate(sample_rate),
          channels(channels),
          timestamp(timestamp) {}

    const int16_t* data;         // The actual 16bit audio data.
    size_t samples_per_channel;  // Number of frames in the buffer.
    int sample_rate;             // Sample rate in Hz.
    size_t channels;             // Number of channels in the audio data.
    uint32_t timestamp;          // The RTP timestamp of the first sample.
  };

  virtual void OnData(const Data& audio) = 0;
};

}  // namespace webrtc

#endif  // API_CALL_AUDIO_SINK_H_
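One plausible implementation of the sink interface above, shown only as a sketch: it counts the samples it is handed. A real sink would typically copy the buffer, since `data` is presumably only valid for the duration of the call (an assumption worth verifying against the calling code).

// Sketch: a trivial AudioSinkInterface implementation that accumulates the
// total number of samples delivered to it.
#include <cstddef>

#include "api/call/audio_sink.h"

class SampleCountingSink : public webrtc::AudioSinkInterface {
 public:
  void OnData(const Data& audio) override {
    total_samples_ += audio.samples_per_channel * audio.channels;
  }
  size_t total_samples() const { return total_samples_; }

 private:
  size_t total_samples_ = 0;
};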
45
TMessagesProj/jni/voip/webrtc/api/call/bitrate_allocation.h
Normal file

@@ -0,0 +1,45 @@
/*
 *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#ifndef API_CALL_BITRATE_ALLOCATION_H_
#define API_CALL_BITRATE_ALLOCATION_H_

#include "api/units/data_rate.h"
#include "api/units/time_delta.h"

namespace webrtc {

// BitrateAllocationUpdate provides information to allocated streams about their
// bitrate allocation. It originates from the BitrateAllocator class and is
// propagated from there.
struct BitrateAllocationUpdate {
  // The allocated target bitrate. Media streams should produce this amount of
  // data. (Note that this may include packet overhead depending on
  // configuration.)
  DataRate target_bitrate = DataRate::Zero();
  // The allocated part of the estimated link capacity. This is more stable than
  // the target as it is based on the underlying link capacity estimate. This
  // should be used to change encoder configuration when the cost of change is
  // high.
  DataRate stable_target_bitrate = DataRate::Zero();
  // Predicted packet loss ratio.
  double packet_loss_ratio = 0;
  // Predicted round trip time.
  TimeDelta round_trip_time = TimeDelta::PlusInfinity();
  // `bwe_period` is deprecated, use `stable_target_bitrate` allocation instead.
  TimeDelta bwe_period = TimeDelta::PlusInfinity();
  // Congestion window pushback bitrate reduction fraction. Used in
  // VideoStreamEncoder to reduce the bitrate by the given fraction
  // by dropping frames.
  double cwnd_reduce_ratio = 0;
};

}  // namespace webrtc

#endif  // API_CALL_BITRATE_ALLOCATION_H_
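A hedged sketch of how a stream might consume an update: reconfigure only when the stable target moves far enough, which is exactly the use the comment on `stable_target_bitrate` suggests. The 10% threshold and the use of `DataRate::bps()` are assumptions made for illustration.

// Sketch: react to a BitrateAllocationUpdate, using the stable target to
// decide whether an (expensive) encoder reconfiguration is worthwhile.
#include <cstdint>
#include <cstdlib>

#include "api/call/bitrate_allocation.h"

class ExampleStream {
 public:
  void OnBitrateUpdated(const webrtc::BitrateAllocationUpdate& update) {
    const int64_t stable_bps = update.stable_target_bitrate.bps();
    // Reconfigure only on a >10% change of the stable target (example rule).
    if (std::abs(stable_bps - configured_bps_) > configured_bps_ / 10) {
      configured_bps_ = stable_bps;
      // ... push the new rate into the encoder ...
    }
  }

 private:
  int64_t configured_bps_ = 0;
};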
23
TMessagesProj/jni/voip/webrtc/api/call/transport.cc
Normal file

@@ -0,0 +1,23 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/call/transport.h"

#include <cstdint>

namespace webrtc {

PacketOptions::PacketOptions() = default;

PacketOptions::PacketOptions(const PacketOptions&) = default;

PacketOptions::~PacketOptions() = default;

}  // namespace webrtc
58
TMessagesProj/jni/voip/webrtc/api/call/transport.h
Normal file

@@ -0,0 +1,58 @@
/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_CALL_TRANSPORT_H_
#define API_CALL_TRANSPORT_H_

#include <stddef.h>
#include <stdint.h>

#include "api/array_view.h"
#include "api/ref_counted_base.h"
#include "api/scoped_refptr.h"

namespace webrtc {

// TODO(holmer): Look into unifying this with the PacketOptions in
// asyncpacketsocket.h.
struct PacketOptions {
  PacketOptions();
  PacketOptions(const PacketOptions&);
  ~PacketOptions();

  // A 16 bits positive id. Negative ids are invalid and should be interpreted
  // as packet_id not being set.
  int packet_id = -1;
  // Additional data bound to the RTP packet for use in application code,
  // outside of WebRTC.
  rtc::scoped_refptr<rtc::RefCountedBase> additional_data;
  // Whether this is a retransmission of an earlier packet.
  bool is_retransmit = false;
  bool included_in_feedback = false;
  bool included_in_allocation = false;
  // Whether this packet can be part of a packet batch at lower levels.
  bool batchable = false;
  // Whether this packet is the last of a batch.
  bool last_packet_in_batch = false;
};

class Transport {
 public:
  virtual bool SendRtp(rtc::ArrayView<const uint8_t> packet,
                       const PacketOptions& options) = 0;
  virtual bool SendRtcp(rtc::ArrayView<const uint8_t> packet) = 0;

 protected:
  virtual ~Transport() {}
};

}  // namespace webrtc

#endif  // API_CALL_TRANSPORT_H_
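For completeness, a sketch of a Transport implementation that hands serialized packets to some application-defined socket layer. `SendPacketSomehow` is a hypothetical placeholder, not a WebRTC API.

// Sketch: a Transport that forwards RTP/RTCP packets to an application-owned
// send function. SendPacketSomehow() is hypothetical.
#include <cstddef>
#include <cstdint>

#include "api/array_view.h"
#include "api/call/transport.h"

bool SendPacketSomehow(const uint8_t* data, size_t size);  // Hypothetical.

class ExampleTransport : public webrtc::Transport {
 public:
  bool SendRtp(rtc::ArrayView<const uint8_t> packet,
               const webrtc::PacketOptions& options) override {
    // `options.packet_id` could be recorded here to correlate send reports.
    return SendPacketSomehow(packet.data(), packet.size());
  }
  bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override {
    return SendPacketSomehow(packet.data(), packet.size());
  }
};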
219
TMessagesProj/jni/voip/webrtc/api/candidate.cc
Normal file
219
TMessagesProj/jni/voip/webrtc/api/candidate.cc
Normal file
|
|
@ -0,0 +1,219 @@
|
|||
/*
|
||||
* Copyright 2017 The WebRTC Project Authors. All rights reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/candidate.h"
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "rtc_base/helpers.h"
|
||||
#include "rtc_base/ip_address.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/strings/string_builder.h"
|
||||
|
||||
namespace cricket {
|
||||
|
||||
ABSL_CONST_INIT const absl::string_view LOCAL_PORT_TYPE = "local";
|
||||
ABSL_CONST_INIT const absl::string_view STUN_PORT_TYPE = "stun";
|
||||
ABSL_CONST_INIT const absl::string_view PRFLX_PORT_TYPE = "prflx";
|
||||
ABSL_CONST_INIT const absl::string_view RELAY_PORT_TYPE = "relay";
|
||||
|
||||
Candidate::Candidate()
|
||||
: id_(rtc::CreateRandomString(8)),
|
||||
component_(0),
|
||||
priority_(0),
|
||||
type_(LOCAL_PORT_TYPE),
|
||||
network_type_(rtc::ADAPTER_TYPE_UNKNOWN),
|
||||
underlying_type_for_vpn_(rtc::ADAPTER_TYPE_UNKNOWN),
|
||||
generation_(0),
|
||||
network_id_(0),
|
||||
network_cost_(0) {}
|
||||
|
||||
Candidate::Candidate(int component,
|
||||
absl::string_view protocol,
|
||||
const rtc::SocketAddress& address,
|
||||
uint32_t priority,
|
||||
absl::string_view username,
|
||||
absl::string_view password,
|
||||
absl::string_view type,
|
||||
uint32_t generation,
|
||||
absl::string_view foundation,
|
||||
uint16_t network_id,
|
||||
uint16_t network_cost)
|
||||
: id_(rtc::CreateRandomString(8)),
|
||||
component_(component),
|
||||
protocol_(protocol),
|
||||
address_(address),
|
||||
priority_(priority),
|
||||
username_(username),
|
||||
password_(password),
|
||||
type_(type),
|
||||
network_type_(rtc::ADAPTER_TYPE_UNKNOWN),
|
||||
underlying_type_for_vpn_(rtc::ADAPTER_TYPE_UNKNOWN),
|
||||
generation_(generation),
|
||||
foundation_(foundation),
|
||||
network_id_(network_id),
|
||||
network_cost_(network_cost) {}
|
||||
|
||||
Candidate::Candidate(const Candidate&) = default;
|
||||
|
||||
Candidate::~Candidate() = default;
|
||||
|
||||
void Candidate::generate_id() {
|
||||
id_ = rtc::CreateRandomString(8);
|
||||
}
|
||||
|
||||
bool Candidate::is_local() const {
|
||||
return type_ == LOCAL_PORT_TYPE;
|
||||
}
|
||||
bool Candidate::is_stun() const {
|
||||
return type_ == STUN_PORT_TYPE;
|
||||
}
|
||||
bool Candidate::is_prflx() const {
|
||||
return type_ == PRFLX_PORT_TYPE;
|
||||
}
|
||||
bool Candidate::is_relay() const {
|
||||
return type_ == RELAY_PORT_TYPE;
|
||||
}
|
||||
|
||||
absl::string_view Candidate::type_name() const {
|
||||
// The LOCAL_PORT_TYPE and STUN_PORT_TYPE constants are not the standard type
|
||||
// names, so check for those specifically. For other types, `type_` will have
|
||||
// the correct name.
|
||||
if (is_local())
|
||||
return "host";
|
||||
if (is_stun())
|
||||
return "srflx";
|
||||
return type_;
|
||||
}
|
||||
|
||||
bool Candidate::IsEquivalent(const Candidate& c) const {
|
||||
// We ignore the network name, since that is just debug information, and
|
||||
// the priority and the network cost, since they should be the same if the
|
||||
// rest are.
|
||||
return (component_ == c.component_) && (protocol_ == c.protocol_) &&
|
||||
(address_ == c.address_) && (username_ == c.username_) &&
|
||||
(password_ == c.password_) && (type_ == c.type_) &&
|
||||
(generation_ == c.generation_) && (foundation_ == c.foundation_) &&
|
||||
(related_address_ == c.related_address_) &&
|
||||
(network_id_ == c.network_id_);
|
||||
}
|
||||
|
||||
bool Candidate::MatchesForRemoval(const Candidate& c) const {
|
||||
return component_ == c.component_ && protocol_ == c.protocol_ &&
|
||||
address_ == c.address_;
|
||||
}
|
||||
|
||||
std::string Candidate::ToStringInternal(bool sensitive) const {
|
||||
rtc::StringBuilder ost;
|
||||
std::string address =
|
||||
sensitive ? address_.ToSensitiveString() : address_.ToString();
|
||||
std::string related_address = sensitive ? related_address_.ToSensitiveString()
|
||||
: related_address_.ToString();
|
||||
ost << "Cand[" << transport_name_ << ":" << foundation_ << ":" << component_
|
||||
<< ":" << protocol_ << ":" << priority_ << ":" << address << ":"
|
||||
<< type_name() << ":" << related_address << ":" << username_ << ":"
|
||||
<< password_ << ":" << network_id_ << ":" << network_cost_ << ":"
|
||||
<< generation_ << "]";
|
||||
return ost.Release();
|
||||
}
|
||||
|
||||
uint32_t Candidate::GetPriority(uint32_t type_preference,
|
||||
int network_adapter_preference,
|
||||
int relay_preference,
|
||||
bool adjust_local_preference) const {
|
||||
// RFC 5245 - 4.1.2.1.
|
||||
// priority = (2^24)*(type preference) +
|
||||
// (2^8)*(local preference) +
|
||||
// (2^0)*(256 - component ID)
|
||||
|
||||
// `local_preference` length is 2 bytes, 0-65535 inclusive.
|
||||
// In our implemenation we will partion local_preference into
|
||||
// 0 1
|
||||
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
|
||||
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
// | NIC Pref | Addr Pref |
|
||||
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
// NIC Type - Type of the network adapter e.g. 3G/Wifi/Wired.
|
||||
// Addr Pref - Address preference value as per RFC 3484.
|
||||
// local preference = (NIC Type << 8 | Addr_Pref) + relay preference.
|
||||
// The relay preference is based on the number of TURN servers, the
|
||||
// first TURN server gets the highest preference.
|
||||
int addr_pref = IPAddressPrecedence(address_.ipaddr());
|
||||
int local_preference =
|
||||
((network_adapter_preference << 8) | addr_pref) + relay_preference;
|
||||
|
||||
// Ensure that the added relay preference will not result in a relay candidate
|
||||
// whose STUN priority attribute has a higher priority than a server-reflexive
|
||||
// candidate.
|
||||
// The STUN priority attribute is calculated as
|
||||
// (peer-reflexive type preference) << 24 | (priority & 0x00FFFFFF)
|
||||
// as described in
|
||||
// https://www.rfc-editor.org/rfc/rfc5245#section-7.1.2.1
|
||||
// To satisfy that condition, add kMaxTurnServers to the local preference.
|
||||
// This can not overflow the field width since the highest "NIC pref"
|
||||
// assigned is kHighestNetworkPreference = 127
|
||||
RTC_DCHECK_LT(local_preference + kMaxTurnServers, 0x10000);
|
||||
if (adjust_local_preference && relay_protocol_.empty()) {
|
||||
local_preference += kMaxTurnServers;
|
||||
}
|
||||
|
||||
return (type_preference << 24) | (local_preference << 8) | (256 - component_);
|
||||
}
|
||||
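// For concreteness, a worked example of the composition in GetPriority()
// above, with assumed inputs (these particular numbers are illustrative and
// not taken from this file): type_preference = 126,
// network_adapter_preference = 127, addr_pref = 40, relay_preference = 0,
// component_ = 1.
//   local_preference = (127 << 8 | 40) + 0 = 32552
//   priority = (126 << 24) + (32552 << 8) + (256 - 1)
//            = 2113929216 + 8333312 + 255
//            = 2122262783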
|
||||
bool Candidate::operator==(const Candidate& o) const {
|
||||
return id_ == o.id_ && component_ == o.component_ &&
|
||||
protocol_ == o.protocol_ && relay_protocol_ == o.relay_protocol_ &&
|
||||
address_ == o.address_ && priority_ == o.priority_ &&
|
||||
username_ == o.username_ && password_ == o.password_ &&
|
||||
type_ == o.type_ && network_name_ == o.network_name_ &&
|
||||
network_type_ == o.network_type_ && generation_ == o.generation_ &&
|
||||
foundation_ == o.foundation_ &&
|
||||
related_address_ == o.related_address_ && tcptype_ == o.tcptype_ &&
|
||||
transport_name_ == o.transport_name_ && network_id_ == o.network_id_;
|
||||
}
|
||||
|
||||
bool Candidate::operator!=(const Candidate& o) const {
|
||||
return !(*this == o);
|
||||
}
|
||||
|
||||
Candidate Candidate::ToSanitizedCopy(bool use_hostname_address,
|
||||
bool filter_related_address) const {
|
||||
Candidate copy(*this);
|
||||
if (use_hostname_address) {
|
||||
rtc::IPAddress ip;
|
||||
if (address().hostname().empty()) {
|
||||
// IP needs to be redacted, but no hostname available.
|
||||
rtc::SocketAddress redacted_addr("redacted-ip.invalid", address().port());
|
||||
copy.set_address(redacted_addr);
|
||||
} else if (IPFromString(address().hostname(), &ip)) {
|
||||
// The hostname is an IP literal, and needs to be redacted too.
|
||||
rtc::SocketAddress redacted_addr("redacted-literal.invalid",
|
||||
address().port());
|
||||
copy.set_address(redacted_addr);
|
||||
} else {
|
||||
rtc::SocketAddress hostname_only_addr(address().hostname(),
|
||||
address().port());
|
||||
copy.set_address(hostname_only_addr);
|
||||
}
|
||||
}
|
||||
if (filter_related_address) {
|
||||
copy.set_related_address(
|
||||
rtc::EmptySocketAddressWithFamily(copy.address().family()));
|
||||
}
|
||||
return copy;
|
||||
}
|
||||
|
||||
void Candidate::Assign(std::string& s, absl::string_view view) {
|
||||
// Assigning via a temporary object, like s = std::string(view), results in
|
||||
// binary size bloat. To avoid that, extract pointer and size from the
|
||||
// string view, and use std::string::assign method.
|
||||
s.assign(view.data(), view.size());
|
||||
}
|
||||
|
||||
} // namespace cricket
|
||||
255
TMessagesProj/jni/voip/webrtc/api/candidate.h
Normal file
255
TMessagesProj/jni/voip/webrtc/api/candidate.h
Normal file
|
|
@ -0,0 +1,255 @@
|
|||
/*
 * Copyright 2004 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_CANDIDATE_H_
#define API_CANDIDATE_H_

#include <limits.h>
#include <stdint.h>

#include <algorithm>
#include <string>

#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/network_constants.h"
#include "rtc_base/socket_address.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {
enum class IceCandidateType : int { kHost, kSrflx, kPrflx, kRelay };
}  // namespace webrtc

namespace cricket {

// TODO(tommi): These are temporarily here, moved from `port.h` and will
// eventually be removed once we use enums instead of strings for these values.
RTC_EXPORT extern const absl::string_view LOCAL_PORT_TYPE;
RTC_EXPORT extern const absl::string_view STUN_PORT_TYPE;
RTC_EXPORT extern const absl::string_view PRFLX_PORT_TYPE;
RTC_EXPORT extern const absl::string_view RELAY_PORT_TYPE;

// TURN servers are limited to 32 in accordance with
// https://w3c.github.io/webrtc-pc/#dom-rtcconfiguration-iceservers
static constexpr size_t kMaxTurnServers = 32;

// Candidate for ICE based connection discovery.
// TODO(phoglund): remove things in here that are not needed in the public API.

class RTC_EXPORT Candidate {
 public:
  Candidate();
  // TODO(pthatcher): Match the ordering and param list as per RFC 5245
  // candidate-attribute syntax. http://tools.ietf.org/html/rfc5245#section-15.1
  Candidate(int component,
            absl::string_view protocol,
            const rtc::SocketAddress& address,
            uint32_t priority,
            absl::string_view username,
            absl::string_view password,
            absl::string_view type ABSL_ATTRIBUTE_LIFETIME_BOUND,
            uint32_t generation,
            absl::string_view foundation,
            uint16_t network_id = 0,
            uint16_t network_cost = 0);
  Candidate(const Candidate&);
  ~Candidate();

  // 8 character long randomized ID string for logging purposes.
  const std::string& id() const { return id_; }
  // Generates a new, 8 character long, id.
  void generate_id();
  // TODO(tommi): Callers should use generate_id(). Remove.
  [[deprecated]] void set_id(absl::string_view id) { Assign(id_, id); }

  int component() const { return component_; }
  void set_component(int component) { component_ = component; }

  const std::string& protocol() const { return protocol_; }
  void set_protocol(absl::string_view protocol) { Assign(protocol_, protocol); }

  // The protocol used to talk to relay.
  const std::string& relay_protocol() const { return relay_protocol_; }
  void set_relay_protocol(absl::string_view protocol) {
    Assign(relay_protocol_, protocol);
  }

  const rtc::SocketAddress& address() const { return address_; }
  void set_address(const rtc::SocketAddress& address) { address_ = address; }

  uint32_t priority() const { return priority_; }
  void set_priority(const uint32_t priority) { priority_ = priority; }

  // TODO(honghaiz): Change to usernameFragment or ufrag.
  const std::string& username() const { return username_; }
  void set_username(absl::string_view username) { Assign(username_, username); }

  const std::string& password() const { return password_; }
  void set_password(absl::string_view password) { Assign(password_, password); }

  const std::string& type() const { return type_; }

  // Returns the name of the candidate type as specified in
  // https://datatracker.ietf.org/doc/html/rfc5245#section-15.1
  absl::string_view type_name() const;

  // Setting the type requires a constant string (e.g.
  // cricket::LOCAL_PORT_TYPE). The type should really be an enum rather than a
  // string, but until we make that change the lifetime attribute helps us lock
  // things down. See also the `Port` class.
  void set_type(absl::string_view type ABSL_ATTRIBUTE_LIFETIME_BOUND) {
    Assign(type_, type);
  }

  // Provide these simple checkers to abstract away dependency on the port types
  // that are currently defined outside of Candidate. This will ease the change
  // from the string type to an enum.
  bool is_local() const;
  bool is_stun() const;
  bool is_prflx() const;
  bool is_relay() const;

  // Returns the type preference, a value between 0-126 inclusive, with 0 being
  // the lowest preference value, as described in RFC 5245.
  // https://datatracker.ietf.org/doc/html/rfc5245#section-4.1.2.1
  int type_preference() const {
    // From https://datatracker.ietf.org/doc/html/rfc5245#section-4.1.4 :
    // It is RECOMMENDED that default candidates be chosen based on the
    // likelihood of those candidates to work with the peer that is being
    // contacted.
    // I.e. it is recommended that relayed > reflexive > host.
    if (is_local())
      return 1;  // Host.
    if (is_stun())
      return 2;  // Reflexive.
    if (is_relay())
      return 3;  // Relayed.
    return 0;  // Unknown, lowest preference.
  }

  const std::string& network_name() const { return network_name_; }
  void set_network_name(absl::string_view network_name) {
    Assign(network_name_, network_name);
  }

  rtc::AdapterType network_type() const { return network_type_; }
  void set_network_type(rtc::AdapterType network_type) {
    network_type_ = network_type;
  }

  rtc::AdapterType underlying_type_for_vpn() const {
    return underlying_type_for_vpn_;
  }
  void set_underlying_type_for_vpn(rtc::AdapterType network_type) {
    underlying_type_for_vpn_ = network_type;
  }

  // Candidates in a new generation replace those in the old generation.
  uint32_t generation() const { return generation_; }
  void set_generation(uint32_t generation) { generation_ = generation; }

  // `network_cost` measures the cost/penalty of using this candidate. A network
  // cost of 0 indicates this candidate can be used freely. A value of
  // rtc::kNetworkCostMax indicates it should be used only as the last resort.
  void set_network_cost(uint16_t network_cost) {
    RTC_DCHECK_LE(network_cost, rtc::kNetworkCostMax);
    network_cost_ = network_cost;
  }
  uint16_t network_cost() const { return network_cost_; }

  // An ID assigned to the network hosting the candidate.
  uint16_t network_id() const { return network_id_; }
  void set_network_id(uint16_t network_id) { network_id_ = network_id; }

  const std::string& foundation() const { return foundation_; }
  void set_foundation(absl::string_view foundation) {
    Assign(foundation_, foundation);
  }

  const rtc::SocketAddress& related_address() const { return related_address_; }
  void set_related_address(const rtc::SocketAddress& related_address) {
    related_address_ = related_address;
  }
  const std::string& tcptype() const { return tcptype_; }
  void set_tcptype(absl::string_view tcptype) { Assign(tcptype_, tcptype); }

  // The name of the transport channel of this candidate.
  // TODO(phoglund): remove.
  const std::string& transport_name() const { return transport_name_; }
  void set_transport_name(absl::string_view transport_name) {
    Assign(transport_name_, transport_name);
  }

  // The URL of the ICE server which this candidate is gathered from.
  const std::string& url() const { return url_; }
  void set_url(absl::string_view url) { Assign(url_, url); }

  // Determines whether this candidate is equivalent to the given one.
  bool IsEquivalent(const Candidate& c) const;

  // Determines whether this candidate can be considered equivalent to the
  // given one when looking for a matching candidate to remove.
  bool MatchesForRemoval(const Candidate& c) const;

  std::string ToString() const { return ToStringInternal(false); }

  std::string ToSensitiveString() const { return ToStringInternal(true); }

  uint32_t GetPriority(uint32_t type_preference,
                       int network_adapter_preference,
                       int relay_preference,
                       bool adjust_local_preference) const;

  bool operator==(const Candidate& o) const;
  bool operator!=(const Candidate& o) const;

  // Returns a sanitized copy configured by the given booleans. If
  // `use_hostname_address` is true, the returned copy has its IP removed from
  // `address()`, which leads `address()` to be a hostname address. If
  // `filter_related_address` is true, the returned copy has its related address
  // reset to the wildcard address (i.e. 0.0.0.0 for IPv4 and :: for IPv6). Note
  // that setting both booleans to false returns an identical copy to the
  // original candidate.
  Candidate ToSanitizedCopy(bool use_hostname_address,
                            bool filter_related_address) const;

 private:
  // TODO(bugs.webrtc.org/13220): With C++17, we get a std::string assignment
  // operator accepting any object implicitly convertible to std::string_view,
  // and then we don't need this workaround.
  static void Assign(std::string& s, absl::string_view view);
  std::string ToStringInternal(bool sensitive) const;

  std::string id_;
  int component_;
  std::string protocol_;
  std::string relay_protocol_;
  rtc::SocketAddress address_;
  uint32_t priority_;
  std::string username_;
  std::string password_;
  std::string type_;
  std::string network_name_;
  rtc::AdapterType network_type_;
  rtc::AdapterType underlying_type_for_vpn_;
  uint32_t generation_;
  std::string foundation_;
  rtc::SocketAddress related_address_;
  std::string tcptype_;
  std::string transport_name_;
  uint16_t network_id_;
  uint16_t network_cost_;
  std::string url_;
};

}  // namespace cricket

#endif  // API_CANDIDATE_H_
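A short usage sketch for the type accessors declared above; the `PrefersOver` helper name is illustrative, not part of the API:

#include "api/candidate.h"

// Sketch: order two candidates by the relative preference exposed by
// type_preference() (relay > srflx > host > unknown).
bool PrefersOver(const cricket::Candidate& a, const cricket::Candidate& b) {
  return a.type_preference() > b.type_preference();
}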
56
TMessagesProj/jni/voip/webrtc/api/candidate_unittest.cc
Normal file
@@ -0,0 +1,56 @@
/*
 * Copyright 2024 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "api/candidate.h"

#include <string>

#include "p2p/base/p2p_constants.h"
#include "rtc_base/gunit.h"

namespace cricket {

TEST(CandidateTest, Id) {
  Candidate c;
  EXPECT_EQ(c.id().size(), 8u);
  std::string current_id = c.id();
  // Generate a new ID.
  c.generate_id();
  EXPECT_EQ(c.id().size(), 8u);
  EXPECT_NE(current_id, c.id());
}

TEST(CandidateTest, Component) {
  Candidate c;
  EXPECT_EQ(c.component(), 0);
  c.set_component(ICE_CANDIDATE_COMPONENT_DEFAULT);
  EXPECT_EQ(c.component(), ICE_CANDIDATE_COMPONENT_DEFAULT);
}

TEST(CandidateTest, TypeName) {
  Candidate c;
  // The `type_name()` property defaults to "host".
  EXPECT_EQ(c.type_name(), "host");
  EXPECT_EQ(c.type(), LOCAL_PORT_TYPE);

  c.set_type(STUN_PORT_TYPE);
  EXPECT_EQ(c.type_name(), "srflx");
  EXPECT_EQ(c.type(), STUN_PORT_TYPE);

  c.set_type(PRFLX_PORT_TYPE);
  EXPECT_EQ(c.type_name(), "prflx");
  EXPECT_EQ(c.type(), PRFLX_PORT_TYPE);

  c.set_type(RELAY_PORT_TYPE);
  EXPECT_EQ(c.type_name(), "relay");
  EXPECT_EQ(c.type(), RELAY_PORT_TYPE);
}

}  // namespace cricket
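The sanitization contract documented in `candidate.h` could be exercised with a test along these lines (a sketch; the test name and addresses are illustrative):

#include "api/candidate.h"
#include "rtc_base/gunit.h"

TEST(CandidateTest, SanitizedCopyFiltersRelatedAddress) {
  cricket::Candidate c;
  c.set_address(rtc::SocketAddress("192.0.2.10", 1234));
  c.set_related_address(rtc::SocketAddress("192.0.2.20", 5678));

  cricket::Candidate sanitized =
      c.ToSanitizedCopy(/*use_hostname_address=*/false,
                        /*filter_related_address=*/true);
  // Only the related address is reset; the candidate address is untouched.
  EXPECT_TRUE(sanitized.related_address().IsAnyIP());
  EXPECT_EQ(sanitized.address(), c.address());
}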
75
TMessagesProj/jni/voip/webrtc/api/create_peerconnection_factory.cc
Normal file
@@ -0,0 +1,75 @@
/*
 * Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "api/create_peerconnection_factory.h"

#include <memory>
#include <utility>

#include "api/enable_media.h"
#include "api/peer_connection_interface.h"
#include "api/rtc_event_log/rtc_event_log_factory.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/transport/field_trial_based_config.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/thread.h"

namespace webrtc {

rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
    rtc::Thread* network_thread,
    rtc::Thread* worker_thread,
    rtc::Thread* signaling_thread,
    rtc::scoped_refptr<AudioDeviceModule> default_adm,
    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
    rtc::scoped_refptr<AudioMixer> audio_mixer,
    rtc::scoped_refptr<AudioProcessing> audio_processing,
    std::unique_ptr<AudioFrameProcessor> audio_frame_processor,
    std::unique_ptr<FieldTrialsView> field_trials) {
  if (!field_trials) {
    field_trials = std::make_unique<webrtc::FieldTrialBasedConfig>();
  }

  PeerConnectionFactoryDependencies dependencies;
  dependencies.network_thread = network_thread;
  dependencies.worker_thread = worker_thread;
  dependencies.signaling_thread = signaling_thread;
  dependencies.task_queue_factory =
      CreateDefaultTaskQueueFactory(field_trials.get());
  dependencies.event_log_factory = std::make_unique<RtcEventLogFactory>();
  dependencies.trials = std::move(field_trials);

  if (network_thread) {
    // TODO(bugs.webrtc.org/13145): Add an rtc::SocketFactory* argument.
    dependencies.socket_factory = network_thread->socketserver();
  }
  dependencies.adm = std::move(default_adm);
  dependencies.audio_encoder_factory = std::move(audio_encoder_factory);
  dependencies.audio_decoder_factory = std::move(audio_decoder_factory);
  dependencies.audio_frame_processor = std::move(audio_frame_processor);
  if (audio_processing) {
    dependencies.audio_processing = std::move(audio_processing);
  } else {
    dependencies.audio_processing = AudioProcessingBuilder().Create();
  }
  dependencies.audio_mixer = std::move(audio_mixer);
  dependencies.video_encoder_factory = std::move(video_encoder_factory);
  dependencies.video_decoder_factory = std::move(video_decoder_factory);
  EnableMedia(dependencies);

  return CreateModularPeerConnectionFactory(std::move(dependencies));
}

}  // namespace webrtc
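Callers that want to control the dependency wiring themselves can use the modular entry point that this helper wraps. A sketch, assuming the threads and codec factories are created elsewhere by the caller; the `BuildWithDeps` name is illustrative:

#include "api/enable_media.h"
#include "api/peer_connection_interface.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "rtc_base/thread.h"

rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> BuildWithDeps(
    rtc::Thread* network_thread,
    rtc::Thread* worker_thread,
    rtc::Thread* signaling_thread,
    rtc::scoped_refptr<webrtc::AudioEncoderFactory> audio_encoder_factory,
    rtc::scoped_refptr<webrtc::AudioDecoderFactory> audio_decoder_factory) {
  webrtc::PeerConnectionFactoryDependencies deps;
  deps.network_thread = network_thread;
  deps.worker_thread = worker_thread;
  deps.signaling_thread = signaling_thread;
  deps.task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
  deps.audio_encoder_factory = std::move(audio_encoder_factory);
  deps.audio_decoder_factory = std::move(audio_decoder_factory);
  // Register the media engine built from the supplied codec factories.
  webrtc::EnableMedia(deps);
  return webrtc::CreateModularPeerConnectionFactory(std::move(deps));
}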
57
TMessagesProj/jni/voip/webrtc/api/create_peerconnection_factory.h
Normal file
@@ -0,0 +1,57 @@
/*
 * Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef API_CREATE_PEERCONNECTION_FACTORY_H_
#define API_CREATE_PEERCONNECTION_FACTORY_H_

#include <memory>

#include "api/audio/audio_mixer.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/peer_connection_interface.h"
#include "api/scoped_refptr.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"

namespace rtc {
// TODO(bugs.webrtc.org/9987): Move rtc::Thread to api/ or expose a better
// type. At the moment, rtc::Thread is not part of api/ so it cannot be
// included here without leaking internal types.
class Thread;
}  // namespace rtc

namespace webrtc {

class AudioDeviceModule;
class AudioFrameProcessor;
class AudioProcessing;

// Create a new instance of PeerConnectionFactoryInterface with optional video
// codec factories. These video factories represent all video codecs, i.e. no
// extra internal video codecs will be added.
RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
CreatePeerConnectionFactory(
    rtc::Thread* network_thread,
    rtc::Thread* worker_thread,
    rtc::Thread* signaling_thread,
    rtc::scoped_refptr<AudioDeviceModule> default_adm,
    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
    rtc::scoped_refptr<AudioMixer> audio_mixer,
    rtc::scoped_refptr<AudioProcessing> audio_processing,
    std::unique_ptr<AudioFrameProcessor> audio_frame_processor = nullptr,
    std::unique_ptr<FieldTrialsView> field_trials = nullptr);

}  // namespace webrtc

#endif  // API_CREATE_PEERCONNECTION_FACTORY_H_
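A typical call into this function might look as follows. This is a sketch: the builtin audio codec factory helpers are assumed to be available in the build, the `MakeAudioOnlyFactory` name is illustrative, and passing null video factories simply means no video codecs are registered:

#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/audio_codecs/builtin_audio_encoder_factory.h"
#include "api/create_peerconnection_factory.h"
#include "rtc_base/thread.h"

// Sketch: audio-only factory with an application-owned signaling thread and
// internally created network/worker threads (signaled by the null pointers).
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> MakeAudioOnlyFactory(
    rtc::Thread* signaling_thread) {
  return webrtc::CreatePeerConnectionFactory(
      /*network_thread=*/nullptr, /*worker_thread=*/nullptr, signaling_thread,
      /*default_adm=*/nullptr,
      webrtc::CreateBuiltinAudioEncoderFactory(),
      webrtc::CreateBuiltinAudioDecoderFactory(),
      /*video_encoder_factory=*/nullptr,
      /*video_decoder_factory=*/nullptr,
      /*audio_mixer=*/nullptr, /*audio_processing=*/nullptr);
}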
49
TMessagesProj/jni/voip/webrtc/api/crypto/BUILD.gn
Normal file
@@ -0,0 +1,49 @@
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

import("../../webrtc.gni")

group("crypto") {
  deps = [
    ":frame_decryptor_interface",
    ":frame_encryptor_interface",
    ":options",
  ]
}

rtc_library("options") {
  visibility = [ "*" ]
  sources = [
    "crypto_options.cc",
    "crypto_options.h",
  ]
  deps = [
    "../../rtc_base:ssl",
    "../../rtc_base/system:rtc_export",
  ]
}

rtc_source_set("frame_decryptor_interface") {
  visibility = [ "*" ]
  sources = [ "frame_decryptor_interface.h" ]
  deps = [
    "..:array_view",
    "..:rtp_parameters",
    "../../rtc_base:refcount",
  ]
}

rtc_source_set("frame_encryptor_interface") {
  visibility = [ "*" ]
  sources = [ "frame_encryptor_interface.h" ]
  deps = [
    "..:array_view",
    "..:rtp_parameters",
    "../../rtc_base:refcount",
  ]
}
Some files were not shown because too many files have changed in this diff.