Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

@@ -0,0 +1,2 @@
alessiob@webrtc.org
henrik.lundin@webrtc.org

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_mixer/audio_frame_manipulator.h"
#include "audio/utility/audio_frame_operations.h"
#include "audio/utility/channel_mixer.h"
#include "rtc_base/checks.h"
namespace webrtc {
uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame) {
if (audio_frame.muted()) {
return 0;
}
uint32_t energy = 0;
const int16_t* frame_data = audio_frame.data();
for (size_t position = 0;
position < audio_frame.samples_per_channel_ * audio_frame.num_channels_;
position++) {
// TODO(aleloi): This can overflow. Convert to floats.
energy += frame_data[position] * frame_data[position];
}
return energy;
}
void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame) {
RTC_DCHECK(audio_frame);
RTC_DCHECK_GE(start_gain, 0.0f);
RTC_DCHECK_GE(target_gain, 0.0f);
if (start_gain == target_gain || audio_frame->muted()) {
return;
}
size_t samples = audio_frame->samples_per_channel_;
RTC_DCHECK_LT(0, samples);
float increment = (target_gain - start_gain) / samples;
float gain = start_gain;
int16_t* frame_data = audio_frame->mutable_data();
for (size_t i = 0; i < samples; ++i) {
// If the audio is interleaved over several channels, we want to
// apply the same gain change to the ith sample of every channel.
for (size_t ch = 0; ch < audio_frame->num_channels_; ++ch) {
frame_data[audio_frame->num_channels_ * i + ch] *= gain;
}
gain += increment;
}
}
void RemixFrame(size_t target_number_of_channels, AudioFrame* frame) {
RTC_DCHECK_GE(target_number_of_channels, 1);
// TODO(bugs.webrtc.org/10783): take channel layout into account as well.
if (frame->num_channels() == target_number_of_channels) {
return;
}
// Use legacy components for the simplest cases (mono <-> stereo) to ensure
// that native WebRTC clients are not affected when support for multi-channel
// audio is added to Chrome.
// TODO(bugs.webrtc.org/10783): utilize channel mixer for mono/stereo as well.
if (target_number_of_channels < 3 && frame->num_channels() < 3) {
if (frame->num_channels() > target_number_of_channels) {
AudioFrameOperations::DownmixChannels(target_number_of_channels, frame);
} else {
AudioFrameOperations::UpmixChannels(target_number_of_channels, frame);
}
} else {
// Use generic channel mixer when the number of channels for input or
// output is larger than two. E.g. stereo -> 5.1 channel up-mixing.
// TODO(bugs.webrtc.org/10783): ensure that actual channel layouts are used
// instead of guessing based on number of channels.
const ChannelLayout output_layout(
GuessChannelLayout(target_number_of_channels));
ChannelMixer mixer(GuessChannelLayout(frame->num_channels()),
output_layout);
mixer.Transform(frame);
RTC_DCHECK_EQ(frame->channel_layout(), output_layout);
}
RTC_DCHECK_EQ(frame->num_channels(), target_number_of_channels)
<< "Wrong number of channels, " << frame->num_channels() << " vs "
<< target_number_of_channels;
}
} // namespace webrtc
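The TODO above flags that the integer accumulation in AudioMixerCalculateEnergy can overflow once the summed squares exceed 32 bits. A minimal sketch of the float-accumulating variant the TODO suggests, with a hypothetical helper name and a saturating cast on return (not part of this commit):

#include "api/audio/audio_frame.h"
#include "rtc_base/numerics/safe_conversions.h"

// Hypothetical overflow-safe variant: accumulate in a float, saturate on return.
uint32_t AudioMixerCalculateEnergyFloat(const webrtc::AudioFrame& audio_frame) {
  if (audio_frame.muted()) {
    return 0;
  }
  float energy = 0.f;
  const int16_t* frame_data = audio_frame.data();
  const size_t num_samples =
      audio_frame.samples_per_channel_ * audio_frame.num_channels_;
  for (size_t i = 0; i < num_samples; ++i) {
    const float sample = frame_data[i];
    energy += sample * sample;
  }
  return rtc::saturated_cast<uint32_t>(energy);
}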

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
#define MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
#include <stddef.h>
#include <stdint.h>
#include "api/audio/audio_frame.h"
namespace webrtc {
// Computes the energy of `audio_frame` (based on its samples).
uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame);
// Ramps up or down the provided audio frame. Ramp(0, 1, frame) will
// linearly increase the samples in the frame from 0 to full volume.
void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame);
// Downmixes or upmixes a frame to the target number of channels.
void RemixFrame(size_t target_number_of_channels, AudioFrame* frame);
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
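A short usage sketch for the two helpers declared above (hypothetical caller code, not part of this commit): fade a 10 ms stereo frame in from silence, then downmix it to mono.

#include <algorithm>

#include "api/audio/audio_frame.h"
#include "modules/audio_mixer/audio_frame_manipulator.h"

void FadeInAndDownmix() {
  webrtc::AudioFrame frame;
  frame.UpdateFrame(/*timestamp=*/0, /*data=*/nullptr,
                    /*samples_per_channel=*/480, /*sample_rate_hz=*/48000,
                    webrtc::AudioFrame::kNormalSpeech,
                    webrtc::AudioFrame::kVadActive, /*num_channels=*/2);
  // mutable_data() unmutes the frame and zero-fills it; write a constant so
  // the ramp has something to scale.
  int16_t* data = frame.mutable_data();
  std::fill(data, data + 2 * 480, static_cast<int16_t>(1000));
  webrtc::Ramp(/*start_gain=*/0.f, /*target_gain=*/1.f, &frame);  // Fade in.
  webrtc::RemixFrame(/*target_number_of_channels=*/1, &frame);    // Stereo -> mono.
}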

@@ -0,0 +1,160 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_mixer/audio_mixer_impl.h"
#include <stdint.h>
#include <algorithm>
#include <iterator>
#include <type_traits>
#include <utility>
#include "modules/audio_mixer/audio_frame_manipulator.h"
#include "modules/audio_mixer/default_output_rate_calculator.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/metrics.h"
namespace webrtc {
struct AudioMixerImpl::SourceStatus {
explicit SourceStatus(Source* audio_source) : audio_source(audio_source) {}
Source* audio_source = nullptr;
// A frame that will be passed to audio_source->GetAudioFrameWithInfo.
AudioFrame audio_frame;
};
namespace {
std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>>::const_iterator
FindSourceInList(
AudioMixerImpl::Source const* audio_source,
std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>> const*
audio_source_list) {
return std::find_if(
audio_source_list->begin(), audio_source_list->end(),
[audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) {
return p->audio_source == audio_source;
});
}
} // namespace
struct AudioMixerImpl::HelperContainers {
void resize(size_t size) {
audio_to_mix.resize(size);
preferred_rates.resize(size);
}
std::vector<AudioFrame*> audio_to_mix;
std::vector<int> preferred_rates;
};
AudioMixerImpl::AudioMixerImpl(
std::unique_ptr<OutputRateCalculator> output_rate_calculator,
bool use_limiter)
: output_rate_calculator_(std::move(output_rate_calculator)),
audio_source_list_(),
helper_containers_(std::make_unique<HelperContainers>()),
frame_combiner_(use_limiter) {}
AudioMixerImpl::~AudioMixerImpl() {}
rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create() {
return Create(std::unique_ptr<DefaultOutputRateCalculator>(
new DefaultOutputRateCalculator()),
/*use_limiter=*/true);
}
rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create(
std::unique_ptr<OutputRateCalculator> output_rate_calculator,
bool use_limiter) {
return rtc::make_ref_counted<AudioMixerImpl>(
std::move(output_rate_calculator), use_limiter);
}
void AudioMixerImpl::Mix(size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) {
TRACE_EVENT0("webrtc", "AudioMixerImpl::Mix");
RTC_DCHECK(number_of_channels >= 1);
MutexLock lock(&mutex_);
size_t number_of_streams = audio_source_list_.size();
std::transform(audio_source_list_.begin(), audio_source_list_.end(),
helper_containers_->preferred_rates.begin(),
[&](std::unique_ptr<SourceStatus>& a) {
return a->audio_source->PreferredSampleRate();
});
int output_frequency = output_rate_calculator_->CalculateOutputRateFromRange(
rtc::ArrayView<const int>(helper_containers_->preferred_rates.data(),
number_of_streams));
frame_combiner_.Combine(GetAudioFromSources(output_frequency),
number_of_channels, output_frequency,
number_of_streams, audio_frame_for_mixing);
}
bool AudioMixerImpl::AddSource(Source* audio_source) {
RTC_DCHECK(audio_source);
MutexLock lock(&mutex_);
RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) ==
audio_source_list_.end())
<< "Source already added to mixer";
audio_source_list_.emplace_back(new SourceStatus(audio_source));
helper_containers_->resize(audio_source_list_.size());
UpdateSourceCountStats();
return true;
}
void AudioMixerImpl::RemoveSource(Source* audio_source) {
RTC_DCHECK(audio_source);
MutexLock lock(&mutex_);
const auto iter = FindSourceInList(audio_source, &audio_source_list_);
RTC_DCHECK(iter != audio_source_list_.end()) << "Source not present in mixer";
audio_source_list_.erase(iter);
}
rtc::ArrayView<AudioFrame* const> AudioMixerImpl::GetAudioFromSources(
int output_frequency) {
int audio_to_mix_count = 0;
for (auto& source_and_status : audio_source_list_) {
const auto audio_frame_info =
source_and_status->audio_source->GetAudioFrameWithInfo(
output_frequency, &source_and_status->audio_frame);
switch (audio_frame_info) {
case Source::AudioFrameInfo::kError:
RTC_LOG_F(LS_WARNING)
<< "failed to GetAudioFrameWithInfo() from source";
break;
case Source::AudioFrameInfo::kMuted:
break;
case Source::AudioFrameInfo::kNormal:
helper_containers_->audio_to_mix[audio_to_mix_count++] =
&source_and_status->audio_frame;
}
}
return rtc::ArrayView<AudioFrame* const>(
helper_containers_->audio_to_mix.data(), audio_to_mix_count);
}
void AudioMixerImpl::UpdateSourceCountStats() {
size_t current_source_count = audio_source_list_.size();
// Log to the histogram whenever the maximum number of sources increases.
if (current_source_count > max_source_count_ever_) {
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.AudioMixer.NewHighestSourceCount",
current_source_count, 1, 20, 20);
max_source_count_ever_ = current_source_count;
}
}
} // namespace webrtc

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
#define MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
#include <stddef.h>
#include <memory>
#include <vector>
#include "api/array_view.h"
#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"
#include "api/scoped_refptr.h"
#include "modules/audio_mixer/frame_combiner.h"
#include "modules/audio_mixer/output_rate_calculator.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
class AudioMixerImpl : public AudioMixer {
public:
struct SourceStatus;
// AudioProcessing only accepts 10 ms frames.
static const int kFrameDurationInMs = 10;
static rtc::scoped_refptr<AudioMixerImpl> Create();
static rtc::scoped_refptr<AudioMixerImpl> Create(
std::unique_ptr<OutputRateCalculator> output_rate_calculator,
bool use_limiter);
~AudioMixerImpl() override;
AudioMixerImpl(const AudioMixerImpl&) = delete;
AudioMixerImpl& operator=(const AudioMixerImpl&) = delete;
// AudioMixer functions
bool AddSource(Source* audio_source) override;
void RemoveSource(Source* audio_source) override;
void Mix(size_t number_of_channels,
AudioFrame* audio_frame_for_mixing) override
RTC_LOCKS_EXCLUDED(mutex_);
protected:
AudioMixerImpl(std::unique_ptr<OutputRateCalculator> output_rate_calculator,
bool use_limiter);
private:
struct HelperContainers;
void UpdateSourceCountStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Fetches audio frames to mix from sources.
rtc::ArrayView<AudioFrame* const> GetAudioFromSources(int output_frequency)
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// The critical section lock guards audio source insertion and
// removal, which can be done from any thread. The race checker
// checks that mixing is done sequentially.
mutable Mutex mutex_;
std::unique_ptr<OutputRateCalculator> output_rate_calculator_;
// List of all audio sources.
std::vector<std::unique_ptr<SourceStatus>> audio_source_list_
RTC_GUARDED_BY(mutex_);
const std::unique_ptr<HelperContainers> helper_containers_
RTC_GUARDED_BY(mutex_);
// Component that handles actual adding of audio frames.
FrameCombiner frame_combiner_;
// The highest source count this mixer has ever had. Used for UMA stats.
size_t max_source_count_ever_ = 0;
};
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
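A minimal end-to-end sketch of driving the mixer (hypothetical, not part of this commit). It assumes the AudioMixer::Source interface from api/audio/audio_mixer.h with GetAudioFrameWithInfo(), Ssrc() and PreferredSampleRate(), and reuses the SineWaveGenerator added later in this commit.

#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
#include "modules/audio_mixer/sine_wave_generator.h"

// Hypothetical source that produces a 440 Hz tone at whatever rate the mixer requests.
class ToneSource : public webrtc::AudioMixer::Source {
 public:
  AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
                                       webrtc::AudioFrame* audio_frame) override {
    audio_frame->UpdateFrame(0, nullptr,
                             /*samples_per_channel=*/sample_rate_hz / 100,
                             sample_rate_hz, webrtc::AudioFrame::kNormalSpeech,
                             webrtc::AudioFrame::kVadActive, /*num_channels=*/1);
    generator_.GenerateNextFrame(audio_frame);
    return AudioFrameInfo::kNormal;
  }
  int Ssrc() const override { return 1; }
  int PreferredSampleRate() const override { return 48000; }

 private:
  webrtc::SineWaveGenerator generator_{/*wave_frequency_hz=*/440.f,
                                       /*amplitude=*/10000};
};

void MixTenMilliseconds() {
  auto mixer = webrtc::AudioMixerImpl::Create();
  ToneSource source;
  mixer->AddSource(&source);
  webrtc::AudioFrame mixed;
  mixer->Mix(/*number_of_channels=*/1, &mixed);  // One 10 ms frame of mixed audio.
  mixer->RemoveSource(&source);
}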

@@ -0,0 +1,41 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_mixer/default_output_rate_calculator.h"
#include <algorithm>
#include <iterator>
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/checks.h"
namespace webrtc {
int DefaultOutputRateCalculator::CalculateOutputRateFromRange(
rtc::ArrayView<const int> preferred_sample_rates) {
if (preferred_sample_rates.empty()) {
return DefaultOutputRateCalculator::kDefaultFrequency;
}
using NativeRate = AudioProcessing::NativeRate;
const int maximal_frequency = *std::max_element(
preferred_sample_rates.cbegin(), preferred_sample_rates.cend());
RTC_DCHECK_LE(NativeRate::kSampleRate8kHz, maximal_frequency);
RTC_DCHECK_GE(NativeRate::kSampleRate48kHz, maximal_frequency);
static constexpr NativeRate native_rates[] = {
NativeRate::kSampleRate8kHz, NativeRate::kSampleRate16kHz,
NativeRate::kSampleRate32kHz, NativeRate::kSampleRate48kHz};
const auto* rounded_up_index = std::lower_bound(
std::begin(native_rates), std::end(native_rates), maximal_frequency);
RTC_DCHECK(rounded_up_index != std::end(native_rates));
return *rounded_up_index;
}
} // namespace webrtc

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
#define MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
#include <vector>
#include "api/array_view.h"
#include "modules/audio_mixer/output_rate_calculator.h"
namespace webrtc {
class DefaultOutputRateCalculator : public OutputRateCalculator {
public:
static const int kDefaultFrequency = 48000;
// Returns the lowest native rate that is greater than or equal to the
// highest preferred sample rate. A native rate is one in
// AudioProcessing::NativeRate. If `preferred_sample_rates` is
// empty, returns `kDefaultFrequency`.
int CalculateOutputRateFromRange(
rtc::ArrayView<const int> preferred_sample_rates) override;
~DefaultOutputRateCalculator() override {}
};
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
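A small illustration of the rounding behaviour described above (hypothetical caller code; the expected values follow from the native-rate table 8/16/32/48 kHz):

#include "api/array_view.h"
#include "modules/audio_mixer/default_output_rate_calculator.h"

void OutputRateExamples() {
  webrtc::DefaultOutputRateCalculator calculator;
  const int rates_a[] = {16000, 44100};
  int rate = calculator.CalculateOutputRateFromRange(rates_a);  // 48000: 44100 rounds up.
  const int rates_b[] = {8000, 32000};
  rate = calculator.CalculateOutputRateFromRange(rates_b);      // 32000: already a native rate.
  rate = calculator.CalculateOutputRateFromRange(
      rtc::ArrayView<const int>());                             // 48000: kDefaultFrequency.
  (void)rate;
}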

@@ -0,0 +1,213 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_mixer/frame_combiner.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "api/array_view.h"
#include "api/rtp_packet_info.h"
#include "api/rtp_packet_infos.h"
#include "common_audio/include/audio_util.h"
#include "modules/audio_mixer/audio_frame_manipulator.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
#include "modules/audio_processing/include/audio_frame_view.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/logging/apm_data_dumper.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "system_wrappers/include/metrics.h"
namespace webrtc {
namespace {
using MixingBuffer =
std::array<std::array<float, FrameCombiner::kMaximumChannelSize>,
FrameCombiner::kMaximumNumberOfChannels>;
void SetAudioFrameFields(rtc::ArrayView<const AudioFrame* const> mix_list,
size_t number_of_channels,
int sample_rate,
size_t number_of_streams,
AudioFrame* audio_frame_for_mixing) {
const size_t samples_per_channel = static_cast<size_t>(
(sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
// TODO(minyue): Issue bugs.webrtc.org/3390.
// Audio frame timestamp. The 'timestamp_' field is set to dummy
// value '0', because it is only supported in the one channel case and
// is then updated in the helper functions.
audio_frame_for_mixing->UpdateFrame(
0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
AudioFrame::kVadUnknown, number_of_channels);
if (mix_list.empty()) {
audio_frame_for_mixing->elapsed_time_ms_ = -1;
} else {
audio_frame_for_mixing->timestamp_ = mix_list[0]->timestamp_;
audio_frame_for_mixing->elapsed_time_ms_ = mix_list[0]->elapsed_time_ms_;
audio_frame_for_mixing->ntp_time_ms_ = mix_list[0]->ntp_time_ms_;
std::vector<RtpPacketInfo> packet_infos;
for (const auto& frame : mix_list) {
audio_frame_for_mixing->timestamp_ =
std::min(audio_frame_for_mixing->timestamp_, frame->timestamp_);
audio_frame_for_mixing->ntp_time_ms_ =
std::min(audio_frame_for_mixing->ntp_time_ms_, frame->ntp_time_ms_);
audio_frame_for_mixing->elapsed_time_ms_ = std::max(
audio_frame_for_mixing->elapsed_time_ms_, frame->elapsed_time_ms_);
packet_infos.insert(packet_infos.end(), frame->packet_infos_.begin(),
frame->packet_infos_.end());
}
audio_frame_for_mixing->packet_infos_ =
RtpPacketInfos(std::move(packet_infos));
}
}
void MixFewFramesWithNoLimiter(rtc::ArrayView<const AudioFrame* const> mix_list,
AudioFrame* audio_frame_for_mixing) {
if (mix_list.empty()) {
audio_frame_for_mixing->Mute();
return;
}
RTC_DCHECK_LE(mix_list.size(), 1);
std::copy(mix_list[0]->data(),
mix_list[0]->data() +
mix_list[0]->num_channels_ * mix_list[0]->samples_per_channel_,
audio_frame_for_mixing->mutable_data());
}
void MixToFloatFrame(rtc::ArrayView<const AudioFrame* const> mix_list,
size_t samples_per_channel,
size_t number_of_channels,
MixingBuffer* mixing_buffer) {
RTC_DCHECK_LE(samples_per_channel, FrameCombiner::kMaximumChannelSize);
RTC_DCHECK_LE(number_of_channels, FrameCombiner::kMaximumNumberOfChannels);
// Clear the mixing buffer.
*mixing_buffer = {};
// Convert to FloatS16 and mix.
for (size_t i = 0; i < mix_list.size(); ++i) {
const AudioFrame* const frame = mix_list[i];
const int16_t* const frame_data = frame->data();
for (size_t j = 0; j < std::min(number_of_channels,
FrameCombiner::kMaximumNumberOfChannels);
++j) {
for (size_t k = 0; k < std::min(samples_per_channel,
FrameCombiner::kMaximumChannelSize);
++k) {
(*mixing_buffer)[j][k] += frame_data[number_of_channels * k + j];
}
}
}
}
void RunLimiter(AudioFrameView<float> mixing_buffer_view, Limiter* limiter) {
const size_t sample_rate = mixing_buffer_view.samples_per_channel() * 1000 /
AudioMixerImpl::kFrameDurationInMs;
// TODO(alessiob): Avoid calling SetSampleRate every time.
limiter->SetSampleRate(sample_rate);
limiter->Process(mixing_buffer_view);
}
// Both interleaves and rounds.
void InterleaveToAudioFrame(AudioFrameView<const float> mixing_buffer_view,
AudioFrame* audio_frame_for_mixing) {
const size_t number_of_channels = mixing_buffer_view.num_channels();
const size_t samples_per_channel = mixing_buffer_view.samples_per_channel();
int16_t* const mixing_data = audio_frame_for_mixing->mutable_data();
// Put data in the result frame.
for (size_t i = 0; i < number_of_channels; ++i) {
for (size_t j = 0; j < samples_per_channel; ++j) {
mixing_data[number_of_channels * j + i] =
FloatS16ToS16(mixing_buffer_view.channel(i)[j]);
}
}
}
} // namespace
constexpr size_t FrameCombiner::kMaximumNumberOfChannels;
constexpr size_t FrameCombiner::kMaximumChannelSize;
FrameCombiner::FrameCombiner(bool use_limiter)
: data_dumper_(new ApmDataDumper(0)),
mixing_buffer_(
std::make_unique<std::array<std::array<float, kMaximumChannelSize>,
kMaximumNumberOfChannels>>()),
limiter_(static_cast<size_t>(48000), data_dumper_.get(), "AudioMixer"),
use_limiter_(use_limiter) {
static_assert(kMaximumChannelSize * kMaximumNumberOfChannels <=
AudioFrame::kMaxDataSizeSamples,
"");
}
FrameCombiner::~FrameCombiner() = default;
void FrameCombiner::Combine(rtc::ArrayView<AudioFrame* const> mix_list,
size_t number_of_channels,
int sample_rate,
size_t number_of_streams,
AudioFrame* audio_frame_for_mixing) {
RTC_DCHECK(audio_frame_for_mixing);
SetAudioFrameFields(mix_list, number_of_channels, sample_rate,
number_of_streams, audio_frame_for_mixing);
const size_t samples_per_channel = static_cast<size_t>(
(sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
for (const auto* frame : mix_list) {
RTC_DCHECK_EQ(samples_per_channel, frame->samples_per_channel_);
RTC_DCHECK_EQ(sample_rate, frame->sample_rate_hz_);
}
// The 'num_channels_' field of frames in 'mix_list' could be
// different from 'number_of_channels'.
for (auto* frame : mix_list) {
RemixFrame(number_of_channels, frame);
}
if (number_of_streams <= 1) {
MixFewFramesWithNoLimiter(mix_list, audio_frame_for_mixing);
return;
}
MixToFloatFrame(mix_list, samples_per_channel, number_of_channels,
mixing_buffer_.get());
const size_t output_number_of_channels =
std::min(number_of_channels, kMaximumNumberOfChannels);
const size_t output_samples_per_channel =
std::min(samples_per_channel, kMaximumChannelSize);
// Put float data in an AudioFrameView.
std::array<float*, kMaximumNumberOfChannels> channel_pointers{};
for (size_t i = 0; i < output_number_of_channels; ++i) {
channel_pointers[i] = &(*mixing_buffer_.get())[i][0];
}
AudioFrameView<float> mixing_buffer_view(&channel_pointers[0],
output_number_of_channels,
output_samples_per_channel);
if (use_limiter_) {
RunLimiter(mixing_buffer_view, &limiter_);
}
InterleaveToAudioFrame(mixing_buffer_view, audio_frame_for_mixing);
}
} // namespace webrtc

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
#define MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
#include <array>
#include <memory>
#include <vector>
#include "api/array_view.h"
#include "api/audio/audio_frame.h"
#include "modules/audio_processing/agc2/limiter.h"
namespace webrtc {
class ApmDataDumper;
class FrameCombiner {
public:
explicit FrameCombiner(bool use_limiter);
~FrameCombiner();
// Combine several frames into one. Assumes sample_rate,
// samples_per_channel of the input frames match the parameters. The
// parameters 'number_of_channels' and 'sample_rate' are needed
// because 'mix_list' can be empty. The parameter
// 'number_of_streams' is used for determining whether to pass the
// data through a limiter.
void Combine(rtc::ArrayView<AudioFrame* const> mix_list,
size_t number_of_channels,
int sample_rate,
size_t number_of_streams,
AudioFrame* audio_frame_for_mixing);
// Maximum supported mixing configuration: 8 channels, 48 kHz, 10 ms frames.
static constexpr size_t kMaximumNumberOfChannels = 8;
static constexpr size_t kMaximumChannelSize = 48 * 10;
using MixingBuffer = std::array<std::array<float, kMaximumChannelSize>,
kMaximumNumberOfChannels>;
private:
std::unique_ptr<ApmDataDumper> data_dumper_;
std::unique_ptr<MixingBuffer> mixing_buffer_;
Limiter limiter_;
const bool use_limiter_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
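A hedged sketch of calling FrameCombiner directly, outside the mixer (hypothetical, not part of this commit). It assumes two already-resampled 48 kHz mono frames and reuses the SineWaveGenerator from this commit to fill them.

#include <vector>

#include "api/audio/audio_frame.h"
#include "modules/audio_mixer/frame_combiner.h"
#include "modules/audio_mixer/sine_wave_generator.h"

void CombineTwoTones() {
  webrtc::AudioFrame a, b;
  webrtc::SineWaveGenerator gen_a(440.f, 8000), gen_b(660.f, 8000);
  for (webrtc::AudioFrame* frame : {&a, &b}) {
    frame->UpdateFrame(0, nullptr, /*samples_per_channel=*/480,
                       /*sample_rate_hz=*/48000, webrtc::AudioFrame::kNormalSpeech,
                       webrtc::AudioFrame::kVadActive, /*num_channels=*/1);
  }
  gen_a.GenerateNextFrame(&a);
  gen_b.GenerateNextFrame(&b);

  webrtc::FrameCombiner combiner(/*use_limiter=*/true);
  std::vector<webrtc::AudioFrame*> to_mix = {&a, &b};
  webrtc::AudioFrame mixed;
  // With more than one stream, the frames are mixed in float and passed
  // through the limiter before being interleaved into `mixed`.
  combiner.Combine(to_mix, /*number_of_channels=*/1, /*sample_rate=*/48000,
                   /*number_of_streams=*/to_mix.size(), &mixed);
}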

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_mixer/gain_change_calculator.h"
#include <math.h>
#include <cstdlib>
#include <vector>
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
constexpr int16_t kReliabilityThreshold = 100;
} // namespace
float GainChangeCalculator::CalculateGainChange(
rtc::ArrayView<const int16_t> in,
rtc::ArrayView<const int16_t> out) {
RTC_DCHECK_EQ(in.size(), out.size());
std::vector<float> gain(in.size());
CalculateGain(in, out, gain);
return CalculateDifferences(gain);
}
float GainChangeCalculator::LatestGain() const {
return last_reliable_gain_;
}
void GainChangeCalculator::CalculateGain(rtc::ArrayView<const int16_t> in,
rtc::ArrayView<const int16_t> out,
rtc::ArrayView<float> gain) {
RTC_DCHECK_EQ(in.size(), out.size());
RTC_DCHECK_EQ(in.size(), gain.size());
for (size_t i = 0; i < in.size(); ++i) {
if (std::abs(in[i]) >= kReliabilityThreshold) {
last_reliable_gain_ = out[i] / static_cast<float>(in[i]);
}
gain[i] = last_reliable_gain_;
}
}
float GainChangeCalculator::CalculateDifferences(
rtc::ArrayView<const float> values) {
float res = 0;
for (float f : values) {
res += fabs(f - last_value_);
last_value_ = f;
}
return res;
}
} // namespace webrtc

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
#define MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
#include <stdint.h>
#include "api/array_view.h"
namespace webrtc {
class GainChangeCalculator {
public:
// The 'out' signal is assumed to be produced from 'in' by applying
// a smoothly varying gain. This method computes variations of the
// gain and handles special cases when the samples are small.
float CalculateGainChange(rtc::ArrayView<const int16_t> in,
rtc::ArrayView<const int16_t> out);
float LatestGain() const;
private:
void CalculateGain(rtc::ArrayView<const int16_t> in,
rtc::ArrayView<const int16_t> out,
rtc::ArrayView<float> gain);
float CalculateDifferences(rtc::ArrayView<const float> values);
float last_value_ = 0.f;
float last_reliable_gain_ = 1.0f;
};
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
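A tiny worked example of the calculator above (hypothetical values): with in = {1000, ...} and out = {2000, ...} the reliable gain is 2.0; on the first call the tracked gain jumps from its initial value 0 to 2.0, so the returned sum of absolute changes is 2.0, and a second identical call would return 0.

#include "api/array_view.h"
#include "modules/audio_mixer/gain_change_calculator.h"

void GainChangeExample() {
  webrtc::GainChangeCalculator calculator;
  const int16_t in[] = {1000, 1000, 1000, 1000};
  const int16_t out[] = {2000, 2000, 2000, 2000};
  float change = calculator.CalculateGainChange(in, out);  // 2.0 on the first call.
  float gain = calculator.LatestGain();                    // 2.0
  (void)change;
  (void)gain;
}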

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
#define MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
#include <vector>
#include "api/array_view.h"
namespace webrtc {
// Decides the sample rate of a mixing iteration given the preferred
// sample rates of the sources.
class OutputRateCalculator {
public:
virtual int CalculateOutputRateFromRange(
rtc::ArrayView<const int> preferred_sample_rates) = 0;
virtual ~OutputRateCalculator() {}
};
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
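The interface makes the mixing rate pluggable. A minimal alternative implementation is sketched below (hypothetical, not part of this commit); it pins the mixer to one rate regardless of source preferences and would be injected via AudioMixerImpl::Create(std::unique_ptr<OutputRateCalculator>, bool).

#include "api/array_view.h"
#include "modules/audio_mixer/output_rate_calculator.h"

// Hypothetical calculator that always mixes at 16 kHz. The returned rate should
// be one the downstream components support (a native APM rate).
class FixedOutputRateCalculator : public webrtc::OutputRateCalculator {
 public:
  int CalculateOutputRateFromRange(
      rtc::ArrayView<const int> preferred_sample_rates) override {
    return 16000;
  }
};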

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_mixer/sine_wave_generator.h"
#include <math.h>
#include <stddef.h>
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
namespace {
constexpr float kPi = 3.14159265f;
} // namespace
void SineWaveGenerator::GenerateNextFrame(AudioFrame* frame) {
RTC_DCHECK(frame);
int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_; ++i) {
for (size_t ch = 0; ch < frame->num_channels_; ++ch) {
frame_data[frame->num_channels_ * i + ch] =
rtc::saturated_cast<int16_t>(amplitude_ * sinf(phase_));
}
phase_ += wave_frequency_hz_ * 2 * kPi / frame->sample_rate_hz_;
}
}
} // namespace webrtc

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
#define MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
#include <stdint.h>
#include "api/audio/audio_frame.h"
#include "rtc_base/checks.h"
namespace webrtc {
class SineWaveGenerator {
public:
SineWaveGenerator(float wave_frequency_hz, int16_t amplitude)
: wave_frequency_hz_(wave_frequency_hz), amplitude_(amplitude) {
RTC_DCHECK_GT(wave_frequency_hz, 0);
}
// Overwrites the samples in `frame` with a sine wave, based on
// frame->num_channels_, frame->sample_rate_hz_ and frame->samples_per_channel_.
void GenerateNextFrame(AudioFrame* frame);
private:
float phase_ = 0.f;
const float wave_frequency_hz_;
const int16_t amplitude_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
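A short usage sketch for the generator (hypothetical caller code): the frame's sample rate, channel count and length are set first, then GenerateNextFrame() fills the samples with the tone, advancing the phase by 2*pi*f/sample_rate per sample.

#include "api/audio/audio_frame.h"
#include "modules/audio_mixer/sine_wave_generator.h"

void GenerateOneFrameOfTone() {
  webrtc::SineWaveGenerator generator(/*wave_frequency_hz=*/1000.f,
                                      /*amplitude=*/5000);
  webrtc::AudioFrame frame;
  frame.UpdateFrame(0, nullptr, /*samples_per_channel=*/160,
                    /*sample_rate_hz=*/16000, webrtc::AudioFrame::kNormalSpeech,
                    webrtc::AudioFrame::kVadActive, /*num_channels=*/1);
  generator.GenerateNextFrame(&frame);  // 10 ms of a 1 kHz tone at 16 kHz.
}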