Repo created

parent 81b91f4139
commit f8c34fa5ee

22732 changed files with 4815320 additions and 2 deletions
@@ -0,0 +1,2 @@
henrika@webrtc.org
tkchin@webrtc.org
modules/audio_device/android/aaudio_player.cc
@@ -0,0 +1,216 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/aaudio_player.h"

#include <memory>

#include "api/array_view.h"
#include "api/task_queue/task_queue_base.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"

namespace webrtc {

AAudioPlayer::AAudioPlayer(AudioManager* audio_manager)
    : main_thread_(TaskQueueBase::Current()),
      aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) {
  RTC_LOG(LS_INFO) << "ctor";
  thread_checker_aaudio_.Detach();
}

AAudioPlayer::~AAudioPlayer() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  Terminate();
  RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_;
}

int AAudioPlayer::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  if (aaudio_.audio_parameters().channels() == 2) {
    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
  }
  return 0;
}

int AAudioPlayer::Terminate() {
  RTC_LOG(LS_INFO) << "Terminate";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  StopPlayout();
  return 0;
}

int AAudioPlayer::InitPlayout() {
  RTC_LOG(LS_INFO) << "InitPlayout";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!playing_);
  if (!aaudio_.Init()) {
    return -1;
  }
  initialized_ = true;
  return 0;
}

bool AAudioPlayer::PlayoutIsInitialized() const {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  return initialized_;
}

int AAudioPlayer::StartPlayout() {
  RTC_LOG(LS_INFO) << "StartPlayout";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  RTC_DCHECK(!playing_);
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Playout cannot start since InitPlayout must succeed first";
    return 0;
  }
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
  if (!aaudio_.Start()) {
    return -1;
  }
  underrun_count_ = aaudio_.xrun_count();
  first_data_callback_ = true;
  playing_ = true;
  return 0;
}

int AAudioPlayer::StopPlayout() {
  RTC_LOG(LS_INFO) << "StopPlayout";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  if (!initialized_ || !playing_) {
    return 0;
  }
  if (!aaudio_.Stop()) {
    RTC_LOG(LS_ERROR) << "StopPlayout failed";
    return -1;
  }
  thread_checker_aaudio_.Detach();
  initialized_ = false;
  playing_ = false;
  return 0;
}

bool AAudioPlayer::Playing() const {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  return playing_;
}

void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  RTC_DLOG(LS_INFO) << "AttachAudioBuffer";
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  audio_device_buffer_ = audioBuffer;
  const AudioParameters audio_parameters = aaudio_.audio_parameters();
  audio_device_buffer_->SetPlayoutSampleRate(audio_parameters.sample_rate());
  audio_device_buffer_->SetPlayoutChannels(audio_parameters.channels());
  RTC_CHECK(audio_device_buffer_);
  // Create a modified audio buffer class which allows us to ask for any number
  // of samples (and not only multiples of 10ms) to match the optimal buffer
  // size per callback used by AAudio.
  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
}

int AAudioPlayer::SpeakerVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

void AAudioPlayer::OnErrorCallback(aaudio_result_t error) {
  RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
  // TODO(henrika): investigate if we can use a thread checker here. Initial
  // tests show that this callback can sometimes be called on a unique thread,
  // but according to the documentation it should be on the same thread as the
  // data callback.
  // RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
  if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
    // The stream is disconnected and any attempt to use it will return
    // AAUDIO_ERROR_DISCONNECTED.
    RTC_LOG(LS_WARNING) << "Output stream disconnected";
    // AAudio documentation states: "You should not close or reopen the stream
    // from the callback, use another thread instead". A message is therefore
    // sent to the main thread to do the restart operation.
    RTC_DCHECK(main_thread_);
    main_thread_->PostTask([this] { HandleStreamDisconnected(); });
  }
}

aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
                                                           int32_t num_frames) {
  RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
  // Log device id in first data callback to ensure that a valid device is
  // utilized.
  if (first_data_callback_) {
    RTC_LOG(LS_INFO) << "--- First output data callback: "
                        "device id="
                     << aaudio_.device_id();
    first_data_callback_ = false;
  }

  // Check if the underrun count has increased. If it has, increase the buffer
  // size by adding the size of a burst. It will reduce the risk of underruns
  // at the expense of an increased latency.
  // TODO(henrika): enable possibility to disable and/or tune the algorithm.
  const int32_t underrun_count = aaudio_.xrun_count();
  if (underrun_count > underrun_count_) {
    RTC_LOG(LS_ERROR) << "Underrun detected: " << underrun_count;
    underrun_count_ = underrun_count;
    aaudio_.IncreaseOutputBufferSize();
  }

  // Estimate latency between writing an audio frame to the output stream and
  // the time that same frame is played out on the output audio device.
  latency_millis_ = aaudio_.EstimateLatencyMillis();
  // TODO(henrika): use for development only.
  if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) {
    RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_
                      << ", num_frames: " << num_frames;
  }

  // Read audio data from the WebRTC source using the FineAudioBuffer object
  // and write that data into `audio_data` to be played out by AAudio.
  // Prime output with zeros during a short initial phase to avoid distortion.
  // TODO(henrika): do more work to figure out if the initial forced silence
  // period is really needed.
  if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
    const size_t num_bytes =
        sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
    memset(audio_data, 0, num_bytes);
  } else {
    fine_audio_buffer_->GetPlayoutData(
        rtc::MakeArrayView(static_cast<int16_t*>(audio_data),
                           aaudio_.samples_per_frame() * num_frames),
        static_cast<int>(latency_millis_ + 0.5));
  }

  // TODO(henrika): possibly add trace here to be included in systrace.
  // See https://developer.android.com/studio/profile/systrace-commandline.html.
  return AAUDIO_CALLBACK_RESULT_CONTINUE;
}

void AAudioPlayer::HandleStreamDisconnected() {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  RTC_DLOG(LS_INFO) << "HandleStreamDisconnected";
  if (!initialized_ || !playing_) {
    return;
  }
  // Perform a restart by first closing the disconnected stream and then
  // starting a new stream; this time using the new (preferred) audio output
  // device.
  StopPlayout();
  InitPlayout();
  StartPlayout();
}

}  // namespace webrtc
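For context: the zero-priming branch in OnDataCallback() silences the first 50 bursts of output. A quick sanity check of that duration, using assumed example numbers (192 frames per burst at 48 kHz, the same figures used in the FineAudioBuffer comment in aaudio_player.h; real burst sizes are device-dependent):

// Sketch only; kFramesPerBurst and kSampleRate are assumed example values,
// not queried from a real stream (AAudioStream_getFramesPerBurst() would
// provide the actual burst size).
constexpr int32_t kPrimingBursts = 50;    // matches the check above
constexpr int32_t kFramesPerBurst = 192;  // assumed
constexpr int32_t kSampleRate = 48000;    // assumed, in Hz
// 50 * 192 = 9600 frames; 9600 / 48000 Hz = 0.2 s of initial silence.
constexpr double kPrimingSeconds =
    static_cast<double>(kPrimingBursts * kFramesPerBurst) / kSampleRate;
static_assert(kPrimingBursts * kFramesPerBurst == 9600,
              "priming covers 9600 frames, i.e. 200 ms at 48 kHz");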
modules/audio_device/android/aaudio_player.h
@@ -0,0 +1,141 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_

#include <aaudio/AAudio.h>

#include <memory>

#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "modules/audio_device/android/aaudio_wrapper.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/thread_annotations.h"

namespace webrtc {

class AudioDeviceBuffer;
class FineAudioBuffer;
class AudioManager;

// Implements low-latency 16-bit mono PCM audio output support for Android
// using the C based AAudio API.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread. Audio buffers
// are requested on a dedicated high-priority thread owned by AAudio.
//
// The existing design forces the user to call InitPlayout() after
// StopPlayout() to be able to call StartPlayout() again. This is in line with
// how the Java-based implementation works.
//
// An audio stream can be disconnected, e.g. when an audio device is removed.
// This implementation will restart the audio stream using the new preferred
// device if such an event happens.
//
// Also supports automatic buffer-size adjustment based on underrun detections
// where the internal AAudio buffer can be increased when needed. It will
// reduce the risk of underruns (~glitches) at the expense of an increased
// latency.
class AAudioPlayer final : public AAudioObserverInterface {
 public:
  explicit AAudioPlayer(AudioManager* audio_manager);
  ~AAudioPlayer();

  int Init();
  int Terminate();

  int InitPlayout();
  bool PlayoutIsInitialized() const;

  int StartPlayout();
  int StopPlayout();
  bool Playing() const;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  // Not implemented in AAudio.
  int SpeakerVolumeIsAvailable(bool& available);  // NOLINT
  int SetSpeakerVolume(uint32_t volume) { return -1; }
  int SpeakerVolume(uint32_t& volume) const { return -1; }  // NOLINT
  int MaxSpeakerVolume(uint32_t& maxVolume) const { return -1; }  // NOLINT
  int MinSpeakerVolume(uint32_t& minVolume) const { return -1; }  // NOLINT

 protected:
  // AAudioObserverInterface implementation.

  // For an output stream, this function should render and write `num_frames`
  // of data in the stream's current data format to the `audio_data` buffer.
  // Called on a real-time thread owned by AAudio.
  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
                                               int32_t num_frames) override;
  // AAudio calls this function if any error occurs on a callback thread.
  // Called on a real-time thread owned by AAudio.
  void OnErrorCallback(aaudio_result_t error) override;

 private:
  // Closes the existing stream and starts a new stream.
  void HandleStreamDisconnected();

  // Ensures that methods are called from the same thread as this object is
  // created on.
  SequenceChecker main_thread_checker_;

  // Stores the thread ID in the first call to AAudioPlayer::OnDataCallback
  // from a real-time thread owned by AAudio. Detached during construction of
  // this object.
  SequenceChecker thread_checker_aaudio_;

  // The task queue on which this object is created.
  TaskQueueBase* main_thread_;

  // Wraps all AAudio resources. Contains an output stream using the default
  // output audio device. Can be accessed on both the main thread and the
  // real-time thread owned by AAudio. See separate AAudio documentation about
  // thread safety.
  AAudioWrapper aaudio_;

  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
  // in chunks of 10ms. It then allows for this data to be pulled in
  // a finer or coarser granularity. I.e., by interacting with this class
  // instead of directly with the AudioDeviceBuffer, one can ask for any
  // number of audio samples.
  // Example: the native buffer size can be 192 audio frames at a 48kHz sample
  // rate. WebRTC will provide 480 audio frames per 10ms but AAudio asks for
  // 192 in each callback (once every 4th ms). This class can then ask for 192
  // and the FineAudioBuffer will ask WebRTC for new data approximately only
  // every second callback and also cache non-utilized audio.
  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;

  // Counts the number of detected underrun events reported by AAudio.
  int32_t underrun_count_ = 0;

  // True only for the first data callback in each audio session.
  bool first_data_callback_ = true;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_ RTC_GUARDED_BY(main_thread_checker_) =
      nullptr;

  bool initialized_ RTC_GUARDED_BY(main_thread_checker_) = false;
  bool playing_ RTC_GUARDED_BY(main_thread_checker_) = false;

  // Estimated latency between writing an audio frame to the output stream and
  // the time that same frame is played out on the output audio device.
  double latency_millis_ RTC_GUARDED_BY(thread_checker_aaudio_) = 0;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
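The lifecycle constraints documented above (InitPlayout() must succeed before StartPlayout(), and must be repeated after StopPlayout()) translate into the following call order. This is a hypothetical driver sketch only; in the tree the class is driven by AudioDeviceTemplate rather than called directly, and `manager` and `adb` stand in for an AudioManager and an AudioDeviceBuffer created elsewhere:

// Hypothetical caller code; `manager` and `adb` are assumed to exist.
webrtc::AAudioPlayer player(manager);
player.Init();
player.AttachAudioBuffer(adb);
player.InitPlayout();   // must succeed before StartPlayout()
player.StartPlayout();  // OnDataCallback() now fires on the AAudio thread
player.StopPlayout();   // closes the stream...
player.InitPlayout();   // ...so InitPlayout() is required again
player.StartPlayout();
player.StopPlayout();
player.Terminate();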
modules/audio_device/android/aaudio_recorder.cc
@@ -0,0 +1,205 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/aaudio_recorder.h"

#include <memory>

#include "api/array_view.h"
#include "api/task_queue/task_queue_base.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

AAudioRecorder::AAudioRecorder(AudioManager* audio_manager)
    : main_thread_(TaskQueueBase::Current()),
      aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) {
  RTC_LOG(LS_INFO) << "ctor";
  thread_checker_aaudio_.Detach();
}

AAudioRecorder::~AAudioRecorder() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
  RTC_LOG(LS_INFO) << "detected overflows: " << overflow_count_;
}

int AAudioRecorder::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (aaudio_.audio_parameters().channels() == 2) {
    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
  }
  return 0;
}

int AAudioRecorder::Terminate() {
  RTC_LOG(LS_INFO) << "Terminate";
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopRecording();
  return 0;
}

int AAudioRecorder::InitRecording() {
  RTC_LOG(LS_INFO) << "InitRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!recording_);
  if (!aaudio_.Init()) {
    return -1;
  }
  initialized_ = true;
  return 0;
}

int AAudioRecorder::StartRecording() {
  RTC_LOG(LS_INFO) << "StartRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!recording_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
  if (!aaudio_.Start()) {
    return -1;
  }
  overflow_count_ = aaudio_.xrun_count();
  first_data_callback_ = true;
  recording_ = true;
  return 0;
}

int AAudioRecorder::StopRecording() {
  RTC_LOG(LS_INFO) << "StopRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !recording_) {
    return 0;
  }
  if (!aaudio_.Stop()) {
    return -1;
  }
  thread_checker_aaudio_.Detach();
  initialized_ = false;
  recording_ = false;
  return 0;
}

void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
  const AudioParameters audio_parameters = aaudio_.audio_parameters();
  audio_device_buffer_->SetRecordingSampleRate(audio_parameters.sample_rate());
  audio_device_buffer_->SetRecordingChannels(audio_parameters.channels());
  RTC_CHECK(audio_device_buffer_);
  // Create a modified audio buffer class which allows us to deliver any
  // number of samples (and not only multiples of 10ms which WebRTC uses) to
  // match the native AAudio buffer size.
  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
}

int AAudioRecorder::EnableBuiltInAEC(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable;
  RTC_LOG(LS_ERROR) << "Not implemented";
  return -1;
}

int AAudioRecorder::EnableBuiltInAGC(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInAGC: " << enable;
  RTC_LOG(LS_ERROR) << "Not implemented";
  return -1;
}

int AAudioRecorder::EnableBuiltInNS(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable;
  RTC_LOG(LS_ERROR) << "Not implemented";
  return -1;
}

void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
  RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
  // RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
  if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
    // The stream is disconnected and any attempt to use it will return
    // AAUDIO_ERROR_DISCONNECTED.
    RTC_LOG(LS_WARNING) << "Input stream disconnected => restart is required";
    // AAudio documentation states: "You should not close or reopen the stream
    // from the callback, use another thread instead". A message is therefore
    // sent to the main thread to do the restart operation.
    RTC_DCHECK(main_thread_);
    main_thread_->PostTask([this] { HandleStreamDisconnected(); });
  }
}

// Reads and processes `num_frames` of data from the `audio_data` buffer.
// TODO(henrika): possibly add trace here to be included in systrace.
// See https://developer.android.com/studio/profile/systrace-commandline.html.
aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
    void* audio_data,
    int32_t num_frames) {
  // TODO(henrika): figure out why we sometimes hit this one.
  // RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
  // RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames;
  // Drain the input buffer at the first callback to ensure that it does not
  // contain any old data. This also ensures that the lowest possible latency
  // is obtained.
  if (first_data_callback_) {
    RTC_LOG(LS_INFO) << "--- First input data callback: "
                        "device id="
                     << aaudio_.device_id();
    aaudio_.ClearInputStream(audio_data, num_frames);
    first_data_callback_ = false;
  }
  // Check if the overflow counter has increased and if so log a warning.
  // TODO(henrika): possibly add UMA stat or capacity extension.
  const int32_t overflow_count = aaudio_.xrun_count();
  if (overflow_count > overflow_count_) {
    RTC_LOG(LS_ERROR) << "Overflow detected: " << overflow_count;
    overflow_count_ = overflow_count;
  }
  // Estimate the time between when an audio frame was recorded by the input
  // device and when it can be read from the input stream.
  latency_millis_ = aaudio_.EstimateLatencyMillis();
  // TODO(henrika): use for development only.
  if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) {
    RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_
                      << ", num_frames: " << num_frames;
  }
  // Copy recorded audio in `audio_data` to the WebRTC sink using the
  // FineAudioBuffer object.
  fine_audio_buffer_->DeliverRecordedData(
      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
                         aaudio_.samples_per_frame() * num_frames),
      static_cast<int>(latency_millis_ + 0.5));

  return AAUDIO_CALLBACK_RESULT_CONTINUE;
}

void AAudioRecorder::HandleStreamDisconnected() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_LOG(LS_INFO) << "HandleStreamDisconnected";
  if (!initialized_ || !recording_) {
    return;
  }
  // Perform a restart by first closing the disconnected stream and then
  // starting a new stream; this time using the new (preferred) audio input
  // device.
  // TODO(henrika): resolve issue where one restart attempt leads to a long
  // sequence of new calls to OnErrorCallback().
  // See b/73148976 for details.
  StopRecording();
  InitRecording();
  StartRecording();
}

}  // namespace webrtc
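The DeliverRecordedData() call above hands FineAudioBuffer device-sized bursts, which it repackages into the 10 ms chunks WebRTC consumes. The buffering idea can be sketched as follows (an illustration of the concept only, not the actual FineAudioBuffer implementation; sizes assume 48 kHz mono, where 10 ms equals 480 frames):

#include <cstdint>
#include <vector>

constexpr int kChunkFrames = 480;  // 10 ms at 48 kHz, mono (assumed)

class TenMsAccumulator {
 public:
  // Called once per AAudio burst (e.g. 192 frames). Invokes `deliver` for
  // each complete 10 ms chunk and caches any remainder for the next burst.
  template <typename F>
  void OnBurst(const int16_t* data, int frames, F&& deliver) {
    cache_.insert(cache_.end(), data, data + frames);
    while (cache_.size() >= static_cast<size_t>(kChunkFrames)) {
      deliver(cache_.data(), kChunkFrames);
      cache_.erase(cache_.begin(), cache_.begin() + kChunkFrames);
    }
  }

 private:
  std::vector<int16_t> cache_;
};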
modules/audio_device/android/aaudio_recorder.h
@@ -0,0 +1,124 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_

#include <aaudio/AAudio.h>

#include <memory>

#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "modules/audio_device/android/aaudio_wrapper.h"
#include "modules/audio_device/include/audio_device_defines.h"

namespace webrtc {

class AudioDeviceBuffer;
class FineAudioBuffer;
class AudioManager;

// Implements low-latency 16-bit mono PCM audio input support for Android
// using the C based AAudio API.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread. Audio buffers
// are delivered on a dedicated high-priority thread owned by AAudio.
//
// The existing design forces the user to call InitRecording() after
// StopRecording() to be able to call StartRecording() again. This is in line
// with how the Java-based implementation works.
//
// TODO(henrika): add comments about device changes and adaptive buffer
// management.
class AAudioRecorder : public AAudioObserverInterface {
 public:
  explicit AAudioRecorder(AudioManager* audio_manager);
  ~AAudioRecorder();

  int Init();
  int Terminate();

  int InitRecording();
  bool RecordingIsInitialized() const { return initialized_; }

  int StartRecording();
  int StopRecording();
  bool Recording() const { return recording_; }

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  double latency_millis() const { return latency_millis_; }

  // TODO(henrika): add support using AAudio APIs when available.
  int EnableBuiltInAEC(bool enable);
  int EnableBuiltInAGC(bool enable);
  int EnableBuiltInNS(bool enable);

 protected:
  // AAudioObserverInterface implementation.

  // For an input stream, this function should read `num_frames` of recorded
  // data, in the stream's current data format, from the `audio_data` buffer.
  // Called on a real-time thread owned by AAudio.
  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
                                               int32_t num_frames) override;

  // AAudio calls this function if any error occurs on a callback thread.
  // Called on a real-time thread owned by AAudio.
  void OnErrorCallback(aaudio_result_t error) override;

 private:
  // Closes the existing stream and starts a new stream.
  void HandleStreamDisconnected();

  // Ensures that methods are called from the same thread as this object is
  // created on.
  SequenceChecker thread_checker_;

  // Stores the thread ID in the first call to AAudioRecorder::OnDataCallback
  // from a real-time thread owned by AAudio. Detached during construction of
  // this object.
  SequenceChecker thread_checker_aaudio_;

  // The thread on which this object is created.
  TaskQueueBase* main_thread_;

  // Wraps all AAudio resources. Contains an input stream using the default
  // input audio device.
  AAudioWrapper aaudio_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_ = nullptr;

  bool initialized_ = false;
  bool recording_ = false;

  // Consumes audio of native buffer size and feeds the WebRTC layer with
  // 10ms chunks of audio.
  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;

  // Counts the number of detected overflow events reported by AAudio.
  int32_t overflow_count_ = 0;

  // Estimated time between when an audio frame is recorded by the input
  // device and when it can be read from the input stream.
  double latency_millis_ = 0;

  // True only for the first data callback in each audio session.
  bool first_data_callback_ = true;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
modules/audio_device/android/aaudio_wrapper.cc
@@ -0,0 +1,499 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/aaudio_wrapper.h"

#include "modules/audio_device/android/audio_manager.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"

#define LOG_ON_ERROR(op)                                                      \
  do {                                                                        \
    aaudio_result_t result = (op);                                            \
    if (result != AAUDIO_OK) {                                                \
      RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
    }                                                                         \
  } while (0)

#define RETURN_ON_ERROR(op, ...)                                              \
  do {                                                                        \
    aaudio_result_t result = (op);                                            \
    if (result != AAUDIO_OK) {                                                \
      RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
      return __VA_ARGS__;                                                     \
    }                                                                         \
  } while (0)
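Both macros use the do { ... } while (0) idiom so that an invocation behaves as a single statement (safe inside an unbraced if/else), and RETURN_ON_ERROR forwards its variadic arguments as the return value, which allows RETURN_ON_ERROR(op, false) in functions returning bool and RETURN_ON_ERROR(op) in void functions. Roughly, RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false) expands to:

do {
  aaudio_result_t result = (AAudioStream_requestStart(stream_));
  if (result != AAUDIO_OK) {
    RTC_LOG(LS_ERROR) << "AAudioStream_requestStart(stream_)"
                      << ": " << AAudio_convertResultToText(result);
    return false;  // the `false` is supplied via __VA_ARGS__
  }
} while (0);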
namespace webrtc {

namespace {

const char* DirectionToString(aaudio_direction_t direction) {
  switch (direction) {
    case AAUDIO_DIRECTION_OUTPUT:
      return "OUTPUT";
    case AAUDIO_DIRECTION_INPUT:
      return "INPUT";
    default:
      return "UNKNOWN";
  }
}

const char* SharingModeToString(aaudio_sharing_mode_t mode) {
  switch (mode) {
    case AAUDIO_SHARING_MODE_EXCLUSIVE:
      return "EXCLUSIVE";
    case AAUDIO_SHARING_MODE_SHARED:
      return "SHARED";
    default:
      return "UNKNOWN";
  }
}

const char* PerformanceModeToString(aaudio_performance_mode_t mode) {
  switch (mode) {
    case AAUDIO_PERFORMANCE_MODE_NONE:
      return "NONE";
    case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
      return "POWER_SAVING";
    case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
      return "LOW_LATENCY";
    default:
      return "UNKNOWN";
  }
}

const char* FormatToString(int32_t id) {
  switch (id) {
    case AAUDIO_FORMAT_INVALID:
      return "INVALID";
    case AAUDIO_FORMAT_UNSPECIFIED:
      return "UNSPECIFIED";
    case AAUDIO_FORMAT_PCM_I16:
      return "PCM_I16";
    case AAUDIO_FORMAT_PCM_FLOAT:
      return "FLOAT";
    default:
      return "UNKNOWN";
  }
}

void ErrorCallback(AAudioStream* stream,
                   void* user_data,
                   aaudio_result_t error) {
  RTC_DCHECK(user_data);
  AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
  RTC_LOG(LS_WARNING) << "ErrorCallback: "
                      << DirectionToString(aaudio_wrapper->direction());
  RTC_DCHECK(aaudio_wrapper->observer());
  aaudio_wrapper->observer()->OnErrorCallback(error);
}

aaudio_data_callback_result_t DataCallback(AAudioStream* stream,
                                           void* user_data,
                                           void* audio_data,
                                           int32_t num_frames) {
  RTC_DCHECK(user_data);
  RTC_DCHECK(audio_data);
  AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
  RTC_DCHECK(aaudio_wrapper->observer());
  return aaudio_wrapper->observer()->OnDataCallback(audio_data, num_frames);
}

// Wraps the stream builder object to ensure that it is released properly when
// the stream builder goes out of scope.
class ScopedStreamBuilder {
 public:
  ScopedStreamBuilder() {
    LOG_ON_ERROR(AAudio_createStreamBuilder(&builder_));
    RTC_DCHECK(builder_);
  }
  ~ScopedStreamBuilder() {
    if (builder_) {
      LOG_ON_ERROR(AAudioStreamBuilder_delete(builder_));
    }
  }

  AAudioStreamBuilder* get() const { return builder_; }

 private:
  AAudioStreamBuilder* builder_ = nullptr;
};

}  // namespace

AAudioWrapper::AAudioWrapper(AudioManager* audio_manager,
                             aaudio_direction_t direction,
                             AAudioObserverInterface* observer)
    : direction_(direction), observer_(observer) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_DCHECK(observer_);
  if (direction_ == AAUDIO_DIRECTION_OUTPUT) {
    audio_parameters_ = audio_manager->GetPlayoutAudioParameters();
  } else {
    audio_parameters_ = audio_manager->GetRecordAudioParameters();
  }
  aaudio_thread_checker_.Detach();
  RTC_LOG(LS_INFO) << audio_parameters_.ToString();
}

AAudioWrapper::~AAudioWrapper() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!stream_);
}

bool AAudioWrapper::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  // Creates a stream builder which can be used to open an audio stream.
  ScopedStreamBuilder builder;
  // Configures the stream builder using audio parameters given at
  // construction.
  SetStreamConfiguration(builder.get());
  // Opens a stream based on options in the stream builder.
  if (!OpenStream(builder.get())) {
    return false;
  }
  // Ensures that the opened stream could activate the requested settings.
  if (!VerifyStreamConfiguration()) {
    return false;
  }
  // Optimizes the buffer scheme for lowest possible latency and creates
  // additional buffer logic to match the 10ms buffer size used in WebRTC.
  if (!OptimizeBuffers()) {
    return false;
  }
  LogStreamState();
  return true;
}

bool AAudioWrapper::Start() {
  RTC_LOG(LS_INFO) << "Start";
  RTC_DCHECK(thread_checker_.IsCurrent());
  // TODO(henrika): this state check might not be needed.
  aaudio_stream_state_t current_state = AAudioStream_getState(stream_);
  if (current_state != AAUDIO_STREAM_STATE_OPEN) {
    RTC_LOG(LS_ERROR) << "Invalid state: "
                      << AAudio_convertStreamStateToText(current_state);
    return false;
  }
  // Asynchronous request for the stream to start.
  RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false);
  LogStreamState();
  return true;
}

bool AAudioWrapper::Stop() {
  RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction());
  RTC_DCHECK(thread_checker_.IsCurrent());
  // Asynchronous request for the stream to stop.
  RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false);
  CloseStream();
  aaudio_thread_checker_.Detach();
  return true;
}

double AAudioWrapper::EstimateLatencyMillis() const {
  RTC_DCHECK(stream_);
  double latency_millis = 0.0;
  if (direction() == AAUDIO_DIRECTION_INPUT) {
    // For input streams, the best guess we can make is to use the current
    // burst size as the delay estimate.
    latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() *
                     rtc::kNumMillisecsPerSec;
  } else {
    int64_t existing_frame_index;
    int64_t existing_frame_presentation_time;
    // Get the time at which a particular frame was presented to audio
    // hardware.
    aaudio_result_t result = AAudioStream_getTimestamp(
        stream_, CLOCK_MONOTONIC, &existing_frame_index,
        &existing_frame_presentation_time);
    // Results are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
    if (result == AAUDIO_OK) {
      // Get the write index for the next audio frame.
      int64_t next_frame_index = frames_written();
      // Number of frames between the next frame and the existing frame.
      int64_t frame_index_delta = next_frame_index - existing_frame_index;
      // Assume the next frame will be written now.
      int64_t next_frame_write_time = rtc::TimeNanos();
      // Calculate the time when the next frame will be presented to the
      // hardware, taking the sample rate into account.
      int64_t frame_time_delta =
          (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate();
      int64_t next_frame_presentation_time =
          existing_frame_presentation_time + frame_time_delta;
      // Derive a latency estimate given the results above.
      latency_millis = static_cast<double>(next_frame_presentation_time -
                                           next_frame_write_time) /
                       rtc::kNumNanosecsPerMillisec;
    }
  }
  return latency_millis;
}

// Returns false if the buffer size could not be increased.
bool AAudioWrapper::IncreaseOutputBufferSize() {
  RTC_LOG(LS_INFO) << "IncreaseBufferSize";
  RTC_DCHECK(stream_);
  RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
  aaudio_result_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
  // Try to increase the size of the buffer by one burst to reduce the risk of
  // underruns.
  buffer_size += frames_per_burst();
  // Verify that the new buffer size is not larger than the max capacity.
  // TODO(henrika): keep track of case when we reach the capacity limit.
  const int32_t max_buffer_size = buffer_capacity_in_frames();
  if (buffer_size > max_buffer_size) {
    RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
                      << ") is higher than max: " << max_buffer_size;
    return false;
  }
  RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size
                   << " (max=" << max_buffer_size << ")";
  buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
  if (buffer_size < 0) {
    RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
                      << AAudio_convertResultToText(buffer_size);
    return false;
  }
  RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size;
  return true;
}

void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
  RTC_LOG(LS_INFO) << "ClearInputStream";
  RTC_DCHECK(stream_);
  RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT);
  aaudio_result_t cleared_frames = 0;
  do {
    cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0);
  } while (cleared_frames > 0);
}

AAudioObserverInterface* AAudioWrapper::observer() const {
  return observer_;
}

AudioParameters AAudioWrapper::audio_parameters() const {
  return audio_parameters_;
}

int32_t AAudioWrapper::samples_per_frame() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getSamplesPerFrame(stream_);
}

int32_t AAudioWrapper::buffer_size_in_frames() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getBufferSizeInFrames(stream_);
}

int32_t AAudioWrapper::buffer_capacity_in_frames() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getBufferCapacityInFrames(stream_);
}

int32_t AAudioWrapper::device_id() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getDeviceId(stream_);
}

int32_t AAudioWrapper::xrun_count() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getXRunCount(stream_);
}

int32_t AAudioWrapper::format() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFormat(stream_);
}

int32_t AAudioWrapper::sample_rate() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getSampleRate(stream_);
}

int32_t AAudioWrapper::channel_count() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getChannelCount(stream_);
}

int32_t AAudioWrapper::frames_per_callback() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFramesPerDataCallback(stream_);
}

aaudio_sharing_mode_t AAudioWrapper::sharing_mode() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getSharingMode(stream_);
}

aaudio_performance_mode_t AAudioWrapper::performance_mode() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getPerformanceMode(stream_);
}

aaudio_stream_state_t AAudioWrapper::stream_state() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getState(stream_);
}

int64_t AAudioWrapper::frames_written() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFramesWritten(stream_);
}

int64_t AAudioWrapper::frames_read() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFramesRead(stream_);
}

void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
  RTC_LOG(LS_INFO) << "SetStreamConfiguration";
  RTC_DCHECK(builder);
  RTC_DCHECK(thread_checker_.IsCurrent());
  // Request usage of the default primary output/input device.
  // TODO(henrika): verify that the default device follows Java APIs.
  // https://developer.android.com/reference/android/media/AudioDeviceInfo.html.
  AAudioStreamBuilder_setDeviceId(builder, AAUDIO_UNSPECIFIED);
  // Use the preferred sample rate given by the audio parameters.
  AAudioStreamBuilder_setSampleRate(builder, audio_parameters().sample_rate());
  // Use the preferred channel configuration given by the audio parameters.
  AAudioStreamBuilder_setChannelCount(builder, audio_parameters().channels());
  // Always use 16-bit PCM as the audio sample format.
  AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
  // TODO(henrika): investigate effect of using AAUDIO_SHARING_MODE_EXCLUSIVE.
  // Shared mode is requested here. Exclusive mode would give the lowest
  // possible latency; if exclusive mode is requested but not available,
  // AAudio falls back to shared mode.
  AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_SHARED);
  // Use the direction that was given at construction.
  AAudioStreamBuilder_setDirection(builder, direction_);
  // TODO(henrika): investigate performance using different performance modes.
  AAudioStreamBuilder_setPerformanceMode(builder,
                                         AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
  // Given that WebRTC applications require low latency, our audio stream uses
  // an asynchronous callback function to transfer data to and from the
  // application. AAudio executes the callback in a higher-priority thread
  // that has better performance.
  AAudioStreamBuilder_setDataCallback(builder, DataCallback, this);
  // Request that AAudio calls ErrorCallback if any error occurs on a callback
  // thread.
  AAudioStreamBuilder_setErrorCallback(builder, ErrorCallback, this);
}

bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
  RTC_LOG(LS_INFO) << "OpenStream";
  RTC_DCHECK(builder);
  AAudioStream* stream = nullptr;
  RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
  stream_ = stream;
  LogStreamConfiguration();
  return true;
}

void AAudioWrapper::CloseStream() {
  RTC_LOG(LS_INFO) << "CloseStream";
  RTC_DCHECK(stream_);
  LOG_ON_ERROR(AAudioStream_close(stream_));
  stream_ = nullptr;
}

void AAudioWrapper::LogStreamConfiguration() {
  RTC_DCHECK(stream_);
  char ss_buf[1024];
  rtc::SimpleStringBuilder ss(ss_buf);
  ss << "Stream Configuration: ";
  ss << "sample rate=" << sample_rate() << ", channels=" << channel_count();
  ss << ", samples per frame=" << samples_per_frame();
  ss << ", format=" << FormatToString(format());
  ss << ", sharing mode=" << SharingModeToString(sharing_mode());
  ss << ", performance mode=" << PerformanceModeToString(performance_mode());
  ss << ", direction=" << DirectionToString(direction());
  ss << ", device id=" << AAudioStream_getDeviceId(stream_);
  ss << ", frames per callback=" << frames_per_callback();
  RTC_LOG(LS_INFO) << ss.str();
}

void AAudioWrapper::LogStreamState() {
  RTC_LOG(LS_INFO) << "AAudio stream state: "
                   << AAudio_convertStreamStateToText(stream_state());
}

bool AAudioWrapper::VerifyStreamConfiguration() {
  RTC_LOG(LS_INFO) << "VerifyStreamConfiguration";
  RTC_DCHECK(stream_);
  // TODO(henrika): should we verify device ID as well?
  if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate";
    return false;
  }
  if (AAudioStream_getChannelCount(stream_) !=
      static_cast<int32_t>(audio_parameters().channels())) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count";
    return false;
  }
  if (AAudioStream_getFormat(stream_) != AAUDIO_FORMAT_PCM_I16) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested format";
    return false;
  }
  if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode";
    return false;
  }
  if (AAudioStream_getPerformanceMode(stream_) !=
      AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode";
    return false;
  }
  if (AAudioStream_getDirection(stream_) != direction()) {
    RTC_LOG(LS_ERROR) << "Stream direction could not be set";
    return false;
  }
  if (AAudioStream_getSamplesPerFrame(stream_) !=
      static_cast<int32_t>(audio_parameters().channels())) {
    RTC_LOG(LS_ERROR) << "Invalid number of samples per frame";
    return false;
  }
  return true;
}

bool AAudioWrapper::OptimizeBuffers() {
  RTC_LOG(LS_INFO) << "OptimizeBuffers";
  RTC_DCHECK(stream_);
  // Maximum number of frames that can be filled without blocking.
  RTC_LOG(LS_INFO) << "max buffer capacity in frames: "
                   << buffer_capacity_in_frames();
  // Query the number of frames that the application should read or write at
  // one time for optimal performance.
  int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
  RTC_LOG(LS_INFO) << "frames per burst for optimal performance: "
                   << frames_per_burst;
  frames_per_burst_ = frames_per_burst;
  if (direction() == AAUDIO_DIRECTION_INPUT) {
    // There is no point in calling setBufferSizeInFrames() for input streams
    // since it has no effect on the performance (latency in this case).
    return true;
  }
  // Set the buffer size to the same as the burst size to guarantee the lowest
  // possible latency. This size might change for output streams if underruns
  // are detected and automatic buffer adjustment is enabled.
  AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
  int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
  if (buffer_size != frames_per_burst) {
    RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
    return false;
  }
  RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size;
  return true;
}

}  // namespace webrtc
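The output branch of EstimateLatencyMillis() reduces to a single extrapolation: given a (frame index, presentation time) pair reported by AAudioStream_getTimestamp(), the next written frame is expected to play at the reported time plus the index delta divided by the sample rate, and the latency is the gap between that and "now". A restatement of the same logic, with a hypothetical worked example in the comments:

// latency = (t_presented + (i_next - i_reported) / f_s) - t_now
//
// Worked example with assumed numbers: AAudio reports that frame 48000 was
// presented at t = 1.000 s; frames_written() is 48480 and the sample rate
// is 48000 Hz. The next frame should play at 1.000 + 480 / 48000 = 1.010 s.
// If it is written "now" at 0.990 s, the estimated latency is 20 ms.
int64_t EstimateOutputLatencyNanos(int64_t presented_time_ns,
                                   int64_t presented_index,
                                   int64_t next_index,
                                   int32_t sample_rate_hz,
                                   int64_t now_ns) {
  const int64_t delta_ns =
      ((next_index - presented_index) * 1000000000LL) / sample_rate_hz;
  return presented_time_ns + delta_ns - now_ns;
}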
modules/audio_device/android/aaudio_wrapper.h
@@ -0,0 +1,127 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_

#include <aaudio/AAudio.h>

#include "api/sequence_checker.h"
#include "modules/audio_device/include/audio_device_defines.h"

namespace webrtc {

class AudioManager;

// AAudio callback interface for audio transport to/from the AAudio stream.
// The interface also contains an error callback method for notifications of
// e.g. device changes.
class AAudioObserverInterface {
 public:
  // Audio data will be passed into or out of this function depending on the
  // direction of the audio stream. This callback function will be called on
  // a real-time thread owned by AAudio.
  virtual aaudio_data_callback_result_t OnDataCallback(void* audio_data,
                                                       int32_t num_frames) = 0;
  // AAudio will call this function if any error occurs on a callback thread.
  // In response, this function could signal or launch another thread to reopen
  // a stream on another device. Do not reopen the stream in this callback.
  virtual void OnErrorCallback(aaudio_result_t error) = 0;

 protected:
  virtual ~AAudioObserverInterface() {}
};

// Utility class which wraps the C-based AAudio API into a more handy C++
// class where the underlying resources (AAudioStreamBuilder and AAudioStream)
// are encapsulated. The user must set the direction (in or out) at
// construction since it defines the stream type and the direction of the data
// flow in the AAudioObserverInterface.
//
// AAudio is a new Android C API introduced in the Android O (26) release.
// It is designed for high-performance audio applications that require low
// latency. Applications communicate with AAudio by reading and writing data
// to streams.
//
// Each stream is attached to a single audio device, where each audio device
// has a unique ID. The ID can be used to bind an audio stream to a specific
// audio device, but this implementation lets AAudio choose the default
// primary device instead (device selection takes place in Java). A stream can
// only move data in one direction. When a stream is opened, Android checks to
// ensure that the audio device and stream direction agree.
class AAudioWrapper {
 public:
  AAudioWrapper(AudioManager* audio_manager,
                aaudio_direction_t direction,
                AAudioObserverInterface* observer);
  ~AAudioWrapper();

  bool Init();
  bool Start();
  bool Stop();

  // For output streams: estimates latency between writing an audio frame to
  // the output stream and the time that same frame is played out on the
  // output audio device.
  // For input streams: estimates latency between reading an audio frame from
  // the input stream and the time that same frame was recorded on the input
  // audio device.
  double EstimateLatencyMillis() const;

  // Increases the internal buffer size for output streams by one burst size
  // to reduce the risk of underruns. Can be used while a stream is active.
  bool IncreaseOutputBufferSize();

  // Drains the recording stream of any existing data by reading from it until
  // it's empty. Can be used to clear out old data before starting a new audio
  // session.
  void ClearInputStream(void* audio_data, int32_t num_frames);

  AAudioObserverInterface* observer() const;
  AudioParameters audio_parameters() const;
  int32_t samples_per_frame() const;
  int32_t buffer_size_in_frames() const;
  int32_t buffer_capacity_in_frames() const;
  int32_t device_id() const;
  int32_t xrun_count() const;
  int32_t format() const;
  int32_t sample_rate() const;
  int32_t channel_count() const;
  int32_t frames_per_callback() const;
  aaudio_sharing_mode_t sharing_mode() const;
  aaudio_performance_mode_t performance_mode() const;
  aaudio_stream_state_t stream_state() const;
  int64_t frames_written() const;
  int64_t frames_read() const;
  aaudio_direction_t direction() const { return direction_; }
  AAudioStream* stream() const { return stream_; }
  int32_t frames_per_burst() const { return frames_per_burst_; }

 private:
  void SetStreamConfiguration(AAudioStreamBuilder* builder);
  bool OpenStream(AAudioStreamBuilder* builder);
  void CloseStream();
  void LogStreamConfiguration();
  void LogStreamState();
  bool VerifyStreamConfiguration();
  bool OptimizeBuffers();

  SequenceChecker thread_checker_;
  SequenceChecker aaudio_thread_checker_;
  AudioParameters audio_parameters_;
  const aaudio_direction_t direction_;
  AAudioObserverInterface* observer_ = nullptr;
  AAudioStream* stream_ = nullptr;
  int32_t frames_per_burst_ = 0;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
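A minimal sketch of an AAudioObserverInterface implementation (hypothetical; the real implementations in this commit are AAudioPlayer and AAudioRecorder). It assumes a mono 16-bit PCM output stream, matching the configuration AAudioWrapper requests:

#include <string.h>  // memset

// Hypothetical observer that renders silence; illustrates the contract only.
class SilenceObserver : public webrtc::AAudioObserverInterface {
 public:
  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
                                               int32_t num_frames) override {
    // A real observer would pull data from an audio source here.
    memset(audio_data, 0, sizeof(int16_t) * num_frames);
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
  }
  void OnErrorCallback(aaudio_result_t error) override {
    // Per the interface contract: do not reopen the stream from this
    // callback; signal another thread to handle the restart instead.
  }
};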
modules/audio_device/android/audio_common.h
@@ -0,0 +1,28 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_

namespace webrtc {

const int kDefaultSampleRate = 44100;
// Delay estimates for the two different supported modes. These values are
// based on real-time round-trip delay estimates on a large set of devices,
// and they are lower bounds since the filter length is 128 ms, so the AEC
// works for delays in the range [50, ~170] ms and [150, ~270] ms. Note that,
// in most cases, the lowest delay estimate will not be utilized since devices
// that support low-latency output audio often support HW AEC as well.
const int kLowLatencyModeDelayEstimateInMilliseconds = 50;
const int kHighLatencyModeDelayEstimateInMilliseconds = 150;

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
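The quoted delay windows follow from the constants plus the 128 ms AEC filter length; the ~170 and ~270 upper bounds in the comment appear to be conservative roundings of the exact sums:

// Derivation of the ranges quoted above (arithmetic illustration only):
//   low-latency mode:  [50, 50 + 128]  = [50, 178]  ms, quoted as [50, ~170]
//   high-latency mode: [150, 150 + 128] = [150, 278] ms, quoted as [150, ~270]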
@ -0,0 +1,441 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/audio_device_generic.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"

namespace webrtc {

// InputType/OutputType can be any class that implements the capturing/
// rendering part of the AudioDeviceGeneric API.
// Construction and destruction must be done on one and the same thread. Each
// internal implementation of InputType and OutputType will RTC_DCHECK if that
// is not the case. All implemented methods must also be called on the same
// thread. See comments in each InputType/OutputType class for more info.
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
// RTC_CHECK that the calling thread is attached to a Java VM.

template <class InputType, class OutputType>
class AudioDeviceTemplate : public AudioDeviceGeneric {
 public:
  AudioDeviceTemplate(AudioDeviceModule::AudioLayer audio_layer,
                      AudioManager* audio_manager)
      : audio_layer_(audio_layer),
        audio_manager_(audio_manager),
        output_(audio_manager_),
        input_(audio_manager_),
        initialized_(false) {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    RTC_CHECK(audio_manager);
    audio_manager_->SetActiveAudioLayer(audio_layer);
  }

  virtual ~AudioDeviceTemplate() { RTC_LOG(LS_INFO) << __FUNCTION__; }

  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    audioLayer = audio_layer_;
    return 0;
  }

  InitStatus Init() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    RTC_DCHECK(thread_checker_.IsCurrent());
    RTC_DCHECK(!initialized_);
    if (!audio_manager_->Init()) {
      return InitStatus::OTHER_ERROR;
    }
    if (output_.Init() != 0) {
      audio_manager_->Close();
      return InitStatus::PLAYOUT_ERROR;
    }
    if (input_.Init() != 0) {
      output_.Terminate();
      audio_manager_->Close();
      return InitStatus::RECORDING_ERROR;
    }
    initialized_ = true;
    return InitStatus::OK;
  }

  int32_t Terminate() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    RTC_DCHECK(thread_checker_.IsCurrent());
    int32_t err = input_.Terminate();
    err |= output_.Terminate();
    err |= !audio_manager_->Close();
    initialized_ = false;
    RTC_DCHECK_EQ(err, 0);
    return err;
  }

  bool Initialized() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    RTC_DCHECK(thread_checker_.IsCurrent());
    return initialized_;
  }

  int16_t PlayoutDevices() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 1;
  }

  int16_t RecordingDevices() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 1;
  }

  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override {
    return 0;
  }

  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override {
    return 0;
  }

  int32_t SetPlayoutDevice(uint16_t index) override {
    // OK to use but it has no effect currently since device selection is
    // done using Android APIs instead.
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 0;
  }

  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override {
    // OK to use but it has no effect currently since device selection is
    // done using Android APIs instead.
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 0;
  }

  int32_t SetRecordingDevice(uint16_t index) override {
    // OK to use but it has no effect currently since device selection is
    // done using Android APIs instead.
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 0;
  }

  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override {
    // OK to use but it has no effect currently since device selection is
    // done using Android APIs instead.
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 0;
  }

  int32_t PlayoutIsAvailable(bool& available) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    available = true;
    return 0;
  }

  int32_t InitPlayout() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.InitPlayout();
  }

  bool PlayoutIsInitialized() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.PlayoutIsInitialized();
  }

  int32_t RecordingIsAvailable(bool& available) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    available = true;
    return 0;
  }

  int32_t InitRecording() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return input_.InitRecording();
  }

  bool RecordingIsInitialized() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return input_.RecordingIsInitialized();
  }

  int32_t StartPlayout() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    if (!audio_manager_->IsCommunicationModeEnabled()) {
      RTC_LOG(LS_WARNING)
          << "The application should use MODE_IN_COMMUNICATION audio mode!";
    }
    return output_.StartPlayout();
  }

  int32_t StopPlayout() override {
    // Avoid using the audio manager (JNI/Java cost) if playout was inactive.
    if (!Playing())
      return 0;
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    int32_t err = output_.StopPlayout();
    return err;
  }

  bool Playing() const override {
    RTC_LOG(LS_INFO) << __FUNCTION__;
    return output_.Playing();
  }

  int32_t StartRecording() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    if (!audio_manager_->IsCommunicationModeEnabled()) {
      RTC_LOG(LS_WARNING)
          << "The application should use MODE_IN_COMMUNICATION audio mode!";
    }
    return input_.StartRecording();
  }

  int32_t StopRecording() override {
    // Avoid using the audio manager (JNI/Java cost) if recording was inactive.
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    if (!Recording())
      return 0;
    int32_t err = input_.StopRecording();
    return err;
  }

  bool Recording() const override { return input_.Recording(); }

  int32_t InitSpeaker() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 0;
  }

  bool SpeakerIsInitialized() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return true;
  }

  int32_t InitMicrophone() override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return 0;
  }

  bool MicrophoneIsInitialized() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return true;
  }

  int32_t SpeakerVolumeIsAvailable(bool& available) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.SpeakerVolumeIsAvailable(available);
  }

  int32_t SetSpeakerVolume(uint32_t volume) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.SetSpeakerVolume(volume);
  }

  int32_t SpeakerVolume(uint32_t& volume) const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.SpeakerVolume(volume);
  }

  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.MaxSpeakerVolume(maxVolume);
  }

  int32_t MinSpeakerVolume(uint32_t& minVolume) const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return output_.MinSpeakerVolume(minVolume);
  }

  int32_t MicrophoneVolumeIsAvailable(bool& available) override {
    available = false;
    return -1;
  }

  int32_t SetMicrophoneVolume(uint32_t volume) override {
    RTC_CHECK_NOTREACHED();
  }

  int32_t MicrophoneVolume(uint32_t& volume) const override {
    RTC_CHECK_NOTREACHED();
  }

  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override {
    RTC_CHECK_NOTREACHED();
  }

  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override {
    RTC_CHECK_NOTREACHED();
  }

  int32_t SpeakerMuteIsAvailable(bool& available) override {
    available = false;
    return -1;
  }

  int32_t SetSpeakerMute(bool enable) override { RTC_CHECK_NOTREACHED(); }

  int32_t SpeakerMute(bool& enabled) const override { RTC_CHECK_NOTREACHED(); }

  int32_t MicrophoneMuteIsAvailable(bool& available) override {
    available = false;
    return -1;
  }

  int32_t SetMicrophoneMute(bool enable) override { RTC_CHECK_NOTREACHED(); }

  int32_t MicrophoneMute(bool& enabled) const override {
    RTC_CHECK_NOTREACHED();
  }

  // Returns true if the audio manager has been configured to support stereo
  // and false otherwise. Default is mono.
  int32_t StereoPlayoutIsAvailable(bool& available) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    available = audio_manager_->IsStereoPlayoutSupported();
    return 0;
  }

  int32_t SetStereoPlayout(bool enable) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    bool available = audio_manager_->IsStereoPlayoutSupported();
    // Android does not support changes between mono and stereo on the fly.
    // Instead, the native audio layer is configured via the audio manager
    // to either support mono or stereo. It is allowed to call this method
    // if that same state is not modified.
    return (enable == available) ? 0 : -1;
  }

  int32_t StereoPlayout(bool& enabled) const override {
    enabled = audio_manager_->IsStereoPlayoutSupported();
    return 0;
  }

  int32_t StereoRecordingIsAvailable(bool& available) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    available = audio_manager_->IsStereoRecordSupported();
    return 0;
  }

  int32_t SetStereoRecording(bool enable) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    bool available = audio_manager_->IsStereoRecordSupported();
    // Android does not support changes between mono and stereo on the fly.
    // Instead, the native audio layer is configured via the audio manager
    // to either support mono or stereo. It is allowed to call this method
    // if that same state is not modified.
    return (enable == available) ? 0 : -1;
  }

  int32_t StereoRecording(bool& enabled) const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    enabled = audio_manager_->IsStereoRecordSupported();
    return 0;
  }

  int32_t PlayoutDelay(uint16_t& delay_ms) const override {
    // Best guess we can do is to use half of the estimated total delay.
    delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
    RTC_DCHECK_GT(delay_ms, 0);
    return 0;
  }

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    output_.AttachAudioBuffer(audioBuffer);
    input_.AttachAudioBuffer(audioBuffer);
  }

  // Returns true if the device both supports built in AEC and the device
  // is not blacklisted.
  // Currently, if OpenSL ES is used in both directions, this method will still
  // report the correct value and it has the correct effect. As an example:
  // a device supports built in AEC and this method returns true. Libjingle
  // will then disable the WebRTC based AEC and that will work for all devices
  // (mainly Nexus) even when OpenSL ES is used for input since our current
  // implementation will enable built-in AEC by default also for OpenSL ES.
  // The only "bad" thing that happens today is that when Libjingle calls
  // OpenSLESRecorder::EnableBuiltInAEC() it will not have any real effect and
  // a "Not Implemented" log will be filed. This non-perfect state will remain
  // until I have added full support for audio effects based on OpenSL ES APIs.
  bool BuiltInAECIsAvailable() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return audio_manager_->IsAcousticEchoCancelerSupported();
  }

  // TODO(henrika): add implementation for OpenSL ES based audio as well.
  int32_t EnableBuiltInAEC(bool enable) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
    RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
    return input_.EnableBuiltInAEC(enable);
  }

  // Returns true if the device both supports built in AGC and the device
  // is not blacklisted.
  // TODO(henrika): add implementation for OpenSL ES based audio as well.
  // In addition, see comments for BuiltInAECIsAvailable().
  bool BuiltInAGCIsAvailable() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return audio_manager_->IsAutomaticGainControlSupported();
  }

  // TODO(henrika): add implementation for OpenSL ES based audio as well.
  int32_t EnableBuiltInAGC(bool enable) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
    RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
    return input_.EnableBuiltInAGC(enable);
  }

  // Returns true if the device both supports built in NS and the device
  // is not blacklisted.
  // TODO(henrika): add implementation for OpenSL ES based audio as well.
  // In addition, see comments for BuiltInAECIsAvailable().
  bool BuiltInNSIsAvailable() const override {
    RTC_DLOG(LS_INFO) << __FUNCTION__;
    return audio_manager_->IsNoiseSuppressorSupported();
  }

  // TODO(henrika): add implementation for OpenSL ES based audio as well.
  int32_t EnableBuiltInNS(bool enable) override {
    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
    RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
    return input_.EnableBuiltInNS(enable);
  }

 private:
  SequenceChecker thread_checker_;

  // Local copy of the audio layer set during construction of the
  // AudioDeviceModuleImpl instance. Read only value.
  const AudioDeviceModule::AudioLayer audio_layer_;

  // Non-owning raw pointer to AudioManager instance given to use at
  // construction. The real object is owned by AudioDeviceModuleImpl and the
  // life time is the same as that of the AudioDeviceModuleImpl, hence there
  // is no risk of reading a NULL pointer at any time in this class.
  AudioManager* const audio_manager_;

  OutputType output_;

  InputType input_;

  bool initialized_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
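
To make the template contract concrete, here is a minimal sketch of how an owner such as AudioDeviceModuleImpl could instantiate and drive this class for the Java-based layer. AudioTrackJni is assumed to be the Java-based output counterpart of AudioRecordJni; which types the owner actually picks depends on the selected audio layer:

// Sketch only: assumes AudioRecordJni (input) and AudioTrackJni (output)
// implement the capturing/rendering parts of the AudioDeviceGeneric API.
#include "modules/audio_device/android/audio_device_template.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/audio_record_jni.h"
#include "modules/audio_device/android/audio_track_jni.h"

void CreateJavaAudioDevice(webrtc::AudioManager* audio_manager) {
  using AudioDevice =
      webrtc::AudioDeviceTemplate<webrtc::AudioRecordJni,
                                  webrtc::AudioTrackJni>;
  // The constructor forwards the layer to AudioManager::SetActiveAudioLayer().
  AudioDevice device(webrtc::AudioDeviceModule::kAndroidJavaAudio,
                     audio_manager);
  if (device.Init() != webrtc::AudioDeviceGeneric::InitStatus::OK) {
    return;  // Init() already rolled back any partially initialized state.
  }
  // Playout/recording can now be initialized and started via the
  // AudioDeviceGeneric API; Terminate() reverses Init().
  device.Terminate();
}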
@ -0,0 +1,318 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/audio_manager.h"

#include <utility>

#include "modules/audio_device/android/audio_common.h"
#include "modules/utility/include/helpers_android.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"

namespace webrtc {

// AudioManager::JavaAudioManager implementation
AudioManager::JavaAudioManager::JavaAudioManager(
    NativeRegistration* native_reg,
    std::unique_ptr<GlobalRef> audio_manager)
    : audio_manager_(std::move(audio_manager)),
      init_(native_reg->GetMethodId("init", "()Z")),
      dispose_(native_reg->GetMethodId("dispose", "()V")),
      is_communication_mode_enabled_(
          native_reg->GetMethodId("isCommunicationModeEnabled", "()Z")),
      is_device_blacklisted_for_open_sles_usage_(
          native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage",
                                  "()Z")) {
  RTC_LOG(LS_INFO) << "JavaAudioManager::ctor";
}

AudioManager::JavaAudioManager::~JavaAudioManager() {
  RTC_LOG(LS_INFO) << "JavaAudioManager::~dtor";
}

bool AudioManager::JavaAudioManager::Init() {
  return audio_manager_->CallBooleanMethod(init_);
}

void AudioManager::JavaAudioManager::Close() {
  audio_manager_->CallVoidMethod(dispose_);
}

bool AudioManager::JavaAudioManager::IsCommunicationModeEnabled() {
  return audio_manager_->CallBooleanMethod(is_communication_mode_enabled_);
}

bool AudioManager::JavaAudioManager::IsDeviceBlacklistedForOpenSLESUsage() {
  return audio_manager_->CallBooleanMethod(
      is_device_blacklisted_for_open_sles_usage_);
}

// AudioManager implementation
AudioManager::AudioManager()
    : j_environment_(JVM::GetInstance()->environment()),
      audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
      initialized_(false),
      hardware_aec_(false),
      hardware_agc_(false),
      hardware_ns_(false),
      low_latency_playout_(false),
      low_latency_record_(false),
      delay_estimate_in_milliseconds_(0) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_CHECK(j_environment_);
  JNINativeMethod native_methods[] = {
      {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V",
       reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
  j_native_registration_ = j_environment_->RegisterNatives(
      "org/webrtc/voiceengine/WebRtcAudioManager", native_methods,
      arraysize(native_methods));
  j_audio_manager_.reset(
      new JavaAudioManager(j_native_registration_.get(),
                           j_native_registration_->NewObject(
                               "<init>", "(J)V", PointerTojlong(this))));
}

AudioManager::~AudioManager() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  Close();
}

void AudioManager::SetActiveAudioLayer(
    AudioDeviceModule::AudioLayer audio_layer) {
  RTC_LOG(LS_INFO) << "SetActiveAudioLayer: " << audio_layer;
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  // Store the currently utilized audio layer.
  audio_layer_ = audio_layer;
  // The delay estimate can take one of two fixed values depending on if the
  // device supports low-latency output or not. However, it is also possible
  // that the user explicitly selects the high-latency audio path, hence we use
  // the selected `audio_layer` here to set the delay estimate.
  delay_estimate_in_milliseconds_ =
      (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
          ? kHighLatencyModeDelayEstimateInMilliseconds
          : kLowLatencyModeDelayEstimateInMilliseconds;
  RTC_LOG(LS_INFO) << "delay_estimate_in_milliseconds: "
                   << delay_estimate_in_milliseconds_;
}

SLObjectItf AudioManager::GetOpenSLEngine() {
  RTC_LOG(LS_INFO) << "GetOpenSLEngine";
  RTC_DCHECK(thread_checker_.IsCurrent());
  // Only allow usage of OpenSL ES if such an audio layer has been specified.
  if (audio_layer_ != AudioDeviceModule::kAndroidOpenSLESAudio &&
      audio_layer_ !=
          AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) {
    RTC_LOG(LS_INFO)
        << "Unable to create OpenSL engine for the current audio layer: "
        << audio_layer_;
    return nullptr;
  }
  // OpenSL ES for Android only supports a single engine per application.
  // If one has already been created, return the existing object instead of
  // creating a new one.
  if (engine_object_.Get() != nullptr) {
    RTC_LOG(LS_WARNING)
        << "The OpenSL ES engine object has already been created";
    return engine_object_.Get();
  }
  // Create the engine object in thread safe mode.
  const SLEngineOption option[] = {
      {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
  SLresult result =
      slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL);
  if (result != SL_RESULT_SUCCESS) {
    RTC_LOG(LS_ERROR) << "slCreateEngine() failed: "
                      << GetSLErrorString(result);
    engine_object_.Reset();
    return nullptr;
  }
  // Realize the SL Engine in synchronous mode.
  result = engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE);
  if (result != SL_RESULT_SUCCESS) {
    RTC_LOG(LS_ERROR) << "Realize() failed: " << GetSLErrorString(result);
    engine_object_.Reset();
    return nullptr;
  }
  // Finally return the SLObjectItf interface of the engine object.
  return engine_object_.Get();
}

bool AudioManager::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
  if (!j_audio_manager_->Init()) {
    RTC_LOG(LS_ERROR) << "Init() failed";
    return false;
  }
  initialized_ = true;
  return true;
}

bool AudioManager::Close() {
  RTC_LOG(LS_INFO) << "Close";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_)
    return true;
  j_audio_manager_->Close();
  initialized_ = false;
  return true;
}

bool AudioManager::IsCommunicationModeEnabled() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_manager_->IsCommunicationModeEnabled();
}

bool AudioManager::IsAcousticEchoCancelerSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return hardware_aec_;
}

bool AudioManager::IsAutomaticGainControlSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return hardware_agc_;
}

bool AudioManager::IsNoiseSuppressorSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return hardware_ns_;
}

bool AudioManager::IsLowLatencyPlayoutSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  // Some devices are blacklisted for usage of OpenSL ES even if they report
  // that low-latency playout is supported. See b/21485703 for details.
  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
             ? false
             : low_latency_playout_;
}

bool AudioManager::IsLowLatencyRecordSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return low_latency_record_;
}

bool AudioManager::IsProAudioSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  // TODO(henrika): return the state independently of if OpenSL ES is
  // blacklisted or not for now. We could use the same approach as in
  // IsLowLatencyPlayoutSupported() but I can't see the need for it yet.
  return pro_audio_;
}

// TODO(henrika): improve comments...
bool AudioManager::IsAAudioSupported() const {
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
  return a_audio_;
#else
  return false;
#endif
}

bool AudioManager::IsStereoPlayoutSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return (playout_parameters_.channels() == 2);
}

bool AudioManager::IsStereoRecordSupported() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  return (record_parameters_.channels() == 2);
}

int AudioManager::GetDelayEstimateInMilliseconds() const {
  return delay_estimate_in_milliseconds_;
}

JNI_FUNCTION_ALIGN
void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
                                                jobject obj,
                                                jint sample_rate,
                                                jint output_channels,
                                                jint input_channels,
                                                jboolean hardware_aec,
                                                jboolean hardware_agc,
                                                jboolean hardware_ns,
                                                jboolean low_latency_output,
                                                jboolean low_latency_input,
                                                jboolean pro_audio,
                                                jboolean a_audio,
                                                jint output_buffer_size,
                                                jint input_buffer_size,
                                                jlong native_audio_manager) {
  webrtc::AudioManager* this_object =
      reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
  this_object->OnCacheAudioParameters(
      env, sample_rate, output_channels, input_channels, hardware_aec,
      hardware_agc, hardware_ns, low_latency_output, low_latency_input,
      pro_audio, a_audio, output_buffer_size, input_buffer_size);
}

void AudioManager::OnCacheAudioParameters(JNIEnv* env,
                                          jint sample_rate,
                                          jint output_channels,
                                          jint input_channels,
                                          jboolean hardware_aec,
                                          jboolean hardware_agc,
                                          jboolean hardware_ns,
                                          jboolean low_latency_output,
                                          jboolean low_latency_input,
                                          jboolean pro_audio,
                                          jboolean a_audio,
                                          jint output_buffer_size,
                                          jint input_buffer_size) {
  RTC_LOG(LS_INFO)
      << "OnCacheAudioParameters: "
         "hardware_aec: "
      << static_cast<bool>(hardware_aec)
      << ", hardware_agc: " << static_cast<bool>(hardware_agc)
      << ", hardware_ns: " << static_cast<bool>(hardware_ns)
      << ", low_latency_output: " << static_cast<bool>(low_latency_output)
      << ", low_latency_input: " << static_cast<bool>(low_latency_input)
      << ", pro_audio: " << static_cast<bool>(pro_audio)
      << ", a_audio: " << static_cast<bool>(a_audio)
      << ", sample_rate: " << static_cast<int>(sample_rate)
      << ", output_channels: " << static_cast<int>(output_channels)
      << ", input_channels: " << static_cast<int>(input_channels)
      << ", output_buffer_size: " << static_cast<int>(output_buffer_size)
      << ", input_buffer_size: " << static_cast<int>(input_buffer_size);
  RTC_DCHECK(thread_checker_.IsCurrent());
  hardware_aec_ = hardware_aec;
  hardware_agc_ = hardware_agc;
  hardware_ns_ = hardware_ns;
  low_latency_playout_ = low_latency_output;
  low_latency_record_ = low_latency_input;
  pro_audio_ = pro_audio;
  a_audio_ = a_audio;
  playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
                            static_cast<size_t>(output_buffer_size));
  record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
                           static_cast<size_t>(input_buffer_size));
}

const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
  RTC_CHECK(playout_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.IsCurrent());
  return playout_parameters_;
}

const AudioParameters& AudioManager::GetRecordAudioParameters() {
  RTC_CHECK(record_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.IsCurrent());
  return record_parameters_;
}

}  // namespace webrtc
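
A short sketch of the expected call order for this class, assuming everything runs on the thread that created the AudioManager (the sequence mirrors what AudioDeviceTemplate does internally):

#include "modules/audio_device/android/audio_manager.h"
#include "rtc_base/logging.h"

void ConfigureAndroidAudio() {
  webrtc::AudioManager audio_manager;  // Caches Java-side parameters via JNI.
  // The layer must be set before Init(); it also fixes the delay estimate.
  audio_manager.SetActiveAudioLayer(
      webrtc::AudioDeviceModule::kAndroidJavaAudio);
  if (!audio_manager.Init())
    return;
  // Capability queries only read state cached at construction.
  const bool use_hw_aec = audio_manager.IsAcousticEchoCancelerSupported();
  const int delay_ms = audio_manager.GetDelayEstimateInMilliseconds();
  RTC_LOG(LS_INFO) << "hw_aec=" << use_hw_aec << " delay_ms=" << delay_ms;
  audio_manager.Close();  // Reverts the audio-mode changes made by Init().
}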
@ -0,0 +1,225 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_

#include <SLES/OpenSLES.h>
#include <jni.h>

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/opensles_common.h"
#include "modules/audio_device/audio_device_config.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/utility/include/helpers_android.h"
#include "modules/utility/include/jvm_android.h"

namespace webrtc {

// Implements support for functions in the WebRTC audio stack for Android that
// rely on the AudioManager in android.media. It also populates an
// AudioParameter structure with native audio parameters detected at
// construction. This class does not make any audio-related modifications
// unless Init() is called. Caching audio parameters makes no changes but only
// reads data from the Java side.
class AudioManager {
 public:
  // Wraps the Java specific parts of the AudioManager into one helper class.
  // Stores method IDs for all supported methods at construction and then
  // allows calls like JavaAudioManager::Close() while hiding the Java/JNI
  // parts that are associated with this call.
  class JavaAudioManager {
   public:
    JavaAudioManager(NativeRegistration* native_registration,
                     std::unique_ptr<GlobalRef> audio_manager);
    ~JavaAudioManager();

    bool Init();
    void Close();
    bool IsCommunicationModeEnabled();
    bool IsDeviceBlacklistedForOpenSLESUsage();

   private:
    std::unique_ptr<GlobalRef> audio_manager_;
    jmethodID init_;
    jmethodID dispose_;
    jmethodID is_communication_mode_enabled_;
    jmethodID is_device_blacklisted_for_open_sles_usage_;
  };

  AudioManager();
  ~AudioManager();

  // Sets the currently active audio layer combination. Must be called before
  // Init().
  void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer);

  // Creates and realizes the main (global) Open SL engine object and returns
  // a reference to it. The engine object is only created at the first call
  // since OpenSL ES for Android only supports a single engine per application.
  // Subsequent calls return the already created engine. The SL engine object
  // is destroyed when the AudioManager object is deleted. It means that the
  // engine object will be the first OpenSL ES object to be created and last
  // object to be destroyed.
  // Note that NULL will be returned unless the audio layer is specified as
  // AudioDeviceModule::kAndroidOpenSLESAudio or
  // AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio.
  SLObjectItf GetOpenSLEngine();

  // Initializes the audio manager and stores the current audio mode.
  bool Init();
  // Reverts any setting done by Init().
  bool Close();

  // Returns true if current audio mode is AudioManager.MODE_IN_COMMUNICATION.
  bool IsCommunicationModeEnabled() const;

  // Native audio parameters stored during construction.
  const AudioParameters& GetPlayoutAudioParameters();
  const AudioParameters& GetRecordAudioParameters();

  // Returns true if the device supports built-in audio effects for AEC, AGC
  // and NS. Some devices can also be blacklisted for use in combination with
  // platform effects and these devices will return false.
  // Can currently only be used in combination with a Java based audio backend
  // for the recording side (i.e. using the android.media.AudioRecord API).
  bool IsAcousticEchoCancelerSupported() const;
  bool IsAutomaticGainControlSupported() const;
  bool IsNoiseSuppressorSupported() const;

  // Returns true if the device supports the low-latency audio paths in
  // combination with OpenSL ES.
  bool IsLowLatencyPlayoutSupported() const;
  bool IsLowLatencyRecordSupported() const;

  // Returns true if the device supports (and has been configured for) stereo.
  // Call the Java API WebRtcAudioManager.setStereoOutput/Input() with true as
  // parameter to enable stereo. Default is mono in both directions and the
  // setting is set once and for all when the audio manager object is created.
  // TODO(henrika): stereo is not supported in combination with OpenSL ES.
  bool IsStereoPlayoutSupported() const;
  bool IsStereoRecordSupported() const;

  // Returns true if the device supports pro-audio features in combination with
  // OpenSL ES.
  bool IsProAudioSupported() const;

  // Returns true if the device supports AAudio.
  bool IsAAudioSupported() const;

  // Returns the estimated total delay of this device. Unit is in milliseconds.
  // The value is set once at construction and never changes after that.
  // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
  // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
  int GetDelayEstimateInMilliseconds() const;

 private:
  // Called from the Java side so we can cache the native audio parameters.
  // This method will be called by the WebRtcAudioManager constructor, i.e.
  // on the same thread that this object is created on.
  static void JNICALL CacheAudioParameters(JNIEnv* env,
                                           jobject obj,
                                           jint sample_rate,
                                           jint output_channels,
                                           jint input_channels,
                                           jboolean hardware_aec,
                                           jboolean hardware_agc,
                                           jboolean hardware_ns,
                                           jboolean low_latency_output,
                                           jboolean low_latency_input,
                                           jboolean pro_audio,
                                           jboolean a_audio,
                                           jint output_buffer_size,
                                           jint input_buffer_size,
                                           jlong native_audio_manager);
  void OnCacheAudioParameters(JNIEnv* env,
                              jint sample_rate,
                              jint output_channels,
                              jint input_channels,
                              jboolean hardware_aec,
                              jboolean hardware_agc,
                              jboolean hardware_ns,
                              jboolean low_latency_output,
                              jboolean low_latency_input,
                              jboolean pro_audio,
                              jboolean a_audio,
                              jint output_buffer_size,
                              jint input_buffer_size);

  // Stores thread ID in the constructor.
  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
  // other methods are called from the same thread.
  SequenceChecker thread_checker_;

  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
  // construction.
  // Also ensures that DetachCurrentThread() is called at destruction.
  JvmThreadConnector attach_thread_if_needed_;

  // Wraps the JNI interface pointer and methods associated with it.
  std::unique_ptr<JNIEnvironment> j_environment_;

  // Contains factory method for creating the Java object.
  std::unique_ptr<NativeRegistration> j_native_registration_;

  // Wraps the Java specific parts of the AudioManager.
  std::unique_ptr<AudioManager::JavaAudioManager> j_audio_manager_;

  // Contains the selected audio layer specified by the AudioLayer enumerator
  // in the AudioDeviceModule class.
  AudioDeviceModule::AudioLayer audio_layer_;

  // This object is the global entry point of the OpenSL ES API.
  // After creating the engine object, the application can obtain this object's
  // SLEngineItf interface. This interface contains creation methods for all
  // the other object types in the API. None of these interfaces are realized
  // by this class. It only provides access to the global engine object.
  webrtc::ScopedSLObjectItf engine_object_;

  // Set to true by Init() and false by Close().
  bool initialized_;

  // True if device supports hardware (or built-in) AEC.
  bool hardware_aec_;
  // True if device supports hardware (or built-in) AGC.
  bool hardware_agc_;
  // True if device supports hardware (or built-in) NS.
  bool hardware_ns_;

  // True if device supports the low-latency OpenSL ES audio path for output.
  bool low_latency_playout_;

  // True if device supports the low-latency OpenSL ES audio path for input.
  bool low_latency_record_;

  // True if device supports the low-latency OpenSL ES pro-audio path.
  bool pro_audio_;

  // True if device supports the low-latency AAudio audio path.
  bool a_audio_;

  // The delay estimate can take one of two fixed values depending on if the
  // device supports low-latency output or not.
  int delay_estimate_in_milliseconds_;

  // Contains native parameters (e.g. sample rate, channel configuration).
  // Set at construction in OnCacheAudioParameters() which is called from
  // Java on the same thread as this object is created on.
  AudioParameters playout_parameters_;
  AudioParameters record_parameters_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
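
The single shared engine returned by GetOpenSLEngine() is the entry point for every other OpenSL ES object. A minimal sketch of how an OpenSL-based player or recorder might consume it (error handling trimmed; the helper is hypothetical):

#include <SLES/OpenSLES.h>

#include "modules/audio_device/android/audio_manager.h"

// Sketch: obtain the shared engine and its SLEngineItf interface, which
// exposes the creation methods for all other OpenSL ES objects.
bool GetEngineInterface(webrtc::AudioManager* audio_manager,
                        SLEngineItf* engine_out) {
  SLObjectItf engine_object = audio_manager->GetOpenSLEngine();
  if (engine_object == nullptr)
    return false;  // Wrong audio layer, or engine creation failed.
  return (*engine_object)
             ->GetInterface(engine_object, SL_IID_ENGINE, engine_out) ==
         SL_RESULT_SUCCESS;
}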
@ -0,0 +1,283 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/audio_merged_screen_record_jni.h"

#include <string>
#include <utility>

#include "modules/audio_device/android/audio_common.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {

namespace {
// Scoped class which logs its time of life as a UMA statistic. It generates
// a histogram which measures the time it takes for a method/scope to execute.
class ScopedHistogramTimer {
 public:
  explicit ScopedHistogramTimer(const std::string& name)
      : histogram_name_(name), start_time_ms_(rtc::TimeMillis()) {}
  ~ScopedHistogramTimer() {
    const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
    RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
    RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms;
  }

 private:
  const std::string histogram_name_;
  int64_t start_time_ms_;
};
}  // namespace

// AudioMergedScreenRecordJni::JavaAudioRecord implementation.
AudioMergedScreenRecordJni::JavaAudioRecord::JavaAudioRecord(
    NativeRegistration* native_reg,
    std::unique_ptr<GlobalRef> audio_record)
    : audio_record_(std::move(audio_record)),
      init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
      start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
      stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
      enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
      enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")),
      on_destroy_(native_reg->GetMethodId("onDestroy", "()V")) {}

AudioMergedScreenRecordJni::JavaAudioRecord::~JavaAudioRecord() {
  audio_record_->CallVoidMethod(on_destroy_);
}

int AudioMergedScreenRecordJni::JavaAudioRecord::InitRecording(
    int sample_rate, size_t channels) {
  return audio_record_->CallIntMethod(init_recording_,
                                      static_cast<jint>(sample_rate),
                                      static_cast<jint>(channels));
}

bool AudioMergedScreenRecordJni::JavaAudioRecord::StartRecording() {
  return audio_record_->CallBooleanMethod(start_recording_);
}

bool AudioMergedScreenRecordJni::JavaAudioRecord::StopRecording() {
  return audio_record_->CallBooleanMethod(stop_recording_);
}

bool AudioMergedScreenRecordJni::JavaAudioRecord::EnableBuiltInAEC(
    bool enable) {
  return audio_record_->CallBooleanMethod(enable_built_in_aec_,
                                          static_cast<jboolean>(enable));
}

bool AudioMergedScreenRecordJni::JavaAudioRecord::EnableBuiltInNS(
    bool enable) {
  return audio_record_->CallBooleanMethod(enable_built_in_ns_,
                                          static_cast<jboolean>(enable));
}

// AudioMergedScreenRecordJni implementation.
AudioMergedScreenRecordJni::AudioMergedScreenRecordJni(
    AudioManager* audio_manager)
    : j_environment_(JVM::GetInstance()->environment()),
      audio_manager_(audio_manager),
      audio_parameters_(audio_manager->GetRecordAudioParameters()),
      total_delay_in_milliseconds_(0),
      direct_buffer_address_(nullptr),
      direct_buffer_capacity_in_bytes_(0),
      frames_per_buffer_(0),
      initialized_(false),
      recording_(false),
      audio_device_buffer_(nullptr) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_DCHECK(audio_parameters_.is_valid());
  RTC_CHECK(j_environment_);
  JNINativeMethod native_methods[] = {
      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
       reinterpret_cast<void*>(
           &webrtc::AudioMergedScreenRecordJni::CacheDirectBufferAddress)},
      {"nativeDataIsRecorded", "(IJ)V",
       reinterpret_cast<void*>(
           &webrtc::AudioMergedScreenRecordJni::DataIsRecorded)}};
  j_native_registration_ = j_environment_->RegisterNatives(
      "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
      arraysize(native_methods));
  j_audio_record_.reset(
      new JavaAudioRecord(j_native_registration_.get(),
                          j_native_registration_->NewObject(
                              "<init>", "(JI)V", PointerTojlong(this), 2)));
  // Detach from this thread since we want to use the checker to verify calls
  // from the Java based audio thread.
  thread_checker_java_.Detach();
}

AudioMergedScreenRecordJni::~AudioMergedScreenRecordJni() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
}

int32_t AudioMergedScreenRecordJni::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return 0;
}

int32_t AudioMergedScreenRecordJni::Terminate() {
  RTC_LOG(LS_INFO) << "Terminate";
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopRecording();
  return 0;
}

int32_t AudioMergedScreenRecordJni::InitRecording() {
  RTC_LOG(LS_INFO) << "InitRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!recording_);
  ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
  int frames_per_buffer = j_audio_record_->InitRecording(
      audio_parameters_.sample_rate(), audio_parameters_.channels());
  if (frames_per_buffer < 0) {
    direct_buffer_address_ = nullptr;
    RTC_LOG(LS_ERROR) << "InitRecording failed";
    return -1;
  }
  frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
  RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
  RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
               frames_per_buffer_ * bytes_per_frame);
  RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
  initialized_ = true;
  return 0;
}

int32_t AudioMergedScreenRecordJni::StartRecording() {
  RTC_LOG(LS_INFO) << "StartRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!recording_);
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Recording can not start since InitRecording must succeed first";
    return 0;
  }
  ScopedHistogramTimer timer("WebRTC.Audio.StartRecordingDurationMs");
  if (!j_audio_record_->StartRecording()) {
    RTC_LOG(LS_ERROR) << "StartRecording failed";
    return -1;
  }
  recording_ = true;
  return 0;
}

int32_t AudioMergedScreenRecordJni::StopRecording() {
  RTC_LOG(LS_INFO) << "StopRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !recording_) {
    return 0;
  }
  if (!j_audio_record_->StopRecording()) {
    RTC_LOG(LS_ERROR) << "StopRecording failed";
    return -1;
  }
  // If we don't detach here, we will hit a RTC_DCHECK in OnDataIsRecorded()
  // next time StartRecording() is called since it will create a new Java
  // thread.
  thread_checker_java_.Detach();
  initialized_ = false;
  recording_ = false;
  direct_buffer_address_ = nullptr;
  return 0;
}

void AudioMergedScreenRecordJni::AttachAudioBuffer(
    AudioDeviceBuffer* audioBuffer) {
  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
  const int sample_rate_hz = audio_parameters_.sample_rate();
  RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
  const size_t channels = audio_parameters_.channels();
  RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
  audio_device_buffer_->SetRecordingChannels(channels);
  total_delay_in_milliseconds_ =
      audio_manager_->GetDelayEstimateInMilliseconds();
  RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
  RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: "
                   << total_delay_in_milliseconds_;
}

int32_t AudioMergedScreenRecordJni::EnableBuiltInAEC(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
}

int32_t AudioMergedScreenRecordJni::EnableBuiltInAGC(bool enable) {
  // TODO(henrika): possibly remove when no longer used by any client.
  RTC_CHECK_NOTREACHED();
}

int32_t AudioMergedScreenRecordJni::EnableBuiltInNS(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
}

JNI_FUNCTION_ALIGN
void JNICALL AudioMergedScreenRecordJni::CacheDirectBufferAddress(
    JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord) {
  webrtc::AudioMergedScreenRecordJni* this_object =
      reinterpret_cast<webrtc::AudioMergedScreenRecordJni*>(nativeAudioRecord);
  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
}

void AudioMergedScreenRecordJni::OnCacheDirectBufferAddress(
    JNIEnv* env, jobject byte_buffer) {
  RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!direct_buffer_address_);
  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
  RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
}

JNI_FUNCTION_ALIGN
void JNICALL AudioMergedScreenRecordJni::DataIsRecorded(
    JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
  webrtc::AudioMergedScreenRecordJni* this_object =
      reinterpret_cast<webrtc::AudioMergedScreenRecordJni*>(nativeAudioRecord);
  this_object->OnDataIsRecorded(length);
}

// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioRecordThread'.
void AudioMergedScreenRecordJni::OnDataIsRecorded(int length) {
  RTC_DCHECK(thread_checker_java_.IsCurrent());
  if (!audio_device_buffer_) {
    RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
    return;
  }
  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
                                          frames_per_buffer_);
  // We provide one (combined) fixed delay estimate for the APM and use the
  // |playDelayMs| parameter only. Components like the AEC only see the sum
  // of |playDelayMs| and |recDelayMs|, hence the distribution does not matter.
  audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
  if (audio_device_buffer_->DeliverRecordedData() == -1) {
    RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
  }
}

}  // namespace webrtc
@ -0,0 +1,169 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MERGED_SCREEN_RECORD_JNI_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MERGED_SCREEN_RECORD_JNI_H_

#include <jni.h>

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/utility/include/helpers_android.h"
#include "modules/utility/include/jvm_android.h"

namespace webrtc {

// Implements 16-bit mono PCM audio input support for Android using the Java
// AudioRecord interface. Most of the work is done by its Java counterpart in
// WebRtcAudioRecord.java. This class is created and lives on a thread in
// C++-land, but recorded audio buffers are delivered on a high-priority
// thread managed by the Java class.
//
// The Java class makes use of AudioEffect features (mainly AEC) which are
// first available in Jelly Bean. If it is instantiated running against earlier
// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
// separately instead.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread.
//
// This class uses JvmThreadConnector to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non attached) thread is used.
class AudioMergedScreenRecordJni {
 public:
  // Wraps the Java specific parts of this class into one helper class.
  class JavaAudioRecord {
   public:
    JavaAudioRecord(NativeRegistration* native_registration,
                    std::unique_ptr<GlobalRef> audio_record);
    ~JavaAudioRecord();

    int InitRecording(int sample_rate, size_t channels);
    bool StartRecording();
    bool StopRecording();
    bool EnableBuiltInAEC(bool enable);
    bool EnableBuiltInNS(bool enable);

   private:
    std::unique_ptr<GlobalRef> audio_record_;
    jmethodID init_recording_;
    jmethodID start_recording_;
    jmethodID stop_recording_;
    jmethodID enable_built_in_aec_;
    jmethodID enable_built_in_ns_;
    jmethodID on_destroy_;
  };

  explicit AudioMergedScreenRecordJni(AudioManager* audio_manager);
  ~AudioMergedScreenRecordJni();

  int32_t Init();
  int32_t Terminate();

  int32_t InitRecording();
  bool RecordingIsInitialized() const { return initialized_; }

  int32_t StartRecording();
  int32_t StopRecording();
  bool Recording() const { return recording_; }

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  int32_t EnableBuiltInAEC(bool enable);
  int32_t EnableBuiltInAGC(bool enable);
  int32_t EnableBuiltInNS(bool enable);

 private:
  // Called from the Java side so we can cache the address of the Java-managed
  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
  // is also stored in |direct_buffer_capacity_in_bytes_|.
  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
  // on the same thread that this object is created on.
  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
                                               jobject obj,
                                               jobject byte_buffer,
                                               jlong nativeAudioRecord);
  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);

  // Called periodically by the Java based WebRtcAudioRecord object when
  // recording has started. Each call indicates that there are |length| new
  // bytes recorded in the memory area |direct_buffer_address_| and it is
  // now time to send these to the consumer.
  // This method is called on a high-priority thread from Java. The name of
  // the thread is 'AudioRecordThread'.
  static void JNICALL DataIsRecorded(JNIEnv* env,
                                     jobject obj,
                                     jint length,
                                     jlong nativeAudioRecord);
  void OnDataIsRecorded(int length);

  // Stores thread ID in constructor.
  SequenceChecker thread_checker_;

  // Stores thread ID in first call to OnDataIsRecorded() from high-priority
  // thread in Java. Detached during construction of this object.
  SequenceChecker thread_checker_java_;

  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
  // construction.
  // Also ensures that DetachCurrentThread() is called at destruction.
  JvmThreadConnector attach_thread_if_needed_;

  // Wraps the JNI interface pointer and methods associated with it.
  std::unique_ptr<JNIEnvironment> j_environment_;

  // Contains factory method for creating the Java object.
  std::unique_ptr<NativeRegistration> j_native_registration_;

  // Wraps the Java specific parts of the AudioMergedScreenRecordJni class.
  std::unique_ptr<AudioMergedScreenRecordJni::JavaAudioRecord> j_audio_record_;

  // Raw pointer to the audio manager.
  const AudioManager* audio_manager_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Delay estimate of the total round-trip delay (input + output).
  // Fixed value set once in AttachAudioBuffer() and it can take one out of two
  // possible values. See audio_common.h for details.
  int total_delay_in_milliseconds_;

  // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
  void* direct_buffer_address_;

  // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
  size_t direct_buffer_capacity_in_bytes_;

  // Number of audio frames per audio buffer. Each audio frame corresponds to
  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
  // frame contains 2 bytes (given that the Java layer only supports mono).
  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
  size_t frames_per_buffer_;

  bool initialized_;

  bool recording_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MERGED_SCREEN_RECORD_JNI_H_
|
||||
|
|
@ -0,0 +1,280 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/audio_record_jni.h"

#include <string>
#include <utility>

#include "modules/audio_device/android/audio_common.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {

namespace {
// Scoped class which logs its time of life as a UMA statistic. It generates
// a histogram which measures the time it takes for a method/scope to execute.
class ScopedHistogramTimer {
 public:
  explicit ScopedHistogramTimer(const std::string& name)
      : histogram_name_(name), start_time_ms_(rtc::TimeMillis()) {}
  ~ScopedHistogramTimer() {
    const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
    RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
    RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms;
  }

 private:
  const std::string histogram_name_;
  int64_t start_time_ms_;
};
}  // namespace
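
// A minimal usage sketch of the ScopedHistogramTimer helper above; the scope
// and histogram name here are illustrative placeholders, not part of this
// file:
//
//   {
//     ScopedHistogramTimer timer("WebRTC.Audio.ExampleDurationMs");
//     // ... timed work ...
//   }  // Elapsed time is logged and recorded to UMA when `timer` dies.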

// AudioRecordJni::JavaAudioRecord implementation.
AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
    NativeRegistration* native_reg,
    std::unique_ptr<GlobalRef> audio_record)
    : audio_record_(std::move(audio_record)),
      init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
      start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
      stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
      enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
      enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")) {}

AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}

int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
                                                   size_t channels) {
  return audio_record_->CallIntMethod(init_recording_,
                                      static_cast<jint>(sample_rate),
                                      static_cast<jint>(channels));
}

bool AudioRecordJni::JavaAudioRecord::StartRecording() {
  return audio_record_->CallBooleanMethod(start_recording_);
}

bool AudioRecordJni::JavaAudioRecord::StopRecording() {
  return audio_record_->CallBooleanMethod(stop_recording_);
}

bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
  return audio_record_->CallBooleanMethod(enable_built_in_aec_,
                                          static_cast<jboolean>(enable));
}

bool AudioRecordJni::JavaAudioRecord::EnableBuiltInNS(bool enable) {
  return audio_record_->CallBooleanMethod(enable_built_in_ns_,
                                          static_cast<jboolean>(enable));
}

// AudioRecordJni implementation.
AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
    : j_environment_(JVM::GetInstance()->environment()),
      audio_manager_(audio_manager),
      audio_parameters_(audio_manager->GetRecordAudioParameters()),
      total_delay_in_milliseconds_(0),
      direct_buffer_address_(nullptr),
      direct_buffer_capacity_in_bytes_(0),
      frames_per_buffer_(0),
      initialized_(false),
      recording_(false),
      audio_device_buffer_(nullptr) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_DCHECK(audio_parameters_.is_valid());
  RTC_CHECK(j_environment_);
  JNINativeMethod native_methods[] = {
      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
       reinterpret_cast<void*>(
           &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
      {"nativeDataIsRecorded", "(IJ)V",
       reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
  j_native_registration_ = j_environment_->RegisterNatives(
      "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
      arraysize(native_methods));
  j_audio_record_.reset(
      new JavaAudioRecord(j_native_registration_.get(),
                          j_native_registration_->NewObject(
                              "<init>", "(JI)V", PointerTojlong(this), 0)));
  // Detach from this thread since we want to use the checker to verify calls
  // from the Java based audio thread.
  thread_checker_java_.Detach();
}
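
// For reference, the JNI type signatures registered above decode as follows
// (standard JNI signature encoding, not specific to this file):
//   "(Ljava/nio/ByteBuffer;J)V"  ->  void m(ByteBuffer, long)
//   "(IJ)V"                      ->  void m(int, long)
//   "(JI)V"                      ->  void <init>(long, int)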

AudioRecordJni::~AudioRecordJni() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
}

int32_t AudioRecordJni::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return 0;
}

int32_t AudioRecordJni::Terminate() {
  RTC_LOG(LS_INFO) << "Terminate";
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopRecording();
  return 0;
}

int32_t AudioRecordJni::InitRecording() {
  RTC_LOG(LS_INFO) << "InitRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!recording_);
  ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
  int frames_per_buffer = j_audio_record_->InitRecording(
      audio_parameters_.sample_rate(), audio_parameters_.channels());
  if (frames_per_buffer < 0) {
    direct_buffer_address_ = nullptr;
    RTC_LOG(LS_ERROR) << "InitRecording failed";
    return -1;
  }
  frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
  RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
  RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
               frames_per_buffer_ * bytes_per_frame);
  RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
  initialized_ = true;
  return 0;
}
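
// Worked example of the invariants checked above: with mono 16-bit PCM at
// 48000 Hz, frames_per_buffer_ equals the 10 ms frame count 480, and the
// direct buffer must therefore hold 480 * 2 = 960 bytes.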

int32_t AudioRecordJni::StartRecording() {
  RTC_LOG(LS_INFO) << "StartRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!recording_);
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Recording can not start since InitRecording must succeed first";
    return 0;
  }
  ScopedHistogramTimer timer("WebRTC.Audio.StartRecordingDurationMs");
  if (!j_audio_record_->StartRecording()) {
    RTC_LOG(LS_ERROR) << "StartRecording failed";
    return -1;
  }
  recording_ = true;
  return 0;
}

int32_t AudioRecordJni::StopRecording() {
  RTC_LOG(LS_INFO) << "StopRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !recording_) {
    return 0;
  }
  if (!j_audio_record_->StopRecording()) {
    RTC_LOG(LS_ERROR) << "StopRecording failed";
    return -1;
  }
  // If we don't detach here, we will hit a RTC_DCHECK in OnDataIsRecorded()
  // next time StartRecording() is called since it will create a new Java
  // thread.
  thread_checker_java_.Detach();
  initialized_ = false;
  recording_ = false;
  direct_buffer_address_ = nullptr;
  return 0;
}

void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
  const int sample_rate_hz = audio_parameters_.sample_rate();
  RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
  const size_t channels = audio_parameters_.channels();
  RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
  audio_device_buffer_->SetRecordingChannels(channels);
  total_delay_in_milliseconds_ =
      audio_manager_->GetDelayEstimateInMilliseconds();
  RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
  RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: "
                   << total_delay_in_milliseconds_;
}

int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
}

int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) {
  // TODO(henrika): possibly remove when no longer used by any client.
  RTC_CHECK_NOTREACHED();
}

int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
}

JNI_FUNCTION_ALIGN
void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
                                                      jobject obj,
                                                      jobject byte_buffer,
                                                      jlong nativeAudioRecord) {
  webrtc::AudioRecordJni* this_object =
      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
}
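
// The `nativeAudioRecord` argument round-trips the C++ `this` pointer through
// Java: it is packed with PointerTojlong(this) in the constructor and
// unpacked with reinterpret_cast above, which is what lets a static JNI
// thunk forward into the right AudioRecordJni instance.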

void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
                                                jobject byte_buffer) {
  RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!direct_buffer_address_);
  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
  RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
}

JNI_FUNCTION_ALIGN
void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
                                            jobject obj,
                                            jint length,
                                            jlong nativeAudioRecord) {
  webrtc::AudioRecordJni* this_object =
      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
  this_object->OnDataIsRecorded(length);
}

// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioRecordThread'.
void AudioRecordJni::OnDataIsRecorded(int length) {
  RTC_DCHECK(thread_checker_java_.IsCurrent());
  if (!audio_device_buffer_) {
    RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
    return;
  }
  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
                                          frames_per_buffer_);
  // We provide one (combined) fixed delay estimate for the APM and use the
  // `playDelayMs` parameter only. Components like the AEC only see the sum
  // of `playDelayMs` and `recDelayMs`, hence the distribution does not matter.
  audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
  if (audio_device_buffer_->DeliverRecordedData() == -1) {
    RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
  }
}

}  // namespace webrtc
@ -0,0 +1,168 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_

#include <jni.h>

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/utility/include/helpers_android.h"
#include "modules/utility/include/jvm_android.h"

namespace webrtc {

// Implements 16-bit mono PCM audio input support for Android using the Java
// AudioRecord interface. Most of the work is done by its Java counterpart in
// WebRtcAudioRecord.java. This class is created and lives on a thread in
// C++-land, but recorded audio buffers are delivered on a high-priority
// thread managed by the Java class.
//
// The Java class makes use of AudioEffect features (mainly AEC) which are
// first available in Jelly Bean. If it is instantiated running against earlier
// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
// separately instead.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread.
//
// This class uses JvmThreadConnector to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non attached) thread is used.
class AudioRecordJni {
 public:
  // Wraps the Java specific parts of the AudioRecordJni into one helper class.
  class JavaAudioRecord {
   public:
    JavaAudioRecord(NativeRegistration* native_registration,
                    std::unique_ptr<GlobalRef> audio_record);
    ~JavaAudioRecord();

    int InitRecording(int sample_rate, size_t channels);
    bool StartRecording();
    bool StopRecording();
    bool EnableBuiltInAEC(bool enable);
    bool EnableBuiltInNS(bool enable);

   private:
    std::unique_ptr<GlobalRef> audio_record_;
    jmethodID init_recording_;
    jmethodID start_recording_;
    jmethodID stop_recording_;
    jmethodID enable_built_in_aec_;
    jmethodID enable_built_in_ns_;
  };

  explicit AudioRecordJni(AudioManager* audio_manager);
  ~AudioRecordJni();

  int32_t Init();
  int32_t Terminate();

  int32_t InitRecording();
  bool RecordingIsInitialized() const { return initialized_; }

  int32_t StartRecording();
  int32_t StopRecording();
  bool Recording() const { return recording_; }

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  int32_t EnableBuiltInAEC(bool enable);
  int32_t EnableBuiltInAGC(bool enable);
  int32_t EnableBuiltInNS(bool enable);

 private:
  // Called from Java side so we can cache the address of the Java-managed
  // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
  // is also stored in `direct_buffer_capacity_in_bytes_`.
  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
  // on the same thread that this object is created on.
  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
                                               jobject obj,
                                               jobject byte_buffer,
                                               jlong nativeAudioRecord);
  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);

  // Called periodically by the Java based WebRtcAudioRecord object when
  // recording has started. Each call indicates that there are `length` new
  // bytes recorded in the memory area `direct_buffer_address_` and it is
  // now time to send these to the consumer.
  // This method is called on a high-priority thread from Java. The name of
  // the thread is 'AudioRecordThread'.
  static void JNICALL DataIsRecorded(JNIEnv* env,
                                     jobject obj,
                                     jint length,
                                     jlong nativeAudioRecord);
  void OnDataIsRecorded(int length);

  // Stores thread ID in constructor.
  SequenceChecker thread_checker_;

  // Stores thread ID in first call to OnDataIsRecorded() from high-priority
  // thread in Java. Detached during construction of this object.
  SequenceChecker thread_checker_java_;

  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
  // construction.
  // Also ensures that DetachCurrentThread() is called at destruction.
  JvmThreadConnector attach_thread_if_needed_;

  // Wraps the JNI interface pointer and methods associated with it.
  std::unique_ptr<JNIEnvironment> j_environment_;

  // Contains factory method for creating the Java object.
  std::unique_ptr<NativeRegistration> j_native_registration_;

  // Wraps the Java specific parts of the AudioRecordJni class.
  std::unique_ptr<AudioRecordJni::JavaAudioRecord> j_audio_record_;

  // Raw pointer to the audio manager.
  const AudioManager* audio_manager_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Delay estimate of the total round-trip delay (input + output).
  // Fixed value set once in AttachAudioBuffer() and it can take one out of two
  // possible values. See audio_common.h for details.
  int total_delay_in_milliseconds_;

  // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
  void* direct_buffer_address_;

  // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
  size_t direct_buffer_capacity_in_bytes_;

  // Number of audio frames per audio buffer. Each audio frame corresponds to
  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
  // frame contains 2 bytes (given that the Java layer only supports mono).
  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
  size_t frames_per_buffer_;

  bool initialized_;

  bool recording_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
@ -0,0 +1,278 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/audio_screen_record_jni.h"

#include <string>
#include <utility>

#include "modules/audio_device/android/audio_common.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {

namespace {
// Scoped class which logs its time of life as a UMA statistic. It generates
// a histogram which measures the time it takes for a method/scope to execute.
class ScopedHistogramTimer {
 public:
  explicit ScopedHistogramTimer(const std::string& name)
      : histogram_name_(name), start_time_ms_(rtc::TimeMillis()) {}
  ~ScopedHistogramTimer() {
    const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
    RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
    RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms;
  }

 private:
  const std::string histogram_name_;
  int64_t start_time_ms_;
};
}  // namespace

// AudioScreenRecordJni::JavaAudioRecord implementation.
AudioScreenRecordJni::JavaAudioRecord::JavaAudioRecord(
    NativeRegistration* native_reg,
    std::unique_ptr<GlobalRef> audio_record)
    : audio_record_(std::move(audio_record)),
      init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
      start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
      stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
      enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
      enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")) {}

int AudioScreenRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
                                                         size_t channels) {
  return audio_record_->CallIntMethod(init_recording_,
                                      static_cast<jint>(sample_rate),
                                      static_cast<jint>(channels));
}

bool AudioScreenRecordJni::JavaAudioRecord::StartRecording() {
  return audio_record_->CallBooleanMethod(start_recording_);
}

bool AudioScreenRecordJni::JavaAudioRecord::StopRecording() {
  return audio_record_->CallBooleanMethod(stop_recording_);
}

bool AudioScreenRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
  return audio_record_->CallBooleanMethod(enable_built_in_aec_,
                                          static_cast<jboolean>(enable));
}

bool AudioScreenRecordJni::JavaAudioRecord::EnableBuiltInNS(bool enable) {
  return audio_record_->CallBooleanMethod(enable_built_in_ns_,
                                          static_cast<jboolean>(enable));
}

// AudioScreenRecordJni implementation.
AudioScreenRecordJni::AudioScreenRecordJni(AudioManager* audio_manager)
    : j_environment_(JVM::GetInstance()->environment()),
      audio_manager_(audio_manager),
      audio_parameters_(audio_manager->GetRecordAudioParameters()),
      total_delay_in_milliseconds_(0),
      direct_buffer_address_(nullptr),
      direct_buffer_capacity_in_bytes_(0),
      frames_per_buffer_(0),
      initialized_(false),
      recording_(false),
      audio_device_buffer_(nullptr) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_DCHECK(audio_parameters_.is_valid());
  RTC_CHECK(j_environment_);
  JNINativeMethod native_methods[] = {
      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
       reinterpret_cast<void*>(
           &webrtc::AudioScreenRecordJni::CacheDirectBufferAddress)},
      {"nativeDataIsRecorded", "(IJ)V",
       reinterpret_cast<void*>(&webrtc::AudioScreenRecordJni::DataIsRecorded)}};
  j_native_registration_ = j_environment_->RegisterNatives(
      "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
      arraysize(native_methods));
  j_audio_record_.reset(
      new JavaAudioRecord(j_native_registration_.get(),
                          j_native_registration_->NewObject(
                              "<init>", "(JI)V", PointerTojlong(this), 1)));
  // Detach from this thread since we want to use the checker to verify calls
  // from the Java based audio thread.
  thread_checker_java_.Detach();
}
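
// Note: the only functional difference from AudioRecordJni's constructor is
// the trailing int passed to the Java "<init>(JI)V" constructor: 1 here
// versus 0 in AudioRecordJni. Its exact meaning is defined on the Java side
// (presumably a capture-source selector); that interpretation is an
// assumption, not something this file confirms.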

AudioScreenRecordJni::~AudioScreenRecordJni() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
}

int32_t AudioScreenRecordJni::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return 0;
}

int32_t AudioScreenRecordJni::Terminate() {
  RTC_LOG(LS_INFO) << "Terminate";
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopRecording();
  return 0;
}

int32_t AudioScreenRecordJni::InitRecording() {
  RTC_LOG(LS_INFO) << "InitRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!recording_);
  ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
  int frames_per_buffer = j_audio_record_->InitRecording(
      audio_parameters_.sample_rate(), audio_parameters_.channels());
  if (frames_per_buffer < 0) {
    direct_buffer_address_ = nullptr;
    RTC_LOG(LS_ERROR) << "InitRecording failed";
    return -1;
  }
  frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
  RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
  RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
               frames_per_buffer_ * bytes_per_frame);
  RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
  initialized_ = true;
  return 0;
}

int32_t AudioScreenRecordJni::StartRecording() {
  RTC_LOG(LS_INFO) << "StartRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!recording_);
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Recording can not start since InitRecording must succeed first";
    return 0;
  }
  ScopedHistogramTimer timer("WebRTC.Audio.StartRecordingDurationMs");
  if (!j_audio_record_->StartRecording()) {
    RTC_LOG(LS_ERROR) << "StartRecording failed";
    return -1;
  }
  recording_ = true;
  return 0;
}

int32_t AudioScreenRecordJni::StopRecording() {
  RTC_LOG(LS_INFO) << "StopRecording";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !recording_) {
    return 0;
  }
  if (!j_audio_record_->StopRecording()) {
    RTC_LOG(LS_ERROR) << "StopRecording failed";
    return -1;
  }
  // If we don't detach here, we will hit a RTC_DCHECK in OnDataIsRecorded()
  // next time StartRecording() is called since it will create a new Java
  // thread.
  thread_checker_java_.Detach();
  initialized_ = false;
  recording_ = false;
  direct_buffer_address_ = nullptr;
  return 0;
}

void AudioScreenRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
  const int sample_rate_hz = audio_parameters_.sample_rate();
  RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
  const size_t channels = audio_parameters_.channels();
  RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
  audio_device_buffer_->SetRecordingChannels(channels);
  total_delay_in_milliseconds_ =
      audio_manager_->GetDelayEstimateInMilliseconds();
  RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
  RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: "
                   << total_delay_in_milliseconds_;
}

int32_t AudioScreenRecordJni::EnableBuiltInAEC(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
}

int32_t AudioScreenRecordJni::EnableBuiltInAGC(bool enable) {
  // TODO(henrika): possibly remove when no longer used by any client.
  RTC_CHECK_NOTREACHED();
}

int32_t AudioScreenRecordJni::EnableBuiltInNS(bool enable) {
  RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
}

JNI_FUNCTION_ALIGN
void JNICALL AudioScreenRecordJni::CacheDirectBufferAddress(
    JNIEnv* env,
    jobject obj,
    jobject byte_buffer,
    jlong nativeAudioRecord) {
  webrtc::AudioScreenRecordJni* this_object =
      reinterpret_cast<webrtc::AudioScreenRecordJni*>(nativeAudioRecord);
  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
}

void AudioScreenRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
                                                      jobject byte_buffer) {
  RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!direct_buffer_address_);
  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
  RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
}

JNI_FUNCTION_ALIGN
void JNICALL AudioScreenRecordJni::DataIsRecorded(JNIEnv* env,
                                                  jobject obj,
                                                  jint length,
                                                  jlong nativeAudioRecord) {
  webrtc::AudioScreenRecordJni* this_object =
      reinterpret_cast<webrtc::AudioScreenRecordJni*>(nativeAudioRecord);
  this_object->OnDataIsRecorded(length);
}

// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioRecordThread'.
void AudioScreenRecordJni::OnDataIsRecorded(int length) {
  RTC_DCHECK(thread_checker_java_.IsCurrent());
  if (!audio_device_buffer_) {
    RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
    return;
  }
  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
                                          frames_per_buffer_);
  // We provide one (combined) fixed delay estimate for the APM and use the
  // |playDelayMs| parameter only. Components like the AEC only see the sum
  // of |playDelayMs| and |recDelayMs|, hence the distribution does not matter.
  audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
  if (audio_device_buffer_->DeliverRecordedData() == -1) {
    RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
  }
}

}  // namespace webrtc
@ -0,0 +1,167 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_SCREEN_RECORD_JNI_H_
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_SCREEN_RECORD_JNI_H_

#include <jni.h>

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/utility/include/helpers_android.h"
#include "modules/utility/include/jvm_android.h"

namespace webrtc {

// Implements 16-bit mono PCM audio input support for Android using the Java
// AudioRecord interface. Most of the work is done by its Java counterpart in
// WebRtcAudioRecord.java. This class is created and lives on a thread in
// C++-land, but recorded audio buffers are delivered on a high-priority
// thread managed by the Java class.
//
// The Java class makes use of AudioEffect features (mainly AEC) which are
// first available in Jelly Bean. If it is instantiated running against earlier
// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
// separately instead.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread.
//
// This class uses JvmThreadConnector to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non attached) thread is used.
class AudioScreenRecordJni {
 public:
  // Wraps the Java specific parts of the AudioScreenRecordJni into one helper
  // class.
  class JavaAudioRecord {
   public:
    JavaAudioRecord(NativeRegistration* native_registration,
                    std::unique_ptr<GlobalRef> audio_record);

    int InitRecording(int sample_rate, size_t channels);
    bool StartRecording();
    bool StopRecording();
    bool EnableBuiltInAEC(bool enable);
    bool EnableBuiltInNS(bool enable);

   private:
    std::unique_ptr<GlobalRef> audio_record_;
    jmethodID init_recording_;
    jmethodID start_recording_;
    jmethodID stop_recording_;
    jmethodID enable_built_in_aec_;
    jmethodID enable_built_in_ns_;
  };

  explicit AudioScreenRecordJni(AudioManager* audio_manager);
  ~AudioScreenRecordJni();

  int32_t Init();
  int32_t Terminate();

  int32_t InitRecording();
  bool RecordingIsInitialized() const { return initialized_; }

  int32_t StartRecording();
  int32_t StopRecording();
  bool Recording() const { return recording_; }

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  int32_t EnableBuiltInAEC(bool enable);
  int32_t EnableBuiltInAGC(bool enable);
  int32_t EnableBuiltInNS(bool enable);

 private:
  // Called from Java side so we can cache the address of the Java-managed
  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
  // is also stored in |direct_buffer_capacity_in_bytes_|.
  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
  // on the same thread that this object is created on.
  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
                                               jobject obj,
                                               jobject byte_buffer,
                                               jlong nativeAudioRecord);
  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);

  // Called periodically by the Java based WebRtcAudioRecord object when
  // recording has started. Each call indicates that there are |length| new
  // bytes recorded in the memory area |direct_buffer_address_| and it is
  // now time to send these to the consumer.
  // This method is called on a high-priority thread from Java. The name of
  // the thread is 'AudioRecordThread'.
  static void JNICALL DataIsRecorded(JNIEnv* env,
                                     jobject obj,
                                     jint length,
                                     jlong nativeAudioRecord);
  void OnDataIsRecorded(int length);

  // Stores thread ID in constructor.
  SequenceChecker thread_checker_;

  // Stores thread ID in first call to OnDataIsRecorded() from high-priority
  // thread in Java. Detached during construction of this object.
  SequenceChecker thread_checker_java_;

  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
  // construction.
  // Also ensures that DetachCurrentThread() is called at destruction.
  JvmThreadConnector attach_thread_if_needed_;

  // Wraps the JNI interface pointer and methods associated with it.
  std::unique_ptr<JNIEnvironment> j_environment_;

  // Contains factory method for creating the Java object.
  std::unique_ptr<NativeRegistration> j_native_registration_;

  // Wraps the Java specific parts of the AudioScreenRecordJni class.
  std::unique_ptr<AudioScreenRecordJni::JavaAudioRecord> j_audio_record_;

  // Raw pointer to the audio manager.
  const AudioManager* audio_manager_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Delay estimate of the total round-trip delay (input + output).
  // Fixed value set once in AttachAudioBuffer() and it can take one out of two
  // possible values. See audio_common.h for details.
  int total_delay_in_milliseconds_;

  // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
  void* direct_buffer_address_;

  // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
  size_t direct_buffer_capacity_in_bytes_;

  // Number of audio frames per audio buffer. Each audio frame corresponds to
  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
  // frame contains 2 bytes (given that the Java layer only supports mono).
  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
  size_t frames_per_buffer_;

  bool initialized_;

  bool recording_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_SCREEN_RECORD_JNI_H_
@ -0,0 +1,296 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/audio_track_jni.h"

#include <stdlib.h>

#include <utility>

#include "modules/audio_device/android/audio_manager.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {

// AudioTrackJni::JavaAudioTrack implementation.
AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
    NativeRegistration* native_reg,
    std::unique_ptr<GlobalRef> audio_track)
    : audio_track_(std::move(audio_track)),
      init_playout_(native_reg->GetMethodId("initPlayout", "(IID)I")),
      start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
      stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
      set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
      get_stream_max_volume_(
          native_reg->GetMethodId("getStreamMaxVolume", "()I")),
      get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")),
      get_buffer_size_in_frames_(
          native_reg->GetMethodId("getBufferSizeInFrames", "()I")) {}

AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}

bool AudioTrackJni::JavaAudioTrack::InitPlayout(int sample_rate, int channels) {
  double buffer_size_factor =
      strtod(webrtc::field_trial::FindFullName(
                 "WebRTC-AudioDevicePlayoutBufferSizeFactor")
                 .c_str(),
             nullptr);
  if (buffer_size_factor == 0)
    buffer_size_factor = 1.0;
  int requested_buffer_size_bytes = audio_track_->CallIntMethod(
      init_playout_, sample_rate, channels, buffer_size_factor);
  // Update UMA histograms for both the requested and actual buffer size.
  if (requested_buffer_size_bytes >= 0) {
    // To avoid division by zero, we assume the sample rate is 48k if an
    // invalid value is found.
    sample_rate = sample_rate <= 0 ? 48000 : sample_rate;
    // This calculation assumes that audio is mono.
    const int requested_buffer_size_ms =
        (requested_buffer_size_bytes * 1000) / (2 * sample_rate);
    RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeRequestedAudioBufferSizeMs",
                         requested_buffer_size_ms, 0, 1000, 100);
    int actual_buffer_size_frames =
        audio_track_->CallIntMethod(get_buffer_size_in_frames_);
    if (actual_buffer_size_frames >= 0) {
      const int actual_buffer_size_ms =
          actual_buffer_size_frames * 1000 / sample_rate;
      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeAudioBufferSizeMs",
                           actual_buffer_size_ms, 0, 1000, 100);
    }
    return true;
  }
  return false;
}
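
// Sizing note for InitPlayout() above: the requested-size histogram converts
// bytes to milliseconds assuming mono 16-bit PCM, i.e.
// ms = bytes * 1000 / (2 * sample_rate), so e.g. 3840 bytes at 48000 Hz maps
// to 40 ms. The field trial string is parsed with strtod(): a trial
// configured with a value such as "2.0" (hypothetical) doubles the requested
// Java-side buffer, while an unset trial parses to 0 and falls back to the
// default factor 1.0.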

bool AudioTrackJni::JavaAudioTrack::StartPlayout() {
  return audio_track_->CallBooleanMethod(start_playout_);
}

bool AudioTrackJni::JavaAudioTrack::StopPlayout() {
  return audio_track_->CallBooleanMethod(stop_playout_);
}

bool AudioTrackJni::JavaAudioTrack::SetStreamVolume(int volume) {
  return audio_track_->CallBooleanMethod(set_stream_volume_, volume);
}

int AudioTrackJni::JavaAudioTrack::GetStreamMaxVolume() {
  return audio_track_->CallIntMethod(get_stream_max_volume_);
}

int AudioTrackJni::JavaAudioTrack::GetStreamVolume() {
  return audio_track_->CallIntMethod(get_stream_volume_);
}

// TODO(henrika): possibly extend usage of AudioManager and add it as member.
AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
    : j_environment_(JVM::GetInstance()->environment()),
      audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
      direct_buffer_address_(nullptr),
      direct_buffer_capacity_in_bytes_(0),
      frames_per_buffer_(0),
      initialized_(false),
      playing_(false),
      audio_device_buffer_(nullptr) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_DCHECK(audio_parameters_.is_valid());
  RTC_CHECK(j_environment_);
  JNINativeMethod native_methods[] = {
      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
       reinterpret_cast<void*>(
           &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
      {"nativeGetPlayoutData", "(IJ)V",
       reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
  j_native_registration_ = j_environment_->RegisterNatives(
      "org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
      arraysize(native_methods));
  j_audio_track_.reset(
      new JavaAudioTrack(j_native_registration_.get(),
                         j_native_registration_->NewObject(
                             "<init>", "(J)V", PointerTojlong(this))));
  // Detach from this thread since we want to use the checker to verify calls
  // from the Java based audio thread.
  thread_checker_java_.Detach();
}

AudioTrackJni::~AudioTrackJni() {
  RTC_LOG(LS_INFO) << "dtor";
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
}

int32_t AudioTrackJni::Init() {
  RTC_LOG(LS_INFO) << "Init";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return 0;
}

int32_t AudioTrackJni::Terminate() {
  RTC_LOG(LS_INFO) << "Terminate";
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopPlayout();
  return 0;
}

int32_t AudioTrackJni::InitPlayout() {
  RTC_LOG(LS_INFO) << "InitPlayout";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!playing_);
  if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
                                   audio_parameters_.channels())) {
    RTC_LOG(LS_ERROR) << "InitPlayout failed";
    return -1;
  }
  initialized_ = true;
  return 0;
}

int32_t AudioTrackJni::StartPlayout() {
  RTC_LOG(LS_INFO) << "StartPlayout";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!playing_);
  if (!initialized_) {
    RTC_DLOG(LS_WARNING)
        << "Playout can not start since InitPlayout must succeed first";
    return 0;
  }
  if (!j_audio_track_->StartPlayout()) {
    RTC_LOG(LS_ERROR) << "StartPlayout failed";
    return -1;
  }
  playing_ = true;
  return 0;
}

int32_t AudioTrackJni::StopPlayout() {
  RTC_LOG(LS_INFO) << "StopPlayout";
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !playing_ || j_audio_track_ == nullptr) {
    return 0;
  }
  if (!j_audio_track_->StopPlayout()) {
    RTC_LOG(LS_ERROR) << "StopPlayout failed";
    return -1;
  }
  // If we don't detach here, we will hit a RTC_DCHECK in OnGetPlayoutData()
  // next time StartPlayout() is called since it will create a new Java
  // thread.
  thread_checker_java_.Detach();
  initialized_ = false;
  playing_ = false;
  direct_buffer_address_ = nullptr;
  return 0;
}

int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) {
  available = true;
  return 0;
}

int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
  RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")";
  RTC_DCHECK(thread_checker_.IsCurrent());
  return j_audio_track_->SetStreamVolume(volume) ? 0 : -1;
}

int AudioTrackJni::MaxSpeakerVolume(uint32_t& max_volume) const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  max_volume = j_audio_track_->GetStreamMaxVolume();
  return 0;
}

int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  min_volume = 0;
  return 0;
}

int AudioTrackJni::SpeakerVolume(uint32_t& volume) const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  volume = j_audio_track_->GetStreamVolume();
  RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume;
  return 0;
}

// TODO(henrika): possibly add stereo support.
void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
  const int sample_rate_hz = audio_parameters_.sample_rate();
  RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")";
  audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
  const size_t channels = audio_parameters_.channels();
  RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
  audio_device_buffer_->SetPlayoutChannels(channels);
}

JNI_FUNCTION_ALIGN
void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
                                                     jobject obj,
                                                     jobject byte_buffer,
                                                     jlong nativeAudioTrack) {
  webrtc::AudioTrackJni* this_object =
      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
}

void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
                                               jobject byte_buffer) {
  RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!direct_buffer_address_);
  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
  RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
  frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame;
  RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
}

JNI_FUNCTION_ALIGN
void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
                                           jobject obj,
                                           jint length,
                                           jlong nativeAudioTrack) {
  webrtc::AudioTrackJni* this_object =
      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
  this_object->OnGetPlayoutData(static_cast<size_t>(length));
}

// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioTrackThread'.
void AudioTrackJni::OnGetPlayoutData(size_t length) {
  RTC_DCHECK(thread_checker_java_.IsCurrent());
  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
  RTC_DCHECK_EQ(frames_per_buffer_, length / bytes_per_frame);
  if (!audio_device_buffer_) {
    RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
    return;
  }
  // Pull decoded data (in 16-bit PCM format) from the jitter buffer.
  int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
  if (samples <= 0) {
    RTC_LOG(LS_ERROR) << "AudioDeviceBuffer::RequestPlayoutData failed";
    return;
  }
  RTC_DCHECK_EQ(samples, frames_per_buffer_);
  // Copy decoded data into the common byte buffer to ensure that it can be
  // written to the Java based audio track.
  samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
  RTC_DCHECK_EQ(length, bytes_per_frame * samples);
}
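
// Delivery math for OnGetPlayoutData() above, assuming the common mono
// 48000 Hz configuration with 10 ms Java-side buffers (the "480 frames"
// example from the header): each callback asks for frames_per_buffer_ = 480
// frames, so `length` is 480 * 2 = 960 bytes of 16-bit PCM.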

}  // namespace webrtc
@ -0,0 +1,161 @@
|
|||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
|
||||
#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
|
||||
|
||||
#include <jni.h>
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "api/sequence_checker.h"
|
||||
#include "modules/audio_device/android/audio_common.h"
|
||||
#include "modules/audio_device/android/audio_manager.h"
|
||||
#include "modules/audio_device/audio_device_generic.h"
|
||||
#include "modules/audio_device/include/audio_device_defines.h"
|
||||
#include "modules/utility/include/helpers_android.h"
|
||||
#include "modules/utility/include/jvm_android.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Implements 16-bit mono PCM audio output support for Android using the Java
|
||||
// AudioTrack interface. Most of the work is done by its Java counterpart in
|
||||
// WebRtcAudioTrack.java. This class is created and lives on a thread in
|
||||
// C++-land, but decoded audio buffers are requested on a high-priority
|
||||
// thread managed by the Java class.
|
||||
//
|
||||
// An instance must be created and destroyed on one and the same thread.
|
||||
// All public methods must also be called on the same thread. A thread checker
|
||||
// will RTC_DCHECK if any method is called on an invalid thread.
|
||||
//
|
||||
// This class uses JvmThreadConnector to attach to a Java VM if needed
|
||||
// and detach when the object goes out of scope. Additional thread checking
|
||||
// guarantees that no other (possibly non attached) thread is used.
|
||||
class AudioTrackJni {
|
||||
public:
|
||||
// Wraps the Java specific parts of the AudioTrackJni into one helper class.
|
||||
class JavaAudioTrack {
|
||||
public:
|
||||
JavaAudioTrack(NativeRegistration* native_registration,
|
||||
std::unique_ptr<GlobalRef> audio_track);
|
||||
~JavaAudioTrack();
|
||||
|
||||
bool InitPlayout(int sample_rate, int channels);
|
||||
bool StartPlayout();
|
||||
bool StopPlayout();
|
||||
bool SetStreamVolume(int volume);
|
||||
int GetStreamMaxVolume();
|
||||
int GetStreamVolume();
|
||||
|
||||
private:
|
||||
std::unique_ptr<GlobalRef> audio_track_;
|
||||
jmethodID init_playout_;
|
||||
jmethodID start_playout_;
|
||||
jmethodID stop_playout_;
|
||||
jmethodID set_stream_volume_;
|
||||
jmethodID get_stream_max_volume_;
|
||||
jmethodID get_stream_volume_;
|
||||
jmethodID get_buffer_size_in_frames_;
|
||||
};
|
||||
|
||||
explicit AudioTrackJni(AudioManager* audio_manager);
|
||||
~AudioTrackJni();
|
||||
|
||||
int32_t Init();
|
||||
int32_t Terminate();
|
||||
|
||||
int32_t InitPlayout();
|
||||
bool PlayoutIsInitialized() const { return initialized_; }
|
||||
|
||||
int32_t StartPlayout();
|
||||
int32_t StopPlayout();
|
||||
bool Playing() const { return playing_; }
|
||||
|
||||
int SpeakerVolumeIsAvailable(bool& available);
|
||||
  int SetSpeakerVolume(uint32_t volume);
  int SpeakerVolume(uint32_t& volume) const;
  int MaxSpeakerVolume(uint32_t& max_volume) const;
  int MinSpeakerVolume(uint32_t& min_volume) const;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

 private:
  // Called from Java side so we can cache the address of the Java-managed
  // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
  // is also stored in `direct_buffer_capacity_in_bytes_`.
  // Called on the same thread as the creating thread.
  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
                                               jobject obj,
                                               jobject byte_buffer,
                                               jlong nativeAudioTrack);
  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);

  // Called periodically by the Java based WebRtcAudioTrack object when
  // playout has started. Each call indicates that `length` new bytes should
  // be written to the memory area `direct_buffer_address_` for playout.
  // This method is called on a high-priority thread from Java. The name of
  // the thread is 'AudioTrackThread'.
  static void JNICALL GetPlayoutData(JNIEnv* env,
                                     jobject obj,
                                     jint length,
                                     jlong nativeAudioTrack);
  void OnGetPlayoutData(size_t length);

  // Stores thread ID in constructor.
  SequenceChecker thread_checker_;

  // Stores thread ID in first call to OnGetPlayoutData() from high-priority
  // thread in Java. Detached during construction of this object.
  SequenceChecker thread_checker_java_;

  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
  // construction.
  // Also ensures that DetachCurrentThread() is called at destruction.
  JvmThreadConnector attach_thread_if_needed_;

  // Wraps the JNI interface pointer and methods associated with it.
  std::unique_ptr<JNIEnvironment> j_environment_;

  // Contains factory method for creating the Java object.
  std::unique_ptr<NativeRegistration> j_native_registration_;

  // Wraps the Java specific parts of the AudioTrackJni class.
  std::unique_ptr<AudioTrackJni::JavaAudioTrack> j_audio_track_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
  void* direct_buffer_address_;

  // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
  size_t direct_buffer_capacity_in_bytes_;

  // Number of audio frames per audio buffer. Each audio frame corresponds to
  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
  // frame contains 2 bytes (given that the Java layer only supports mono).
  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
  size_t frames_per_buffer_;

  bool initialized_;

  bool playing_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
  // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
  // and therefore outlives this object.
  AudioDeviceBuffer* audio_device_buffer_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
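The two JNICALL hooks above implement a zero-copy handoff: Java allocates a direct ByteBuffer once, the native side caches its address, and later callbacks only pass a byte count. A minimal sketch of the caching step, using only standard JNI calls (illustrative; details such as logging and DCHECKs are omitted):

void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
                                               jobject byte_buffer) {
  // Standard JNI: resolve the native address and capacity backing the
  // Java-allocated direct ByteBuffer.
  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
}
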
@ -0,0 +1,59 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/build_info.h"

#include "modules/utility/include/helpers_android.h"

namespace webrtc {

BuildInfo::BuildInfo()
    : j_environment_(JVM::GetInstance()->environment()),
      j_build_info_(
          JVM::GetInstance()->GetClass("org/webrtc/voiceengine/BuildInfo")) {}

std::string BuildInfo::GetStringFromJava(const char* name) {
  jmethodID id = j_build_info_.GetStaticMethodId(name, "()Ljava/lang/String;");
  jstring j_string =
      static_cast<jstring>(j_build_info_.CallStaticObjectMethod(id));
  return j_environment_->JavaToStdString(j_string);
}

std::string BuildInfo::GetDeviceModel() {
  return GetStringFromJava("getDeviceModel");
}

std::string BuildInfo::GetBrand() {
  return GetStringFromJava("getBrand");
}

std::string BuildInfo::GetDeviceManufacturer() {
  return GetStringFromJava("getDeviceManufacturer");
}

std::string BuildInfo::GetAndroidBuildId() {
  return GetStringFromJava("getAndroidBuildId");
}

std::string BuildInfo::GetBuildType() {
  return GetStringFromJava("getBuildType");
}

std::string BuildInfo::GetBuildRelease() {
  return GetStringFromJava("getBuildRelease");
}

SdkCode BuildInfo::GetSdkVersion() {
  jmethodID id = j_build_info_.GetStaticMethodId("getSdkVersion", "()I");
  jint j_version = j_build_info_.CallStaticIntMethod(id);
  return static_cast<SdkCode>(j_version);
}

}  // namespace webrtc
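GetStringFromJava() above is a thin wrapper over the standard JNI static-method pattern. Stripped of the helper classes, the equivalent raw JNI is roughly the following sketch (assuming `jni` is a valid JNIEnv* for the current thread and `clazz` is the loaded BuildInfo jclass):

jmethodID id =
    jni->GetStaticMethodID(clazz, "getBrand", "()Ljava/lang/String;");
jstring j_str = static_cast<jstring>(jni->CallStaticObjectMethod(clazz, id));
const char* utf = jni->GetStringUTFChars(j_str, nullptr);
std::string brand(utf);  // Copy into a std::string before releasing.
jni->ReleaseStringUTFChars(j_str, utf);
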
@ -0,0 +1,86 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
#define MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_

#include <jni.h>

#include <memory>
#include <string>

#include "modules/utility/include/jvm_android.h"

namespace webrtc {

// This enumeration maps to the values returned by BuildInfo::GetSdkVersion(),
// indicating the Android release associated with a given SDK version.
// See https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
// for details.
enum SdkCode {
  SDK_CODE_JELLY_BEAN = 16,      // Android 4.1
  SDK_CODE_JELLY_BEAN_MR1 = 17,  // Android 4.2
  SDK_CODE_JELLY_BEAN_MR2 = 18,  // Android 4.3
  SDK_CODE_KITKAT = 19,          // Android 4.4
  SDK_CODE_WATCH = 20,           // Android 4.4W
  SDK_CODE_LOLLIPOP = 21,        // Android 5.0
  SDK_CODE_LOLLIPOP_MR1 = 22,    // Android 5.1
  SDK_CODE_MARSHMALLOW = 23,     // Android 6.0
  SDK_CODE_N = 24,
};

// Utility class used to query the Java class (org/webrtc/voiceengine/BuildInfo)
// for device and Android build information.
// The calling thread is attached to the JVM at construction if needed and a
// valid Java environment object is also created.
// All Get methods must be called on the creating thread. If not, the code will
// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
class BuildInfo {
 public:
  BuildInfo();
  ~BuildInfo() {}

  // End-user-visible name for the end product (e.g. "Nexus 6").
  std::string GetDeviceModel();
  // Consumer-visible brand (e.g. "google").
  std::string GetBrand();
  // Manufacturer of the product/hardware (e.g. "motorola").
  std::string GetDeviceManufacturer();
  // Android build ID (e.g. LMY47D).
  std::string GetAndroidBuildId();
  // The type of build (e.g. "user" or "eng").
  std::string GetBuildType();
  // The user-visible version string (e.g. "5.1").
  std::string GetBuildRelease();
  // The user-visible SDK version of the framework (e.g. 21). See SdkCode enum
  // for translation.
  SdkCode GetSdkVersion();

 private:
  // Helper method which calls a static getter method with `name` and returns
  // a string from Java.
  std::string GetStringFromJava(const char* name);

  // Ensures that this class can access a valid JNI interface pointer even
  // if the creating thread was not attached to the JVM.
  JvmThreadConnector attach_thread_if_needed_;

  // Provides access to the JNIEnv interface pointer and the JavaToStdString()
  // method which is used to translate Java strings to std strings.
  std::unique_ptr<JNIEnvironment> j_environment_;

  // Holds the jclass object and provides access to CallStaticObjectMethod().
  // Used by GetStringFromJava() during construction only.
  JavaClass j_build_info_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
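A hypothetical usage sketch for the class declared above; note that all getters must run on the thread that constructed the instance:

webrtc::BuildInfo build_info;
RTC_LOG(LS_INFO) << "model: " << build_info.GetDeviceModel()
                 << ", release: " << build_info.GetBuildRelease();
if (build_info.GetSdkVersion() >= webrtc::SDK_CODE_LOLLIPOP) {
  // API level 21 (Android 5.0) or later.
}
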
@ -0,0 +1,42 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/ensure_initialized.h"

#include <jni.h>
#include <pthread.h>
#include <stddef.h>

#include "modules/utility/include/jvm_android.h"
#include "rtc_base/checks.h"
#include "sdk/android/src/jni/jvm.h"

namespace webrtc {
namespace audiodevicemodule {

static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;

void EnsureInitializedOnce() {
  RTC_CHECK(::webrtc::jni::GetJVM() != nullptr);

  JNIEnv* jni = ::webrtc::jni::AttachCurrentThreadIfNeeded();
  JavaVM* jvm = NULL;
  RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));

  // Initialize the Java environment (currently only used by the audio manager).
  webrtc::JVM::Initialize(jvm);
}

void EnsureInitialized() {
  RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
}

}  // namespace audiodevicemodule
}  // namespace webrtc
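The pthread_once() call above is what makes EnsureInitialized() safe to invoke concurrently: EnsureInitializedOnce() runs exactly once, and racing callers block until it completes. The same guarantee in portable C++ would look like this sketch (hypothetical names, shown only to clarify the idiom):

#include <mutex>

static std::once_flag g_init_flag;

void EnsureInitializedPortable() {
  std::call_once(g_init_flag, [] {
    // One-time setup goes here; concurrent callers wait until it finishes.
  });
}
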
@ -0,0 +1,17 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

namespace webrtc {
namespace audiodevicemodule {

void EnsureInitialized();

}  // namespace audiodevicemodule
}  // namespace webrtc
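A sketch of the intended call site, e.g. an Android unit test (the test name and framework include are assumptions):

#include "modules/audio_device/android/ensure_initialized.h"
#include "test/gtest.h"

TEST(AudioDeviceTest, EnsureInitializedIsIdempotent) {
  // Safe to call repeatedly and from any thread.
  webrtc::audiodevicemodule::EnsureInitialized();
  webrtc::audiodevicemodule::EnsureInitialized();
}
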
@ -0,0 +1,103 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/opensles_common.h"

#include <SLES/OpenSLES.h>

#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"

namespace webrtc {

// Returns a string representation given an integer SL_RESULT_XXX code.
// The mapping can be found in <SLES/OpenSLES.h>.
const char* GetSLErrorString(size_t code) {
  static const char* sl_error_strings[] = {
      "SL_RESULT_SUCCESS",                 // 0
      "SL_RESULT_PRECONDITIONS_VIOLATED",  // 1
      "SL_RESULT_PARAMETER_INVALID",       // 2
      "SL_RESULT_MEMORY_FAILURE",          // 3
      "SL_RESULT_RESOURCE_ERROR",          // 4
      "SL_RESULT_RESOURCE_LOST",           // 5
      "SL_RESULT_IO_ERROR",                // 6
      "SL_RESULT_BUFFER_INSUFFICIENT",     // 7
      "SL_RESULT_CONTENT_CORRUPTED",       // 8
      "SL_RESULT_CONTENT_UNSUPPORTED",     // 9
      "SL_RESULT_CONTENT_NOT_FOUND",       // 10
      "SL_RESULT_PERMISSION_DENIED",       // 11
      "SL_RESULT_FEATURE_UNSUPPORTED",     // 12
      "SL_RESULT_INTERNAL_ERROR",          // 13
      "SL_RESULT_UNKNOWN_ERROR",           // 14
      "SL_RESULT_OPERATION_ABORTED",       // 15
      "SL_RESULT_CONTROL_LOST",            // 16
  };

  if (code >= arraysize(sl_error_strings)) {
    return "SL_RESULT_UNKNOWN_ERROR";
  }
  return sl_error_strings[code];
}

SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
                                        int sample_rate,
                                        size_t bits_per_sample) {
  RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
  SLDataFormat_PCM format;
  format.formatType = SL_DATAFORMAT_PCM;
  format.numChannels = static_cast<SLuint32>(channels);
  // Note that the unit of the sample rate is actually milliHertz, not Hertz.
  switch (sample_rate) {
    case 8000:
      format.samplesPerSec = SL_SAMPLINGRATE_8;
      break;
    case 16000:
      format.samplesPerSec = SL_SAMPLINGRATE_16;
      break;
    case 22050:
      format.samplesPerSec = SL_SAMPLINGRATE_22_05;
      break;
    case 32000:
      format.samplesPerSec = SL_SAMPLINGRATE_32;
      break;
    case 44100:
      format.samplesPerSec = SL_SAMPLINGRATE_44_1;
      break;
    case 48000:
      format.samplesPerSec = SL_SAMPLINGRATE_48;
      break;
    case 64000:
      format.samplesPerSec = SL_SAMPLINGRATE_64;
      break;
    case 88200:
      format.samplesPerSec = SL_SAMPLINGRATE_88_2;
      break;
    case 96000:
      format.samplesPerSec = SL_SAMPLINGRATE_96;
      break;
    default:
      RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
      break;
  }
  format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
  format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
  format.endianness = SL_BYTEORDER_LITTLEENDIAN;
  if (format.numChannels == 1) {
    format.channelMask = SL_SPEAKER_FRONT_CENTER;
  } else if (format.numChannels == 2) {
    format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
  } else {
    RTC_CHECK(false) << "Unsupported number of channels: "
                     << format.numChannels;
  }
  return format;
}

}  // namespace webrtc
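To make the milliHertz remark above concrete: every SL_SAMPLINGRATE_* constant is the Hz value multiplied by 1000. A small usage sketch of the factory function:

// 16-bit mono PCM at 48 kHz.
SLDataFormat_PCM format = webrtc::CreatePCMConfiguration(1, 48000, 16);
// SL_SAMPLINGRATE_48 is defined as 48000000, i.e. 48000 Hz in milliHertz.
static_assert(SL_SAMPLINGRATE_48 == 48000000, "OpenSL rates are milliHertz");
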
@ -0,0 +1,62 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_

#include <SLES/OpenSLES.h>
#include <stddef.h>

#include "rtc_base/checks.h"

namespace webrtc {

// Returns a string representation given an integer SL_RESULT_XXX code.
// The mapping can be found in <SLES/OpenSLES.h>.
const char* GetSLErrorString(size_t code);

// Configures an SL_DATAFORMAT_PCM structure based on native audio parameters.
SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
                                        int sample_rate,
                                        size_t bits_per_sample);

// Helper class for using SLObjectItf interfaces.
template <typename SLType, typename SLDerefType>
class ScopedSLObject {
 public:
  ScopedSLObject() : obj_(nullptr) {}

  ~ScopedSLObject() { Reset(); }

  SLType* Receive() {
    RTC_DCHECK(!obj_);
    return &obj_;
  }

  SLDerefType operator->() { return *obj_; }

  SLType Get() const { return obj_; }

  void Reset() {
    if (obj_) {
      (*obj_)->Destroy(obj_);
      obj_ = nullptr;
    }
  }

 private:
  SLType obj_;
};

typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
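A usage sketch for ScopedSLObjectItf, assuming the standard slCreateEngine() entry point from <SLES/OpenSLES.h>:

webrtc::ScopedSLObjectItf engine;
if (slCreateEngine(engine.Receive(), 0, nullptr, 0, nullptr, nullptr) ==
    SL_RESULT_SUCCESS) {
  // operator->() dereferences the SLObjectItf, so the interface's function
  // pointers can be invoked directly on the wrapper.
  engine->Realize(engine.Get(), SL_BOOLEAN_FALSE);
}
// No explicit Destroy(): ~ScopedSLObject() calls Reset() automatically.
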
@ -0,0 +1,434 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/opensles_player.h"

#include <android/log.h>

#include <memory>

#include "api/array_view.h"
#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/time_utils.h"

#define TAG "OpenSLESPlayer"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)

#define RETURN_ON_ERROR(op, ...)                          \
  do {                                                    \
    SLresult err = (op);                                  \
    if (err != SL_RESULT_SUCCESS) {                       \
      ALOGE("%s failed: %s", #op, GetSLErrorString(err)); \
      return __VA_ARGS__;                                 \
    }                                                     \
  } while (0)

namespace webrtc {

OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
    : audio_manager_(audio_manager),
      audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
      audio_device_buffer_(nullptr),
      initialized_(false),
      playing_(false),
      buffer_index_(0),
      engine_(nullptr),
      player_(nullptr),
      simple_buffer_queue_(nullptr),
      volume_(nullptr),
      last_play_time_(0) {
  ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
  // Use native audio output parameters provided by the audio manager and
  // define the PCM format structure.
  pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
                                       audio_parameters_.sample_rate(),
                                       audio_parameters_.bits_per_sample());
  // Detach from this thread since we want to use the checker to verify calls
  // from the internal audio thread.
  thread_checker_opensles_.Detach();
}

OpenSLESPlayer::~OpenSLESPlayer() {
  ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
  DestroyAudioPlayer();
  DestroyMix();
  engine_ = nullptr;
  RTC_DCHECK(!engine_);
  RTC_DCHECK(!output_mix_.Get());
  RTC_DCHECK(!player_);
  RTC_DCHECK(!simple_buffer_queue_);
  RTC_DCHECK(!volume_);
}

int OpenSLESPlayer::Init() {
  ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (audio_parameters_.channels() == 2) {
    ALOGW("Stereo mode is enabled");
  }
  return 0;
}

int OpenSLESPlayer::Terminate() {
  ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopPlayout();
  return 0;
}

int OpenSLESPlayer::InitPlayout() {
  ALOGD("InitPlayout[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!playing_);
  if (!ObtainEngineInterface()) {
    ALOGE("Failed to obtain SL Engine interface");
    return -1;
  }
  CreateMix();
  initialized_ = true;
  buffer_index_ = 0;
  return 0;
}

int OpenSLESPlayer::StartPlayout() {
  ALOGD("StartPlayout[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!playing_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
  // The number of lower latency audio players is limited, hence we create the
  // audio player in Start() and destroy it in Stop().
  CreateAudioPlayer();
  // Fill up audio buffers to avoid an initial glitch and to ensure that
  // playback starts when the mode is later changed to SL_PLAYSTATE_PLAYING.
  // TODO(henrika): we can save some delay by only making one call to
  // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch.
  last_play_time_ = rtc::Time();
  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
    EnqueuePlayoutData(true);
  }
  // Start streaming data by setting the play state to SL_PLAYSTATE_PLAYING.
  // For a player object, when the object is in the SL_PLAYSTATE_PLAYING
  // state, adding buffers will implicitly start playback.
  RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
  playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
  RTC_DCHECK(playing_);
  return 0;
}

int OpenSLESPlayer::StopPlayout() {
  ALOGD("StopPlayout[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !playing_) {
    return 0;
  }
  // Stop playing by setting the play state to SL_PLAYSTATE_STOPPED.
  RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED), -1);
  // Clear the buffer queue to flush out any remaining data.
  RETURN_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_), -1);
#if RTC_DCHECK_IS_ON
  // Verify that the buffer queue is in fact cleared as it should.
  SLAndroidSimpleBufferQueueState buffer_queue_state;
  (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
  RTC_DCHECK_EQ(0, buffer_queue_state.count);
  RTC_DCHECK_EQ(0, buffer_queue_state.index);
#endif
  // The number of lower latency audio players is limited, hence we create the
  // audio player in Start() and destroy it in Stop().
  DestroyAudioPlayer();
  thread_checker_opensles_.Detach();
  initialized_ = false;
  playing_ = false;
  return 0;
}

int OpenSLESPlayer::SpeakerVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int OpenSLESPlayer::MaxSpeakerVolume(uint32_t& maxVolume) const {
  return -1;
}

int OpenSLESPlayer::MinSpeakerVolume(uint32_t& minVolume) const {
  return -1;
}

int OpenSLESPlayer::SetSpeakerVolume(uint32_t volume) {
  return -1;
}

int OpenSLESPlayer::SpeakerVolume(uint32_t& volume) const {
  return -1;
}

void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  ALOGD("AttachAudioBuffer");
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
  const int sample_rate_hz = audio_parameters_.sample_rate();
  ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
  audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
  const size_t channels = audio_parameters_.channels();
  ALOGD("SetPlayoutChannels(%zu)", channels);
  audio_device_buffer_->SetPlayoutChannels(channels);
  RTC_CHECK(audio_device_buffer_);
  AllocateDataBuffers();
}

void OpenSLESPlayer::AllocateDataBuffers() {
  ALOGD("AllocateDataBuffers");
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!simple_buffer_queue_);
  RTC_CHECK(audio_device_buffer_);
  // Create a modified audio buffer class which allows us to ask for any number
  // of samples (and not only multiples of 10ms) to match the native OpenSL ES
  // buffer size. The native buffer size corresponds to the
  // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio
  // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is
  // recommended to construct audio buffers so that they contain an exact
  // multiple of this number. If so, callbacks will occur at regular intervals,
  // which reduces jitter.
  const size_t buffer_size_in_samples =
      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
  ALOGD("native buffer size: %zu", buffer_size_in_samples);
  ALOGD("native buffer size in ms: %.2f",
        audio_parameters_.GetBufferSizeInMilliseconds());
  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
  // Allocate memory for audio buffers.
  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
    audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
  }
}

bool OpenSLESPlayer::ObtainEngineInterface() {
  ALOGD("ObtainEngineInterface");
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (engine_)
    return true;
  // Get access to (or create if not already existing) the global OpenSL Engine
  // object.
  SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
  if (engine_object == nullptr) {
    ALOGE("Failed to access the global OpenSL engine");
    return false;
  }
  // Get the SL Engine Interface which is implicit.
  RETURN_ON_ERROR(
      (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine_),
      false);
  return true;
}

bool OpenSLESPlayer::CreateMix() {
  ALOGD("CreateMix");
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(engine_);
  if (output_mix_.Get())
    return true;

  // Create the output mix on the engine object. No interfaces will be used.
  RETURN_ON_ERROR((*engine_)->CreateOutputMix(engine_, output_mix_.Receive(), 0,
                                              nullptr, nullptr),
                  false);
  RETURN_ON_ERROR(output_mix_->Realize(output_mix_.Get(), SL_BOOLEAN_FALSE),
                  false);
  return true;
}

void OpenSLESPlayer::DestroyMix() {
  ALOGD("DestroyMix");
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!output_mix_.Get())
    return;
  output_mix_.Reset();
}

bool OpenSLESPlayer::CreateAudioPlayer() {
  ALOGD("CreateAudioPlayer");
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(output_mix_.Get());
  if (player_object_.Get())
    return true;
  RTC_DCHECK(!player_);
  RTC_DCHECK(!simple_buffer_queue_);
  RTC_DCHECK(!volume_);

  // Source: the data source is an Android Simple Buffer Queue.
  SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
      SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
      static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
  SLDataSource audio_source = {&simple_buffer_queue, &pcm_format_};

  // Sink: the data sink is an OutputMix.
  SLDataLocator_OutputMix locator_output_mix = {SL_DATALOCATOR_OUTPUTMIX,
                                                output_mix_.Get()};
  SLDataSink audio_sink = {&locator_output_mix, nullptr};

  // Define interfaces that we intend to use and realize.
  const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
                                         SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
                                          SL_BOOLEAN_TRUE};

  // Create the audio player on the engine interface.
  RETURN_ON_ERROR(
      (*engine_)->CreateAudioPlayer(
          engine_, player_object_.Receive(), &audio_source, &audio_sink,
          arraysize(interface_ids), interface_ids, interface_required),
      false);

  // Use the Android configuration interface to set platform-specific
  // parameters. Should be done before the player is realized.
  SLAndroidConfigurationItf player_config;
  RETURN_ON_ERROR(
      player_object_->GetInterface(player_object_.Get(),
                                   SL_IID_ANDROIDCONFIGURATION, &player_config),
      false);
  // Set audio player configuration to SL_ANDROID_STREAM_VOICE which
  // corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
  SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
  RETURN_ON_ERROR(
      (*player_config)
          ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
                             &stream_type, sizeof(SLint32)),
      false);

  // Realize the audio player object after configuration has been set.
  RETURN_ON_ERROR(
      player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE), false);

  // Get the SLPlayItf interface on the audio player.
  RETURN_ON_ERROR(
      player_object_->GetInterface(player_object_.Get(), SL_IID_PLAY, &player_),
      false);

  // Get the SLAndroidSimpleBufferQueueItf interface on the audio player.
  RETURN_ON_ERROR(
      player_object_->GetInterface(player_object_.Get(), SL_IID_BUFFERQUEUE,
                                   &simple_buffer_queue_),
      false);

  // Register callback method for the Android Simple Buffer Queue interface.
  // This method will be called when the native audio layer needs audio data.
  RETURN_ON_ERROR((*simple_buffer_queue_)
                      ->RegisterCallback(simple_buffer_queue_,
                                         SimpleBufferQueueCallback, this),
                  false);

  // Get the SLVolumeItf interface on the audio player.
  RETURN_ON_ERROR(player_object_->GetInterface(player_object_.Get(),
                                               SL_IID_VOLUME, &volume_),
                  false);

  // TODO(henrika): might not be required to set volume to max here since it
  // seems to be default on most devices. Might be required for unit tests.
  // RETURN_ON_ERROR((*volume_)->SetVolumeLevel(volume_, 0), false);

  return true;
}

void OpenSLESPlayer::DestroyAudioPlayer() {
  ALOGD("DestroyAudioPlayer");
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!player_object_.Get())
    return;
  (*simple_buffer_queue_)
      ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
  player_object_.Reset();
  player_ = nullptr;
  simple_buffer_queue_ = nullptr;
  volume_ = nullptr;
}

// static
void OpenSLESPlayer::SimpleBufferQueueCallback(
    SLAndroidSimpleBufferQueueItf caller,
    void* context) {
  OpenSLESPlayer* stream = reinterpret_cast<OpenSLESPlayer*>(context);
  stream->FillBufferQueue();
}

void OpenSLESPlayer::FillBufferQueue() {
  RTC_DCHECK(thread_checker_opensles_.IsCurrent());
  SLuint32 state = GetPlayState();
  if (state != SL_PLAYSTATE_PLAYING) {
    ALOGW("Buffer callback in non-playing state!");
    return;
  }
  EnqueuePlayoutData(false);
}

void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
  // Check the delta time between two successive callbacks and provide a
  // warning if it becomes very large.
  // TODO(henrika): using 150ms as upper limit but this value is rather random.
  const uint32_t current_time = rtc::Time();
  const uint32_t diff = current_time - last_play_time_;
  if (diff > 150) {
    ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
  }
  last_play_time_ = current_time;
  SLint8* audio_ptr8 =
      reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
  if (silence) {
    RTC_DCHECK(thread_checker_.IsCurrent());
    // Avoid acquiring real audio data from WebRTC and fill the buffer with
    // zeros instead. Used to prime the buffer with silence and to avoid asking
    // for audio data from two different threads.
    memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
  } else {
    RTC_DCHECK(thread_checker_opensles_.IsCurrent());
    // Read audio data from the WebRTC source using the FineAudioBuffer object
    // to adjust for differences in buffer size between WebRTC (10ms) and
    // native OpenSL ES. Use a hardcoded delay estimate since OpenSL ES does
    // not support delay estimation.
    fine_audio_buffer_->GetPlayoutData(
        rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
                                audio_parameters_.frames_per_buffer() *
                                    audio_parameters_.channels()),
        25);
  }
  // Enqueue the decoded audio buffer for playback.
  SLresult err = (*simple_buffer_queue_)
                     ->Enqueue(simple_buffer_queue_, audio_ptr8,
                               audio_parameters_.GetBytesPerBuffer());
  if (SL_RESULT_SUCCESS != err) {
    ALOGE("Enqueue failed: %d", err);
  }
  buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
}

SLuint32 OpenSLESPlayer::GetPlayState() const {
  RTC_DCHECK(player_);
  SLuint32 state;
  SLresult err = (*player_)->GetPlayState(player_, &state);
  if (SL_RESULT_SUCCESS != err) {
    ALOGE("GetPlayState failed: %d", err);
  }
  return state;
}

}  // namespace webrtc
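One consequence of the priming loop in StartPlayout() is that at most kNumOfOpenSLESBuffers native buffers sit inside OpenSL ES at any time. Using the example figures quoted in these files (192-frame native buffers at 48 kHz), the queued backlog works out as follows:

constexpr int kNumBuffers = 2;         // kNumOfOpenSLESBuffers above.
constexpr int kFramesPerBuffer = 192;  // Example native buffer size.
constexpr int kSampleRateHz = 48000;
// Worst-case audio queued inside OpenSL ES once primed: 2 * 4 ms = 8 ms.
constexpr double kQueuedMs =
    1000.0 * kNumBuffers * kFramesPerBuffer / kSampleRateHz;
static_assert(kQueuedMs == 8.0, "two 4 ms buffers are queued");
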
@ -0,0 +1,195 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/opensles_common.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/utility/include/helpers_android.h"

namespace webrtc {

class FineAudioBuffer;

// Implements 16-bit mono PCM audio output support for Android using the
// C based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
// buffers are requested on a dedicated internal thread managed by the OpenSL
// ES layer.
//
// The existing design forces the user to call InitPlayout() after
// StopPlayout() to be able to call StartPlayout() again. This is in line with
// how the Java-based implementation works.
//
// OpenSL ES is a native C API which has no Dalvik-related overhead such as
// garbage collection pauses, and it supports reduced audio output latency.
// If the device doesn't claim this feature but supports API level 9 (Android
// platform version 2.3) or later, then we can still use the OpenSL ES APIs but
// the output latency may be higher.
class OpenSLESPlayer {
 public:
  // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
  // required for lower latency. Beginning with API level 18 (Android 4.3), a
  // buffer count of 1 is sufficient for lower latency. In addition, the buffer
  // size and sample rate must be compatible with the device's native output
  // configuration provided via the audio manager at construction.
  // TODO(henrika): perhaps set this value dynamically based on OS version.
  static const int kNumOfOpenSLESBuffers = 2;

  explicit OpenSLESPlayer(AudioManager* audio_manager);
  ~OpenSLESPlayer();

  int Init();
  int Terminate();

  int InitPlayout();
  bool PlayoutIsInitialized() const { return initialized_; }

  int StartPlayout();
  int StopPlayout();
  bool Playing() const { return playing_; }

  int SpeakerVolumeIsAvailable(bool& available);
  int SetSpeakerVolume(uint32_t volume);
  int SpeakerVolume(uint32_t& volume) const;
  int MaxSpeakerVolume(uint32_t& maxVolume) const;
  int MinSpeakerVolume(uint32_t& minVolume) const;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

 private:
  // These callback methods are called when data is required for playout.
  // They are both called from an internal "OpenSL ES thread" which is not
  // attached to the Dalvik VM.
  static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
                                        void* context);
  void FillBufferQueue();
  // Reads audio data in PCM format using the AudioDeviceBuffer.
  // Can be called both on the main thread (during Start()) and from the
  // internal audio thread while output streaming is active.
  // If the `silence` flag is set, the audio is filled with zeros instead of
  // asking the WebRTC layer for real audio data. This procedure is also known
  // as audio priming.
  void EnqueuePlayoutData(bool silence);

  // Allocate memory for audio buffers which will be used to render audio
  // via the SLAndroidSimpleBufferQueueItf interface.
  void AllocateDataBuffers();

  // Obtains the SL Engine Interface from the existing global Engine object.
  // The interface exposes creation methods of all the OpenSL ES object types.
  // This method defines the `engine_` member variable.
  bool ObtainEngineInterface();

  // Creates/destroys the output mix object.
  bool CreateMix();
  void DestroyMix();

  // Creates/destroys the audio player and the simple buffer queue object.
  // Also creates the volume object.
  bool CreateAudioPlayer();
  void DestroyAudioPlayer();

  SLuint32 GetPlayState() const;

  // Ensures that methods are called from the same thread as this object is
  // created on.
  SequenceChecker thread_checker_;

  // Stores thread ID in first call to SimpleBufferQueueCallback() from the
  // internal non-application thread which is not attached to the Dalvik JVM.
  // Detached during construction of this object.
  SequenceChecker thread_checker_opensles_;

  // Raw pointer to the audio manager injected at construction. Used to cache
  // audio parameters and to access the global SL engine object needed by the
  // ObtainEngineInterface() method. The audio manager outlives any instance of
  // this class.
  AudioManager* audio_manager_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_;

  bool initialized_;
  bool playing_;

  // PCM-type format definition.
  // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
  // 32-bit float representation is needed.
  SLDataFormat_PCM pcm_format_;

  // Queue of audio buffers to be used by the player object for rendering
  // audio.
  std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];

  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
  // in chunks of 10ms. It then allows for this data to be pulled in
  // a finer or coarser granularity. I.e., by interacting with this class
  // instead of directly with the AudioDeviceBuffer one can ask for any number
  // of audio data samples.
  // Example: the native buffer size can be 192 audio frames at a 48kHz sample
  // rate. WebRTC will provide 480 audio frames per 10ms but OpenSL ES asks for
  // 192 in each callback (one every 4 ms). This class can then ask for 192 and
  // the FineAudioBuffer will ask WebRTC for new data only about every second
  // callback and also cache non-utilized audio.
  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;

  // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
  // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
  int buffer_index_;

  // This interface exposes creation methods for all the OpenSL ES object
  // types. It is the OpenSL ES API entry point.
  SLEngineItf engine_;

  // Output mix object to be used by the player object.
  webrtc::ScopedSLObjectItf output_mix_;

  // The audio player media object plays out audio to the speakers. It also
  // supports volume control.
  webrtc::ScopedSLObjectItf player_object_;

  // This interface is supported on the audio player and it controls the state
  // of the audio player.
  SLPlayItf player_;

  // The Android Simple Buffer Queue interface is supported on the audio player
  // and it provides methods to send audio data from the source to the audio
  // player for rendering.
  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;

  // This interface exposes controls for manipulating the object's audio volume
  // properties. This interface is supported on the Audio Player object.
  SLVolumeItf volume_;

  // Last time the OpenSL ES layer asked for audio data to play out.
  uint32_t last_play_time_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
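The FineAudioBuffer example in the comment above reduces to simple integer bookkeeping: at 48 kHz a 10 ms WebRTC chunk holds 480 frames while each OpenSL ES callback consumes 192, so every chunk covers two full callbacks with 96 frames left over for the next one:

constexpr int kFramesPer10ms = 480;      // WebRTC delivery size at 48 kHz.
constexpr int kFramesPerCallback = 192;  // Native OpenSL ES request size.
static_assert(kFramesPer10ms / kFramesPerCallback == 2,
              "each 10 ms chunk serves two whole callbacks");
static_assert(kFramesPer10ms % kFramesPerCallback == 96,
              "96 frames are cached for the following callback");
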
@ -0,0 +1,431 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/android/opensles_recorder.h"

#include <android/log.h>

#include <memory>

#include "api/array_view.h"
#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/time_utils.h"

#define TAG "OpenSLESRecorder"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)

#define LOG_ON_ERROR(op)                                    \
  [](SLresult err) {                                        \
    if (err != SL_RESULT_SUCCESS) {                         \
      ALOGE("%s:%d %s failed: %s", __FILE__, __LINE__, #op, \
            GetSLErrorString(err));                         \
      return true;                                          \
    }                                                       \
    return false;                                           \
  }(op)

namespace webrtc {

OpenSLESRecorder::OpenSLESRecorder(AudioManager* audio_manager)
    : audio_manager_(audio_manager),
      audio_parameters_(audio_manager->GetRecordAudioParameters()),
      audio_device_buffer_(nullptr),
      initialized_(false),
      recording_(false),
      engine_(nullptr),
      recorder_(nullptr),
      simple_buffer_queue_(nullptr),
      buffer_index_(0),
      last_rec_time_(0) {
  ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
  // Detach from this thread since we want to use the checker to verify calls
  // from the internal audio thread.
  thread_checker_opensles_.Detach();
  // Use native audio output parameters provided by the audio manager and
  // define the PCM format structure.
  pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
                                       audio_parameters_.sample_rate(),
                                       audio_parameters_.bits_per_sample());
}

OpenSLESRecorder::~OpenSLESRecorder() {
  ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  Terminate();
  DestroyAudioRecorder();
  engine_ = nullptr;
  RTC_DCHECK(!engine_);
  RTC_DCHECK(!recorder_);
  RTC_DCHECK(!simple_buffer_queue_);
}

int OpenSLESRecorder::Init() {
  ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (audio_parameters_.channels() == 2) {
    ALOGD("Stereo mode is enabled");
  }
  return 0;
}

int OpenSLESRecorder::Terminate() {
  ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  StopRecording();
  return 0;
}

int OpenSLESRecorder::InitRecording() {
  ALOGD("InitRecording[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK(!recording_);
  if (!ObtainEngineInterface()) {
    ALOGE("Failed to obtain SL Engine interface");
    return -1;
  }
  CreateAudioRecorder();
  initialized_ = true;
  buffer_index_ = 0;
  return 0;
}

int OpenSLESRecorder::StartRecording() {
  ALOGD("StartRecording[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!recording_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
  // Add buffers to the queue before changing state to SL_RECORDSTATE_RECORDING
  // to ensure that recording starts as soon as the state is modified. On some
  // devices, SLAndroidSimpleBufferQueue::Clear() used in Stop() does not flush
  // the buffers as intended and we therefore check the number of buffers
  // already queued first. Enqueue() can return SL_RESULT_BUFFER_INSUFFICIENT
  // otherwise.
  int num_buffers_in_queue = GetBufferCount();
  for (int i = 0; i < kNumOfOpenSLESBuffers - num_buffers_in_queue; ++i) {
    if (!EnqueueAudioBuffer()) {
      recording_ = false;
      return -1;
    }
  }
  num_buffers_in_queue = GetBufferCount();
  RTC_DCHECK_EQ(num_buffers_in_queue, kNumOfOpenSLESBuffers);
  LogBufferState();
  // Start audio recording by changing the state to SL_RECORDSTATE_RECORDING.
  // Given that buffers are already enqueued, recording should start at once.
  // The macro returns -1 if recording fails to start.
  last_rec_time_ = rtc::Time();
  if (LOG_ON_ERROR(
          (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING))) {
    return -1;
  }
  recording_ = (GetRecordState() == SL_RECORDSTATE_RECORDING);
  RTC_DCHECK(recording_);
  return 0;
}

int OpenSLESRecorder::StopRecording() {
  ALOGD("StopRecording[tid=%d]", rtc::CurrentThreadId());
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!initialized_ || !recording_) {
    return 0;
  }
  // Stop recording by setting the record state to SL_RECORDSTATE_STOPPED.
  if (LOG_ON_ERROR(
          (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_STOPPED))) {
    return -1;
  }
  // Clear the buffer queue to get rid of old data when resuming recording.
  if (LOG_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_))) {
    return -1;
  }
  thread_checker_opensles_.Detach();
  initialized_ = false;
  recording_ = false;
  return 0;
}

void OpenSLESRecorder::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
  ALOGD("AttachAudioBuffer");
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_CHECK(audio_buffer);
  audio_device_buffer_ = audio_buffer;
  // Ensure that the audio device buffer is informed about the native sample
  // rate used on the recording side.
  const int sample_rate_hz = audio_parameters_.sample_rate();
  ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
  // Ensure that the audio device buffer is informed about the number of
  // channels preferred by the OS on the recording side.
  const size_t channels = audio_parameters_.channels();
  ALOGD("SetRecordingChannels(%zu)", channels);
  audio_device_buffer_->SetRecordingChannels(channels);
  // Allocate memory for internal data buffers given the existing audio
  // parameters.
  AllocateDataBuffers();
}

int OpenSLESRecorder::EnableBuiltInAEC(bool enable) {
  ALOGD("EnableBuiltInAEC(%d)", enable);
  RTC_DCHECK(thread_checker_.IsCurrent());
  ALOGE("Not implemented");
  return 0;
}

int OpenSLESRecorder::EnableBuiltInAGC(bool enable) {
  ALOGD("EnableBuiltInAGC(%d)", enable);
  RTC_DCHECK(thread_checker_.IsCurrent());
  ALOGE("Not implemented");
  return 0;
}

int OpenSLESRecorder::EnableBuiltInNS(bool enable) {
  ALOGD("EnableBuiltInNS(%d)", enable);
  RTC_DCHECK(thread_checker_.IsCurrent());
  ALOGE("Not implemented");
  return 0;
}

bool OpenSLESRecorder::ObtainEngineInterface() {
  ALOGD("ObtainEngineInterface");
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (engine_)
    return true;
  // Get access to (or create if not already existing) the global OpenSL Engine
  // object.
  SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
  if (engine_object == nullptr) {
    ALOGE("Failed to access the global OpenSL engine");
    return false;
  }
  // Get the SL Engine Interface which is implicit.
  if (LOG_ON_ERROR(
          (*engine_object)
              ->GetInterface(engine_object, SL_IID_ENGINE, &engine_))) {
    return false;
  }
  return true;
}

bool OpenSLESRecorder::CreateAudioRecorder() {
  ALOGD("CreateAudioRecorder");
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (recorder_object_.Get())
    return true;
  RTC_DCHECK(!recorder_);
  RTC_DCHECK(!simple_buffer_queue_);

  // Audio source configuration.
  SLDataLocator_IODevice mic_locator = {SL_DATALOCATOR_IODEVICE,
                                        SL_IODEVICE_AUDIOINPUT,
                                        SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
  SLDataSource audio_source = {&mic_locator, NULL};

  // Audio sink configuration.
  SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
      SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
      static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
  SLDataSink audio_sink = {&buffer_queue, &pcm_format_};

  // Create the audio recorder object (requires the RECORD_AUDIO permission).
  // Do not realize the recorder yet. Set the configuration first.
  const SLInterfaceID interface_id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                                        SL_IID_ANDROIDCONFIGURATION};
  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
  if (LOG_ON_ERROR((*engine_)->CreateAudioRecorder(
          engine_, recorder_object_.Receive(), &audio_source, &audio_sink,
          arraysize(interface_id), interface_id, interface_required))) {
    return false;
  }

  // Configure the audio recorder (before it is realized).
  SLAndroidConfigurationItf recorder_config;
  if (LOG_ON_ERROR((recorder_object_->GetInterface(recorder_object_.Get(),
                                                   SL_IID_ANDROIDCONFIGURATION,
                                                   &recorder_config)))) {
    return false;
  }

  // Uses the default microphone tuned for audio communication.
  // Note that SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION leads to a fast
  // track but also excludes usage of required effects like AEC, AGC and NS.
  // SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION
  SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
  if (LOG_ON_ERROR(((*recorder_config)
                        ->SetConfiguration(recorder_config,
                                           SL_ANDROID_KEY_RECORDING_PRESET,
                                           &stream_type, sizeof(SLint32))))) {
    return false;
  }

  // The audio recorder can now be realized (in synchronous mode).
  if (LOG_ON_ERROR((recorder_object_->Realize(recorder_object_.Get(),
                                              SL_BOOLEAN_FALSE)))) {
    return false;
  }

  // Get the implicit recorder interface (SL_IID_RECORD).
  if (LOG_ON_ERROR((recorder_object_->GetInterface(
          recorder_object_.Get(), SL_IID_RECORD, &recorder_)))) {
    return false;
  }

  // Get the simple buffer queue interface (SL_IID_ANDROIDSIMPLEBUFFERQUEUE).
  // It was explicitly requested.
  if (LOG_ON_ERROR((recorder_object_->GetInterface(
          recorder_object_.Get(), SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
          &simple_buffer_queue_)))) {
    return false;
  }

  // Register the input callback for the simple buffer queue.
  // This callback will be called when receiving new data from the device.
  if (LOG_ON_ERROR(((*simple_buffer_queue_)
                        ->RegisterCallback(simple_buffer_queue_,
                                           SimpleBufferQueueCallback, this)))) {
    return false;
  }
  return true;
}

void OpenSLESRecorder::DestroyAudioRecorder() {
  ALOGD("DestroyAudioRecorder");
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (!recorder_object_.Get())
    return;
  (*simple_buffer_queue_)
      ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
  recorder_object_.Reset();
  recorder_ = nullptr;
  simple_buffer_queue_ = nullptr;
}

void OpenSLESRecorder::SimpleBufferQueueCallback(
    SLAndroidSimpleBufferQueueItf buffer_queue,
    void* context) {
  OpenSLESRecorder* stream = static_cast<OpenSLESRecorder*>(context);
  stream->ReadBufferQueue();
}

void OpenSLESRecorder::AllocateDataBuffers() {
  ALOGD("AllocateDataBuffers");
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DCHECK(!simple_buffer_queue_);
  RTC_CHECK(audio_device_buffer_);
  // Create a modified audio buffer class which allows us to deliver any number
  // of samples (and not only multiples of 10ms) to match the native audio unit
  // buffer size.
  ALOGD("frames per native buffer: %zu", audio_parameters_.frames_per_buffer());
  ALOGD("frames per 10ms buffer: %zu",
        audio_parameters_.frames_per_10ms_buffer());
  ALOGD("bytes per native buffer: %zu", audio_parameters_.GetBytesPerBuffer());
  ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
  RTC_DCHECK(audio_device_buffer_);
  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
  // Allocate queue of audio buffers that stores recorded audio samples.
  const int buffer_size_samples =
      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
  audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
    audio_buffers_[i].reset(new SLint16[buffer_size_samples]);
  }
}

void OpenSLESRecorder::ReadBufferQueue() {
  RTC_DCHECK(thread_checker_opensles_.IsCurrent());
  SLuint32 state = GetRecordState();
  if (state != SL_RECORDSTATE_RECORDING) {
    ALOGW("Buffer callback in non-recording state!");
    return;
  }
  // Check the delta time between two successive callbacks and provide a
  // warning if it becomes very large.
  // TODO(henrika): using 150ms as upper limit but this value is rather random.
  const uint32_t current_time = rtc::Time();
  const uint32_t diff = current_time - last_rec_time_;
  if (diff > 150) {
    ALOGW("Bad OpenSL ES record timing, dT=%u [ms]", diff);
  }
  last_rec_time_ = current_time;
  // Send recorded audio data to the WebRTC sink.
  // TODO(henrika): fix delay estimates. It is OK to use fixed values for now
  // since there is no support to turn off built-in EC in combination with
  // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
  // these estimates) will never be active.
  fine_audio_buffer_->DeliverRecordedData(
      rtc::ArrayView<const int16_t>(
          audio_buffers_[buffer_index_].get(),
          audio_parameters_.frames_per_buffer() * audio_parameters_.channels()),
      25);
  // Enqueue the utilized audio buffer and use it for recording again.
  EnqueueAudioBuffer();
}

bool OpenSLESRecorder::EnqueueAudioBuffer() {
  SLresult err =
      (*simple_buffer_queue_)
          ->Enqueue(
              simple_buffer_queue_,
              reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
              audio_parameters_.GetBytesPerBuffer());
  if (SL_RESULT_SUCCESS != err) {
    ALOGE("Enqueue failed: %s", GetSLErrorString(err));
    return false;
  }
  buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
  return true;
}

SLuint32 OpenSLESRecorder::GetRecordState() const {
  RTC_DCHECK(recorder_);
  SLuint32 state;
  SLresult err = (*recorder_)->GetRecordState(recorder_, &state);
  if (SL_RESULT_SUCCESS != err) {
    ALOGE("GetRecordState failed: %s", GetSLErrorString(err));
  }
  return state;
}

SLAndroidSimpleBufferQueueState OpenSLESRecorder::GetBufferQueueState() const {
  RTC_DCHECK(simple_buffer_queue_);
  // state.count: Number of buffers currently in the queue.
  // state.index: Index of the currently filling buffer. This is a linear index
  // that keeps a cumulative count of the number of buffers recorded.
  SLAndroidSimpleBufferQueueState state;
  SLresult err =
      (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &state);
  if (SL_RESULT_SUCCESS != err) {
    ALOGE("GetState failed: %s", GetSLErrorString(err));
  }
  return state;
}

void OpenSLESRecorder::LogBufferState() const {
  SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
  ALOGD("state.count:%d state.index:%d", state.count, state.index);
}

SLuint32 OpenSLESRecorder::GetBufferCount() {
  SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
  return state.count;
}

}  // namespace webrtc
@ -0,0 +1,193 @@
|
|||
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/opensles_common.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/utility/include/helpers_android.h"

namespace webrtc {

class FineAudioBuffer;

// Implements 16-bit mono PCM audio input support for Android using the
// C-based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread. Recorded audio
// buffers are provided on a dedicated internal thread managed by the OpenSL
// ES layer.
//
// The existing design forces the user to call InitRecording() after
// StopRecording() to be able to call StartRecording() again. This is in line
// with how the Java-based implementation works.
//
// As of API level 21, lower latency audio input is supported on select
// devices. To take advantage of this feature, first confirm that lower
// latency output is available. The capability for lower latency output is a
// prerequisite for the lower latency input feature. Then, create an
// AudioRecorder with the same sample rate and buffer size as would be used
// for output. OpenSL ES interfaces for input effects preclude the lower
// latency path.
// See https://developer.android.com/ndk/guides/audio/opensl-prog-notes.html
// for more details.
class OpenSLESRecorder {
 public:
  // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
  // required for lower latency. Beginning with API level 18 (Android 4.3), a
  // buffer count of 1 is sufficient for lower latency. In addition, the buffer
  // size and sample rate must be compatible with the device's native input
  // configuration provided via the audio manager at construction.
  // TODO(henrika): perhaps set this value dynamically based on OS version.
  static const int kNumOfOpenSLESBuffers = 2;

  explicit OpenSLESRecorder(AudioManager* audio_manager);
  ~OpenSLESRecorder();

  int Init();
  int Terminate();

  int InitRecording();
  bool RecordingIsInitialized() const { return initialized_; }

  int StartRecording();
  int StopRecording();
  bool Recording() const { return recording_; }

  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer);

  // TODO(henrika): add support using OpenSL ES APIs when available.
  int EnableBuiltInAEC(bool enable);
  int EnableBuiltInAGC(bool enable);
  int EnableBuiltInNS(bool enable);

 private:
  // Obtains the SL Engine Interface from the existing global Engine object.
  // The interface exposes creation methods of all the OpenSL ES object types.
  // This method defines the `engine_` member variable.
  bool ObtainEngineInterface();

  // Creates/destroys the audio recorder and the simple-buffer queue object.
  bool CreateAudioRecorder();
  void DestroyAudioRecorder();

  // Allocates memory for the audio buffers which will be used to capture
  // audio via the SLAndroidSimpleBufferQueueItf interface.
  void AllocateDataBuffers();

  // These callback methods are called when data has been written to the input
  // buffer queue. They are both called from an internal "OpenSL ES thread"
  // which is not attached to the Dalvik VM.
  static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
                                        void* context);
  void ReadBufferQueue();

  // Wraps calls to SLAndroidSimpleBufferQueueItf::Enqueue() and can be called
  // both on the main thread (but before recording has started) and from the
  // internal audio thread while input streaming is active. It uses
  // `simple_buffer_queue_` but no lock is needed since the initial calls from
  // the main thread and the native callback thread are mutually exclusive.
  bool EnqueueAudioBuffer();

  // Returns the current recorder state.
  SLuint32 GetRecordState() const;

  // Returns the current buffer queue state.
  SLAndroidSimpleBufferQueueState GetBufferQueueState() const;

  // Number of buffers currently in the queue.
  SLuint32 GetBufferCount();

  // Prints a log message of the current queue state. Can be used for
  // debugging purposes.
  void LogBufferState() const;

  // Ensures that methods are called from the same thread as this object is
  // created on.
  SequenceChecker thread_checker_;

  // Stores the thread ID in the first call to SimpleBufferQueueCallback()
  // from the internal non-application thread which is not attached to the
  // Dalvik JVM. Detached during construction of this object.
  SequenceChecker thread_checker_opensles_;

  // Raw pointer to the audio manager injected at construction. Used to cache
  // audio parameters and to access the global SL engine object needed by the
  // ObtainEngineInterface() method. The audio manager outlives any instance of
  // this class.
  AudioManager* const audio_manager_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
  AudioDeviceBuffer* audio_device_buffer_;

  // PCM-type format definition.
  // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
  // 32-bit float representation is needed.
  SLDataFormat_PCM pcm_format_;

  bool initialized_;
  bool recording_;

  // This interface exposes creation methods for all the OpenSL ES object
  // types. It is the OpenSL ES API entry point.
  SLEngineItf engine_;

  // The audio recorder media object records audio to the destination
  // specified by the data sink, capturing it from the input specified by the
  // data source.
  webrtc::ScopedSLObjectItf recorder_object_;

  // This interface is supported on the audio recorder object and it controls
  // the state of the audio recorder.
  SLRecordItf recorder_;

  // The Android Simple Buffer Queue interface is supported on the audio
  // recorder. For recording, an app should enqueue empty buffers. When a
  // registered callback sends notification that the system has finished
  // writing data to the buffer, the app can read the buffer.
  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;

  // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
  // chunks of audio.
  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;

  // Queue of audio buffers to be used by the recorder object for capturing
  // audio. They are used in a round-robin way and the size of each buffer is
  // given by AudioParameters::frames_per_buffer(), i.e., it corresponds to
  // the native OpenSL ES buffer size.
  std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;

  // Keeps track of the active audio buffer 'n' in the audio_buffers_[n]
  // queue. Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
  int buffer_index_;

  // Last time the OpenSL ES layer delivered recorded audio data.
  uint32_t last_rec_time_;
};

} // namespace webrtc

#endif // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_

@ -0,0 +1,539 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/audio_device_buffer.h"

#include <string.h>

#include <cmath>
#include <cstddef>
#include <cstdint>

#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {

static const char kTimerQueueName[] = "AudioDeviceBufferTimer";

// Time between two successive calls to LogStats().
static const size_t kTimerIntervalInSeconds = 10;
static const size_t kTimerIntervalInMilliseconds =
    kTimerIntervalInSeconds * rtc::kNumMillisecsPerSec;
// Min time required to qualify an audio session as a "call". If playout or
// recording has been active for less than this time we will not store any
// logs or UMA stats but instead consider the call as too short.
static const size_t kMinValidCallTimeTimeInSeconds = 10;
static const size_t kMinValidCallTimeTimeInMilliseconds =
    kMinValidCallTimeTimeInSeconds * rtc::kNumMillisecsPerSec;
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
static const double k2Pi = 6.28318530717959;
#endif

AudioDeviceBuffer::AudioDeviceBuffer(TaskQueueFactory* task_queue_factory,
                                     bool create_detached)
    : task_queue_(task_queue_factory->CreateTaskQueue(
          kTimerQueueName,
          TaskQueueFactory::Priority::NORMAL)),
      audio_transport_cb_(nullptr),
      rec_sample_rate_(0),
      play_sample_rate_(0),
      rec_channels_(0),
      play_channels_(0),
      playing_(false),
      recording_(false),
      typing_status_(false),
      play_delay_ms_(0),
      rec_delay_ms_(0),
      num_stat_reports_(0),
      last_timer_task_time_(0),
      rec_stat_count_(0),
      play_stat_count_(0),
      play_start_time_(0),
      only_silence_recorded_(true),
      log_stats_(false) {
  RTC_LOG(LS_INFO) << "AudioDeviceBuffer::ctor";
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
  phase_ = 0.0;
  RTC_LOG(LS_WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!";
#endif
  if (create_detached) {
    main_thread_checker_.Detach();
  }
}

AudioDeviceBuffer::~AudioDeviceBuffer() {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  RTC_DCHECK(!playing_);
  RTC_DCHECK(!recording_);
  RTC_LOG(LS_INFO) << "AudioDeviceBuffer::~dtor";
}

int32_t AudioDeviceBuffer::RegisterAudioCallback(
    AudioTransport* audio_callback) {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  if (playing_ || recording_) {
    RTC_LOG(LS_ERROR) << "Failed to set audio transport since media was active";
    return -1;
  }
  audio_transport_cb_ = audio_callback;
  return 0;
}

void AudioDeviceBuffer::StartPlayout() {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  // TODO(henrika): allow for usage of DCHECK(!playing_) here instead. Today
  // the ADM allows calling Start(), Start() by ignoring the second call but
  // it makes more sense to only allow one call.
  if (playing_) {
    return;
  }
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  // Clear members tracking playout stats and do it on the task queue.
  task_queue_.PostTask([this] { ResetPlayStats(); });
  // Start a periodic timer based on the task queue if not already done by the
  // recording side.
  if (!recording_) {
    StartPeriodicLogging();
  }
  const int64_t now_time = rtc::TimeMillis();
  // Clear members that are only touched on the main (creating) thread.
  play_start_time_ = now_time;
  playing_ = true;
}

void AudioDeviceBuffer::StartRecording() {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  if (recording_) {
    return;
  }
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  // Clear members tracking recording stats and do it on the task queue.
  task_queue_.PostTask([this] { ResetRecStats(); });
  // Start a periodic timer based on the task queue if not already done by the
  // playout side.
  if (!playing_) {
    StartPeriodicLogging();
  }
  // Clear members that will be touched on the main (creating) thread.
  rec_start_time_ = rtc::TimeMillis();
  recording_ = true;
  // And finally a member which can be modified on the native audio thread.
  // It is safe to do so since we know by design that the owning ADM has not
  // yet started the native audio recording.
  only_silence_recorded_ = true;
}

void AudioDeviceBuffer::StopPlayout() {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  if (!playing_) {
    return;
  }
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  playing_ = false;
  // Stop periodic logging if no more media is active.
  if (!recording_) {
    StopPeriodicLogging();
  }
  RTC_LOG(LS_INFO) << "total playout time: "
                   << rtc::TimeSince(play_start_time_);
}

void AudioDeviceBuffer::StopRecording() {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  if (!recording_) {
    return;
  }
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  recording_ = false;
  // Stop periodic logging if no more media is active.
  if (!playing_) {
    StopPeriodicLogging();
  }
  // Add a UMA histogram to keep track of the case when only zeros have been
  // recorded. Measurements (max of absolute level) are taken twice per second,
  // which means that if e.g. 10 seconds of audio has been recorded, a total of
  // 20 level estimates must all be identical to zero to trigger the histogram.
  // `only_silence_recorded_` can only be cleared on the native audio thread
  // that drives audio capture but we know by design that the audio has stopped
  // when this method is called, hence there should not be any conflicts. Also,
  // the fact that `only_silence_recorded_` can be affected during the complete
  // call makes chances of conflicts with potentially one last callback very
  // small.
  const size_t time_since_start = rtc::TimeSince(rec_start_time_);
  if (time_since_start > kMinValidCallTimeTimeInMilliseconds) {
    const int only_zeros = static_cast<int>(only_silence_recorded_);
    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.RecordedOnlyZeros", only_zeros);
    RTC_LOG(LS_INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): "
                     << only_zeros;
  }
  RTC_LOG(LS_INFO) << "total recording time: " << time_since_start;
}

int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) {
  RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << fsHz << ")";
  rec_sample_rate_ = fsHz;
  return 0;
}

int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) {
  RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << fsHz << ")";
  play_sample_rate_ = fsHz;
  return 0;
}

uint32_t AudioDeviceBuffer::RecordingSampleRate() const {
  return rec_sample_rate_;
}

uint32_t AudioDeviceBuffer::PlayoutSampleRate() const {
  return play_sample_rate_;
}

int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) {
  RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
  rec_channels_ = channels;
  return 0;
}

int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) {
  RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
  play_channels_ = channels;
  return 0;
}

size_t AudioDeviceBuffer::RecordingChannels() const {
  return rec_channels_;
}

size_t AudioDeviceBuffer::PlayoutChannels() const {
  return play_channels_;
}

int32_t AudioDeviceBuffer::SetTypingStatus(bool typing_status) {
  typing_status_ = typing_status;
  return 0;
}

void AudioDeviceBuffer::SetVQEData(int play_delay_ms, int rec_delay_ms) {
  play_delay_ms_ = play_delay_ms;
  rec_delay_ms_ = rec_delay_ms;
}

int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
                                             size_t samples_per_channel) {
  return SetRecordedBuffer(audio_buffer, samples_per_channel, absl::nullopt);
}

int32_t AudioDeviceBuffer::SetRecordedBuffer(
    const void* audio_buffer,
    size_t samples_per_channel,
    absl::optional<int64_t> capture_timestamp_ns) {
  // Copy the complete input buffer to the local buffer.
  const size_t old_size = rec_buffer_.size();
  rec_buffer_.SetData(static_cast<const int16_t*>(audio_buffer),
                      rec_channels_ * samples_per_channel);
  // Keep track of the size of the recording buffer. Only updated when the
  // size changes, which is a rare event.
  if (old_size != rec_buffer_.size()) {
    RTC_LOG(LS_INFO) << "Size of recording buffer: " << rec_buffer_.size();
  }

  if (capture_timestamp_ns) {
    int64_t align_offsync_estimation_time = rtc::TimeMicros();
    if (align_offsync_estimation_time -
            rtc::TimestampAligner::kMinFrameIntervalUs >
        align_offsync_estimation_time_) {
      align_offsync_estimation_time_ = align_offsync_estimation_time;
      capture_timestamp_ns_ =
          rtc::kNumNanosecsPerMicrosec *
          timestamp_aligner_.TranslateTimestamp(
              *capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec,
              align_offsync_estimation_time);
    } else {
      // The timestamp aligner is designed to prevent timestamps that are too
      // similar, and produces warnings if it is called too often. We do not
      // care about that here, so we use this workaround. If we were to call
      // the aligner within a millisecond, we instead call this overload,
      // which does not update the clock offset estimation. This gets us
      // timestamps without generating warnings, but could produce two
      // timestamps within a millisecond.
      capture_timestamp_ns_ =
          rtc::kNumNanosecsPerMicrosec *
          timestamp_aligner_.TranslateTimestamp(*capture_timestamp_ns /
                                                rtc::kNumNanosecsPerMicrosec);
    }
  }
  // Derive a new level value twice per second and check if it is non-zero.
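  // (With 10 ms buffers, this method runs ~100 times per second, so 50
  // callbacks correspond to roughly 500 ms between level measurements; the
  // cadence assumption follows the Stats documentation in the header.)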
  int16_t max_abs = 0;
  RTC_DCHECK_LT(rec_stat_count_, 50);
  if (++rec_stat_count_ >= 50) {
    // Returns the largest absolute value in a signed 16-bit vector.
    max_abs = WebRtcSpl_MaxAbsValueW16(rec_buffer_.data(), rec_buffer_.size());
    rec_stat_count_ = 0;
    // Set `only_silence_recorded_` to false as soon as at least one detection
    // of a non-zero audio packet is found. It can only be restored to true
    // again by restarting the call.
    if (max_abs > 0) {
      only_silence_recorded_ = false;
    }
  }
  // Update recording stats which are used as base for periodic logging of the
  // audio input state.
  UpdateRecStats(max_abs, samples_per_channel);
  return 0;
}

int32_t AudioDeviceBuffer::DeliverRecordedData() {
  if (!audio_transport_cb_) {
    RTC_LOG(LS_WARNING) << "Invalid audio transport";
    return 0;
  }
  const size_t frames = rec_buffer_.size() / rec_channels_;
  const size_t bytes_per_frame = rec_channels_ * sizeof(int16_t);
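  // (For example, 10 ms of stereo audio at 48 kHz gives rec_buffer_.size() ==
  // 960 interleaved samples, hence frames == 480 and bytes_per_frame == 4.
  // Numbers are illustrative only.)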
  uint32_t new_mic_level_dummy = 0;
  uint32_t total_delay_ms = play_delay_ms_ + rec_delay_ms_;
  int32_t res = audio_transport_cb_->RecordedDataIsAvailable(
      rec_buffer_.data(), frames, bytes_per_frame, rec_channels_,
      rec_sample_rate_, total_delay_ms, 0, 0, typing_status_,
      new_mic_level_dummy, capture_timestamp_ns_);
  if (res == -1) {
    RTC_LOG(LS_ERROR) << "RecordedDataIsAvailable() failed";
  }
  return 0;
}

int32_t AudioDeviceBuffer::RequestPlayoutData(size_t samples_per_channel) {
  TRACE_EVENT1("webrtc", "AudioDeviceBuffer::RequestPlayoutData",
               "samples_per_channel", samples_per_channel);

  // The consumer can change the requested size on the fly and we therefore
  // resize the buffer accordingly. Also takes place at the first call to this
  // method.
  const size_t total_samples = play_channels_ * samples_per_channel;
  if (play_buffer_.size() != total_samples) {
    play_buffer_.SetSize(total_samples);
    RTC_LOG(LS_INFO) << "Size of playout buffer: " << play_buffer_.size();
  }

  size_t num_samples_out(0);
  // It is currently supported to start playout without a valid audio
  // transport object. Leads to a warning and silence.
  if (!audio_transport_cb_) {
    RTC_LOG(LS_WARNING) << "Invalid audio transport";
    return 0;
  }

  // Retrieve new 16-bit PCM audio data using the audio transport instance.
  int64_t elapsed_time_ms = -1;
  int64_t ntp_time_ms = -1;
  const size_t bytes_per_frame = play_channels_ * sizeof(int16_t);
  uint32_t res = audio_transport_cb_->NeedMorePlayData(
      samples_per_channel, bytes_per_frame, play_channels_, play_sample_rate_,
      play_buffer_.data(), num_samples_out, &elapsed_time_ms, &ntp_time_ms);
  if (res != 0) {
    RTC_LOG(LS_ERROR) << "NeedMorePlayData() failed";
  }

  // Derive a new level value twice per second.
  int16_t max_abs = 0;
  RTC_DCHECK_LT(play_stat_count_, 50);
  if (++play_stat_count_ >= 50) {
    // Returns the largest absolute value in a signed 16-bit vector.
    max_abs =
        WebRtcSpl_MaxAbsValueW16(play_buffer_.data(), play_buffer_.size());
    play_stat_count_ = 0;
  }
  // Update playout stats which are used as base for periodic logging of the
  // audio output state.
  UpdatePlayStats(max_abs, num_samples_out / play_channels_);
  return static_cast<int32_t>(num_samples_out / play_channels_);
}

int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
  RTC_DCHECK_GT(play_buffer_.size(), 0);
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
  const double phase_increment =
      k2Pi * 440.0 / static_cast<double>(play_sample_rate_);
  int16_t* destination_r = reinterpret_cast<int16_t*>(audio_buffer);
  if (play_channels_ == 1) {
    for (size_t i = 0; i < play_buffer_.size(); ++i) {
      destination_r[i] = static_cast<int16_t>((sin(phase_) * (1 << 14)));
      phase_ += phase_increment;
    }
  } else if (play_channels_ == 2) {
    for (size_t i = 0; i < play_buffer_.size() / 2; ++i) {
      destination_r[2 * i] = destination_r[2 * i + 1] =
          static_cast<int16_t>((sin(phase_) * (1 << 14)));
      phase_ += phase_increment;
    }
  }
#else
  memcpy(audio_buffer, play_buffer_.data(),
         play_buffer_.size() * sizeof(int16_t));
#endif
  // Return samples per channel or number of frames.
  return static_cast<int32_t>(play_buffer_.size() / play_channels_);
}

void AudioDeviceBuffer::StartPeriodicLogging() {
  task_queue_.PostTask([this] { LogStats(AudioDeviceBuffer::LOG_START); });
}

void AudioDeviceBuffer::StopPeriodicLogging() {
  task_queue_.PostTask([this] { LogStats(AudioDeviceBuffer::LOG_STOP); });
}

void AudioDeviceBuffer::LogStats(LogState state) {
  RTC_DCHECK_RUN_ON(&task_queue_);
  int64_t now_time = rtc::TimeMillis();

  if (state == AudioDeviceBuffer::LOG_START) {
    // Reset counters at start. We will not add any logging in this state but
    // the timer will be started by posting a new (delayed) task.
    num_stat_reports_ = 0;
    last_timer_task_time_ = now_time;
    log_stats_ = true;
  } else if (state == AudioDeviceBuffer::LOG_STOP) {
    // Stop logging and posting new tasks.
    log_stats_ = false;
  } else if (state == AudioDeviceBuffer::LOG_ACTIVE) {
    // Keep logging unless logging was disabled while the task was posted.
  }

  // Avoid adding more logs since we are in STOP mode.
  if (!log_stats_) {
    return;
  }

  int64_t next_callback_time = now_time + kTimerIntervalInMilliseconds;
  int64_t time_since_last = rtc::TimeDiff(now_time, last_timer_task_time_);
  last_timer_task_time_ = now_time;

  Stats stats;
  {
    MutexLock lock(&lock_);
    stats = stats_;
    stats_.max_rec_level = 0;
    stats_.max_play_level = 0;
  }

  // Cache current sample rates from the atomic members.
  const uint32_t rec_sample_rate = rec_sample_rate_;
  const uint32_t play_sample_rate = play_sample_rate_;

  // Log the latest statistics but skip the first two rounds just after state
  // was set to LOG_START to ensure that we have at least one full stable
  // 10-second interval for sample-rate estimation. Hence, the first printed
  // log will be after ~20 seconds.
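  // (For example, 480480 recorded samples over a 10010 ms interval give an
  // estimated rate of 48000 samples/s, i.e. a 0% offset from a nominal 48 kHz
  // stream. Numbers are illustrative only.)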
  if (++num_stat_reports_ > 2 &&
      static_cast<size_t>(time_since_last) > kTimerIntervalInMilliseconds / 2) {
    uint32_t diff_samples = stats.rec_samples - last_stats_.rec_samples;
    float rate = diff_samples / (static_cast<float>(time_since_last) / 1000.0);
    uint32_t abs_diff_rate_in_percent = 0;
    if (rec_sample_rate > 0 && rate > 0) {
      abs_diff_rate_in_percent = static_cast<uint32_t>(
          0.5f +
          ((100.0f * std::abs(rate - rec_sample_rate)) / rec_sample_rate));
      RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.RecordSampleRateOffsetInPercent",
                               abs_diff_rate_in_percent);
      RTC_LOG(LS_INFO) << "[REC : " << time_since_last << "msec, "
                       << rec_sample_rate / 1000 << "kHz] callbacks: "
                       << stats.rec_callbacks - last_stats_.rec_callbacks
                       << ", "
                          "samples: "
                       << diff_samples
                       << ", "
                          "rate: "
                       << static_cast<int>(rate + 0.5)
                       << ", "
                          "rate diff: "
                       << abs_diff_rate_in_percent
                       << "%, "
                          "level: "
                       << stats.max_rec_level;
    }

    diff_samples = stats.play_samples - last_stats_.play_samples;
    rate = diff_samples / (static_cast<float>(time_since_last) / 1000.0);
    abs_diff_rate_in_percent = 0;
    if (play_sample_rate > 0 && rate > 0) {
      abs_diff_rate_in_percent = static_cast<uint32_t>(
          0.5f +
          ((100.0f * std::abs(rate - play_sample_rate)) / play_sample_rate));
      RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.PlayoutSampleRateOffsetInPercent",
                               abs_diff_rate_in_percent);
      RTC_LOG(LS_INFO) << "[PLAY: " << time_since_last << "msec, "
                       << play_sample_rate / 1000 << "kHz] callbacks: "
                       << stats.play_callbacks - last_stats_.play_callbacks
                       << ", "
                          "samples: "
                       << diff_samples
                       << ", "
                          "rate: "
                       << static_cast<int>(rate + 0.5)
                       << ", "
                          "rate diff: "
                       << abs_diff_rate_in_percent
                       << "%, "
                          "level: "
                       << stats.max_play_level;
    }
  }
  last_stats_ = stats;

  int64_t time_to_wait_ms = next_callback_time - rtc::TimeMillis();
  RTC_DCHECK_GT(time_to_wait_ms, 0) << "Invalid timer interval";

  // Keep posting new (delayed) tasks until state is changed to LOG_STOP.
  task_queue_.PostDelayedTask(
      [this] { AudioDeviceBuffer::LogStats(AudioDeviceBuffer::LOG_ACTIVE); },
      TimeDelta::Millis(time_to_wait_ms));
}

void AudioDeviceBuffer::ResetRecStats() {
  RTC_DCHECK_RUN_ON(&task_queue_);
  last_stats_.ResetRecStats();
  MutexLock lock(&lock_);
  stats_.ResetRecStats();
}

void AudioDeviceBuffer::ResetPlayStats() {
  RTC_DCHECK_RUN_ON(&task_queue_);
  last_stats_.ResetPlayStats();
  MutexLock lock(&lock_);
  stats_.ResetPlayStats();
}

void AudioDeviceBuffer::UpdateRecStats(int16_t max_abs,
                                       size_t samples_per_channel) {
  MutexLock lock(&lock_);
  ++stats_.rec_callbacks;
  stats_.rec_samples += samples_per_channel;
  if (max_abs > stats_.max_rec_level) {
    stats_.max_rec_level = max_abs;
  }
}

void AudioDeviceBuffer::UpdatePlayStats(int16_t max_abs,
                                        size_t samples_per_channel) {
  MutexLock lock(&lock_);
  ++stats_.play_callbacks;
  stats_.play_samples += samples_per_channel;
  if (max_abs > stats_.max_play_level) {
    stats_.max_play_level = max_abs;
  }
}

} // namespace webrtc

@ -0,0 +1,253 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>

#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/buffer.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/task_queue.h"
#include "rtc_base/thread_annotations.h"
#include "rtc_base/timestamp_aligner.h"

namespace webrtc {

// Delta times between two successive playout callbacks are limited to this
// value before being added to an internal array.
const size_t kMaxDeltaTimeInMs = 500;
// TODO(henrika): remove when no longer used by external client.
const size_t kMaxBufferSizeBytes = 3840;  // 10ms in stereo @ 96kHz
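// (96000 frames/s * 0.010 s * 2 channels * 2 bytes per sample = 3840 bytes.)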

class AudioDeviceBuffer {
 public:
  enum LogState {
    LOG_START = 0,
    LOG_STOP,
    LOG_ACTIVE,
  };

  struct Stats {
    void ResetRecStats() {
      rec_callbacks = 0;
      rec_samples = 0;
      max_rec_level = 0;
    }

    void ResetPlayStats() {
      play_callbacks = 0;
      play_samples = 0;
      max_play_level = 0;
    }

    // Total number of recording callbacks where the source provides 10ms audio
    // data each time.
    uint64_t rec_callbacks = 0;

    // Total number of playback callbacks where the sink asks for 10ms audio
    // data each time.
    uint64_t play_callbacks = 0;

    // Total number of recorded audio samples.
    uint64_t rec_samples = 0;

    // Total number of played audio samples.
    uint64_t play_samples = 0;

    // Contains the max level (max(abs(x))) of recorded audio packets over the
    // last 10 seconds where a new measurement is done twice per second. The
    // level is reset to zero at each call to LogStats().
    int16_t max_rec_level = 0;

    // Contains the max level of played audio packets over the last 10 seconds
    // where a new measurement is done twice per second.
    int16_t max_play_level = 0;
  };

  // If `create_detached` is true, the created buffer can be used on another
  // thread compared to the one on which it was created. It's useful for
  // testing.
  explicit AudioDeviceBuffer(TaskQueueFactory* task_queue_factory,
                             bool create_detached = false);
  virtual ~AudioDeviceBuffer();
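
  // A minimal usage sketch (names are illustrative; `factory` is assumed to
  // be a valid TaskQueueFactory and `transport` an AudioTransport
  // implementation owned by the caller):
  //
  //   AudioDeviceBuffer adb(factory);
  //   adb.RegisterAudioCallback(transport);
  //   adb.SetRecordingSampleRate(48000);
  //   adb.SetRecordingChannels(1);
  //   adb.StartRecording();
  //   ...
  //   adb.StopRecording();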

  int32_t RegisterAudioCallback(AudioTransport* audio_callback);

  void StartPlayout();
  void StartRecording();
  void StopPlayout();
  void StopRecording();

  int32_t SetRecordingSampleRate(uint32_t fsHz);
  int32_t SetPlayoutSampleRate(uint32_t fsHz);
  uint32_t RecordingSampleRate() const;
  uint32_t PlayoutSampleRate() const;

  int32_t SetRecordingChannels(size_t channels);
  int32_t SetPlayoutChannels(size_t channels);
  size_t RecordingChannels() const;
  size_t PlayoutChannels() const;

  // TODO(bugs.webrtc.org/13621) Deprecate this function
  virtual int32_t SetRecordedBuffer(const void* audio_buffer,
                                    size_t samples_per_channel);

  virtual int32_t SetRecordedBuffer(
      const void* audio_buffer,
      size_t samples_per_channel,
      absl::optional<int64_t> capture_timestamp_ns);
  virtual void SetVQEData(int play_delay_ms, int rec_delay_ms);
  virtual int32_t DeliverRecordedData();
  uint32_t NewMicLevel() const;

  virtual int32_t RequestPlayoutData(size_t samples_per_channel);
  virtual int32_t GetPlayoutData(void* audio_buffer);

  int32_t SetTypingStatus(bool typing_status);

 private:
  // Starts/stops periodic logging of audio stats.
  void StartPeriodicLogging();
  void StopPeriodicLogging();

  // Called periodically on the internal thread created by the TaskQueue.
  // Updates some stats but does it on the task queue to ensure that access of
  // members is serialized, hence avoiding usage of locks.
  // state = LOG_START => members are initialized and the timer starts.
  // state = LOG_STOP => no logs are printed and the timer stops.
  // state = LOG_ACTIVE => logs are printed and the timer is kept alive.
  void LogStats(LogState state);

  // Updates counters in each play/record callback. These counters are later
  // (periodically) read by LogStats() using a lock.
  void UpdateRecStats(int16_t max_abs, size_t samples_per_channel);
  void UpdatePlayStats(int16_t max_abs, size_t samples_per_channel);

  // Clears all members tracking stats for recording and playout.
  // These methods both run on the task queue.
  void ResetRecStats();
  void ResetPlayStats();

  // This object lives on the main (creating) thread and most methods are
  // called on that same thread. When audio has started some methods will be
  // called on either a native audio thread for playout or a native thread for
  // recording. Some members are not annotated since they are "protected by
  // design" and adding e.g. a race checker can cause failures for very few
  // edge cases and it is IMHO not worth the risk to use them in this class.
  // TODO(henrika): see if it is possible to refactor and annotate all members.

  // Main thread on which this object is created.
  SequenceChecker main_thread_checker_;

  Mutex lock_;

  // Task queue used to invoke LogStats() periodically. Tasks are executed on a
  // worker thread but it does not necessarily have to be the same thread for
  // each task.
  rtc::TaskQueue task_queue_;

  // Raw pointer to the AudioTransport instance. Supplied to
  // RegisterAudioCallback() and it must outlive this object. It is not
  // possible to change this member while any media is active. It is possible
  // to start media without calling RegisterAudioCallback() but that will lead
  // to ignored audio callbacks in both directions where native audio will be
  // active but no audio samples will be transported.
  AudioTransport* audio_transport_cb_;

  // Sample rate in Hertz. Accessed atomically.
  std::atomic<uint32_t> rec_sample_rate_;
  std::atomic<uint32_t> play_sample_rate_;

  // Number of audio channels. Accessed atomically.
  std::atomic<size_t> rec_channels_;
  std::atomic<size_t> play_channels_;

  // Keeps track of if playout/recording are active or not. A combination of
  // these states is used to determine when to start and stop the timer.
  // Only used on the creating thread and not used to control any media flow.
  bool playing_ RTC_GUARDED_BY(main_thread_checker_);
  bool recording_ RTC_GUARDED_BY(main_thread_checker_);

  // Buffer used for audio samples to be played out. Size can be changed
  // dynamically. The 16-bit samples are interleaved, hence the size is
  // proportional to the number of channels.
  rtc::BufferT<int16_t> play_buffer_;

  // Byte buffer used for recorded audio samples. Size can be changed
  // dynamically.
  rtc::BufferT<int16_t> rec_buffer_;

  // Contains true if a key-press has been detected.
  bool typing_status_;

  // Delay values used by the AEC.
  int play_delay_ms_;
  int rec_delay_ms_;

  // Capture timestamp.
  absl::optional<int64_t> capture_timestamp_ns_;
  // The last time the timestamp aligner was used to estimate the clock offset
  // between the system clock and the capture time from audio.
  // This is used to prevent estimating the clock offset too often.
  absl::optional<int64_t> align_offsync_estimation_time_;

  // Counts number of times LogStats() has been called.
  size_t num_stat_reports_ RTC_GUARDED_BY(task_queue_);

  // Time stamp of the last timer task (drives logging).
  int64_t last_timer_task_time_ RTC_GUARDED_BY(task_queue_);

  // Counts number of audio callbacks modulo 50 to create a signal when
  // a new storage of audio stats shall be done.
  int16_t rec_stat_count_;
  int16_t play_stat_count_;

  // Time stamps of when playout and recording starts.
  int64_t play_start_time_ RTC_GUARDED_BY(main_thread_checker_);
  int64_t rec_start_time_ RTC_GUARDED_BY(main_thread_checker_);

  // Contains counters for playout and recording statistics.
  Stats stats_ RTC_GUARDED_BY(lock_);

  // Stores current stats at each timer task. Used to calculate differences
  // between two successive timer events.
  Stats last_stats_ RTC_GUARDED_BY(task_queue_);

  // Set to true at construction and modified to false as soon as one audio-
  // level estimate larger than zero is detected.
  bool only_silence_recorded_;

  // Set to true when logging of audio stats is enabled for the first time in
  // StartPeriodicLogging() and set to false by StopPeriodicLogging().
  // Setting this member to false prevents (possibly invalid) log messages
  // from being printed in the LogStats() task.
  bool log_stats_ RTC_GUARDED_BY(task_queue_);

  // Used for converting capture timestamps (received from AudioRecordThread
  // via AudioRecordJni::DataIsRecorded) to the RTC clock.
  rtc::TimestampAligner timestamp_aligner_;

  // Should *never* be defined in production builds. Only used for testing.
  // When defined, the output signal will be replaced by a sinus tone at 440Hz.
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
  double phase_;
#endif
};

} // namespace webrtc

#endif // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_

@ -0,0 +1,30 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
#define AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_

// Enumerators
//
enum { GET_MIC_VOLUME_INTERVAL_MS = 1000 };

// Platform specifics
//
#if defined(_WIN32)
#if (_MSC_VER >= 1400)
#if !defined(WEBRTC_DUMMY_FILE_DEVICES)
// Windows Core Audio is the default audio layer in Windows.
// Only supported for VS 2005 and higher.
#define WEBRTC_WINDOWS_CORE_AUDIO_BUILD
#endif
#endif
#endif

#endif // AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_

@ -0,0 +1,373 @@
/*
 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/include/audio_device_data_observer.h"

#include "api/make_ref_counted.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/checks.h"

namespace webrtc {

namespace {

// A wrapper over AudioDeviceModule that registers itself as the AudioTransport
// callback and redirects the PCM data to the AudioDeviceDataObserver callback.
class ADMWrapper : public AudioDeviceModule, public AudioTransport {
 public:
  ADMWrapper(rtc::scoped_refptr<AudioDeviceModule> impl,
             AudioDeviceDataObserver* legacy_observer,
             std::unique_ptr<AudioDeviceDataObserver> observer)
      : impl_(impl),
        legacy_observer_(legacy_observer),
        observer_(std::move(observer)) {
    is_valid_ = impl_.get() != nullptr;
  }
  ADMWrapper(AudioLayer audio_layer,
             TaskQueueFactory* task_queue_factory,
             AudioDeviceDataObserver* legacy_observer,
             std::unique_ptr<AudioDeviceDataObserver> observer)
      : ADMWrapper(AudioDeviceModule::Create(audio_layer, task_queue_factory),
                   legacy_observer,
                   std::move(observer)) {}
  ~ADMWrapper() override {
    audio_transport_ = nullptr;
    observer_ = nullptr;
  }

  // Make sure we have a valid ADM before returning it to the user.
  bool IsValid() { return is_valid_; }

  int32_t RecordedDataIsAvailable(const void* audioSamples,
                                  size_t nSamples,
                                  size_t nBytesPerSample,
                                  size_t nChannels,
                                  uint32_t samples_per_sec,
                                  uint32_t total_delay_ms,
                                  int32_t clockDrift,
                                  uint32_t currentMicLevel,
                                  bool keyPressed,
                                  uint32_t& newMicLevel) override {
    return RecordedDataIsAvailable(
        audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec,
        total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel,
        /*capture_timestamp_ns=*/absl::nullopt);
  }

  // AudioTransport method overrides.
  int32_t RecordedDataIsAvailable(
      const void* audioSamples,
      size_t nSamples,
      size_t nBytesPerSample,
      size_t nChannels,
      uint32_t samples_per_sec,
      uint32_t total_delay_ms,
      int32_t clockDrift,
      uint32_t currentMicLevel,
      bool keyPressed,
      uint32_t& newMicLevel,
      absl::optional<int64_t> capture_timestamp_ns) override {
    int32_t res = 0;
    // Capture PCM data of locally captured audio.
    if (observer_) {
      observer_->OnCaptureData(audioSamples, nSamples, nBytesPerSample,
                               nChannels, samples_per_sec);
    }

    // Send to the actual audio transport.
    if (audio_transport_) {
      res = audio_transport_->RecordedDataIsAvailable(
          audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec,
          total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel,
          capture_timestamp_ns);
    }

    return res;
  }

  int32_t NeedMorePlayData(const size_t nSamples,
                           const size_t nBytesPerSample,
                           const size_t nChannels,
                           const uint32_t samples_per_sec,
                           void* audioSamples,
                           size_t& nSamplesOut,
                           int64_t* elapsed_time_ms,
                           int64_t* ntp_time_ms) override {
    int32_t res = 0;
    // Set out parameters to safe values to be sure not to return corrupted
    // data.
    nSamplesOut = 0;
    *elapsed_time_ms = -1;
    *ntp_time_ms = -1;
    // Request data from the audio transport.
    if (audio_transport_) {
      res = audio_transport_->NeedMorePlayData(
          nSamples, nBytesPerSample, nChannels, samples_per_sec, audioSamples,
          nSamplesOut, elapsed_time_ms, ntp_time_ms);
    }

    // Capture rendered data.
    if (observer_) {
      observer_->OnRenderData(audioSamples, nSamples, nBytesPerSample,
                              nChannels, samples_per_sec);
    }

    return res;
  }

  void PullRenderData(int bits_per_sample,
                      int sample_rate,
                      size_t number_of_channels,
                      size_t number_of_frames,
                      void* audio_data,
                      int64_t* elapsed_time_ms,
                      int64_t* ntp_time_ms) override {
    RTC_DCHECK_NOTREACHED();
  }

  // Override AudioDeviceModule's RegisterAudioCallback method to remember the
  // actual audio transport (e.g.: voice engine).
  int32_t RegisterAudioCallback(AudioTransport* audio_callback) override {
    // Remember the audio callback to forward PCM data.
    audio_transport_ = audio_callback;
    return 0;
  }

  // AudioDeviceModule pass-through method overrides.
  int32_t ActiveAudioLayer(AudioLayer* audio_layer) const override {
    return impl_->ActiveAudioLayer(audio_layer);
  }
  int32_t Init() override {
    int res = impl_->Init();
    if (res != 0) {
      return res;
    }
    // Register self as the audio transport callback for the underlying ADM
    // impl.
    impl_->RegisterAudioCallback(this);
    return res;
  }
  int32_t Terminate() override { return impl_->Terminate(); }
  bool Initialized() const override { return impl_->Initialized(); }
  int16_t PlayoutDevices() override { return impl_->PlayoutDevices(); }
  int16_t RecordingDevices() override { return impl_->RecordingDevices(); }
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override {
    return impl_->PlayoutDeviceName(index, name, guid);
  }
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override {
    return impl_->RecordingDeviceName(index, name, guid);
  }
  int32_t SetPlayoutDevice(uint16_t index) override {
    return impl_->SetPlayoutDevice(index);
  }
  int32_t SetPlayoutDevice(WindowsDeviceType device) override {
    return impl_->SetPlayoutDevice(device);
  }
  int32_t SetRecordingDevice(uint16_t index) override {
    return impl_->SetRecordingDevice(index);
  }
  int32_t SetRecordingDevice(WindowsDeviceType device) override {
    return impl_->SetRecordingDevice(device);
  }
  int32_t PlayoutIsAvailable(bool* available) override {
    return impl_->PlayoutIsAvailable(available);
  }
  int32_t InitPlayout() override { return impl_->InitPlayout(); }
  bool PlayoutIsInitialized() const override {
    return impl_->PlayoutIsInitialized();
  }
  int32_t RecordingIsAvailable(bool* available) override {
    return impl_->RecordingIsAvailable(available);
  }
  int32_t InitRecording() override { return impl_->InitRecording(); }
  bool RecordingIsInitialized() const override {
    return impl_->RecordingIsInitialized();
  }
  int32_t StartPlayout() override { return impl_->StartPlayout(); }
  int32_t StopPlayout() override { return impl_->StopPlayout(); }
  bool Playing() const override { return impl_->Playing(); }
  int32_t StartRecording() override { return impl_->StartRecording(); }
  int32_t StopRecording() override { return impl_->StopRecording(); }
  bool Recording() const override { return impl_->Recording(); }
  int32_t InitSpeaker() override { return impl_->InitSpeaker(); }
  bool SpeakerIsInitialized() const override {
    return impl_->SpeakerIsInitialized();
  }
  int32_t InitMicrophone() override { return impl_->InitMicrophone(); }
  bool MicrophoneIsInitialized() const override {
    return impl_->MicrophoneIsInitialized();
  }
  int32_t SpeakerVolumeIsAvailable(bool* available) override {
    return impl_->SpeakerVolumeIsAvailable(available);
  }
  int32_t SetSpeakerVolume(uint32_t volume) override {
    return impl_->SetSpeakerVolume(volume);
  }
  int32_t SpeakerVolume(uint32_t* volume) const override {
    return impl_->SpeakerVolume(volume);
  }
  int32_t MaxSpeakerVolume(uint32_t* max_volume) const override {
    return impl_->MaxSpeakerVolume(max_volume);
  }
  int32_t MinSpeakerVolume(uint32_t* min_volume) const override {
    return impl_->MinSpeakerVolume(min_volume);
  }
  int32_t MicrophoneVolumeIsAvailable(bool* available) override {
    return impl_->MicrophoneVolumeIsAvailable(available);
  }
  int32_t SetMicrophoneVolume(uint32_t volume) override {
    return impl_->SetMicrophoneVolume(volume);
  }
  int32_t MicrophoneVolume(uint32_t* volume) const override {
    return impl_->MicrophoneVolume(volume);
  }
  int32_t MaxMicrophoneVolume(uint32_t* max_volume) const override {
    return impl_->MaxMicrophoneVolume(max_volume);
  }
  int32_t MinMicrophoneVolume(uint32_t* min_volume) const override {
    return impl_->MinMicrophoneVolume(min_volume);
  }
  int32_t SpeakerMuteIsAvailable(bool* available) override {
    return impl_->SpeakerMuteIsAvailable(available);
  }
  int32_t SetSpeakerMute(bool enable) override {
    return impl_->SetSpeakerMute(enable);
  }
  int32_t SpeakerMute(bool* enabled) const override {
    return impl_->SpeakerMute(enabled);
  }
  int32_t MicrophoneMuteIsAvailable(bool* available) override {
    return impl_->MicrophoneMuteIsAvailable(available);
  }
  int32_t SetMicrophoneMute(bool enable) override {
    return impl_->SetMicrophoneMute(enable);
  }
  int32_t MicrophoneMute(bool* enabled) const override {
    return impl_->MicrophoneMute(enabled);
  }
  int32_t StereoPlayoutIsAvailable(bool* available) const override {
    return impl_->StereoPlayoutIsAvailable(available);
  }
  int32_t SetStereoPlayout(bool enable) override {
    return impl_->SetStereoPlayout(enable);
  }
  int32_t StereoPlayout(bool* enabled) const override {
    return impl_->StereoPlayout(enabled);
  }
  int32_t StereoRecordingIsAvailable(bool* available) const override {
    return impl_->StereoRecordingIsAvailable(available);
  }
  int32_t SetStereoRecording(bool enable) override {
    return impl_->SetStereoRecording(enable);
  }
  int32_t StereoRecording(bool* enabled) const override {
    return impl_->StereoRecording(enabled);
  }
  int32_t PlayoutDelay(uint16_t* delay_ms) const override {
    return impl_->PlayoutDelay(delay_ms);
  }
  bool BuiltInAECIsAvailable() const override {
    return impl_->BuiltInAECIsAvailable();
  }
  bool BuiltInAGCIsAvailable() const override {
    return impl_->BuiltInAGCIsAvailable();
  }
  bool BuiltInNSIsAvailable() const override {
    return impl_->BuiltInNSIsAvailable();
  }
  int32_t EnableBuiltInAEC(bool enable) override {
    return impl_->EnableBuiltInAEC(enable);
  }
  int32_t EnableBuiltInAGC(bool enable) override {
    return impl_->EnableBuiltInAGC(enable);
  }
  int32_t EnableBuiltInNS(bool enable) override {
    return impl_->EnableBuiltInNS(enable);
  }
  int32_t GetPlayoutUnderrunCount() const override {
    return impl_->GetPlayoutUnderrunCount();
  }
// Only supported on iOS.
#if defined(WEBRTC_IOS)
  int GetPlayoutAudioParameters(AudioParameters* params) const override {
    return impl_->GetPlayoutAudioParameters(params);
  }
  int GetRecordAudioParameters(AudioParameters* params) const override {
    return impl_->GetRecordAudioParameters(params);
  }
#endif // WEBRTC_IOS

 protected:
  rtc::scoped_refptr<AudioDeviceModule> impl_;
  AudioDeviceDataObserver* legacy_observer_ = nullptr;
  std::unique_ptr<AudioDeviceDataObserver> observer_;
  AudioTransport* audio_transport_ = nullptr;
  bool is_valid_ = false;
};

} // namespace

rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    rtc::scoped_refptr<AudioDeviceModule> impl,
    std::unique_ptr<AudioDeviceDataObserver> observer) {
  auto audio_device = rtc::make_ref_counted<ADMWrapper>(impl, observer.get(),
                                                        std::move(observer));

  if (!audio_device->IsValid()) {
    return nullptr;
  }

  return audio_device;
}

rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    rtc::scoped_refptr<AudioDeviceModule> impl,
    AudioDeviceDataObserver* legacy_observer) {
  auto audio_device =
      rtc::make_ref_counted<ADMWrapper>(impl, legacy_observer, nullptr);

  if (!audio_device->IsValid()) {
    return nullptr;
  }

  return audio_device;
}

rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    AudioDeviceModule::AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory,
    std::unique_ptr<AudioDeviceDataObserver> observer) {
  auto audio_device = rtc::make_ref_counted<ADMWrapper>(
      audio_layer, task_queue_factory, observer.get(), std::move(observer));

  if (!audio_device->IsValid()) {
    return nullptr;
  }

  return audio_device;
}

rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    AudioDeviceModule::AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory,
    AudioDeviceDataObserver* legacy_observer) {
  auto audio_device = rtc::make_ref_counted<ADMWrapper>(
      audio_layer, task_queue_factory, legacy_observer, nullptr);

  if (!audio_device->IsValid()) {
    return nullptr;
  }

  return audio_device;
}
|
||||
} // namespace webrtc
|
||||
|
|
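For illustration only, a minimal sketch of how these factories might be used. The observer class and its counting logic are hypothetical, and the override signatures assume the AudioDeviceDataObserver interface as declared in modules/audio_device/include/audio_device_data_observer.h:

// Hypothetical observer that simply counts captured samples.
class SampleCountingObserver : public webrtc::AudioDeviceDataObserver {
 public:
  void OnCaptureData(const void* audio_samples,
                     size_t num_samples,
                     size_t bytes_per_sample,
                     size_t num_channels,
                     uint32_t samples_per_sec) override {
    captured_samples_ += num_samples;
  }
  void OnRenderData(const void* audio_samples,
                    size_t num_samples,
                    size_t bytes_per_sample,
                    size_t num_channels,
                    uint32_t samples_per_sec) override {}

 private:
  size_t captured_samples_ = 0;
};

// Wrap an existing ADM (`adm`); the factory returns nullptr on failure.
rtc::scoped_refptr<webrtc::AudioDeviceModule> wrapped =
    webrtc::CreateAudioDeviceWithDataObserver(
        adm, std::make_unique<SampleCountingObserver>());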
@@ -0,0 +1,66 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/audio_device_generic.h"

#include "rtc_base/logging.h"

namespace webrtc {

bool AudioDeviceGeneric::BuiltInAECIsAvailable() const {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return false;
}

int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable) {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return -1;
}

bool AudioDeviceGeneric::BuiltInAGCIsAvailable() const {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return false;
}

int32_t AudioDeviceGeneric::EnableBuiltInAGC(bool enable) {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return -1;
}

bool AudioDeviceGeneric::BuiltInNSIsAvailable() const {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return false;
}

int32_t AudioDeviceGeneric::EnableBuiltInNS(bool enable) {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return -1;
}

int32_t AudioDeviceGeneric::GetPlayoutUnderrunCount() const {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return -1;
}

#if defined(WEBRTC_IOS)
int AudioDeviceGeneric::GetPlayoutAudioParameters(
    AudioParameters* params) const {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return -1;
}

int AudioDeviceGeneric::GetRecordAudioParameters(
    AudioParameters* params) const {
  RTC_LOG_F(LS_ERROR) << "Not supported on this platform";
  return -1;
}
#endif  // WEBRTC_IOS

}  // namespace webrtc
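A brief sketch of how these defaults behave from the caller's side: on platforms that do not override the hooks, availability probes come back false (with the error log above), so callers can query any implementation uniformly. The helper name below is hypothetical:

// Hypothetical helper; logs which built-in effects the device reports.
void LogBuiltInEffectSupport(const webrtc::AudioDeviceGeneric& device) {
  RTC_LOG(LS_INFO) << "Built-in AEC: " << device.BuiltInAECIsAvailable()
                   << ", AGC: " << device.BuiltInAGCIsAvailable()
                   << ", NS: " << device.BuiltInNSIsAvailable();
}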
@@ -0,0 +1,145 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_
#define AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_

#include <stdint.h>

#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"

namespace webrtc {

class AudioDeviceGeneric {
 public:
  // For use with UMA logging. Must be kept in sync with histograms.xml in
  // Chrome, located at
  // https://cs.chromium.org/chromium/src/tools/metrics/histograms/histograms.xml
  enum class InitStatus {
    OK = 0,
    PLAYOUT_ERROR = 1,
    RECORDING_ERROR = 2,
    OTHER_ERROR = 3,
    NUM_STATUSES = 4
  };

  // Retrieve the currently utilized audio layer
  virtual int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const = 0;

  // Main initialization and termination
  virtual InitStatus Init() = 0;
  virtual int32_t Terminate() = 0;
  virtual bool Initialized() const = 0;

  // Device enumeration
  virtual int16_t PlayoutDevices() = 0;
  virtual int16_t RecordingDevices() = 0;
  virtual int32_t PlayoutDeviceName(uint16_t index,
                                    char name[kAdmMaxDeviceNameSize],
                                    char guid[kAdmMaxGuidSize]) = 0;
  virtual int32_t RecordingDeviceName(uint16_t index,
                                      char name[kAdmMaxDeviceNameSize],
                                      char guid[kAdmMaxGuidSize]) = 0;

  // Device selection
  virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
  virtual int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) = 0;
  virtual int32_t SetRecordingDevice(uint16_t index) = 0;
  virtual int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) = 0;

  // Audio transport initialization
  virtual int32_t PlayoutIsAvailable(bool& available) = 0;
  virtual int32_t InitPlayout() = 0;
  virtual bool PlayoutIsInitialized() const = 0;
  virtual int32_t RecordingIsAvailable(bool& available) = 0;
  virtual int32_t InitRecording() = 0;
  virtual bool RecordingIsInitialized() const = 0;

  // Audio transport control
  virtual int32_t StartPlayout() = 0;
  virtual int32_t StopPlayout() = 0;
  virtual bool Playing() const = 0;
  virtual int32_t StartRecording() = 0;
  virtual int32_t StopRecording() = 0;
  virtual bool Recording() const = 0;

  // Audio mixer initialization
  virtual int32_t InitSpeaker() = 0;
  virtual bool SpeakerIsInitialized() const = 0;
  virtual int32_t InitMicrophone() = 0;
  virtual bool MicrophoneIsInitialized() const = 0;

  // Speaker volume controls
  virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
  virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
  virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
  virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
  virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;

  // Microphone volume controls
  virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
  virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
  virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
  virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const = 0;
  virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const = 0;

  // Speaker mute control
  virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
  virtual int32_t SetSpeakerMute(bool enable) = 0;
  virtual int32_t SpeakerMute(bool& enabled) const = 0;

  // Microphone mute control
  virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
  virtual int32_t SetMicrophoneMute(bool enable) = 0;
  virtual int32_t MicrophoneMute(bool& enabled) const = 0;

  // Stereo support
  virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
  virtual int32_t SetStereoPlayout(bool enable) = 0;
  virtual int32_t StereoPlayout(bool& enabled) const = 0;
  virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
  virtual int32_t SetStereoRecording(bool enable) = 0;
  virtual int32_t StereoRecording(bool& enabled) const = 0;

  // Delay information and control
  virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;

  // Android only
  virtual bool BuiltInAECIsAvailable() const;
  virtual bool BuiltInAGCIsAvailable() const;
  virtual bool BuiltInNSIsAvailable() const;

  // Windows Core Audio and Android only.
  virtual int32_t EnableBuiltInAEC(bool enable);
  virtual int32_t EnableBuiltInAGC(bool enable);
  virtual int32_t EnableBuiltInNS(bool enable);

  // Play underrun count.
  virtual int32_t GetPlayoutUnderrunCount() const;

  // iOS only.
  // TODO(henrika): add Android support.
#if defined(WEBRTC_IOS)
  virtual int GetPlayoutAudioParameters(AudioParameters* params) const;
  virtual int GetRecordAudioParameters(AudioParameters* params) const;
#endif  // WEBRTC_IOS

  virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;

  virtual ~AudioDeviceGeneric() {}
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_
@@ -0,0 +1,986 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/audio_device_impl.h"

#include <stddef.h>

#include "api/make_ref_counted.h"
#include "api/scoped_refptr.h"
#include "modules/audio_device/audio_device_config.h"  // IWYU pragma: keep
#include "modules/audio_device/audio_device_generic.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/metrics.h"

#if defined(_WIN32)
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
#include "modules/audio_device/win/audio_device_core_win.h"
#endif
#elif defined(WEBRTC_ANDROID)
#include <stdlib.h>
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
#include "modules/audio_device/android/aaudio_player.h"
#include "modules/audio_device/android/aaudio_recorder.h"
#endif
#include "modules/audio_device/android/audio_device_template.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/audio_record_jni.h"
#include "modules/audio_device/android/audio_screen_record_jni.h"
#include "modules/audio_device/android/audio_merged_screen_record_jni.h"
#include "modules/audio_device/android/audio_track_jni.h"
#include "modules/audio_device/android/opensles_player.h"
#include "modules/audio_device/android/opensles_recorder.h"
#elif defined(WEBRTC_LINUX)
#if defined(WEBRTC_ENABLE_LINUX_ALSA)
#include "modules/audio_device/linux/audio_device_alsa_linux.h"
#endif
#if defined(WEBRTC_ENABLE_LINUX_PULSE)
#include "modules/audio_device/linux/audio_device_pulse_linux.h"
#endif
#elif defined(WEBRTC_IOS)
#include "sdk/objc/native/src/audio/audio_device_ios.h"
#elif defined(WEBRTC_MAC)
#include "modules/audio_device/mac/audio_device_mac.h"
#endif
#if defined(WEBRTC_DUMMY_FILE_DEVICES)
#include "modules/audio_device/dummy/file_audio_device.h"
#include "modules/audio_device/dummy/file_audio_device_factory.h"
#endif
#include "modules/audio_device/dummy/audio_device_dummy.h"

#define CHECKinitialized_() \
  {                         \
    if (!initialized_) {    \
      return -1;            \
    }                       \
  }

#define CHECKinitialized__BOOL() \
  {                              \
    if (!initialized_) {         \
      return false;              \
    }                            \
  }
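// Usage sketch (illustrative only): each public method below opens with one
// of these guards, which expand to an early return when Init() has not yet
// succeeded, e.g.
//
//   int32_t AudioDeviceModuleImpl::InitSpeaker() {
//     CHECKinitialized_();  // expands to: { if (!initialized_) return -1; }
//     return audio_device_->InitSpeaker();
//   }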
namespace webrtc {

rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create(
    AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory) {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory);
}

// static
rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(
    AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory) {
  RTC_DLOG(LS_INFO) << __FUNCTION__;

  // The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own
  // dedicated factory method which should be used instead.
  if (audio_layer == AudioDeviceModule::kWindowsCoreAudio2) {
    RTC_LOG(LS_ERROR) << "Use the CreateWindowsCoreAudioAudioDeviceModule() "
                         "factory method instead for this option.";
    return nullptr;
  } else if (audio_layer == AudioDeviceModule::kAndroidJavaAudio ||
             audio_layer == AudioDeviceModule::kAndroidOpenSLESAudio ||
             audio_layer ==
                 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio ||
             audio_layer == kAndroidAAudioAudio ||
             audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
    RTC_LOG(LS_ERROR) << "Use the CreateAndroidAudioDeviceModule() "
                         "factory method instead for this option.";
    return nullptr;
  }

  // Create the generic reference counted (platform independent)
  // implementation.
  auto audio_device = rtc::make_ref_counted<AudioDeviceModuleImpl>(
      audio_layer, task_queue_factory);

  // Ensure that the current platform is supported.
  if (audio_device->CheckPlatform() == -1) {
    return nullptr;
  }

  // Create the platform-dependent implementation.
  if (audio_device->CreatePlatformSpecificObjects() == -1) {
    return nullptr;
  }

  // Ensure that the generic audio buffer can communicate with the platform
  // specific parts.
  if (audio_device->AttachAudioBuffer() == -1) {
    return nullptr;
  }

  return audio_device;
}
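// Creation sketch (illustrative, not part of this file): a typical caller
// pairs Create() with a default task queue factory from
// api/task_queue/default_task_queue_factory.h:
//
//   auto factory = webrtc::CreateDefaultTaskQueueFactory();
//   auto adm = webrtc::AudioDeviceModule::Create(
//       webrtc::AudioDeviceModule::kPlatformDefaultAudio, factory.get());
//   if (adm && adm->Init() == 0) {
//     // The module is ready for RegisterAudioCallback()/StartPlayout().
//   }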

AudioDeviceModuleImpl::AudioDeviceModuleImpl(
    AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory)
    : audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
}

AudioDeviceModuleImpl::AudioDeviceModuleImpl(
    AudioLayer audio_layer,
    std::unique_ptr<AudioDeviceGeneric> audio_device,
    TaskQueueFactory* task_queue_factory,
    bool create_detached)
    : audio_layer_(audio_layer),
      audio_device_buffer_(task_queue_factory, create_detached),
      audio_device_(std::move(audio_device)) {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
}

int32_t AudioDeviceModuleImpl::CheckPlatform() {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  // Ensure that the current platform is supported.
  PlatformType platform(kPlatformNotSupported);
#if defined(_WIN32)
  platform = kPlatformWin32;
  RTC_LOG(LS_INFO) << "current platform is Win32";
#elif defined(WEBRTC_ANDROID)
  platform = kPlatformAndroid;
  RTC_LOG(LS_INFO) << "current platform is Android";
#elif defined(WEBRTC_LINUX)
  platform = kPlatformLinux;
  RTC_LOG(LS_INFO) << "current platform is Linux";
#elif defined(WEBRTC_IOS)
  platform = kPlatformIOS;
  RTC_LOG(LS_INFO) << "current platform is IOS";
#elif defined(WEBRTC_MAC)
  platform = kPlatformMac;
  RTC_LOG(LS_INFO) << "current platform is Mac";
#elif defined(WEBRTC_FUCHSIA)
  platform = kPlatformFuchsia;
  RTC_LOG(LS_INFO) << "current platform is Fuchsia";
#endif
  if (platform == kPlatformNotSupported) {
    RTC_LOG(LS_ERROR)
        << "current platform is not supported => this module will self "
           "destruct!";
    return -1;
  }
  platform_type_ = platform;
  return 0;
}
int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  if (audio_device_ != nullptr) {
    RTC_LOG(LS_INFO) << "Reusing provided audio device";
    return 0;
  }
// Dummy ADM implementations if build flags are set.
#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
  audio_device_.reset(new AudioDeviceDummy());
  RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized";
#elif defined(WEBRTC_DUMMY_FILE_DEVICES)
  audio_device_.reset(FileAudioDeviceFactory::CreateFileAudioDevice());
  if (audio_device_) {
    RTC_LOG(LS_INFO) << "Will use file-playing dummy device.";
  } else {
    // Create a dummy device instead.
    audio_device_.reset(new AudioDeviceDummy());
    RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized";
  }

// Real (non-dummy) ADM implementations.
#else
  AudioLayer audio_layer(PlatformAudioLayer());
// Windows ADM implementation.
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  if ((audio_layer == kWindowsCoreAudio) ||
      (audio_layer == kPlatformDefaultAudio)) {
    RTC_LOG(LS_INFO) << "Attempting to use the Windows Core Audio APIs...";
    if (AudioDeviceWindowsCore::CoreAudioIsSupported()) {
      audio_device_.reset(new AudioDeviceWindowsCore());
      RTC_LOG(LS_INFO) << "Windows Core Audio APIs will be utilized";
    }
  }
#endif  // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)

#if defined(WEBRTC_ANDROID)
  // Create an Android audio manager.
  audio_manager_android_.reset(new AudioManager());
  // Select best possible combination of audio layers.
  if (audio_layer == kPlatformDefaultAudio) {
    if (audio_manager_android_->IsAAudioSupported()) {
      // Use of AAudio for both playout and recording has highest priority.
      audio_layer = kAndroidAAudioAudio;
    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
               audio_manager_android_->IsLowLatencyRecordSupported()) {
      // Use OpenSL ES for both playout and recording.
      audio_layer = kAndroidOpenSLESAudio;
    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
               !audio_manager_android_->IsLowLatencyRecordSupported()) {
      // Use OpenSL ES for output on devices that only support the
      // low-latency output audio path.
      audio_layer = kAndroidJavaInputAndOpenSLESOutputAudio;
    } else {
      // Use Java-based audio in both directions when low-latency output is
      // not supported.
      audio_layer = kAndroidJavaAudio;
    }
  }
  AudioManager* audio_manager = audio_manager_android_.get();
  if (audio_layer == kAndroidJavaAudio) {
    // Java audio for both input and output audio.
    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(
        audio_layer, audio_manager));
  } else if (audio_layer == kAndroidScreenAudio) {
    // Java-based screen-capture audio for input and Java audio for output.
    audio_device_.reset(
        new AudioDeviceTemplate<AudioScreenRecordJni, AudioTrackJni>(
            audio_layer, audio_manager));
  } else if (audio_layer == kAndroidMergedScreenAudio) {
    // Java-based merged microphone and screen-capture audio for input and
    // Java audio for output.
    audio_device_.reset(
        new AudioDeviceTemplate<AudioMergedScreenRecordJni, AudioTrackJni>(
            audio_layer, audio_manager));
  } else if (audio_layer == kAndroidOpenSLESAudio) {
    // OpenSL ES based audio for both input and output audio.
    audio_device_.reset(
        new AudioDeviceTemplate<OpenSLESRecorder, OpenSLESPlayer>(
            audio_layer, audio_manager));
  } else if (audio_layer == kAndroidJavaInputAndOpenSLESOutputAudio) {
    // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
    // This combination provides low-latency output audio and at the same
    // time support for HW AEC using the AudioRecord Java API.
    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, OpenSLESPlayer>(
        audio_layer, audio_manager));
  } else if (audio_layer == kAndroidAAudioAudio) {
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
    // AAudio based audio for both input and output.
    audio_device_.reset(new AudioDeviceTemplate<AAudioRecorder, AAudioPlayer>(
        audio_layer, audio_manager));
#endif
  } else if (audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
    // Java audio for input and AAudio for output audio (i.e. mixed APIs).
    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AAudioPlayer>(
        audio_layer, audio_manager));
#endif
  } else {
    RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
    audio_device_.reset(nullptr);
  }
#endif  // defined(WEBRTC_ANDROID)

// Linux ADM implementation.
// Note that WEBRTC_ENABLE_LINUX_ALSA is always defined by default when
// WEBRTC_LINUX is defined. WEBRTC_ENABLE_LINUX_PULSE depends on the
// 'rtc_include_pulse_audio' build flag.
// TODO(bugs.webrtc.org/9127): improve support and make it more clear that
// PulseAudio is the default selection.
#if !defined(WEBRTC_ANDROID) && defined(WEBRTC_LINUX)
#if !defined(WEBRTC_ENABLE_LINUX_PULSE)
  // Build flag 'rtc_include_pulse_audio' is set to false. In this mode:
  // - kPlatformDefaultAudio => ALSA, and
  // - kLinuxAlsaAudio => ALSA, and
  // - kLinuxPulseAudio => Invalid selection.
  RTC_LOG(LS_WARNING) << "PulseAudio is disabled using build flag.";
  if ((audio_layer == kLinuxAlsaAudio) ||
      (audio_layer == kPlatformDefaultAudio)) {
    audio_device_.reset(new AudioDeviceLinuxALSA());
    RTC_LOG(LS_INFO) << "Linux ALSA APIs will be utilized.";
  }
#else
  // Build flag 'rtc_include_pulse_audio' is set to true (default). In this
  // mode:
  // - kPlatformDefaultAudio => PulseAudio, and
  // - kLinuxPulseAudio => PulseAudio, and
  // - kLinuxAlsaAudio => ALSA (supported but not default).
  RTC_LOG(LS_INFO) << "PulseAudio support is enabled.";
  if ((audio_layer == kLinuxPulseAudio) ||
      (audio_layer == kPlatformDefaultAudio)) {
    // Linux PulseAudio implementation is default.
    audio_device_.reset(new AudioDeviceLinuxPulse());
    RTC_LOG(LS_INFO) << "Linux PulseAudio APIs will be utilized";
  } else if (audio_layer == kLinuxAlsaAudio) {
    audio_device_.reset(new AudioDeviceLinuxALSA());
    RTC_LOG(LS_WARNING) << "Linux ALSA APIs will be utilized.";
  }
#endif  // !defined(WEBRTC_ENABLE_LINUX_PULSE)
#endif  // defined(WEBRTC_LINUX)

// iOS ADM implementation.
#if defined(WEBRTC_IOS)
  if (audio_layer == kPlatformDefaultAudio) {
    audio_device_.reset(
        new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
    RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
  }
// END #if defined(WEBRTC_IOS)

// Mac OS X ADM implementation.
#elif defined(WEBRTC_MAC)
  if (audio_layer == kPlatformDefaultAudio) {
    audio_device_.reset(new AudioDeviceMac());
    RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized.";
  }
#endif  // WEBRTC_MAC

  // Dummy ADM implementation.
  if (audio_layer == kDummyAudio) {
    audio_device_.reset(new AudioDeviceDummy());
    RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized.";
  }
#endif  // if defined(WEBRTC_DUMMY_AUDIO_BUILD)

  if (!audio_device_) {
    RTC_LOG(LS_ERROR)
        << "Failed to create the platform specific ADM implementation.";
    return -1;
  }
  return 0;
}
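// Selection sketch (illustrative only): kPlatformDefaultAudio picks the best
// available backend as above; where this factory supports it, a specific
// backend can still be pinned explicitly, e.g. forcing ALSA on Linux:
//
//   auto adm = webrtc::AudioDeviceModule::Create(
//       webrtc::AudioDeviceModule::kLinuxAlsaAudio, task_queue_factory);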
int32_t AudioDeviceModuleImpl::AttachAudioBuffer() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  audio_device_->AttachAudioBuffer(&audio_device_buffer_);
  return 0;
}

AudioDeviceModuleImpl::~AudioDeviceModuleImpl() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
}

int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  AudioLayer activeAudio;
  if (audio_device_->ActiveAudioLayer(activeAudio) == -1) {
    return -1;
  }
  *audioLayer = activeAudio;
  return 0;
}

int32_t AudioDeviceModuleImpl::Init() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  if (initialized_)
    return 0;
  RTC_CHECK(audio_device_);
  AudioDeviceGeneric::InitStatus status = audio_device_->Init();
  RTC_HISTOGRAM_ENUMERATION(
      "WebRTC.Audio.InitializationResult", static_cast<int>(status),
      static_cast<int>(AudioDeviceGeneric::InitStatus::NUM_STATUSES));
  if (status != AudioDeviceGeneric::InitStatus::OK) {
    RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
    return -1;
  }
  initialized_ = true;
  return 0;
}

int32_t AudioDeviceModuleImpl::Terminate() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  if (!initialized_)
    return 0;
  if (audio_device_->Terminate() == -1) {
    return -1;
  }
  initialized_ = false;
  return 0;
}

bool AudioDeviceModuleImpl::Initialized() const {
  RTC_LOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
  return initialized_;
}
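// Lifecycle sketch (illustrative only): Init() and Terminate() are idempotent
// as implemented above, so a safe call order is simply
//
//   adm->Init();       // no-op if already initialized
//   // ... use the device ...
//   adm->Terminate();  // no-op if never initialized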
int32_t AudioDeviceModuleImpl::InitSpeaker() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  return audio_device_->InitSpeaker();
}

int32_t AudioDeviceModuleImpl::InitMicrophone() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  return audio_device_->InitMicrophone();
}

int32_t AudioDeviceModuleImpl::SpeakerVolumeIsAvailable(bool* available) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::SetSpeakerVolume(uint32_t volume) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
  CHECKinitialized_();
  return audio_device_->SetSpeakerVolume(volume);
}

int32_t AudioDeviceModuleImpl::SpeakerVolume(uint32_t* volume) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  uint32_t level = 0;
  if (audio_device_->SpeakerVolume(level) == -1) {
    return -1;
  }
  *volume = level;
  RTC_LOG(LS_INFO) << "output: " << *volume;
  return 0;
}

bool AudioDeviceModuleImpl::SpeakerIsInitialized() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isInitialized = audio_device_->SpeakerIsInitialized();
  RTC_LOG(LS_INFO) << "output: " << isInitialized;
  return isInitialized;
}

bool AudioDeviceModuleImpl::MicrophoneIsInitialized() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isInitialized = audio_device_->MicrophoneIsInitialized();
  RTC_LOG(LS_INFO) << "output: " << isInitialized;
  return isInitialized;
}

int32_t AudioDeviceModuleImpl::MaxSpeakerVolume(uint32_t* maxVolume) const {
  CHECKinitialized_();
  uint32_t maxVol = 0;
  if (audio_device_->MaxSpeakerVolume(maxVol) == -1) {
    return -1;
  }
  *maxVolume = maxVol;
  return 0;
}

int32_t AudioDeviceModuleImpl::MinSpeakerVolume(uint32_t* minVolume) const {
  CHECKinitialized_();
  uint32_t minVol = 0;
  if (audio_device_->MinSpeakerVolume(minVol) == -1) {
    return -1;
  }
  *minVolume = minVol;
  return 0;
}

int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::SetSpeakerMute(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  return audio_device_->SetSpeakerMute(enable);
}

int32_t AudioDeviceModuleImpl::SpeakerMute(bool* enabled) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool muted = false;
  if (audio_device_->SpeakerMute(muted) == -1) {
    return -1;
  }
  *enabled = muted;
  RTC_LOG(LS_INFO) << "output: " << muted;
  return 0;
}

int32_t AudioDeviceModuleImpl::MicrophoneMuteIsAvailable(bool* available) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::SetMicrophoneMute(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  return (audio_device_->SetMicrophoneMute(enable));
}

int32_t AudioDeviceModuleImpl::MicrophoneMute(bool* enabled) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool muted = false;
  if (audio_device_->MicrophoneMute(muted) == -1) {
    return -1;
  }
  *enabled = muted;
  RTC_LOG(LS_INFO) << "output: " << muted;
  return 0;
}

int32_t AudioDeviceModuleImpl::MicrophoneVolumeIsAvailable(bool* available) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::SetMicrophoneVolume(uint32_t volume) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
  CHECKinitialized_();
  return (audio_device_->SetMicrophoneVolume(volume));
}

int32_t AudioDeviceModuleImpl::MicrophoneVolume(uint32_t* volume) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  uint32_t level = 0;
  if (audio_device_->MicrophoneVolume(level) == -1) {
    return -1;
  }
  *volume = level;
  RTC_LOG(LS_INFO) << "output: " << *volume;
  return 0;
}
int32_t AudioDeviceModuleImpl::StereoRecordingIsAvailable(
    bool* available) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  if (audio_device_->RecordingIsInitialized()) {
    RTC_LOG(LS_ERROR)
        << "unable to set stereo mode after recording is initialized";
    return -1;
  }
  if (audio_device_->SetStereoRecording(enable) == -1) {
    if (enable) {
      RTC_LOG(LS_WARNING) << "failed to enable stereo recording";
    }
    return -1;
  }
  int8_t nChannels(1);
  if (enable) {
    nChannels = 2;
  }
  audio_device_buffer_.SetRecordingChannels(nChannels);
  return 0;
}

int32_t AudioDeviceModuleImpl::StereoRecording(bool* enabled) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool stereo = false;
  if (audio_device_->StereoRecording(stereo) == -1) {
    return -1;
  }
  *enabled = stereo;
  RTC_LOG(LS_INFO) << "output: " << stereo;
  return 0;
}

int32_t AudioDeviceModuleImpl::StereoPlayoutIsAvailable(bool* available) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  if (audio_device_->PlayoutIsInitialized()) {
    RTC_LOG(LS_ERROR)
        << "unable to set stereo mode while playing side is initialized";
    return -1;
  }
  if (audio_device_->SetStereoPlayout(enable)) {
    RTC_LOG(LS_WARNING) << "stereo playout is not supported";
    return -1;
  }
  int8_t nChannels(1);
  if (enable) {
    nChannels = 2;
  }
  audio_device_buffer_.SetPlayoutChannels(nChannels);
  return 0;
}

int32_t AudioDeviceModuleImpl::StereoPlayout(bool* enabled) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool stereo = false;
  if (audio_device_->StereoPlayout(stereo) == -1) {
    return -1;
  }
  *enabled = stereo;
  RTC_LOG(LS_INFO) << "output: " << stereo;
  return 0;
}
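// Ordering sketch (illustrative only): stereo mode is rejected once the
// corresponding side is initialized, so channel layout must be chosen first:
//
//   adm->SetStereoPlayout(true);   // before InitPlayout()
//   adm->InitPlayout();
//   adm->StartPlayout();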
int32_t AudioDeviceModuleImpl::PlayoutIsAvailable(bool* available) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::RecordingIsAvailable(bool* available) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  bool isAvailable = false;
  if (audio_device_->RecordingIsAvailable(isAvailable) == -1) {
    return -1;
  }
  *available = isAvailable;
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return 0;
}

int32_t AudioDeviceModuleImpl::MaxMicrophoneVolume(uint32_t* maxVolume) const {
  CHECKinitialized_();
  uint32_t maxVol(0);
  if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) {
    return -1;
  }
  *maxVolume = maxVol;
  return 0;
}

int32_t AudioDeviceModuleImpl::MinMicrophoneVolume(uint32_t* minVolume) const {
  CHECKinitialized_();
  uint32_t minVol(0);
  if (audio_device_->MinMicrophoneVolume(minVol) == -1) {
    return -1;
  }
  *minVolume = minVol;
  return 0;
}

int16_t AudioDeviceModuleImpl::PlayoutDevices() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  uint16_t nPlayoutDevices = audio_device_->PlayoutDevices();
  RTC_LOG(LS_INFO) << "output: " << nPlayoutDevices;
  return (int16_t)(nPlayoutDevices);
}

int32_t AudioDeviceModuleImpl::SetPlayoutDevice(uint16_t index) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
  CHECKinitialized_();
  return audio_device_->SetPlayoutDevice(index);
}

int32_t AudioDeviceModuleImpl::SetPlayoutDevice(WindowsDeviceType device) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  return audio_device_->SetPlayoutDevice(device);
}

int32_t AudioDeviceModuleImpl::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize]) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
  CHECKinitialized_();
  if (name == NULL) {
    return -1;
  }
  if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) {
    return -1;
  }
  if (name != NULL) {
    RTC_LOG(LS_INFO) << "output: name = " << name;
  }
  if (guid != NULL) {
    RTC_LOG(LS_INFO) << "output: guid = " << guid;
  }
  return 0;
}

int32_t AudioDeviceModuleImpl::RecordingDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize]) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
  CHECKinitialized_();
  if (name == NULL) {
    return -1;
  }
  if (audio_device_->RecordingDeviceName(index, name, guid) == -1) {
    return -1;
  }
  if (name != NULL) {
    RTC_LOG(LS_INFO) << "output: name = " << name;
  }
  if (guid != NULL) {
    RTC_LOG(LS_INFO) << "output: guid = " << guid;
  }
  return 0;
}

int16_t AudioDeviceModuleImpl::RecordingDevices() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  uint16_t nRecordingDevices = audio_device_->RecordingDevices();
  RTC_LOG(LS_INFO) << "output: " << nRecordingDevices;
  return (int16_t)nRecordingDevices;
}

int32_t AudioDeviceModuleImpl::SetRecordingDevice(uint16_t index) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
  CHECKinitialized_();
  return audio_device_->SetRecordingDevice(index);
}

int32_t AudioDeviceModuleImpl::SetRecordingDevice(WindowsDeviceType device) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  return audio_device_->SetRecordingDevice(device);
}

int32_t AudioDeviceModuleImpl::InitPlayout() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  if (PlayoutIsInitialized()) {
    return 0;
  }
  int32_t result = audio_device_->InitPlayout();
  RTC_LOG(LS_INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
                        static_cast<int>(result == 0));
  return result;
}

int32_t AudioDeviceModuleImpl::InitRecording() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  if (RecordingIsInitialized()) {
    return 0;
  }
  int32_t result = audio_device_->InitRecording();
  RTC_LOG(LS_INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
                        static_cast<int>(result == 0));
  return result;
}

bool AudioDeviceModuleImpl::PlayoutIsInitialized() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  return audio_device_->PlayoutIsInitialized();
}

bool AudioDeviceModuleImpl::RecordingIsInitialized() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  return audio_device_->RecordingIsInitialized();
}
int32_t AudioDeviceModuleImpl::StartPlayout() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  if (Playing()) {
    return 0;
  }
  audio_device_buffer_.StartPlayout();
  int32_t result = audio_device_->StartPlayout();
  RTC_LOG(LS_INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
                        static_cast<int>(result == 0));
  return result;
}

int32_t AudioDeviceModuleImpl::StopPlayout() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  int32_t result = audio_device_->StopPlayout();
  audio_device_buffer_.StopPlayout();
  RTC_LOG(LS_INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
                        static_cast<int>(result == 0));
  return result;
}

bool AudioDeviceModuleImpl::Playing() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  return audio_device_->Playing();
}

int32_t AudioDeviceModuleImpl::StartRecording() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  if (Recording()) {
    return 0;
  }
  audio_device_buffer_.StartRecording();
  int32_t result = audio_device_->StartRecording();
  RTC_LOG(LS_INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
                        static_cast<int>(result == 0));
  return result;
}

int32_t AudioDeviceModuleImpl::StopRecording() {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized_();
  int32_t result = audio_device_->StopRecording();
  audio_device_buffer_.StopRecording();
  RTC_LOG(LS_INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
                        static_cast<int>(result == 0));
  return result;
}

bool AudioDeviceModuleImpl::Recording() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  return audio_device_->Recording();
}

int32_t AudioDeviceModuleImpl::RegisterAudioCallback(
    AudioTransport* audioCallback) {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  return audio_device_buffer_.RegisterAudioCallback(audioCallback);
}

int32_t AudioDeviceModuleImpl::PlayoutDelay(uint16_t* delayMS) const {
  CHECKinitialized_();
  uint16_t delay = 0;
  if (audio_device_->PlayoutDelay(delay) == -1) {
    RTC_LOG(LS_ERROR) << "failed to retrieve the playout delay";
    return -1;
  }
  *delayMS = delay;
  return 0;
}

bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isAvailable = audio_device_->BuiltInAECIsAvailable();
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return isAvailable;
}

int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  int32_t ok = audio_device_->EnableBuiltInAEC(enable);
  RTC_LOG(LS_INFO) << "output: " << ok;
  return ok;
}

bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isAvailable = audio_device_->BuiltInAGCIsAvailable();
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return isAvailable;
}

int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  int32_t ok = audio_device_->EnableBuiltInAGC(enable);
  RTC_LOG(LS_INFO) << "output: " << ok;
  return ok;
}

bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  CHECKinitialized__BOOL();
  bool isAvailable = audio_device_->BuiltInNSIsAvailable();
  RTC_LOG(LS_INFO) << "output: " << isAvailable;
  return isAvailable;
}

int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) {
  RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
  CHECKinitialized_();
  int32_t ok = audio_device_->EnableBuiltInNS(enable);
  RTC_LOG(LS_INFO) << "output: " << ok;
  return ok;
}

int32_t AudioDeviceModuleImpl::GetPlayoutUnderrunCount() const {
  CHECKinitialized_();
  int32_t underrunCount = audio_device_->GetPlayoutUnderrunCount();
  return underrunCount;
}

#if defined(WEBRTC_IOS)
int AudioDeviceModuleImpl::GetPlayoutAudioParameters(
    AudioParameters* params) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  int r = audio_device_->GetPlayoutAudioParameters(params);
  RTC_LOG(LS_INFO) << "output: " << r;
  return r;
}

int AudioDeviceModuleImpl::GetRecordAudioParameters(
    AudioParameters* params) const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  int r = audio_device_->GetRecordAudioParameters(params);
  RTC_LOG(LS_INFO) << "output: " << r;
  return r;
}
#endif  // WEBRTC_IOS

AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  return platform_type_;
}

AudioDeviceModule::AudioLayer AudioDeviceModuleImpl::PlatformAudioLayer()
    const {
  RTC_LOG(LS_INFO) << __FUNCTION__;
  return audio_layer_;
}

}  // namespace webrtc
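Taken together, the methods above suggest the following rough driving sequence for a created module; `transport` stands for some AudioTransport implementation and is assumed here, not defined:

adm->RegisterAudioCallback(&transport);
adm->SetStereoPlayout(true);  // optional; must precede InitPlayout()
adm->InitPlayout();
adm->StartPlayout();
// ... stream audio ...
adm->StopPlayout();
adm->Terminate();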
@@ -0,0 +1,191 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_
#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_

#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)

#include <stdint.h>

#include <memory>

#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/include/audio_device.h"

namespace webrtc {

class AudioDeviceGeneric;
class AudioManager;

class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
 public:
  enum PlatformType {
    kPlatformNotSupported = 0,
    kPlatformWin32 = 1,
    kPlatformWinCe = 2,
    kPlatformLinux = 3,
    kPlatformMac = 4,
    kPlatformAndroid = 5,
    kPlatformIOS = 6,
    // Fuchsia isn't fully supported: there is no AudioDeviceGeneric
    // implementation for it, so the `CreatePlatformSpecificObjects()` call
    // will fail unless a usable implementation is provided by the user.
    kPlatformFuchsia = 7,
  };

  int32_t CheckPlatform();
  int32_t CreatePlatformSpecificObjects();
  int32_t AttachAudioBuffer();

  AudioDeviceModuleImpl(AudioLayer audio_layer,
                        TaskQueueFactory* task_queue_factory);
  // If `create_detached` is true, the created ADM can be used on a different
  // thread than the one on which it was created. It's useful for testing.
  AudioDeviceModuleImpl(AudioLayer audio_layer,
                        std::unique_ptr<AudioDeviceGeneric> audio_device,
                        TaskQueueFactory* task_queue_factory,
                        bool create_detached);
  ~AudioDeviceModuleImpl() override;

  // Retrieve the currently utilized audio layer
  int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;

  // Full-duplex transportation of PCM audio
  int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;

  // Main initialization and termination
  int32_t Init() override;
  int32_t Terminate() override;
  bool Initialized() const override;

  // Device enumeration
  int16_t PlayoutDevices() override;
  int16_t RecordingDevices() override;
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override;
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override;

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index) override;
  int32_t SetPlayoutDevice(WindowsDeviceType device) override;
  int32_t SetRecordingDevice(uint16_t index) override;
  int32_t SetRecordingDevice(WindowsDeviceType device) override;

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool* available) override;
  int32_t InitPlayout() override;
  bool PlayoutIsInitialized() const override;
  int32_t RecordingIsAvailable(bool* available) override;
  int32_t InitRecording() override;
  bool RecordingIsInitialized() const override;

  // Audio transport control
  int32_t StartPlayout() override;
  int32_t StopPlayout() override;
  bool Playing() const override;
  int32_t StartRecording() override;
  int32_t StopRecording() override;
  bool Recording() const override;

  // Audio mixer initialization
  int32_t InitSpeaker() override;
  bool SpeakerIsInitialized() const override;
  int32_t InitMicrophone() override;
  bool MicrophoneIsInitialized() const override;

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool* available) override;
  int32_t SetSpeakerVolume(uint32_t volume) override;
  int32_t SpeakerVolume(uint32_t* volume) const override;
  int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
  int32_t MinSpeakerVolume(uint32_t* minVolume) const override;

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool* available) override;
  int32_t SetMicrophoneVolume(uint32_t volume) override;
  int32_t MicrophoneVolume(uint32_t* volume) const override;
  int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
  int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool* available) override;
  int32_t SetSpeakerMute(bool enable) override;
  int32_t SpeakerMute(bool* enabled) const override;

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool* available) override;
  int32_t SetMicrophoneMute(bool enable) override;
  int32_t MicrophoneMute(bool* enabled) const override;

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool* available) const override;
  int32_t SetStereoPlayout(bool enable) override;
  int32_t StereoPlayout(bool* enabled) const override;
  int32_t StereoRecordingIsAvailable(bool* available) const override;
  int32_t SetStereoRecording(bool enable) override;
  int32_t StereoRecording(bool* enabled) const override;

  // Delay information and control
  int32_t PlayoutDelay(uint16_t* delayMS) const override;

  bool BuiltInAECIsAvailable() const override;
  int32_t EnableBuiltInAEC(bool enable) override;
  bool BuiltInAGCIsAvailable() const override;
  int32_t EnableBuiltInAGC(bool enable) override;
  bool BuiltInNSIsAvailable() const override;
  int32_t EnableBuiltInNS(bool enable) override;

  // Play underrun count.
  int32_t GetPlayoutUnderrunCount() const override;

#if defined(WEBRTC_IOS)
  int GetPlayoutAudioParameters(AudioParameters* params) const override;
  int GetRecordAudioParameters(AudioParameters* params) const override;
#endif  // WEBRTC_IOS

#if defined(WEBRTC_ANDROID)
  // Only use this accessor for test purposes on Android.
  AudioManager* GetAndroidAudioManagerForTest() {
    return audio_manager_android_.get();
  }
#endif
  AudioDeviceBuffer* GetAudioDeviceBuffer() { return &audio_device_buffer_; }

  int RestartPlayoutInternally() override { return -1; }
  int RestartRecordingInternally() override { return -1; }
  int SetPlayoutSampleRate(uint32_t sample_rate) override { return -1; }
  int SetRecordingSampleRate(uint32_t sample_rate) override { return -1; }

 private:
  PlatformType Platform() const;
  AudioLayer PlatformAudioLayer() const;

  AudioLayer audio_layer_;
  PlatformType platform_type_ = kPlatformNotSupported;
  bool initialized_ = false;
#if defined(WEBRTC_ANDROID)
  // Should be declared first to ensure that it outlives other resources.
  std::unique_ptr<AudioManager> audio_manager_android_;
#endif
  AudioDeviceBuffer audio_device_buffer_;
  std::unique_ptr<AudioDeviceGeneric> audio_device_;
};

}  // namespace webrtc

#endif  // defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)

#endif  // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_
@@ -0,0 +1,27 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/audio_device_name.h"

#include "absl/strings/string_view.h"

namespace webrtc {

const char AudioDeviceName::kDefaultDeviceId[] = "default";

AudioDeviceName::AudioDeviceName(absl::string_view device_name,
                                 absl::string_view unique_id)
    : device_name(device_name), unique_id(unique_id) {}

bool AudioDeviceName::IsValid() {
  return !device_name.empty() && !unique_id.empty();
}

}  // namespace webrtc
@@ -0,0 +1,50 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_

#include <deque>
#include <string>

#include "absl/strings/string_view.h"

namespace webrtc {

struct AudioDeviceName {
  // Represents a default device. Note that on Windows there are two different
  // types of default devices (Default and Default Communication). They can
  // either be two different physical devices or be two different roles for
  // one single device. Hence, this id must be combined with a "role
  // parameter" on Windows to uniquely identify a default device.
  static const char kDefaultDeviceId[];

  AudioDeviceName() = default;
  AudioDeviceName(absl::string_view device_name, absl::string_view unique_id);

  ~AudioDeviceName() = default;

  // Support copy and move.
  AudioDeviceName(const AudioDeviceName& other) = default;
  AudioDeviceName(AudioDeviceName&&) = default;
  AudioDeviceName& operator=(const AudioDeviceName&) = default;
  AudioDeviceName& operator=(AudioDeviceName&&) = default;

  bool IsValid();

  std::string device_name;  // Friendly name of the device.
  std::string unique_id;    // Unique identifier for the device.
};

typedef std::deque<AudioDeviceName> AudioDeviceNames;

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
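A short usage sketch for the struct above; the device strings are examples only:

webrtc::AudioDeviceName name("Built-in Microphone",
                             webrtc::AudioDeviceName::kDefaultDeviceId);
if (name.IsValid()) {
  webrtc::AudioDeviceNames names;  // std::deque<AudioDeviceName>
  names.push_back(name);
}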
@@ -0,0 +1,226 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/dummy/audio_device_dummy.h"

namespace webrtc {

int32_t AudioDeviceDummy::ActiveAudioLayer(
    AudioDeviceModule::AudioLayer& audioLayer) const {
  return -1;
}

AudioDeviceGeneric::InitStatus AudioDeviceDummy::Init() {
  return InitStatus::OK;
}

int32_t AudioDeviceDummy::Terminate() {
  return 0;
}

bool AudioDeviceDummy::Initialized() const {
  return true;
}

int16_t AudioDeviceDummy::PlayoutDevices() {
  return -1;
}

int16_t AudioDeviceDummy::RecordingDevices() {
  return -1;
}

int32_t AudioDeviceDummy::PlayoutDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
  return -1;
}

int32_t AudioDeviceDummy::RecordingDeviceName(uint16_t index,
                                              char name[kAdmMaxDeviceNameSize],
                                              char guid[kAdmMaxGuidSize]) {
  return -1;
}

int32_t AudioDeviceDummy::SetPlayoutDevice(uint16_t index) {
  return -1;
}

int32_t AudioDeviceDummy::SetPlayoutDevice(
    AudioDeviceModule::WindowsDeviceType device) {
  return -1;
}

int32_t AudioDeviceDummy::SetRecordingDevice(uint16_t index) {
  return -1;
}

int32_t AudioDeviceDummy::SetRecordingDevice(
    AudioDeviceModule::WindowsDeviceType device) {
  return -1;
}

int32_t AudioDeviceDummy::PlayoutIsAvailable(bool& available) {
  return -1;
}

int32_t AudioDeviceDummy::InitPlayout() {
  return -1;
}

bool AudioDeviceDummy::PlayoutIsInitialized() const {
  return false;
}

int32_t AudioDeviceDummy::RecordingIsAvailable(bool& available) {
  return -1;
}

int32_t AudioDeviceDummy::InitRecording() {
  return -1;
}

bool AudioDeviceDummy::RecordingIsInitialized() const {
  return false;
}

int32_t AudioDeviceDummy::StartPlayout() {
  return -1;
}

int32_t AudioDeviceDummy::StopPlayout() {
  return 0;
}

bool AudioDeviceDummy::Playing() const {
  return false;
}

int32_t AudioDeviceDummy::StartRecording() {
  return -1;
}

int32_t AudioDeviceDummy::StopRecording() {
  return 0;
}

bool AudioDeviceDummy::Recording() const {
  return false;
}

int32_t AudioDeviceDummy::InitSpeaker() {
  return -1;
}

bool AudioDeviceDummy::SpeakerIsInitialized() const {
  return false;
}

int32_t AudioDeviceDummy::InitMicrophone() {
  return -1;
}

bool AudioDeviceDummy::MicrophoneIsInitialized() const {
  return false;
}

int32_t AudioDeviceDummy::SpeakerVolumeIsAvailable(bool& available) {
  return -1;
}

int32_t AudioDeviceDummy::SetSpeakerVolume(uint32_t volume) {
  return -1;
}

int32_t AudioDeviceDummy::SpeakerVolume(uint32_t& volume) const {
  return -1;
}

int32_t AudioDeviceDummy::MaxSpeakerVolume(uint32_t& maxVolume) const {
  return -1;
}

int32_t AudioDeviceDummy::MinSpeakerVolume(uint32_t& minVolume) const {
  return -1;
}

int32_t AudioDeviceDummy::MicrophoneVolumeIsAvailable(bool& available) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::SetMicrophoneVolume(uint32_t volume) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::MicrophoneVolume(uint32_t& volume) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::MaxMicrophoneVolume(uint32_t& maxVolume) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::MinMicrophoneVolume(uint32_t& minVolume) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::SpeakerMuteIsAvailable(bool& available) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::SetSpeakerMute(bool enable) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::SpeakerMute(bool& enabled) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::MicrophoneMuteIsAvailable(bool& available) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::SetMicrophoneMute(bool enable) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::MicrophoneMute(bool& enabled) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::StereoPlayoutIsAvailable(bool& available) {
|
||||
return -1;
|
||||
}
|
||||
int32_t AudioDeviceDummy::SetStereoPlayout(bool enable) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::StereoPlayout(bool& enabled) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::StereoRecordingIsAvailable(bool& available) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::SetStereoRecording(bool enable) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::StereoRecording(bool& enabled) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t AudioDeviceDummy::PlayoutDelay(uint16_t& delayMS) const {
|
||||
return -1;
|
||||
}
|
||||
|
||||
void AudioDeviceDummy::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {}
|
||||
} // namespace webrtc
|
||||
|
|
@ -0,0 +1,117 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
#define AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_

#include <stdint.h>

#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"

namespace webrtc {

class AudioDeviceDummy : public AudioDeviceGeneric {
 public:
  AudioDeviceDummy() {}
  virtual ~AudioDeviceDummy() {}

  // Retrieve the currently utilized audio layer
  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const override;

  // Main initialization and termination
  InitStatus Init() override;
  int32_t Terminate() override;
  bool Initialized() const override;

  // Device enumeration
  int16_t PlayoutDevices() override;
  int16_t RecordingDevices() override;
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override;
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override;

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index) override;
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override;
  int32_t SetRecordingDevice(uint16_t index) override;
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override;

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available) override;
  int32_t InitPlayout() override;
  bool PlayoutIsInitialized() const override;
  int32_t RecordingIsAvailable(bool& available) override;
  int32_t InitRecording() override;
  bool RecordingIsInitialized() const override;

  // Audio transport control
  int32_t StartPlayout() override;
  int32_t StopPlayout() override;
  bool Playing() const override;
  int32_t StartRecording() override;
  int32_t StopRecording() override;
  bool Recording() const override;

  // Audio mixer initialization
  int32_t InitSpeaker() override;
  bool SpeakerIsInitialized() const override;
  int32_t InitMicrophone() override;
  bool MicrophoneIsInitialized() const override;

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available) override;
  int32_t SetSpeakerVolume(uint32_t volume) override;
  int32_t SpeakerVolume(uint32_t& volume) const override;
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
  int32_t SetMicrophoneVolume(uint32_t volume) override;
  int32_t MicrophoneVolume(uint32_t& volume) const override;
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available) override;
  int32_t SetSpeakerMute(bool enable) override;
  int32_t SpeakerMute(bool& enabled) const override;

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool& available) override;
  int32_t SetMicrophoneMute(bool enable) override;
  int32_t MicrophoneMute(bool& enabled) const override;

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available) override;
  int32_t SetStereoPlayout(bool enable) override;
  int32_t StereoPlayout(bool& enabled) const override;
  int32_t StereoRecordingIsAvailable(bool& available) override;
  int32_t SetStereoRecording(bool enable) override;
  int32_t StereoRecording(bool& enabled) const override;

  // Delay information and control
  int32_t PlayoutDelay(uint16_t& delayMS) const override;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_

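A rough sketch of what the stub contract above means in practice, assuming the class compiles as listed: lifecycle calls succeed while transport and capability calls report failure, so a caller that only initializes and tears down cleanly keeps working with no audio at all:

#include "modules/audio_device/dummy/audio_device_dummy.h"
#include "rtc_base/checks.h"

// Sketch: exercising the no-op device as a test double.
int main() {
  webrtc::AudioDeviceDummy adm;
  RTC_CHECK(adm.Init() == webrtc::AudioDeviceGeneric::InitStatus::OK);
  RTC_CHECK_EQ(-1, adm.StartPlayout());  // Transport is unsupported...
  RTC_CHECK_EQ(0, adm.StopPlayout());    // ...but teardown still succeeds.
  RTC_CHECK_EQ(0, adm.Terminate());
}
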
@ -0,0 +1,508 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/dummy/file_audio_device.h"

#include <string.h>

#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/sleep.h"

namespace webrtc {

const int kRecordingFixedSampleRate = 48000;
const size_t kRecordingNumChannels = 2;
const int kPlayoutFixedSampleRate = 48000;
const size_t kPlayoutNumChannels = 2;
const size_t kPlayoutBufferSize =
    kPlayoutFixedSampleRate / 100 * kPlayoutNumChannels * 2;
const size_t kRecordingBufferSize =
    kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;

FileAudioDevice::FileAudioDevice(absl::string_view inputFilename,
                                 absl::string_view outputFilename)
    : _ptrAudioBuffer(NULL),
      _recordingBuffer(NULL),
      _playoutBuffer(NULL),
      _recordingFramesLeft(0),
      _playoutFramesLeft(0),
      _recordingBufferSizeIn10MS(0),
      _recordingFramesIn10MS(0),
      _playoutFramesIn10MS(0),
      _playing(false),
      _recording(false),
      _lastCallPlayoutMillis(0),
      _lastCallRecordMillis(0),
      _outputFilename(outputFilename),
      _inputFilename(inputFilename) {}

FileAudioDevice::~FileAudioDevice() {}

int32_t FileAudioDevice::ActiveAudioLayer(
    AudioDeviceModule::AudioLayer& audioLayer) const {
  return -1;
}

AudioDeviceGeneric::InitStatus FileAudioDevice::Init() {
  return InitStatus::OK;
}

int32_t FileAudioDevice::Terminate() {
  return 0;
}

bool FileAudioDevice::Initialized() const {
  return true;
}

int16_t FileAudioDevice::PlayoutDevices() {
  return 1;
}

int16_t FileAudioDevice::RecordingDevices() {
  return 1;
}

int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
                                           char name[kAdmMaxDeviceNameSize],
                                           char guid[kAdmMaxGuidSize]) {
  const char* kName = "dummy_device";
  const char* kGuid = "dummy_device_unique_id";
  if (index < 1) {
    memset(name, 0, kAdmMaxDeviceNameSize);
    memset(guid, 0, kAdmMaxGuidSize);
    memcpy(name, kName, strlen(kName));
    memcpy(guid, kGuid, strlen(kGuid));
    return 0;
  }
  return -1;
}

int32_t FileAudioDevice::RecordingDeviceName(uint16_t index,
                                             char name[kAdmMaxDeviceNameSize],
                                             char guid[kAdmMaxGuidSize]) {
  const char* kName = "dummy_device";
  const char* kGuid = "dummy_device_unique_id";
  if (index < 1) {
    memset(name, 0, kAdmMaxDeviceNameSize);
    memset(guid, 0, kAdmMaxGuidSize);
    memcpy(name, kName, strlen(kName));
    memcpy(guid, kGuid, strlen(kGuid));
    return 0;
  }
  return -1;
}

int32_t FileAudioDevice::SetPlayoutDevice(uint16_t index) {
  if (index == 0) {
    _playout_index = index;
    return 0;
  }
  return -1;
}

int32_t FileAudioDevice::SetPlayoutDevice(
    AudioDeviceModule::WindowsDeviceType device) {
  return -1;
}

int32_t FileAudioDevice::SetRecordingDevice(uint16_t index) {
  if (index == 0) {
    _record_index = index;
    return _record_index;
  }
  return -1;
}

int32_t FileAudioDevice::SetRecordingDevice(
    AudioDeviceModule::WindowsDeviceType device) {
  return -1;
}

int32_t FileAudioDevice::PlayoutIsAvailable(bool& available) {
  if (_playout_index == 0) {
    available = true;
    return _playout_index;
  }
  available = false;
  return -1;
}

int32_t FileAudioDevice::InitPlayout() {
  MutexLock lock(&mutex_);

  if (_playing) {
    return -1;
  }

  _playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);

  if (_ptrAudioBuffer) {
    // Update webrtc audio buffer with the selected parameters
    _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
    _ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
  }
  return 0;
}

bool FileAudioDevice::PlayoutIsInitialized() const {
  return _playoutFramesIn10MS != 0;
}

int32_t FileAudioDevice::RecordingIsAvailable(bool& available) {
  if (_record_index == 0) {
    available = true;
    return _record_index;
  }
  available = false;
  return -1;
}

int32_t FileAudioDevice::InitRecording() {
  MutexLock lock(&mutex_);

  if (_recording) {
    return -1;
  }

  _recordingFramesIn10MS = static_cast<size_t>(kRecordingFixedSampleRate / 100);

  if (_ptrAudioBuffer) {
    _ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate);
    _ptrAudioBuffer->SetRecordingChannels(kRecordingNumChannels);
  }
  return 0;
}

bool FileAudioDevice::RecordingIsInitialized() const {
  return _recordingFramesIn10MS != 0;
}

int32_t FileAudioDevice::StartPlayout() {
  if (_playing) {
    return 0;
  }

  _playing = true;
  _playoutFramesLeft = 0;

  if (!_playoutBuffer) {
    _playoutBuffer = new int8_t[kPlayoutBufferSize];
  }
  if (!_playoutBuffer) {
    _playing = false;
    return -1;
  }

  // PLAYOUT
  if (!_outputFilename.empty()) {
    _outputFile = FileWrapper::OpenWriteOnly(_outputFilename);
    if (!_outputFile.is_open()) {
      RTC_LOG(LS_ERROR) << "Failed to open playout file: " << _outputFilename;
      _playing = false;
      delete[] _playoutBuffer;
      _playoutBuffer = NULL;
      return -1;
    }
  }

  _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable(
      [this] {
        while (PlayThreadProcess()) {
        }
      },
      "webrtc_audio_module_play_thread",
      rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));

  RTC_LOG(LS_INFO) << "Started playout capture to output file: "
                   << _outputFilename;
  return 0;
}

int32_t FileAudioDevice::StopPlayout() {
  {
    MutexLock lock(&mutex_);
    _playing = false;
  }

  // Stop the playout thread first.
  if (!_ptrThreadPlay.empty())
    _ptrThreadPlay.Finalize();

  MutexLock lock(&mutex_);

  _playoutFramesLeft = 0;
  delete[] _playoutBuffer;
  _playoutBuffer = NULL;
  _outputFile.Close();

  RTC_LOG(LS_INFO) << "Stopped playout capture to output file: "
                   << _outputFilename;
  return 0;
}

bool FileAudioDevice::Playing() const {
  return _playing;
}

int32_t FileAudioDevice::StartRecording() {
  _recording = true;

  // Make sure we only create the buffer once.
  _recordingBufferSizeIn10MS =
      _recordingFramesIn10MS * kRecordingNumChannels * 2;
  if (!_recordingBuffer) {
    _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
  }

  if (!_inputFilename.empty()) {
    _inputFile = FileWrapper::OpenReadOnly(_inputFilename);
    if (!_inputFile.is_open()) {
      RTC_LOG(LS_ERROR) << "Failed to open audio input file: "
                        << _inputFilename;
      _recording = false;
      delete[] _recordingBuffer;
      _recordingBuffer = NULL;
      return -1;
    }
  }

  _ptrThreadRec = rtc::PlatformThread::SpawnJoinable(
      [this] {
        while (RecThreadProcess()) {
        }
      },
      "webrtc_audio_module_capture_thread",
      rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));

  RTC_LOG(LS_INFO) << "Started recording from input file: " << _inputFilename;

  return 0;
}

int32_t FileAudioDevice::StopRecording() {
  {
    MutexLock lock(&mutex_);
    _recording = false;
  }

  if (!_ptrThreadRec.empty())
    _ptrThreadRec.Finalize();

  MutexLock lock(&mutex_);
  _recordingFramesLeft = 0;
  if (_recordingBuffer) {
    delete[] _recordingBuffer;
    _recordingBuffer = NULL;
  }
  _inputFile.Close();

  RTC_LOG(LS_INFO) << "Stopped recording from input file: " << _inputFilename;
  return 0;
}

bool FileAudioDevice::Recording() const {
  return _recording;
}

int32_t FileAudioDevice::InitSpeaker() {
  return -1;
}

bool FileAudioDevice::SpeakerIsInitialized() const {
  return false;
}

int32_t FileAudioDevice::InitMicrophone() {
  return 0;
}

bool FileAudioDevice::MicrophoneIsInitialized() const {
  return true;
}

int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) {
  return -1;
}

int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) {
  return -1;
}

int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const {
  return -1;
}

int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const {
  return -1;
}

int32_t FileAudioDevice::MinSpeakerVolume(uint32_t& minVolume) const {
  return -1;
}

int32_t FileAudioDevice::MicrophoneVolumeIsAvailable(bool& available) {
  return -1;
}

int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) {
  return -1;
}

int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const {
  return -1;
}

int32_t FileAudioDevice::MaxMicrophoneVolume(uint32_t& maxVolume) const {
  return -1;
}

int32_t FileAudioDevice::MinMicrophoneVolume(uint32_t& minVolume) const {
  return -1;
}

int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) {
  return -1;
}

int32_t FileAudioDevice::SetSpeakerMute(bool enable) {
  return -1;
}

int32_t FileAudioDevice::SpeakerMute(bool& enabled) const {
  return -1;
}

int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) {
  return -1;
}

int32_t FileAudioDevice::SetMicrophoneMute(bool enable) {
  return -1;
}

int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const {
  return -1;
}

int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
  available = true;
  return 0;
}

int32_t FileAudioDevice::SetStereoPlayout(bool enable) {
  return 0;
}

int32_t FileAudioDevice::StereoPlayout(bool& enabled) const {
  enabled = true;
  return 0;
}

int32_t FileAudioDevice::StereoRecordingIsAvailable(bool& available) {
  available = true;
  return 0;
}

int32_t FileAudioDevice::SetStereoRecording(bool enable) {
  return 0;
}

int32_t FileAudioDevice::StereoRecording(bool& enabled) const {
  enabled = true;
  return 0;
}

int32_t FileAudioDevice::PlayoutDelay(uint16_t& delayMS) const {
  return 0;
}

void FileAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  MutexLock lock(&mutex_);

  _ptrAudioBuffer = audioBuffer;

  // Inform the AudioBuffer about default settings for this implementation.
  // Set all values to zero here since the actual settings will be done by
  // InitPlayout and InitRecording later.
  _ptrAudioBuffer->SetRecordingSampleRate(0);
  _ptrAudioBuffer->SetPlayoutSampleRate(0);
  _ptrAudioBuffer->SetRecordingChannels(0);
  _ptrAudioBuffer->SetPlayoutChannels(0);
}

bool FileAudioDevice::PlayThreadProcess() {
  if (!_playing) {
    return false;
  }
  int64_t currentTime = rtc::TimeMillis();
  mutex_.Lock();

  if (_lastCallPlayoutMillis == 0 ||
      currentTime - _lastCallPlayoutMillis >= 10) {
    mutex_.Unlock();
    _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
    mutex_.Lock();

    _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
    RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
    if (_outputFile.is_open()) {
      _outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
    }
    _lastCallPlayoutMillis = currentTime;
  }
  _playoutFramesLeft = 0;
  mutex_.Unlock();

  int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
  if (deltaTimeMillis < 10) {
    SleepMs(10 - deltaTimeMillis);
  }

  return true;
}

bool FileAudioDevice::RecThreadProcess() {
  if (!_recording) {
    return false;
  }

  int64_t currentTime = rtc::TimeMillis();
  mutex_.Lock();

  if (_lastCallRecordMillis == 0 || currentTime - _lastCallRecordMillis >= 10) {
    if (_inputFile.is_open()) {
      if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
        _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
                                           _recordingFramesIn10MS);
      } else {
        _inputFile.Rewind();
      }
      _lastCallRecordMillis = currentTime;
      mutex_.Unlock();
      _ptrAudioBuffer->DeliverRecordedData();
      mutex_.Lock();
    }
  }

  mutex_.Unlock();

  int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
  if (deltaTimeMillis < 10) {
    SleepMs(10 - deltaTimeMillis);
  }

  return true;
}

}  // namespace webrtc

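FileAudioDevice consumes raw 48 kHz, two-channel, 16-bit PCM with no header. A self-contained sketch (standalone C++, not part of this commit) that writes one second of a 440 Hz tone in that format, usable as the device's input file:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch: writes one second of a 440 Hz tone in the raw format
// FileAudioDevice expects: 48 kHz, two channels, 16-bit PCM, interleaved.
int main() {
  const int kSampleRate = 48000;
  const double kTwoPi = 2.0 * 3.14159265358979323846;
  std::vector<int16_t> interleaved;
  interleaved.reserve(kSampleRate * 2);
  for (int i = 0; i < kSampleRate; ++i) {
    const int16_t s = static_cast<int16_t>(
        8000.0 * std::sin(kTwoPi * 440.0 * i / kSampleRate));
    interleaved.push_back(s);  // Left channel.
    interleaved.push_back(s);  // Right channel.
  }
  std::FILE* f = std::fopen("input48k_stereo.pcm", "wb");
  if (!f) return 1;
  std::fwrite(interleaved.data(), sizeof(int16_t), interleaved.size(), f);
  std::fclose(f);
}
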
@ -0,0 +1,163 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_
#define AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_

#include <stdio.h>

#include <memory>
#include <string>

#include "absl/strings/string_view.h"
#include "modules/audio_device/audio_device_generic.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/file_wrapper.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

// This is a fake audio device which plays audio from a file as its microphone
// and plays out into a file.
class FileAudioDevice : public AudioDeviceGeneric {
 public:
  // Constructs a file audio device that reads audio from `inputFilename` and
  // records output audio to `outputFilename`.
  //
  // The input file should be a readable 48k stereo raw file, and the output
  // file should point to a writable location. The output format will also be
  // 48k stereo raw audio.
  FileAudioDevice(absl::string_view inputFilename,
                  absl::string_view outputFilename);
  virtual ~FileAudioDevice();

  // Retrieve the currently utilized audio layer
  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const override;

  // Main initialization and termination
  InitStatus Init() override;
  int32_t Terminate() override;
  bool Initialized() const override;

  // Device enumeration
  int16_t PlayoutDevices() override;
  int16_t RecordingDevices() override;
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override;
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override;

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index) override;
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override;
  int32_t SetRecordingDevice(uint16_t index) override;
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override;

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available) override;
  int32_t InitPlayout() override;
  bool PlayoutIsInitialized() const override;
  int32_t RecordingIsAvailable(bool& available) override;
  int32_t InitRecording() override;
  bool RecordingIsInitialized() const override;

  // Audio transport control
  int32_t StartPlayout() override;
  int32_t StopPlayout() override;
  bool Playing() const override;
  int32_t StartRecording() override;
  int32_t StopRecording() override;
  bool Recording() const override;

  // Audio mixer initialization
  int32_t InitSpeaker() override;
  bool SpeakerIsInitialized() const override;
  int32_t InitMicrophone() override;
  bool MicrophoneIsInitialized() const override;

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available) override;
  int32_t SetSpeakerVolume(uint32_t volume) override;
  int32_t SpeakerVolume(uint32_t& volume) const override;
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
  int32_t SetMicrophoneVolume(uint32_t volume) override;
  int32_t MicrophoneVolume(uint32_t& volume) const override;
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available) override;
  int32_t SetSpeakerMute(bool enable) override;
  int32_t SpeakerMute(bool& enabled) const override;

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool& available) override;
  int32_t SetMicrophoneMute(bool enable) override;
  int32_t MicrophoneMute(bool& enabled) const override;

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available) override;
  int32_t SetStereoPlayout(bool enable) override;
  int32_t StereoPlayout(bool& enabled) const override;
  int32_t StereoRecordingIsAvailable(bool& available) override;
  int32_t SetStereoRecording(bool enable) override;
  int32_t StereoRecording(bool& enabled) const override;

  // Delay information and control
  int32_t PlayoutDelay(uint16_t& delayMS) const override;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

 private:
  static void RecThreadFunc(void*);
  static void PlayThreadFunc(void*);
  bool RecThreadProcess();
  bool PlayThreadProcess();

  int32_t _playout_index;
  int32_t _record_index;
  AudioDeviceBuffer* _ptrAudioBuffer;
  int8_t* _recordingBuffer;  // In bytes.
  int8_t* _playoutBuffer;    // In bytes.
  uint32_t _recordingFramesLeft;
  uint32_t _playoutFramesLeft;
  Mutex mutex_;

  size_t _recordingBufferSizeIn10MS;
  size_t _recordingFramesIn10MS;
  size_t _playoutFramesIn10MS;

  rtc::PlatformThread _ptrThreadRec;
  rtc::PlatformThread _ptrThreadPlay;

  bool _playing;
  bool _recording;
  int64_t _lastCallPlayoutMillis;
  int64_t _lastCallRecordMillis;

  FileWrapper _outputFile;
  FileWrapper _inputFile;
  std::string _outputFilename;
  std::string _inputFilename;
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_

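The 10 ms sizing in file_audio_device.cc follows directly from the fixed format: 48000 / 100 = 480 frames per 10 ms, and 480 frames x 2 channels x 2 bytes = 1920 bytes, which is the value of both kPlayoutBufferSize and kRecordingBufferSize. A compile-time restatement of that arithmetic; the constants are re-declared here for illustration only, since the originals are file-local to the .cc:

#include <cstddef>

// Sketch: mirrors the file-local constants from file_audio_device.cc to show
// where its 10 ms buffer sizes come from.
constexpr int kFixedSampleRate = 48000;  // Samples per second, per channel.
constexpr size_t kNumChannels = 2;       // Interleaved stereo.
constexpr size_t kBytesPerSample = 2;    // 16-bit PCM.

// 10 ms of audio is 1/100th of a second: 480 frames at 48 kHz.
constexpr size_t kFramesPer10Ms = kFixedSampleRate / 100;
constexpr size_t kBufferSizeBytes =
    kFramesPer10Ms * kNumChannels * kBytesPerSample;

static_assert(kFramesPer10Ms == 480, "480 frames per 10 ms at 48 kHz");
static_assert(kBufferSizeBytes == 1920, "1920 bytes per 10 ms chunk");
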
@ -0,0 +1,62 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/dummy/file_audio_device_factory.h"

#include <stdio.h>

#include <cstdlib>

#include "absl/strings/string_view.h"
#include "modules/audio_device/dummy/file_audio_device.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_utils.h"

namespace webrtc {

bool FileAudioDeviceFactory::_isConfigured = false;
char FileAudioDeviceFactory::_inputAudioFilename[MAX_FILENAME_LEN] = "";
char FileAudioDeviceFactory::_outputAudioFilename[MAX_FILENAME_LEN] = "";

FileAudioDevice* FileAudioDeviceFactory::CreateFileAudioDevice() {
  // Bail out here if the files haven't been set explicitly.
  // audio_device_impl.cc should then fall back to dummy audio.
  if (!_isConfigured) {
    RTC_LOG(LS_WARNING)
        << "WebRTC configured with WEBRTC_DUMMY_FILE_DEVICES but "
           "no device files supplied. Will fall back to dummy "
           "audio.";

    return nullptr;
  }
  return new FileAudioDevice(_inputAudioFilename, _outputAudioFilename);
}

void FileAudioDeviceFactory::SetFilenamesToUse(
    absl::string_view inputAudioFilename,
    absl::string_view outputAudioFilename) {
#ifdef WEBRTC_DUMMY_FILE_DEVICES
  RTC_DCHECK_LT(inputAudioFilename.size(), MAX_FILENAME_LEN);
  RTC_DCHECK_LT(outputAudioFilename.size(), MAX_FILENAME_LEN);

  // Copy the strings since we don't know the lifetime of the input pointers.
  rtc::strcpyn(_inputAudioFilename, MAX_FILENAME_LEN, inputAudioFilename);
  rtc::strcpyn(_outputAudioFilename, MAX_FILENAME_LEN, outputAudioFilename);
  _isConfigured = true;
#else
  // Sanity: must be compiled with the right define to run this.
  printf(
      "Trying to use dummy file devices, but is not compiled "
      "with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
  std::exit(1);
#endif
}

}  // namespace webrtc

@ -0,0 +1,44 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_
#define AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_

#include <stdint.h>

#include "absl/strings/string_view.h"

namespace webrtc {

class FileAudioDevice;

// This class is used by audio_device_impl.cc when WebRTC is compiled with
// WEBRTC_DUMMY_FILE_DEVICES. The application must include this file and set
// the filenames to use before the audio device module is initialized. This is
// intended for test tools which use the audio device module.
class FileAudioDeviceFactory {
 public:
  static FileAudioDevice* CreateFileAudioDevice();

  // The input file must be a readable 48k stereo raw file. The output
  // file must be writable. The strings will be copied.
  static void SetFilenamesToUse(absl::string_view inputAudioFilename,
                                absl::string_view outputAudioFilename);

 private:
  enum : uint32_t { MAX_FILENAME_LEN = 512 };
  static bool _isConfigured;
  static char _inputAudioFilename[MAX_FILENAME_LEN];
  static char _outputAudioFilename[MAX_FILENAME_LEN];
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_

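A sketch of the intended call order, assuming a test binary built with WEBRTC_DUMMY_FILE_DEVICES; the paths are placeholders:

#include "modules/audio_device/dummy/file_audio_device_factory.h"

// Sketch: configure the factory before the audio device module is created,
// so audio_device_impl.cc picks up the file-backed device instead of the
// silent dummy fallback.
void ConfigureFileAudio() {
  webrtc::FileAudioDeviceFactory::SetFilenamesToUse(
      "/tmp/input48k_stereo.pcm",    // Read as the microphone signal.
      "/tmp/output48k_stereo.pcm");  // Receives the rendered playout.
}
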
@ -0,0 +1,133 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/fine_audio_buffer.h"

#include <cstdint>
#include <cstring>

#include "api/array_view.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"

namespace webrtc {

FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer)
    : audio_device_buffer_(audio_device_buffer),
      playout_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
          audio_device_buffer->PlayoutSampleRate() * 10 / 1000)),
      record_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
          audio_device_buffer->RecordingSampleRate() * 10 / 1000)),
      playout_channels_(audio_device_buffer->PlayoutChannels()),
      record_channels_(audio_device_buffer->RecordingChannels()) {
  RTC_DCHECK(audio_device_buffer_);
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  if (IsReadyForPlayout()) {
    RTC_DLOG(LS_INFO) << "playout_samples_per_channel_10ms: "
                      << playout_samples_per_channel_10ms_;
    RTC_DLOG(LS_INFO) << "playout_channels: " << playout_channels_;
  }
  if (IsReadyForRecord()) {
    RTC_DLOG(LS_INFO) << "record_samples_per_channel_10ms: "
                      << record_samples_per_channel_10ms_;
    RTC_DLOG(LS_INFO) << "record_channels: " << record_channels_;
  }
}

FineAudioBuffer::~FineAudioBuffer() {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
}

void FineAudioBuffer::ResetPlayout() {
  playout_buffer_.Clear();
}

void FineAudioBuffer::ResetRecord() {
  record_buffer_.Clear();
}

bool FineAudioBuffer::IsReadyForPlayout() const {
  return playout_samples_per_channel_10ms_ > 0 && playout_channels_ > 0;
}

bool FineAudioBuffer::IsReadyForRecord() const {
  return record_samples_per_channel_10ms_ > 0 && record_channels_ > 0;
}

void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                                     int playout_delay_ms) {
  RTC_DCHECK(IsReadyForPlayout());
  // Ask WebRTC for new data in chunks of 10ms until we have enough to
  // fulfill the request. It is possible that the buffer already contains
  // enough samples from the last round.
  while (playout_buffer_.size() < audio_buffer.size()) {
    // Get 10ms decoded audio from WebRTC. The ADB knows about the number of
    // channels; hence we can ask for the number of samples per channel here.
    if (audio_device_buffer_->RequestPlayoutData(
            playout_samples_per_channel_10ms_) ==
        static_cast<int32_t>(playout_samples_per_channel_10ms_)) {
      // Append 10ms to the end of the local buffer taking the number of
      // channels into account.
      const size_t num_elements_10ms =
          playout_channels_ * playout_samples_per_channel_10ms_;
      const size_t written_elements = playout_buffer_.AppendData(
          num_elements_10ms, [&](rtc::ArrayView<int16_t> buf) {
            const size_t samples_per_channel_10ms =
                audio_device_buffer_->GetPlayoutData(buf.data());
            return playout_channels_ * samples_per_channel_10ms;
          });
      RTC_DCHECK_EQ(num_elements_10ms, written_elements);
    } else {
      // Provide silence if AudioDeviceBuffer::RequestPlayoutData() fails.
      // Can e.g. happen when an AudioTransport has not been registered.
      const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
      std::memset(audio_buffer.data(), 0, num_bytes);
      return;
    }
  }

  // Provide the requested number of bytes to the consumer.
  const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
  memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes);
  // Move remaining samples to start of buffer to prepare for next round.
  memmove(playout_buffer_.data(), playout_buffer_.data() + audio_buffer.size(),
          (playout_buffer_.size() - audio_buffer.size()) * sizeof(int16_t));
  playout_buffer_.SetSize(playout_buffer_.size() - audio_buffer.size());
  // Cache playout latency for usage in DeliverRecordedData().
  playout_delay_ms_ = playout_delay_ms;
}

void FineAudioBuffer::DeliverRecordedData(
    rtc::ArrayView<const int16_t> audio_buffer,
    int record_delay_ms,
    absl::optional<int64_t> capture_time_ns) {
  RTC_DCHECK(IsReadyForRecord());
  // Always append new data and grow the buffer when needed.
  record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
  // Consume samples from the buffer in chunks of 10ms until there is not
  // enough data left. The number of remaining samples in the cache is given
  // by the new size of the internal `record_buffer_`.
  const size_t num_elements_10ms =
      record_channels_ * record_samples_per_channel_10ms_;
  while (record_buffer_.size() >= num_elements_10ms) {
    audio_device_buffer_->SetRecordedBuffer(record_buffer_.data(),
                                            record_samples_per_channel_10ms_,
                                            capture_time_ns);
    audio_device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
    audio_device_buffer_->DeliverRecordedData();
    memmove(record_buffer_.data(), record_buffer_.data() + num_elements_10ms,
            (record_buffer_.size() - num_elements_10ms) * sizeof(int16_t));
    record_buffer_.SetSize(record_buffer_.size() - num_elements_10ms);
  }
}

}  // namespace webrtc

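To make the chunking concrete: with a 48 kHz mono ADB, a device callback that hands over 5 ms buffers delivers 240 samples per call, so the 10 ms loop in DeliverRecordedData() fires on every second call. A sketch of such a callback, with the ADB and FineAudioBuffer setup assumed to happen elsewhere:

#include "api/array_view.h"
#include "modules/audio_device/fine_audio_buffer.h"

// Sketch: feeding 5 ms device callbacks through FineAudioBuffer. Assumes the
// AudioDeviceBuffer behind `fine_buffer` was configured for 48 kHz mono, so
// each call carries 240 samples and every second call triggers a 10 ms
// delivery to WebRTC.
void OnDeviceCapture(webrtc::FineAudioBuffer& fine_buffer,
                     const int16_t* samples_5ms) {
  constexpr int kRecordDelayMs = 5;  // Fixed estimate; fine on most platforms.
  fine_buffer.DeliverRecordedData(
      rtc::ArrayView<const int16_t>(samples_5ms, 240), kRecordDelayMs);
}
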
@ -0,0 +1,103 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
#define MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_

#include <cstddef>
#include <cstdint>

#include "absl/types/optional.h"
#include "api/array_view.h"
#include "rtc_base/buffer.h"

namespace webrtc {

class AudioDeviceBuffer;

// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with 16-bit PCM
// audio samples corresponding to 10ms of data. It then allows for this data
// to be pulled in a finer or coarser granularity. I.e., interacting with this
// class instead of directly with the AudioDeviceBuffer one can ask for any
// number of audio data samples. This class also ensures that audio data can be
// delivered to the ADB in 10ms chunks when the size of the provided audio
// buffers differs from 10ms.
// As an example: calling DeliverRecordedData() with 5ms buffers will deliver
// accumulated 10ms worth of data to the ADB every second call.
class FineAudioBuffer {
 public:
  // `audio_device_buffer` is a buffer that provides 10ms of audio data.
  FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer);
  ~FineAudioBuffer();

  // Clears buffers and counters dealing with playout and/or recording.
  void ResetPlayout();
  void ResetRecord();

  // Utility methods which return true if valid parameters were acquired at
  // construction.
  bool IsReadyForPlayout() const;
  bool IsReadyForRecord() const;

  // Copies audio samples into `audio_buffer` where the number of requested
  // elements is specified by `audio_buffer.size()`. The producer will always
  // fill up the audio buffer and if no audio exists, the buffer will contain
  // silence instead. The provided delay estimate in `playout_delay_ms` should
  // contain an estimate of the latency between when an audio frame is read
  // from WebRTC and when it is played out on the speaker.
  void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                      int playout_delay_ms);

  // Consumes the audio data in `audio_buffer` and sends it to the WebRTC layer
  // in chunks of 10ms. The sum of the provided delay estimate in
  // `record_delay_ms` and the latest `playout_delay_ms` in GetPlayoutData()
  // are given to the AEC in the audio processing module.
  // They can be fixed values on most platforms and they are ignored if an
  // external (hardware/built-in) AEC is used.
  // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
  // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
  // cache. Call #3 restarts the scheme above.
  void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
                           int record_delay_ms) {
    DeliverRecordedData(audio_buffer, record_delay_ms, absl::nullopt);
  }
  void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
                           int record_delay_ms,
                           absl::optional<int64_t> capture_time_ns);

 private:
  // Device buffer that works with 10ms chunks of data both for playout and
  // for recording. I.e., the WebRTC side will always be asked for audio to be
  // played out in 10ms chunks and recorded audio will be sent to WebRTC in
  // 10ms chunks as well. This raw pointer is owned by the constructor of this
  // class and the owner must ensure that the pointer is valid during the
  // lifetime of this object.
  AudioDeviceBuffer* const audio_device_buffer_;
  // Number of audio samples per channel per 10ms. Set once at construction
  // based on parameters in `audio_device_buffer`.
  const size_t playout_samples_per_channel_10ms_;
  const size_t record_samples_per_channel_10ms_;
  // Number of audio channels. Set once at construction based on parameters in
  // `audio_device_buffer`.
  const size_t playout_channels_;
  const size_t record_channels_;
  // Storage for output samples from which a consumer can read audio buffers
  // in any size using GetPlayoutData().
  rtc::BufferT<int16_t> playout_buffer_;
  // Storage for input samples that are about to be delivered to the WebRTC
  // ADB or remain from the last successful delivery of a 10ms audio buffer.
  rtc::BufferT<int16_t> record_buffer_;
  // Contains the latest delay estimate given to GetPlayoutData().
  int playout_delay_ms_ = 0;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_

@ -0,0 +1,196 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_

#include "absl/types/optional.h"
#include "api/ref_count.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device_defines.h"

namespace webrtc {

class AudioDeviceModuleForTest;

class AudioDeviceModule : public webrtc::RefCountInterface {
 public:
  enum AudioLayer {
    kPlatformDefaultAudio = 0,
    kWindowsCoreAudio,
    kWindowsCoreAudio2,
    kLinuxAlsaAudio,
    kLinuxPulseAudio,
    kAndroidJavaAudio,
    kAndroidOpenSLESAudio,
    kAndroidJavaInputAndOpenSLESOutputAudio,
    kAndroidAAudioAudio,
    kAndroidJavaInputAndAAudioOutputAudio,
    kDummyAudio,
    kAndroidScreenAudio,
    kAndroidMergedScreenAudio
  };

  enum WindowsDeviceType {
    kDefaultCommunicationDevice = -1,
    kDefaultDevice = -2
  };

  struct Stats {
    // The fields below correspond to similarly-named fields in the WebRTC
    // stats spec. https://w3c.github.io/webrtc-stats/#playoutstats-dict*
    double synthesized_samples_duration_s = 0;
    uint64_t synthesized_samples_events = 0;
    double total_samples_duration_s = 0;
    double total_playout_delay_s = 0;
    uint64_t total_samples_count = 0;
  };

 public:
  // Creates a default ADM for usage in production code.
  static rtc::scoped_refptr<AudioDeviceModule> Create(
      AudioLayer audio_layer,
      TaskQueueFactory* task_queue_factory);
  // Creates an ADM with support for extra test methods. Don't use this
  // factory in production code.
  static rtc::scoped_refptr<AudioDeviceModuleForTest> CreateForTest(
      AudioLayer audio_layer,
      TaskQueueFactory* task_queue_factory);

  // Retrieve the currently utilized audio layer
  virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const = 0;

  // Full-duplex transportation of PCM audio
  virtual int32_t RegisterAudioCallback(AudioTransport* audioCallback) = 0;

  // Main initialization and termination
  virtual int32_t Init() = 0;
  virtual int32_t Terminate() = 0;
  virtual bool Initialized() const = 0;

  // Device enumeration
  virtual int16_t PlayoutDevices() = 0;
  virtual int16_t RecordingDevices() = 0;
  virtual int32_t PlayoutDeviceName(uint16_t index,
                                    char name[kAdmMaxDeviceNameSize],
                                    char guid[kAdmMaxGuidSize]) = 0;
  virtual int32_t RecordingDeviceName(uint16_t index,
                                      char name[kAdmMaxDeviceNameSize],
                                      char guid[kAdmMaxGuidSize]) = 0;

  // Device selection
  virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
  virtual int32_t SetPlayoutDevice(WindowsDeviceType device) = 0;
  virtual int32_t SetRecordingDevice(uint16_t index) = 0;
  virtual int32_t SetRecordingDevice(WindowsDeviceType device) = 0;

  // Audio transport initialization
  virtual int32_t PlayoutIsAvailable(bool* available) = 0;
  virtual int32_t InitPlayout() = 0;
  virtual bool PlayoutIsInitialized() const = 0;
  virtual int32_t RecordingIsAvailable(bool* available) = 0;
  virtual int32_t InitRecording() = 0;
  virtual bool RecordingIsInitialized() const = 0;

  // Audio transport control
  virtual int32_t StartPlayout() = 0;
  virtual int32_t StopPlayout() = 0;
  virtual bool Playing() const = 0;
  virtual int32_t StartRecording() = 0;
  virtual int32_t StopRecording() = 0;
  virtual bool Recording() const = 0;

  // Audio mixer initialization
  virtual int32_t InitSpeaker() = 0;
  virtual bool SpeakerIsInitialized() const = 0;
  virtual int32_t InitMicrophone() = 0;
  virtual bool MicrophoneIsInitialized() const = 0;

  // Speaker volume controls
  virtual int32_t SpeakerVolumeIsAvailable(bool* available) = 0;
  virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
  virtual int32_t SpeakerVolume(uint32_t* volume) const = 0;
  virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const = 0;
  virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const = 0;

  // Microphone volume controls
  virtual int32_t MicrophoneVolumeIsAvailable(bool* available) = 0;
  virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
  virtual int32_t MicrophoneVolume(uint32_t* volume) const = 0;
  virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const = 0;
  virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const = 0;

  // Speaker mute control
  virtual int32_t SpeakerMuteIsAvailable(bool* available) = 0;
  virtual int32_t SetSpeakerMute(bool enable) = 0;
  virtual int32_t SpeakerMute(bool* enabled) const = 0;

  // Microphone mute control
  virtual int32_t MicrophoneMuteIsAvailable(bool* available) = 0;
  virtual int32_t SetMicrophoneMute(bool enable) = 0;
  virtual int32_t MicrophoneMute(bool* enabled) const = 0;

  // Stereo support
  virtual int32_t StereoPlayoutIsAvailable(bool* available) const = 0;
  virtual int32_t SetStereoPlayout(bool enable) = 0;
  virtual int32_t StereoPlayout(bool* enabled) const = 0;
  virtual int32_t StereoRecordingIsAvailable(bool* available) const = 0;
  virtual int32_t SetStereoRecording(bool enable) = 0;
  virtual int32_t StereoRecording(bool* enabled) const = 0;

  // Playout delay
  virtual int32_t PlayoutDelay(uint16_t* delayMS) const = 0;

  // Only supported on Android.
  virtual bool BuiltInAECIsAvailable() const = 0;
  virtual bool BuiltInAGCIsAvailable() const = 0;
  virtual bool BuiltInNSIsAvailable() const = 0;

  // Enables the built-in audio effects. Only supported on Android.
  virtual int32_t EnableBuiltInAEC(bool enable) = 0;
  virtual int32_t EnableBuiltInAGC(bool enable) = 0;
  virtual int32_t EnableBuiltInNS(bool enable) = 0;

  // Play underrun count. Only supported on Android.
  // TODO(alexnarest): Make it abstract after upstream projects support it.
  virtual int32_t GetPlayoutUnderrunCount() const { return -1; }

  // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will
  // not be present in the stats.
  virtual absl::optional<Stats> GetStats() const { return absl::nullopt; }

// Only supported on iOS.
#if defined(WEBRTC_IOS)
  virtual int GetPlayoutAudioParameters(AudioParameters* params) const = 0;
  virtual int GetRecordAudioParameters(AudioParameters* params) const = 0;
#endif  // WEBRTC_IOS

 protected:
  ~AudioDeviceModule() override {}
};

// Extends the default ADM interface with some extra test methods.
// Intended for usage in tests only and requires a unique factory method.
class AudioDeviceModuleForTest : public AudioDeviceModule {
 public:
  // Triggers internal restart sequences of audio streaming. Can be used by
  // tests to emulate events corresponding to e.g. removal of an active audio
  // device or other actions which cause the stream to be disconnected.
  virtual int RestartPlayoutInternally() = 0;
  virtual int RestartRecordingInternally() = 0;

  virtual int SetPlayoutSampleRate(uint32_t sample_rate) = 0;
  virtual int SetRecordingSampleRate(uint32_t sample_rate) = 0;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_

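A sketch of the canonical call order against this interface: create, Init(), select a device, then the InitPlayout()/StartPlayout() pair. It assumes CreateDefaultTaskQueueFactory() from api/task_queue/default_task_queue_factory.h as the stock factory; error handling is kept minimal on purpose:

#include <memory>

#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"

// Sketch: bring up playout on the platform-default audio layer.
void StartPlayoutSketch() {
  auto task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::AudioDeviceModule::Create(
          webrtc::AudioDeviceModule::kPlatformDefaultAudio,
          task_queue_factory.get());
  if (!adm || adm->Init() != 0) return;
  adm->SetPlayoutDevice(0);  // First enumerated device.
  if (adm->InitPlayout() == 0 && adm->StartPlayout() == 0) {
    // Audio is now flowing; a registered AudioTransport supplies the samples.
  }
  adm->StopPlayout();
  adm->Terminate();
}
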
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_
|
||||
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "api/scoped_refptr.h"
|
||||
#include "api/task_queue/task_queue_factory.h"
|
||||
#include "modules/audio_device/include/audio_device.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// This interface captures the raw PCM data of both the locally captured audio
// and the mixed/rendered remote audio.
class AudioDeviceDataObserver {
 public:
  virtual void OnCaptureData(const void* audio_samples,
                             size_t num_samples,
                             size_t bytes_per_sample,
                             size_t num_channels,
                             uint32_t samples_per_sec) = 0;

  virtual void OnRenderData(const void* audio_samples,
                            size_t num_samples,
                            size_t bytes_per_sample,
                            size_t num_channels,
                            uint32_t samples_per_sec) = 0;

  AudioDeviceDataObserver() = default;
  virtual ~AudioDeviceDataObserver() = default;
};

// Creates an ADMWrapper around an ADM instance that registers
// the provided AudioDeviceDataObserver.
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    rtc::scoped_refptr<AudioDeviceModule> impl,
    std::unique_ptr<AudioDeviceDataObserver> observer);

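// A minimal usage sketch (the `LevelLogger` observer below is hypothetical,
// for illustration only): implement the observer and wrap an existing ADM
// with it.
//
//   class LevelLogger : public AudioDeviceDataObserver {
//    public:
//     void OnCaptureData(const void* audio_samples, size_t num_samples,
//                        size_t bytes_per_sample, size_t num_channels,
//                        uint32_t samples_per_sec) override { /* inspect */ }
//     void OnRenderData(const void* audio_samples, size_t num_samples,
//                       size_t bytes_per_sample, size_t num_channels,
//                       uint32_t samples_per_sec) override { /* inspect */ }
//   };
//
//   auto wrapped_adm = CreateAudioDeviceWithDataObserver(
//       existing_adm, std::make_unique<LevelLogger>());
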
// Creates an ADMWrapper around an ADM instance that registers
// the provided AudioDeviceDataObserver.
ABSL_DEPRECATED("")
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    rtc::scoped_refptr<AudioDeviceModule> impl,
    AudioDeviceDataObserver* observer);

// Creates an ADM instance with AudioDeviceDataObserver registered.
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    AudioDeviceModule::AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory,
    std::unique_ptr<AudioDeviceDataObserver> observer);

// Creates an ADM instance with AudioDeviceDataObserver registered.
ABSL_DEPRECATED("")
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    AudioDeviceModule::AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory,
    AudioDeviceDataObserver* observer);

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_

@ -0,0 +1,132 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_

#include "modules/audio_device/include/audio_device.h"

namespace webrtc {
namespace webrtc_impl {

// AudioDeviceModuleDefault template adds default implementation for all
// AudioDeviceModule methods to the class, which inherits from
// AudioDeviceModuleDefault<T>.
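//
// A minimal usage sketch (the `NullAudioDeviceModule` name is hypothetical,
// for illustration only): inherit from the template to get no-op defaults
// and override only what the test needs.
//
//   class NullAudioDeviceModule
//       : public webrtc_impl::AudioDeviceModuleDefault<AudioDeviceModule> {
//    public:
//     // Example override; every other method keeps its default body.
//     bool Playing() const override { return true; }
//   };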
template <typename T>
class AudioDeviceModuleDefault : public T {
 public:
  AudioDeviceModuleDefault() {}
  virtual ~AudioDeviceModuleDefault() {}

  int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
    return 0;
  }
  int32_t Init() override { return 0; }
  int32_t InitSpeaker() override { return 0; }
  int32_t SetPlayoutDevice(uint16_t index) override { return 0; }
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override {
    return 0;
  }
  int32_t SetStereoPlayout(bool enable) override { return 0; }
  int32_t StopPlayout() override { return 0; }
  int32_t InitMicrophone() override { return 0; }
  int32_t SetRecordingDevice(uint16_t index) override { return 0; }
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override {
    return 0;
  }
  int32_t SetStereoRecording(bool enable) override { return 0; }
  int32_t StopRecording() override { return 0; }

  int32_t Terminate() override { return 0; }

  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer* audioLayer) const override {
    return 0;
  }
  bool Initialized() const override { return true; }
  int16_t PlayoutDevices() override { return 0; }
  int16_t RecordingDevices() override { return 0; }
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override {
    return 0;
  }
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override {
    return 0;
  }
  int32_t PlayoutIsAvailable(bool* available) override { return 0; }
  int32_t InitPlayout() override { return 0; }
  bool PlayoutIsInitialized() const override { return true; }
  int32_t RecordingIsAvailable(bool* available) override { return 0; }
  int32_t InitRecording() override { return 0; }
  bool RecordingIsInitialized() const override { return true; }
  int32_t StartPlayout() override { return 0; }
  bool Playing() const override { return false; }
  int32_t StartRecording() override { return 0; }
  bool Recording() const override { return false; }
  bool SpeakerIsInitialized() const override { return true; }
  bool MicrophoneIsInitialized() const override { return true; }
  int32_t SpeakerVolumeIsAvailable(bool* available) override { return 0; }
  int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
  int32_t SpeakerVolume(uint32_t* volume) const override { return 0; }
  int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; }
  int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; }
  int32_t MicrophoneVolumeIsAvailable(bool* available) override { return 0; }
  int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
  int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; }
  int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; }
  int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; }
  int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; }
  int32_t SetSpeakerMute(bool enable) override { return 0; }
  int32_t SpeakerMute(bool* enabled) const override { return 0; }
  int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; }
  int32_t SetMicrophoneMute(bool enable) override { return 0; }
  int32_t MicrophoneMute(bool* enabled) const override { return 0; }
  int32_t StereoPlayoutIsAvailable(bool* available) const override {
    *available = false;
    return 0;
  }
  int32_t StereoPlayout(bool* enabled) const override { return 0; }
  int32_t StereoRecordingIsAvailable(bool* available) const override {
    *available = false;
    return 0;
  }
  int32_t StereoRecording(bool* enabled) const override { return 0; }
  int32_t PlayoutDelay(uint16_t* delayMS) const override {
    *delayMS = 0;
    return 0;
  }
  bool BuiltInAECIsAvailable() const override { return false; }
  int32_t EnableBuiltInAEC(bool enable) override { return -1; }
  bool BuiltInAGCIsAvailable() const override { return false; }
  int32_t EnableBuiltInAGC(bool enable) override { return -1; }
  bool BuiltInNSIsAvailable() const override { return false; }
  int32_t EnableBuiltInNS(bool enable) override { return -1; }

  int32_t GetPlayoutUnderrunCount() const override { return -1; }

#if defined(WEBRTC_IOS)
  int GetPlayoutAudioParameters(AudioParameters* params) const override {
    return -1;
  }
  int GetRecordAudioParameters(AudioParameters* params) const override {
    return -1;
  }
#endif  // WEBRTC_IOS
};

}  // namespace webrtc_impl
}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_

@ -0,0 +1,177 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_

#include <stddef.h>

#include <string>

#include "absl/types/optional.h"  // For absl::optional used below.
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"

namespace webrtc {

static const int kAdmMaxDeviceNameSize = 128;
static const int kAdmMaxFileNameSize = 512;
static const int kAdmMaxGuidSize = 128;

static const int kAdmMinPlayoutBufferSizeMs = 10;
static const int kAdmMaxPlayoutBufferSizeMs = 250;

// ----------------------------------------------------------------------------
//  AudioTransport
// ----------------------------------------------------------------------------

class AudioTransport {
 public:
  // TODO(bugs.webrtc.org/13620) Deprecate this function
  virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
                                          size_t nSamples,
                                          size_t nBytesPerSample,
                                          size_t nChannels,
                                          uint32_t samplesPerSec,
                                          uint32_t totalDelayMS,
                                          int32_t clockDrift,
                                          uint32_t currentMicLevel,
                                          bool keyPressed,
                                          uint32_t& newMicLevel) = 0;  // NOLINT

  virtual int32_t RecordedDataIsAvailable(
      const void* audioSamples,
      size_t nSamples,
      size_t nBytesPerSample,
      size_t nChannels,
      uint32_t samplesPerSec,
      uint32_t totalDelayMS,
      int32_t clockDrift,
      uint32_t currentMicLevel,
      bool keyPressed,
      uint32_t& newMicLevel,
      absl::optional<int64_t> estimatedCaptureTimeNS) {  // NOLINT
    // TODO(webrtc:13620) Make the default behavior of the new API match the
    // old API. This can become pure virtual once all uses of the old API are
    // removed.
    return RecordedDataIsAvailable(
        audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec,
        totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel);
  }

  // Implementation has to setup safe values for all specified out parameters.
  virtual int32_t NeedMorePlayData(size_t nSamples,
                                   size_t nBytesPerSample,
                                   size_t nChannels,
                                   uint32_t samplesPerSec,
                                   void* audioSamples,
                                   size_t& nSamplesOut,  // NOLINT
                                   int64_t* elapsed_time_ms,
                                   int64_t* ntp_time_ms) = 0;  // NOLINT

  // Method to pull mixed render audio data from all active VoE channels.
  // The data will not be passed as reference for audio processing internally.
  virtual void PullRenderData(int bits_per_sample,
                              int sample_rate,
                              size_t number_of_channels,
                              size_t number_of_frames,
                              void* audio_data,
                              int64_t* elapsed_time_ms,
                              int64_t* ntp_time_ms) = 0;

 protected:
  virtual ~AudioTransport() {}
};

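// A minimal sketch of the playout half of this interface (the
// `SilenceTransport` class is hypothetical, for illustration only, and
// assumes nBytesPerSample covers all channels of one sample frame): fill the
// buffer with zeros and set safe values for every out parameter, as the
// comment on NeedMorePlayData() requires.
//
//   int32_t SilenceTransport::NeedMorePlayData(
//       size_t nSamples, size_t nBytesPerSample, size_t nChannels,
//       uint32_t samplesPerSec, void* audioSamples, size_t& nSamplesOut,
//       int64_t* elapsed_time_ms, int64_t* ntp_time_ms) {
//     memset(audioSamples, 0, nSamples * nBytesPerSample);  // silent frame
//     nSamplesOut = nSamples;
//     *elapsed_time_ms = 0;
//     *ntp_time_ms = 0;
//     return 0;
//   }
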
// Helper class for storage of fundamental audio parameters such as sample rate,
// number of channels, native buffer size etc.
// Note that one audio frame can contain more than one channel sample and each
// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
// stereo contains 2 * (16/8) = 4 bytes of data.
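// As a worked example: a 48 kHz stereo stream with 10 ms buffers has
// frames_per_buffer = 48000 / 100 = 480, GetBytesPerFrame() = 2 * 2 = 4, and
// GetBytesPerBuffer() = 480 * 4 = 1920 bytes.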
class AudioParameters {
 public:
  // This implementation only supports 16-bit PCM samples.
  static const size_t kBitsPerSample = 16;
  AudioParameters()
      : sample_rate_(0),
        channels_(0),
        frames_per_buffer_(0),
        frames_per_10ms_buffer_(0) {}
  AudioParameters(int sample_rate, size_t channels, size_t frames_per_buffer)
      : sample_rate_(sample_rate),
        channels_(channels),
        frames_per_buffer_(frames_per_buffer),
        frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
  void reset(int sample_rate, size_t channels, size_t frames_per_buffer) {
    sample_rate_ = sample_rate;
    channels_ = channels;
    frames_per_buffer_ = frames_per_buffer;
    frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
  }
  size_t bits_per_sample() const { return kBitsPerSample; }
  void reset(int sample_rate, size_t channels, double buffer_duration) {
    reset(sample_rate, channels,
          static_cast<size_t>(sample_rate * buffer_duration + 0.5));
  }
  void reset(int sample_rate, size_t channels) {
    reset(sample_rate, channels, static_cast<size_t>(0));
  }
  int sample_rate() const { return sample_rate_; }
  size_t channels() const { return channels_; }
  size_t frames_per_buffer() const { return frames_per_buffer_; }
  size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
  size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
  size_t GetBytesPerBuffer() const {
    return frames_per_buffer_ * GetBytesPerFrame();
  }
  // The WebRTC audio device buffer (ADB) only requires that the sample rate
  // and number of channels are configured. Hence, to be "valid", only these
  // two attributes must be set.
  bool is_valid() const { return ((sample_rate_ > 0) && (channels_ > 0)); }
  // Most platforms also require that a native buffer size is defined.
  // An audio parameter instance is considered to be "complete" if it is both
  // "valid" (can be used by the ADB) and also has a native frame size.
  bool is_complete() const { return (is_valid() && (frames_per_buffer_ > 0)); }
  size_t GetBytesPer10msBuffer() const {
    return frames_per_10ms_buffer_ * GetBytesPerFrame();
  }
  double GetBufferSizeInMilliseconds() const {
    if (sample_rate_ == 0)
      return 0.0;
    return frames_per_buffer_ / (sample_rate_ / 1000.0);
  }
  double GetBufferSizeInSeconds() const {
    if (sample_rate_ == 0)
      return 0.0;
    return static_cast<double>(frames_per_buffer_) / (sample_rate_);
  }
  std::string ToString() const {
    char ss_buf[1024];
    rtc::SimpleStringBuilder ss(ss_buf);
    ss << "AudioParameters: ";
    ss << "sample_rate=" << sample_rate() << ", channels=" << channels();
    ss << ", frames_per_buffer=" << frames_per_buffer();
    ss << ", frames_per_10ms_buffer=" << frames_per_10ms_buffer();
    ss << ", bytes_per_frame=" << GetBytesPerFrame();
    ss << ", bytes_per_buffer=" << GetBytesPerBuffer();
    ss << ", bytes_per_10ms_buffer=" << GetBytesPer10msBuffer();
    ss << ", size_in_ms=" << GetBufferSizeInMilliseconds();
    return ss.str();
  }

 private:
  int sample_rate_;
  size_t channels_;
  size_t frames_per_buffer_;
  size_t frames_per_10ms_buffer_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_

@ -0,0 +1,53 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/include/audio_device_factory.h"

#include <memory>

#if defined(WEBRTC_WIN)
#include "modules/audio_device/win/audio_device_module_win.h"
#include "modules/audio_device/win/core_audio_input_win.h"
#include "modules/audio_device/win/core_audio_output_win.h"
#include "modules/audio_device/win/core_audio_utility_win.h"
#endif

#include "api/task_queue/task_queue_factory.h"
#include "rtc_base/logging.h"

namespace webrtc {

rtc::scoped_refptr<AudioDeviceModule> CreateWindowsCoreAudioAudioDeviceModule(
    TaskQueueFactory* task_queue_factory,
    bool automatic_restart) {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  return CreateWindowsCoreAudioAudioDeviceModuleForTest(task_queue_factory,
                                                        automatic_restart);
}

rtc::scoped_refptr<AudioDeviceModuleForTest>
CreateWindowsCoreAudioAudioDeviceModuleForTest(
    TaskQueueFactory* task_queue_factory,
    bool automatic_restart) {
  RTC_DLOG(LS_INFO) << __FUNCTION__;
  // Returns NULL if Core Audio is not supported or if COM has not been
  // initialized correctly using ScopedCOMInitializer.
  if (!webrtc_win::core_audio_utility::IsSupported()) {
    RTC_LOG(LS_ERROR)
        << "Unable to create ADM since Core Audio is not supported";
    return nullptr;
  }
  return CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
      std::make_unique<webrtc_win::CoreAudioInput>(automatic_restart),
      std::make_unique<webrtc_win::CoreAudioOutput>(automatic_restart),
      task_queue_factory);
}

}  // namespace webrtc

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_

#include <memory>

#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"

namespace webrtc {

// Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API.
// The creating thread must be a COM thread; otherwise nullptr will be returned.
// By default `automatic_restart` is set to true and it results in support for
// automatic restart of audio if e.g. the existing device is removed. If set to
// false, no attempt to restart audio is performed under these conditions.
//
// Example (assuming webrtc namespace):
//
//  public:
//   rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
//     task_queue_factory_ = CreateDefaultTaskQueueFactory();
//     // Tell COM that this thread shall live in the MTA.
//     com_initializer_ = std::make_unique<ScopedCOMInitializer>(
//         ScopedCOMInitializer::kMTA);
//     if (!com_initializer_->Succeeded()) {
//       return nullptr;
//     }
//     // Create the ADM with support for automatic restart if devices are
//     // unplugged.
//     return CreateWindowsCoreAudioAudioDeviceModule(
//         task_queue_factory_.get());
//   }
//
//  private:
//   std::unique_ptr<ScopedCOMInitializer> com_initializer_;
//   std::unique_ptr<TaskQueueFactory> task_queue_factory_;
//
rtc::scoped_refptr<AudioDeviceModule> CreateWindowsCoreAudioAudioDeviceModule(
    TaskQueueFactory* task_queue_factory,
    bool automatic_restart = true);

rtc::scoped_refptr<AudioDeviceModuleForTest>
CreateWindowsCoreAudioAudioDeviceModuleForTest(
    TaskQueueFactory* task_queue_factory,
    bool automatic_restart = true);

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_

@ -0,0 +1,33 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_

#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_default.h"

namespace webrtc {

class FakeAudioDeviceModule
    : public webrtc_impl::AudioDeviceModuleDefault<AudioDeviceModule> {
 public:
  // TODO(bugs.webrtc.org/12701): Fix all users of this class to manage
  // references using scoped_refptr. Current code doesn't always use
  // refcounting for this class.
  void AddRef() const override {}
  webrtc::RefCountReleaseStatus Release() const override {
    return webrtc::RefCountReleaseStatus::kDroppedLastRef;
  }
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_

@ -0,0 +1,156 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_

#include <string>

#include "api/make_ref_counted.h"
#include "modules/audio_device/include/audio_device.h"
#include "test/gmock.h"

namespace webrtc {
namespace test {

class MockAudioDeviceModule : public AudioDeviceModule {
 public:
  static rtc::scoped_refptr<MockAudioDeviceModule> CreateNice() {
    return rtc::make_ref_counted<::testing::NiceMock<MockAudioDeviceModule>>();
  }
  static rtc::scoped_refptr<MockAudioDeviceModule> CreateStrict() {
    return rtc::make_ref_counted<
        ::testing::StrictMock<MockAudioDeviceModule>>();
  }

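  // A minimal usage sketch (test body assumed, for illustration only):
  //
  //   auto adm = MockAudioDeviceModule::CreateNice();
  //   EXPECT_CALL(*adm, Init()).WillOnce(::testing::Return(0));
  //   EXPECT_EQ(0, adm->Init());
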
  // AudioDeviceModule.
  MOCK_METHOD(int32_t,
              ActiveAudioLayer,
              (AudioLayer * audioLayer),
              (const, override));
  MOCK_METHOD(int32_t,
              RegisterAudioCallback,
              (AudioTransport * audioCallback),
              (override));
  MOCK_METHOD(int32_t, Init, (), (override));
  MOCK_METHOD(int32_t, Terminate, (), (override));
  MOCK_METHOD(bool, Initialized, (), (const, override));
  MOCK_METHOD(int16_t, PlayoutDevices, (), (override));
  MOCK_METHOD(int16_t, RecordingDevices, (), (override));
  MOCK_METHOD(int32_t,
              PlayoutDeviceName,
              (uint16_t index,
               char name[kAdmMaxDeviceNameSize],
               char guid[kAdmMaxGuidSize]),
              (override));
  MOCK_METHOD(int32_t,
              RecordingDeviceName,
              (uint16_t index,
               char name[kAdmMaxDeviceNameSize],
               char guid[kAdmMaxGuidSize]),
              (override));
  MOCK_METHOD(int32_t, SetPlayoutDevice, (uint16_t index), (override));
  MOCK_METHOD(int32_t,
              SetPlayoutDevice,
              (WindowsDeviceType device),
              (override));
  MOCK_METHOD(int32_t, SetRecordingDevice, (uint16_t index), (override));
  MOCK_METHOD(int32_t,
              SetRecordingDevice,
              (WindowsDeviceType device),
              (override));
  MOCK_METHOD(int32_t, PlayoutIsAvailable, (bool* available), (override));
  MOCK_METHOD(int32_t, InitPlayout, (), (override));
  MOCK_METHOD(bool, PlayoutIsInitialized, (), (const, override));
  MOCK_METHOD(int32_t, RecordingIsAvailable, (bool* available), (override));
  MOCK_METHOD(int32_t, InitRecording, (), (override));
  MOCK_METHOD(bool, RecordingIsInitialized, (), (const, override));
  MOCK_METHOD(int32_t, StartPlayout, (), (override));
  MOCK_METHOD(int32_t, StopPlayout, (), (override));
  MOCK_METHOD(bool, Playing, (), (const, override));
  MOCK_METHOD(int32_t, StartRecording, (), (override));
  MOCK_METHOD(int32_t, StopRecording, (), (override));
  MOCK_METHOD(bool, Recording, (), (const, override));
  MOCK_METHOD(int32_t, InitSpeaker, (), (override));
  MOCK_METHOD(bool, SpeakerIsInitialized, (), (const, override));
  MOCK_METHOD(int32_t, InitMicrophone, (), (override));
  MOCK_METHOD(bool, MicrophoneIsInitialized, (), (const, override));
  MOCK_METHOD(int32_t, SpeakerVolumeIsAvailable, (bool* available), (override));
  MOCK_METHOD(int32_t, SetSpeakerVolume, (uint32_t volume), (override));
  MOCK_METHOD(int32_t, SpeakerVolume, (uint32_t * volume), (const, override));
  MOCK_METHOD(int32_t,
              MaxSpeakerVolume,
              (uint32_t * maxVolume),
              (const, override));
  MOCK_METHOD(int32_t,
              MinSpeakerVolume,
              (uint32_t * minVolume),
              (const, override));
  MOCK_METHOD(int32_t,
              MicrophoneVolumeIsAvailable,
              (bool* available),
              (override));
  MOCK_METHOD(int32_t, SetMicrophoneVolume, (uint32_t volume), (override));
  MOCK_METHOD(int32_t,
              MicrophoneVolume,
              (uint32_t * volume),
              (const, override));
  MOCK_METHOD(int32_t,
              MaxMicrophoneVolume,
              (uint32_t * maxVolume),
              (const, override));
  MOCK_METHOD(int32_t,
              MinMicrophoneVolume,
              (uint32_t * minVolume),
              (const, override));
  MOCK_METHOD(int32_t, SpeakerMuteIsAvailable, (bool* available), (override));
  MOCK_METHOD(int32_t, SetSpeakerMute, (bool enable), (override));
  MOCK_METHOD(int32_t, SpeakerMute, (bool* enabled), (const, override));
  MOCK_METHOD(int32_t,
              MicrophoneMuteIsAvailable,
              (bool* available),
              (override));
  MOCK_METHOD(int32_t, SetMicrophoneMute, (bool enable), (override));
  MOCK_METHOD(int32_t, MicrophoneMute, (bool* enabled), (const, override));
  MOCK_METHOD(int32_t,
              StereoPlayoutIsAvailable,
              (bool* available),
              (const, override));
  MOCK_METHOD(int32_t, SetStereoPlayout, (bool enable), (override));
  MOCK_METHOD(int32_t, StereoPlayout, (bool* enabled), (const, override));
  MOCK_METHOD(int32_t,
              StereoRecordingIsAvailable,
              (bool* available),
              (const, override));
  MOCK_METHOD(int32_t, SetStereoRecording, (bool enable), (override));
  MOCK_METHOD(int32_t, StereoRecording, (bool* enabled), (const, override));
  MOCK_METHOD(int32_t, PlayoutDelay, (uint16_t * delayMS), (const, override));
  MOCK_METHOD(bool, BuiltInAECIsAvailable, (), (const, override));
  MOCK_METHOD(bool, BuiltInAGCIsAvailable, (), (const, override));
  MOCK_METHOD(bool, BuiltInNSIsAvailable, (), (const, override));
  MOCK_METHOD(int32_t, EnableBuiltInAEC, (bool enable), (override));
  MOCK_METHOD(int32_t, EnableBuiltInAGC, (bool enable), (override));
  MOCK_METHOD(int32_t, EnableBuiltInNS, (bool enable), (override));
  MOCK_METHOD(int32_t, GetPlayoutUnderrunCount, (), (const, override));
#if defined(WEBRTC_IOS)
  MOCK_METHOD(int,
              GetPlayoutAudioParameters,
              (AudioParameters * params),
              (const, override));
  MOCK_METHOD(int,
              GetRecordAudioParameters,
              (AudioParameters * params),
              (const, override));
#endif  // WEBRTC_IOS
};
}  // namespace test
}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_

@ -0,0 +1,81 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_

#include "modules/audio_device/include/audio_device_defines.h"
#include "test/gmock.h"

namespace webrtc {
namespace test {

class MockAudioTransport : public AudioTransport {
 public:
  MockAudioTransport() {}
  ~MockAudioTransport() {}

  MOCK_METHOD(int32_t,
              RecordedDataIsAvailable,
              (const void* audioSamples,
               size_t nSamples,
               size_t nBytesPerSample,
               size_t nChannels,
               uint32_t samplesPerSec,
               uint32_t totalDelayMS,
               int32_t clockDrift,
               uint32_t currentMicLevel,
               bool keyPressed,
               uint32_t& newMicLevel),
              (override));

  MOCK_METHOD(int32_t,
              RecordedDataIsAvailable,
              (const void* audioSamples,
               size_t nSamples,
               size_t nBytesPerSample,
               size_t nChannels,
               uint32_t samplesPerSec,
               uint32_t totalDelayMS,
               int32_t clockDrift,
               uint32_t currentMicLevel,
               bool keyPressed,
               uint32_t& newMicLevel,
               absl::optional<int64_t> estimated_capture_time_ns),
              (override));

  MOCK_METHOD(int32_t,
              NeedMorePlayData,
              (size_t nSamples,
               size_t nBytesPerSample,
               size_t nChannels,
               uint32_t samplesPerSec,
               void* audioSamples,
               size_t& nSamplesOut,
               int64_t* elapsed_time_ms,
               int64_t* ntp_time_ms),
              (override));

  MOCK_METHOD(void,
              PullRenderData,
              (int bits_per_sample,
               int sample_rate,
               size_t number_of_channels,
               size_t number_of_frames,
               void* audio_data,
               int64_t* elapsed_time_ms,
               int64_t* ntp_time_ms),
              (override));
};

}  // namespace test
}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_

@ -0,0 +1,540 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "modules/audio_device/include/test_audio_device.h"

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/make_ref_counted.h"
#include "api/task_queue/task_queue_factory.h"
#include "common_audio/wav_file.h"
#include "modules/audio_device/audio_device_impl.h"
#include "modules/audio_device/include/audio_device_default.h"
#include "modules/audio_device/test_audio_device_impl.h"
#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/event.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/random.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/file_wrapper.h"  // For FileWrapper used below.
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/thread_annotations.h"
#include "rtc_base/time_utils.h"

namespace webrtc {

namespace {

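// The device processes audio in 10 ms units, i.e. 100 frames per second.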
constexpr int kFrameLengthUs = 10000;
constexpr int kFramesPerSecond = rtc::kNumMicrosecsPerSec / kFrameLengthUs;

class TestAudioDeviceModuleImpl : public AudioDeviceModuleImpl {
 public:
  TestAudioDeviceModuleImpl(
      TaskQueueFactory* task_queue_factory,
      std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
      std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
      float speed = 1)
      : AudioDeviceModuleImpl(
            AudioLayer::kDummyAudio,
            std::make_unique<TestAudioDevice>(task_queue_factory,
                                              std::move(capturer),
                                              std::move(renderer),
                                              speed),
            task_queue_factory,
            /*create_detached=*/true) {}

  ~TestAudioDeviceModuleImpl() override = default;
};

// A fake capturer that generates pulses with random samples between
// -max_amplitude and +max_amplitude.
class PulsedNoiseCapturerImpl final
    : public TestAudioDeviceModule::PulsedNoiseCapturer {
 public:
  // Assuming 10ms audio packets.
  PulsedNoiseCapturerImpl(int16_t max_amplitude,
                          int sampling_frequency_in_hz,
                          int num_channels)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        fill_with_zero_(false),
        random_generator_(1),
        max_amplitude_(max_amplitude),
        num_channels_(num_channels) {
    RTC_DCHECK_GT(max_amplitude, 0);
  }

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Capture(rtc::BufferT<int16_t>* buffer) override {
    fill_with_zero_ = !fill_with_zero_;
    int16_t max_amplitude;
    {
      MutexLock lock(&lock_);
      max_amplitude = max_amplitude_;
    }
    buffer->SetData(
        TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz_) *
            num_channels_,
        [&](rtc::ArrayView<int16_t> data) {
          if (fill_with_zero_) {
            std::fill(data.begin(), data.end(), 0);
          } else {
            std::generate(data.begin(), data.end(), [&]() {
              return random_generator_.Rand(-max_amplitude, max_amplitude);
            });
          }
          return data.size();
        });
    return true;
  }

  void SetMaxAmplitude(int16_t amplitude) override {
    MutexLock lock(&lock_);
    max_amplitude_ = amplitude;
  }

 private:
  int sampling_frequency_in_hz_;
  bool fill_with_zero_;
  Random random_generator_;
  Mutex lock_;
  int16_t max_amplitude_ RTC_GUARDED_BY(lock_);
  const int num_channels_;
};

class WavFileReader final : public TestAudioDeviceModule::Capturer {
 public:
  WavFileReader(absl::string_view filename,
                int sampling_frequency_in_hz,
                int num_channels,
                bool repeat)
      : WavFileReader(std::make_unique<WavReader>(filename),
                      sampling_frequency_in_hz,
                      num_channels,
                      repeat) {}

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Capture(rtc::BufferT<int16_t>* buffer) override {
    buffer->SetData(
        TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz_) *
            num_channels_,
        [&](rtc::ArrayView<int16_t> data) {
          size_t read = wav_reader_->ReadSamples(data.size(), data.data());
          if (read < data.size() && repeat_) {
            do {
              wav_reader_->Reset();
              size_t delta = wav_reader_->ReadSamples(
                  data.size() - read, data.subview(read).data());
              RTC_CHECK_GT(delta, 0) << "No new data read from file";
              read += delta;
            } while (read < data.size());
          }
          return read;
        });
    return buffer->size() > 0;
  }

 private:
  WavFileReader(std::unique_ptr<WavReader> wav_reader,
                int sampling_frequency_in_hz,
                int num_channels,
                bool repeat)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        num_channels_(num_channels),
        wav_reader_(std::move(wav_reader)),
        repeat_(repeat) {
    RTC_CHECK_EQ(wav_reader_->sample_rate(), sampling_frequency_in_hz);
    RTC_CHECK_EQ(wav_reader_->num_channels(), num_channels);
  }

  const int sampling_frequency_in_hz_;
  const int num_channels_;
  std::unique_ptr<WavReader> wav_reader_;
  const bool repeat_;
};

class WavFileWriter final : public TestAudioDeviceModule::Renderer {
 public:
  WavFileWriter(absl::string_view filename,
                int sampling_frequency_in_hz,
                int num_channels)
      : WavFileWriter(std::make_unique<WavWriter>(filename,
                                                  sampling_frequency_in_hz,
                                                  num_channels),
                      sampling_frequency_in_hz,
                      num_channels) {}

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    wav_writer_->WriteSamples(data.data(), data.size());
    return true;
  }

 private:
  WavFileWriter(std::unique_ptr<WavWriter> wav_writer,
                int sampling_frequency_in_hz,
                int num_channels)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        wav_writer_(std::move(wav_writer)),
        num_channels_(num_channels) {}

  int sampling_frequency_in_hz_;
  std::unique_ptr<WavWriter> wav_writer_;
  const int num_channels_;
};

class BoundedWavFileWriter : public TestAudioDeviceModule::Renderer {
 public:
  BoundedWavFileWriter(absl::string_view filename,
                       int sampling_frequency_in_hz,
                       int num_channels)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        wav_writer_(filename, sampling_frequency_in_hz, num_channels),
        num_channels_(num_channels),
        silent_audio_(
            TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) *
                num_channels,
            0),
        started_writing_(false),
        trailing_zeros_(0) {}

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    const int16_t kAmplitudeThreshold = 5;

    const int16_t* begin = data.begin();
    const int16_t* end = data.end();
    if (!started_writing_) {
      // Cut off silence at the beginning.
      while (begin < end) {
        if (std::abs(*begin) > kAmplitudeThreshold) {
          started_writing_ = true;
          break;
        }
        ++begin;
      }
    }
    if (started_writing_) {
      // Cut off silence at the end.
      while (begin < end) {
        if (*(end - 1) != 0) {
          break;
        }
        --end;
      }
      if (begin < end) {
        // If it turns out that the silence was not final, we need to write
        // all the skipped zeros and continue writing audio.
        while (trailing_zeros_ > 0) {
          const size_t zeros_to_write =
              std::min(trailing_zeros_, silent_audio_.size());
          wav_writer_.WriteSamples(silent_audio_.data(), zeros_to_write);
          trailing_zeros_ -= zeros_to_write;
        }
        wav_writer_.WriteSamples(begin, end - begin);
      }
      // Save the number of zeros we skipped in case this needs to be restored.
      trailing_zeros_ += data.end() - end;
    }
    return true;
  }

 private:
  int sampling_frequency_in_hz_;
  WavWriter wav_writer_;
  const int num_channels_;
  std::vector<int16_t> silent_audio_;
  bool started_writing_;
  size_t trailing_zeros_;
};

class DiscardRenderer final : public TestAudioDeviceModule::Renderer {
 public:
  explicit DiscardRenderer(int sampling_frequency_in_hz, int num_channels)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        num_channels_(num_channels) {}

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Render(rtc::ArrayView<const int16_t> data) override { return true; }

 private:
  int sampling_frequency_in_hz_;
  const int num_channels_;
};

class RawFileReader final : public TestAudioDeviceModule::Capturer {
 public:
  RawFileReader(absl::string_view input_file_name,
                int sampling_frequency_in_hz,
                int num_channels,
                bool repeat)
      : input_file_name_(input_file_name),
        sampling_frequency_in_hz_(sampling_frequency_in_hz),
        num_channels_(num_channels),
        repeat_(repeat),
        read_buffer_(
            TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) *
                num_channels * 2,
            0) {
    input_file_ = FileWrapper::OpenReadOnly(input_file_name_);
    RTC_CHECK(input_file_.is_open())
        << "Failed to open audio input file: " << input_file_name_;
  }

  ~RawFileReader() override { input_file_.Close(); }

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Capture(rtc::BufferT<int16_t>* buffer) override {
    buffer->SetData(
        TestAudioDeviceModule::SamplesPerFrame(SamplingFrequency()) *
            NumChannels(),
        [&](rtc::ArrayView<int16_t> data) {
          rtc::ArrayView<int8_t> read_buffer_view = ReadBufferView();
          size_t size = data.size() * 2;
          size_t read = input_file_.Read(read_buffer_view.data(), size);
          if (read < size && repeat_) {
            do {
              input_file_.Rewind();
              size_t delta = input_file_.Read(
                  read_buffer_view.subview(read).data(), size - read);
              RTC_CHECK_GT(delta, 0) << "No new data to read from file";
              read += delta;
            } while (read < size);
          }
          memcpy(data.data(), read_buffer_view.data(), size);
          return read / 2;
        });
    return buffer->size() > 0;
  }

 private:
  rtc::ArrayView<int8_t> ReadBufferView() { return read_buffer_; }

  const std::string input_file_name_;
  const int sampling_frequency_in_hz_;
  const int num_channels_;
  const bool repeat_;
  FileWrapper input_file_;
  std::vector<int8_t> read_buffer_;
};

class RawFileWriter : public TestAudioDeviceModule::Renderer {
 public:
  RawFileWriter(absl::string_view output_file_name,
                int sampling_frequency_in_hz,
                int num_channels)
      : output_file_name_(output_file_name),
        sampling_frequency_in_hz_(sampling_frequency_in_hz),
        num_channels_(num_channels),
        silent_audio_(
            TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) *
                num_channels * 2,
            0),
        write_buffer_(
            TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) *
                num_channels * 2,
            0),
        started_writing_(false),
        trailing_zeros_(0) {
    output_file_ = FileWrapper::OpenWriteOnly(output_file_name_);
    RTC_CHECK(output_file_.is_open())
        << "Failed to open playout file: " << output_file_name_;
  }
  ~RawFileWriter() override { output_file_.Close(); }

  int SamplingFrequency() const override { return sampling_frequency_in_hz_; }

  int NumChannels() const override { return num_channels_; }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    const int16_t kAmplitudeThreshold = 5;

    const int16_t* begin = data.begin();
    const int16_t* end = data.end();
    if (!started_writing_) {
      // Cut off silence at the beginning.
      while (begin < end) {
        if (std::abs(*begin) > kAmplitudeThreshold) {
          started_writing_ = true;
          break;
        }
        ++begin;
      }
    }
    if (started_writing_) {
      // Cut off silence at the end.
      while (begin < end) {
        if (*(end - 1) != 0) {
          break;
        }
        --end;
      }
      if (begin < end) {
        // If it turns out that the silence was not final, we need to write
        // all the skipped zeros and continue writing audio.
        while (trailing_zeros_ > 0) {
          const size_t zeros_to_write =
              std::min(trailing_zeros_, silent_audio_.size());
          output_file_.Write(silent_audio_.data(), zeros_to_write * 2);
          trailing_zeros_ -= zeros_to_write;
        }
        WriteInt16(begin, end);
      }
      // Save the number of zeros we skipped in case this needs to be restored.
      trailing_zeros_ += data.end() - end;
    }
    return true;
  }

 private:
  void WriteInt16(const int16_t* begin, const int16_t* end) {
    int size = (end - begin) * sizeof(int16_t);
    memcpy(write_buffer_.data(), begin, size);
    output_file_.Write(write_buffer_.data(), size);
  }

  const std::string output_file_name_;
  const int sampling_frequency_in_hz_;
  const int num_channels_;
  FileWrapper output_file_;
  std::vector<int8_t> silent_audio_;
  std::vector<int8_t> write_buffer_;
  bool started_writing_;
  size_t trailing_zeros_;
};

}  // namespace

size_t TestAudioDeviceModule::SamplesPerFrame(int sampling_frequency_in_hz) {
  return rtc::CheckedDivExact(sampling_frequency_in_hz, kFramesPerSecond);
}

rtc::scoped_refptr<AudioDeviceModule> TestAudioDeviceModule::Create(
    TaskQueueFactory* task_queue_factory,
    std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
    std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
    float speed) {
  auto audio_device = rtc::make_ref_counted<TestAudioDeviceModuleImpl>(
      task_queue_factory, std::move(capturer), std::move(renderer), speed);

  // Ensure that the current platform is supported.
  if (audio_device->CheckPlatform() == -1) {
    return nullptr;
  }

  // Create the platform-dependent implementation.
  if (audio_device->CreatePlatformSpecificObjects() == -1) {
    return nullptr;
  }

  // Ensure that the generic audio buffer can communicate with the platform
  // specific parts.
  if (audio_device->AttachAudioBuffer() == -1) {
    return nullptr;
  }

  return audio_device;
}

std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer>
TestAudioDeviceModule::CreatePulsedNoiseCapturer(int16_t max_amplitude,
                                                 int sampling_frequency_in_hz,
                                                 int num_channels) {
  return std::make_unique<PulsedNoiseCapturerImpl>(
      max_amplitude, sampling_frequency_in_hz, num_channels);
}

std::unique_ptr<TestAudioDeviceModule::Renderer>
TestAudioDeviceModule::CreateDiscardRenderer(int sampling_frequency_in_hz,
                                             int num_channels) {
  return std::make_unique<DiscardRenderer>(sampling_frequency_in_hz,
                                           num_channels);
}

std::unique_ptr<TestAudioDeviceModule::Capturer>
TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename,
                                           int sampling_frequency_in_hz,
                                           int num_channels) {
  return std::make_unique<WavFileReader>(filename, sampling_frequency_in_hz,
                                         num_channels, false);
}

std::unique_ptr<TestAudioDeviceModule::Capturer>
TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename,
                                           bool repeat) {
  WavReader reader(filename);
  int sampling_frequency_in_hz = reader.sample_rate();
  int num_channels = rtc::checked_cast<int>(reader.num_channels());
  return std::make_unique<WavFileReader>(filename, sampling_frequency_in_hz,
                                         num_channels, repeat);
}

std::unique_ptr<TestAudioDeviceModule::Renderer>
TestAudioDeviceModule::CreateWavFileWriter(absl::string_view filename,
                                           int sampling_frequency_in_hz,
                                           int num_channels) {
  return std::make_unique<WavFileWriter>(filename, sampling_frequency_in_hz,
                                         num_channels);
}

std::unique_ptr<TestAudioDeviceModule::Renderer>
TestAudioDeviceModule::CreateBoundedWavFileWriter(absl::string_view filename,
                                                  int sampling_frequency_in_hz,
                                                  int num_channels) {
  return std::make_unique<BoundedWavFileWriter>(
      filename, sampling_frequency_in_hz, num_channels);
}

std::unique_ptr<TestAudioDeviceModule::Capturer>
TestAudioDeviceModule::CreateRawFileReader(absl::string_view filename,
                                           int sampling_frequency_in_hz,
                                           int num_channels,
                                           bool repeat) {
  return std::make_unique<RawFileReader>(filename, sampling_frequency_in_hz,
                                         num_channels, repeat);
}

std::unique_ptr<TestAudioDeviceModule::Renderer>
TestAudioDeviceModule::CreateRawFileWriter(absl::string_view filename,
                                           int sampling_frequency_in_hz,
                                           int num_channels) {
  return std::make_unique<RawFileWriter>(filename, sampling_frequency_in_hz,
                                         num_channels);
}

}  // namespace webrtc

@ -0,0 +1,155 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#ifndef MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_
#define MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_

#include <stddef.h>
#include <stdint.h>

#include <memory>
#include <string>

#include "absl/strings/string_view.h"
#include "api/array_view.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/buffer.h"

namespace webrtc {

// This is a test API and is in development, so it can be changed/removed
// without notice.

// This class exists for historical reasons. For now it only contains static
// methods to create a test AudioDeviceModule. Implementation details of that
// module are considered private. This class isn't intended to be instantiated.
class TestAudioDeviceModule {
 public:
  // Returns the number of samples that Capturers and Renderers with this
  // sampling frequency will work with every time Capture or Render is called.
  static size_t SamplesPerFrame(int sampling_frequency_in_hz);
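  // For example, SamplesPerFrame(48000) == 480, i.e. 10 ms of audio per
  // channel at 48 kHz.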
  class Capturer {
   public:
    virtual ~Capturer() {}
    // Returns the sampling frequency in Hz of the audio data that this
    // capturer produces.
    virtual int SamplingFrequency() const = 0;
    // Returns the number of channels of captured audio data.
    virtual int NumChannels() const = 0;
    // Replaces the contents of `buffer` with 10ms of captured audio data
    // (see TestAudioDeviceModule::SamplesPerFrame). Returns true if the
    // capturer can keep producing data, or false when the capture finishes.
    virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0;
  };

  class Renderer {
   public:
    virtual ~Renderer() {}
    // Returns the sampling frequency in Hz of the audio data that this
    // renderer receives.
    virtual int SamplingFrequency() const = 0;
    // Returns the number of channels of audio data that this renderer
    // requires.
    virtual int NumChannels() const = 0;
    // Renders the passed audio data and returns true if the renderer wants
    // to keep receiving data, or false otherwise.
    virtual bool Render(rtc::ArrayView<const int16_t> data) = 0;
  };

  // A fake capturer that generates pulses with random samples between
  // -max_amplitude and +max_amplitude.
  class PulsedNoiseCapturer : public Capturer {
   public:
    ~PulsedNoiseCapturer() override {}

    virtual void SetMaxAmplitude(int16_t amplitude) = 0;
  };

  // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
  // frames will be processed every 10ms / `speed`.
  // `capturer` is an object that produces audio data. Can be nullptr if this
  // device is never used for recording.
  // `renderer` is an object that receives audio data that would have been
  // played out. Can be nullptr if this device is never used for playing.
  // Use one of the Create... functions to get these instances.
  static rtc::scoped_refptr<AudioDeviceModule> Create(
      TaskQueueFactory* task_queue_factory,
      std::unique_ptr<Capturer> capturer,
      std::unique_ptr<Renderer> renderer,
      float speed = 1);

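  // A minimal sketch of wiring these pieces together (assumes a
  // TaskQueueFactory `factory` created elsewhere, e.g. with
  // CreateDefaultTaskQueueFactory(); for illustration only):
  //
  //   auto adm = TestAudioDeviceModule::Create(
  //       factory.get(),
  //       TestAudioDeviceModule::CreatePulsedNoiseCapturer(
  //           /*max_amplitude=*/1000, /*sampling_frequency_in_hz=*/48000),
  //       TestAudioDeviceModule::CreateDiscardRenderer(48000));
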
  // Returns a Capturer instance that generates a signal of `num_channels`
  // channels where every second frame is zero and every second frame is evenly
  // distributed random noise with max amplitude `max_amplitude`.
  static std::unique_ptr<PulsedNoiseCapturer> CreatePulsedNoiseCapturer(
      int16_t max_amplitude,
      int sampling_frequency_in_hz,
      int num_channels = 1);

  // Returns a Renderer instance that does nothing with the audio data.
  static std::unique_ptr<Renderer> CreateDiscardRenderer(
      int sampling_frequency_in_hz,
      int num_channels = 1);

  // WavReader and WavWriter creation based on file name.

  // Returns a Capturer instance that gets its data from a WAV file. The sample
  // rate and channels will be checked against the Wav file.
  static std::unique_ptr<Capturer> CreateWavFileReader(
      absl::string_view filename,
      int sampling_frequency_in_hz,
      int num_channels = 1);

  // Returns a Capturer instance that gets its data from a file.
  // Automatically detects sample rate and num of channels.
  // `repeat` - if true, the file will be replayed from the start when we reach
  // the end of file.
  static std::unique_ptr<Capturer> CreateWavFileReader(
      absl::string_view filename,
      bool repeat = false);

  // Returns a Renderer instance that writes its data to a file.
  static std::unique_ptr<Renderer> CreateWavFileWriter(
      absl::string_view filename,
      int sampling_frequency_in_hz,
      int num_channels = 1);

  // Returns a Renderer instance that writes its data to a WAV file, cutting
  // off silence at the beginning (not necessarily perfect silence, see
  // kAmplitudeThreshold) and at the end (only actual 0 samples in this case).
  static std::unique_ptr<Renderer> CreateBoundedWavFileWriter(
      absl::string_view filename,
      int sampling_frequency_in_hz,
      int num_channels = 1);

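  // For example, with CreateBoundedWavFileWriter's amplitude threshold of 5,
  // a rendered stream {0, 0, 6, 7, 0, 0} would be written out as {6, 7}.
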
// Returns a Capturer instance that gets its data from a raw file (*.raw).
|
||||
static std::unique_ptr<Capturer> CreateRawFileReader(
|
||||
absl::string_view filename,
|
||||
int sampling_frequency_in_hz = 48000,
|
||||
int num_channels = 2,
|
||||
bool repeat = true);
|
||||
|
||||
// Returns a Renderer instance that writes its data to a raw file (*.raw),
|
||||
// cutting off silence at the beginning (not necessarily perfect silence, see
|
||||
// kAmplitudeThreshold) and at the end (only actual 0 samples in this case).
|
||||
static std::unique_ptr<Renderer> CreateRawFileWriter(
|
||||
absl::string_view filename,
|
||||
int sampling_frequency_in_hz = 48000,
|
||||
int num_channels = 2);
|
||||
|
||||
private:
|
||||
TestAudioDeviceModule() = default;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_
|
||||
|
|
@ -0,0 +1,528 @@
/*
 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/include/test_audio_device.h"

#include <algorithm>
#include <array>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "common_audio/wav_file.h"
#include "common_audio/wav_header.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/synchronization/mutex.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
#include "test/time_controller/simulated_time_controller.h"

namespace webrtc {
namespace {

void RunWavTest(const std::vector<int16_t>& input_samples,
                const std::vector<int16_t>& expected_samples) {
  const ::testing::TestInfo* const test_info =
      ::testing::UnitTest::GetInstance()->current_test_info();

  const std::string output_filename = test::OutputPathWithRandomDirectory() +
                                      "BoundedWavFileWriterTest_" +
                                      test_info->name() + ".wav";

  static const size_t kSamplesPerFrame = 8;
  static const int kSampleRate = kSamplesPerFrame * 100;
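  // Editor's note: the module processes one frame per 10 ms, so a frame of
  // 8 samples implies a sample rate of 8 * 100 = 800 Hz.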
  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
            kSamplesPerFrame);

  // Test through file name API.
  {
    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
        TestAudioDeviceModule::CreateBoundedWavFileWriter(output_filename, 800);

    for (size_t i = 0; i < input_samples.size(); i += kSamplesPerFrame) {
      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
          &input_samples[i],
          std::min(kSamplesPerFrame, input_samples.size() - i))));
    }
  }

  {
    WavReader reader(output_filename);
    std::vector<int16_t> read_samples(expected_samples.size());
    EXPECT_EQ(expected_samples.size(),
              reader.ReadSamples(read_samples.size(), read_samples.data()));
    EXPECT_EQ(expected_samples, read_samples);

    EXPECT_EQ(0u, reader.ReadSamples(read_samples.size(), read_samples.data()));
  }

  remove(output_filename.c_str());
}

TEST(BoundedWavFileWriterTest, NoSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 3, 88,
      1222, -1213, -13222, -7, -3525, 5787, -25247, 8};
  static const std::vector<int16_t> kExpectedSamples = kInputSamples;
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(BoundedWavFileWriterTest, SomeStartSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, 0, 0, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 10,
                                                     kInputSamples.end());
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(BoundedWavFileWriterTest, NegativeStartSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, -4, -6, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 2,
                                                     kInputSamples.end());
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(BoundedWavFileWriterTest, SomeEndSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 9);
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(BoundedWavFileWriterTest, DoubleEndSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 0, 0,
      0, -1213, -13222, -7, -3525, 5787, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 2);
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(BoundedWavFileWriterTest, DoubleSilence) {
  static const std::vector<int16_t> kInputSamples = {0, -1213, -13222, -7,
                                                     -3525, 5787, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 1,
                                                     kInputSamples.end() - 2);
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(BoundedWavFileWriterTest, EndSilenceCutoff) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 4);
  RunWavTest(kInputSamples, kExpectedSamples);
}

TEST(WavFileReaderTest, RepeatedTrueWithSingleFrameFileReadTwice) {
  static const std::vector<int16_t> kInputSamples = {75, 1234, 243, -1231,
                                                     -22222, 0, 3, 88};
  static const rtc::BufferT<int16_t> kExpectedSamples(kInputSamples.data(),
                                                      kInputSamples.size());

  const std::string output_filename = test::OutputPathWithRandomDirectory() +
                                      "WavFileReaderTest_RepeatedTrue_.wav";

  static const size_t kSamplesPerFrame = 8;
  static const int kSampleRate = kSamplesPerFrame * 100;
  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
            kSamplesPerFrame);

  // Create the WAV file to read.
  {
    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
        TestAudioDeviceModule::CreateWavFileWriter(output_filename, 800);

    for (size_t i = 0; i < kInputSamples.size(); i += kSamplesPerFrame) {
      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
          &kInputSamples[i],
          std::min(kSamplesPerFrame, kInputSamples.size() - i))));
    }
  }

  {
    std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
        TestAudioDeviceModule::CreateWavFileReader(output_filename, true);
    rtc::BufferT<int16_t> buffer(kExpectedSamples.size());
    EXPECT_TRUE(reader->Capture(&buffer));
    EXPECT_EQ(kExpectedSamples, buffer);
    EXPECT_TRUE(reader->Capture(&buffer));
    EXPECT_EQ(kExpectedSamples, buffer);
  }

  remove(output_filename.c_str());
}

void RunRawTestNoRepeat(const std::vector<int16_t>& input_samples,
                        const std::vector<int16_t>& expected_samples) {
  const ::testing::TestInfo* const test_info =
      ::testing::UnitTest::GetInstance()->current_test_info();

  const std::string output_filename = test::OutputPathWithRandomDirectory() +
                                      "RawFileTest_" + test_info->name() +
                                      ".raw";

  static const size_t kSamplesPerFrame = 8;
  static const int kSampleRate = kSamplesPerFrame * 100;
  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
            kSamplesPerFrame);

  // Test through file name API.
  {
    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
        TestAudioDeviceModule::CreateRawFileWriter(
            output_filename, /*sampling_frequency_in_hz=*/800);

    for (size_t i = 0; i < input_samples.size(); i += kSamplesPerFrame) {
      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
          &input_samples[i],
          std::min(kSamplesPerFrame, input_samples.size() - i))));
    }
  }

  {
    std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
        TestAudioDeviceModule::CreateRawFileReader(
            output_filename, /*sampling_frequency_in_hz=*/800,
            /*num_channels=*/2, /*repeat=*/false);
    rtc::BufferT<int16_t> buffer(expected_samples.size());
    rtc::BufferT<int16_t> expected_buffer(expected_samples.size());
    expected_buffer.SetData(expected_samples);
    EXPECT_TRUE(reader->Capture(&buffer));
    EXPECT_EQ(expected_buffer, buffer);
    EXPECT_FALSE(reader->Capture(&buffer));
    EXPECT_TRUE(buffer.empty());
  }

  remove(output_filename.c_str());
}

TEST(RawFileWriterTest, NoSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 3, 88,
      1222, -1213, -13222, -7, -3525, 5787, -25247, 8};
  static const std::vector<int16_t> kExpectedSamples = kInputSamples;
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, SomeStartSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, 0, 0, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 10,
                                                     kInputSamples.end());
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, NegativeStartSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, -4, -6, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 2,
                                                     kInputSamples.end());
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, SomeEndSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 9);
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, DoubleEndSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 0, 0,
      0, -1213, -13222, -7, -3525, 5787, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 2);
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, DoubleSilence) {
  static const std::vector<int16_t> kInputSamples = {0, -1213, -13222, -7,
                                                     -3525, 5787, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 1,
                                                     kInputSamples.end() - 2);
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, EndSilenceCutoff) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0};
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 4);
  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
}

TEST(RawFileWriterTest, Repeat) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 3, 88,
      1222, -1213, -13222, -7, -3525, 5787, -25247, 8};
  static const rtc::BufferT<int16_t> kExpectedSamples(kInputSamples.data(),
                                                      kInputSamples.size());

  const ::testing::TestInfo* const test_info =
      ::testing::UnitTest::GetInstance()->current_test_info();

  const std::string output_filename = test::OutputPathWithRandomDirectory() +
                                      "RawFileTest_" + test_info->name() + "_" +
                                      std::to_string(std::rand()) + ".raw";

  static const size_t kSamplesPerFrame = 8;
  static const int kSampleRate = kSamplesPerFrame * 100;
  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
            kSamplesPerFrame);

  // Test through file name API.
  {
    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
        TestAudioDeviceModule::CreateRawFileWriter(
            output_filename, /*sampling_frequency_in_hz=*/800);

    for (size_t i = 0; i < kInputSamples.size(); i += kSamplesPerFrame) {
      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
          &kInputSamples[i],
          std::min(kSamplesPerFrame, kInputSamples.size() - i))));
    }
  }

  {
    std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
        TestAudioDeviceModule::CreateRawFileReader(
            output_filename, /*sampling_frequency_in_hz=*/800,
            /*num_channels=*/2, /*repeat=*/true);
    rtc::BufferT<int16_t> buffer(kExpectedSamples.size());
    EXPECT_TRUE(reader->Capture(&buffer));
    EXPECT_EQ(kExpectedSamples, buffer);
    EXPECT_TRUE(reader->Capture(&buffer));
    EXPECT_EQ(kExpectedSamples, buffer);
  }

  remove(output_filename.c_str());
}

TEST(PulsedNoiseCapturerTest, SetMaxAmplitude) {
  const int16_t kAmplitude = 50;
  std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer> capturer =
      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
          kAmplitude, /*sampling_frequency_in_hz=*/8000);
  rtc::BufferT<int16_t> recording_buffer;

  // Verify that the capturer doesn't create entries louder than kAmplitude.
  // Since the pulse generator alternates between writing zeroes and actual
  // entries, we need to do the capturing twice.
  capturer->Capture(&recording_buffer);
  capturer->Capture(&recording_buffer);
  int16_t max_sample =
      *std::max_element(recording_buffer.begin(), recording_buffer.end());
  EXPECT_LE(max_sample, kAmplitude);

  // Increase the amplitude and verify that the samples can now be louder
  // than the previous max.
  capturer->SetMaxAmplitude(kAmplitude * 2);
  capturer->Capture(&recording_buffer);
  capturer->Capture(&recording_buffer);
  max_sample =
      *std::max_element(recording_buffer.begin(), recording_buffer.end());
  EXPECT_GT(max_sample, kAmplitude);
}

using ::testing::ElementsAre;

constexpr Timestamp kStartTime = Timestamp::Millis(10000);

class TestAudioTransport : public AudioTransport {
 public:
  enum class Mode { kPlaying, kRecording };

  explicit TestAudioTransport(Mode mode) : mode_(mode) {}
  ~TestAudioTransport() override = default;

  int32_t RecordedDataIsAvailable(
      const void* audioSamples,
      size_t samples_per_channel,
      size_t bytes_per_sample,
      size_t number_of_channels,
      uint32_t samples_per_second,
      uint32_t total_delay_ms,
      int32_t clock_drift,
      uint32_t current_mic_level,
      bool key_pressed,
      uint32_t& new_mic_level,
      absl::optional<int64_t> estimated_capture_time_ns) override {
    new_mic_level = 1;

    if (mode_ != Mode::kRecording) {
      EXPECT_TRUE(false) << "RecordedDataIsAvailable mustn't be called when "
                            "mode isn't kRecording";
      return -1;
    }

    MutexLock lock(&mutex_);
    samples_per_channel_.push_back(samples_per_channel);
    number_of_channels_.push_back(number_of_channels);
    bytes_per_sample_.push_back(bytes_per_sample);
    samples_per_second_.push_back(samples_per_second);
    return 0;
  }

  int32_t NeedMorePlayData(size_t samples_per_channel,
                           size_t bytes_per_sample,
                           size_t number_of_channels,
                           uint32_t samples_per_second,
                           void* audio_samples,
                           size_t& samples_out,
                           int64_t* elapsed_time_ms,
                           int64_t* ntp_time_ms) override {
    const size_t num_bytes = samples_per_channel * number_of_channels;
    std::memset(audio_samples, 1, num_bytes);
    samples_out = samples_per_channel * number_of_channels;
    *elapsed_time_ms = 0;
    *ntp_time_ms = 0;

    if (mode_ != Mode::kPlaying) {
      EXPECT_TRUE(false)
          << "NeedMorePlayData mustn't be called when mode isn't kPlaying";
      return -1;
    }

    MutexLock lock(&mutex_);
    samples_per_channel_.push_back(samples_per_channel);
    number_of_channels_.push_back(number_of_channels);
    bytes_per_sample_.push_back(bytes_per_sample);
    samples_per_second_.push_back(samples_per_second);
    return 0;
  }

  int32_t RecordedDataIsAvailable(const void* audio_samples,
                                  size_t samples_per_channel,
                                  size_t bytes_per_sample,
                                  size_t number_of_channels,
                                  uint32_t samples_per_second,
                                  uint32_t total_delay_ms,
                                  int32_t clockDrift,
                                  uint32_t current_mic_level,
                                  bool key_pressed,
                                  uint32_t& new_mic_level) override {
    RTC_CHECK(false) << "This method should never be executed";
  }

  void PullRenderData(int bits_per_sample,
                      int sample_rate,
                      size_t number_of_channels,
                      size_t number_of_frames,
                      void* audio_data,
                      int64_t* elapsed_time_ms,
                      int64_t* ntp_time_ms) override {
    RTC_CHECK(false) << "This method should never be executed";
  }

  std::vector<size_t> samples_per_channel() const {
    MutexLock lock(&mutex_);
    return samples_per_channel_;
  }
  std::vector<size_t> number_of_channels() const {
    MutexLock lock(&mutex_);
    return number_of_channels_;
  }
  std::vector<size_t> bytes_per_sample() const {
    MutexLock lock(&mutex_);
    return bytes_per_sample_;
  }
  std::vector<size_t> samples_per_second() const {
    MutexLock lock(&mutex_);
    return samples_per_second_;
  }

 private:
  const Mode mode_;

  mutable Mutex mutex_;
  std::vector<size_t> samples_per_channel_ RTC_GUARDED_BY(mutex_);
  std::vector<size_t> number_of_channels_ RTC_GUARDED_BY(mutex_);
  std::vector<size_t> bytes_per_sample_ RTC_GUARDED_BY(mutex_);
  std::vector<size_t> samples_per_second_ RTC_GUARDED_BY(mutex_);
};

TEST(TestAudioDeviceModuleTest, CreatedADMCanRecord) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  TestAudioTransport audio_transport(TestAudioTransport::Mode::kRecording);
  std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer> capturer =
      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
          /*max_amplitude=*/1000,
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);

  rtc::scoped_refptr<AudioDeviceModule> adm = TestAudioDeviceModule::Create(
      time_controller.GetTaskQueueFactory(), std::move(capturer),
      /*renderer=*/nullptr);

  ASSERT_EQ(adm->RegisterAudioCallback(&audio_transport), 0);
  ASSERT_EQ(adm->Init(), 0);

  EXPECT_FALSE(adm->RecordingIsInitialized());
  ASSERT_EQ(adm->InitRecording(), 0);
  EXPECT_TRUE(adm->RecordingIsInitialized());
  ASSERT_EQ(adm->StartRecording(), 0);
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_TRUE(adm->Recording());
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_EQ(adm->StopRecording(), 0);

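  // Editor's note: each 10 ms callback carries 48000 / 100 = 480 samples per
  // channel; 2 channels of int16_t give 4 bytes per sample frame, matching
  // the expectations below.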
  EXPECT_THAT(audio_transport.samples_per_channel(),
              ElementsAre(480, 480, 480));
  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
  EXPECT_THAT(audio_transport.samples_per_second(),
              ElementsAre(48000, 48000, 48000));
}

TEST(TestAudioDeviceModuleTest, CreatedADMCanPlay) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  TestAudioTransport audio_transport(TestAudioTransport::Mode::kPlaying);
  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer =
      TestAudioDeviceModule::CreateDiscardRenderer(
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);

  rtc::scoped_refptr<AudioDeviceModule> adm =
      TestAudioDeviceModule::Create(time_controller.GetTaskQueueFactory(),
                                    /*capturer=*/nullptr, std::move(renderer));

  ASSERT_EQ(adm->RegisterAudioCallback(&audio_transport), 0);
  ASSERT_EQ(adm->Init(), 0);

  EXPECT_FALSE(adm->PlayoutIsInitialized());
  ASSERT_EQ(adm->InitPlayout(), 0);
  EXPECT_TRUE(adm->PlayoutIsInitialized());
  ASSERT_EQ(adm->StartPlayout(), 0);
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_TRUE(adm->Playing());
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_EQ(adm->StopPlayout(), 0);

  EXPECT_THAT(audio_transport.samples_per_channel(),
              ElementsAre(480, 480, 480));
  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
  EXPECT_THAT(audio_transport.samples_per_second(),
              ElementsAre(48000, 48000, 48000));
}

}  // namespace
}  // namespace webrtc
@ -0,0 +1,40 @@
/*
 * libjingle
 * Copyright 2004--2010, Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "modules/audio_device/linux/alsasymboltable_linux.h"

namespace webrtc {
namespace adm_linux_alsa {

LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(AlsaSymbolTable, "libasound.so.2")
#define X(sym) LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(AlsaSymbolTable, sym)
ALSA_SYMBOLS_LIST
#undef X
LATE_BINDING_SYMBOL_TABLE_DEFINE_END(AlsaSymbolTable)

}  // namespace adm_linux_alsa
}  // namespace webrtc
@ -0,0 +1,148 @@
/*
 * libjingle
 * Copyright 2004--2010, Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
#define AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_

#include "modules/audio_device/linux/latebindingsymboltable_linux.h"

namespace webrtc {
namespace adm_linux_alsa {

// The ALSA symbols we need, as an X-Macro list.
// This list must contain precisely every libasound function that is used in
// alsasoundsystem.cc.
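//
// Editor's note on the X-Macro pattern: each use site defines X(sym) to the
// per-symbol expansion it needs, instantiates ALSA_SYMBOLS_LIST, and then
// #undefs X. The DECLARE block at the bottom of this header and the DEFINE
// block in alsasymboltable_linux.cc are the two instantiations.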
#define ALSA_SYMBOLS_LIST \
  X(snd_device_name_free_hint) \
  X(snd_device_name_get_hint) \
  X(snd_device_name_hint) \
  X(snd_pcm_avail_update) \
  X(snd_pcm_close) \
  X(snd_pcm_delay) \
  X(snd_pcm_drop) \
  X(snd_pcm_open) \
  X(snd_pcm_prepare) \
  X(snd_pcm_readi) \
  X(snd_pcm_recover) \
  X(snd_pcm_resume) \
  X(snd_pcm_reset) \
  X(snd_pcm_state) \
  X(snd_pcm_set_params) \
  X(snd_pcm_get_params) \
  X(snd_pcm_start) \
  X(snd_pcm_stream) \
  X(snd_pcm_frames_to_bytes) \
  X(snd_pcm_bytes_to_frames) \
  X(snd_pcm_wait) \
  X(snd_pcm_writei) \
  X(snd_pcm_info_get_class) \
  X(snd_pcm_info_get_subdevices_avail) \
  X(snd_pcm_info_get_subdevice_name) \
  X(snd_pcm_info_set_subdevice) \
  X(snd_pcm_info_get_id) \
  X(snd_pcm_info_set_device) \
  X(snd_pcm_info_set_stream) \
  X(snd_pcm_info_get_name) \
  X(snd_pcm_info_get_subdevices_count) \
  X(snd_pcm_info_sizeof) \
  X(snd_pcm_hw_params) \
  X(snd_pcm_hw_params_malloc) \
  X(snd_pcm_hw_params_free) \
  X(snd_pcm_hw_params_any) \
  X(snd_pcm_hw_params_set_access) \
  X(snd_pcm_hw_params_set_format) \
  X(snd_pcm_hw_params_set_channels) \
  X(snd_pcm_hw_params_set_rate_near) \
  X(snd_pcm_hw_params_set_buffer_size_near) \
  X(snd_card_next) \
  X(snd_card_get_name) \
  X(snd_config_update) \
  X(snd_config_copy) \
  X(snd_config_get_id) \
  X(snd_ctl_open) \
  X(snd_ctl_close) \
  X(snd_ctl_card_info) \
  X(snd_ctl_card_info_sizeof) \
  X(snd_ctl_card_info_get_id) \
  X(snd_ctl_card_info_get_name) \
  X(snd_ctl_pcm_next_device) \
  X(snd_ctl_pcm_info) \
  X(snd_mixer_load) \
  X(snd_mixer_free) \
  X(snd_mixer_detach) \
  X(snd_mixer_close) \
  X(snd_mixer_open) \
  X(snd_mixer_attach) \
  X(snd_mixer_first_elem) \
  X(snd_mixer_elem_next) \
  X(snd_mixer_selem_get_name) \
  X(snd_mixer_selem_is_active) \
  X(snd_mixer_selem_register) \
  X(snd_mixer_selem_set_playback_volume_all) \
  X(snd_mixer_selem_get_playback_volume) \
  X(snd_mixer_selem_has_playback_volume) \
  X(snd_mixer_selem_get_playback_volume_range) \
  X(snd_mixer_selem_has_playback_switch) \
  X(snd_mixer_selem_get_playback_switch) \
  X(snd_mixer_selem_set_playback_switch_all) \
  X(snd_mixer_selem_has_capture_switch) \
  X(snd_mixer_selem_get_capture_switch) \
  X(snd_mixer_selem_set_capture_switch_all) \
  X(snd_mixer_selem_has_capture_volume) \
  X(snd_mixer_selem_set_capture_volume_all) \
  X(snd_mixer_selem_get_capture_volume) \
  X(snd_mixer_selem_get_capture_volume_range) \
  X(snd_dlopen) \
  X(snd_dlclose) \
  X(snd_config) \
  X(snd_config_search) \
  X(snd_config_get_string) \
  X(snd_config_search_definition) \
  X(snd_config_get_type) \
  X(snd_config_delete) \
  X(snd_config_iterator_entry) \
  X(snd_config_iterator_first) \
  X(snd_config_iterator_next) \
  X(snd_config_iterator_end) \
  X(snd_config_delete_compound_members) \
  X(snd_config_get_integer) \
  X(snd_config_get_bool) \
  X(snd_dlsym) \
  X(snd_strerror) \
  X(snd_lib_error) \
  X(snd_lib_error_set_handler)

LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(AlsaSymbolTable)
#define X(sym) LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym)
ALSA_SYMBOLS_LIST
#undef X
LATE_BINDING_SYMBOL_TABLE_DECLARE_END(AlsaSymbolTable)

}  // namespace adm_linux_alsa
}  // namespace webrtc

#endif  // AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
File diff suppressed because it is too large
@ -0,0 +1,208 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
#define AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_

#include <memory>

#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"

#if defined(WEBRTC_USE_X11)
#include <X11/Xlib.h>
#endif
#include <alsa/asoundlib.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>

typedef webrtc::adm_linux_alsa::AlsaSymbolTable WebRTCAlsaSymbolTable;
WebRTCAlsaSymbolTable* GetAlsaSymbolTable();

namespace webrtc {

class AudioDeviceLinuxALSA : public AudioDeviceGeneric {
 public:
  AudioDeviceLinuxALSA();
  virtual ~AudioDeviceLinuxALSA();

  // Retrieve the currently utilized audio layer
  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const override;

  // Main initialization and termination
  InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_) override;
  int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool Initialized() const override;

  // Device enumeration
  int16_t PlayoutDevices() override;
  int16_t RecordingDevices() override;
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override;
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override;

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index) override;
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override;
  int32_t SetRecordingDevice(uint16_t index) override;
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override;

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available) override;
  int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool PlayoutIsInitialized() const override;
  int32_t RecordingIsAvailable(bool& available) override;
  int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool RecordingIsInitialized() const override;

  // Audio transport control
  int32_t StartPlayout() override;
  int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool Playing() const override;
  int32_t StartRecording() override;
  int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool Recording() const override;

  // Audio mixer initialization
  int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool SpeakerIsInitialized() const override;
  int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool MicrophoneIsInitialized() const override;

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available) override;
  int32_t SetSpeakerVolume(uint32_t volume) override;
  int32_t SpeakerVolume(uint32_t& volume) const override;
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
  int32_t SetMicrophoneVolume(uint32_t volume) override;
  int32_t MicrophoneVolume(uint32_t& volume) const override;
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available) override;
  int32_t SetSpeakerMute(bool enable) override;
  int32_t SpeakerMute(bool& enabled) const override;

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool& available) override;
  int32_t SetMicrophoneMute(bool enable) override;
  int32_t MicrophoneMute(bool& enabled) const override;

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  int32_t SetStereoPlayout(bool enable) override;
  int32_t StereoPlayout(bool& enabled) const override;
  int32_t StereoRecordingIsAvailable(bool& available)
      RTC_LOCKS_EXCLUDED(mutex_) override;
  int32_t SetStereoRecording(bool enable) override;
  int32_t StereoRecording(bool& enabled) const override;

  // Delay information and control
  int32_t PlayoutDelay(uint16_t& delayMS) const override;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
      RTC_LOCKS_EXCLUDED(mutex_) override;

 private:
  int32_t InitRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t StopRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t StopPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t InitPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t GetDevicesInfo(int32_t function,
                         bool playback,
                         int32_t enumDeviceNo = 0,
                         char* enumDeviceName = NULL,
                         int32_t ednLen = 0) const;
  int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);

  bool KeyPressed() const;

  void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); }
  void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); }

  inline int32_t InputSanityCheckAfterUnlockedPeriod() const;
  inline int32_t OutputSanityCheckAfterUnlockedPeriod() const;

  static void RecThreadFunc(void*);
  static void PlayThreadFunc(void*);
  bool RecThreadProcess();
  bool PlayThreadProcess();

  AudioDeviceBuffer* _ptrAudioBuffer;

  Mutex mutex_;

  rtc::PlatformThread _ptrThreadRec;
  rtc::PlatformThread _ptrThreadPlay;

  AudioMixerManagerLinuxALSA _mixerManager;

  uint16_t _inputDeviceIndex;
  uint16_t _outputDeviceIndex;
  bool _inputDeviceIsSpecified;
  bool _outputDeviceIsSpecified;

  snd_pcm_t* _handleRecord;
  snd_pcm_t* _handlePlayout;

  snd_pcm_uframes_t _recordingBuffersizeInFrame;
  snd_pcm_uframes_t _recordingPeriodSizeInFrame;
  snd_pcm_uframes_t _playoutBufferSizeInFrame;
  snd_pcm_uframes_t _playoutPeriodSizeInFrame;

  ssize_t _recordingBufferSizeIn10MS;
  ssize_t _playoutBufferSizeIn10MS;
  uint32_t _recordingFramesIn10MS;
  uint32_t _playoutFramesIn10MS;

  uint32_t _recordingFreq;
  uint32_t _playoutFreq;
  uint8_t _recChannels;
  uint8_t _playChannels;

  int8_t* _recordingBuffer;  // in bytes
  int8_t* _playoutBuffer;    // in bytes
  uint32_t _recordingFramesLeft;
  uint32_t _playoutFramesLeft;

  bool _initialized;
  bool _recording;
  bool _playing;
  bool _recIsInitialized;
  bool _playIsInitialized;

  snd_pcm_sframes_t _recordingDelay;
  snd_pcm_sframes_t _playoutDelay;

  char _oldKeyState[32];
#if defined(WEBRTC_USE_X11)
  Display* _XDisplay;
#endif
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
File diff suppressed because it is too large
@ -0,0 +1,349 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
#define AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_

#include <memory>

#include "api/sequence_checker.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
#include "rtc_base/event.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"

#if defined(WEBRTC_USE_X11)
#include <X11/Xlib.h>
#endif

#include <pulse/pulseaudio.h>
#include <stddef.h>
#include <stdint.h>

// We define this flag if it's missing from our headers, because we want to be
// able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
// if run against a recent version of the library.
#ifndef PA_STREAM_ADJUST_LATENCY
#define PA_STREAM_ADJUST_LATENCY 0x2000U
#endif
#ifndef PA_STREAM_START_MUTED
#define PA_STREAM_START_MUTED 0x1000U
#endif

// Set this constant to 0 to disable latency reading.
const uint32_t WEBRTC_PA_REPORT_LATENCY = 1;

// Constants from the implementation by Tristan Schmelcher
// [tschmelcher@google.com].

// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY.
const uint32_t WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION = 13;

// Some timing constants for optimal operation. See
// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html
// for a good explanation of some of the factors that go into this.

// Playback.

// For playback, there is a round-trip delay to fill the server-side playback
// buffer, so setting too low of a latency is a buffer underflow risk. We will
// automatically increase the latency if a buffer underflow does occur, but we
// also enforce a sane minimum at start-up time. Anything lower would be
// virtually guaranteed to underflow at least once, so there's no point in
// allowing lower latencies.
const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS = 20;

// Every time a playback stream underflows, we will reconfigure it with a
// target latency that is greater by this amount.
const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS = 20;
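// Editor's note (illustrative arithmetic): a stream that starts at the 20 ms
// minimum and underflows is reconfigured to a 40 ms target, then 60 ms after
// another underflow, and so on.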

// We also need to configure a suitable request size. Too small and we'd burn
// CPU from the overhead of transferring small amounts of data at once. Too
// large and the amount of data remaining in the buffer right before refilling
// it would be a buffer underflow risk. We set it to half of the buffer size.
const uint32_t WEBRTC_PA_PLAYBACK_REQUEST_FACTOR = 2;
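// Editor's note (illustrative arithmetic): with a 40 ms server-side buffer, a
// factor of 2 yields ~20 ms requests, keeping per-request overhead low while
// half of the buffer is still filled whenever a refill is requested.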

// Capture.

// For capture, low latency is not a buffer overflow risk, but it makes us
// burn CPU from the overhead of transferring small amounts of data at once,
// so we set a recommended value that we use for the kLowLatency constant (but
// if the user explicitly requests something lower then we will honour it).
// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%.
const uint32_t WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS = 10;

// There is a round-trip delay to ack the data to the server, so the
// server-side buffer needs extra space to prevent buffer overflow. 20ms is
// sufficient, but there is no penalty to making it bigger, so we make it huge.
// (750ms is libpulse's default value for the _total_ buffer size in the
// kNoLatencyRequirements case.)
const uint32_t WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS = 750;

const uint32_t WEBRTC_PA_MSECS_PER_SEC = 1000;

// Init _configuredLatencyRec/Play to this value to disable latency
// requirements.
const int32_t WEBRTC_PA_NO_LATENCY_REQUIREMENTS = -1;

// Set this const to 1 to account for peeked and used data in latency
// calculation.
const uint32_t WEBRTC_PA_CAPTURE_BUFFER_LATENCY_ADJUSTMENT = 0;

typedef webrtc::adm_linux_pulse::PulseAudioSymbolTable WebRTCPulseSymbolTable;
WebRTCPulseSymbolTable* GetPulseSymbolTable();

namespace webrtc {

class AudioDeviceLinuxPulse : public AudioDeviceGeneric {
 public:
  AudioDeviceLinuxPulse();
  virtual ~AudioDeviceLinuxPulse();

  // Retrieve the currently utilized audio layer
  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const override;

  // Main initialization and termination
  InitStatus Init() override;
  int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool Initialized() const override;

  // Device enumeration
  int16_t PlayoutDevices() override;
  int16_t RecordingDevices() override;
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override;
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override;

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index) override;
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override;
  int32_t SetRecordingDevice(uint16_t index) override;
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override;

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available) override;
  int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool PlayoutIsInitialized() const override;
  int32_t RecordingIsAvailable(bool& available) override;
  int32_t InitRecording() override;
  bool RecordingIsInitialized() const override;

  // Audio transport control
  int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
  int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool Playing() const override;
  int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
  int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
  bool Recording() const override;

  // Audio mixer initialization
  int32_t InitSpeaker() override;
  bool SpeakerIsInitialized() const override;
  int32_t InitMicrophone() override;
  bool MicrophoneIsInitialized() const override;

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available) override;
  int32_t SetSpeakerVolume(uint32_t volume) override;
  int32_t SpeakerVolume(uint32_t& volume) const override;
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
  int32_t SetMicrophoneVolume(uint32_t volume) override;
  int32_t MicrophoneVolume(uint32_t& volume) const override;
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available) override;
  int32_t SetSpeakerMute(bool enable) override;
  int32_t SpeakerMute(bool& enabled) const override;

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool& available) override;
  int32_t SetMicrophoneMute(bool enable) override;
  int32_t MicrophoneMute(bool& enabled) const override;

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available) override;
  int32_t SetStereoPlayout(bool enable) override;
  int32_t StereoPlayout(bool& enabled) const override;
  int32_t StereoRecordingIsAvailable(bool& available) override;
  int32_t SetStereoRecording(bool enable) override;
  int32_t StereoRecording(bool& enabled) const override;

  // Delay information and control
  int32_t PlayoutDelay(uint16_t& delayMS) const
      RTC_LOCKS_EXCLUDED(mutex_) override;

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

 private:
  void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); }
  void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); }
  void WaitForOperationCompletion(pa_operation* paOperation) const;
  void WaitForSuccess(pa_operation* paOperation) const;

  bool KeyPressed() const;

  static void PaContextStateCallback(pa_context* c, void* pThis);
  static void PaSinkInfoCallback(pa_context* c,
                                 const pa_sink_info* i,
                                 int eol,
                                 void* pThis);
  static void PaSourceInfoCallback(pa_context* c,
                                   const pa_source_info* i,
                                   int eol,
                                   void* pThis);
  static void PaServerInfoCallback(pa_context* c,
                                   const pa_server_info* i,
                                   void* pThis);
  static void PaStreamStateCallback(pa_stream* p, void* pThis);
  void PaContextStateCallbackHandler(pa_context* c);
  void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
  void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);
  void PaServerInfoCallbackHandler(const pa_server_info* i);
  void PaStreamStateCallbackHandler(pa_stream* p);

  void EnableWriteCallback();
  void DisableWriteCallback();
  static void PaStreamWriteCallback(pa_stream* unused,
                                    size_t buffer_space,
                                    void* pThis);
  void PaStreamWriteCallbackHandler(size_t buffer_space);
  static void PaStreamUnderflowCallback(pa_stream* unused, void* pThis);
  void PaStreamUnderflowCallbackHandler();
  void EnableReadCallback();
  void DisableReadCallback();
  static void PaStreamReadCallback(pa_stream* unused1,
                                   size_t unused2,
                                   void* pThis);
  void PaStreamReadCallbackHandler();
  static void PaStreamOverflowCallback(pa_stream* unused, void* pThis);
  void PaStreamOverflowCallbackHandler();
  int32_t LatencyUsecs(pa_stream* stream);
  int32_t ReadRecordedData(const void* bufferData, size_t bufferSize);
  int32_t ProcessRecordedData(int8_t* bufferData,
                              uint32_t bufferSizeInSamples,
                              uint32_t recDelay);

  int32_t CheckPulseAudioVersion();
  int32_t InitSamplingFrequency();
  int32_t GetDefaultDeviceInfo(bool recDevice, char* name, uint16_t& index);
  int32_t InitPulseAudio();
  int32_t TerminatePulseAudio();

  void PaLock();
  void PaUnLock();

  static void RecThreadFunc(void*);
  static void PlayThreadFunc(void*);
  bool RecThreadProcess() RTC_LOCKS_EXCLUDED(mutex_);
  bool PlayThreadProcess() RTC_LOCKS_EXCLUDED(mutex_);

  AudioDeviceBuffer* _ptrAudioBuffer;

  mutable Mutex mutex_;
  rtc::Event _timeEventRec;
  rtc::Event _timeEventPlay;
  rtc::Event _recStartEvent;
  rtc::Event _playStartEvent;

  rtc::PlatformThread _ptrThreadPlay;
  rtc::PlatformThread _ptrThreadRec;

  AudioMixerManagerLinuxPulse _mixerManager;

  uint16_t _inputDeviceIndex;
  uint16_t _outputDeviceIndex;
  bool _inputDeviceIsSpecified;
  bool _outputDeviceIsSpecified;

  int sample_rate_hz_;
  uint8_t _recChannels;
  uint8_t _playChannels;

  // Stores the thread ID in the constructor.
  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
  // other methods are called from the same thread.
  // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
  SequenceChecker thread_checker_;

  bool _initialized;
  bool _recording;
  bool _playing;
  bool _recIsInitialized;
  bool _playIsInitialized;
  bool _startRec;
  bool _startPlay;
  bool update_speaker_volume_at_startup_;
  bool quit_ RTC_GUARDED_BY(&mutex_);

  uint32_t _sndCardPlayDelay RTC_GUARDED_BY(&mutex_);

  int32_t _writeErrors;

  uint16_t _deviceIndex;
  int16_t _numPlayDevices;
  int16_t _numRecDevices;
  char* _playDeviceName;
  char* _recDeviceName;
  char* _playDisplayDeviceName;
  char* _recDisplayDeviceName;
  char _paServerVersion[32];

  int8_t* _playBuffer;
  size_t _playbackBufferSize;
  size_t _playbackBufferUnused;
  size_t _tempBufferSpace;
  int8_t* _recBuffer;
  size_t _recordBufferSize;
  size_t _recordBufferUsed;
  const void* _tempSampleData;
  size_t _tempSampleDataSize;
  int32_t _configuredLatencyPlay;
  int32_t _configuredLatencyRec;

  // PulseAudio
  uint16_t _paDeviceIndex;
  bool _paStateChanged;

  pa_threaded_mainloop* _paMainloop;
  pa_mainloop_api* _paMainloopApi;
  pa_context* _paContext;

  pa_stream* _recStream;
  pa_stream* _playStream;
  uint32_t _recStreamFlags;
  uint32_t _playStreamFlags;
  pa_buffer_attr _playBufferAttr;
  pa_buffer_attr _recBufferAttr;

  char _oldKeyState[32];
#if defined(WEBRTC_USE_X11)
  Display* _XDisplay;
#endif
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
@ -0,0 +1,979 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"

#include <string.h>

#include "modules/audio_device/linux/audio_device_alsa_linux.h"
#include "rtc_base/logging.h"

// Accesses ALSA functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libasound, which means our
// binary will work on systems that don't have it.
#define LATE(sym) \
  LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \
              sym)

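// Editor's note: LATE(sym) resolves `sym` in the lazily loaded libasound.so.2
// at call time. For example, LATE(snd_mixer_open)(&handle, 0) below behaves
// like a direct snd_mixer_open(&handle, 0) call, minus the link-time
// dependency on libasound.
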
namespace webrtc {
|
||||
|
||||
AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA()
|
||||
: _outputMixerHandle(NULL),
|
||||
_inputMixerHandle(NULL),
|
||||
_outputMixerElement(NULL),
|
||||
_inputMixerElement(NULL) {
|
||||
RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
|
||||
|
||||
memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
|
||||
memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
|
||||
}
|
||||
|
||||
AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() {
|
||||
RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
|
||||
Close();
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PUBLIC METHODS
|
||||
// ============================================================================
|
||||
|
||||
int32_t AudioMixerManagerLinuxALSA::Close() {
|
||||
RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
|
||||
|
||||
MutexLock lock(&mutex_);
|
||||
|
||||
CloseSpeakerLocked();
|
||||
CloseMicrophoneLocked();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t AudioMixerManagerLinuxALSA::CloseSpeaker() {
|
||||
MutexLock lock(&mutex_);
|
||||
return CloseSpeakerLocked();
|
||||
}
|
||||
|
||||
int32_t AudioMixerManagerLinuxALSA::CloseSpeakerLocked() {
  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;

  int errVal = 0;

  if (_outputMixerHandle != NULL) {
    RTC_LOG(LS_VERBOSE) << "Closing playout mixer";
    // snd_mixer_free() returns void, so there is no error code to check.
    LATE(snd_mixer_free)(_outputMixerHandle);
    errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error detaching playout mixer: "
                        << LATE(snd_strerror)(errVal);
    }
    errVal = LATE(snd_mixer_close)(_outputMixerHandle);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
                        << errVal;
    }
    _outputMixerHandle = NULL;
    _outputMixerElement = NULL;
  }
  memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::CloseMicrophone() {
  MutexLock lock(&mutex_);
  return CloseMicrophoneLocked();
}

int32_t AudioMixerManagerLinuxALSA::CloseMicrophoneLocked() {
  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;

  int errVal = 0;

  if (_inputMixerHandle != NULL) {
    RTC_LOG(LS_VERBOSE) << "Closing record mixer";

    // snd_mixer_free() returns void, so there is no error code to check.
    LATE(snd_mixer_free)(_inputMixerHandle);
    RTC_LOG(LS_VERBOSE) << "Closing record mixer 2";

    errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error detaching record mixer: "
                        << LATE(snd_strerror)(errVal);
    }
    RTC_LOG(LS_VERBOSE) << "Closing record mixer 3";

    errVal = LATE(snd_mixer_close)(_inputMixerHandle);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
                        << errVal;
    }

    RTC_LOG(LS_VERBOSE) << "Closing record mixer 4";
    _inputMixerHandle = NULL;
    _inputMixerElement = NULL;
  }
  memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) {
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
                      << deviceName << ")";

  MutexLock lock(&mutex_);

  int errVal = 0;

  // Close any existing output mixer handle.
  if (_outputMixerHandle != NULL) {
    RTC_LOG(LS_VERBOSE) << "Closing playout mixer";

    // snd_mixer_free() returns void, so there is no error code to check.
    LATE(snd_mixer_free)(_outputMixerHandle);
    errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error detaching playout mixer: "
                        << LATE(snd_strerror)(errVal);
    }
    errVal = LATE(snd_mixer_close)(_outputMixerHandle);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
                        << errVal;
    }
  }
  _outputMixerHandle = NULL;
  _outputMixerElement = NULL;

  errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
    return -1;
  }

  char controlName[kAdmMaxDeviceNameSize] = {0};
  GetControlName(controlName, deviceName);

  RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
                      << ")";

  errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
                      << ") error: " << LATE(snd_strerror)(errVal);
    _outputMixerHandle = NULL;
    return -1;
  }
  strcpy(_outputMixerStr, controlName);

  errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR)
        << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
           "error: "
        << LATE(snd_strerror)(errVal);
    _outputMixerHandle = NULL;
    return -1;
  }

  // Load and find the proper mixer element.
  if (LoadSpeakerMixerElement() < 0) {
    return -1;
  }

  if (_outputMixerHandle != NULL) {
    RTC_LOG(LS_VERBOSE) << "the output mixer device is now open ("
                        << _outputMixerHandle << ")";
  }

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char* deviceName) {
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
                      << deviceName << ")";

  MutexLock lock(&mutex_);

  int errVal = 0;

  // Close any existing input mixer handle.
  if (_inputMixerHandle != NULL) {
    RTC_LOG(LS_VERBOSE) << "Closing record mixer";

    // snd_mixer_free() returns void, so there is no error code to check.
    LATE(snd_mixer_free)(_inputMixerHandle);
    errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error detaching record mixer: "
                        << LATE(snd_strerror)(errVal);
    }
    errVal = LATE(snd_mixer_close)(_inputMixerHandle);
    if (errVal < 0) {
      RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
                        << errVal;
    }
  }
  _inputMixerHandle = NULL;
  _inputMixerElement = NULL;

  errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
    return -1;
  }

  char controlName[kAdmMaxDeviceNameSize] = {0};
  GetControlName(controlName, deviceName);

  RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
                      << ")";

  errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
                      << ") error: " << LATE(snd_strerror)(errVal);

    _inputMixerHandle = NULL;
    return -1;
  }
  strcpy(_inputMixerStr, controlName);

  errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR)
        << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
           "error: "
        << LATE(snd_strerror)(errVal);

    _inputMixerHandle = NULL;
    return -1;
  }

  // Load and find the proper mixer element.
  if (LoadMicMixerElement() < 0) {
    return -1;
  }

  if (_inputMixerHandle != NULL) {
    RTC_LOG(LS_VERBOSE) << "the input mixer device is now open ("
                        << _inputMixerHandle << ")";
  }

  return 0;
}

bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const {
  RTC_DLOG(LS_INFO) << __FUNCTION__;

  return (_outputMixerHandle != NULL);
}

bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const {
  RTC_DLOG(LS_INFO) << __FUNCTION__;

  return (_inputMixerHandle != NULL);
}

int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(uint32_t volume) {
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
                      << volume << ")";

  MutexLock lock(&mutex_);

  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  int errVal = LATE(snd_mixer_selem_set_playback_volume_all)(
      _outputMixerElement, volume);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error changing master volume: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(uint32_t& volume) const {
  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  long int vol(0);

  int errVal = LATE(snd_mixer_selem_get_playback_volume)(
      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error getting output volume: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
                      << vol;

  volume = static_cast<uint32_t>(vol);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MaxSpeakerVolume(
    uint32_t& maxVolume) const {
  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  long int minVol(0);
  long int maxVol(0);

  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
      _outputMixerElement, &minVol, &maxVol);

  RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
                      << ", max: " << maxVol;

  if (maxVol <= minVol) {
    RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
                      << LATE(snd_strerror)(errVal);
  }

  maxVolume = static_cast<uint32_t>(maxVol);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MinSpeakerVolume(
    uint32_t& minVolume) const {
  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  long int minVol(0);
  long int maxVol(0);

  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
      _outputMixerElement, &minVol, &maxVol);

  RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
                      << ", max: " << maxVol;

  if (maxVol <= minVol) {
    RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
                      << LATE(snd_strerror)(errVal);
  }

  minVolume = static_cast<uint32_t>(minVol);

  return 0;
}

// TL: Have done testing with these but they don't seem reliable and
// they were therefore not added
/*
// ----------------------------------------------------------------------------
// SetMaxSpeakerVolume
// ----------------------------------------------------------------------------

int32_t AudioMixerManagerLinuxALSA::SetMaxSpeakerVolume(
    uint32_t maxVolume)
{
    if (_outputMixerElement == NULL)
    {
        RTC_LOG(LS_WARNING) << "no available output mixer element exists";
        return -1;
    }

    long int minVol(0);
    long int maxVol(0);

    int errVal = snd_mixer_selem_get_playback_volume_range(
        _outputMixerElement, &minVol, &maxVol);
    if ((maxVol <= minVol) || (errVal != 0))
    {
        RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
                            << snd_strerror(errVal);
    }

    maxVol = maxVolume;
    errVal = snd_mixer_selem_set_playback_volume_range(
        _outputMixerElement, minVol, maxVol);
    RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
                        << ", max: " << maxVol;
    if (errVal != 0)
    {
        RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
                          << snd_strerror(errVal);
        return -1;
    }

    return 0;
}

// ----------------------------------------------------------------------------
// SetMinSpeakerVolume
// ----------------------------------------------------------------------------

int32_t AudioMixerManagerLinuxALSA::SetMinSpeakerVolume(
    uint32_t minVolume)
{
    if (_outputMixerElement == NULL)
    {
        RTC_LOG(LS_WARNING) << "no available output mixer element exists";
        return -1;
    }

    long int minVol(0);
    long int maxVol(0);

    int errVal = snd_mixer_selem_get_playback_volume_range(
        _outputMixerElement, &minVol, &maxVol);
    if ((maxVol <= minVol) || (errVal != 0))
    {
        RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
                            << snd_strerror(errVal);
    }

    minVol = minVolume;
    errVal = snd_mixer_selem_set_playback_volume_range(
        _outputMixerElement, minVol, maxVol);
    RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
                        << ", max: " << maxVol;
    if (errVal != 0)
    {
        RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
                          << snd_strerror(errVal);
        return -1;
    }

    return 0;
}
*/

int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) {
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
                      << enable << ")";

  MutexLock lock(&mutex_);

  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
    return -1;
  }

  // Ensure that the selected speaker destination has a valid mute control.
  bool available(false);
  SpeakerMuteIsAvailable(available);
  if (!available) {
    RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
    return -1;
  }

  // Note: value = 0 (off) means muted.
  int errVal = LATE(snd_mixer_selem_set_playback_switch_all)(
      _outputMixerElement, !enable);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error setting playback switch: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const {
  if (_outputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available output mixer exists";
    return -1;
  }

  // Ensure that the selected speaker destination has a valid mute control.
  bool available =
      LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
  if (!available) {
    RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
    return -1;
  }

  int value(false);

  // Retrieve one boolean control value for the specified mute control.
  int errVal = LATE(snd_mixer_selem_get_playback_switch)(
      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error getting playback switch: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }

  // Note: value = 0 (off) means muted.
  enabled = (bool)!value;

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
  return 0;
}

int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) {
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
                      << enable << ")";

  MutexLock lock(&mutex_);

  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  // Ensure that the selected microphone destination has a valid mute control.
  bool available(false);
  MicrophoneMuteIsAvailable(available);
  if (!available) {
    RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
    return -1;
  }

  // Note: value = 0 (off) means muted.
  int errVal =
      LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, !enable);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error setting capture switch: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const {
  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer exists";
    return -1;
  }

  // Ensure that the selected microphone destination has a valid mute control.
  bool available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
  if (!available) {
    RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
    return -1;
  }

  int value(false);

  // Retrieve one boolean control value for the specified mute control.
  int errVal = LATE(snd_mixer_selem_get_capture_switch)(
      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error getting capture switch: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }

  // Note: value = 0 (off) means muted.
  enabled = (bool)!value;

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable(
    bool& available) {
  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume=" << volume
      << ")";

  MutexLock lock(&mutex_);

  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  int errVal =
      LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, volume);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error changing microphone volume: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }

  return 0;
}

// TL: Have done testing with these but they don't seem reliable and
// they were therefore not added
/*
// ----------------------------------------------------------------------------
// SetMaxMicrophoneVolume
// ----------------------------------------------------------------------------

int32_t AudioMixerManagerLinuxALSA::SetMaxMicrophoneVolume(
    uint32_t maxVolume)
{
    if (_inputMixerElement == NULL)
    {
        RTC_LOG(LS_WARNING) << "no available input mixer element exists";
        return -1;
    }

    long int minVol(0);
    long int maxVol(0);

    int errVal = snd_mixer_selem_get_capture_volume_range(_inputMixerElement,
                                                          &minVol, &maxVol);
    if ((maxVol <= minVol) || (errVal != 0))
    {
        RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
                            << snd_strerror(errVal);
    }

    maxVol = (long int)maxVolume;
    printf("min %ld max %ld", minVol, maxVol);
    errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement,
                                                      minVol, maxVol);
    RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
                        << ", max: " << maxVol;
    if (errVal != 0)
    {
        RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
                          << snd_strerror(errVal);
        return -1;
    }

    return 0;
}

// ----------------------------------------------------------------------------
// SetMinMicrophoneVolume
// ----------------------------------------------------------------------------

int32_t AudioMixerManagerLinuxALSA::SetMinMicrophoneVolume(
    uint32_t minVolume)
{
    if (_inputMixerElement == NULL)
    {
        RTC_LOG(LS_WARNING) << "no available input mixer element exists";
        return -1;
    }

    long int minVol(0);
    long int maxVol(0);

    int errVal = snd_mixer_selem_get_capture_volume_range(
        _inputMixerElement, &minVol, &maxVol);
    if (maxVol <= minVol)
    {
        // maxVol = 255;
        RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
                            << snd_strerror(errVal);
    }

    printf("min %ld max %ld", minVol, maxVol);
    minVol = (long int)minVolume;
    errVal = snd_mixer_selem_set_capture_volume_range(
        _inputMixerElement, minVol, maxVol);
    RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
                        << ", max: " << maxVol;
    if (errVal != 0)
    {
        RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
                          << snd_strerror(errVal);
        return -1;
    }

    return 0;
}
*/

int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  long int vol(0);

  int errVal = LATE(snd_mixer_selem_get_capture_volume)(
      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "Error getting input volume: "
                      << LATE(snd_strerror)(errVal);
    return -1;
  }
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol=" << vol;

  volume = static_cast<uint32_t>(vol);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MaxMicrophoneVolume(
    uint32_t& maxVolume) const {
  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  long int minVol(0);
  long int maxVol(0);

  // Check if we have a microphone volume control at all.
  if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) {
    RTC_LOG(LS_ERROR) << "No microphone volume available";
    return -1;
  }

  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
      _inputMixerElement, &minVol, &maxVol);

  RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
                      << ", max: " << maxVol;
  if (maxVol <= minVol) {
    RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
                      << LATE(snd_strerror)(errVal);
  }

  maxVolume = static_cast<uint32_t>(maxVol);

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::MinMicrophoneVolume(
    uint32_t& minVolume) const {
  if (_inputMixerElement == NULL) {
    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
    return -1;
  }

  long int minVol(0);
  long int maxVol(0);

  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
      _inputMixerElement, &minVol, &maxVol);

  RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
                      << ", max: " << maxVol;
  if (maxVol <= minVol) {
    RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
                      << LATE(snd_strerror)(errVal);
  }

  minVolume = static_cast<uint32_t>(minVol);

  return 0;
}

// ============================================================================
// Private Methods
// ============================================================================

int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const {
  int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
                      << LATE(snd_strerror)(errVal);
    _inputMixerHandle = NULL;
    return -1;
  }

  snd_mixer_elem_t* elem = NULL;
  snd_mixer_elem_t* micElem = NULL;
  unsigned mixerIdx = 0;
  const char* selemName = NULL;

  // Find and store handles to the right mixer elements.
  for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem;
       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
    if (LATE(snd_mixer_selem_is_active)(elem)) {
      selemName = LATE(snd_mixer_selem_get_name)(elem);
      if (strcmp(selemName, "Capture") == 0) {  // "Capture", "Mic"
        _inputMixerElement = elem;
        RTC_LOG(LS_VERBOSE) << "Capture element set";
      } else if (strcmp(selemName, "Mic") == 0) {
        micElem = elem;
        RTC_LOG(LS_VERBOSE) << "Mic element found";
      }
    }

    if (_inputMixerElement) {
      // Use the first Capture element that is found.
      // The second one may not work.
      break;
    }
  }

  if (_inputMixerElement == NULL) {
    // We didn't find a Capture handle, use Mic.
    if (micElem != NULL) {
      _inputMixerElement = micElem;
      RTC_LOG(LS_VERBOSE) << "Using Mic as capture volume.";
    } else {
      _inputMixerElement = NULL;
      RTC_LOG(LS_ERROR) << "Could not find capture volume on the mixer.";

      return -1;
    }
  }

  return 0;
}

int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const {
  int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
  if (errVal < 0) {
    RTC_LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
                      << LATE(snd_strerror)(errVal);
    _outputMixerHandle = NULL;
    return -1;
  }

  snd_mixer_elem_t* elem = NULL;
  snd_mixer_elem_t* masterElem = NULL;
  snd_mixer_elem_t* speakerElem = NULL;
  unsigned mixerIdx = 0;
  const char* selemName = NULL;

  // Find and store handles to the right mixer elements.
  for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem;
       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
    if (LATE(snd_mixer_selem_is_active)(elem)) {
      selemName = LATE(snd_mixer_selem_get_name)(elem);
      RTC_LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
                          << selemName << " =" << elem;

      // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
      if (strcmp(selemName, "PCM") == 0) {
        _outputMixerElement = elem;
        RTC_LOG(LS_VERBOSE) << "PCM element set";
      } else if (strcmp(selemName, "Master") == 0) {
        masterElem = elem;
        RTC_LOG(LS_VERBOSE) << "Master element found";
      } else if (strcmp(selemName, "Speaker") == 0) {
        speakerElem = elem;
        RTC_LOG(LS_VERBOSE) << "Speaker element found";
      }
    }

    if (_outputMixerElement) {
      // We have found the element we want.
      break;
    }
  }

  // If we didn't find a PCM handle, use Master or Speaker.
  if (_outputMixerElement == NULL) {
    if (masterElem != NULL) {
      _outputMixerElement = masterElem;
      RTC_LOG(LS_VERBOSE) << "Using Master as output volume.";
    } else if (speakerElem != NULL) {
      _outputMixerElement = speakerElem;
      RTC_LOG(LS_VERBOSE) << "Using Speaker as output volume.";
    } else {
      _outputMixerElement = NULL;
      RTC_LOG(LS_ERROR) << "Could not find output volume in the mixer.";
      return -1;
    }
  }

  return 0;
}

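// For example (illustrative): a PCM device name such as
// "front:CARD=Intel,DEV=0" maps to the control device "hw:CARD=Intel", while
// a name without a ':' separator is copied through unchanged.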
void AudioMixerManagerLinuxALSA::GetControlName(char* controlName,
                                                char* deviceName) const {
  // Example:
  // deviceName: "front:CARD=Intel,DEV=0"
  // controlName: "hw:CARD=Intel"
  char* pos1 = strchr(deviceName, ':');
  char* pos2 = strchr(deviceName, ',');
  if (!pos2) {
    // Can also be default:CARD=Intel
    pos2 = &deviceName[strlen(deviceName)];
  }
  if (pos1 && pos2) {
    strcpy(controlName, "hw");
    int nChar = (int)(pos2 - pos1);
    strncpy(&controlName[2], pos1, nChar);
    controlName[2 + nChar] = '\0';
  } else {
    strcpy(controlName, deviceName);
  }
}

}  // namespace webrtc

@ -0,0 +1,71 @@
/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_

#include <alsa/asoundlib.h>

#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/linux/alsasymboltable_linux.h"
#include "rtc_base/synchronization/mutex.h"

namespace webrtc {

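// Controls volume and mute for one ALSA playout device and one capture
// device. The public entry points acquire mutex_ themselves; the *Locked
// helpers assume the caller already holds it.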
class AudioMixerManagerLinuxALSA {
 public:
  AudioMixerManagerLinuxALSA();
  ~AudioMixerManagerLinuxALSA();

  int32_t OpenSpeaker(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
  int32_t OpenMicrophone(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
  int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
  int32_t SpeakerVolume(uint32_t& volume) const;
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
  int32_t MinSpeakerVolume(uint32_t& minVolume) const;
  int32_t SpeakerVolumeIsAvailable(bool& available);
  int32_t SpeakerMuteIsAvailable(bool& available);
  int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
  int32_t SpeakerMute(bool& enabled) const;
  int32_t MicrophoneMuteIsAvailable(bool& available);
  int32_t SetMicrophoneMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
  int32_t MicrophoneMute(bool& enabled) const;
  int32_t MicrophoneVolumeIsAvailable(bool& available);
  int32_t SetMicrophoneVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
  int32_t MicrophoneVolume(uint32_t& volume) const;
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
  int32_t Close() RTC_LOCKS_EXCLUDED(mutex_);
  int32_t CloseSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
  int32_t CloseMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
  bool SpeakerIsInitialized() const;
  bool MicrophoneIsInitialized() const;

 private:
  int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  int32_t LoadMicMixerElement() const;
  int32_t LoadSpeakerMixerElement() const;
  void GetControlName(char* controlName, char* deviceName) const;

  Mutex mutex_;
  mutable snd_mixer_t* _outputMixerHandle;
  char _outputMixerStr[kAdmMaxDeviceNameSize];
  mutable snd_mixer_t* _inputMixerHandle;
  char _inputMixerStr[kAdmMaxDeviceNameSize];
  mutable snd_mixer_elem_t* _outputMixerElement;
  mutable snd_mixer_elem_t* _inputMixerElement;
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_

@ -0,0 +1,844 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"

#include <stddef.h>

#include "modules/audio_device/linux/audio_device_pulse_linux.h"
#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"

// Accesses Pulse functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libpulse, which means our
// binary will work on systems that don't have it.
#define LATE(sym) \
  LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
              GetPulseSymbolTable(), sym)

namespace webrtc {

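// RAII guard for the PulseAudio threaded mainloop: the constructor locks the
// mainloop and the destructor unlocks it when the guard goes out of scope.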
class AutoPulseLock {
 public:
  explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
      : pa_mainloop_(pa_mainloop) {
    LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
  }

  ~AutoPulseLock() { LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); }

 private:
  pa_threaded_mainloop* const pa_mainloop_;
};

AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse()
    : _paOutputDeviceIndex(-1),
      _paInputDeviceIndex(-1),
      _paPlayStream(NULL),
      _paRecStream(NULL),
      _paMainloop(NULL),
      _paContext(NULL),
      _paVolume(0),
      _paMute(0),
      _paVolSteps(0),
      _paSpeakerMute(false),
      _paSpeakerVolume(PA_VOLUME_NORM),
      _paChannels(0),
      _paObjectsSet(false) {
  RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
}

AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";

  Close();
}

// ===========================================================================
// PUBLIC METHODS
// ===========================================================================

int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
    pa_threaded_mainloop* mainloop,
    pa_context* context) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;

  if (!mainloop || !context) {
    RTC_LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
    return -1;
  }

  _paMainloop = mainloop;
  _paContext = context;
  _paObjectsSet = true;

  RTC_LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer have been set";

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::Close() {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;

  CloseSpeaker();
  CloseMicrophone();

  _paMainloop = NULL;
  _paContext = NULL;
  _paObjectsSet = false;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;

  // Reset the index to -1.
  _paOutputDeviceIndex = -1;
  _paPlayStream = NULL;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;

  // Reset the index to -1.
  _paInputDeviceIndex = -1;
  _paRecStream = NULL;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";

  _paPlayStream = playStream;
  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";

  _paRecStream = recStream;
  return 0;
}

int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(uint16_t deviceIndex) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
                      << deviceIndex << ")";

  // No point in opening the speaker if the PA objects have not been set.
  if (!_paObjectsSet) {
    RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
    return -1;
  }

  // Set the index for the PulseAudio output device to control.
  _paOutputDeviceIndex = deviceIndex;

  RTC_LOG(LS_VERBOSE) << "the output mixer device is now open";

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(uint16_t deviceIndex) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
      << deviceIndex << ")";

  // No point in opening the microphone if the PA objects have not been set.
  if (!_paObjectsSet) {
    RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
    return -1;
  }

  // Set the index for the PulseAudio input device to control.
  _paInputDeviceIndex = deviceIndex;

  RTC_LOG(LS_VERBOSE) << "the input mixer device is now open";

  return 0;
}

bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_INFO) << __FUNCTION__;

  return (_paOutputDeviceIndex != -1);
}

bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_DLOG(LS_INFO) << __FUNCTION__;

  return (_paInputDeviceIndex != -1);
}

int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(uint32_t volume) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
                      << volume << ")";

  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  bool setFailed(false);

  if (_paPlayStream &&
      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
    // We can only really set the volume if we have a connected stream.
    AutoPulseLock auto_lock(_paMainloop);

    // Get the number of channels from the sample specification.
    const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_paPlayStream);
    if (!spec) {
      RTC_LOG(LS_ERROR) << "could not get sample specification";
      return -1;
    }

    // Set the same volume for all channels.
    pa_cvolume cVolumes;
    LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);

    pa_operation* paOperation = NULL;
    paOperation = LATE(pa_context_set_sink_input_volume)(
        _paContext, LATE(pa_stream_get_index)(_paPlayStream), &cVolumes,
        PaSetVolumeCallback, NULL);
    if (!paOperation) {
      setFailed = true;
    }

    // Don't need to wait for the completion.
    LATE(pa_operation_unref)(paOperation);
  } else {
    // We have not created a stream or it's not connected to the sink.
    // Save the volume to be set at connection.
    _paSpeakerVolume = volume;
  }

  if (setFailed) {
    RTC_LOG(LS_WARNING) << "could not set speaker volume, error="
                        << LATE(pa_context_errno)(_paContext);

    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const {
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  if (_paPlayStream &&
      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
    // We can only get the volume if we have a connected stream.
    if (!GetSinkInputInfo())
      return -1;

    AutoPulseLock auto_lock(_paMainloop);
    volume = static_cast<uint32_t>(_paVolume);
  } else {
    AutoPulseLock auto_lock(_paMainloop);
    volume = _paSpeakerVolume;
  }

  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
                      << volume;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MaxSpeakerVolume(
    uint32_t& maxVolume) const {
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  // PA_VOLUME_NORM corresponds to 100% (0 dB), although PA allows
  // amplification above that.
  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MinSpeakerVolume(
    uint32_t& minVolume) const {
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  // Always available in PulseAudio.
  available = true;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  // Always available in PulseAudio.
  available = true;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
                      << enable << ")";

  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  bool setFailed(false);

  if (_paPlayStream &&
      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
    // We can only really mute if we have a connected stream.
    AutoPulseLock auto_lock(_paMainloop);

    pa_operation* paOperation = NULL;
    paOperation = LATE(pa_context_set_sink_input_mute)(
        _paContext, LATE(pa_stream_get_index)(_paPlayStream), (int)enable,
        PaSetVolumeCallback, NULL);
    if (!paOperation) {
      setFailed = true;
    }

    // Don't need to wait for the completion.
    LATE(pa_operation_unref)(paOperation);
  } else {
    // We have not created a stream or it's not connected to the sink.
    // Save the mute status to be set at connection.
    _paSpeakerMute = enable;
  }

  if (setFailed) {
    RTC_LOG(LS_WARNING) << "could not mute speaker, error="
                        << LATE(pa_context_errno)(_paContext);
    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const {
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  if (_paPlayStream &&
      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
    // We can only get the mute status if we have a connected stream.
    if (!GetSinkInputInfo())
      return -1;

    enabled = static_cast<bool>(_paMute);
  } else {
    enabled = _paSpeakerMute;
  }
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled=" << enabled;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paOutputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "output device index has not been set";
    return -1;
  }

  uint32_t deviceIndex = (uint32_t)_paOutputDeviceIndex;

  {
    AutoPulseLock auto_lock(_paMainloop);

    // Get the actual stream device index if we have a connected stream.
    // The device used by the stream can be changed during the call.
    if (_paPlayStream &&
        (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
      deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
    }
  }

  if (!GetSinkInfoByIndex(deviceIndex))
    return -1;

  available = static_cast<bool>(_paChannels == 2);

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(
    bool& available) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;

  AutoPulseLock auto_lock(_paMainloop);

  // Get the actual stream device index if we have a connected stream.
  // The device used by the stream can be changed during the call.
  if (_paRecStream &&
      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
  }

  pa_operation* paOperation = NULL;

  // Get info for this source. We want to know if the actual device can
  // record in stereo.
  paOperation = LATE(pa_context_get_source_info_by_index)(
      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);

  WaitForOperationCompletion(paOperation);

  available = static_cast<bool>(_paChannels == 2);

  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
         " => available="
      << available;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
    bool& available) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  // Always available in PulseAudio.
  available = true;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=" << enable
      << ")";

  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  bool setFailed(false);
  pa_operation* paOperation = NULL;

  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;

  AutoPulseLock auto_lock(_paMainloop);

  // Get the actual stream device index if we have a connected stream.
  // The device used by the stream can be changed during the call.
  if (_paRecStream &&
      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
  }

  // Set the mute switch for the source.
  paOperation = LATE(pa_context_set_source_mute_by_index)(
      _paContext, deviceIndex, enable, PaSetVolumeCallback, NULL);

  if (!paOperation) {
    setFailed = true;
  }

  // Don't need to wait for this to complete.
  LATE(pa_operation_unref)(paOperation);

  if (setFailed) {
    RTC_LOG(LS_WARNING) << "could not mute microphone, error="
                        << LATE(pa_context_errno)(_paContext);
    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;

  {
    AutoPulseLock auto_lock(_paMainloop);
    // Get the actual stream device index if we have a connected stream.
    // The device used by the stream can be changed during the call.
    if (_paRecStream &&
        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
    }
  }

  if (!GetSourceInfoByIndex(deviceIndex))
    return -1;

  enabled = static_cast<bool>(_paMute);

  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled=" << enabled;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
    bool& available) {
  RTC_DCHECK(thread_checker_.IsCurrent());
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  // Always available in PulseAudio.
  available = true;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=" << volume
      << ")";

  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  // Unlike output streams, input streams have no concept of a stream
  // volume, only a device volume. So we have to change the volume of the
  // device itself.

  // The device may have a different number of channels than the stream and
  // their mapping may be different, so we don't want to use the channel
  // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
  // bases, and the server allows that even if the device's channel count
  // is lower, but some buggy PA clients don't like that (the pavucontrol
  // on Hardy dies in an assert if the channel count is different). So
  // instead we look up the actual number of channels that the device has.
  AutoPulseLock auto_lock(_paMainloop);
  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;

  // Get the actual stream device index if we have a connected stream.
  // The device used by the stream can be changed during the call.
  if (_paRecStream &&
      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
  }

  bool setFailed(false);
  pa_operation* paOperation = NULL;

  // Get the number of channels for this source.
  paOperation = LATE(pa_context_get_source_info_by_index)(
      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);

  WaitForOperationCompletion(paOperation);

  uint8_t channels = _paChannels;
  pa_cvolume cVolumes;
  LATE(pa_cvolume_set)(&cVolumes, channels, volume);

  // Set the volume for the source.
  paOperation = LATE(pa_context_set_source_volume_by_index)(
      _paContext, deviceIndex, &cVolumes, PaSetVolumeCallback, NULL);

  if (!paOperation) {
    setFailed = true;
  }

  // Don't need to wait for this to complete.
  LATE(pa_operation_unref)(paOperation);

  if (setFailed) {
    RTC_LOG(LS_WARNING) << "could not set microphone volume, error="
                        << LATE(pa_context_errno)(_paContext);
    return -1;
  }

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;

  {
    AutoPulseLock auto_lock(_paMainloop);
    // Get the actual stream device index if we have a connected stream.
    // The device used by the stream can be changed during the call.
    if (_paRecStream &&
        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
    }
  }

  if (!GetSourceInfoByIndex(deviceIndex))
    return -1;

  {
    AutoPulseLock auto_lock(_paMainloop);
    volume = static_cast<uint32_t>(_paVolume);
  }

  RTC_LOG(LS_VERBOSE)
      << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=" << volume;

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(
    uint32_t& maxVolume) const {
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  // PA_VOLUME_NORM corresponds to 100% (0 dB). PA allows amplification above
  // that, but it doesn't work well for all sound cards.
  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);

  return 0;
}

int32_t AudioMixerManagerLinuxPulse::MinMicrophoneVolume(
    uint32_t& minVolume) const {
  if (_paInputDeviceIndex == -1) {
    RTC_LOG(LS_WARNING) << "input device index has not been set";
    return -1;
  }

  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);

  return 0;
}

// ===========================================================================
// Private Methods
// ===========================================================================

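// The static callbacks below are C-style trampolines: PulseAudio passes back
// the user-data pointer, which is cast to the owning instance so the
// corresponding member handler can run.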
void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
                                                     const pa_sink_info* i,
                                                     int eol,
                                                     void* pThis) {
  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(
      i, eol);
}

void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
    pa_context* /*c*/,
    const pa_sink_input_info* i,
    int eol,
    void* pThis) {
  static_cast<AudioMixerManagerLinuxPulse*>(pThis)
      ->PaSinkInputInfoCallbackHandler(i, eol);
}

void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
                                                       const pa_source_info* i,
                                                       int eol,
                                                       void* pThis) {
  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(
      i, eol);
}

void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context* c,
                                                      int success,
                                                      void* /*pThis*/) {
  if (!success) {
    RTC_LOG(LS_ERROR) << "failed to set volume";
  }
}

void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
    const pa_sink_info* i,
    int eol) {
  if (eol) {
    // Signal that we are done.
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
    return;
  }

  _paChannels = i->channel_map.channels;   // Get the number of channels.
  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
  for (int j = 0; j < _paChannels; ++j) {
    if (paVolume < i->volume.values[j]) {
      paVolume = i->volume.values[j];
    }
  }
  _paVolume = paVolume;  // Get the max volume for any channel.
  _paMute = i->mute;     // Get the mute status.

  // supported since PA 0.9.15
  //_paVolSteps = i->n_volume_steps;  // Get the number of volume steps.
  // default value is PA_VOLUME_NORM + 1
  _paVolSteps = PA_VOLUME_NORM + 1;
}

void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler(
    const pa_sink_input_info* i,
    int eol) {
  if (eol) {
    // Signal that we are done.
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
    return;
  }

  _paChannels = i->channel_map.channels;   // Get the number of channels.
  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
  for (int j = 0; j < _paChannels; ++j) {
    if (paVolume < i->volume.values[j]) {
      paVolume = i->volume.values[j];
    }
  }
  _paVolume = paVolume;  // Get the max volume for any channel.
  _paMute = i->mute;     // Get the mute status.
}

void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
    const pa_source_info* i,
    int eol) {
  if (eol) {
    // Signal that we are done.
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
    return;
  }

  _paChannels = i->channel_map.channels;   // Get the number of channels.
  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
  for (int j = 0; j < _paChannels; ++j) {
    if (paVolume < i->volume.values[j]) {
      paVolume = i->volume.values[j];
    }
  }
  _paVolume = paVolume;  // Get the max volume for any channel.
  _paMute = i->mute;     // Get the mute status.

  // supported since PA 0.9.15
  //_paVolSteps = i->n_volume_steps;  // Get the number of volume steps.
  // default value is PA_VOLUME_NORM + 1
  _paVolSteps = PA_VOLUME_NORM + 1;
}

void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
|
||||
pa_operation* paOperation) const {
|
||||
while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
|
||||
LATE(pa_threaded_mainloop_wait)(_paMainloop);
|
||||
}
|
||||
|
||||
LATE(pa_operation_unref)(paOperation);
|
||||
}
|
||||
|
||||
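// Note on the pattern used by the Get*Info() helpers below: each one issues an
// asynchronous request on the PulseAudio context and then blocks the calling
// thread on the threaded mainloop until the corresponding info callback hits
// end-of-list and calls pa_threaded_mainloop_signal(). A minimal sketch of the
// idiom, using the calls from this file:
//
//   AutoPulseLock auto_lock(_paMainloop);
//   pa_operation* op = LATE(pa_context_get_sink_info_by_index)(
//       _paContext, device_index, PaSinkInfoCallback, (void*)this);
//   WaitForOperationCompletion(op);  // Waits, then unrefs the operation.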
bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
  pa_operation* paOperation = NULL;

  AutoPulseLock auto_lock(_paMainloop);
  // Get info for this stream (sink input).
  paOperation = LATE(pa_context_get_sink_input_info)(
      _paContext, LATE(pa_stream_get_index)(_paPlayStream),
      PaSinkInputInfoCallback, (void*)this);

  WaitForOperationCompletion(paOperation);
  return true;
}

bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(int device_index) const {
  pa_operation* paOperation = NULL;

  AutoPulseLock auto_lock(_paMainloop);
  paOperation = LATE(pa_context_get_sink_info_by_index)(
      _paContext, device_index, PaSinkInfoCallback, (void*)this);

  WaitForOperationCompletion(paOperation);
  return true;
}

bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(int device_index) const {
  pa_operation* paOperation = NULL;

  AutoPulseLock auto_lock(_paMainloop);
  paOperation = LATE(pa_context_get_source_info_by_index)(
      _paContext, device_index, PaSourceInfoCallback, (void*)this);

  WaitForOperationCompletion(paOperation);
  return true;
}

}  // namespace webrtc

@ -0,0 +1,114 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_

#include <pulse/pulseaudio.h>
#include <stdint.h>

#include "api/sequence_checker.h"

#ifndef UINT32_MAX
#define UINT32_MAX ((uint32_t)-1)
#endif

namespace webrtc {

class AudioMixerManagerLinuxPulse {
 public:
  int32_t SetPlayStream(pa_stream* playStream);
  int32_t SetRecStream(pa_stream* recStream);
  int32_t OpenSpeaker(uint16_t deviceIndex);
  int32_t OpenMicrophone(uint16_t deviceIndex);
  int32_t SetSpeakerVolume(uint32_t volume);
  int32_t SpeakerVolume(uint32_t& volume) const;
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
  int32_t MinSpeakerVolume(uint32_t& minVolume) const;
  int32_t SpeakerVolumeIsAvailable(bool& available);
  int32_t SpeakerMuteIsAvailable(bool& available);
  int32_t SetSpeakerMute(bool enable);
  int32_t StereoPlayoutIsAvailable(bool& available);
  int32_t StereoRecordingIsAvailable(bool& available);
  int32_t SpeakerMute(bool& enabled) const;
  int32_t MicrophoneMuteIsAvailable(bool& available);
  int32_t SetMicrophoneMute(bool enable);
  int32_t MicrophoneMute(bool& enabled) const;
  int32_t MicrophoneVolumeIsAvailable(bool& available);
  int32_t SetMicrophoneVolume(uint32_t volume);
  int32_t MicrophoneVolume(uint32_t& volume) const;
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
  int32_t SetPulseAudioObjects(pa_threaded_mainloop* mainloop,
                               pa_context* context);
  int32_t Close();
  int32_t CloseSpeaker();
  int32_t CloseMicrophone();
  bool SpeakerIsInitialized() const;
  bool MicrophoneIsInitialized() const;

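  // Typical call order, as a sketch of how the methods above fit together
  // (not a contract enforced by this class): SetPulseAudioObjects() first,
  // then SetPlayStream()/SetRecStream(), then OpenSpeaker()/OpenMicrophone()
  // with a PulseAudio device index, after which the volume and mute accessors
  // operate on the opened devices. Close() tears everything down again.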
 public:
  AudioMixerManagerLinuxPulse();
  ~AudioMixerManagerLinuxPulse();

 private:
  static void PaSinkInfoCallback(pa_context* c,
                                 const pa_sink_info* i,
                                 int eol,
                                 void* pThis);
  static void PaSinkInputInfoCallback(pa_context* c,
                                      const pa_sink_input_info* i,
                                      int eol,
                                      void* pThis);
  static void PaSourceInfoCallback(pa_context* c,
                                   const pa_source_info* i,
                                   int eol,
                                   void* pThis);
  static void PaSetVolumeCallback(pa_context* /*c*/,
                                  int success,
                                  void* /*pThis*/);
  void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
  void PaSinkInputInfoCallbackHandler(const pa_sink_input_info* i, int eol);
  void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);

  void WaitForOperationCompletion(pa_operation* paOperation) const;

  bool GetSinkInputInfo() const;
  bool GetSinkInfoByIndex(int device_index) const;
  bool GetSourceInfoByIndex(int device_index) const;

 private:
  int16_t _paOutputDeviceIndex;
  int16_t _paInputDeviceIndex;

  pa_stream* _paPlayStream;
  pa_stream* _paRecStream;

  pa_threaded_mainloop* _paMainloop;
  pa_context* _paContext;

  mutable uint32_t _paVolume;
  mutable uint32_t _paMute;
  mutable uint32_t _paVolSteps;
  bool _paSpeakerMute;
  mutable uint32_t _paSpeakerVolume;
  mutable uint8_t _paChannels;
  bool _paObjectsSet;

  // Stores the thread ID in the constructor.
  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
  // other methods are called from the same thread.
  // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
  SequenceChecker thread_checker_;
};

}  // namespace webrtc

#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_

@ -0,0 +1,106 @@
/*
 * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/linux/latebindingsymboltable_linux.h"

#include <string>

#include "absl/strings/string_view.h"
#include "rtc_base/logging.h"

#ifdef WEBRTC_LINUX
#include <dlfcn.h>
#endif

namespace webrtc {
namespace adm_linux {

inline static const char* GetDllError() {
#ifdef WEBRTC_LINUX
  char* err = dlerror();
  if (err) {
    return err;
  } else {
    return "No error";
  }
#else
#error Not implemented
#endif
}

DllHandle InternalLoadDll(absl::string_view dll_name) {
#ifdef WEBRTC_LINUX
  DllHandle handle = dlopen(std::string(dll_name).c_str(), RTLD_NOW);
#else
#error Not implemented
#endif
  if (handle == kInvalidDllHandle) {
    RTC_LOG(LS_WARNING) << "Can't load " << dll_name << " : " << GetDllError();
  }
  return handle;
}

void InternalUnloadDll(DllHandle handle) {
#ifdef WEBRTC_LINUX
// TODO(pbos): Remove this dlclose() exclusion when leaks and suppressions from
// here are gone (or AddressSanitizer can display them properly).
//
// Skip dlclose() on AddressSanitizer as leaks including this module in the
// stack trace gets displayed as <unknown module> instead of the actual library
// -> it can not be suppressed.
// https://code.google.com/p/address-sanitizer/issues/detail?id=89
#if !defined(ADDRESS_SANITIZER)
  if (dlclose(handle) != 0) {
    RTC_LOG(LS_ERROR) << GetDllError();
  }
#endif  // !defined(ADDRESS_SANITIZER)
#else
#error Not implemented
#endif
}

static bool LoadSymbol(DllHandle handle,
                       absl::string_view symbol_name,
                       void** symbol) {
#ifdef WEBRTC_LINUX
  *symbol = dlsym(handle, std::string(symbol_name).c_str());
  char* err = dlerror();
  if (err) {
    RTC_LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
    return false;
  } else if (!*symbol) {
    RTC_LOG(LS_ERROR) << "Symbol " << symbol_name << " is NULL";
    return false;
  }
  return true;
#else
#error Not implemented
#endif
}

// This routine MUST assign SOME value for every symbol, even if that value is
// NULL, or else some symbols may be left with uninitialized data that the
// caller may later interpret as a valid address.
bool InternalLoadSymbols(DllHandle handle,
                         int num_symbols,
                         const char* const symbol_names[],
                         void* symbols[]) {
#ifdef WEBRTC_LINUX
  // Clear any old errors.
  dlerror();
#endif
  for (int i = 0; i < num_symbols; ++i) {
    if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
      return false;
    }
  }
  return true;
}

}  // namespace adm_linux
}  // namespace webrtc

@ -0,0 +1,168 @@
/*
 * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
#define AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_

#include <stddef.h>  // for NULL
#include <string.h>

#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"

// This file provides macros for creating "symbol table" classes to simplify
// the dynamic loading of symbols from DLLs. Currently the implementation only
// supports Linux and pure C symbols.
// See talk/sound/pulseaudiosymboltable.(h|cc) for an example.

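// As a quick orientation (the names below are hypothetical, for illustration
// only), a header would declare a table with:
//
//   LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(FooSymbolTable)
//   LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(FooSymbolTable, foo_init)
//   LATE_BINDING_SYMBOL_TABLE_DECLARE_END(FooSymbolTable)
//
// and the matching .cc file would define it with:
//
//   LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(FooSymbolTable, "libfoo.so.1")
//   LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(FooSymbolTable, foo_init)
//   LATE_BINDING_SYMBOL_TABLE_DEFINE_END(FooSymbolTable)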
namespace webrtc {
namespace adm_linux {

#ifdef WEBRTC_LINUX
typedef void* DllHandle;

const DllHandle kInvalidDllHandle = NULL;
#else
#error Not implemented
#endif

// These are helpers for use only by the class below.
DllHandle InternalLoadDll(absl::string_view dll_name);

void InternalUnloadDll(DllHandle handle);

bool InternalLoadSymbols(DllHandle handle,
                         int num_symbols,
                         const char* const symbol_names[],
                         void* symbols[]);

template <int SYMBOL_TABLE_SIZE,
          const char kDllName[],
          const char* const kSymbolNames[]>
class LateBindingSymbolTable {
 public:
  LateBindingSymbolTable()
      : handle_(kInvalidDllHandle), undefined_symbols_(false) {
    memset(symbols_, 0, sizeof(symbols_));
  }

  ~LateBindingSymbolTable() { Unload(); }

  LateBindingSymbolTable(const LateBindingSymbolTable&) = delete;
  LateBindingSymbolTable& operator=(const LateBindingSymbolTable&) = delete;

  static int NumSymbols() { return SYMBOL_TABLE_SIZE; }

  // We do not use this, but we offer it for theoretical convenience.
  static const char* GetSymbolName(int index) {
    RTC_DCHECK_LT(index, NumSymbols());
    return kSymbolNames[index];
  }

  bool IsLoaded() const { return handle_ != kInvalidDllHandle; }

  // Loads the DLL and the symbol table. Returns true iff the DLL and symbol
  // table loaded successfully.
  bool Load() {
    if (IsLoaded()) {
      return true;
    }
    if (undefined_symbols_) {
      // We do not attempt to load again because repeated attempts are not
      // likely to succeed and DLL loading is costly.
      return false;
    }
    handle_ = InternalLoadDll(kDllName);
    if (!IsLoaded()) {
      return false;
    }
    if (!InternalLoadSymbols(handle_, NumSymbols(), kSymbolNames, symbols_)) {
      undefined_symbols_ = true;
      Unload();
      return false;
    }
    return true;
  }

  void Unload() {
    if (!IsLoaded()) {
      return;
    }
    InternalUnloadDll(handle_);
    handle_ = kInvalidDllHandle;
    memset(symbols_, 0, sizeof(symbols_));
  }

  // Retrieves the given symbol. NOTE: Recommended to use LATESYM_GET below
  // instead of this.
  void* GetSymbol(int index) const {
    RTC_DCHECK(IsLoaded());
    RTC_DCHECK_LT(index, NumSymbols());
    return symbols_[index];
  }

 private:
  DllHandle handle_;
  bool undefined_symbols_;
  void* symbols_[SYMBOL_TABLE_SIZE];
};

// This macro must be invoked in a header to declare a symbol table class.
#define LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(ClassName) enum {
// This macro must be invoked in the header declaration once for each symbol
// (recommended to use an X-Macro to avoid duplication).
// This macro defines an enum with names built from the symbols, which
// essentially creates a hash table in the compiler from symbol names to their
// indices in the symbol table class.
#define LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(ClassName, sym) \
  ClassName##_SYMBOL_TABLE_INDEX_##sym,

// This macro completes the header declaration.
#define LATE_BINDING_SYMBOL_TABLE_DECLARE_END(ClassName)       \
  ClassName##_SYMBOL_TABLE_SIZE                                \
  }                                                            \
  ;                                                            \
                                                               \
  extern const char ClassName##_kDllName[];                    \
  extern const char* const                                     \
      ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE]; \
                                                               \
  typedef ::webrtc::adm_linux::LateBindingSymbolTable<         \
      ClassName##_SYMBOL_TABLE_SIZE, ClassName##_kDllName,     \
      ClassName##_kSymbolNames>                                \
      ClassName;

// This macro must be invoked in a .cc file to define a previously-declared
// symbol table class.
#define LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(ClassName, dllName) \
  const char ClassName##_kDllName[] = dllName;                     \
  const char* const ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE] = {
// This macro must be invoked in the .cc definition once for each symbol
// (recommended to use an X-Macro to avoid duplication).
// This would have to use the mangled name if we were to ever support C++
// symbols.
#define LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(ClassName, sym) #sym,

#define LATE_BINDING_SYMBOL_TABLE_DEFINE_END(ClassName) \
  }                                                     \
  ;

// Index of a given symbol in the given symbol table class.
#define LATESYM_INDEXOF(ClassName, sym) (ClassName##_SYMBOL_TABLE_INDEX_##sym)

// Returns a reference to the given late-binded symbol, with the correct type.
#define LATESYM_GET(ClassName, inst, sym) \
  (*reinterpret_cast<__typeof__(&sym)>(   \
      (inst)->GetSymbol(LATESYM_INDEXOF(ClassName, sym))))
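// Usage example for LATESYM_GET (PulseAudioSymbolTable is the instantiation
// defined elsewhere in this module; the call-site shape is a sketch):
//
//   PulseAudioSymbolTable* table = ...;  // Some table instance.
//   if (table->Load()) {
//     pa_threaded_mainloop* loop =
//         LATESYM_GET(PulseAudioSymbolTable, table,
//                     pa_threaded_mainloop_new)();
//   }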

}  // namespace adm_linux
}  // namespace webrtc

#endif  // AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_

@ -0,0 +1,41 @@
/*
 * libjingle
 * Copyright 2004--2010, Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the documentation
 *     and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"

namespace webrtc {
namespace adm_linux_pulse {

LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
#define X(sym) \
  LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
PULSE_AUDIO_SYMBOLS_LIST
#undef X
LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
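// For illustration, after preprocessing the block above reduces to roughly:
//
//   const char PulseAudioSymbolTable_kDllName[] = "libpulse.so.0";
//   const char* const PulseAudioSymbolTable_kSymbolNames[] = {
//       "pa_bytes_per_second", "pa_context_connect",
//       /* ...one string per entry in PULSE_AUDIO_SYMBOLS_LIST... */};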

}  // namespace adm_linux_pulse
}  // namespace webrtc

@ -0,0 +1,106 @@
/*
 * libjingle
 * Copyright 2004--2010, Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the documentation
 *     and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_
#define AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_

#include "modules/audio_device/linux/latebindingsymboltable_linux.h"

namespace webrtc {
namespace adm_linux_pulse {

// The PulseAudio symbols we need, as an X-Macro list.
// This list must contain precisely every libpulse function that is used in
// the ADM LINUX PULSE Device and Mixer classes.
#define PULSE_AUDIO_SYMBOLS_LIST           \
  X(pa_bytes_per_second)                   \
  X(pa_context_connect)                    \
  X(pa_context_disconnect)                 \
  X(pa_context_errno)                      \
  X(pa_context_get_protocol_version)       \
  X(pa_context_get_server_info)            \
  X(pa_context_get_sink_info_list)         \
  X(pa_context_get_sink_info_by_index)     \
  X(pa_context_get_sink_info_by_name)      \
  X(pa_context_get_sink_input_info)        \
  X(pa_context_get_source_info_by_index)   \
  X(pa_context_get_source_info_by_name)    \
  X(pa_context_get_source_info_list)       \
  X(pa_context_get_state)                  \
  X(pa_context_new)                        \
  X(pa_context_set_sink_input_volume)      \
  X(pa_context_set_sink_input_mute)        \
  X(pa_context_set_source_volume_by_index) \
  X(pa_context_set_source_mute_by_index)   \
  X(pa_context_set_state_callback)         \
  X(pa_context_unref)                      \
  X(pa_cvolume_set)                        \
  X(pa_operation_get_state)                \
  X(pa_operation_unref)                    \
  X(pa_stream_connect_playback)            \
  X(pa_stream_connect_record)              \
  X(pa_stream_disconnect)                  \
  X(pa_stream_drop)                        \
  X(pa_stream_get_device_index)            \
  X(pa_stream_get_index)                   \
  X(pa_stream_get_latency)                 \
  X(pa_stream_get_sample_spec)             \
  X(pa_stream_get_state)                   \
  X(pa_stream_new)                         \
  X(pa_stream_peek)                        \
  X(pa_stream_readable_size)               \
  X(pa_stream_set_buffer_attr)             \
  X(pa_stream_set_overflow_callback)       \
  X(pa_stream_set_read_callback)           \
  X(pa_stream_set_state_callback)          \
  X(pa_stream_set_underflow_callback)      \
  X(pa_stream_set_write_callback)          \
  X(pa_stream_unref)                       \
  X(pa_stream_writable_size)               \
  X(pa_stream_write)                       \
  X(pa_strerror)                           \
  X(pa_threaded_mainloop_free)             \
  X(pa_threaded_mainloop_get_api)          \
  X(pa_threaded_mainloop_lock)             \
  X(pa_threaded_mainloop_new)              \
  X(pa_threaded_mainloop_signal)           \
  X(pa_threaded_mainloop_start)            \
  X(pa_threaded_mainloop_stop)             \
  X(pa_threaded_mainloop_unlock)           \
  X(pa_threaded_mainloop_wait)

LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(PulseAudioSymbolTable)
#define X(sym) \
  LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(PulseAudioSymbolTable, sym)
PULSE_AUDIO_SYMBOLS_LIST
#undef X
LATE_BINDING_SYMBOL_TABLE_DECLARE_END(PulseAudioSymbolTable)
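// Call sites in the PulseAudio device and mixer code reach these symbols
// through a LATE() convenience macro (e.g. LATE(pa_threaded_mainloop_wait)),
// which is assumed to be defined along these lines in the implementation
// files (a sketch, not the verbatim definition; GetPulseSymbolTable() stands
// in for whatever accessor returns the process-wide table instance):
//
//   #define LATE(sym)                                             \
//     LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
//                 GetPulseSymbolTable(), sym)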

}  // namespace adm_linux_pulse
}  // namespace webrtc

#endif  // AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_

@ -0,0 +1,38 @@
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
#define MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_

#include "absl/types/optional.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "test/gmock.h"

namespace webrtc {

class MockAudioDeviceBuffer : public AudioDeviceBuffer {
 public:
  using AudioDeviceBuffer::AudioDeviceBuffer;
  virtual ~MockAudioDeviceBuffer() {}
  MOCK_METHOD(int32_t, RequestPlayoutData, (size_t nSamples), (override));
  MOCK_METHOD(int32_t, GetPlayoutData, (void* audioBuffer), (override));
  MOCK_METHOD(int32_t,
              SetRecordedBuffer,
              (const void* audioBuffer,
               size_t nSamples,
               absl::optional<int64_t> capture_time_ns),
              (override));
  MOCK_METHOD(void, SetVQEData, (int playDelayMS, int recDelayMS), (override));
  MOCK_METHOD(int32_t, DeliverRecordedData, (), (override));
};

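// A minimal usage sketch (the device under test and the task queue factory
// here are hypothetical stand-ins):
//
//   MockAudioDeviceBuffer audio_buffer(task_queue_factory);
//   EXPECT_CALL(audio_buffer, RequestPlayoutData(::testing::_))
//       .WillRepeatedly(::testing::Return(480));
//   device_under_test.AttachAudioBuffer(&audio_buffer);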

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_

@ -0,0 +1,209 @@
/*
 * Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "modules/audio_device/test_audio_device_impl.h"

#include <memory>
#include <utility>

#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/units/time_delta.h"
#include "modules/audio_device/include/test_audio_device.h"
#include "rtc_base/checks.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/time_utils.h"

namespace webrtc {
namespace {

constexpr int kFrameLengthUs = 10000;

}  // namespace

TestAudioDevice::TestAudioDevice(
    TaskQueueFactory* task_queue_factory,
    std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
    std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
    float speed)
    : task_queue_factory_(task_queue_factory),
      capturer_(std::move(capturer)),
      renderer_(std::move(renderer)),
      process_interval_us_(kFrameLengthUs / speed),
      audio_buffer_(nullptr),
      rendering_(false),
      capturing_(false) {
  auto good_sample_rate = [](int sr) {
    return sr == 8000 || sr == 16000 || sr == 32000 || sr == 44100 ||
           sr == 48000;
  };

  if (renderer_) {
    const int sample_rate = renderer_->SamplingFrequency();
    playout_buffer_.resize(TestAudioDeviceModule::SamplesPerFrame(sample_rate) *
                               renderer_->NumChannels(),
                           0);
    RTC_CHECK(good_sample_rate(sample_rate));
  }
  if (capturer_) {
    RTC_CHECK(good_sample_rate(capturer_->SamplingFrequency()));
  }
}

AudioDeviceGeneric::InitStatus TestAudioDevice::Init() {
  task_queue_ = task_queue_factory_->CreateTaskQueue(
      "TestAudioDeviceModuleImpl", TaskQueueFactory::Priority::NORMAL);

  RepeatingTaskHandle::Start(task_queue_.get(), [this]() {
    ProcessAudio();
    return TimeDelta::Micros(process_interval_us_);
  });
  return InitStatus::OK;
}

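// The repeating task started in Init() drives all capture and playout: it
// re-arms itself by returning the delay until its next run, so ProcessAudio()
// fires every kFrameLengthUs / speed microseconds. With speed > 1 audio is
// pumped faster than real time. The task keeps running even when neither
// capturing nor rendering is active; ProcessAudio() then simply skips both
// branches.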

int32_t TestAudioDevice::PlayoutIsAvailable(bool& available) {
  MutexLock lock(&lock_);
  available = renderer_ != nullptr;
  return 0;
}

int32_t TestAudioDevice::InitPlayout() {
  MutexLock lock(&lock_);

  if (rendering_) {
    return -1;
  }

  if (audio_buffer_ != nullptr && renderer_ != nullptr) {
    // Update the webrtc audio buffer with the selected parameters.
    audio_buffer_->SetPlayoutSampleRate(renderer_->SamplingFrequency());
    audio_buffer_->SetPlayoutChannels(renderer_->NumChannels());
  }
  rendering_initialized_ = true;
  return 0;
}

bool TestAudioDevice::PlayoutIsInitialized() const {
  MutexLock lock(&lock_);
  return rendering_initialized_;
}

int32_t TestAudioDevice::StartPlayout() {
  MutexLock lock(&lock_);
  RTC_CHECK(renderer_);
  rendering_ = true;
  return 0;
}

int32_t TestAudioDevice::StopPlayout() {
  MutexLock lock(&lock_);
  rendering_ = false;
  return 0;
}

int32_t TestAudioDevice::RecordingIsAvailable(bool& available) {
  MutexLock lock(&lock_);
  available = capturer_ != nullptr;
  return 0;
}

int32_t TestAudioDevice::InitRecording() {
  MutexLock lock(&lock_);

  if (capturing_) {
    return -1;
  }

  if (audio_buffer_ != nullptr && capturer_ != nullptr) {
    // Update the webrtc audio buffer with the selected parameters.
    audio_buffer_->SetRecordingSampleRate(capturer_->SamplingFrequency());
    audio_buffer_->SetRecordingChannels(capturer_->NumChannels());
  }
  capturing_initialized_ = true;
  return 0;
}

bool TestAudioDevice::RecordingIsInitialized() const {
  MutexLock lock(&lock_);
  return capturing_initialized_;
}

int32_t TestAudioDevice::StartRecording() {
  MutexLock lock(&lock_);
  capturing_ = true;
  return 0;
}

int32_t TestAudioDevice::StopRecording() {
  MutexLock lock(&lock_);
  capturing_ = false;
  return 0;
}

bool TestAudioDevice::Playing() const {
  MutexLock lock(&lock_);
  return rendering_;
}

bool TestAudioDevice::Recording() const {
  MutexLock lock(&lock_);
  return capturing_;
}

void TestAudioDevice::ProcessAudio() {
  MutexLock lock(&lock_);
  if (audio_buffer_ == nullptr) {
    return;
  }
  if (capturing_ && capturer_ != nullptr) {
    // Capture 10 ms of audio. 2 bytes per sample.
    const bool keep_capturing = capturer_->Capture(&recording_buffer_);
    if (recording_buffer_.size() > 0) {
      audio_buffer_->SetRecordedBuffer(
          recording_buffer_.data(),
          recording_buffer_.size() / capturer_->NumChannels(),
          absl::make_optional(rtc::TimeNanos()));
      audio_buffer_->DeliverRecordedData();
    }
    if (!keep_capturing) {
      capturing_ = false;
    }
  }
  if (rendering_) {
    const int sampling_frequency = renderer_->SamplingFrequency();
    int32_t samples_per_channel = audio_buffer_->RequestPlayoutData(
        TestAudioDeviceModule::SamplesPerFrame(sampling_frequency));
    audio_buffer_->GetPlayoutData(playout_buffer_.data());
    size_t samples_out = samples_per_channel * renderer_->NumChannels();
    RTC_CHECK_LE(samples_out, playout_buffer_.size());
    const bool keep_rendering = renderer_->Render(
        rtc::ArrayView<const int16_t>(playout_buffer_.data(), samples_out));
    if (!keep_rendering) {
      rendering_ = false;
    }
  }
}

void TestAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
  MutexLock lock(&lock_);
  RTC_DCHECK(audio_buffer || audio_buffer_);
  audio_buffer_ = audio_buffer;

  if (renderer_ != nullptr) {
    audio_buffer_->SetPlayoutSampleRate(renderer_->SamplingFrequency());
    audio_buffer_->SetPlayoutChannels(renderer_->NumChannels());
  }
  if (capturer_ != nullptr) {
    audio_buffer_->SetRecordingSampleRate(capturer_->SamplingFrequency());
    audio_buffer_->SetRecordingChannels(capturer_->NumChannels());
  }
}

}  // namespace webrtc

@ -0,0 +1,198 @@
/*
 * Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_DEVICE_TEST_AUDIO_DEVICE_IMPL_H_
#define MODULES_AUDIO_DEVICE_TEST_AUDIO_DEVICE_IMPL_H_

#include <memory>
#include <vector>

#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/audio_device/include/test_audio_device.h"
#include "rtc_base/buffer.h"
#include "rtc_base/synchronization/mutex.h"

namespace webrtc {

class TestAudioDevice : public AudioDeviceGeneric {
 public:
  // Creates a new TestAudioDevice. When capturing or playing, 10 ms audio
  // frames will be processed every 10 ms / `speed`.
  // `capturer` is an object that produces audio data. Can be nullptr if this
  // device is never used for recording.
  // `renderer` is an object that receives audio data that would have been
  // played out. Can be nullptr if this device is never used for playing.
  TestAudioDevice(TaskQueueFactory* task_queue_factory,
                  std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
                  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
                  float speed = 1);
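  // For example, a playout-only device running at twice real-time speed
  // (the speed value here is purely illustrative):
  //
  //   TestAudioDevice audio_device(task_queue_factory,
  //                                /*capturer=*/nullptr,
  //                                std::move(renderer),
  //                                /*speed=*/2.0f);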
  TestAudioDevice(const TestAudioDevice&) = delete;
  TestAudioDevice& operator=(const TestAudioDevice&) = delete;
  ~TestAudioDevice() override = default;

  // Retrieve the currently utilized audio layer.
  int32_t ActiveAudioLayer(
      AudioDeviceModule::AudioLayer& audioLayer) const override {
    return 0;
  }

  // Main initialization and termination
  InitStatus Init() override;
  int32_t Terminate() override { return 0; }
  bool Initialized() const override { return true; }

  // Device enumeration
  int16_t PlayoutDevices() override { return 0; }
  int16_t RecordingDevices() override { return 0; }
  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]) override {
    return 0;
  }
  int32_t RecordingDeviceName(uint16_t index,
                              char name[kAdmMaxDeviceNameSize],
                              char guid[kAdmMaxGuidSize]) override {
    return 0;
  }

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index) override { return 0; }
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) override {
    return 0;
  }
  int32_t SetRecordingDevice(uint16_t index) override { return 0; }
  int32_t SetRecordingDevice(
      AudioDeviceModule::WindowsDeviceType device) override {
    return 0;
  }

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available) override;
  int32_t InitPlayout() override;
  bool PlayoutIsInitialized() const override;
  int32_t RecordingIsAvailable(bool& available) override;
  int32_t InitRecording() override;
  bool RecordingIsInitialized() const override;

  // Audio transport control
  int32_t StartPlayout() override;
  int32_t StopPlayout() override;
  bool Playing() const override;
  int32_t StartRecording() override;
  int32_t StopRecording() override;
  bool Recording() const override;

  // Audio mixer initialization
  int32_t InitSpeaker() override { return 0; }
  bool SpeakerIsInitialized() const override { return true; }
  int32_t InitMicrophone() override { return 0; }
  bool MicrophoneIsInitialized() const override { return true; }

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available) override { return 0; }
  int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
  int32_t SpeakerVolume(uint32_t& volume) const override { return 0; }
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override { return 0; }
  int32_t MinSpeakerVolume(uint32_t& minVolume) const override { return 0; }

  // Microphone volume controls
  int32_t MicrophoneVolumeIsAvailable(bool& available) override { return 0; }
  int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
  int32_t MicrophoneVolume(uint32_t& volume) const override { return 0; }
  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override { return 0; }
  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override { return 0; }

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available) override { return 0; }
  int32_t SetSpeakerMute(bool enable) override { return 0; }
  int32_t SpeakerMute(bool& enabled) const override { return 0; }

  // Microphone mute control
  int32_t MicrophoneMuteIsAvailable(bool& available) override { return 0; }
  int32_t SetMicrophoneMute(bool enable) override { return 0; }
  int32_t MicrophoneMute(bool& enabled) const override { return 0; }

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available) override {
    available = false;
    return 0;
  }
  int32_t SetStereoPlayout(bool enable) override { return 0; }
  int32_t StereoPlayout(bool& enabled) const override { return 0; }
  int32_t StereoRecordingIsAvailable(bool& available) override {
    available = false;
    return 0;
  }
  int32_t SetStereoRecording(bool enable) override { return 0; }
  int32_t StereoRecording(bool& enabled) const override { return 0; }

  // Delay information and control
  int32_t PlayoutDelay(uint16_t& delayMS) const override {
    delayMS = 0;
    return 0;
  }

  // Android only
  bool BuiltInAECIsAvailable() const override { return false; }
  bool BuiltInAGCIsAvailable() const override { return false; }
  bool BuiltInNSIsAvailable() const override { return false; }

  // Windows Core Audio and Android only.
  int32_t EnableBuiltInAEC(bool enable) override { return -1; }
  int32_t EnableBuiltInAGC(bool enable) override { return -1; }
  int32_t EnableBuiltInNS(bool enable) override { return -1; }

  // Play underrun count.
  int32_t GetPlayoutUnderrunCount() const override { return -1; }

  // iOS only.
  // TODO(henrika): add Android support.
#if defined(WEBRTC_IOS)
  int GetPlayoutAudioParameters(AudioParameters* params) const override {
    return -1;
  }
  int GetRecordAudioParameters(AudioParameters* params) const override {
    return -1;
  }
#endif  // WEBRTC_IOS

  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;

 private:
  void ProcessAudio();

  TaskQueueFactory* const task_queue_factory_;
  const std::unique_ptr<TestAudioDeviceModule::Capturer> capturer_
      RTC_GUARDED_BY(lock_);
  const std::unique_ptr<TestAudioDeviceModule::Renderer> renderer_
      RTC_GUARDED_BY(lock_);
  const int64_t process_interval_us_;

  mutable Mutex lock_;
  AudioDeviceBuffer* audio_buffer_ RTC_GUARDED_BY(lock_) = nullptr;
  bool rendering_ RTC_GUARDED_BY(lock_) = false;
  bool capturing_ RTC_GUARDED_BY(lock_) = false;
  bool rendering_initialized_ RTC_GUARDED_BY(lock_) = false;
  bool capturing_initialized_ RTC_GUARDED_BY(lock_) = false;

  std::vector<int16_t> playout_buffer_ RTC_GUARDED_BY(lock_);
  rtc::BufferT<int16_t> recording_buffer_ RTC_GUARDED_BY(lock_);
  std::unique_ptr<TaskQueueBase, TaskQueueDeleter> task_queue_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_TEST_AUDIO_DEVICE_IMPL_H_

@ -0,0 +1,275 @@
/*
 * Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "modules/audio_device/test_audio_device_impl.h"

#include <cstring>
#include <memory>
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/audio_device_generic.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "modules/audio_device/include/test_audio_device.h"
#include "rtc_base/checks.h"
#include "rtc_base/synchronization/mutex.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/time_controller/simulated_time_controller.h"

namespace webrtc {
namespace {

using ::testing::ElementsAre;

constexpr Timestamp kStartTime = Timestamp::Millis(10000);

class TestAudioTransport : public AudioTransport {
 public:
  enum class Mode { kPlaying, kRecording };

  explicit TestAudioTransport(Mode mode) : mode_(mode) {}
  ~TestAudioTransport() override = default;

  int32_t RecordedDataIsAvailable(
      const void* audioSamples,
      size_t samples_per_channel,
      size_t bytes_per_sample,
      size_t number_of_channels,
      uint32_t samples_per_second,
      uint32_t total_delay_ms,
      int32_t clock_drift,
      uint32_t current_mic_level,
      bool key_pressed,
      uint32_t& new_mic_level,
      absl::optional<int64_t> estimated_capture_time_ns) override {
    new_mic_level = 1;

    if (mode_ != Mode::kRecording) {
      EXPECT_TRUE(false) << "RecordedDataIsAvailable mustn't be called when "
                            "mode isn't kRecording";
      return -1;
    }

    MutexLock lock(&mutex_);
    samples_per_channel_.push_back(samples_per_channel);
    number_of_channels_.push_back(number_of_channels);
    bytes_per_sample_.push_back(bytes_per_sample);
    samples_per_second_.push_back(samples_per_second);
    return 0;
  }

  int32_t NeedMorePlayData(size_t samples_per_channel,
                           size_t bytes_per_sample,
                           size_t number_of_channels,
                           uint32_t samples_per_second,
                           void* audio_samples,
                           size_t& samples_out,
                           int64_t* elapsed_time_ms,
                           int64_t* ntp_time_ms) override {
    const size_t num_bytes = samples_per_channel * number_of_channels;
    std::memset(audio_samples, 1, num_bytes);
    samples_out = samples_per_channel * number_of_channels;
    *elapsed_time_ms = 0;
    *ntp_time_ms = 0;

    if (mode_ != Mode::kPlaying) {
      EXPECT_TRUE(false)
          << "NeedMorePlayData mustn't be called when mode isn't kPlaying";
      return -1;
    }

    MutexLock lock(&mutex_);
    samples_per_channel_.push_back(samples_per_channel);
    number_of_channels_.push_back(number_of_channels);
    bytes_per_sample_.push_back(bytes_per_sample);
    samples_per_second_.push_back(samples_per_second);
    return 0;
  }

  int32_t RecordedDataIsAvailable(const void* audio_samples,
                                  size_t samples_per_channel,
                                  size_t bytes_per_sample,
                                  size_t number_of_channels,
                                  uint32_t samples_per_second,
                                  uint32_t total_delay_ms,
                                  int32_t clockDrift,
                                  uint32_t current_mic_level,
                                  bool key_pressed,
                                  uint32_t& new_mic_level) override {
RTC_CHECK(false) << "This methods should be never executed";
|
||||
  }

  void PullRenderData(int bits_per_sample,
                      int sample_rate,
                      size_t number_of_channels,
                      size_t number_of_frames,
                      void* audio_data,
                      int64_t* elapsed_time_ms,
                      int64_t* ntp_time_ms) override {
RTC_CHECK(false) << "This methods should be never executed";
|
||||
  }

  std::vector<size_t> samples_per_channel() const {
    MutexLock lock(&mutex_);
    return samples_per_channel_;
  }
  std::vector<size_t> number_of_channels() const {
    MutexLock lock(&mutex_);
    return number_of_channels_;
  }
  std::vector<size_t> bytes_per_sample() const {
    MutexLock lock(&mutex_);
    return bytes_per_sample_;
  }
  std::vector<size_t> samples_per_second() const {
    MutexLock lock(&mutex_);
    return samples_per_second_;
  }

 private:
  const Mode mode_;

  mutable Mutex mutex_;
  std::vector<size_t> samples_per_channel_ RTC_GUARDED_BY(mutex_);
  std::vector<size_t> number_of_channels_ RTC_GUARDED_BY(mutex_);
  std::vector<size_t> bytes_per_sample_ RTC_GUARDED_BY(mutex_);
  std::vector<size_t> samples_per_second_ RTC_GUARDED_BY(mutex_);
};

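// TestAudioTransport records the parameters of every audio callback it
// receives, so the tests below can assert the exact shape of the 10 ms frames
// that were pumped over the start/advance/advance/stop sequence: three frames
// of 480 samples per channel at 48 kHz stereo, with bytes_per_sample of 4
// (2-byte samples times 2 channels).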
TEST(TestAudioDeviceTest, EnablingRecordingProducesAudio) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  TestAudioTransport audio_transport(TestAudioTransport::Mode::kRecording);
  AudioDeviceBuffer audio_buffer(time_controller.GetTaskQueueFactory());
  ASSERT_EQ(audio_buffer.RegisterAudioCallback(&audio_transport), 0);
  std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer> capturer =
      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
          /*max_amplitude=*/1000,
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);

  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
                               std::move(capturer),
                               /*renderer=*/nullptr);
  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
  audio_device.AttachAudioBuffer(&audio_buffer);

  EXPECT_FALSE(audio_device.RecordingIsInitialized());
  ASSERT_EQ(audio_device.InitRecording(), 0);
  EXPECT_TRUE(audio_device.RecordingIsInitialized());
  audio_buffer.StartRecording();
  ASSERT_EQ(audio_device.StartRecording(), 0);
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_TRUE(audio_device.Recording());
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_EQ(audio_device.StopRecording(), 0);
  audio_buffer.StopRecording();

  EXPECT_THAT(audio_transport.samples_per_channel(),
              ElementsAre(480, 480, 480));
  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
  EXPECT_THAT(audio_transport.samples_per_second(),
              ElementsAre(48000, 48000, 48000));
}

TEST(TestAudioDeviceTest, RecordingIsAvailableWhenCapturerIsSet) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer> capturer =
      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
          /*max_amplitude=*/1000,
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);

  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
                               std::move(capturer),
                               /*renderer=*/nullptr);
  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);

  bool available;
  EXPECT_EQ(audio_device.RecordingIsAvailable(available), 0);
  EXPECT_TRUE(available);
}

TEST(TestAudioDeviceTest, RecordingIsNotAvailableWhenCapturerIsNotSet) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
                               /*capturer=*/nullptr,
                               /*renderer=*/nullptr);
  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);

  bool available;
  EXPECT_EQ(audio_device.RecordingIsAvailable(available), 0);
  EXPECT_FALSE(available);
}

TEST(TestAudioDeviceTest, EnablingPlayoutProducesAudio) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  TestAudioTransport audio_transport(TestAudioTransport::Mode::kPlaying);
  AudioDeviceBuffer audio_buffer(time_controller.GetTaskQueueFactory());
  ASSERT_EQ(audio_buffer.RegisterAudioCallback(&audio_transport), 0);
  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer =
      TestAudioDeviceModule::CreateDiscardRenderer(
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);

  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
                               /*capturer=*/nullptr, std::move(renderer));
  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
  audio_device.AttachAudioBuffer(&audio_buffer);

  EXPECT_FALSE(audio_device.PlayoutIsInitialized());
  ASSERT_EQ(audio_device.InitPlayout(), 0);
  EXPECT_TRUE(audio_device.PlayoutIsInitialized());
  audio_buffer.StartPlayout();
  ASSERT_EQ(audio_device.StartPlayout(), 0);
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_TRUE(audio_device.Playing());
  time_controller.AdvanceTime(TimeDelta::Millis(10));
  ASSERT_EQ(audio_device.StopPlayout(), 0);
  audio_buffer.StopPlayout();

  EXPECT_THAT(audio_transport.samples_per_channel(),
              ElementsAre(480, 480, 480));
  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
  EXPECT_THAT(audio_transport.samples_per_second(),
              ElementsAre(48000, 48000, 48000));
}

TEST(TestAudioDeviceTest, PlayoutIsAvailableWhenRendererIsSet) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer =
      TestAudioDeviceModule::CreateDiscardRenderer(
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);

  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
                               /*capturer=*/nullptr, std::move(renderer));
  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);

  bool available;
  EXPECT_EQ(audio_device.PlayoutIsAvailable(available), 0);
  EXPECT_TRUE(available);
}

TEST(TestAudioDeviceTest, PlayoutIsNotAvailableWhenRendererIsNotSet) {
  GlobalSimulatedTimeController time_controller(kStartTime);
  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
                               /*capturer=*/nullptr,
                               /*renderer=*/nullptr);
  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);

  bool available;
  EXPECT_EQ(audio_device.PlayoutIsAvailable(available), 0);
  EXPECT_FALSE(available);
}

}  // namespace
}  // namespace webrtc