Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,3 @@
include_rules = [
"+logging/rtc_event_log",
]

View file

@ -0,0 +1,17 @@
# NetEQ RTP Play tool
## Testing of the command line arguments
The command line tool `neteq_rtpplay` can be tested by running `neteq_rtpplay_test.sh`. The script is not used on the try bots, but it can be run before submitting any CL that may break the behavior of the command line arguments of `neteq_rtpplay`.
Run `neteq_rtpplay_test.sh` as follows from the `src/` folder:
```
src$ ./modules/audio_coding/neteq/tools/neteq_rtpplay_test.sh \
out/Default/neteq_rtpplay \
resources/audio_coding/neteq_opus.rtp \
resources/short_mixed_mono_48.pcm
```
You can replace the RTP and PCM files with any other compatible files.
If you get an error using the files indicated above, try running `gclient sync`.
Requirements: `awk` and `md5sum`.

View file

@ -0,0 +1,64 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
#include <memory>
#include <string>
#include "modules/audio_coding/neteq/tools/audio_sink.h"
#include "rtc_base/buffer.h"
#include "rtc_base/message_digest.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/system/arch.h"
namespace webrtc {
namespace test {
// An AudioSink that computes an MD5 checksum over every sample written to it.
// The checksum is finalized and returned by Finish(); once finalized, further
// calls to WriteArray() are rejected.
class AudioChecksum : public AudioSink {
 public:
  AudioChecksum()
      : checksum_(rtc::MessageDigestFactory::Create(rtc::DIGEST_MD5)),
        checksum_result_(checksum_->Size()),
        finished_(false) {}

  AudioChecksum(const AudioChecksum&) = delete;
  AudioChecksum& operator=(const AudioChecksum&) = delete;

  bool WriteArray(const int16_t* audio, size_t num_samples) override {
#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
#error "Big-endian gives a different checksum"
#endif
    // Refuse further input once the checksum has been finalized.
    if (finished_)
      return false;
    checksum_->Update(audio, num_samples * sizeof(audio[0]));
    return true;
  }

  // Finalizes the digest on the first call; always returns the checksum as a
  // hex-encoded string.
  std::string Finish() {
    if (!finished_) {
      checksum_->Finish(checksum_result_.data(), checksum_result_.size());
      finished_ = true;
    }
    return rtc::hex_encode(checksum_result_);
  }

 private:
  std::unique_ptr<rtc::MessageDigest> checksum_;
  rtc::Buffer checksum_result_;
  bool finished_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_

View file

@ -0,0 +1,61 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include <stdio.h>
#include <string.h>
#include "absl/strings/string_view.h"
namespace webrtc {
namespace test {
// Reads up to `max_loop_length_samples` 16-bit samples from `file_name` and
// prepares the loop buffer. Returns false if the file cannot be opened or is
// shorter than one block.
bool AudioLoop::Init(absl::string_view file_name,
                     size_t max_loop_length_samples,
                     size_t block_length_samples) {
  FILE* input_file = fopen(std::string(file_name).c_str(), "rb");
  if (input_file == nullptr)
    return false;
  // Allocate room for the loop plus one extra block used for wrap-around.
  audio_array_.reset(
      new int16_t[max_loop_length_samples + block_length_samples]);
  const size_t num_read = fread(audio_array_.get(), sizeof(int16_t),
                                max_loop_length_samples, input_file);
  fclose(input_file);
  // The loop must be at least one block long.
  if (num_read < block_length_samples)
    return false;
  // Duplicate the first block after the end of the loop, so that a block
  // straddling the loop boundary can always be read contiguously.
  memcpy(&audio_array_[num_read], audio_array_.get(),
         block_length_samples * sizeof(int16_t));
  loop_length_samples_ = num_read;
  block_length_samples_ = block_length_samples;
  next_index_ = 0;
  return true;
}
// Returns the next block of looped audio, or an empty view if Init() has not
// succeeded yet.
rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
  if (block_length_samples_ == 0) {
    // Not initialized.
    return rtc::ArrayView<const int16_t>();
  }
  const size_t start_index = next_index_;
  // Advance the cursor, wrapping at the loop length. The extra block that
  // Init() appended past the end guarantees the returned view is contiguous.
  next_index_ = (start_index + block_length_samples_) % loop_length_samples_;
  return rtc::ArrayView<const int16_t>(&audio_array_[start_index],
                                       block_length_samples_);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,57 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "api/array_view.h"
namespace webrtc {
namespace test {
// Class serving as an infinite source of audio, realized by looping an audio
// clip.
// Class serving as an infinite source of audio, realized by looping an audio
// clip read from file.
class AudioLoop {
 public:
  AudioLoop() = default;
  virtual ~AudioLoop() = default;

  AudioLoop(const AudioLoop&) = delete;
  AudioLoop& operator=(const AudioLoop&) = delete;

  // Initializes the AudioLoop by reading from `file_name`. The loop will be no
  // longer than `max_loop_length_samples`, if the length of the file is
  // greater. Otherwise, the loop length is the same as the file length.
  // The audio will be delivered in blocks of `block_length_samples`.
  // Returns false if the initialization failed, otherwise true.
  bool Init(absl::string_view file_name,
            size_t max_loop_length_samples,
            size_t block_length_samples);

  // Returns a view of the next block of audio. Its size equals the
  // `block_length_samples` passed to Init().
  rtc::ArrayView<const int16_t> GetNextBlock();

 private:
  size_t next_index_ = 0;
  size_t loop_length_samples_ = 0;
  size_t block_length_samples_ = 0;
  std::unique_ptr<int16_t[]> audio_array_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/audio_sink.h"
namespace webrtc {
namespace test {
// Writes to the left sink first; the right sink is only written if the left
// write succeeds (short-circuit), and the call succeeds only if both do.
bool AudioSinkFork::WriteArray(const int16_t* audio, size_t num_samples) {
  if (!left_sink_->WriteArray(audio, num_samples)) {
    return false;
  }
  return right_sink_->WriteArray(audio, num_samples);
}
// Discards the audio and always reports success.
bool VoidAudioSink::WriteArray(const int16_t* /*audio*/,
                               size_t /*num_samples*/) {
  return true;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,70 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
#include "api/audio/audio_frame.h"
namespace webrtc {
namespace test {
// Interface class for an object receiving raw output audio from test
// applications.
// Interface class for an object receiving raw output audio from test
// applications.
class AudioSink {
 public:
  AudioSink() = default;
  virtual ~AudioSink() = default;

  AudioSink(const AudioSink&) = delete;
  AudioSink& operator=(const AudioSink&) = delete;

  // Writes `num_samples` from `audio` to the AudioSink. Returns true if
  // successful, otherwise false.
  virtual bool WriteArray(const int16_t* audio, size_t num_samples) = 0;

  // Convenience wrapper: writes all samples of `audio_frame` (every channel,
  // interleaved) through WriteArray().
  bool WriteAudioFrame(const AudioFrame& audio_frame) {
    const size_t num_samples =
        audio_frame.samples_per_channel_ * audio_frame.num_channels_;
    return WriteArray(audio_frame.data(), num_samples);
  }
};
// Forks the output audio to two AudioSink objects.
class AudioSinkFork : public AudioSink {
public:
AudioSinkFork(AudioSink* left, AudioSink* right)
: left_sink_(left), right_sink_(right) {}
AudioSinkFork(const AudioSinkFork&) = delete;
AudioSinkFork& operator=(const AudioSinkFork&) = delete;
bool WriteArray(const int16_t* audio, size_t num_samples) override;
private:
AudioSink* left_sink_;
AudioSink* right_sink_;
};
// An AudioSink implementation that does nothing.
class VoidAudioSink : public AudioSink {
public:
VoidAudioSink() = default;
VoidAudioSink(const VoidAudioSink&) = delete;
VoidAudioSink& operator=(const VoidAudioSink&) = delete;
bool WriteArray(const int16_t* audio, size_t num_samples) override;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
#include <algorithm>
#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "modules/audio_coding/neteq/tools/packet.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace test {
// Constructs the source and pre-encodes the constant sample once.
ConstantPcmPacketSource::ConstantPcmPacketSource(size_t payload_len_samples,
                                                 int16_t sample_value,
                                                 int sample_rate_hz,
                                                 int payload_type)
    : payload_len_samples_(payload_len_samples),
      // Each sample is PCM16b-encoded as 2 bytes; add the RTP header length.
      packet_len_bytes_(2 * payload_len_samples_ + kHeaderLenBytes),
      samples_per_ms_(sample_rate_hz / 1000),
      next_arrival_time_ms_(0.0),
      payload_type_(payload_type),
      seq_number_(0),
      timestamp_(0),
      payload_ssrc_(0xABCD1234) {
  // Encode the single constant sample; NextPacket() repeats these two bytes
  // to fill each payload.
  size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
  RTC_CHECK_EQ(2U, encoded_len);
}
// Produces the next packet: a 12-byte RTP header followed by the pre-encoded
// constant sample repeated over the whole payload.
std::unique_ptr<Packet> ConstantPcmPacketSource::NextPacket() {
  RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
  rtc::CopyOnWriteBuffer packet_buffer(packet_len_bytes_);
  uint8_t* packet_memory = packet_buffer.MutableData();
  // The payload alternates the two bytes of the encoded sample.
  uint8_t* payload = packet_memory + kHeaderLenBytes;
  for (size_t i = 0; i < 2 * payload_len_samples_; ++i) {
    payload[i] = encoded_sample_[i % 2];
  }
  WriteHeader(packet_memory);
  auto packet =
      std::make_unique<Packet>(std::move(packet_buffer), next_arrival_time_ms_);
  // Advance the arrival time by the packet duration in milliseconds.
  next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_;
  return packet;
}
// Writes a minimal 12-byte RTP header to `packet_memory` and then advances
// the sequence number and timestamp for the next packet. All multi-byte
// fields are written big-endian (network byte order).
void ConstantPcmPacketSource::WriteHeader(uint8_t* packet_memory) {
  packet_memory[0] = 0x80;  // Version 2; padding, extension and CC all zero.
  packet_memory[1] = static_cast<uint8_t>(payload_type_);
  // Sequence number, 16 bits.
  packet_memory[2] = seq_number_ >> 8;
  packet_memory[3] = seq_number_ & 0xFF;
  // RTP timestamp, 32 bits.
  packet_memory[4] = timestamp_ >> 24;
  packet_memory[5] = (timestamp_ >> 16) & 0xFF;
  packet_memory[6] = (timestamp_ >> 8) & 0xFF;
  packet_memory[7] = timestamp_ & 0xFF;
  // SSRC, 32 bits.
  packet_memory[8] = payload_ssrc_ >> 24;
  packet_memory[9] = (payload_ssrc_ >> 16) & 0xFF;
  packet_memory[10] = (payload_ssrc_ >> 8) & 0xFF;
  packet_memory[11] = payload_ssrc_ & 0xFF;
  ++seq_number_;
  // The RTP timestamp advances one tick per sample.
  timestamp_ += static_cast<uint32_t>(payload_len_samples_);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,55 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
#include <stdio.h>
#include <string>
#include "modules/audio_coding/neteq/tools/packet_source.h"
namespace webrtc {
namespace test {
// This class implements a packet source that delivers PCM16b encoded packets
// with a constant sample value. The payload length, constant sample value,
// sample rate, and payload type are all set in the constructor.
// This class implements a packet source that delivers PCM16b encoded packets
// with a constant sample value. The payload length, constant sample value,
// sample rate, and payload type are all set in the constructor.
class ConstantPcmPacketSource : public PacketSource {
 public:
  ConstantPcmPacketSource(size_t payload_len_samples,
                          int16_t sample_value,
                          int sample_rate_hz,
                          int payload_type);

  ConstantPcmPacketSource(const ConstantPcmPacketSource&) = delete;
  ConstantPcmPacketSource& operator=(const ConstantPcmPacketSource&) = delete;

  // Returns the next constant-payload packet.
  std::unique_ptr<Packet> NextPacket() override;

 private:
  // Writes the RTP header to `packet_memory` and advances the sequence
  // number and timestamp.
  void WriteHeader(uint8_t* packet_memory);

  // Length of the RTP header. static constexpr: a compile-time constant does
  // not need per-instance storage (it was previously a non-static const
  // member, adding a word to every object).
  static constexpr size_t kHeaderLenBytes = 12;

  const size_t payload_len_samples_;
  const size_t packet_len_bytes_;  // Header plus 2 bytes per sample.
  uint8_t encoded_sample_[2];      // One sample, PCM16b-encoded.
  const int samples_per_ms_;
  double next_arrival_time_ms_;
  const int payload_type_;
  uint16_t seq_number_;
  uint32_t timestamp_;
  const uint32_t payload_ssrc_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_

View file

@ -0,0 +1,94 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/encode_neteq_input.h"
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
namespace test {
// Constructs the input and immediately encodes the first packet, so that
// NextPacketTime() and NextHeader() are valid right away.
EncodeNetEqInput::EncodeNetEqInput(std::unique_ptr<Generator> generator,
                                   std::unique_ptr<AudioEncoder> encoder,
                                   int64_t input_duration_ms)
    : generator_(std::move(generator)),
      encoder_(std::move(encoder)),
      input_duration_ms_(input_duration_ms) {
  // Line up the first packet so the getters have something to report.
  CreatePacket();
}

EncodeNetEqInput::~EncodeNetEqInput() = default;
absl::optional<int64_t> EncodeNetEqInput::NextPacketTime() const {
  // A packet is always pre-created by the constructor or by PopPacket().
  RTC_DCHECK(packet_data_);
  return static_cast<int64_t>(packet_data_->time_ms);
}

absl::optional<int64_t> EncodeNetEqInput::NextOutputEventTime() const {
  return next_output_event_ms_;
}
// Returns the pre-created packet and immediately encodes the next one, so a
// packet is always pending.
std::unique_ptr<NetEqInput::PacketData> EncodeNetEqInput::PopPacket() {
  RTC_DCHECK(packet_data_);
  // Grab the packet to return...
  std::unique_ptr<PacketData> packet_to_return = std::move(packet_data_);
  // ... and line up the next packet for future use.
  CreatePacket();
  return packet_to_return;
}

void EncodeNetEqInput::AdvanceOutputEvent() {
  next_output_event_ms_ += kOutputPeriodMs;
}

bool EncodeNetEqInput::ended() const {
  // The input ends once output events have covered the requested duration.
  return next_output_event_ms_ > input_duration_ms_;
}

absl::optional<RTPHeader> EncodeNetEqInput::NextHeader() const {
  RTC_DCHECK(packet_data_);
  return packet_data_->header;
}
// Encodes generator audio, one output period at a time, until the encoder
// emits a packet (some encoders aggregate several periods per packet), then
// fills in the RTP header fields and arrival time.
void EncodeNetEqInput::CreatePacket() {
  // Create a new PacketData object.
  RTC_DCHECK(!packet_data_);
  packet_data_.reset(new NetEqInput::PacketData);
  RTC_DCHECK_EQ(packet_data_->payload.size(), 0);

  // Loop until we get a packet.
  AudioEncoder::EncodedInfo info;
  RTC_DCHECK(!info.send_even_if_empty);
  int num_blocks = 0;
  while (packet_data_->payload.size() == 0 && !info.send_even_if_empty) {
    // One output period (kOutputPeriodMs) worth of samples per encode call.
    const size_t num_samples = rtc::CheckedDivExact(
        static_cast<int>(encoder_->SampleRateHz() * kOutputPeriodMs), 1000);
    info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
                            &packet_data_->payload);
    // Advance the RTP timestamp in the encoder's RTP clock rate, which may
    // differ from its sample rate.
    rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
        num_samples * encoder_->RtpTimestampRateHz() /
        encoder_->SampleRateHz());
    ++num_blocks;
  }
  packet_data_->header.timestamp = info.encoded_timestamp;
  packet_data_->header.payloadType = info.payload_type;
  packet_data_->header.sequenceNumber = sequence_number_++;
  packet_data_->time_ms = next_packet_time_ms_;
  // The packet covers `num_blocks` output periods.
  next_packet_time_ms_ += num_blocks * kOutputPeriodMs;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,75 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
#include <memory>
#include "api/audio_codecs/audio_encoder.h"
#include "modules/audio_coding/neteq/tools/neteq_input.h"
namespace webrtc {
namespace test {
// This class provides a NetEqInput that takes audio from a generator object and
// encodes it using a given audio encoder.
// This class provides a NetEqInput that takes audio from a generator object
// and encodes it using a given audio encoder.
class EncodeNetEqInput : public NetEqInput {
 public:
  // Generator class, to be provided to the EncodeNetEqInput constructor.
  class Generator {
   public:
    virtual ~Generator() = default;
    // Returns the next `num_samples` values from the signal generator.
    virtual rtc::ArrayView<const int16_t> Generate(size_t num_samples) = 0;
  };

  // The source will end after the given input duration.
  EncodeNetEqInput(std::unique_ptr<Generator> generator,
                   std::unique_ptr<AudioEncoder> encoder,
                   int64_t input_duration_ms);
  ~EncodeNetEqInput() override;

  absl::optional<int64_t> NextPacketTime() const override;

  absl::optional<int64_t> NextOutputEventTime() const override;

  // This input type never issues minimum-delay events.
  absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo() const override {
    return absl::nullopt;
  }

  std::unique_ptr<PacketData> PopPacket() override;

  void AdvanceOutputEvent() override;

  void AdvanceSetMinimumDelay() override {}

  bool ended() const override;

  absl::optional<RTPHeader> NextHeader() const override;

 private:
  static constexpr int64_t kOutputPeriodMs = 10;

  // Encodes audio until the encoder produces a packet, stored in
  // `packet_data_`.
  void CreatePacket();

  std::unique_ptr<Generator> generator_;
  std::unique_ptr<AudioEncoder> encoder_;
  std::unique_ptr<PacketData> packet_data_;
  uint32_t rtp_timestamp_ = 0;
  // RTP sequence numbers are unsigned 16-bit. uint16_t (previously int16_t)
  // gives well-defined wrap-around at 65535 instead of signed-overflow UB
  // when incrementing past 32767.
  uint16_t sequence_number_ = 0;
  int64_t next_packet_time_ms_ = 0;
  int64_t next_output_event_ms_ = 0;
  const int64_t input_duration_ms_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_

View file

@ -0,0 +1,122 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "modules/rtp_rtcp/source/byte_io.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
namespace test {
namespace {

// Encoded-frame wrapper that either produces comfort noise (for DTX packets)
// or pulls samples from the owning FakeDecodeFromFile's input file.
class FakeEncodedFrame : public AudioDecoder::EncodedAudioFrame {
 public:
  FakeEncodedFrame(FakeDecodeFromFile* decoder,
                   uint32_t timestamp,
                   size_t duration,
                   bool is_dtx)
      : decoder_(decoder),
        timestamp_(timestamp),
        duration_(duration),
        is_dtx_(is_dtx) {}

  size_t Duration() const override { return duration_; }

  absl::optional<DecodeResult> Decode(
      rtc::ArrayView<int16_t> decoded) const override {
    if (!is_dtx_) {
      decoder_->ReadFromFile(timestamp_, duration_, decoded.data());
      return DecodeResult{duration_, AudioDecoder::kSpeech};
    }
    // DTX packet: emit silence and report it as comfort noise.
    std::fill_n(decoded.data(), duration_, 0);
    return DecodeResult{duration_, AudioDecoder::kComfortNoise};
  }

  bool IsDtxPacket() const override { return is_dtx_; }

 private:
  FakeDecodeFromFile* const decoder_;  // Non-owning.
  const uint32_t timestamp_;
  const size_t duration_;
  const bool is_dtx_;
};

}  // namespace
// Reads `samples` samples for `timestamp` from the input file into
// `destination`. If a gap is detected in the timestamp sequence, the file
// position is advanced by the same amount so the audio stays aligned with
// the RTP timeline.
void FakeDecodeFromFile::ReadFromFile(uint32_t timestamp,
                                      size_t samples,
                                      int16_t* destination) {
  if (next_timestamp_from_input_ && timestamp != *next_timestamp_from_input_) {
    // A gap in the timestamp sequence is detected. Skip the same number of
    // samples from the file. (Unsigned subtraction handles RTP timestamp
    // wrap-around.)
    uint32_t jump = timestamp - *next_timestamp_from_input_;
    RTC_CHECK(input_->Seek(jump));
  }
  next_timestamp_from_input_ = timestamp + samples;
  RTC_CHECK(input_->Read(static_cast<size_t>(samples), destination));
  if (stereo_) {
    // The file is read as mono; duplicate each sample in place to produce
    // interleaved stereo.
    InputAudioFile::DuplicateInterleaved(destination, samples, 2, destination);
  }
}
// Produces codec-internal comfort noise (silence). Regular audio is decoded
// via FakeEncodedFrame::Decode(); NetEq only reaches this path with an empty
// payload. Returns the number of samples written to `decoded`.
int FakeDecodeFromFile::DecodeInternal(const uint8_t* encoded,
                                       size_t encoded_len,
                                       int sample_rate_hz,
                                       int16_t* decoded,
                                       SpeechType* speech_type) {
  // This call is only used to produce codec-internal comfort noise.
  RTC_DCHECK_EQ(sample_rate_hz, SampleRateHz());
  RTC_DCHECK_EQ(encoded_len, 0);
  RTC_DCHECK(!encoded);  // NetEq always sends nullptr in this case.
  // Emit 10 ms of zeros per channel.
  const int samples_to_decode = rtc::CheckedDivExact(SampleRateHz(), 100);
  const int total_samples_to_decode = samples_to_decode * (stereo_ ? 2 : 1);
  std::fill_n(decoded, total_samples_to_decode, 0);
  *speech_type = kComfortNoise;
  return rtc::dchecked_cast<int>(total_samples_to_decode);
}
// Writes the 12-byte fake "encoding" into `encoded`: little-endian
// `timestamp`, `samples` and `original_payload_size_bytes`, in that order.
// ParsePayload() reads this format back.
void FakeDecodeFromFile::PrepareEncoded(uint32_t timestamp,
                                        size_t samples,
                                        size_t original_payload_size_bytes,
                                        rtc::ArrayView<uint8_t> encoded) {
  RTC_CHECK_GE(encoded.size(), 12);
  ByteWriter<uint32_t>::WriteLittleEndian(&encoded[0], timestamp);
  ByteWriter<uint32_t>::WriteLittleEndian(&encoded[4],
                                          rtc::checked_cast<uint32_t>(samples));
  ByteWriter<uint32_t>::WriteLittleEndian(
      &encoded[8], rtc::checked_cast<uint32_t>(original_payload_size_bytes));
}
// Parses the 12-byte fake payload written by PrepareEncoded() and wraps it in
// a FakeEncodedFrame.
std::vector<AudioDecoder::ParseResult> FakeDecodeFromFile::ParsePayload(
    rtc::Buffer&& payload,
    uint32_t timestamp) {
  RTC_CHECK_GE(payload.size(), 12);
  // Parse payload encoded in PrepareEncoded.
  RTC_CHECK_EQ(timestamp, ByteReader<uint32_t>::ReadLittleEndian(&payload[0]));
  size_t samples = ByteReader<uint32_t>::ReadLittleEndian(&payload[4]);
  size_t original_payload_size_bytes =
      ByteReader<uint32_t>::ReadLittleEndian(&payload[8]);
  // Original payloads of at most 2 bytes are treated as Opus DTX (no-data)
  // packets.
  bool opus_dtx = original_payload_size_bytes <= 2;
  std::vector<ParseResult> results;
  results.emplace_back(
      timestamp, 0,
      std::make_unique<FakeEncodedFrame>(this, timestamp, samples, opus_dtx));
  return results;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,77 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
#include <memory>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/audio_codecs/audio_decoder.h"
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
namespace webrtc {
namespace test {
// Provides an AudioDecoder implementation that delivers audio data from a file.
// The "encoded" input should contain information about what RTP timestamp the
// encoding represents, and how many samples the decoder should produce for that
// encoding. A helper method PrepareEncoded is provided to prepare such
// encodings. If packets are missing, as determined from the timestamps, the
// file reading will skip forward to match the loss.
class FakeDecodeFromFile : public AudioDecoder {
public:
FakeDecodeFromFile(std::unique_ptr<InputAudioFile> input,
int sample_rate_hz,
bool stereo)
: input_(std::move(input)),
sample_rate_hz_(sample_rate_hz),
stereo_(stereo) {}
~FakeDecodeFromFile() = default;
std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp) override;
void Reset() override {}
int SampleRateHz() const override { return sample_rate_hz_; }
size_t Channels() const override { return stereo_ ? 2 : 1; }
int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) override;
// Reads `samples` from the input file and writes the results to
// `destination`. Location in file is determined by `timestamp`.
void ReadFromFile(uint32_t timestamp, size_t samples, int16_t* destination);
// Helper method. Writes `timestamp`, `samples` and
// `original_payload_size_bytes` to `encoded` in a format that the
// FakeDecodeFromFile decoder will understand. `encoded` must be at least 12
// bytes long.
static void PrepareEncoded(uint32_t timestamp,
size_t samples,
size_t original_payload_size_bytes,
rtc::ArrayView<uint8_t> encoded);
private:
std::unique_ptr<InputAudioFile> input_;
absl::optional<uint32_t> next_timestamp_from_input_;
const int sample_rate_hz_;
const bool stereo_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h"
#include <limits>
#include <memory>
#include <utility>
#include "rtc_base/checks.h"
namespace webrtc {
namespace test {
// Wraps `source` and will deliver `number_of_initial_packets` copies of the
// source's first packet (with back-dated headers) before passing packets
// through. `sample_rate_hz` is used to back-date the RTP timestamps.
InitialPacketInserterNetEqInput::InitialPacketInserterNetEqInput(
    std::unique_ptr<NetEqInput> source,
    int number_of_initial_packets,
    int sample_rate_hz)
    : source_(std::move(source)),
      packets_to_insert_(number_of_initial_packets),
      sample_rate_hz_(sample_rate_hz) {}
// The following getters simply forward to the wrapped source.
absl::optional<int64_t> InitialPacketInserterNetEqInput::NextPacketTime()
    const {
  return source_->NextPacketTime();
}

absl::optional<int64_t> InitialPacketInserterNetEqInput::NextOutputEventTime()
    const {
  return source_->NextOutputEventTime();
}

absl::optional<NetEqInput::SetMinimumDelayInfo>
InitialPacketInserterNetEqInput::NextSetMinimumDelayInfo() const {
  return source_->NextSetMinimumDelayInfo();
}
std::unique_ptr<InitialPacketInserterNetEqInput::PacketData>
InitialPacketInserterNetEqInput::PopPacket() {
if (!first_packet_) {
first_packet_ = source_->PopPacket();
if (!first_packet_) {
// The source has no packets, so we should not insert any dummy packets.
packets_to_insert_ = 0;
}
}
if (packets_to_insert_ > 0) {
RTC_CHECK(first_packet_);
auto dummy_packet = std::unique_ptr<PacketData>(new PacketData());
dummy_packet->header = first_packet_->header;
dummy_packet->payload = rtc::Buffer(first_packet_->payload.data(),
first_packet_->payload.size());
dummy_packet->time_ms = first_packet_->time_ms;
dummy_packet->header.sequenceNumber -= packets_to_insert_;
// This assumes 20ms per packet.
dummy_packet->header.timestamp -=
20 * sample_rate_hz_ * packets_to_insert_ / 1000;
packets_to_insert_--;
return dummy_packet;
}
return source_->PopPacket();
}
// The remaining methods forward directly to the wrapped source.
void InitialPacketInserterNetEqInput::AdvanceSetMinimumDelay() {
  source_->AdvanceSetMinimumDelay();
}

void InitialPacketInserterNetEqInput::AdvanceOutputEvent() {
  source_->AdvanceOutputEvent();
}

bool InitialPacketInserterNetEqInput::ended() const {
  // The wrapped source alone decides when the input ends.
  return source_->ended();
}

absl::optional<RTPHeader> InitialPacketInserterNetEqInput::NextHeader() const {
  return source_->NextHeader();
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_INITIAL_PACKET_INSERTER_NETEQ_INPUT_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_INITIAL_PACKET_INSERTER_NETEQ_INPUT_H_
#include <map>
#include <memory>
#include <string>
#include "modules/audio_coding/neteq/tools/neteq_input.h"
namespace webrtc {
namespace test {
// Wrapper class that can insert a number of packets at the start of the
// simulation.
class InitialPacketInserterNetEqInput final : public NetEqInput {
public:
InitialPacketInserterNetEqInput(std::unique_ptr<NetEqInput> source,
int number_of_initial_packets,
int sample_rate_hz);
absl::optional<int64_t> NextPacketTime() const override;
absl::optional<int64_t> NextOutputEventTime() const override;
absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo() const override;
std::unique_ptr<PacketData> PopPacket() override;
void AdvanceOutputEvent() override;
void AdvanceSetMinimumDelay() override;
bool ended() const override;
absl::optional<RTPHeader> NextHeader() const override;
private:
const std::unique_ptr<NetEqInput> source_;
int packets_to_insert_;
const int sample_rate_hz_;
std::unique_ptr<PacketData> first_packet_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_INITIAL_PACKET_INSERTER_NETEQ_INPUT_H_

View file

@ -0,0 +1,96 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace test {
// Opens `file_name` for binary reading. In debug builds a failed open trips
// the DCHECK; in release builds fp_ stays null and Read()/Seek() return
// false.
InputAudioFile::InputAudioFile(absl::string_view file_name, bool loop_at_end)
    : loop_at_end_(loop_at_end) {
  fp_ = fopen(std::string(file_name).c_str(), "rb");
  RTC_DCHECK(fp_) << file_name << " could not be opened.";
}

InputAudioFile::~InputAudioFile() {
  RTC_DCHECK(fp_);
  // Guard the close: in non-debug builds fp_ can be null if the file failed
  // to open, and fclose(nullptr) is undefined behavior.
  if (fp_) {
    fclose(fp_);
  }
}
// Reads `samples` 16-bit values into `destination`. When looping is enabled
// and the end of file is hit, the file is rewound and the remainder is read
// from the beginning.
bool InputAudioFile::Read(size_t samples, int16_t* destination) {
  if (!fp_) {
    return false;
  }
  const size_t first_chunk = fread(destination, sizeof(int16_t), samples, fp_);
  if (first_chunk == samples) {
    return true;
  }
  if (!loop_at_end_) {
    // Hit end of file and looping is disabled.
    return false;
  }
  // Rewind and read the missing samples from the start of the file.
  rewind(fp_);
  const size_t remaining = samples - first_chunk;
  return fread(destination + first_chunk, sizeof(int16_t), remaining, fp_) ==
         remaining;
}
// Fast-forwards (`samples` > 0) or rewinds (`samples` < 0) the read position
// by `samples` elements. In looping mode the position wraps around the file;
// otherwise it is clamped at the end of the file.
bool InputAudioFile::Seek(int samples) {
  if (!fp_) {
    return false;
  }
  // Find file boundaries.
  const long current_pos = ftell(fp_);
  RTC_CHECK_NE(EOF, current_pos)
      << "Error returned when getting file position.";
  RTC_CHECK_EQ(0, fseek(fp_, 0, SEEK_END));  // Move to end of file.
  const long file_size = ftell(fp_);
  RTC_CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
  // Find new position.
  long new_pos = current_pos + sizeof(int16_t) * samples;  // Samples to bytes.
  if (loop_at_end_) {
    // NOTE(review): this assumes a non-empty file — file_size == 0 would make
    // the modulo below a division by zero. Confirm callers never seek in an
    // empty file.
    new_pos = new_pos % file_size;  // Wrap around the end of the file.
    if (new_pos < 0) {
      // For negative values of new_pos, newpos % file_size will also be
      // negative. To get the correct result it's needed to add file_size.
      new_pos += file_size;
    }
  } else {
    new_pos = new_pos > file_size ? file_size : new_pos;  // Don't loop.
  }
  RTC_CHECK_GE(new_pos, 0)
      << "Trying to move to before the beginning of the file";
  // Move to new position relative to the beginning of the file.
  RTC_CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
  return true;
}
void InputAudioFile::DuplicateInterleaved(const int16_t* source,
                                          size_t samples,
                                          size_t channels,
                                          int16_t* destination) {
  // Iterate from the last sample towards the first so that `source` and
  // `destination` may be the same array (in-place interleaving): each source
  // sample is read before any of its destination slots are written.
  for (size_t i = samples; i-- > 0;) {
    const int16_t value = source[i];
    for (size_t j = channels; j-- > 0;) {
      destination[i * channels + j] = value;
    }
  }
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,62 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
#include <stdio.h>
#include <string>
#include "absl/strings/string_view.h"
namespace webrtc {
namespace test {
// Class for handling a looping input audio file.
class InputAudioFile {
 public:
  // Opens `file_name` for binary reading. With `loop_at_end` set to true (the
  // default), Read() and Seek() wrap around at the end of the file.
  explicit InputAudioFile(absl::string_view file_name, bool loop_at_end = true);
  virtual ~InputAudioFile();
  InputAudioFile(const InputAudioFile&) = delete;
  InputAudioFile& operator=(const InputAudioFile&) = delete;
  // Reads `samples` elements from source file to `destination`. Returns true
  // if the read was successful, otherwise false. If the file end is reached,
  // the file is rewound and reading continues from the beginning.
  // The output `destination` must have the capacity to hold `samples` elements.
  virtual bool Read(size_t samples, int16_t* destination);
  // Fast-forwards (`samples` > 0) or -backwards (`samples` < 0) the file by the
  // indicated number of samples. Just like Read(), Seek() starts over at the
  // beginning of the file if the end is reached. However, seeking backwards
  // past the beginning of the file is not possible.
  virtual bool Seek(int samples);
  // Creates a multi-channel signal from a mono signal. Each sample is repeated
  // `channels` times to create an interleaved multi-channel signal where all
  // channels are identical. The output `destination` must have the capacity to
  // hold samples * channels elements. Note that `source` and `destination` can
  // be the same array (i.e., point to the same address).
  static void DuplicateInterleaved(const int16_t* source,
                                   size_t samples,
                                   size_t channels,
                                   int16_t* destination);
 private:
  FILE* fp_;                // Handle to the open file; owned by this object.
  const bool loop_at_end_;  // Whether reads/seeks wrap at end-of-file.
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_

View file

@ -0,0 +1,59 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Unit tests for test InputAudioFile class.
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "test/gtest.h"
namespace webrtc {
namespace test {
TEST(TestInputAudioFile, DuplicateInterleaveSeparateSrcDst) {
  static const size_t kSamples = 10;
  static const size_t kChannels = 2;
  // Fill the mono input with the ramp 0, 1, ..., kSamples - 1.
  int16_t input[kSamples];
  for (size_t sample = 0; sample < kSamples; ++sample) {
    input[sample] = rtc::checked_cast<int16_t>(sample);
  }
  int16_t output[kSamples * kChannels];
  InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, output);
  // Each input sample must appear kChannels times in a row in the output.
  for (size_t sample = 0; sample < kSamples; ++sample) {
    for (size_t channel = 0; channel < kChannels; ++channel) {
      EXPECT_EQ(static_cast<int16_t>(sample),
                output[sample * kChannels + channel]);
    }
  }
}
TEST(TestInputAudioFile, DuplicateInterleaveSameSrcDst) {
  static const size_t kSamples = 10;
  static const size_t kChannels = 5;
  // Only the first kSamples elements are initialized; the rest of the buffer
  // is the destination of the in-place expansion.
  int16_t buffer[kSamples * kChannels];
  for (size_t sample = 0; sample < kSamples; ++sample) {
    buffer[sample] = rtc::checked_cast<int16_t>(sample);
  }
  InputAudioFile::DuplicateInterleaved(buffer, kSamples, kChannels, buffer);
  // After in-place interleaving, each sample must appear kChannels times in a
  // row.
  for (size_t sample = 0; sample < kSamples; ++sample) {
    for (size_t channel = 0; channel < kChannels; ++channel) {
      EXPECT_EQ(static_cast<int16_t>(sample),
                buffer[sample * kChannels + channel]);
    }
  }
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,307 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include <algorithm>
#include <fstream>
#include <ios>
#include <iterator>
#include <limits>
#include <utility>
#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/sequence_number_unwrapper.h"
namespace webrtc {
namespace test {
namespace {
constexpr char kArrivalDelayX[] = "arrival_delay_x";
constexpr char kArrivalDelayY[] = "arrival_delay_y";
constexpr char kTargetDelayX[] = "target_delay_x";
constexpr char kTargetDelayY[] = "target_delay_y";
constexpr char kPlayoutDelayX[] = "playout_delay_x";
constexpr char kPlayoutDelayY[] = "playout_delay_y";
// Helper function for NetEqDelayAnalyzer::CreateGraphs. Returns the
// interpolated value of a function at the point x. Vector x_vec contains the
// sample points, and y_vec contains the function values at these points. The
// return value is a linear interpolation between y_vec values.
double LinearInterpolate(double x,
                         const std::vector<int64_t>& x_vec,
                         const std::vector<int64_t>& y_vec) {
  // Locate the first sample point strictly greater than x; clamp to the last
  // sample if x lies beyond the end of the range.
  auto upper = std::upper_bound(x_vec.begin(), x_vec.end(), x);
  if (upper == x_vec.end()) {
    --upper;
  }
  const size_t hi = upper - x_vec.begin();
  // When x falls before the first sample or on/after the clamped last sample,
  // both endpoints coincide and no interpolation is needed.
  const size_t lo = (hi == 0 || x_vec[hi] <= x) ? hi : hi - 1;
  if (lo == hi) {
    return y_vec[lo];
  }
  RTC_DCHECK_NE(x_vec[lo], x_vec[hi]);
  // Linear interpolation between the two surrounding sample points. The
  // expression order matches the original implementation to keep identical
  // floating-point rounding.
  return (x - x_vec[lo]) * (y_vec[hi] - y_vec[lo]) /
             (x_vec[hi] - x_vec[lo]) +
         y_vec[lo];
}
void PrintDelays(const NetEqDelayAnalyzer::Delays& delays,
                 int64_t ref_time_ms,
                 absl::string_view var_name_x,
                 absl::string_view var_name_y,
                 std::ofstream& output,
                 absl::string_view terminator = "") {
  // Emits two array assignments: `var_name_x` holds times in seconds relative
  // to `ref_time_ms`, `var_name_y` holds the corresponding delay values.
  output << var_name_x << " = [ ";
  for (const auto& time_and_delay : delays) {
    output << (time_and_delay.first - ref_time_ms) / 1000.f << ", ";
  }
  output << "]" << terminator << std::endl;
  output << var_name_y << " = [ ";
  for (const auto& time_and_delay : delays) {
    output << time_and_delay.second << ", ";
  }
  output << "]" << terminator << std::endl;
}
} // namespace
void NetEqDelayAnalyzer::AfterInsertPacket(
    const test::NetEqInput::PacketData& packet,
    NetEq* neteq) {
  // Record the arrival time keyed on the RTP timestamp, and remember which
  // SSRCs and payload types have been seen. Like insert(), emplace() is a
  // no-op when the timestamp is already present.
  data_.emplace(packet.header.timestamp, TimingData(packet.time_ms));
  ssrcs_.insert(packet.header.ssrc);
  payload_types_.insert(packet.header.payloadType);
}
void NetEqDelayAnalyzer::BeforeGetAudio(NetEq* neteq) {
  // Snapshot the sync buffer size so that AfterGetAudio can attribute it to
  // the packets decoded during the upcoming GetAudio call.
  last_sync_buffer_ms_ = neteq->SyncBufferSizeMs();
}
void NetEqDelayAnalyzer::AfterGetAudio(int64_t time_now_ms,
                                       const AudioFrame& audio_frame,
                                       bool /*muted*/,
                                       NetEq* neteq) {
  // Records, for each packet that contributed to this output frame, in which
  // GetAudio iteration it was decoded and the delay state at that time.
  get_audio_time_ms_.push_back(time_now_ms);
  for (const RtpPacketInfo& info : audio_frame.packet_infos_) {
    auto it = data_.find(info.rtp_timestamp());
    if (it == data_.end()) {
      // This is a packet that was split out from another packet. Skip it.
      continue;
    }
    auto& it_timing = it->second;
    RTC_CHECK(!it_timing.decode_get_audio_count)
        << "Decode time already written";
    it_timing.decode_get_audio_count = get_audio_count_;
    // Fixed check message: this guards the sync delay field, not the decode
    // time; the previous message duplicated the one above.
    RTC_CHECK(!it_timing.sync_delay_ms) << "Sync delay already written";
    it_timing.sync_delay_ms = last_sync_buffer_ms_;
    it_timing.target_delay_ms = neteq->TargetDelayMs();
    it_timing.current_delay_ms = neteq->FilteredCurrentDelayMs();
  }
  last_sample_rate_hz_ = audio_frame.sample_rate_hz_;
  ++get_audio_count_;
}
void NetEqDelayAnalyzer::CreateGraphs(Delays* arrival_delay_ms,
                                      Delays* corrected_arrival_delay_ms,
                                      Delays* playout_delay_ms,
                                      Delays* target_delay_ms) const {
  // Produces four (time_ms, delay_ms) series from the recorded data: raw
  // arrival delay, arrival delay corrected to the nominal 10 ms output tick
  // grid, playout delay, and target delay. The output vectors are appended
  // to; they are left untouched if no GetAudio calls were recorded.
  if (get_audio_time_ms_.empty()) {
    return;
  }
  // Create nominal_get_audio_time_ms, a vector starting at
  // get_audio_time_ms_[0] and increasing by 10 for each element.
  std::vector<int64_t> nominal_get_audio_time_ms(get_audio_time_ms_.size());
  nominal_get_audio_time_ms[0] = get_audio_time_ms_[0];
  std::transform(
      nominal_get_audio_time_ms.begin(), nominal_get_audio_time_ms.end() - 1,
      nominal_get_audio_time_ms.begin() + 1, [](int64_t& x) { return x + 10; });
  RTC_DCHECK(
      std::is_sorted(get_audio_time_ms_.begin(), get_audio_time_ms_.end()));
  std::vector<double> rtp_timestamps_ms;
  double offset = std::numeric_limits<double>::max();
  RtpTimestampUnwrapper unwrapper;
  // This loop traverses data_ and populates rtp_timestamps_ms as well as
  // calculates the base offset.
  for (auto& d : data_) {
    // Convert the unwrapped RTP timestamp to milliseconds using the last
    // observed sample rate.
    rtp_timestamps_ms.push_back(
        static_cast<double>(unwrapper.Unwrap(d.first)) /
        rtc::CheckedDivExact(last_sample_rate_hz_, 1000));
    offset =
        std::min(offset, d.second.arrival_time_ms - rtp_timestamps_ms.back());
  }
  // This loop traverses the data again and populates the graph vectors. The
  // reason to have two loops and traverse twice is that the offset cannot be
  // known until the first traversal is done. Meanwhile, the final offset must
  // be known already at the start of this second loop.
  size_t i = 0;
  for (const auto& data : data_) {
    const double offset_send_time_ms = rtp_timestamps_ms[i++] + offset;
    const auto& timing = data.second;
    corrected_arrival_delay_ms->push_back(std::make_pair(
        timing.arrival_time_ms,
        LinearInterpolate(timing.arrival_time_ms, get_audio_time_ms_,
                          nominal_get_audio_time_ms) -
            offset_send_time_ms));
    arrival_delay_ms->push_back(std::make_pair(
        timing.arrival_time_ms, timing.arrival_time_ms - offset_send_time_ms));
    if (timing.decode_get_audio_count) {
      // This packet was decoded.
      RTC_DCHECK(timing.sync_delay_ms);
      // Reconstruct the decode wall-clock time from the GetAudio iteration
      // count, assuming 10 ms per iteration.
      const int64_t get_audio_time =
          *timing.decode_get_audio_count * 10 + get_audio_time_ms_[0];
      const float playout_ms =
          get_audio_time + *timing.sync_delay_ms - offset_send_time_ms;
      playout_delay_ms->push_back(std::make_pair(get_audio_time, playout_ms));
      RTC_DCHECK(timing.target_delay_ms);
      RTC_DCHECK(timing.current_delay_ms);
      const float target =
          playout_ms - *timing.current_delay_ms + *timing.target_delay_ms;
      target_delay_ms->push_back(std::make_pair(get_audio_time, target));
    }
  }
}
void NetEqDelayAnalyzer::CreateMatlabScript(
    absl::string_view script_name) const {
  // Writes a Matlab script named `script_name` that plots the arrival, target
  // and playout delay series produced by CreateGraphs.
  Delays arrival_delay_ms;
  Delays corrected_arrival_delay_ms;
  Delays playout_delay_ms;
  Delays target_delay_ms;
  CreateGraphs(&arrival_delay_ms, &corrected_arrival_delay_ms,
               &playout_delay_ms, &target_delay_ms);
  if (arrival_delay_ms.empty()) {
    // CreateGraphs recorded no data (e.g., no GetAudio calls); calling
    // front() below would be undefined behavior and there is nothing to plot.
    return;
  }
  // Maybe better to find the actually smallest timestamp, to surely avoid
  // x-axis starting from negative.
  const int64_t ref_time_ms = arrival_delay_ms.front().first;
  // Create an output file stream to Matlab script file.
  std::ofstream output(std::string{script_name});
  PrintDelays(corrected_arrival_delay_ms, ref_time_ms, kArrivalDelayX,
              kArrivalDelayY, output, ";");
  // PrintDelays(corrected_arrival_delay_x, kCorrectedArrivalDelayX,
  // kCorrectedArrivalDelayY, output);
  PrintDelays(playout_delay_ms, ref_time_ms, kPlayoutDelayX, kPlayoutDelayY,
              output, ";");
  PrintDelays(target_delay_ms, ref_time_ms, kTargetDelayX, kTargetDelayY,
              output, ";");
  output << "h=plot(" << kArrivalDelayX << ", " << kArrivalDelayY << ", "
         << kTargetDelayX << ", " << kTargetDelayY << ", 'g.', "
         << kPlayoutDelayX << ", " << kPlayoutDelayY << ");" << std::endl;
  output << "set(h(1),'color',0.75*[1 1 1]);" << std::endl;
  output << "set(h(2),'markersize',6);" << std::endl;
  output << "set(h(3),'linew',1.5);" << std::endl;
  output << "ax1=axis;" << std::endl;
  output << "axis tight" << std::endl;
  output << "ax2=axis;" << std::endl;
  output << "axis([ax2(1:3) ax1(4)])" << std::endl;
  output << "xlabel('time [s]');" << std::endl;
  output << "ylabel('relative delay [ms]');" << std::endl;
  if (!ssrcs_.empty()) {
    // Annotate the plot with all SSRCs (hex) and payload types (decimal).
    auto ssrc_it = ssrcs_.cbegin();
    output << "title('SSRC: 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
    while (ssrc_it != ssrcs_.end()) {
      output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
    }
    output << std::dec;
    auto pt_it = payload_types_.cbegin();
    output << "; Payload Types: " << *pt_it++;
    while (pt_it != payload_types_.end()) {
      output << ", " << *pt_it++;
    }
    output << "');" << std::endl;
  }
}
void NetEqDelayAnalyzer::CreatePythonScript(
    absl::string_view script_name) const {
  // Writes a Python (matplotlib) script named `script_name` that plots the
  // arrival, target and playout delay series produced by CreateGraphs.
  Delays arrival_delay_ms;
  Delays corrected_arrival_delay_ms;
  Delays playout_delay_ms;
  Delays target_delay_ms;
  CreateGraphs(&arrival_delay_ms, &corrected_arrival_delay_ms,
               &playout_delay_ms, &target_delay_ms);
  if (arrival_delay_ms.empty()) {
    // CreateGraphs recorded no data (e.g., no GetAudio calls); calling
    // front() below would be undefined behavior and there is nothing to plot.
    return;
  }
  // Maybe better to find the actually smallest timestamp, to surely avoid
  // x-axis starting from negative.
  const int64_t ref_time_ms = arrival_delay_ms.front().first;
  // Create an output file stream to the python script file.
  std::ofstream output(std::string{script_name});
  // Necessary includes
  output << "import numpy as np" << std::endl;
  output << "import matplotlib.pyplot as plt" << std::endl;
  PrintDelays(corrected_arrival_delay_ms, ref_time_ms, kArrivalDelayX,
              kArrivalDelayY, output);
  // PrintDelays(corrected_arrival_delay_x, kCorrectedArrivalDelayX,
  // kCorrectedArrivalDelayY, output);
  PrintDelays(playout_delay_ms, ref_time_ms, kPlayoutDelayX, kPlayoutDelayY,
              output);
  PrintDelays(target_delay_ms, ref_time_ms, kTargetDelayX, kTargetDelayY,
              output);
  output << "if __name__ == '__main__':" << std::endl;
  output << "  h=plt.plot(" << kArrivalDelayX << ", " << kArrivalDelayY << ", "
         << kTargetDelayX << ", " << kTargetDelayY << ", 'g.', "
         << kPlayoutDelayX << ", " << kPlayoutDelayY << ")" << std::endl;
  output << "  plt.setp(h[0],'color',[.75, .75, .75])" << std::endl;
  output << "  plt.setp(h[1],'markersize',6)" << std::endl;
  output << "  plt.setp(h[2],'linewidth',1.5)" << std::endl;
  output << "  plt.axis('tight')" << std::endl;
  output << "  plt.xlabel('time [s]')" << std::endl;
  output << "  plt.ylabel('relative delay [ms]')" << std::endl;
  if (!ssrcs_.empty()) {
    // Annotate the plot with all SSRCs (hex) and payload types (decimal).
    auto ssrc_it = ssrcs_.cbegin();
    output << "  plt.legend((\"arrival delay\", \"target delay\", \"playout "
              "delay\"))"
           << std::endl;
    output << "  plt.title('SSRC: 0x" << std::hex
           << static_cast<int64_t>(*ssrc_it++);
    while (ssrc_it != ssrcs_.end()) {
      output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
    }
    output << std::dec;
    auto pt_it = payload_types_.cbegin();
    output << "; Payload Types: " << *pt_it++;
    while (pt_it != payload_types_.end()) {
      output << ", " << *pt_it++;
    }
    output << "')" << std::endl;
  }
  output << "  plt.show()" << std::endl;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,76 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/neteq_input.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
namespace webrtc {
namespace test {
class NetEqDelayAnalyzer : public test::NetEqPostInsertPacket,
public test::NetEqGetAudioCallback {
public:
void AfterInsertPacket(const test::NetEqInput::PacketData& packet,
NetEq* neteq) override;
void BeforeGetAudio(NetEq* neteq) override;
void AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool muted,
NetEq* neteq) override;
using Delays = std::vector<std::pair<int64_t, float>>;
void CreateGraphs(Delays* arrival_delay_ms,
Delays* corrected_arrival_delay_ms,
Delays* playout_delay_ms,
Delays* target_delay_ms) const;
// Creates a matlab script with file name script_name. When executed in
// Matlab, the script will generate graphs with the same timing information
// as provided by CreateGraphs.
void CreateMatlabScript(absl::string_view script_name) const;
// Creates a python script with file name `script_name`. When executed in
// Python, the script will generate graphs with the same timing information
// as provided by CreateGraphs.
void CreatePythonScript(absl::string_view script_name) const;
private:
struct TimingData {
explicit TimingData(int64_t at) : arrival_time_ms(at) {}
int64_t arrival_time_ms;
absl::optional<int64_t> decode_get_audio_count;
absl::optional<int64_t> sync_delay_ms;
absl::optional<int> target_delay_ms;
absl::optional<int> current_delay_ms;
};
std::map<uint32_t, TimingData> data_;
std::vector<int64_t> get_audio_time_ms_;
size_t get_audio_count_ = 0;
size_t last_sync_buffer_ms_ = 0;
int last_sample_rate_hz_ = 0;
std::set<uint32_t> ssrcs_;
std::set<int> payload_types_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_

View file

@ -0,0 +1,167 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_event_log_input.h"
#include <limits>
#include <memory>
#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace test {
namespace {
class NetEqEventLogInput : public NetEqInput {
public:
NetEqEventLogInput(const std::vector<LoggedRtpPacketIncoming>& packet_stream,
const std::vector<LoggedAudioPlayoutEvent>& output_events,
const std::vector<LoggedNetEqSetMinimumDelayEvent>&
neteq_set_minimum_delay_events,
absl::optional<int64_t> end_time_ms)
: packet_stream_(packet_stream),
packet_stream_it_(packet_stream_.begin()),
output_events_(output_events),
output_events_it_(output_events_.begin()),
neteq_set_minimum_delay_events_(neteq_set_minimum_delay_events),
neteq_set_minimum_delay_events_it_(
neteq_set_minimum_delay_events_.begin()),
end_time_ms_(end_time_ms) {
// Ignore all output events before the first packet.
while (output_events_it_ != output_events_.end() &&
output_events_it_->log_time_ms() <
packet_stream_it_->log_time_ms()) {
++output_events_it_;
}
}
absl::optional<int64_t> NextPacketTime() const override {
if (packet_stream_it_ == packet_stream_.end()) {
return absl::nullopt;
}
if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) {
return absl::nullopt;
}
return packet_stream_it_->rtp.log_time_ms();
}
absl::optional<int64_t> NextOutputEventTime() const override {
if (output_events_it_ == output_events_.end()) {
return absl::nullopt;
}
if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) {
return absl::nullopt;
}
return output_events_it_->log_time_ms();
}
absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo() const override {
if (neteq_set_minimum_delay_events_it_ ==
neteq_set_minimum_delay_events_.end()) {
return absl::nullopt;
}
if (end_time_ms_ &&
neteq_set_minimum_delay_events_it_->log_time_ms() > *end_time_ms_) {
return absl::nullopt;
}
return SetMinimumDelayInfo(
neteq_set_minimum_delay_events_it_->log_time_ms(),
neteq_set_minimum_delay_events_it_->minimum_delay_ms);
}
std::unique_ptr<PacketData> PopPacket() override {
if (packet_stream_it_ == packet_stream_.end()) {
return nullptr;
}
auto packet_data = std::make_unique<PacketData>();
packet_data->header = packet_stream_it_->rtp.header;
packet_data->time_ms = packet_stream_it_->rtp.log_time_ms();
// This is a header-only "dummy" packet. Set the payload to all zeros, with
// length according to the virtual length.
packet_data->payload.SetSize(packet_stream_it_->rtp.total_length -
packet_stream_it_->rtp.header_length);
std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
++packet_stream_it_;
return packet_data;
}
void AdvanceOutputEvent() override {
if (output_events_it_ != output_events_.end()) {
++output_events_it_;
}
}
void AdvanceSetMinimumDelay() override {
if (neteq_set_minimum_delay_events_it_ !=
neteq_set_minimum_delay_events_.end()) {
++neteq_set_minimum_delay_events_it_;
}
}
bool ended() const override { return !NextEventTime(); }
absl::optional<RTPHeader> NextHeader() const override {
if (packet_stream_it_ == packet_stream_.end()) {
return absl::nullopt;
}
return packet_stream_it_->rtp.header;
}
private:
const std::vector<LoggedRtpPacketIncoming> packet_stream_;
std::vector<LoggedRtpPacketIncoming>::const_iterator packet_stream_it_;
const std::vector<LoggedAudioPlayoutEvent> output_events_;
std::vector<LoggedAudioPlayoutEvent>::const_iterator output_events_it_;
const std::vector<LoggedNetEqSetMinimumDelayEvent>
neteq_set_minimum_delay_events_;
std::vector<LoggedNetEqSetMinimumDelayEvent>::const_iterator
neteq_set_minimum_delay_events_it_;
const absl::optional<int64_t> end_time_ms_;
};
} // namespace
std::unique_ptr<NetEqInput> CreateNetEqEventLogInput(
    const ParsedRtcEventLog& parsed_log,
    absl::optional<uint32_t> ssrc) {
  // Builds a NetEqInput that replays the incoming audio stream with the given
  // SSRC. Returns nullptr if the log has no incoming audio, no stream with
  // the requested SSRC, or no playout events for it.
  if (parsed_log.incoming_audio_ssrcs().empty()) {
    return nullptr;
  }
  // Pick the first SSRC if none was provided.
  if (!ssrc.has_value()) {
    ssrc = *parsed_log.incoming_audio_ssrcs().begin();
  }
  auto streams = parsed_log.incoming_rtp_packets_by_ssrc();
  auto stream_it = std::find_if(
      streams.begin(), streams.end(),
      [&ssrc](const auto& stream) { return stream.ssrc == ssrc; });
  if (stream_it == streams.end()) {
    return nullptr;
  }
  const auto& playout_events_map = parsed_log.audio_playout_events();
  auto output_events_it = playout_events_map.find(*ssrc);
  if (output_events_it == playout_events_map.end()) {
    return nullptr;
  }
  // Minimum-delay events are optional; replay an empty stream if the log
  // contains none for this SSRC.
  std::vector<LoggedNetEqSetMinimumDelayEvent> min_delay_events;
  const auto& min_delay_map = parsed_log.neteq_set_minimum_delay_events();
  auto min_delay_it = min_delay_map.find(*ssrc);
  if (min_delay_it != min_delay_map.end()) {
    min_delay_events = min_delay_it->second;
  }
  // Limit the replay to the first log segment.
  const int64_t end_time_ms = parsed_log.first_log_segment().stop_time_ms();
  return std::make_unique<NetEqEventLogInput>(stream_it->incoming_packets,
                                              output_events_it->second,
                                              min_delay_events, end_time_ms);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EVENT_LOG_INPUT_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EVENT_LOG_INPUT_H_
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "logging/rtc_event_log/rtc_event_log_parser.h"
#include "modules/audio_coding/neteq/tools/neteq_input.h"
namespace webrtc {
namespace test {
// Creates a NetEqInput that replays the incoming audio RTP packets and audio
// playout events found in `parsed_log`. If `ssrc` is empty, the first
// incoming audio SSRC in the log is used. Returns nullptr if the log contains
// no suitable incoming audio stream.
std::unique_ptr<NetEqInput> CreateNetEqEventLogInput(
    const ParsedRtcEventLog& parsed_log,
    absl::optional<uint32_t> ssrc);
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EVENT_LOG_INPUT_H_

View file

@ -0,0 +1,105 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_input.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace test {
// Out-of-line definitions of the special members declared in neteq_input.h.
NetEqInput::PacketData::PacketData() = default;
NetEqInput::PacketData::~PacketData() = default;
std::string NetEqInput::PacketData::ToString() const {
rtc::StringBuilder ss;
ss << "{"
"time_ms: "
<< static_cast<int64_t>(time_ms)
<< ", "
"header: {"
"pt: "
<< static_cast<int>(header.payloadType)
<< ", "
"sn: "
<< header.sequenceNumber
<< ", "
"ts: "
<< header.timestamp
<< ", "
"ssrc: "
<< header.ssrc
<< "}, "
"payload bytes: "
<< payload.size() << "}";
return ss.Release();
}
// Wraps `input` and stops delivering events once the next event time exceeds
// the wrapped input's first event time by more than `duration_ms`.
TimeLimitedNetEqInput::TimeLimitedNetEqInput(std::unique_ptr<NetEqInput> input,
                                             int64_t duration_ms)
    : input_(std::move(input)),
      start_time_ms_(input_->NextEventTime()),
      duration_ms_(duration_ms) {}
TimeLimitedNetEqInput::~TimeLimitedNetEqInput() = default;
// The three accessors below forward to the wrapped input until the time limit
// has been reached, after which they report no more events.
absl::optional<int64_t> TimeLimitedNetEqInput::NextPacketTime() const {
  return ended_ ? absl::nullopt : input_->NextPacketTime();
}
absl::optional<int64_t> TimeLimitedNetEqInput::NextOutputEventTime() const {
  return ended_ ? absl::nullopt : input_->NextOutputEventTime();
}
absl::optional<NetEqInput::SetMinimumDelayInfo>
TimeLimitedNetEqInput::NextSetMinimumDelayInfo() const {
  return ended_ ? absl::nullopt : input_->NextSetMinimumDelayInfo();
}
std::unique_ptr<NetEqInput::PacketData> TimeLimitedNetEqInput::PopPacket() {
if (ended_) {
return std::unique_ptr<PacketData>();
}
auto packet = input_->PopPacket();
MaybeSetEnded();
return packet;
}
void TimeLimitedNetEqInput::AdvanceOutputEvent() {
  // No-op once the time limit has been reached.
  if (ended_) {
    return;
  }
  input_->AdvanceOutputEvent();
  MaybeSetEnded();
}
void TimeLimitedNetEqInput::AdvanceSetMinimumDelay() {
  // No-op once the time limit has been reached.
  if (ended_) {
    return;
  }
  input_->AdvanceSetMinimumDelay();
  MaybeSetEnded();
}
// The input has ended when either the time limit has been reached or the
// wrapped input itself has ended.
bool TimeLimitedNetEqInput::ended() const {
  return ended_ || input_->ended();
}
absl::optional<RTPHeader> TimeLimitedNetEqInput::NextHeader() const {
  return ended_ ? absl::nullopt : input_->NextHeader();
}
void TimeLimitedNetEqInput::MaybeSetEnded() {
if (NextEventTime() && start_time_ms_ &&
*NextEventTime() - *start_time_ms_ > duration_ms_) {
ended_ = true;
}
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,135 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
#include <algorithm>
#include <memory>
#include <string>
#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/packet.h"
#include "modules/audio_coding/neteq/tools/packet_source.h"
#include "rtc_base/buffer.h"
namespace webrtc {
namespace test {
// Interface class for input to the NetEqTest class.
class NetEqInput {
 public:
  struct PacketData {
    PacketData();
    ~PacketData();
    std::string ToString() const;
    RTPHeader header;     // RTP header of the packet.
    rtc::Buffer payload;  // Packet payload bytes.
    int64_t time_ms;      // Time (in ms) associated with the packet, i.e.,
                          // when it should be inserted into NetEq.
  };
  struct SetMinimumDelayInfo {
    SetMinimumDelayInfo(int64_t timestamp_ms_in, int delay_ms_in)
        : timestamp_ms(timestamp_ms_in), delay_ms(delay_ms_in) {}
    int64_t timestamp_ms;  // Time (in ms) at which the event occurs.
    int delay_ms;          // The minimum delay (in ms) to apply.
  };
  virtual ~NetEqInput() = default;
  // Returns at what time (in ms) NetEq::InsertPacket should be called next, or
  // empty if the source is out of packets.
  virtual absl::optional<int64_t> NextPacketTime() const = 0;
  // Returns at what time (in ms) NetEq::GetAudio should be called next, or
  // empty if no more output events are available.
  virtual absl::optional<int64_t> NextOutputEventTime() const = 0;
  // Returns the information related to the next NetEq set minimum delay event
  // if available.
  virtual absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo()
      const = 0;
  // Returns the time (in ms) for the next event (packet, output or set minimum
  // delay event) or empty if there are no more events.
  absl::optional<int64_t> NextEventTime() const {
    absl::optional<int64_t> next_event_time = NextPacketTime();
    const auto next_output_time = NextOutputEventTime();
    // Return the minimum of non-empty `a` and `b`, or empty if both are empty.
    if (next_output_time) {
      next_event_time = next_event_time ? std::min(next_event_time.value(),
                                                   next_output_time.value())
                                        : next_output_time;
    }
    const auto next_neteq_minimum_delay = NextSetMinimumDelayInfo();
    if (next_neteq_minimum_delay) {
      next_event_time =
          next_event_time
              ? std::min(next_event_time.value(),
                         next_neteq_minimum_delay.value().timestamp_ms)
              : next_neteq_minimum_delay.value().timestamp_ms;
    }
    return next_event_time;
  }
  // Returns the next packet to be inserted into NetEq. The packet following the
  // returned one is pre-fetched in the NetEqInput object, such that future
  // calls to NextPacketTime() or NextHeader() will return information from that
  // packet.
  virtual std::unique_ptr<PacketData> PopPacket() = 0;
  // Move to the next output event. This will make NextOutputEventTime() return
  // a new value (potentially the same if several output events share the same
  // time).
  virtual void AdvanceOutputEvent() = 0;
  // Move to the next NetEq set minimum delay. This will make
  // `NextSetMinimumDelayInfo` return a new value.
  virtual void AdvanceSetMinimumDelay() = 0;
  // Returns true if the source has come to an end. An implementation must
  // eventually return true from this method, or the test will end up in an
  // infinite loop.
  virtual bool ended() const = 0;
  // Returns the RTP header for the next packet, i.e., the packet that will be
  // delivered next by PopPacket().
  virtual absl::optional<RTPHeader> NextHeader() const = 0;
};
// Wrapper class to impose a time limit on a NetEqInput object, typically
// another time limit than what the object itself provides. For example, an
// input taken from a file can be cut shorter by wrapping it in this class.
class TimeLimitedNetEqInput : public NetEqInput {
 public:
  // Wraps `input` and stops delivering events `duration_ms` after the wrapped
  // input's first event.
  TimeLimitedNetEqInput(std::unique_ptr<NetEqInput> input, int64_t duration_ms);
  ~TimeLimitedNetEqInput() override;
  absl::optional<int64_t> NextPacketTime() const override;
  absl::optional<int64_t> NextOutputEventTime() const override;
  absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo() const override;
  std::unique_ptr<PacketData> PopPacket() override;
  void AdvanceOutputEvent() override;
  void AdvanceSetMinimumDelay() override;
  bool ended() const override;
  absl::optional<RTPHeader> NextHeader() const override;
 private:
  // Sets `ended_` once the time limit has been exceeded.
  void MaybeSetEnded();
  std::unique_ptr<NetEqInput> input_;            // The wrapped input.
  const absl::optional<int64_t> start_time_ms_;  // Time of the first event.
  const int64_t duration_ms_;                    // The imposed time limit.
  bool ended_ = false;  // True once the limit has been reached.
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_

View file

@ -0,0 +1,128 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_performance_test.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/neteq/neteq.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "modules/audio_coding/neteq/default_neteq_factory.h"
#include "modules/audio_coding/neteq/tools/audio_loop.h"
#include "modules/audio_coding/neteq/tools/rtp_generator.h"
#include "rtc_base/checks.h"
#include "system_wrappers/include/clock.h"
#include "test/testsupport/file_utils.h"
using webrtc::NetEq;
using webrtc::test::AudioLoop;
using webrtc::test::RtpGenerator;
namespace webrtc {
namespace test {
// Runs a NetEq simulation over `runtime_ms` of looped 32 kHz PCM16 audio,
// dropping one out of every `lossrate` packets (if `lossrate` > 0) and
// applying `drift_factor` clock drift (positive for the first half of the
// simulation, negative for the second half).
// Returns the wall-clock runtime in ms, or -1 on any error.
int64_t NetEqPerformanceTest::Run(int runtime_ms,
                                  int lossrate,
                                  double drift_factor) {
  const std::string kInputFileName =
      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
  const int kSampRateHz = 32000;
  const int kPayloadType = 95;

  // Initialize NetEq instance.
  NetEq::Config config;
  config.sample_rate_hz = kSampRateHz;
  webrtc::Clock* clock = webrtc::Clock::GetRealTimeClock();
  auto audio_decoder_factory = CreateBuiltinAudioDecoderFactory();
  auto neteq =
      DefaultNetEqFactory().CreateNetEq(config, audio_decoder_factory, clock);

  // Register decoder in `neteq`.
  if (!neteq->RegisterPayloadType(kPayloadType,
                                  SdpAudioFormat("l16", kSampRateHz, 1)))
    return -1;

  // Set up AudioLoop object.
  AudioLoop audio_loop;
  const size_t kMaxLoopLengthSamples = kSampRateHz * 10;  // 10 second loop.
  const size_t kInputBlockSizeSamples = 60 * kSampRateHz / 1000;  // 60 ms.
  if (!audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
                       kInputBlockSizeSamples))
    return -1;

  int32_t time_now_ms = 0;

  // Get first input packet.
  RTPHeader rtp_header;
  RtpGenerator rtp_gen(kSampRateHz / 1000);
  // Start with positive drift first half of simulation.
  rtp_gen.set_drift_factor(drift_factor);
  bool drift_flipped = false;
  int32_t packet_input_time_ms =
      rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
  auto input_samples = audio_loop.GetNextBlock();
  if (input_samples.empty()) {
    // Report failure through the return value, consistent with every other
    // error path in this function (previously this called exit(1)).
    return -1;
  }
  uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
  size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
                                           input_samples.size(), input_payload);
  RTC_CHECK_EQ(sizeof(input_payload), payload_len);

  // Main loop.
  int64_t start_time_ms = clock->TimeInMilliseconds();
  AudioFrame out_frame;
  while (time_now_ms < runtime_ms) {
    // Feed all packets whose send time has been reached.
    while (packet_input_time_ms <= time_now_ms) {
      // Drop every N packets, where N = `lossrate`.
      bool lost = false;
      if (lossrate > 0) {
        lost = ((rtp_header.sequenceNumber - 1) % lossrate) == 0;
      }
      if (!lost) {
        // Insert packet.
        int error = neteq->InsertPacket(rtp_header, input_payload);
        if (error != NetEq::kOK)
          return -1;
      }
      // Get next packet.
      packet_input_time_ms = rtp_gen.GetRtpHeader(
          kPayloadType, kInputBlockSizeSamples, &rtp_header);
      input_samples = audio_loop.GetNextBlock();
      if (input_samples.empty())
        return -1;
      payload_len = WebRtcPcm16b_Encode(input_samples.data(),
                                        input_samples.size(), input_payload);
      RTC_DCHECK_EQ(payload_len, kInputBlockSizeSamples * sizeof(int16_t));
    }
    // Get output audio, but don't do anything with it.
    bool muted;
    int error = neteq->GetAudio(&out_frame, &muted);
    RTC_CHECK(!muted);
    if (error != NetEq::kOK)
      return -1;
    // Each GetAudio() call produces 10 ms of audio.
    RTC_DCHECK_EQ(out_frame.samples_per_channel_, (kSampRateHz * 10) / 1000);
    static const int kOutputBlockSizeMs = 10;
    time_now_ms += kOutputBlockSizeMs;
    if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
      // Apply negative drift second half of simulation.
      rtp_gen.set_drift_factor(-drift_factor);
      drift_flipped = true;
    }
  }
  int64_t end_time_ms = clock->TimeInMilliseconds();
  return end_time_ms - start_time_ms;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
#include <stdint.h>
namespace webrtc {
namespace test {
// Benchmark harness: runs a simulated audio stream through NetEq and measures
// the wall-clock time consumed (see the .cc file for details).
class NetEqPerformanceTest {
 public:
  // Runs a performance test with parameters as follows:
  // `runtime_ms`: the simulation time, i.e., the duration of the audio data.
  // `lossrate`: drop one out of `lossrate` packets, e.g., one out of 10.
  // `drift_factor`: clock drift in [0, 1].
  // Returns the runtime in ms, or -1 on error.
  static int64_t Run(int runtime_ms, int lossrate, double drift_factor);
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_

View file

@ -0,0 +1,482 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"

#include <stdio.h>

#include <algorithm>
#include <climits>
#include <cmath>
#include <iterator>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "absl/strings/string_view.h"
#include "modules/audio_coding/neteq/default_neteq_factory.h"
#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
#include "modules/audio_coding/neteq/tools/output_audio_file.h"
#include "modules/audio_coding/neteq/tools/output_wav_file.h"
#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "rtc_base/checks.h"
#include "rtc_base/string_encode.h"
#include "system_wrappers/include/clock.h"
#include "test/testsupport/file_utils.h"
// Command-line flags controlling the input/output files and the simulated
// network conditions for the NetEq quality tests.
ABSL_FLAG(std::string,
          in_filename,
          "audio_coding/speech_mono_16kHz.pcm",
          "Path of the input file (relative to the resources/ directory) for "
          "input audio (specify sample rate with --input_sample_rate, "
          "and channels with --channels).");
ABSL_FLAG(int, input_sample_rate, 16000, "Sample rate of input file in Hz.");
ABSL_FLAG(int, channels, 1, "Number of channels in input audio.");
ABSL_FLAG(std::string,
          out_filename,
          "neteq_quality_test_out.pcm",
          "Name of output audio file, which will be saved in " +
              ::webrtc::test::OutputPath());
ABSL_FLAG(
    int,
    runtime_ms,
    10000,
    "Simulated runtime (milliseconds). -1 will consume the complete file.");
ABSL_FLAG(int, packet_loss_rate, 10, "Percentile of packet loss.");
// Values correspond to the LossModes enum in neteq_quality_test.h.
ABSL_FLAG(int,
          random_loss_mode,
          ::webrtc::test::kUniformLoss,
          "Random loss mode: 0--no loss, 1--uniform loss, 2--Gilbert Elliot "
          "loss, 3--fixed loss.");
ABSL_FLAG(int,
          burst_length,
          30,
          "Burst length in milliseconds, only valid for Gilbert Elliot loss.");
ABSL_FLAG(float, drift_factor, 0.0, "Time drift factor.");
ABSL_FLAG(int,
          preload_packets,
          1,
          "Preload the buffer with this many packets.");
// Only used with --random_loss_mode=3 (fixed loss).
ABSL_FLAG(std::string,
          loss_events,
          "",
          "List of loss events time and duration separated by comma: "
          "<first_event_time> <first_event_duration>, <second_event_time> "
          "<second_event_duration>, ...");
namespace webrtc {
namespace test {
namespace {
// Thin factory wrapper: builds the NetEq instance under test using the
// default NetEq factory.
std::unique_ptr<NetEq> CreateNetEq(
    const NetEq::Config& config,
    Clock* clock,
    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
  return DefaultNetEqFactory().CreateNetEq(config, decoder_factory, clock);
}
const std::string& GetInFilenamePath(absl::string_view file_name) {
std::vector<absl::string_view> name_parts = rtc::split(file_name, '.');
RTC_CHECK_EQ(name_parts.size(), 2);
static const std::string path =
::webrtc::test::ResourcePath(name_parts[0], name_parts[1]);
return path;
}
const std::string& GetOutFilenamePath(absl::string_view file_name) {
static const std::string path =
::webrtc::test::OutputPath() + std::string(file_name);
return path;
}
} // namespace
// RTP payload type used for all generated packets.
const uint8_t kPayloadType = 95;
// Duration of one NetEq output block (one GetAudio() call), in ms.
const int kOutputSizeMs = 10;
// Fixed srand() seed so all derived tests share the same loss pattern.
const int kInitSeed = 0x12345678;
// Granularity of the loss models: one random drawing per this many ms.
const int kPacketLossTimeUnitMs = 10;
// Common validator for file names: checks that the file can be opened for
// reading (input) or writing (output). Note that probing an output file
// creates/truncates it, which is fine since it is the test's destination.
static bool ValidateFilename(absl::string_view value, bool is_output) {
  if (!is_output) {
    RTC_CHECK_NE(value.substr(value.find_last_of('.') + 1), "wav")
        << "WAV file input is not supported";
  }
  const std::string name(value);
  const char* mode = is_output ? "wb" : "rb";
  FILE* file = fopen(name.c_str(), mode);
  if (file == nullptr)
    return false;
  fclose(file);
  return true;
}
// Computes the no-loss -> no-loss transition probability (prob_trans_00) of a
// modified Gilbert Elliot packet loss model so that the overall packet loss
// rate equals `loss_rate`. A packet survives only if all `units` drawings
// within its duration come up loss-free; `prob_trans_10` is the
// loss -> no-loss transition probability.
static double ProbTrans00Solver(int units,
                                double loss_rate,
                                double prob_trans_10) {
  // With a single drawing per packet the equation is linear: solve directly.
  if (units == 1)
    return prob_trans_10 / (1.0f - loss_rate) - prob_trans_10;
  // Otherwise solve f(x) = x^(units-1) + a*x + b = 0 on [0, 1], where
  //   a = (1 - loss_rate) / prob_trans_10,
  //   b = (loss_rate - 1) * (1 + 1 / prob_trans_10).
  // f'(x) = (units - 1) x^(units-2) + a is strictly positive on (0, 1) and f
  // changes sign between 0 and 1, so the root is unique. Newton-Raphson
  // iteration, x(k+1) = x(k) - f(x)/f'(x), converges to it.
  const double kPrecision = 0.001f;
  const int kIterations = 100;
  const double a = (1.0f - loss_rate) / prob_trans_10;
  const double b = (loss_rate - 1.0f) * (1.0f + 1.0f / prob_trans_10);
  double x = 0.0;  // Starting point.
  double f = b;
  for (int iteration = 0;
       (f >= kPrecision || f <= -kPrecision) && iteration < kIterations;
       ++iteration) {
    const double f_derivative = (units - 1.0f) * std::pow(x, units - 2) + a;
    x -= f / f_derivative;
    // Clamp the iterate back into the valid probability range.
    if (x > 1.0f) {
      x = 1.0f;
    } else if (x < 0.0f) {
      x = 0.0f;
    }
    f = std::pow(x, units - 1) + a * x + b;
  }
  return x;
}
// Reads and validates all command-line flags, opens the input, output and log
// files, and creates the NetEq instance under test.
NetEqQualityTest::NetEqQualityTest(
    int block_duration_ms,
    int in_sampling_khz,
    int out_sampling_khz,
    const SdpAudioFormat& format,
    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory)
    : audio_format_(format),
      channels_(absl::GetFlag(FLAGS_channels)),
      decoded_time_ms_(0),
      decodable_time_ms_(0),
      drift_factor_(absl::GetFlag(FLAGS_drift_factor)),
      packet_loss_rate_(absl::GetFlag(FLAGS_packet_loss_rate)),
      block_duration_ms_(block_duration_ms),
      in_sampling_khz_(in_sampling_khz),
      out_sampling_khz_(out_sampling_khz),
      // Samples per channel in one input block.
      in_size_samples_(
          static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
      payload_size_bytes_(0),
      max_payload_bytes_(0),
      // The input file is looped only when a positive runtime is requested;
      // with --runtime_ms < 0 the file is consumed exactly once.
      in_file_(new ResampleInputAudioFile(
          GetInFilenamePath(absl::GetFlag(FLAGS_in_filename)),
          absl::GetFlag(FLAGS_input_sample_rate),
          in_sampling_khz * 1000,
          absl::GetFlag(FLAGS_runtime_ms) > 0)),
      rtp_generator_(
          new RtpGenerator(in_sampling_khz_, 0, 0, decodable_time_ms_)),
      total_payload_size_bytes_(0) {
  // Flag validation
  RTC_CHECK(ValidateFilename(
      GetInFilenamePath(absl::GetFlag(FLAGS_in_filename)), false))
      << "Invalid input filename.";
  RTC_CHECK(absl::GetFlag(FLAGS_input_sample_rate) == 8000 ||
            absl::GetFlag(FLAGS_input_sample_rate) == 16000 ||
            absl::GetFlag(FLAGS_input_sample_rate) == 32000 ||
            absl::GetFlag(FLAGS_input_sample_rate) == 48000)
      << "Invalid sample rate should be 8000, 16000, 32000 or 48000 Hz.";
  RTC_CHECK_EQ(absl::GetFlag(FLAGS_channels), 1)
      << "Invalid number of channels, current support only 1.";
  RTC_CHECK(ValidateFilename(
      GetOutFilenamePath(absl::GetFlag(FLAGS_out_filename)), true))
      << "Invalid output filename.";
  RTC_CHECK(absl::GetFlag(FLAGS_packet_loss_rate) >= 0 &&
            absl::GetFlag(FLAGS_packet_loss_rate) <= 100)
      << "Invalid packet loss percentile, should be between 0 and 100.";
  RTC_CHECK(absl::GetFlag(FLAGS_random_loss_mode) >= 0 &&
            absl::GetFlag(FLAGS_random_loss_mode) < kLastLossMode)
      << "Invalid random packet loss mode, should be between 0 and "
      << kLastLossMode - 1 << ".";
  RTC_CHECK_GE(absl::GetFlag(FLAGS_burst_length), kPacketLossTimeUnitMs)
      << "Invalid burst length, should be greater than or equal to "
      << kPacketLossTimeUnitMs << " ms.";
  RTC_CHECK_GT(absl::GetFlag(FLAGS_drift_factor), -0.1)
      << "Invalid drift factor, should be greater than -0.1.";
  RTC_CHECK_GE(absl::GetFlag(FLAGS_preload_packets), 0)
      << "Invalid number of packets to preload; must be non-negative.";
  // A text log (<output>.log) records per-packet events during the run.
  const std::string out_filename =
      GetOutFilenamePath(absl::GetFlag(FLAGS_out_filename));
  const std::string log_filename = out_filename + ".log";
  log_file_.open(log_filename.c_str(), std::ofstream::out);
  RTC_CHECK(log_file_.is_open());
  // Choose the output writer based on the requested file extension.
  if (out_filename.size() >= 4 &&
      out_filename.substr(out_filename.size() - 4) == ".wav") {
    // Open a wav file.
    output_.reset(
        new webrtc::test::OutputWavFile(out_filename, 1000 * out_sampling_khz));
  } else {
    // Open a pcm file.
    output_.reset(new webrtc::test::OutputAudioFile(out_filename));
  }
  NetEq::Config config;
  config.sample_rate_hz = out_sampling_khz_ * 1000;
  neteq_ = CreateNetEq(config, Clock::GetRealTimeClock(), decoder_factory);
  max_payload_bytes_ = in_size_samples_ * channels_ * sizeof(int16_t);
  in_data_.reset(new int16_t[in_size_samples_ * channels_]);
}
// Closes the per-run log file; all other members clean up via destructors.
NetEqQualityTest::~NetEqQualityTest() {
  log_file_.close();
}
// The no-loss model never drops a packet.
bool NoLoss::Lost(int now_ms) {
  return false;
}
// `loss_rate` is the per-drawing loss probability; it is not validated here.
UniformLoss::UniformLoss(double loss_rate) : loss_rate_(loss_rate) {}
bool UniformLoss::Lost(int now_ms) {
  // Draw a uniform random number in [0, RAND_MAX] and report a loss if it
  // falls below the configured loss probability.
  return rand() < loss_rate_ * RAND_MAX;
}
// `prob_trans_11`: probability of loss when the previous packet was lost.
// `prob_trans_01`: probability of loss when the previous packet arrived.
// The internal uniform model is re-parameterized on every Lost() call.
GilbertElliotLoss::GilbertElliotLoss(double prob_trans_11, double prob_trans_01)
    : prob_trans_11_(prob_trans_11),
      prob_trans_01_(prob_trans_01),
      lost_last_(false),
      uniform_loss_model_(new UniformLoss(0)) {}
GilbertElliotLoss::~GilbertElliotLoss() {}
bool GilbertElliotLoss::Lost(int now_ms) {
  // Simulate a bursty channel (Gilbert model): a first-order Markov chain
  // where the loss probability depends on whether the previous packet was
  // lost or received.
  uniform_loss_model_->set_loss_rate(lost_last_ ? prob_trans_11_
                                                : prob_trans_01_);
  lost_last_ = uniform_loss_model_->Lost(now_ms);
  return lost_last_;
}
// Copies the event set and positions the cursor at the earliest event.
FixedLossModel::FixedLossModel(
    std::set<FixedLossEvent, FixedLossEventCmp> loss_events)
    : loss_events_(loss_events) {
  loss_events_it_ = loss_events_.begin();
}
FixedLossModel::~FixedLossModel() {}
bool FixedLossModel::Lost(int now_ms) {
  // All scheduled events consumed: nothing more to lose.
  if (loss_events_it_ == loss_events_.end())
    return false;
  // Current event has not started yet.
  if (now_ms <= loss_events_it_->start_ms)
    return false;
  // Inside the current event's loss interval.
  if (now_ms <= loss_events_it_->start_ms + loss_events_it_->duration_ms)
    return true;
  // The current event has passed; advance to the next one.
  ++loss_events_it_;
  return false;
}
// Registers the test codec with NetEq and instantiates the packet loss model
// selected by --random_loss_mode. Runs before each test body (gtest).
void NetEqQualityTest::SetUp() {
  ASSERT_TRUE(neteq_->RegisterPayloadType(kPayloadType, audio_format_));
  rtp_generator_->set_drift_factor(drift_factor_);
  // Number of independent loss drawings per packet.
  int units = block_duration_ms_ / kPacketLossTimeUnitMs;
  switch (absl::GetFlag(FLAGS_random_loss_mode)) {
    case kUniformLoss: {
      // `unit_loss_rate` is the packet loss rate for each unit time interval
      // (kPacketLossTimeUnitMs). Since a packet loss event is generated if any
      // of |block_duration_ms_ / kPacketLossTimeUnitMs| unit time intervals of
      // a full packet duration is drawn with a loss, `unit_loss_rate` fulfills
      // (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
      // 1 - packet_loss_rate.
      double unit_loss_rate =
          (1.0 - std::pow(1.0 - 0.01 * packet_loss_rate_, 1.0 / units));
      loss_model_.reset(new UniformLoss(unit_loss_rate));
      break;
    }
    case kGilbertElliotLoss: {
      // `FLAGS_burst_length` should be integer times of kPacketLossTimeUnitMs.
      ASSERT_EQ(0, absl::GetFlag(FLAGS_burst_length) % kPacketLossTimeUnitMs);
      // We do not allow 100 percent packet loss in Gilbert Elliot model, which
      // makes no sense.
      ASSERT_GT(100, packet_loss_rate_);
      // To guarantee the overall packet loss rate, transition probabilities
      // need to satisfy:
      // pi_0 * (1 - prob_trans_01_) ^ units +
      //    pi_1 * prob_trans_10_ ^ (units - 1) == 1 - loss_rate
      // pi_0 = prob_trans_10 / (prob_trans_10 + prob_trans_01_)
      //    is the stationary state probability of no-loss
      // pi_1 = prob_trans_01_ / (prob_trans_10 + prob_trans_01_)
      //    is the stationary state probability of loss
      // After a derivation prob_trans_00 should satisfy:
      // prob_trans_00 ^ (units - 1) = (loss_rate - 1) / prob_trans_10 *
      //    prob_trans_00 + (1 - loss_rate) * (1 + 1 / prob_trans_10).
      double loss_rate = 0.01f * packet_loss_rate_;
      double prob_trans_10 =
          1.0f * kPacketLossTimeUnitMs / absl::GetFlag(FLAGS_burst_length);
      double prob_trans_00 = ProbTrans00Solver(units, loss_rate, prob_trans_10);
      loss_model_.reset(
          new GilbertElliotLoss(1.0f - prob_trans_10, 1.0f - prob_trans_00));
      break;
    }
    case kFixedLoss: {
      // Parse --loss_events: comma-separated "<start_ms> <duration_ms>" pairs.
      std::istringstream loss_events_stream(absl::GetFlag(FLAGS_loss_events));
      std::string loss_event_string;
      std::set<FixedLossEvent, FixedLossEventCmp> loss_events;
      while (std::getline(loss_events_stream, loss_event_string, ',')) {
        std::vector<int> loss_event_params;
        std::istringstream loss_event_params_stream(loss_event_string);
        std::copy(std::istream_iterator<int>(loss_event_params_stream),
                  std::istream_iterator<int>(),
                  std::back_inserter(loss_event_params));
        RTC_CHECK_EQ(loss_event_params.size(), 2);
        auto result = loss_events.insert(
            FixedLossEvent(loss_event_params[0], loss_event_params[1]));
        RTC_CHECK(result.second);
      }
      RTC_CHECK_GT(loss_events.size(), 0);
      loss_model_.reset(new FixedLossModel(loss_events));
      break;
    }
    default: {
      loss_model_.reset(new NoLoss);
      break;
    }
  }
  // Make sure that the packet loss profile is same for all derived tests.
  srand(kInitSeed);
}
// Returns the per-run log stream. Usage: Log() << "message";
std::ofstream& NetEqQualityTest::Log() {
  return log_file_;
}
bool NetEqQualityTest::PacketLost() {
  // One loss drawing per kPacketLossTimeUnitMs of packet duration, so that
  // codecs with different block lengths share the same packet loss profile.
  const int draws = block_duration_ms_ / kPacketLossTimeUnitMs;
  bool lost = false;
  for (int draw = 0; draw < draws; ++draw) {
    // The packet is lost if any drawing indicates a loss. Every drawing must
    // still be made even after a loss has been decided, to keep codecs with
    // different block lengths at the same pace in the random sequence.
    lost = loss_model_->Lost(decoded_time_ms_) || lost;
  }
  return lost;
}
// Generates the RTP header for the current payload and inserts the packet
// into NetEq, unless the loss model drops it. Logs the outcome either way.
// Returns the packet's send time in ms, or -1 if insertion failed.
int NetEqQualityTest::Transmit() {
  int packet_input_time_ms = rtp_generator_->GetRtpHeader(
      kPayloadType, in_size_samples_, &rtp_header_);
  Log() << "Packet of size " << payload_size_bytes_ << " bytes, for frame at "
        << packet_input_time_ms << " ms ";
  // A zero-sized payload is neither inserted nor counted as sent/lost.
  if (payload_size_bytes_ > 0) {
    if (!PacketLost()) {
      int ret = neteq_->InsertPacket(
          rtp_header_,
          rtc::ArrayView<const uint8_t>(payload_.data(), payload_size_bytes_));
      if (ret != NetEq::kOK)
        return -1;
      Log() << "was sent.";
    } else {
      Log() << "was lost.";
    }
  }
  Log() << std::endl;
  return packet_input_time_ms;
}
int NetEqQualityTest::DecodeBlock() {
  // Pull one output block out of NetEq and append it to the output file.
  // Returns the number of samples per channel, or -1 on error.
  bool muted = false;
  const int ret = neteq_->GetAudio(&out_frame_, &muted);
  RTC_CHECK(!muted);
  if (ret != NetEq::kOK)
    return -1;
  RTC_DCHECK_EQ(out_frame_.num_channels_, channels_);
  RTC_DCHECK_EQ(out_frame_.samples_per_channel_,
                static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
  RTC_CHECK(output_->WriteArray(
      out_frame_.data(),
      out_frame_.samples_per_channel_ * out_frame_.num_channels_));
  return static_cast<int>(out_frame_.samples_per_channel_);
}
// Main simulation loop: encode -> (simulated loss) -> transmit -> NetEq
// decode, until the requested runtime of audio has been produced, or until
// the input file ends (when --runtime_ms is negative). Logs the average bit
// rate at the end.
void NetEqQualityTest::Simulate() {
  int audio_size_samples;
  bool end_of_input = false;
  int runtime_ms = absl::GetFlag(FLAGS_runtime_ms) >= 0
                       ? absl::GetFlag(FLAGS_runtime_ms)
                       : INT_MAX;
  while (!end_of_input && decoded_time_ms_ < runtime_ms) {
    // Keep NetEq preloaded with --preload_packets packets: encode and
    // transmit until enough audio is decodable ahead of the decode position.
    while (decodable_time_ms_ -
               absl::GetFlag(FLAGS_preload_packets) * block_duration_ms_ <
           decoded_time_ms_) {
      if (!in_file_->Read(in_size_samples_ * channels_, &in_data_[0])) {
        end_of_input = true;
        // Running out of input is only expected when consuming the whole file.
        ASSERT_TRUE(end_of_input && absl::GetFlag(FLAGS_runtime_ms) < 0);
        break;
      }
      payload_.Clear();
      payload_size_bytes_ = EncodeBlock(&in_data_[0], in_size_samples_,
                                        &payload_, max_payload_bytes_);
      total_payload_size_bytes_ += payload_size_bytes_;
      decodable_time_ms_ = Transmit() + block_duration_ms_;
    }
    audio_size_samples = DecodeBlock();
    if (audio_size_samples > 0) {
      decoded_time_ms_ += audio_size_samples / out_sampling_khz_;
    }
  }
  // When --runtime_ms is negative ("consume the whole file") the flag value
  // cannot serve as duration; use the actually decoded duration instead.
  // (The old code divided by -1 and logged a bogus negative bit rate.)
  const int duration_ms = absl::GetFlag(FLAGS_runtime_ms) >= 0
                              ? absl::GetFlag(FLAGS_runtime_ms)
                              : decoded_time_ms_;
  Log() << "Average bit rate was "
        << 8.0f * total_payload_size_bytes_ / duration_ms << " kbps"
        << std::endl;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,176 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
#include <cstdint>
#include <fstream>
#include <memory>
#include <set>

#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/neteq/neteq.h"
#include "modules/audio_coding/neteq/tools/audio_sink.h"
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "modules/audio_coding/neteq/tools/rtp_generator.h"
#include "system_wrappers/include/clock.h"
#include "test/gtest.h"
namespace webrtc {
namespace test {
// Selectable packet loss models; the numeric values are what the
// --random_loss_mode flag accepts.
enum LossModes {
  kNoLoss,
  kUniformLoss,
  kGilbertElliotLoss,
  kFixedLoss,
  kLastLossMode  // Sentinel; must stay last (used for flag range checking).
};
// Interface implemented by all packet loss models below.
class LossModel {
 public:
  virtual ~LossModel() {}
  // Returns true if the packet sent at time `now_ms` should be dropped.
  virtual bool Lost(int now_ms) = 0;
};
// Loss model that never drops a packet.
class NoLoss : public LossModel {
 public:
  bool Lost(int now_ms) override;
};
// Uniform (i.i.d.) loss model: each Lost() call independently reports a loss
// with probability `loss_rate_`.
class UniformLoss : public LossModel {
 public:
  // `loss_rate` is the per-drawing loss probability.
  // Marked explicit to prevent accidental implicit double -> UniformLoss
  // conversions (all existing callers use direct initialization).
  explicit UniformLoss(double loss_rate);
  bool Lost(int now_ms) override;
  void set_loss_rate(double loss_rate) { loss_rate_ = loss_rate; }
 private:
  double loss_rate_;
};
// Bursty loss model: a first-order (Gilbert-Elliot) Markov chain where the
// loss probability depends on whether the previous packet was lost.
class GilbertElliotLoss : public LossModel {
 public:
  GilbertElliotLoss(double prob_trans_11, double prob_trans_01);
  ~GilbertElliotLoss() override;
  bool Lost(int now_ms) override;
 private:
  // Prob. of losing current packet, when previous packet is lost.
  double prob_trans_11_;
  // Prob. of losing current packet, when previous packet is not lost.
  double prob_trans_01_;
  bool lost_last_;
  std::unique_ptr<UniformLoss> uniform_loss_model_;
};
// One scheduled loss interval; packets in (start_ms, start_ms + duration_ms]
// are dropped (see FixedLossModel::Lost in the .cc file).
struct FixedLossEvent {
  int start_ms;
  int duration_ms;
  FixedLossEvent(int start_ms, int duration_ms)
      : start_ms(start_ms), duration_ms(duration_ms) {}
};
// Orders FixedLossEvents by start time (std::set comparator).
struct FixedLossEventCmp {
  bool operator()(const FixedLossEvent& l_event,
                  const FixedLossEvent& r_event) const {
    return l_event.start_ms < r_event.start_ms;
  }
};
// Loss model that drops packets during a fixed, pre-configured set of time
// intervals (see the --loss_events flag).
class FixedLossModel : public LossModel {
 public:
  // Marked explicit to prevent accidental implicit set -> FixedLossModel
  // conversions (all existing callers use direct initialization).
  explicit FixedLossModel(
      std::set<FixedLossEvent, FixedLossEventCmp> loss_events);
  ~FixedLossModel() override;
  bool Lost(int now_ms) override;
 private:
  std::set<FixedLossEvent, FixedLossEventCmp> loss_events_;
  // Cursor to the first event that has not yet fully elapsed.
  std::set<FixedLossEvent, FixedLossEventCmp>::iterator loss_events_it_;
};
// Fixture for codec-specific NetEq quality tests. A derived test implements
// EncodeBlock() for its codec; Simulate() then runs an encode -> (simulated
// loss) -> NetEq decode loop and writes the decoded audio to a file.
class NetEqQualityTest : public ::testing::Test {
 protected:
  NetEqQualityTest(
      int block_duration_ms,
      int in_sampling_khz,
      int out_sampling_khz,
      const SdpAudioFormat& format,
      const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory =
          webrtc::CreateBuiltinAudioDecoderFactory());
  ~NetEqQualityTest() override;
  void SetUp() override;
  // EncodeBlock(...) does the following:
  // 1. encodes a block of audio, saved in `in_data` and has a length of
  // `block_size_samples` (samples per channel),
  // 2. save the bit stream to `payload` of `max_bytes` bytes in size,
  // 3. returns the length of the payload (in bytes),
  virtual int EncodeBlock(int16_t* in_data,
                          size_t block_size_samples,
                          rtc::Buffer* payload,
                          size_t max_bytes) = 0;
  // PacketLost(...) determines whether a packet sent at an indicated time gets
  // lost or not.
  bool PacketLost();
  // DecodeBlock() decodes a block of audio using the payload stored in
  // `payload_` with the length of `payload_size_bytes_` (bytes). The decoded
  // audio is to be stored in `out_data_`.
  int DecodeBlock();
  // Transmit() uses `rtp_generator_` to generate a packet and passes it to
  // `neteq_`.
  int Transmit();
  // Runs encoding / transmitting / decoding.
  void Simulate();
  // Write to log file. Usage Log() << ...
  std::ofstream& Log();
  SdpAudioFormat audio_format_;
  const size_t channels_;
 private:
  // Audio durations decoded so far / known to be decodable, in ms.
  int decoded_time_ms_;
  int decodable_time_ms_;
  double drift_factor_;
  // Target overall packet loss rate in percent (0-100).
  int packet_loss_rate_;
  const int block_duration_ms_;
  // Input and output sample rates, in kHz.
  const int in_sampling_khz_;
  const int out_sampling_khz_;
  // Number of samples per channel in a frame.
  const size_t in_size_samples_;
  size_t payload_size_bytes_;
  size_t max_payload_bytes_;
  std::unique_ptr<InputAudioFile> in_file_;
  std::unique_ptr<AudioSink> output_;
  // Per-run text log, opened next to the output file (see .cc).
  std::ofstream log_file_;
  std::unique_ptr<RtpGenerator> rtp_generator_;
  std::unique_ptr<NetEq> neteq_;
  // Loss model selected by --random_loss_mode in SetUp().
  std::unique_ptr<LossModel> loss_model_;
  std::unique_ptr<int16_t[]> in_data_;
  rtc::Buffer payload_;
  AudioFrame out_frame_;
  RTPHeader rtp_header_;
  // Sum of all encoded payload sizes; used for the average bit rate log.
  size_t total_payload_size_bytes_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_

View file

@ -0,0 +1,126 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace test {
// Wraps `source`. Packets whose payload type is in `forbidden_types` trigger
// a fatal error; packets in `comfort_noise_types` are replaced by a minimal
// CNG payload; all other packets get a fake encoding with payload type
// `replacement_payload_type` (see ReplacePacket()).
NetEqReplacementInput::NetEqReplacementInput(
    std::unique_ptr<NetEqInput> source,
    uint8_t replacement_payload_type,
    const std::set<uint8_t>& comfort_noise_types,
    const std::set<uint8_t>& forbidden_types)
    : source_(std::move(source)),
      replacement_payload_type_(replacement_payload_type),
      comfort_noise_types_(comfort_noise_types),
      forbidden_types_(forbidden_types) {
  RTC_CHECK(source_);
  // Prime the one-packet lookahead needed for frame-size estimation.
  packet_ = source_->PopPacket();
  ReplacePacket();
}
absl::optional<int64_t> NetEqReplacementInput::NextPacketTime() const {
  // No pending packet means no next packet time.
  if (!packet_)
    return absl::nullopt;
  return static_cast<int64_t>(packet_->time_ms);
}
// Output events are not modified; forward straight to the wrapped source.
absl::optional<int64_t> NetEqReplacementInput::NextOutputEventTime() const {
  return source_->NextOutputEventTime();
}
// Minimum-delay events are not modified; forward straight to the source.
absl::optional<NetEqInput::SetMinimumDelayInfo>
NetEqReplacementInput::NextSetMinimumDelayInfo() const {
  return source_->NextSetMinimumDelayInfo();
}
std::unique_ptr<NetEqInput::PacketData> NetEqReplacementInput::PopPacket() {
  // Hand out the already-replaced lookahead packet and refill the lookahead.
  std::unique_ptr<PacketData> to_return = std::move(packet_);
  // Fetch the next packet from the source, skipping packets that consist of
  // padding only (payload no larger than the padding length).
  while ((packet_ = source_->PopPacket()) != nullptr &&
         packet_->payload.size() <= packet_->header.paddingLength) {
  }
  ReplacePacket();
  return to_return;
}
// Event advancement passes straight through to the wrapped source.
void NetEqReplacementInput::AdvanceOutputEvent() {
  source_->AdvanceOutputEvent();
}
void NetEqReplacementInput::AdvanceSetMinimumDelay() {
  source_->AdvanceSetMinimumDelay();
}
// The wrapped source decides when the stream has ended.
bool NetEqReplacementInput::ended() const {
  return source_->ended();
}
// Exposes the source's lookahead header (used for frame-size estimation).
absl::optional<RTPHeader> NetEqReplacementInput::NextHeader() const {
  return source_->NextHeader();
}
// Rewrites the payload of `packet_` with a fake encoding understood by
// FakeDecodeFromFile, keeping the RTP header (except payload type) intact.
void NetEqReplacementInput::ReplacePacket() {
  if (!source_->NextPacketTime()) {
    // End of input. Cannot do proper replacement on the very last packet, so we
    // delete it instead.
    packet_.reset();
    return;
  }
  RTC_DCHECK(packet_);
  RTC_CHECK_EQ(forbidden_types_.count(packet_->header.payloadType), 0)
      << "Payload type " << static_cast<int>(packet_->header.payloadType)
      << " is forbidden.";
  // Check if this packet is comfort noise.
  if (comfort_noise_types_.count(packet_->header.payloadType) != 0) {
    // If CNG, simply insert a zero-energy one-byte payload.
    uint8_t cng_payload[1] = {127};  // Max attenuation of CNG.
    packet_->payload.SetData(cng_payload);
    return;
  }
  // Estimate the frame size from the timestamp distance to the next packet.
  // 120 * 48 = 5760 timestamps = 120 ms at 48 kHz, the allowed maximum.
  absl::optional<RTPHeader> next_hdr = source_->NextHeader();
  RTC_DCHECK(next_hdr);
  uint8_t payload[12];
  RTC_DCHECK_LE(last_frame_size_timestamps_, 120 * 48);
  uint32_t input_frame_size_timestamps = last_frame_size_timestamps_;
  const uint32_t timestamp_diff =
      next_hdr->timestamp - packet_->header.timestamp;
  // Payloads of 2 bytes or less are treated as Opus DTX, whose timestamp gap
  // does not reflect the true frame size.
  const bool opus_dtx = packet_->payload.size() <= 2;
  if (next_hdr->sequenceNumber == packet_->header.sequenceNumber + 1 &&
      timestamp_diff <= 120 * 48 && timestamp_diff > 0 && !opus_dtx) {
    // Packets are in order and the timestamp diff is less than 5760 samples.
    // Accept the timestamp diff as a valid frame size.
    input_frame_size_timestamps = timestamp_diff;
    last_frame_size_timestamps_ = input_frame_size_timestamps;
  }
  RTC_DCHECK_LE(input_frame_size_timestamps, 120 * 48);
  // Encode timestamp, frame size and original payload size into the fake
  // payload for FakeDecodeFromFile to consume.
  FakeDecodeFromFile::PrepareEncoded(packet_->header.timestamp,
                                     input_frame_size_timestamps,
                                     packet_->payload.size(), payload);
  packet_->payload.SetData(payload);
  packet_->header.payloadType = replacement_payload_type_;
  return;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,53 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
#include <memory>
#include <set>
#include "modules/audio_coding/neteq/tools/neteq_input.h"
namespace webrtc {
namespace test {
// This class converts the packets from a NetEqInput to fake encodings to be
// decoded by a FakeDecodeFromFile decoder.
class NetEqReplacementInput : public NetEqInput {
 public:
  // `comfort_noise_types`: payload types replaced by a minimal CNG payload.
  // `forbidden_types`: payload types that trigger a fatal error if seen.
  // All other packets are rewritten to carry `replacement_payload_type`.
  NetEqReplacementInput(std::unique_ptr<NetEqInput> source,
                        uint8_t replacement_payload_type,
                        const std::set<uint8_t>& comfort_noise_types,
                        const std::set<uint8_t>& forbidden_types);
  absl::optional<int64_t> NextPacketTime() const override;
  absl::optional<int64_t> NextOutputEventTime() const override;
  absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo() const override;
  std::unique_ptr<PacketData> PopPacket() override;
  void AdvanceOutputEvent() override;
  void AdvanceSetMinimumDelay() override;
  bool ended() const override;
  absl::optional<RTPHeader> NextHeader() const override;
 private:
  // Rewrites the payload of `packet_` (see the .cc for the scheme).
  void ReplacePacket();
  std::unique_ptr<NetEqInput> source_;
  const uint8_t replacement_payload_type_;
  const std::set<uint8_t> comfort_noise_types_;
  const std::set<uint8_t> forbidden_types_;
  std::unique_ptr<PacketData> packet_;  // The next packet to deliver.
  uint32_t last_frame_size_timestamps_ = 960;  // Initial guess: 20 ms @ 48 kHz.
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_

View file

@ -0,0 +1,109 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_rtp_dump_input.h"
#include "absl/strings/string_view.h"
#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
namespace webrtc {
namespace test {
namespace {
// An adapter class to dress up a PacketSource object as a NetEqInput.
class NetEqRtpDumpInput : public NetEqInput {
 public:
  // Opens `file_name` as an RTP dump, registers the given header extensions
  // and preloads the first packet. `ssrc_filter` is forwarded to
  // RtpFileSource::Create (presumably restricting packets to that SSRC —
  // confirm in RtpFileSource).
  NetEqRtpDumpInput(absl::string_view file_name,
                    const std::map<int, RTPExtensionType>& hdr_ext_map,
                    absl::optional<uint32_t> ssrc_filter)
      : source_(RtpFileSource::Create(file_name, ssrc_filter)) {
    for (const auto& ext_pair : hdr_ext_map) {
      source_->RegisterRtpHeaderExtension(ext_pair.second, ext_pair.first);
    }
    LoadNextPacket();
  }
  absl::optional<int64_t> NextOutputEventTime() const override {
    return next_output_event_ms_;
  }
  // RTP dumps carry no minimum-delay events.
  absl::optional<SetMinimumDelayInfo> NextSetMinimumDelayInfo() const override {
    return absl::nullopt;
  }
  // Advances the output clock by one 10 ms period; output events stop (and
  // the input is considered ended) once no packets remain.
  void AdvanceOutputEvent() override {
    if (next_output_event_ms_) {
      *next_output_event_ms_ += kOutputPeriodMs;
    }
    if (!NextPacketTime()) {
      next_output_event_ms_ = absl::nullopt;
    }
  }
  void AdvanceSetMinimumDelay() override {}
  absl::optional<int64_t> NextPacketTime() const override {
    return packet_ ? absl::optional<int64_t>(
                         static_cast<int64_t>(packet_->time_ms()))
                   : absl::nullopt;
  }
  // Converts the preloaded packet into a PacketData and loads the next one.
  std::unique_ptr<PacketData> PopPacket() override {
    if (!packet_) {
      return std::unique_ptr<PacketData>();
    }
    std::unique_ptr<PacketData> packet_data(new PacketData);
    packet_data->header = packet_->header();
    if (packet_->payload_length_bytes() == 0 &&
        packet_->virtual_payload_length_bytes() > 0) {
      // This is a header-only "dummy" packet. Set the payload to all zeros,
      // with length according to the virtual length.
      packet_data->payload.SetSize(packet_->virtual_payload_length_bytes());
      std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
    } else {
      packet_data->payload.SetData(packet_->payload(),
                                   packet_->payload_length_bytes());
    }
    packet_data->time_ms = packet_->time_ms();
    LoadNextPacket();
    return packet_data;
  }
  absl::optional<RTPHeader> NextHeader() const override {
    return packet_ ? absl::optional<RTPHeader>(packet_->header())
                   : absl::nullopt;
  }
  // Ended when no more output events will be produced (see
  // AdvanceOutputEvent()).
  bool ended() const override { return !next_output_event_ms_; }
 private:
  void LoadNextPacket() { packet_ = source_->NextPacket(); }
  // Time of the next 10 ms output event; nullopt once the input has ended.
  absl::optional<int64_t> next_output_event_ms_ = 0;
  static constexpr int64_t kOutputPeriodMs = 10;
  std::unique_ptr<RtpFileSource> source_;
  std::unique_ptr<Packet> packet_;  // One-packet lookahead; null at end.
};
} // namespace
// Factory for a NetEqInput backed by an RTP dump file; see the header for the
// parameter contract.
std::unique_ptr<NetEqInput> CreateNetEqRtpDumpInput(
    absl::string_view file_name,
    const std::map<int, RTPExtensionType>& hdr_ext_map,
    absl::optional<uint32_t> ssrc_filter) {
  std::unique_ptr<NetEqInput> input(
      new NetEqRtpDumpInput(file_name, hdr_ext_map, ssrc_filter));
  return input;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_RTP_DUMP_INPUT_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_RTP_DUMP_INPUT_H_
#include <map>
#include <memory>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/neteq_input.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
namespace test {
// Creates a NetEqInput that reads RTP packets from the RTP dump file
// `file_name`. `hdr_ext_map` maps RTP header-extension IDs to their types,
// and `ssrc_filter`, when set, keeps only packets with that SSRC.
std::unique_ptr<NetEqInput> CreateNetEqRtpDumpInput(
    absl::string_view file_name,
    const std::map<int, RTPExtensionType>& hdr_ext_map,
    absl::optional<uint32_t> ssrc_filter);
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_RTP_DUMP_INPUT_H_

View file

@ -0,0 +1,406 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <iostream>
#include <string>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/audio_coding/neteq/tools/neteq_test_factory.h"
#include "rtc_base/strings/string_builder.h"
#include "system_wrappers/include/field_trial.h"
#include "test/field_trial.h"
using TestConfig = webrtc::test::NetEqTestFactory::Config;
ABSL_FLAG(bool,
codec_map,
false,
"Prints the mapping between RTP payload type and "
"codec");
ABSL_FLAG(std::string,
force_fieldtrials,
"",
"Field trials control experimental feature code which can be forced. "
"E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
" will assign the group Enable to field trial WebRTC-FooFeature.");
ABSL_FLAG(int, pcmu, TestConfig::default_pcmu(), "RTP payload type for PCM-u");
ABSL_FLAG(int, pcma, TestConfig::default_pcma(), "RTP payload type for PCM-a");
ABSL_FLAG(int, ilbc, TestConfig::default_ilbc(), "RTP payload type for iLBC");
ABSL_FLAG(int, isac, TestConfig::default_isac(), "RTP payload type for iSAC");
ABSL_FLAG(int,
isac_swb,
TestConfig::default_isac_swb(),
"RTP payload type for iSAC-swb (32 kHz)");
ABSL_FLAG(int, opus, TestConfig::default_opus(), "RTP payload type for Opus");
ABSL_FLAG(int,
pcm16b,
TestConfig::default_pcm16b(),
"RTP payload type for PCM16b-nb (8 kHz)");
ABSL_FLAG(int,
pcm16b_wb,
TestConfig::default_pcm16b_wb(),
"RTP payload type for PCM16b-wb (16 kHz)");
ABSL_FLAG(int,
pcm16b_swb32,
TestConfig::default_pcm16b_swb32(),
"RTP payload type for PCM16b-swb32 (32 kHz)");
ABSL_FLAG(int,
pcm16b_swb48,
TestConfig::default_pcm16b_swb48(),
"RTP payload type for PCM16b-swb48 (48 kHz)");
ABSL_FLAG(int, g722, TestConfig::default_g722(), "RTP payload type for G.722");
ABSL_FLAG(int,
avt,
TestConfig::default_avt(),
"RTP payload type for AVT/DTMF (8 kHz)");
ABSL_FLAG(int,
avt_16,
TestConfig::default_avt_16(),
"RTP payload type for AVT/DTMF (16 kHz)");
ABSL_FLAG(int,
avt_32,
TestConfig::default_avt_32(),
"RTP payload type for AVT/DTMF (32 kHz)");
ABSL_FLAG(int,
avt_48,
TestConfig::default_avt_48(),
"RTP payload type for AVT/DTMF (48 kHz)");
ABSL_FLAG(int,
red,
TestConfig::default_red(),
"RTP payload type for redundant audio (RED)");
ABSL_FLAG(int,
cn_nb,
TestConfig::default_cn_nb(),
"RTP payload type for comfort noise (8 kHz)");
ABSL_FLAG(int,
cn_wb,
TestConfig::default_cn_wb(),
"RTP payload type for comfort noise (16 kHz)");
ABSL_FLAG(int,
cn_swb32,
TestConfig::default_cn_swb32(),
"RTP payload type for comfort noise (32 kHz)");
ABSL_FLAG(int,
cn_swb48,
TestConfig::default_cn_swb48(),
"RTP payload type for comfort noise (48 kHz)");
ABSL_FLAG(std::string,
replacement_audio_file,
"",
"A PCM file that will be used to populate dummy"
" RTP packets");
ABSL_FLAG(std::string,
ssrc,
"",
"Only use packets with this SSRC (decimal or hex, the latter "
"starting with 0x)");
ABSL_FLAG(int,
audio_level,
TestConfig::default_audio_level(),
"Extension ID for audio level (RFC 6464)");
ABSL_FLAG(int,
abs_send_time,
TestConfig::default_abs_send_time(),
"Extension ID for absolute sender time");
ABSL_FLAG(int,
transport_seq_no,
TestConfig::default_transport_seq_no(),
"Extension ID for transport sequence number");
ABSL_FLAG(int,
video_content_type,
TestConfig::default_video_content_type(),
"Extension ID for video content type");
ABSL_FLAG(int,
video_timing,
TestConfig::default_video_timing(),
"Extension ID for video timing");
ABSL_FLAG(std::string,
output_files_base_name,
"",
"Custom path used as prefix for the output files - i.e., "
"matlab plot, python plot, text log.");
ABSL_FLAG(bool,
matlabplot,
false,
"Generates a matlab script for plotting the delay profile");
ABSL_FLAG(bool,
pythonplot,
false,
"Generates a python script for plotting the delay profile");
ABSL_FLAG(bool,
textlog,
false,
"Generates a text log describing the simulation on a "
"step-by-step basis.");
ABSL_FLAG(bool, concealment_events, false, "Prints concealment events");
ABSL_FLAG(int,
max_nr_packets_in_buffer,
TestConfig::default_max_nr_packets_in_buffer(),
"Maximum allowed number of packets in the buffer");
ABSL_FLAG(bool,
enable_fast_accelerate,
false,
"Enables jitter buffer fast accelerate");
namespace {
// Parses the input string for a valid SSRC (at the start of the string). If a
// valid SSRC is found, it is written to the output variable `ssrc`, and true
// is returned. Otherwise, false is returned. An empty string counts as valid
// (no filter) and leaves `ssrc` untouched.
bool ParseSsrc(absl::string_view str, uint32_t* ssrc) {
  if (str.empty())
    return true;
  // A leading "0x" or "0X" selects hexadecimal; otherwise parse as decimal.
  const bool is_hex =
      (str.compare(0, 2, "0x") == 0) || (str.compare(0, 2, "0X") == 0);
  const int base = is_hex ? 16 : 10;
  const std::string input = std::string(str);
  errno = 0;
  char* first_unparsed;
  unsigned long value = strtoul(input.c_str(), &first_unparsed, base);  // NOLINT
  if (value == ULONG_MAX && errno == ERANGE)
    return false;  // Value out of range for unsigned long.
  if (sizeof(unsigned long) > sizeof(uint32_t) && value > 0xFFFFFFFF)  // NOLINT
    return false;  // Value out of range for uint32_t.
  if (first_unparsed - input.c_str() < static_cast<ptrdiff_t>(str.length()))
    return false;  // Part of the string was not parsed.
  *ssrc = static_cast<uint32_t>(value);
  return true;
}
// Returns true when `value` is a legal RTP header-extension ID (1..255);
// otherwise prints an error message and returns false.
static bool ValidateExtensionId(int value) {
  if (value < 1 || value > 255) {
    printf("Extension ID must be between 1 and 255, not %d\n",
           static_cast<int>(value));
    return false;
  }
  return true;
}
// Flag validators.
// Returns true when `value` is a legal RTP payload type (0..127); otherwise
// prints an error message and returns false.
bool ValidatePayloadType(int value) {
  if (value < 0 || value > 127) {
    printf("Payload type must be between 0 and 127, not %d\n",
           static_cast<int>(value));
    return false;
  }
  return true;
}
// Returns true when `str` is empty or parses as a valid SSRC; otherwise
// prints an error message and returns false.
bool ValidateSsrcValue(absl::string_view str) {
  uint32_t parsed_ssrc;
  if (!ParseSsrc(str, &parsed_ssrc)) {
    printf("Invalid SSRC: %.*s\n", static_cast<int>(str.size()), str.data());
    return false;
  }
  return true;
}
// Prints one "codec name: payload type" line of the codec mapping.
void PrintCodecMappingEntry(absl::string_view codec, int flag) {
  std::cout << codec << ": " << flag << std::endl;
}
// Prints the full mapping from codec name to RTP payload type, as configured
// by the command-line flags (or their defaults).
void PrintCodecMapping() {
  struct Entry {
    const char* codec;
    int payload_type;
  };
  const Entry entries[] = {
      {"PCM-u", absl::GetFlag(FLAGS_pcmu)},
      {"PCM-a", absl::GetFlag(FLAGS_pcma)},
      {"iLBC", absl::GetFlag(FLAGS_ilbc)},
      {"iSAC", absl::GetFlag(FLAGS_isac)},
      {"iSAC-swb (32 kHz)", absl::GetFlag(FLAGS_isac_swb)},
      {"Opus", absl::GetFlag(FLAGS_opus)},
      {"PCM16b-nb (8 kHz)", absl::GetFlag(FLAGS_pcm16b)},
      {"PCM16b-wb (16 kHz)", absl::GetFlag(FLAGS_pcm16b_wb)},
      {"PCM16b-swb32 (32 kHz)", absl::GetFlag(FLAGS_pcm16b_swb32)},
      {"PCM16b-swb48 (48 kHz)", absl::GetFlag(FLAGS_pcm16b_swb48)},
      {"G.722", absl::GetFlag(FLAGS_g722)},
      {"AVT/DTMF (8 kHz)", absl::GetFlag(FLAGS_avt)},
      {"AVT/DTMF (16 kHz)", absl::GetFlag(FLAGS_avt_16)},
      {"AVT/DTMF (32 kHz)", absl::GetFlag(FLAGS_avt_32)},
      {"AVT/DTMF (48 kHz)", absl::GetFlag(FLAGS_avt_48)},
      {"redundant audio (RED)", absl::GetFlag(FLAGS_red)},
      {"comfort noise (8 kHz)", absl::GetFlag(FLAGS_cn_nb)},
      {"comfort noise (16 kHz)", absl::GetFlag(FLAGS_cn_wb)},
      {"comfort noise (32 kHz)", absl::GetFlag(FLAGS_cn_swb32)},
      {"comfort noise (48 kHz)", absl::GetFlag(FLAGS_cn_swb48)},
  };
  for (const Entry& entry : entries) {
    PrintCodecMappingEntry(entry.codec, entry.payload_type);
  }
}
bool ValidateOutputFilesOptions(bool textlog,
bool plotting,
absl::string_view output_files_base_name,
absl::string_view output_audio_filename) {
bool output_files_base_name_specified = !output_files_base_name.empty();
if (!textlog && !plotting && output_files_base_name_specified) {
std::cout << "Error: --output_files_base_name cannot be used without at "
"least one of the following flags: --textlog, --matlabplot, "
"--pythonplot."
<< std::endl;
return false;
}
// Without `output_audio_filename`, `output_files_base_name` is required when
// plotting output files must be generated (in order to form a valid output
// file name).
if (output_audio_filename.empty() && plotting &&
!output_files_base_name_specified) {
std::cout << "Error: when no output audio file is specified and "
"--matlabplot and/or --pythonplot are used, "
"--output_files_base_name must be also used."
<< std::endl;
return false;
}
return true;
}
// Builds the name of an optional output file: `basename`, when non-empty,
// overrides `output_audio_filename` as the prefix, and `suffix` is appended.
// Returns nullopt when the output was not requested or no prefix is
// available (an error is printed in the latter case).
absl::optional<std::string> CreateOptionalOutputFileName(
    bool output_requested,
    absl::string_view basename,
    absl::string_view output_audio_filename,
    absl::string_view suffix) {
  if (!output_requested) {
    return absl::nullopt;
  }
  // `basename` overrides the automatic assignment from the audio file name.
  const absl::string_view prefix =
      !basename.empty() ? basename : output_audio_filename;
  if (prefix.empty()) {
    std::cout << "Error: invalid text log file parameters.";
    return absl::nullopt;
  }
  rtc::StringBuilder sb(prefix);
  sb << suffix;
  return sb.str();
}
} // namespace
// Entry point: validates all flags, builds a NetEqTestFactory::Config from
// them, and runs a NetEq simulation on the RTP dump given as args[1],
// optionally writing decoded audio to args[2].
int main(int argc, char* argv[]) {
  std::vector<char*> args = absl::ParseCommandLine(argc, argv);
  webrtc::test::NetEqTestFactory factory;
  std::string usage =
      "Tool for decoding an RTP dump file using NetEq.\n"
      "Example usage:\n"
      "./neteq_rtpplay input.rtp [output.{pcm, wav}]\n";
  // --codec_map only prints the payload-type mapping and exits.
  if (absl::GetFlag(FLAGS_codec_map)) {
    PrintCodecMapping();
    exit(0);
  }
  if (args.size() != 2 &&
      args.size() != 3) {  // The output audio file is optional.
    // Print usage information.
    std::cout << usage;
    exit(0);
  }
  const std::string output_audio_filename((args.size() == 3) ? args[2] : "");
  const std::string output_files_base_name(
      absl::GetFlag(FLAGS_output_files_base_name));
  // Validate every flag combination and value up front; any failure aborts.
  RTC_CHECK(ValidateOutputFilesOptions(
      absl::GetFlag(FLAGS_textlog),
      absl::GetFlag(FLAGS_matlabplot) || absl::GetFlag(FLAGS_pythonplot),
      output_files_base_name, output_audio_filename));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcmu)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcma)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_ilbc)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_isac)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_isac_swb)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_opus)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b_wb)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b_swb32)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_pcm16b_swb48)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_g722)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt_16)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt_32)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_avt_48)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_red)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_nb)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_wb)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_swb32)));
  RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_cn_swb48)));
  RTC_CHECK(ValidateSsrcValue(absl::GetFlag(FLAGS_ssrc)));
  RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_audio_level)));
  RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_abs_send_time)));
  RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_transport_seq_no)));
  RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_video_content_type)));
  RTC_CHECK(ValidateExtensionId(absl::GetFlag(FLAGS_video_timing)));
  // Make force_fieldtrials persistent string during entire program live as
  // absl::GetFlag creates temporary string and c_str() will point to
  // deallocated string.
  const std::string force_fieldtrials = absl::GetFlag(FLAGS_force_fieldtrials);
  webrtc::field_trial::InitFieldTrialsFromString(force_fieldtrials.c_str());
  // Copy every validated flag value into the test factory configuration.
  webrtc::test::NetEqTestFactory::Config config;
  config.pcmu = absl::GetFlag(FLAGS_pcmu);
  config.pcma = absl::GetFlag(FLAGS_pcma);
  config.ilbc = absl::GetFlag(FLAGS_ilbc);
  config.isac = absl::GetFlag(FLAGS_isac);
  config.isac_swb = absl::GetFlag(FLAGS_isac_swb);
  config.opus = absl::GetFlag(FLAGS_opus);
  config.pcm16b = absl::GetFlag(FLAGS_pcm16b);
  config.pcm16b_wb = absl::GetFlag(FLAGS_pcm16b_wb);
  config.pcm16b_swb32 = absl::GetFlag(FLAGS_pcm16b_swb32);
  config.pcm16b_swb48 = absl::GetFlag(FLAGS_pcm16b_swb48);
  config.g722 = absl::GetFlag(FLAGS_g722);
  config.avt = absl::GetFlag(FLAGS_avt);
  config.avt_16 = absl::GetFlag(FLAGS_avt_16);
  config.avt_32 = absl::GetFlag(FLAGS_avt_32);
  config.avt_48 = absl::GetFlag(FLAGS_avt_48);
  config.red = absl::GetFlag(FLAGS_red);
  config.cn_nb = absl::GetFlag(FLAGS_cn_nb);
  config.cn_wb = absl::GetFlag(FLAGS_cn_wb);
  config.cn_swb32 = absl::GetFlag(FLAGS_cn_swb32);
  config.cn_swb48 = absl::GetFlag(FLAGS_cn_swb48);
  config.replacement_audio_file = absl::GetFlag(FLAGS_replacement_audio_file);
  config.audio_level = absl::GetFlag(FLAGS_audio_level);
  config.abs_send_time = absl::GetFlag(FLAGS_abs_send_time);
  config.transport_seq_no = absl::GetFlag(FLAGS_transport_seq_no);
  config.video_content_type = absl::GetFlag(FLAGS_video_content_type);
  config.video_timing = absl::GetFlag(FLAGS_video_timing);
  config.matlabplot = absl::GetFlag(FLAGS_matlabplot);
  config.pythonplot = absl::GetFlag(FLAGS_pythonplot);
  config.concealment_events = absl::GetFlag(FLAGS_concealment_events);
  config.max_nr_packets_in_buffer =
      absl::GetFlag(FLAGS_max_nr_packets_in_buffer);
  config.enable_fast_accelerate = absl::GetFlag(FLAGS_enable_fast_accelerate);
  if (!output_audio_filename.empty()) {
    config.output_audio_filename = output_audio_filename;
  }
  config.textlog = absl::GetFlag(FLAGS_textlog);
  // Derive output script/log file names from the base name or the audio file
  // name (see CreateOptionalOutputFileName for the precedence rules).
  config.textlog_filename = CreateOptionalOutputFileName(
      absl::GetFlag(FLAGS_textlog), output_files_base_name,
      output_audio_filename, ".text_log.txt");
  config.plot_scripts_basename = CreateOptionalOutputFileName(
      absl::GetFlag(FLAGS_matlabplot) || absl::GetFlag(FLAGS_pythonplot),
      output_files_base_name, output_audio_filename, "");
  // Check if an SSRC value was provided.
  if (absl::GetFlag(FLAGS_ssrc).size() > 0) {
    uint32_t ssrc;
    RTC_CHECK(ParseSsrc(absl::GetFlag(FLAGS_ssrc), &ssrc))
        << "Flag verification has failed.";
    config.ssrc_filter = absl::make_optional(ssrc);
  }
  std::unique_ptr<webrtc::test::NetEqTest> test =
      factory.InitializeTestFromFile(/*input_filename=*/args[1],
                                     /*factory=*/nullptr, config);
  RTC_CHECK(test) << "ERROR: Unable to run test";
  test->Run();
  return 0;
}

View file

@ -0,0 +1,183 @@
#!/bin/bash
#
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
#
# Aliases.
BIN=$1
TEST_RTC_EVENT_LOG=$2
INPUT_PCM_FILE=$3

# Check setup: all three arguments must name existing files. Expansions are
# quoted so paths containing spaces or glob characters work.
if [ ! -f "$BIN" ]; then
  echo "Cannot find neteq_rtpplay binary."
  exit 99
fi
if [ ! -f "$TEST_RTC_EVENT_LOG" ]; then
  echo "Cannot find RTC event log file."
  exit 99
fi
if [ ! -f "$INPUT_PCM_FILE" ]; then
  echo "Cannot find PCM file."
  exit 99
fi

# Defines.
TMP_DIR=$(mktemp -d /tmp/tmp_XXXXXXXXXX)
PASS=0
FAIL=1
TEST_SUITE_RESULT=$PASS
# Prints the MD5 hex digest of the file named by $1. The expansion is quoted
# so file names with spaces or glob characters are handled correctly.
file_hash () {
  md5sum "$1" | awk '{ print $1 }'
}
# Reports a passing test.
test_passed () {
  echo PASS
}
# Reports a failing test with reason $1 and marks the whole suite as failed.
test_failed () {
  echo "FAIL: $1"
  TEST_SUITE_RESULT=$FAIL
}
# Passes iff the files $1 and $2 both exist and have identical MD5 checksums.
# All expansions are quoted so paths with spaces or glob characters work.
test_file_checksums_match () {
  if [ ! -f "$1" ] || [ ! -f "$2" ]; then
    test_failed "Cannot compare hash values: file(s) not found."
    return
  fi
  HASH1=$(file_hash "$1")
  HASH2=$(file_hash "$2")
  if [ "$HASH1" = "$HASH2" ]; then
    test_passed
  else
    test_failed "$1 differs from $2"
  fi
}
# Passes iff the file named by $1 exists. The expansion is quoted so paths
# with spaces or glob characters work.
test_file_exists () {
  if [ -f "$1" ]; then
    test_passed
  else
    test_failed "$1 does not exist"
  fi
}
# Passes iff the exit code passed as $1 equals 0.
test_exit_code_0 () {
  if [ $1 -eq 0 ]; then
    test_passed
  else
    test_failed "$1 did not return 0"
  fi
}
# Passes iff the exit code passed as $1 is non-zero.
test_exit_code_not_0 () {
  if [ $1 -eq 0 ]; then
    test_failed "$1 returned 0"
  else
    test_passed
  fi
}
# Generate test data. All path expansions are quoted so that arguments and
# temporary paths containing spaces or glob characters are handled correctly.

# Case 1. Pre-existing way.
CASE1_WAV=$TMP_DIR/case1.wav
"$BIN" "$TEST_RTC_EVENT_LOG" "$CASE1_WAV" \
    --replacement_audio_file "$INPUT_PCM_FILE" \
    --textlog --pythonplot --matlabplot \
    > "$TMP_DIR/case1.stdout" 2> /dev/null
CASE1_RETURN_CODE=$?
CASE1_TEXTLOG=$TMP_DIR/case1.wav.text_log.txt
CASE1_PYPLOT=$TMP_DIR/case1_wav.py
CASE1_MATPLOT=$TMP_DIR/case1_wav.m

# Case 2. No output files.
"$BIN" "$TEST_RTC_EVENT_LOG" --replacement_audio_file "$INPUT_PCM_FILE" \
    > "$TMP_DIR/case2.stdout" 2> /dev/null
CASE2_RETURN_CODE=$?

# Case 3. No output audio file.

# Case 3.1 Without --output_files_base_name (won't run).
"$BIN" "$TEST_RTC_EVENT_LOG" \
    --replacement_audio_file "$INPUT_PCM_FILE" \
    --textlog --pythonplot --matlabplot \
    &> /dev/null
CASE3_1_RETURN_CODE=$?

# Case 3.2 With --output_files_base_name (runs).
"$BIN" "$TEST_RTC_EVENT_LOG" \
    --replacement_audio_file "$INPUT_PCM_FILE" \
    --output_files_base_name "$TMP_DIR/case3_2" \
    --textlog --pythonplot --matlabplot \
    > "$TMP_DIR/case3_2.stdout" 2> /dev/null
CASE3_2_RETURN_CODE=$?
CASE3_2_TEXTLOG=$TMP_DIR/case3_2.text_log.txt
CASE3_2_PYPLOT=$TMP_DIR/case3_2.py
CASE3_2_MATPLOT=$TMP_DIR/case3_2.m

# Case 4. With output audio file and --output_files_base_name.
CASE4_WAV=$TMP_DIR/case4.wav
"$BIN" "$TEST_RTC_EVENT_LOG" "$TMP_DIR/case4.wav" \
    --replacement_audio_file "$INPUT_PCM_FILE" \
    --output_files_base_name "$TMP_DIR/case4" \
    --textlog --pythonplot --matlabplot \
    > "$TMP_DIR/case4.stdout" 2> /dev/null
CASE4_RETURN_CODE=$?
CASE4_TEXTLOG=$TMP_DIR/case4.text_log.txt
CASE4_PYPLOT=$TMP_DIR/case4.py
CASE4_MATPLOT=$TMP_DIR/case4.m

# Tests.
echo Check exit codes
test_exit_code_0 "$CASE1_RETURN_CODE"
test_exit_code_0 "$CASE2_RETURN_CODE"
test_exit_code_not_0 "$CASE3_1_RETURN_CODE"
test_exit_code_0 "$CASE3_2_RETURN_CODE"
test_exit_code_0 "$CASE4_RETURN_CODE"

echo Check that the expected output files exist
test_file_exists "$CASE1_TEXTLOG"
test_file_exists "$CASE3_2_TEXTLOG"
test_file_exists "$CASE4_TEXTLOG"
test_file_exists "$CASE1_PYPLOT"
test_file_exists "$CASE3_2_PYPLOT"
test_file_exists "$CASE4_PYPLOT"
test_file_exists "$CASE1_MATPLOT"
test_file_exists "$CASE3_2_MATPLOT"
test_file_exists "$CASE4_MATPLOT"

echo Check that the same WAV file is produced
test_file_checksums_match "$CASE1_WAV" "$CASE4_WAV"

echo Check that the same text log is produced
test_file_checksums_match "$CASE1_TEXTLOG" "$CASE3_2_TEXTLOG"
test_file_checksums_match "$CASE1_TEXTLOG" "$CASE4_TEXTLOG"

echo Check that the same python plot scripts is produced
test_file_checksums_match "$CASE1_PYPLOT" "$CASE3_2_PYPLOT"
test_file_checksums_match "$CASE1_PYPLOT" "$CASE4_PYPLOT"

echo Check that the same matlab plot scripts is produced
test_file_checksums_match "$CASE1_MATPLOT" "$CASE3_2_MATPLOT"
test_file_checksums_match "$CASE1_MATPLOT" "$CASE4_MATPLOT"

# Clean up
rm -fr "$TMP_DIR"

if [ "$TEST_SUITE_RESULT" -eq "$PASS" ]; then
  echo All tests passed.
  exit 0
else
  echo One or more tests failed.
  exit 1
fi

View file

@ -0,0 +1,139 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
#include <algorithm>
#include <numeric>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
namespace test {
// Formats the event as a single human-readable line.
std::string NetEqStatsGetter::ConcealmentEvent::ToString() const {
  // 256 bytes is ample for three labeled integers; SimpleStringBuilder
  // writes into this stack buffer without heap allocation.
  char ss_buf[256];
  rtc::SimpleStringBuilder ss(ss_buf);
  ss << "ConcealmentEvent duration_ms:" << duration_ms
     << " event_number:" << concealment_event_number
     << " time_from_previous_event_end_ms:" << time_from_previous_event_end_ms;
  return ss.str();
}
// `delay_analyzer` may be null; when given, the Before/AfterGetAudio
// callbacks are forwarded to it so delay plots can be produced.
NetEqStatsGetter::NetEqStatsGetter(
    std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer)
    : delay_analyzer_(std::move(delay_analyzer)) {}
// Forwards the pre-GetAudio callback to the delay analyzer, if one is set.
void NetEqStatsGetter::BeforeGetAudio(NetEq* neteq) {
  if (delay_analyzer_) {
    delay_analyzer_->BeforeGetAudio(neteq);
  }
}
// Samples NetEq statistics at most once per stats_query_interval_ms_, detects
// completed voice-concealment events from the lifetime statistics, and
// forwards the callback to the delay analyzer.
void NetEqStatsGetter::AfterGetAudio(int64_t time_now_ms,
                                     const AudioFrame& audio_frame,
                                     bool muted,
                                     NetEq* neteq) {
  // TODO(minyue): Get stats should better not be called as a call back after
  // get audio. It is called independently from get audio in practice.
  const auto lifetime_stat = neteq->GetLifetimeStatistics();
  // Record a snapshot on the first call and then at the query interval.
  if (last_stats_query_time_ms_ == 0 ||
      rtc::TimeDiff(time_now_ms, last_stats_query_time_ms_) >=
          stats_query_interval_ms_) {
    NetEqNetworkStatistics stats;
    RTC_CHECK_EQ(neteq->NetworkStatistics(&stats), 0);
    stats_.push_back(std::make_pair(time_now_ms, stats));
    lifetime_stats_.push_back(std::make_pair(time_now_ms, lifetime_stat));
    last_stats_query_time_ms_ = time_now_ms;
  }
  // Voice concealment excludes silent (comfort-noise-like) concealment.
  const auto voice_concealed_samples =
      lifetime_stat.concealed_samples - lifetime_stat.silent_concealed_samples;
  // A new concealment event has started AND the previous one concealed
  // actual voice samples.
  if (current_concealment_event_ != lifetime_stat.concealment_events &&
      voice_concealed_samples_until_last_event_ < voice_concealed_samples) {
    if (last_event_end_time_ms_ > 0) {
      // Do not account for the first event to avoid start of the call
      // skewing.
      ConcealmentEvent concealment_event;
      uint64_t last_event_voice_concealed_samples =
          voice_concealed_samples - voice_concealed_samples_until_last_event_;
      RTC_CHECK_GT(last_event_voice_concealed_samples, 0);
      // Convert the concealed sample count to milliseconds using the current
      // output sample rate.
      concealment_event.duration_ms = last_event_voice_concealed_samples /
                                      (audio_frame.sample_rate_hz_ / 1000);
      concealment_event.concealment_event_number = current_concealment_event_;
      concealment_event.time_from_previous_event_end_ms =
          time_now_ms - last_event_end_time_ms_;
      concealment_events_.emplace_back(concealment_event);
      voice_concealed_samples_until_last_event_ = voice_concealed_samples;
    }
    last_event_end_time_ms_ = time_now_ms;
    voice_concealed_samples_until_last_event_ = voice_concealed_samples;
    current_concealment_event_ = lifetime_stat.concealment_events;
  }
  if (delay_analyzer_) {
    delay_analyzer_->AfterGetAudio(time_now_ms, audio_frame, muted, neteq);
  }
}
double NetEqStatsGetter::AverageSpeechExpandRate() const {
double sum_speech_expand = std::accumulate(
stats_.begin(), stats_.end(), double{0.0},
[](double a, std::pair<int64_t, NetEqNetworkStatistics> b) {
return a + static_cast<double>(b.second.speech_expand_rate);
});
return sum_speech_expand / 16384.0 / stats_.size();
}
// Returns element-wise averages (plus min/max for the waiting times) over all
// collected NetEqNetworkStatistics snapshots. Rates reported by NetEq in Q14
// are converted to fractions in [0, 1]. Returns a default-initialized Stats
// when no snapshots have been collected, instead of dividing by zero.
// NOTE(review): packet_loss_rate and added_zero_samples are divided below but
// never accumulated in the fold, so they always end up 0; similarly
// min_waiting_time_ms starts at 0.0 and can never increase — confirm whether
// these fields are intentionally unused.
NetEqStatsGetter::Stats NetEqStatsGetter::AverageStats() const {
  if (stats_.empty()) {
    return Stats();
  }
  // Each snapshot is taken by const reference to avoid copying the (large)
  // NetEqNetworkStatistics struct on every accumulation step.
  Stats sum_stats = std::accumulate(
      stats_.begin(), stats_.end(), Stats(),
      [](Stats a, const std::pair<int64_t, NetEqNetworkStatistics>& bb) {
        const auto& b = bb.second;
        a.current_buffer_size_ms += b.current_buffer_size_ms;
        a.preferred_buffer_size_ms += b.preferred_buffer_size_ms;
        a.jitter_peaks_found += b.jitter_peaks_found;
        a.expand_rate += b.expand_rate / 16384.0;
        a.speech_expand_rate += b.speech_expand_rate / 16384.0;
        a.preemptive_rate += b.preemptive_rate / 16384.0;
        a.accelerate_rate += b.accelerate_rate / 16384.0;
        a.secondary_decoded_rate += b.secondary_decoded_rate / 16384.0;
        a.secondary_discarded_rate += b.secondary_discarded_rate / 16384.0;
        a.mean_waiting_time_ms += b.mean_waiting_time_ms;
        a.median_waiting_time_ms += b.median_waiting_time_ms;
        a.min_waiting_time_ms = std::min(
            a.min_waiting_time_ms, static_cast<double>(b.min_waiting_time_ms));
        a.max_waiting_time_ms = std::max(
            a.max_waiting_time_ms, static_cast<double>(b.max_waiting_time_ms));
        return a;
      });
  sum_stats.current_buffer_size_ms /= stats_.size();
  sum_stats.preferred_buffer_size_ms /= stats_.size();
  sum_stats.jitter_peaks_found /= stats_.size();
  sum_stats.packet_loss_rate /= stats_.size();
  sum_stats.expand_rate /= stats_.size();
  sum_stats.speech_expand_rate /= stats_.size();
  sum_stats.preemptive_rate /= stats_.size();
  sum_stats.accelerate_rate /= stats_.size();
  sum_stats.secondary_decoded_rate /= stats_.size();
  sum_stats.secondary_discarded_rate /= stats_.size();
  sum_stats.added_zero_samples /= stats_.size();
  sum_stats.mean_waiting_time_ms /= stats_.size();
  sum_stats.median_waiting_time_ms /= stats_.size();
  return sum_stats;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,106 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_GETTER_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_GETTER_H_
#include <memory>
#include <string>
#include <vector>
#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
namespace webrtc {
namespace test {
// Collects NetEq network and lifetime statistics during a simulation, detects
// voice-concealment events, and optionally feeds a NetEqDelayAnalyzer.
class NetEqStatsGetter : public NetEqGetAudioCallback {
 public:
  // This struct is a replica of webrtc::NetEqNetworkStatistics, but with all
  // values stored in double precision.
  struct Stats {
    double current_buffer_size_ms = 0.0;
    double preferred_buffer_size_ms = 0.0;
    double jitter_peaks_found = 0.0;
    double packet_loss_rate = 0.0;
    double expand_rate = 0.0;
    double speech_expand_rate = 0.0;
    double preemptive_rate = 0.0;
    double accelerate_rate = 0.0;
    double secondary_decoded_rate = 0.0;
    double secondary_discarded_rate = 0.0;
    double clockdrift_ppm = 0.0;
    double added_zero_samples = 0.0;
    double mean_waiting_time_ms = 0.0;
    double median_waiting_time_ms = 0.0;
    double min_waiting_time_ms = 0.0;
    double max_waiting_time_ms = 0.0;
  };
  // One contiguous stretch of voice concealment, as detected from the
  // lifetime statistics.
  struct ConcealmentEvent {
    uint64_t duration_ms;
    size_t concealment_event_number;
    int64_t time_from_previous_event_end_ms;
    std::string ToString() const;
  };
  // Takes a pointer to another callback object, which will be invoked after
  // this object finishes. This does not transfer ownership, and null is a
  // valid value.
  explicit NetEqStatsGetter(std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer);
  // Sets how often (in ms) statistics snapshots are recorded.
  void set_stats_query_interval_ms(int64_t stats_query_interval_ms) {
    stats_query_interval_ms_ = stats_query_interval_ms;
  }
  void BeforeGetAudio(NetEq* neteq) override;
  void AfterGetAudio(int64_t time_now_ms,
                     const AudioFrame& audio_frame,
                     bool muted,
                     NetEq* neteq) override;
  double AverageSpeechExpandRate() const;
  // May return null when no delay analyzer was supplied at construction.
  NetEqDelayAnalyzer* delay_analyzer() const { return delay_analyzer_.get(); }
  const std::vector<ConcealmentEvent>& concealment_events() const {
    // Do not account for the last concealment event to avoid potential end
    // call skewing.
    return concealment_events_;
  }
  // Time-stamped statistics snapshots, in collection order.
  const std::vector<std::pair<int64_t, NetEqNetworkStatistics>>* stats() const {
    return &stats_;
  }
  const std::vector<std::pair<int64_t, NetEqLifetimeStatistics>>*
  lifetime_stats() const {
    return &lifetime_stats_;
  }
  Stats AverageStats() const;
 private:
  std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer_;
  int64_t stats_query_interval_ms_ = 1000;
  int64_t last_stats_query_time_ms_ = 0;
  std::vector<std::pair<int64_t, NetEqNetworkStatistics>> stats_;
  std::vector<std::pair<int64_t, NetEqLifetimeStatistics>> lifetime_stats_;
  // State for concealment-event detection (see AfterGetAudio).
  size_t current_concealment_event_ = 1;
  uint64_t voice_concealed_samples_until_last_event_ = 0;
  std::vector<ConcealmentEvent> concealment_events_;
  int64_t last_event_end_time_ms_ = 0;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_GETTER_H_

View file

@ -0,0 +1,113 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_stats_plotter.h"
#include <inttypes.h>
#include <stdio.h>
#include <utility>
#include "absl/strings/string_view.h"
namespace webrtc {
namespace test {
// Creates a plotter that optionally writes Matlab/Python plot scripts (file
// names derived from `base_file_name`) and optionally prints concealment
// events. A delay analyzer is only instantiated when a plot was requested;
// the stats getter is always created.
NetEqStatsPlotter::NetEqStatsPlotter(bool make_matlab_plot,
                                     bool make_python_plot,
                                     bool show_concealment_events,
                                     absl::string_view base_file_name)
    : make_matlab_plot_(make_matlab_plot),
      make_python_plot_(make_python_plot),
      show_concealment_events_(show_concealment_events),
      base_file_name_(base_file_name) {
  std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer;
  if (make_matlab_plot || make_python_plot) {
    delay_analyzer.reset(new NetEqDelayAnalyzer);
  }
  stats_getter_.reset(new NetEqStatsGetter(std::move(delay_analyzer)));
}
namespace {
// Returns `base_file_name` with every '.' replaced by '_', so that the result
// can be used as a Matlab/Python script base name. Extracted to avoid the
// previous code duplication between the Matlab and Python branches.
std::string ScriptBaseName(const std::string& base_file_name) {
  std::string name = base_file_name;
  std::replace(name.begin(), name.end(), '.', '_');
  return name;
}
}  // namespace

// Prints the accumulated simulation statistics to stdout and, if requested,
// writes Matlab/Python delay-plot scripts. `simulation_time_ms` is the total
// simulated output duration.
void NetEqStatsPlotter::SimulationEnded(int64_t simulation_time_ms) {
  if (make_matlab_plot_) {
    const std::string matlab_script_name = ScriptBaseName(base_file_name_);
    printf("Creating Matlab plot script %s.m\n", matlab_script_name.c_str());
    stats_getter_->delay_analyzer()->CreateMatlabScript(matlab_script_name +
                                                        ".m");
  }
  if (make_python_plot_) {
    const std::string python_script_name = ScriptBaseName(base_file_name_);
    printf("Creating Python plot script %s.py\n", python_script_name.c_str());
    stats_getter_->delay_analyzer()->CreatePythonScript(python_script_name +
                                                        ".py");
  }

  printf("Simulation statistics:\n");
  printf("  output duration: %" PRId64 " ms\n", simulation_time_ms);
  auto stats = stats_getter_->AverageStats();
  printf("  packet_loss_rate: %f %%\n", 100.0 * stats.packet_loss_rate);
  printf("  expand_rate: %f %%\n", 100.0 * stats.expand_rate);
  printf("  speech_expand_rate: %f %%\n", 100.0 * stats.speech_expand_rate);
  printf("  preemptive_rate: %f %%\n", 100.0 * stats.preemptive_rate);
  printf("  accelerate_rate: %f %%\n", 100.0 * stats.accelerate_rate);
  printf("  secondary_decoded_rate: %f %%\n",
         100.0 * stats.secondary_decoded_rate);
  printf("  secondary_discarded_rate: %f %%\n",
         100.0 * stats.secondary_discarded_rate);
  printf("  clockdrift_ppm: %f ppm\n", stats.clockdrift_ppm);
  printf("  mean_waiting_time_ms: %f ms\n", stats.mean_waiting_time_ms);
  printf("  median_waiting_time_ms: %f ms\n", stats.median_waiting_time_ms);
  printf("  min_waiting_time_ms: %f ms\n", stats.min_waiting_time_ms);
  printf("  max_waiting_time_ms: %f ms\n", stats.max_waiting_time_ms);
  printf("  current_buffer_size_ms: %f ms\n", stats.current_buffer_size_ms);
  printf("  preferred_buffer_size_ms: %f ms\n", stats.preferred_buffer_size_ms);
  if (show_concealment_events_) {
    printf(" concealment_events_ms:\n");
    // Take each event by const reference; the previous by-value loop copied
    // every ConcealmentEvent.
    for (const auto& concealment_event : stats_getter_->concealment_events())
      printf("%s\n", concealment_event.ToString().c_str());
    printf(" end of concealment_events_ms\n");
  }

  const auto lifetime_stats_vector = stats_getter_->lifetime_stats();
  if (!lifetime_stats_vector->empty()) {
    // Lifetime stats are cumulative; the last sample holds the totals.
    auto lifetime_stats = lifetime_stats_vector->back().second;
    printf("  total_samples_received: %" PRIu64 "\n",
           lifetime_stats.total_samples_received);
    printf("  concealed_samples: %" PRIu64 "\n",
           lifetime_stats.concealed_samples);
    printf("  concealment_events: %" PRIu64 "\n",
           lifetime_stats.concealment_events);
    printf("  delayed_packet_outage_samples: %" PRIu64 "\n",
           lifetime_stats.delayed_packet_outage_samples);
    printf("  delayed_packet_outage_events: %" PRIu64 "\n",
           lifetime_stats.delayed_packet_outage_events);
    printf("  num_interruptions: %d\n", lifetime_stats.interruption_count);
    printf("  sum_interruption_length_ms: %d ms\n",
           lifetime_stats.total_interruption_duration_ms);
    // NOTE(review): if simulation_time_ms is 0 this prints inf/nan; presumably
    // the simulation always has nonzero duration here — confirm.
    printf("  interruption_ratio: %f\n",
           static_cast<double>(lifetime_stats.total_interruption_duration_ms) /
               simulation_time_ms);
    printf("  removed_samples_for_acceleration: %" PRIu64 "\n",
           lifetime_stats.removed_samples_for_acceleration);
    printf("  inserted_samples_for_deceleration: %" PRIu64 "\n",
           lifetime_stats.inserted_samples_for_deceleration);
    printf("  generated_noise_samples: %" PRIu64 "\n",
           lifetime_stats.generated_noise_samples);
    printf("  packets_discarded: %" PRIu64 "\n",
           lifetime_stats.packets_discarded);
  }
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,47 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_PLOTTER_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_PLOTTER_H_
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
namespace webrtc {
namespace test {
// Collects NetEq statistics during a simulation (via the owned
// NetEqStatsGetter) and, when the simulation ends, prints them and optionally
// writes Matlab/Python delay-plot scripts.
class NetEqStatsPlotter : public NetEqSimulationEndedCallback {
 public:
  // `make_matlab_plot`/`make_python_plot` select which plot scripts to write
  // on simulation end; `show_concealment_events` enables printing of
  // per-event concealment details; `base_file_name` is the base name used for
  // the generated scripts ('.' characters are replaced by '_').
  NetEqStatsPlotter(bool make_matlab_plot,
                    bool make_python_plot,
                    bool show_concealment_events,
                    absl::string_view base_file_name);

  // NetEqSimulationEndedCallback implementation. Prints the collected
  // statistics and writes the requested plot scripts.
  void SimulationEnded(int64_t simulation_time_ms) override;

  // Returns the stats getter owned by this plotter; never null (it is
  // created unconditionally in the constructor).
  NetEqStatsGetter* stats_getter() { return stats_getter_.get(); }

 private:
  std::unique_ptr<NetEqStatsGetter> stats_getter_;
  const bool make_matlab_plot_;
  const bool make_python_plot_;
  const bool show_concealment_events_;
  const std::string base_file_name_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_STATS_PLOTTER_H_

View file

@ -0,0 +1,341 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include <iomanip>
#include <iostream>
#include "modules/audio_coding/neteq/default_neteq_factory.h"
#include "modules/rtp_rtcp/source/byte_io.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
namespace test {
namespace {
// Translates a simulator-level Action into the corresponding NetEq operation.
// An empty optional maps to an empty optional, meaning "let NetEq decide".
absl::optional<NetEq::Operation> ActionToOperations(
    absl::optional<NetEqSimulator::Action> a) {
  if (!a) {
    return absl::nullopt;
  }
  switch (*a) {
    case NetEqSimulator::Action::kAccelerate:
      return absl::make_optional(NetEq::Operation::kAccelerate);
    case NetEqSimulator::Action::kExpand:
      return absl::make_optional(NetEq::Operation::kExpand);
    case NetEqSimulator::Action::kNormal:
      return absl::make_optional(NetEq::Operation::kNormal);
    case NetEqSimulator::Action::kPreemptiveExpand:
      return absl::make_optional(NetEq::Operation::kPreemptiveExpand);
  }
  // The switch above covers every enumerator, but an out-of-range enum value
  // would previously make control fall off the end of this value-returning
  // function, which is undefined behavior. Treat such values as "no explicit
  // operation requested".
  return absl::nullopt;
}
// Builds a NetEq instance using the default factory. Used when the caller did
// not supply a custom NetEqFactory.
std::unique_ptr<NetEq> CreateNetEq(
    const NetEq::Config& config,
    Clock* clock,
    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
  DefaultNetEqFactory default_factory;
  return default_factory.CreateNetEq(config, decoder_factory, clock);
}
} // namespace
// Logs the failing packet to stderr before aborting, to ease post-mortem
// debugging of the simulation input.
void DefaultNetEqTestErrorCallback::OnInsertPacketError(
    const NetEqInput::PacketData& packet) {
  std::cerr << "InsertPacket returned an error." << std::endl
            << "Packet data: " << packet.ToString() << std::endl;
  RTC_FATAL();
}
// Aborts the simulation: there is no sensible way to continue after a
// GetAudio failure.
void DefaultNetEqTestErrorCallback::OnGetAudioError() {
  std::cerr << "GetAudio returned an error." << std::endl;
  RTC_FATAL();
}
// Constructs the test harness. The simulated clock is seeded from the input's
// first event time, and the NetEq instance is created either through the
// injected `neteq_factory` or, when it is null, through the default factory.
NetEqTest::NetEqTest(const NetEq::Config& config,
                     rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
                     const DecoderMap& codecs,
                     std::unique_ptr<std::ofstream> text_log,
                     NetEqFactory* neteq_factory,
                     std::unique_ptr<NetEqInput> input,
                     std::unique_ptr<AudioSink> output,
                     Callbacks callbacks)
    // Initialization order matters: `input_` must be set up before `clock_`
    // (seeded from input_->NextEventTime()) and before `neteq_` (which takes
    // a pointer to clock_). The member declaration order in the header
    // guarantees this sequence.
    : input_(std::move(input)),
      clock_(Timestamp::Millis(input_->NextEventTime().value_or(0))),
      neteq_(neteq_factory
                 ? neteq_factory->CreateNetEq(config, decoder_factory, &clock_)
                 : CreateNetEq(config, &clock_, decoder_factory)),
      output_(std::move(output)),
      callbacks_(callbacks),
      sample_rate_hz_(config.sample_rate_hz),
      text_log_(std::move(text_log)) {
  // Muted state is deliberately unsupported; see also the RTC_CHECK(!muted)
  // in RunToNextGetAudio.
  RTC_CHECK(!config.enable_muted_state)
      << "The code does not handle enable_muted_state";
  RegisterDecoders(codecs);
}

NetEqTest::~NetEqTest() = default;
int64_t NetEqTest::Run() {
int64_t simulation_time = 0;
SimulationStepResult step_result;
do {
step_result = RunToNextGetAudio();
simulation_time += step_result.simulation_step_ms;
} while (!step_result.is_simulation_finished);
if (callbacks_.simulation_ended_callback) {
callbacks_.simulation_ended_callback->SimulationEnded(simulation_time);
}
return simulation_time;
}
// Advances the simulation until exactly one GetAudio event has been processed
// (inserting any packets and applying any minimum-delay updates that come due
// first), or until the input is exhausted. Returns the step duration and
// whether the simulation has finished.
NetEqTest::SimulationStepResult NetEqTest::RunToNextGetAudio() {
  SimulationStepResult result;
  const int64_t start_time_ms = *input_->NextEventTime();
  int64_t time_now_ms = clock_.CurrentTime().ms();
  // Packet inter-arrival times are reported per step; reset them here.
  current_state_.packet_iat_ms.clear();
  while (!input_->ended()) {
    // Advance time to next event.
    RTC_DCHECK(input_->NextEventTime());
    clock_.AdvanceTimeMilliseconds(*input_->NextEventTime() - time_now_ms);
    time_now_ms = *input_->NextEventTime();
    // Check if it is time to insert packet.
    if (input_->NextPacketTime() && time_now_ms >= *input_->NextPacketTime()) {
      std::unique_ptr<NetEqInput::PacketData> packet_data = input_->PopPacket();
      RTC_CHECK(packet_data);
      const size_t payload_data_length =
          packet_data->payload.size() - packet_data->header.paddingLength;
      if (payload_data_length != 0) {
        // Regular packet: insert and report errors through the callback.
        int error = neteq_->InsertPacket(
            packet_data->header,
            rtc::ArrayView<const uint8_t>(packet_data->payload));
        if (error != NetEq::kOK && callbacks_.error_callback) {
          callbacks_.error_callback->OnInsertPacketError(*packet_data);
        }
        if (callbacks_.post_insert_packet) {
          callbacks_.post_insert_packet->AfterInsertPacket(*packet_data,
                                                           neteq_.get());
        }
      } else {
        // Padding-only packet: NetEq only needs the header.
        neteq_->InsertEmptyPacket(packet_data->header);
      }
      // Record the inter-arrival time relative to the previous packet.
      if (last_packet_time_ms_) {
        current_state_.packet_iat_ms.push_back(time_now_ms -
                                               *last_packet_time_ms_);
      }
      if (text_log_) {
        const auto ops_state = neteq_->GetOperationsAndState();
        // -1 is printed when there is no previous packet to diff against.
        const auto delta_wallclock =
            last_packet_time_ms_ ? (time_now_ms - *last_packet_time_ms_) : -1;
        const auto delta_timestamp =
            last_packet_timestamp_
                ? (static_cast<int64_t>(packet_data->header.timestamp) -
                   *last_packet_timestamp_) *
                      1000 / sample_rate_hz_
                : -1;
        // NOTE(review): a 12-byte payload is presumably the replacement
        // encoding, where bytes 8..11 carry the original packet size — confirm
        // against NetEqReplacementInput/FakeDecodeFromFile.
        const auto packet_size_bytes =
            packet_data->payload.size() == 12
                ? ByteReader<uint32_t>::ReadLittleEndian(
                      &packet_data->payload[8])
                : -1;
        *text_log_ << "Packet - wallclock: " << std::setw(5) << time_now_ms
                   << ", delta wc: " << std::setw(4) << delta_wallclock
                   << ", seq_no: " << packet_data->header.sequenceNumber
                   << ", timestamp: " << std::setw(10)
                   << packet_data->header.timestamp
                   << ", delta ts: " << std::setw(4) << delta_timestamp
                   << ", size: " << std::setw(5) << packet_size_bytes
                   << ", frame size: " << std::setw(3)
                   << ops_state.current_frame_size_ms
                   << ", buffer size: " << std::setw(4)
                   << ops_state.current_buffer_size_ms << std::endl;
      }
      // NOTE(review): time_now_ms is int64_t but stored in an absl::optional
      // <int> here — presumably safe for test-length wallclocks; confirm.
      last_packet_time_ms_ = absl::make_optional<int>(time_now_ms);
      last_packet_timestamp_ =
          absl::make_optional<uint32_t>(packet_data->header.timestamp);
    }
    // Apply any base-minimum-delay update that has come due.
    if (input_->NextSetMinimumDelayInfo().has_value() &&
        time_now_ms >= input_->NextSetMinimumDelayInfo().value().timestamp_ms) {
      neteq_->SetBaseMinimumDelayMs(
          input_->NextSetMinimumDelayInfo().value().delay_ms);
      input_->AdvanceSetMinimumDelay();
    }
    // Check if it is time to get output audio.
    if (input_->NextOutputEventTime() &&
        time_now_ms >= *input_->NextOutputEventTime()) {
      if (callbacks_.get_audio_callback) {
        callbacks_.get_audio_callback->BeforeGetAudio(neteq_.get());
      }
      AudioFrame out_frame;
      bool muted;
      // The pending action (if any) is consumed by this GetAudio call.
      int error = neteq_->GetAudio(&out_frame, &muted, nullptr,
                                   ActionToOperations(next_action_));
      next_action_ = absl::nullopt;
      RTC_CHECK(!muted) << "The code does not handle enable_muted_state";
      if (error != NetEq::kOK) {
        if (callbacks_.error_callback) {
          callbacks_.error_callback->OnGetAudioError();
        }
      } else {
        // Track the (possibly changed) output sample rate for timestamp math.
        sample_rate_hz_ = out_frame.sample_rate_hz_;
      }
      if (callbacks_.get_audio_callback) {
        callbacks_.get_audio_callback->AfterGetAudio(time_now_ms, out_frame,
                                                     muted, neteq_.get());
      }
      if (output_) {
        RTC_CHECK(output_->WriteArray(
            out_frame.data(),
            out_frame.samples_per_channel_ * out_frame.num_channels_));
      }
      input_->AdvanceOutputEvent();
      result.simulation_step_ms =
          input_->NextEventTime().value_or(time_now_ms) - start_time_ms;
      const auto operations_state = neteq_->GetOperationsAndState();
      current_state_.current_delay_ms = operations_state.current_buffer_size_ms;
      current_state_.packet_size_ms = operations_state.current_frame_size_ms;
      current_state_.next_packet_available =
          operations_state.next_packet_available;
      current_state_.packet_buffer_flushed =
          operations_state.packet_buffer_flushes >
          prev_ops_state_.packet_buffer_flushes;
      // TODO(ivoc): Add more accurate reporting by tracking the origin of
      // samples in the sync buffer.
      result.action_times_ms[Action::kExpand] = 0;
      result.action_times_ms[Action::kAccelerate] = 0;
      result.action_times_ms[Action::kPreemptiveExpand] = 0;
      result.action_times_ms[Action::kNormal] = 0;
      // Attribute the whole 10 ms frame to a single action, based on the
      // frame's speech type and the deltas in the operations counters.
      if (out_frame.speech_type_ == AudioFrame::SpeechType::kPLC ||
          out_frame.speech_type_ == AudioFrame::SpeechType::kPLCCNG) {
        // Consider the whole frame to be the result of expansion.
        result.action_times_ms[Action::kExpand] = 10;
      } else if (operations_state.accelerate_samples -
                     prev_ops_state_.accelerate_samples >
                 0) {
        // Consider the whole frame to be the result of acceleration.
        result.action_times_ms[Action::kAccelerate] = 10;
      } else if (operations_state.preemptive_samples -
                     prev_ops_state_.preemptive_samples >
                 0) {
        // Consider the whole frame to be the result of preemptive expansion.
        result.action_times_ms[Action::kPreemptiveExpand] = 10;
      } else {
        // Consider the whole frame to be the result of normal playout.
        result.action_times_ms[Action::kNormal] = 10;
      }
      auto lifetime_stats = LifetimeStats();
      if (text_log_) {
        const bool plc =
            (out_frame.speech_type_ == AudioFrame::SpeechType::kPLC) ||
            (out_frame.speech_type_ == AudioFrame::SpeechType::kPLCCNG);
        const bool cng = out_frame.speech_type_ == AudioFrame::SpeechType::kCNG;
        // Voice concealment occurred if the non-silent portion of concealed
        // samples grew since the previous GetAudio.
        const bool voice_concealed =
            (lifetime_stats.concealed_samples -
             lifetime_stats.silent_concealed_samples) >
            (prev_lifetime_stats_.concealed_samples -
             prev_lifetime_stats_.silent_concealed_samples);
        *text_log_ << "GetAudio - wallclock: " << std::setw(5) << time_now_ms
                   << ", delta wc: " << std::setw(4)
                   << (input_->NextEventTime().value_or(time_now_ms) -
                       start_time_ms)
                   << ", CNG: " << cng << ", PLC: " << plc
                   << ", voice concealed: " << voice_concealed
                   << ", buffer size: " << std::setw(4)
                   << current_state_.current_delay_ms << std::endl;
        if (lifetime_stats.packets_discarded >
            prev_lifetime_stats_.packets_discarded) {
          *text_log_ << "Discarded "
                     << (lifetime_stats.packets_discarded -
                         prev_lifetime_stats_.packets_discarded)
                     << " primary packets." << std::endl;
        }
        if (operations_state.packet_buffer_flushes >
            prev_ops_state_.packet_buffer_flushes) {
          *text_log_ << "Flushed packet buffer "
                     << (operations_state.packet_buffer_flushes -
                         prev_ops_state_.packet_buffer_flushes)
                     << " times." << std::endl;
        }
      }
      prev_lifetime_stats_ = lifetime_stats;
      // The simulation is done when there is nothing left to feed or decode.
      const bool no_more_packets_to_decode =
          !input_->NextPacketTime() && !operations_state.next_packet_available;
      result.is_simulation_finished =
          no_more_packets_to_decode || input_->ended();
      prev_ops_state_ = operations_state;
      // Exactly one GetAudio has been processed; return to the caller.
      return result;
    }
  }
  // Input ended before any GetAudio event occurred in this step.
  result.simulation_step_ms =
      input_->NextEventTime().value_or(time_now_ms) - start_time_ms;
  result.is_simulation_finished = true;
  return result;
}
// Stores the action to request from NetEq; it is consumed (and cleared) by
// the next GetAudio call in RunToNextGetAudio.
void NetEqTest::SetNextAction(NetEqTest::Action next_operation) {
  next_action_.emplace(next_operation);
}
// Returns (by value) the state snapshot captured during the most recent
// simulation step.
NetEqTest::NetEqState NetEqTest::GetNetEqState() {
  return current_state_;
}
// Queries NetEq for its current network statistics; a failing query aborts
// the test.
NetEqNetworkStatistics NetEqTest::SimulationStats() {
  NetEqNetworkStatistics network_stats;
  const int error = neteq_->NetworkStatistics(&network_stats);
  RTC_CHECK_EQ(error, 0);
  return network_stats;
}
// Returns NetEq's cumulative lifetime statistics, queried directly from the
// NetEq instance.
NetEqLifetimeStatistics NetEqTest::LifetimeStats() const {
  return neteq_->GetLifetimeStatistics();
}
// Returns the payload-type -> codec mapping conventionally used by the NetEq
// test tools: the static types from RFC 3551 plus dynamic types for L16,
// telephone-event, RED, CN and (when compiled in) iLBC and Opus.
NetEqTest::DecoderMap NetEqTest::StandardDecoderMap() {
  DecoderMap codecs;
  codecs.emplace(0, SdpAudioFormat("pcmu", 8000, 1));
  codecs.emplace(8, SdpAudioFormat("pcma", 8000, 1));
#ifdef WEBRTC_CODEC_ILBC
  codecs.emplace(102, SdpAudioFormat("ilbc", 8000, 1));
#endif
#ifdef WEBRTC_CODEC_OPUS
  codecs.emplace(111, SdpAudioFormat("opus", 48000, 2));
#endif
  codecs.emplace(93, SdpAudioFormat("l16", 8000, 1));
  codecs.emplace(94, SdpAudioFormat("l16", 16000, 1));
  codecs.emplace(95, SdpAudioFormat("l16", 32000, 1));
  codecs.emplace(96, SdpAudioFormat("l16", 48000, 1));
  codecs.emplace(9, SdpAudioFormat("g722", 8000, 1));
  codecs.emplace(106, SdpAudioFormat("telephone-event", 8000, 1));
  codecs.emplace(114, SdpAudioFormat("telephone-event", 16000, 1));
  codecs.emplace(115, SdpAudioFormat("telephone-event", 32000, 1));
  codecs.emplace(116, SdpAudioFormat("telephone-event", 48000, 1));
  codecs.emplace(117, SdpAudioFormat("red", 8000, 1));
  codecs.emplace(13, SdpAudioFormat("cn", 8000, 1));
  codecs.emplace(98, SdpAudioFormat("cn", 16000, 1));
  codecs.emplace(99, SdpAudioFormat("cn", 32000, 1));
  codecs.emplace(100, SdpAudioFormat("cn", 48000, 1));
  return codecs;
}
void NetEqTest::RegisterDecoders(const DecoderMap& codecs) {
for (const auto& c : codecs) {
RTC_CHECK(neteq_->RegisterPayloadType(c.first, c.second))
<< "Cannot register " << c.second.name << " to payload type "
<< c.first;
}
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,129 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/neteq/neteq.h"
#include "api/neteq/neteq_factory.h"
#include "api/test/neteq_simulator.h"
#include "modules/audio_coding/neteq/tools/audio_sink.h"
#include "modules/audio_coding/neteq/tools/neteq_input.h"
#include "system_wrappers/include/clock.h"
namespace webrtc {
namespace test {
// Interface through which the simulation reports errors. The default
// implementations are no-ops, so implementers can override only the events
// they care about.
class NetEqTestErrorCallback {
 public:
  virtual ~NetEqTestErrorCallback() = default;
  // Called when NetEq::InsertPacket returns an error for `packet`.
  virtual void OnInsertPacketError(const NetEqInput::PacketData& packet) {}
  // Called when NetEq::GetAudio returns an error.
  virtual void OnGetAudioError() {}
};
// Error callback that prints diagnostics to stderr and then aborts the
// program on any reported error.
class DefaultNetEqTestErrorCallback : public NetEqTestErrorCallback {
  void OnInsertPacketError(const NetEqInput::PacketData& packet) override;
  void OnGetAudioError() override;
};
// Callback interface invoked after each packet has been inserted into NetEq.
class NetEqPostInsertPacket {
 public:
  virtual ~NetEqPostInsertPacket() = default;
  // `packet` is the packet that was just inserted; `neteq` is the instance it
  // was inserted into.
  virtual void AfterInsertPacket(const NetEqInput::PacketData& packet,
                                 NetEq* neteq) = 0;
};
// Callback interface invoked around each NetEq::GetAudio call.
class NetEqGetAudioCallback {
 public:
  virtual ~NetEqGetAudioCallback() = default;
  // Called immediately before GetAudio.
  virtual void BeforeGetAudio(NetEq* neteq) = 0;
  // Called immediately after GetAudio with the produced `audio_frame`, the
  // muted flag, and the simulated wallclock time of the event.
  virtual void AfterGetAudio(int64_t time_now_ms,
                             const AudioFrame& audio_frame,
                             bool muted,
                             NetEq* neteq) = 0;
};
// Callback interface invoked once when the simulation ends.
class NetEqSimulationEndedCallback {
 public:
  virtual ~NetEqSimulationEndedCallback() = default;
  // `simulation_time_ms` is the total simulated duration in milliseconds.
  virtual void SimulationEnded(int64_t simulation_time_ms) = 0;
};
// Class that provides an input--output test for NetEq. The input (both packets
// and output events) is provided by a NetEqInput object, while the output is
// directed to an AudioSink object.
class NetEqTest : public NetEqSimulator {
 public:
  // Maps RTP payload types to codec formats.
  using DecoderMap = std::map<int, SdpAudioFormat>;

  // Optional observers; null pointers are allowed and mean "not interested".
  // The pointees are not owned and must outlive the NetEqTest object.
  struct Callbacks {
    NetEqTestErrorCallback* error_callback = nullptr;
    NetEqPostInsertPacket* post_insert_packet = nullptr;
    NetEqGetAudioCallback* get_audio_callback = nullptr;
    NetEqSimulationEndedCallback* simulation_ended_callback = nullptr;
  };

  // Sets up the test with given configuration, codec mappings, input, output,
  // and callback objects for error reporting. `neteq_factory` may be null, in
  // which case a default-constructed NetEq is used; `text_log` may be null to
  // disable per-event text logging.
  NetEqTest(const NetEq::Config& config,
            rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
            const DecoderMap& codecs,
            std::unique_ptr<std::ofstream> text_log,
            NetEqFactory* neteq_factory,
            std::unique_ptr<NetEqInput> input,
            std::unique_ptr<AudioSink> output,
            Callbacks callbacks);

  ~NetEqTest() override;

  // Runs the test. Returns the duration of the produced audio in ms.
  int64_t Run() override;
  // Runs the simulation until we hit the next GetAudio event. If the
  // simulation is finished, is_simulation_finished will be set to true in the
  // returned SimulationStepResult.
  SimulationStepResult RunToNextGetAudio() override;

  // Requests a specific NetEq operation for the next GetAudio call.
  void SetNextAction(Action next_operation) override;
  // Returns the state snapshot from the most recent simulation step.
  NetEqState GetNetEqState() override;

  // Returns the statistics from NetEq.
  NetEqNetworkStatistics SimulationStats();
  NetEqLifetimeStatistics LifetimeStats() const;

  // Returns the default payload-type -> codec mapping used by the test tools.
  static DecoderMap StandardDecoderMap();

 private:
  // Registers `codecs` with the NetEq instance; called from the constructor.
  void RegisterDecoders(const DecoderMap& codecs);
  // NOTE: declaration order matters — `input_` is used to initialize
  // `clock_`, and `clock_` is passed to the NetEq created for `neteq_`.
  std::unique_ptr<NetEqInput> input_;
  SimulatedClock clock_;
  absl::optional<Action> next_action_;
  absl::optional<int> last_packet_time_ms_;
  std::unique_ptr<NetEq> neteq_;
  std::unique_ptr<AudioSink> output_;
  Callbacks callbacks_;
  int sample_rate_hz_;
  NetEqState current_state_;
  NetEqOperationsAndState prev_ops_state_;
  NetEqLifetimeStatistics prev_lifetime_stats_;
  absl::optional<uint32_t> last_packet_timestamp_;
  std::unique_ptr<std::ofstream> text_log_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_

View file

@ -0,0 +1,354 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/neteq_test_factory.h"
#include <errno.h>
#include <limits.h> // For ULONG_MAX returned by strtoul.
#include <stdio.h>
#include <stdlib.h> // For strtoul.
#include <fstream>
#include <iostream>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/neteq/neteq.h"
#include "modules/audio_coding/neteq/tools/audio_sink.h"
#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "modules/audio_coding/neteq/tools/initial_packet_inserter_neteq_input.h"
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include "modules/audio_coding/neteq/tools/neteq_event_log_input.h"
#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
#include "modules/audio_coding/neteq/tools/neteq_rtp_dump_input.h"
#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
#include "modules/audio_coding/neteq/tools/neteq_stats_plotter.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/audio_coding/neteq/tools/output_audio_file.h"
#include "modules/audio_coding/neteq/tools/output_wav_file.h"
#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "rtc_base/checks.h"
#include "test/function_audio_decoder_factory.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace test {
namespace {
// Maps an RTP payload type to the sample rate (in Hz) of the codec that
// `config` assigns to that payload type. Returns 0 for RED (the rate is
// determined by the wrapped payload) and absl::nullopt for unknown types.
// `config` is taken by const reference: this function is called once per
// packet header when scanning the input, and the previous by-value parameter
// copied the whole Config struct on every call.
absl::optional<int> CodecSampleRate(
    uint8_t payload_type,
    const webrtc::test::NetEqTestFactory::Config& config) {
  if (payload_type == config.pcmu || payload_type == config.pcma ||
      payload_type == config.ilbc || payload_type == config.pcm16b ||
      payload_type == config.cn_nb || payload_type == config.avt)
    return 8000;
  if (payload_type == config.isac || payload_type == config.pcm16b_wb ||
      payload_type == config.g722 || payload_type == config.cn_wb ||
      payload_type == config.avt_16)
    return 16000;
  if (payload_type == config.isac_swb || payload_type == config.pcm16b_swb32 ||
      payload_type == config.cn_swb32 || payload_type == config.avt_32)
    return 32000;
  if (payload_type == config.opus || payload_type == config.pcm16b_swb48 ||
      payload_type == config.cn_swb48 || payload_type == config.avt_48)
    return 48000;
  if (payload_type == config.red)
    return 0;
  return absl::nullopt;
}
} // namespace
// A callback class which prints whenever the inserted packet stream changes
// the SSRC.
class SsrcSwitchDetector : public NetEqPostInsertPacket {
 public:
  // Takes a pointer to another callback object, which will be invoked after
  // this object finishes. This does not transfer ownership, and null is a
  // valid value.
  explicit SsrcSwitchDetector(NetEqPostInsertPacket* other_callback)
      : other_callback_(other_callback) {}

  void AfterInsertPacket(const NetEqInput::PacketData& packet,
                         NetEq* neteq) override {
    const uint32_t current_ssrc = packet.header.ssrc;
    // Report only actual changes, i.e. not the very first packet.
    if (last_ssrc_.has_value() && current_ssrc != *last_ssrc_) {
      std::cout << "Changing streams from 0x" << std::hex << *last_ssrc_
                << " to 0x" << std::hex << current_ssrc << std::dec
                << " (payload type "
                << static_cast<int>(packet.header.payloadType) << ")"
                << std::endl;
    }
    last_ssrc_ = current_ssrc;
    // Chain to the wrapped callback, if one was provided.
    if (other_callback_ != nullptr) {
      other_callback_->AfterInsertPacket(packet, neteq);
    }
  }

 private:
  NetEqPostInsertPacket* other_callback_;
  absl::optional<uint32_t> last_ssrc_;
};
// Out-of-line defaulted special members for NetEqTestFactory and its nested
// Config struct.
NetEqTestFactory::NetEqTestFactory() = default;
NetEqTestFactory::~NetEqTestFactory() = default;
NetEqTestFactory::Config::Config() = default;
NetEqTestFactory::Config::Config(const Config& other) = default;
NetEqTestFactory::Config::~Config() = default;
// Builds a NetEqTest from a serialized RTC event log held in memory. Returns
// null (after printing a diagnostic) if the string cannot be parsed or yields
// no usable input.
std::unique_ptr<NetEqTest> NetEqTestFactory::InitializeTestFromString(
    absl::string_view input_string,
    NetEqFactory* factory,
    const Config& config) {
  ParsedRtcEventLog parsed_log;
  const auto parse_status = parsed_log.ParseString(input_string);
  if (!parse_status.ok()) {
    std::cerr << "Failed to parse event log: " << parse_status.message()
              << std::endl;
    return nullptr;
  }
  std::unique_ptr<NetEqInput> input =
      CreateNetEqEventLogInput(parsed_log, config.ssrc_filter);
  if (input == nullptr) {
    std::cerr << "Error: Cannot parse input string" << std::endl;
    return nullptr;
  }
  return InitializeTest(std::move(input), factory, config);
}
// Builds a NetEqTest from a file, which may be an RTP dump, a pcap capture,
// or a serialized RTC event log. Returns null (after printing a diagnostic)
// on any parse or open failure.
std::unique_ptr<NetEqTest> NetEqTestFactory::InitializeTestFromFile(
    absl::string_view input_file_name,
    NetEqFactory* factory,
    const Config& config) {
  // Map the configured RTP header-extension IDs to their extension types.
  std::map<int, RTPExtensionType> header_extension_map = {
      {config.audio_level, kRtpExtensionAudioLevel},
      {config.abs_send_time, kRtpExtensionAbsoluteSendTime},
      {config.transport_seq_no, kRtpExtensionTransportSequenceNumber},
      {config.video_content_type, kRtpExtensionVideoContentType},
      {config.video_timing, kRtpExtensionVideoTiming}};

  // The input is either an RTP capture or an RTC event log.
  const bool is_rtp_capture = RtpFileSource::ValidRtpDump(input_file_name) ||
                              RtpFileSource::ValidPcap(input_file_name);
  std::unique_ptr<NetEqInput> input;
  if (is_rtp_capture) {
    input = CreateNetEqRtpDumpInput(input_file_name, header_extension_map,
                                    config.ssrc_filter);
  } else {
    ParsedRtcEventLog parsed_log;
    const auto parse_status = parsed_log.ParseFile(input_file_name);
    if (!parse_status.ok()) {
      std::cerr << "Failed to parse event log: " << parse_status.message()
                << std::endl;
      return nullptr;
    }
    input = CreateNetEqEventLogInput(parsed_log, config.ssrc_filter);
  }
  std::cout << "Input file: " << input_file_name << std::endl;
  if (input == nullptr) {
    std::cerr << "Error: Cannot open input file" << std::endl;
    return nullptr;
  }
  return InitializeTest(std::move(input), factory, config);
}
// Shared back end for the two public entry points: configures skipping,
// detects the sample rate from the first decodable packet, opens the output
// sink, optionally sets up payload replacement, and finally constructs the
// NetEqTest. Returns null (after printing a diagnostic) on any failure.
std::unique_ptr<NetEqTest> NetEqTestFactory::InitializeTest(
    std::unique_ptr<NetEqInput> input,
    NetEqFactory* factory,
    const Config& config) {
  if (input->ended()) {
    std::cerr << "Error: Input is empty" << std::endl;
    return nullptr;
  }
  // Field trials stay active for the factory's lifetime (the factory must
  // outlive the returned test).
  if (!config.field_trial_string.empty()) {
    field_trials_ =
        std::make_unique<ScopedFieldTrials>(config.field_trial_string);
  }
  // Skip some initial events/packets if requested.
  if (config.skip_get_audio_events > 0) {
    std::cout << "Skipping " << config.skip_get_audio_events
              << " get_audio events" << std::endl;
    if (!input->NextPacketTime() || !input->NextOutputEventTime()) {
      std::cerr << "No events found" << std::endl;
      return nullptr;
    }
    for (int i = 0; i < config.skip_get_audio_events; i++) {
      input->AdvanceOutputEvent();
      if (!input->NextOutputEventTime()) {
        std::cerr << "Not enough get_audio events found" << std::endl;
        return nullptr;
      }
    }
    // Drop the packets that would have arrived before the first remaining
    // output event.
    while (*input->NextPacketTime() < *input->NextOutputEventTime()) {
      input->PopPacket();
      if (!input->NextPacketTime()) {
        std::cerr << "Not enough incoming packets found" << std::endl;
        return nullptr;
      }
    }
  }
  // Check the sample rate: scan forward until a packet with a known payload
  // type is found, discarding anything in front of it.
  absl::optional<int> sample_rate_hz;
  std::set<std::pair<int, uint32_t>> discarded_pt_and_ssrc;
  while (absl::optional<RTPHeader> first_rtp_header = input->NextHeader()) {
    RTC_DCHECK(first_rtp_header);
    sample_rate_hz = CodecSampleRate(first_rtp_header->payloadType, config);
    if (sample_rate_hz) {
      std::cout << "Found valid packet with payload type "
                << static_cast<int>(first_rtp_header->payloadType)
                << " and SSRC 0x" << std::hex << first_rtp_header->ssrc
                << std::dec << std::endl;
      if (config.initial_dummy_packets > 0) {
        std::cout << "Nr of initial dummy packets: "
                  << config.initial_dummy_packets << std::endl;
        input = std::make_unique<InitialPacketInserterNetEqInput>(
            std::move(input), config.initial_dummy_packets, *sample_rate_hz);
      }
      break;
    }
    // Discard this packet and move to the next. Keep track of discarded payload
    // types and SSRCs.
    discarded_pt_and_ssrc.emplace(first_rtp_header->payloadType,
                                  first_rtp_header->ssrc);
    input->PopPacket();
  }
  if (!discarded_pt_and_ssrc.empty()) {
    std::cout << "Discarded initial packets with the following payload types "
                 "and SSRCs:"
              << std::endl;
    for (const auto& d : discarded_pt_and_ssrc) {
      std::cout << "PT " << d.first << "; SSRC 0x" << std::hex
                << static_cast<int>(d.second) << std::dec << std::endl;
    }
  }
  if (!sample_rate_hz) {
    std::cerr << "Cannot find any packets with known payload types"
              << std::endl;
    return nullptr;
  }
  // If an output file is requested, open it; otherwise discard the audio.
  std::unique_ptr<AudioSink> output;
  if (!config.output_audio_filename.has_value()) {
    output = std::make_unique<VoidAudioSink>();
    std::cout << "No output audio file" << std::endl;
  } else if (config.output_audio_filename->size() >= 4 &&
             config.output_audio_filename->substr(
                 config.output_audio_filename->size() - 4) == ".wav") {
    // Open a wav file with the known sample rate.
    output = std::make_unique<OutputWavFile>(*config.output_audio_filename,
                                             *sample_rate_hz);
    std::cout << "Output WAV file: " << *config.output_audio_filename
              << std::endl;
  } else {
    // Open a pcm file.
    output = std::make_unique<OutputAudioFile>(*config.output_audio_filename);
    std::cout << "Output PCM file: " << *config.output_audio_filename
              << std::endl;
  }
  NetEqTest::DecoderMap codecs = NetEqTest::StandardDecoderMap();
  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory =
      CreateBuiltinAudioDecoderFactory();
  // Check if a replacement audio file was provided. If so, all decodable
  // payloads are rerouted to a fake decoder reading from that file.
  if (config.replacement_audio_file.size() > 0) {
    // Find largest unused payload type.
    int replacement_pt = 127;
    while (codecs.find(replacement_pt) != codecs.end()) {
      --replacement_pt;
      if (replacement_pt <= 0) {
        std::cerr << "Error: Unable to find available replacement payload type"
                  << std::endl;
        return nullptr;
      }
    }
    // Narrows each configured payload type from int32 to the uint8 range used
    // on the wire.
    auto std_set_int32_to_uint8 = [](const std::set<int32_t>& a) {
      std::set<uint8_t> b;
      for (auto& x : a) {
        b.insert(static_cast<uint8_t>(x));
      }
      return b;
    };
    std::set<uint8_t> cn_types = std_set_int32_to_uint8(
        {config.cn_nb, config.cn_wb, config.cn_swb32, config.cn_swb48});
    std::set<uint8_t> forbidden_types =
        std_set_int32_to_uint8({config.g722, config.red, config.avt,
                                config.avt_16, config.avt_32, config.avt_48});
    input.reset(new NetEqReplacementInput(std::move(input), replacement_pt,
                                          cn_types, forbidden_types));
    // Note that capture-by-copy implies that the lambda captures the value of
    // decoder_factory before it's reassigned on the left-hand side.
    decoder_factory = rtc::make_ref_counted<FunctionAudioDecoderFactory>(
        [decoder_factory, config](
            const SdpAudioFormat& format,
            absl::optional<AudioCodecPairId> codec_pair_id) {
          std::unique_ptr<AudioDecoder> decoder =
              decoder_factory->MakeAudioDecoder(format, codec_pair_id);
          if (!decoder && format.name == "replacement") {
            decoder = std::make_unique<FakeDecodeFromFile>(
                std::make_unique<InputAudioFile>(config.replacement_audio_file),
                format.clockrate_hz, format.num_channels > 1);
          }
          return decoder;
        });
    if (!codecs
             .insert({replacement_pt, SdpAudioFormat("replacement", 48000, 1)})
             .second) {
      std::cerr << "Error: Unable to insert replacement audio codec"
                << std::endl;
      return nullptr;
    }
  }
  // Create a text log output stream if needed.
  std::unique_ptr<std::ofstream> text_log;
  if (config.textlog && config.textlog_filename.has_value()) {
    // Write to file.
    text_log = std::make_unique<std::ofstream>(*config.textlog_filename);
  } else if (config.textlog) {
    // Print to stdout: an unopened ofstream whose buffer is redirected to
    // std::cout's buffer.
    text_log = std::make_unique<std::ofstream>();
    text_log->basic_ios<char>::rdbuf(std::cout.rdbuf());
  }
  // Wire up the observer chain: the SSRC switch detector forwards to the
  // stats getter's delay analyzer; the plotter handles simulation end.
  NetEqTest::Callbacks callbacks;
  stats_plotter_ = std::make_unique<NetEqStatsPlotter>(
      config.matlabplot, config.pythonplot, config.concealment_events,
      config.plot_scripts_basename.value_or(""));
  ssrc_switch_detector_.reset(
      new SsrcSwitchDetector(stats_plotter_->stats_getter()->delay_analyzer()));
  callbacks.post_insert_packet = ssrc_switch_detector_.get();
  callbacks.get_audio_callback = stats_plotter_->stats_getter();
  callbacks.simulation_ended_callback = stats_plotter_.get();
  NetEq::Config neteq_config;
  neteq_config.sample_rate_hz = *sample_rate_hz;
  neteq_config.max_packets_in_buffer = config.max_nr_packets_in_buffer;
  neteq_config.enable_fast_accelerate = config.enable_fast_accelerate;
  return std::make_unique<NetEqTest>(
      neteq_config, decoder_factory, codecs, std::move(text_log), factory,
      std::move(input), std::move(output), callbacks);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,172 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_FACTORY_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_FACTORY_H_
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "test/field_trial.h"
namespace webrtc {
namespace test {
class SsrcSwitchDetector;
class NetEqStatsGetter;
class NetEqStatsPlotter;
// Note that the NetEqTestFactory needs to be alive when the NetEqTest object is
// used for a simulation.
class NetEqTestFactory {
 public:
  NetEqTestFactory();
  ~NetEqTestFactory();

  // Simulation configuration. The payload-type and header-extension fields
  // mirror the neteq_rtpplay command line flags; each static default_*()
  // method holds the corresponding flag's default value.
  struct Config {
    Config();
    Config(const Config& other);
    ~Config();
    // RTP payload type for PCM-u.
    static constexpr int default_pcmu() { return 0; }
    int pcmu = default_pcmu();
    // RTP payload type for PCM-a.
    static constexpr int default_pcma() { return 8; }
    int pcma = default_pcma();
    // RTP payload type for iLBC.
    static constexpr int default_ilbc() { return 102; }
    int ilbc = default_ilbc();
    // RTP payload type for iSAC.
    static constexpr int default_isac() { return 103; }
    int isac = default_isac();
    // RTP payload type for iSAC-swb (32 kHz).
    static constexpr int default_isac_swb() { return 104; }
    int isac_swb = default_isac_swb();
    // RTP payload type for Opus.
    static constexpr int default_opus() { return 111; }
    int opus = default_opus();
    // RTP payload type for PCM16b-nb (8 kHz).
    static constexpr int default_pcm16b() { return 93; }
    int pcm16b = default_pcm16b();
    // RTP payload type for PCM16b-wb (16 kHz).
    static constexpr int default_pcm16b_wb() { return 94; }
    int pcm16b_wb = default_pcm16b_wb();
    // RTP payload type for PCM16b-swb32 (32 kHz).
    static constexpr int default_pcm16b_swb32() { return 95; }
    int pcm16b_swb32 = default_pcm16b_swb32();
    // RTP payload type for PCM16b-swb48 (48 kHz).
    static constexpr int default_pcm16b_swb48() { return 96; }
    int pcm16b_swb48 = default_pcm16b_swb48();
    // RTP payload type for G.722.
    static constexpr int default_g722() { return 9; }
    int g722 = default_g722();
    // RTP payload type for AVT/DTMF (8 kHz).
    static constexpr int default_avt() { return 106; }
    int avt = default_avt();
    // RTP payload type for AVT/DTMF (16 kHz).
    static constexpr int default_avt_16() { return 114; }
    int avt_16 = default_avt_16();
    // RTP payload type for AVT/DTMF (32 kHz).
    static constexpr int default_avt_32() { return 115; }
    int avt_32 = default_avt_32();
    // RTP payload type for AVT/DTMF (48 kHz).
    static constexpr int default_avt_48() { return 116; }
    int avt_48 = default_avt_48();
    // RTP payload type for redundant audio (RED).
    static constexpr int default_red() { return 117; }
    int red = default_red();
    // RTP payload type for comfort noise (8 kHz).
    static constexpr int default_cn_nb() { return 13; }
    int cn_nb = default_cn_nb();
    // RTP payload type for comfort noise (16 kHz).
    static constexpr int default_cn_wb() { return 98; }
    int cn_wb = default_cn_wb();
    // RTP payload type for comfort noise (32 kHz).
    static constexpr int default_cn_swb32() { return 99; }
    int cn_swb32 = default_cn_swb32();
    // RTP payload type for comfort noise (48 kHz).
    static constexpr int default_cn_swb48() { return 100; }
    int cn_swb48 = default_cn_swb48();
    // A PCM file that will be used to populate dummy RTP packets.
    std::string replacement_audio_file;
    // Only use packets with this SSRC.
    absl::optional<uint32_t> ssrc_filter;
    // Extension ID for audio level (RFC 6464).
    static constexpr int default_audio_level() { return 1; }
    int audio_level = default_audio_level();
    // Extension ID for absolute sender time.
    static constexpr int default_abs_send_time() { return 3; }
    int abs_send_time = default_abs_send_time();
    // Extension ID for transport sequence number.
    static constexpr int default_transport_seq_no() { return 5; }
    int transport_seq_no = default_transport_seq_no();
    // Extension ID for video content type.
    static constexpr int default_video_content_type() { return 7; }
    int video_content_type = default_video_content_type();
    // Extension ID for video timing.
    static constexpr int default_video_timing() { return 8; }
    int video_timing = default_video_timing();
    // Generate a matlab script for plotting the delay profile.
    bool matlabplot = false;
    // Generates a python script for plotting the delay profile.
    bool pythonplot = false;
    // Prints concealment events.
    bool concealment_events = false;
    // Maximum allowed number of packets in the buffer.
    static constexpr int default_max_nr_packets_in_buffer() { return 200; }
    int max_nr_packets_in_buffer = default_max_nr_packets_in_buffer();
    // Number of dummy packets to put in the packet buffer at the start of the
    // simulation.
    static constexpr int default_initial_dummy_packets() { return 0; }
    int initial_dummy_packets = default_initial_dummy_packets();
    // Number of getAudio events to skip at the start of the simulation.
    static constexpr int default_skip_get_audio_events() { return 0; }
    int skip_get_audio_events = default_skip_get_audio_events();
    // Enables jitter buffer fast accelerate.
    bool enable_fast_accelerate = false;
    // Dumps events that describes the simulation on a step-by-step basis.
    bool textlog = false;
    // If specified and `textlog` is true, the output of `textlog` is written to
    // the specified file name.
    absl::optional<std::string> textlog_filename;
    // Base name for the output script files for plotting the delay profile.
    absl::optional<std::string> plot_scripts_basename;
    // Path to the output audio file.
    absl::optional<std::string> output_audio_filename;
    // Field trials to use during the simulation.
    std::string field_trial_string;
  };

  // Creates a NetEqTest object that reads its input from the RTP dump (or
  // event log) file `input_filename`. Returns nullptr on failure.
  std::unique_ptr<NetEqTest> InitializeTestFromFile(
      absl::string_view input_filename,
      NetEqFactory* neteq_factory,
      const Config& config);
  // Same as above, but the input is given as an in-memory string instead of a
  // file name. Returns nullptr on failure.
  std::unique_ptr<NetEqTest> InitializeTestFromString(
      absl::string_view input_string,
      NetEqFactory* neteq_factory,
      const Config& config);

 private:
  // Common implementation backing the two public Initialize* methods.
  std::unique_ptr<NetEqTest> InitializeTest(std::unique_ptr<NetEqInput> input,
                                            NetEqFactory* neteq_factory,
                                            const Config& config);
  std::unique_ptr<SsrcSwitchDetector> ssrc_switch_detector_;
  std::unique_ptr<NetEqStatsPlotter> stats_plotter_;
  // The field trials are stored in the test factory, because neteq_test is not
  // in a testonly target, and therefore cannot use ScopedFieldTrials.
  std::unique_ptr<ScopedFieldTrials> field_trials_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_FACTORY_H_

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
#include <stdio.h>
#include <string>
#include "absl/strings/string_view.h"
#include "modules/audio_coding/neteq/tools/audio_sink.h"
namespace webrtc {
namespace test {
// AudioSink that appends samples as raw 16-bit signed host-endian PCM to a
// file on disk.
class OutputAudioFile : public AudioSink {
 public:
  // Opens `file_name` for binary writing. The resulting file holds raw
  // 16-bit signed host-endian PCM samples, with no header.
  explicit OutputAudioFile(absl::string_view file_name)
      : file_(fopen(std::string(file_name).c_str(), "wb")) {}

  virtual ~OutputAudioFile() {
    if (file_ != nullptr) {
      fclose(file_);
    }
  }

  OutputAudioFile(const OutputAudioFile&) = delete;
  OutputAudioFile& operator=(const OutputAudioFile&) = delete;

  // Appends `num_samples` samples from `audio` to the file. Returns true iff
  // all samples were written.
  bool WriteArray(const int16_t* audio, size_t num_samples) override {
    RTC_DCHECK(file_);
    const size_t written = fwrite(audio, sizeof(*audio), num_samples, file_);
    return written == num_samples;
  }

 private:
  FILE* file_;  // Owned; closed in the destructor if the open succeeded.
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_

View file

@ -0,0 +1,46 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
#include <string>
#include "absl/strings/string_view.h"
#include "common_audio/wav_file.h"
#include "modules/audio_coding/neteq/tools/audio_sink.h"
namespace webrtc {
namespace test {
class OutputWavFile : public AudioSink {
public:
// Creates an OutputWavFile, opening a file named `file_name` for writing.
// The output file is a PCM encoded wav file.
OutputWavFile(absl::string_view file_name,
int sample_rate_hz,
int num_channels = 1)
: wav_writer_(file_name, sample_rate_hz, num_channels) {}
OutputWavFile(const OutputWavFile&) = delete;
OutputWavFile& operator=(const OutputWavFile&) = delete;
bool WriteArray(const int16_t* audio, size_t num_samples) override {
wav_writer_.WriteSamples(audio, num_samples);
return true;
}
private:
WavWriter wav_writer_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_

View file

@ -0,0 +1,133 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/packet.h"
#include "api/array_view.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
namespace webrtc {
namespace test {
// Constructs a packet from raw RTP bytes. The header is parsed immediately;
// `valid_header_` records whether parsing succeeded.
Packet::Packet(rtc::CopyOnWriteBuffer packet,
               size_t virtual_packet_length_bytes,
               double time_ms,
               const RtpHeaderExtensionMap* extension_map)
    : packet_(std::move(packet)),
      virtual_packet_length_bytes_(virtual_packet_length_bytes),
      time_ms_(time_ms),
      valid_header_(ParseHeader(extension_map)) {}

// Constructs a packet from an already parsed RTPHeader (no payload bytes);
// used for RTP-dump files that carry headers only. The header is trusted, so
// `valid_header_` is unconditionally true.
Packet::Packet(const RTPHeader& header,
               size_t virtual_packet_length_bytes,
               size_t virtual_payload_length_bytes,
               double time_ms)
    : header_(header),
      virtual_packet_length_bytes_(virtual_packet_length_bytes),
      virtual_payload_length_bytes_(virtual_payload_length_bytes),
      time_ms_(time_ms),
      valid_header_(true) {}

Packet::~Packet() = default;
// Parses the RED (RFC 2198) block headers at the start of the payload and
// pushes one heap-allocated RTPHeader per block onto the front of `headers`
// (so the main/last-on-wire block ends up first). The caller takes ownership
// of the created headers (see DeleteRedHeaders). Returns false if the payload
// ends before the final (F bit == 0) block header is found.
bool Packet::ExtractRedHeaders(std::list<RTPHeader*>* headers) const {
  //
  // 0                   1                   2                   3
  // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |1| block PT    |  timestamp offset         |   block length    |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |1| ...                                                         |
  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  // |0| block PT    |
  // +-+-+-+-+-+-+-+-+
  //
  const uint8_t* payload_ptr = payload();
  const uint8_t* payload_end_ptr = payload_ptr + payload_length_bytes();
  // Find all RED headers with the extension bit set to 1. That is, all headers
  // but the last one.
  while ((payload_ptr < payload_end_ptr) && (*payload_ptr & 0x80)) {
    RTPHeader* header = new RTPHeader;
    CopyToHeader(header);
    header->payloadType = payload_ptr[0] & 0x7F;
    // 14-bit timestamp offset: 8 bits from byte 1 and the top 6 bits of
    // byte 2. The offset counts backwards from the packet's own timestamp.
    uint32_t offset = (payload_ptr[1] << 6) + ((payload_ptr[2] & 0xFC) >> 2);
    header->timestamp -= offset;
    headers->push_front(header);
    payload_ptr += 4;  // Non-final block headers are 4 bytes.
  }
  // Last header.
  RTC_DCHECK_LT(payload_ptr, payload_end_ptr);
  if (payload_ptr >= payload_end_ptr) {
    return false;  // Payload too short.
  }
  // Final block header (1 byte): payload type only, zero timestamp offset.
  RTPHeader* header = new RTPHeader;
  CopyToHeader(header);
  header->payloadType = payload_ptr[0] & 0x7F;
  headers->push_front(header);
  return true;
}
// Frees every RTPHeader produced by ExtractRedHeaders and leaves `headers`
// empty. The list object itself is not deleted.
void Packet::DeleteRedHeaders(std::list<RTPHeader*>* headers) {
  for (RTPHeader* header : *headers) {
    delete header;
  }
  headers->clear();
}
// Parses `packet_` into `header_` and `rtp_payload_`. Returns false if the
// bytes do not form a valid RTP packet, or if a declared padding size exceeds
// the payload. Also derives `virtual_payload_length_bytes_` from the virtual
// packet length minus the actual header size.
bool Packet::ParseHeader(const RtpHeaderExtensionMap* extension_map) {
  // Use RtpPacketReceived instead of RtpPacket because former already has a
  // converter into legacy RTPHeader.
  webrtc::RtpPacketReceived rtp_packet(extension_map);
  // Because of the special case of dummy packets that have padding marked in
  // the RTP header, but do not have rtp payload with the padding size, handle
  // padding manually. Regular RTP packet parser reports failure, but it is fine
  // in this context.
  bool padding = (packet_[0] & 0b0010'0000);  // P bit of the first RTP byte.
  size_t padding_size = 0;
  if (padding) {
    // Clear the padding bit to prevent failure when rtp payload is omitted.
    // Work on a copy so that `packet_` keeps the original bytes.
    rtc::CopyOnWriteBuffer packet(packet_);
    packet.MutableData()[0] &= ~0b0010'0000;
    if (!rtp_packet.Parse(std::move(packet))) {
      return false;
    }
    if (rtp_packet.payload_size() > 0) {
      // Per RFC 3550, the last payload byte holds the padding length.
      padding_size = rtp_packet.data()[rtp_packet.size() - 1];
    }
    if (padding_size > rtp_packet.payload_size()) {
      return false;
    }
  } else {
    if (!rtp_packet.Parse(packet_)) {
      return false;
    }
  }
  // View into `packet_` excluding header and padding bytes.
  rtp_payload_ = rtc::MakeArrayView(packet_.data() + rtp_packet.headers_size(),
                                    rtp_packet.payload_size() - padding_size);
  rtp_packet.GetHeader(&header_);
  RTC_CHECK_GE(virtual_packet_length_bytes_, rtp_packet.size());
  RTC_DCHECK_GE(virtual_packet_length_bytes_, rtp_packet.headers_size());
  virtual_payload_length_bytes_ =
      virtual_packet_length_bytes_ - rtp_packet.headers_size();
  return true;
}
// Copies this packet's parsed RTP header into `destination`.
void Packet::CopyToHeader(RTPHeader* destination) const {
  *destination = header_;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
#include <list>
#include "api/array_view.h"
#include "api/rtp_headers.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "rtc_base/copy_on_write_buffer.h"
namespace webrtc {
namespace test {
// Class for handling RTP packets in test applications.
// Class for handling RTP packets in test applications.
class Packet {
 public:
  // Creates a packet, with the packet payload (including header bytes) in
  // `packet`. The `time_ms` is an extra time associated with this packet,
  // typically used to denote arrival time.
  // `virtual_packet_length_bytes` is typically used when reading RTP dump files
  // that only contain the RTP headers, and no payload (a.k.a RTP dummy files or
  // RTP light). The `virtual_packet_length_bytes` tells what size the packet
  // had on wire, including the now discarded payload.
  Packet(rtc::CopyOnWriteBuffer packet,
         size_t virtual_packet_length_bytes,
         double time_ms,
         const RtpHeaderExtensionMap* extension_map = nullptr);

  // Convenience overload where the virtual length equals the actual length,
  // i.e. the full packet (header + payload) is present in `packet`.
  Packet(rtc::CopyOnWriteBuffer packet,
         double time_ms,
         const RtpHeaderExtensionMap* extension_map = nullptr)
      : Packet(packet, packet.size(), time_ms, extension_map) {}

  // Same as above, but creates the packet from an already parsed RTPHeader.
  // This is typically used when reading RTP dump files that only contain the
  // RTP headers, and no payload. The `virtual_packet_length_bytes` tells what
  // size the packet had on wire, including the now discarded payload,
  // The `virtual_payload_length_bytes` tells the size of the payload.
  Packet(const RTPHeader& header,
         size_t virtual_packet_length_bytes,
         size_t virtual_payload_length_bytes,
         double time_ms);

  virtual ~Packet();

  Packet(const Packet&) = delete;
  Packet& operator=(const Packet&) = delete;

  // Parses the first bytes of the RTP payload, interpreting them as RED headers
  // according to RFC 2198. The headers will be inserted into `headers`. The
  // caller of the method assumes ownership of the objects in the list, and
  // must delete them properly.
  bool ExtractRedHeaders(std::list<RTPHeader*>* headers) const;

  // Deletes all RTPHeader objects in `headers`, but does not delete `headers`
  // itself.
  static void DeleteRedHeaders(std::list<RTPHeader*>* headers);

  // Pointer to the RTP payload (empty for dummy packets).
  const uint8_t* payload() const { return rtp_payload_.data(); }

  // Actual number of bytes stored in `packet_` (header + payload + padding).
  size_t packet_length_bytes() const { return packet_.size(); }

  // Actual payload size, excluding header and padding.
  size_t payload_length_bytes() const { return rtp_payload_.size(); }

  // On-wire packet size, which may exceed packet_length_bytes() for dummy
  // packets whose payload was discarded.
  size_t virtual_packet_length_bytes() const {
    return virtual_packet_length_bytes_;
  }

  // On-wire payload size corresponding to virtual_packet_length_bytes().
  size_t virtual_payload_length_bytes() const {
    return virtual_payload_length_bytes_;
  }

  // Parsed RTP header; only meaningful when valid_header() is true.
  const RTPHeader& header() const { return header_; }

  // Extra time associated with the packet, typically arrival time.
  double time_ms() const { return time_ms_; }

  // True if the constructor managed to parse (or was given) a valid header.
  bool valid_header() const { return valid_header_; }

 private:
  bool ParseHeader(const RtpHeaderExtensionMap* extension_map);
  void CopyToHeader(RTPHeader* destination) const;

  RTPHeader header_;
  const rtc::CopyOnWriteBuffer packet_;
  rtc::ArrayView<const uint8_t> rtp_payload_;  // Empty for dummy RTP packets.
  // Virtual lengths are used when parsing RTP header files (dummy RTP files).
  const size_t virtual_packet_length_bytes_;
  size_t virtual_payload_length_bytes_ = 0;
  const double time_ms_;  // Used to denote a packet's arrival time.
  const bool valid_header_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/packet_source.h"
namespace webrtc {
namespace test {
PacketSource::PacketSource() = default;

PacketSource::~PacketSource() = default;

// Marks `payload_type` in `filter_` so subclasses can skip packets carrying
// it. NOTE(review): std::bitset::set() throws std::out_of_range for an index
// >= 128; payload types are 7 bits per the RFC (see filter_ declaration), but
// a caller passing a uint8_t value >= 128 would throw here.
void PacketSource::FilterOutPayloadType(uint8_t payload_type) {
  filter_.set(payload_type, true);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
#include <bitset>
#include <memory>
#include "modules/audio_coding/neteq/tools/packet.h"
namespace webrtc {
namespace test {
// Interface class for an object delivering RTP packets to test applications.
// Interface class for an object delivering RTP packets to test applications.
class PacketSource {
 public:
  PacketSource();
  virtual ~PacketSource();

  PacketSource(const PacketSource&) = delete;
  PacketSource& operator=(const PacketSource&) = delete;

  // Returns next packet. Returns nullptr if the source is depleted, or if an
  // error occurred.
  virtual std::unique_ptr<Packet> NextPacket() = 0;

  // Registers `payload_type` in `filter_`; subclasses consult the filter to
  // decide which packets to drop.
  virtual void FilterOutPayloadType(uint8_t payload_type);

 protected:
  std::bitset<128> filter_;  // Payload type is 7 bits in the RFC.
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_

View file

@ -0,0 +1,226 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Unit tests for test Packet class.
#include "modules/audio_coding/neteq/tools/packet.h"
#include "test/gtest.h"
namespace webrtc {
namespace test {
namespace {

// Size of a minimal RTP header (version 2, no CSRCs, no extensions).
const int kHeaderLengthBytes = 12;

// Writes a 12-byte RTP header for the given field values into `rtp_data`,
// all multi-byte fields in network (big-endian) byte order.
void MakeRtpHeader(int payload_type,
                   int seq_number,
                   uint32_t timestamp,
                   uint32_t ssrc,
                   uint8_t* rtp_data) {
  rtp_data[0] = 0x80;  // Version 2; padding, extension, CC and marker zero.
  rtp_data[1] = static_cast<uint8_t>(payload_type);
  // 16-bit sequence number, big-endian.
  rtp_data[2] = (seq_number >> 8) & 0xFF;
  rtp_data[3] = seq_number & 0xFF;
  // 32-bit timestamp and SSRC, big-endian.
  for (int i = 0; i < 4; ++i) {
    const int shift = 8 * (3 - i);
    rtp_data[4 + i] = (timestamp >> shift) & 0xFF;
    rtp_data[8 + i] = (ssrc >> shift) & 0xFF;
  }
}
}  // namespace
// A full packet (header + payload) parses successfully, and the virtual
// lengths equal the actual lengths.
TEST(TestPacket, RegularPacket) {
  const size_t kPacketLengthBytes = 100;
  rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
  const uint8_t kPayloadType = 17;
  const uint16_t kSequenceNumber = 4711;
  const uint32_t kTimestamp = 47114711;
  const uint32_t kSsrc = 0x12345678;
  MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
                packet_memory.MutableData());
  const double kPacketTime = 1.0;
  Packet packet(std::move(packet_memory), kPacketTime);
  ASSERT_TRUE(packet.valid_header());
  EXPECT_EQ(kPayloadType, packet.header().payloadType);
  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
  EXPECT_EQ(kTimestamp, packet.header().timestamp);
  EXPECT_EQ(kSsrc, packet.header().ssrc);
  EXPECT_EQ(0, packet.header().numCSRCs);
  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
            packet.payload_length_bytes());
  EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
            packet.virtual_payload_length_bytes());
  EXPECT_EQ(kPacketTime, packet.time_ms());
}
// A header-only (dummy) packet parses successfully; virtual lengths reflect
// the declared on-wire size while actual lengths reflect the stored bytes.
TEST(TestPacket, DummyPacket) {
  const size_t kPacketLengthBytes = kHeaderLengthBytes;  // Only RTP header.
  const size_t kVirtualPacketLengthBytes = 100;
  rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
  const uint8_t kPayloadType = 17;
  const uint16_t kSequenceNumber = 4711;
  const uint32_t kTimestamp = 47114711;
  const uint32_t kSsrc = 0x12345678;
  MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
                packet_memory.MutableData());
  const double kPacketTime = 1.0;
  Packet packet(std::move(packet_memory), kVirtualPacketLengthBytes,
                kPacketTime);
  ASSERT_TRUE(packet.valid_header());
  EXPECT_EQ(kPayloadType, packet.header().payloadType);
  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
  EXPECT_EQ(kTimestamp, packet.header().timestamp);
  EXPECT_EQ(kSsrc, packet.header().ssrc);
  EXPECT_EQ(0, packet.header().numCSRCs);
  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
            packet.payload_length_bytes());
  EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes());
  EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes,
            packet.virtual_payload_length_bytes());
  EXPECT_EQ(kPacketTime, packet.time_ms());
}
// A header-only packet with the padding bit set still parses (Packet handles
// padding manually for dummy packets), with the same lengths as DummyPacket.
TEST(TestPacket, DummyPaddingPacket) {
  const size_t kPacketLengthBytes = kHeaderLengthBytes;  // Only RTP header.
  const size_t kVirtualPacketLengthBytes = 100;
  rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
  const uint8_t kPayloadType = 17;
  const uint16_t kSequenceNumber = 4711;
  const uint32_t kTimestamp = 47114711;
  const uint32_t kSsrc = 0x12345678;
  MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
                packet_memory.MutableData());
  packet_memory.MutableData()[0] |= 0b0010'0000;  // Set the padding bit.
  const double kPacketTime = 1.0;
  Packet packet(std::move(packet_memory), kVirtualPacketLengthBytes,
                kPacketTime);
  ASSERT_TRUE(packet.valid_header());
  EXPECT_EQ(kPayloadType, packet.header().payloadType);
  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
  EXPECT_EQ(kTimestamp, packet.header().timestamp);
  EXPECT_EQ(kSsrc, packet.header().ssrc);
  EXPECT_EQ(0, packet.header().numCSRCs);
  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
            packet.payload_length_bytes());
  EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes());
  EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes,
            packet.virtual_payload_length_bytes());
  EXPECT_EQ(kPacketTime, packet.time_ms());
}
namespace {
// Writes one RED block header to `rtp_data` according to RFC 2198 and returns
// the number of bytes written: 4 for a non-final block, 1 for the final one.
//
// Non-final block (`last_payload` == false):
//  0                   1                   2                   3
//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |1| block PT    |  timestamp offset         |   block length    |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Final block (`last_payload` == true):
//  0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |0| Block PT    |
// +-+-+-+-+-+-+-+-+
int MakeRedHeader(int payload_type,
                  uint32_t timestamp_offset,
                  int block_length,
                  bool last_payload,
                  uint8_t* rtp_data) {
  if (last_payload) {
    // Final block: payload type with the follow bit (MSB) cleared.
    rtp_data[0] = payload_type & 0x7F;
    return 1;
  }
  // Follow bit set to 1 signals that another block comes after this one.
  rtp_data[0] = 0x80 | (payload_type & 0x7F);
  // 14-bit timestamp offset, spread over bytes 1 and 2.
  rtp_data[1] = timestamp_offset >> 6;
  rtp_data[2] = (timestamp_offset & 0x3F) << 2;
  // 10-bit block length, spread over bytes 2 and 3.
  rtp_data[2] |= block_length >> 8;
  rtp_data[3] = block_length & 0xFF;
  return 4;
}
}  // namespace
// Builds a payload with four RED blocks and verifies that ExtractRedHeaders
// recovers all of them with the correct payload types and timestamp offsets.
TEST(TestPacket, RED) {
  const size_t kPacketLengthBytes = 100;
  rtc::CopyOnWriteBuffer packet_memory(kPacketLengthBytes);
  const uint8_t kRedPayloadType = 17;
  const uint16_t kSequenceNumber = 4711;
  const uint32_t kTimestamp = 47114711;
  const uint32_t kSsrc = 0x12345678;
  MakeRtpHeader(kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc,
                packet_memory.MutableData());
  // Create four RED headers.
  // Payload types are just the same as the block index the offset is 100 times
  // the block index.
  const int kRedBlocks = 4;
  uint8_t* payload_ptr = packet_memory.MutableData() +
                         kHeaderLengthBytes;  // First byte after header.
  for (int i = 0; i < kRedBlocks; ++i) {
    int payload_type = i;
    // Offset value is not used for the last block.
    uint32_t timestamp_offset = 100 * i;
    int block_length = 10 * i;
    bool last_block = (i == kRedBlocks - 1) ? true : false;
    payload_ptr += MakeRedHeader(payload_type, timestamp_offset, block_length,
                                 last_block, payload_ptr);
  }
  const double kPacketTime = 1.0;
  // Hand over ownership of `packet_memory` to `packet`.
  Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
  ASSERT_TRUE(packet.valid_header());
  EXPECT_EQ(kRedPayloadType, packet.header().payloadType);
  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
  EXPECT_EQ(kTimestamp, packet.header().timestamp);
  EXPECT_EQ(kSsrc, packet.header().ssrc);
  EXPECT_EQ(0, packet.header().numCSRCs);
  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
            packet.payload_length_bytes());
  EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
            packet.virtual_payload_length_bytes());
  EXPECT_EQ(kPacketTime, packet.time_ms());
  std::list<RTPHeader*> red_headers;
  EXPECT_TRUE(packet.ExtractRedHeaders(&red_headers));
  EXPECT_EQ(kRedBlocks, static_cast<int>(red_headers.size()));
  int block_index = 0;
  for (std::list<RTPHeader*>::reverse_iterator it = red_headers.rbegin();
       it != red_headers.rend(); ++it) {
    // Reading list from the back, since the extraction puts the main payload
    // (which is the last one on wire) first.
    RTPHeader* red_block = *it;
    EXPECT_EQ(block_index, red_block->payloadType);
    EXPECT_EQ(kSequenceNumber, red_block->sequenceNumber);
    if (block_index == kRedBlocks - 1) {
      // Last block has zero offset per definition.
      EXPECT_EQ(kTimestamp, red_block->timestamp);
    } else {
      EXPECT_EQ(kTimestamp - 100 * block_index, red_block->timestamp);
    }
    EXPECT_EQ(kSsrc, red_block->ssrc);
    EXPECT_EQ(0, red_block->numCSRCs);
    ++block_index;
  }
  Packet::DeleteRedHeaders(&red_headers);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include <memory>
#include "rtc_base/checks.h"
namespace webrtc {
namespace test {
// Reads enough samples from the file (at `file_rate_hz_`) and resamples them
// to produce exactly `samples` samples at `output_rate_hz` in `destination`.
// Returns false if the underlying file read fails. Crashes (RTC_CHECK) if the
// rate ratio does not yield an integer number of input samples, or if the
// resampler does not produce exactly `samples` output samples.
bool ResampleInputAudioFile::Read(size_t samples,
                                  int output_rate_hz,
                                  int16_t* destination) {
  const size_t samples_to_read = samples * file_rate_hz_ / output_rate_hz;
  RTC_CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
      << "Frame size and sample rates don't add up to an integer.";
  std::unique_ptr<int16_t[]> temp_destination(new int16_t[samples_to_read]);
  if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
    return false;
  // Reconfigure the resampler only when the rates changed; mono (1 channel).
  resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
  size_t output_length = 0;
  RTC_CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read,
                               destination, samples, output_length),
               0);
  RTC_CHECK_EQ(samples, output_length);
  return true;
}
// Reads `samples` samples at the rate previously set via the constructor or
// set_output_rate_hz(); crashes if no output rate has been set.
bool ResampleInputAudioFile::Read(size_t samples, int16_t* destination) {
  RTC_CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
  return Read(samples, output_rate_hz_, destination);
}

// Sets the output rate used by the two-argument Read() overload.
void ResampleInputAudioFile::set_output_rate_hz(int rate_hz) {
  output_rate_hz_ = rate_hz;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,55 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
#include <string>
#include "absl/strings/string_view.h"
#include "common_audio/resampler/include/resampler.h"
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
namespace webrtc {
namespace test {
// Class for handling a looping input audio file with resampling.
// Class for handling a looping input audio file with resampling.
class ResampleInputAudioFile : public InputAudioFile {
 public:
  // Opens `file_name` containing samples at `file_rate_hz`. The output rate
  // is left unset (-1) and must be provided via set_output_rate_hz() or the
  // three-argument Read() before the two-argument Read() is used.
  ResampleInputAudioFile(absl::string_view file_name,
                         int file_rate_hz,
                         bool loop_at_end = true)
      : InputAudioFile(file_name, loop_at_end),
        file_rate_hz_(file_rate_hz),
        output_rate_hz_(-1) {}

  // Same as above, but also fixes the output rate up front.
  ResampleInputAudioFile(absl::string_view file_name,
                         int file_rate_hz,
                         int output_rate_hz,
                         bool loop_at_end = true)
      : InputAudioFile(file_name, loop_at_end),
        file_rate_hz_(file_rate_hz),
        output_rate_hz_(output_rate_hz) {}

  ResampleInputAudioFile(const ResampleInputAudioFile&) = delete;
  ResampleInputAudioFile& operator=(const ResampleInputAudioFile&) = delete;

  // Reads and resamples `samples` samples to `output_rate_hz`.
  bool Read(size_t samples, int output_rate_hz, int16_t* destination);
  // Reads and resamples to the stored output rate (must have been set).
  bool Read(size_t samples, int16_t* destination) override;
  // Sets the rate used by the two-argument Read() overload.
  void set_output_rate_hz(int rate_hz);

 private:
  const int file_rate_hz_;   // Sample rate of the backing file.
  int output_rate_hz_;       // -1 until set; checked in Read().
  Resampler resampler_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_

View file

@ -0,0 +1,166 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <memory>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "modules/audio_coding/neteq/tools/packet.h"
#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
// Command-line flags selecting which RTP header fields and extensions the
// tool decodes and prints.
ABSL_FLAG(int, red, 117, "RTP payload type for RED");
ABSL_FLAG(int,
          audio_level,
          -1,
          "Extension ID for audio level (RFC 6464); "
          "-1 not to print audio level");
ABSL_FLAG(int,
          abs_send_time,
          -1,
          "Extension ID for absolute sender time; "
          "-1 not to print absolute send time");
// Entry point of the rtp_analyze tool: reads an RTP dump file and prints one
// text line per packet (sequence number, timestamp, send time, size, payload
// type, marker bit, SSRC, plus optional audio-level and absolute-send-time
// extension columns). RED packets additionally get one line per embedded
// redundant header.
int main(int argc, char* argv[]) {
  std::vector<char*> args = absl::ParseCommandLine(argc, argv);
  std::string usage =
      "Tool for parsing an RTP dump file to text output.\n"
      "Example usage:\n"
      "./rtp_analyze input.rtp output.txt\n\n"
      "Output is sent to stdout if no output file is given. "
      "Note that this tool can read files with or without payloads.\n";
  if (args.size() != 2 && args.size() != 3) {
    printf("%s", usage.c_str());
    return 1;
  }
  // Validate flag ranges: RED is a 7-bit RTP payload type; extension IDs are
  // either -1 (disabled) or a positive one-byte value.
  RTC_CHECK(absl::GetFlag(FLAGS_red) >= 0 &&
            absl::GetFlag(FLAGS_red) <= 127);            // Payload type
  RTC_CHECK(absl::GetFlag(FLAGS_audio_level) == -1 ||    // Default
            (absl::GetFlag(FLAGS_audio_level) > 0 &&
             absl::GetFlag(FLAGS_audio_level) <= 255));  // Extension ID
  RTC_CHECK(absl::GetFlag(FLAGS_abs_send_time) == -1 ||  // Default
            (absl::GetFlag(FLAGS_abs_send_time) > 0 &&
             absl::GetFlag(FLAGS_abs_send_time) <= 255));  // Extension ID
  printf("Input file: %s\n", args[1]);
  // RtpFileSource::Create RTC_CHECK-fails (crashes) if the file cannot be
  // opened, so file_source is expected to be non-null here.
  std::unique_ptr<webrtc::test::RtpFileSource> file_source(
      webrtc::test::RtpFileSource::Create(args[1]));
  RTC_DCHECK(file_source.get());
  // Set RTP extension IDs.
  bool print_audio_level = false;
  if (absl::GetFlag(FLAGS_audio_level) != -1) {
    print_audio_level = true;
    file_source->RegisterRtpHeaderExtension(webrtc::kRtpExtensionAudioLevel,
                                            absl::GetFlag(FLAGS_audio_level));
  }
  bool print_abs_send_time = false;
  if (absl::GetFlag(FLAGS_abs_send_time) != -1) {
    print_abs_send_time = true;
    file_source->RegisterRtpHeaderExtension(
        webrtc::kRtpExtensionAbsoluteSendTime,
        absl::GetFlag(FLAGS_abs_send_time));
  }
  // Output goes to the optional third argument, or stdout when absent.
  FILE* out_file;
  if (args.size() == 3) {
    out_file = fopen(args[2], "wt");
    if (!out_file) {
      printf("Cannot open output file %s\n", args[2]);
      return -1;
    }
    printf("Output file: %s\n\n", args[2]);
  } else {
    out_file = stdout;
  }
  // Print file header.
  fprintf(out_file, "SeqNo TimeStamp SendTime Size PT M SSRC");
  if (print_audio_level) {
    fprintf(out_file, " AuLvl (V)");
  }
  if (print_abs_send_time) {
    fprintf(out_file, " AbsSendTime");
  }
  fprintf(out_file, "\n");
  // State for unwrapping the 24-bit absolute send time: `cycles` counts
  // wrap-arounds (-1 until the first extension value is seen).
  uint32_t max_abs_send_time = 0;
  int cycles = -1;
  std::unique_ptr<webrtc::test::Packet> packet;
  while (true) {
    packet = file_source->NextPacket();
    if (!packet.get()) {
      // End of file reached.
      break;
    }
    // Write packet data to file. Use virtual_packet_length_bytes so that the
    // correct packet sizes are printed also for RTP header-only dumps.
    fprintf(out_file, "%5u %10u %10u %5i %5i %2i %#08X",
            packet->header().sequenceNumber, packet->header().timestamp,
            static_cast<unsigned int>(packet->time_ms()),
            static_cast<int>(packet->virtual_packet_length_bytes()),
            packet->header().payloadType, packet->header().markerBit,
            packet->header().ssrc);
    if (print_audio_level && packet->header().extension.hasAudioLevel) {
      fprintf(out_file, " %5u (%1i)", packet->header().extension.audioLevel,
              packet->header().extension.voiceActivity);
    }
    if (print_abs_send_time && packet->header().extension.hasAbsoluteSendTime) {
      if (cycles == -1) {
        // Initialize.
        max_abs_send_time = packet->header().extension.absoluteSendTime;
        cycles = 0;
      }
      // Abs sender time is 24 bit 6.18 fixed point. Shift by 8 to normalize to
      // 32 bits (unsigned). Calculate the difference between this packet's
      // send time and the maximum observed. Cast to signed 32-bit to get the
      // desired wrap-around behavior.
      if (static_cast<int32_t>(
              (packet->header().extension.absoluteSendTime << 8) -
              (max_abs_send_time << 8)) >= 0) {
        // The difference is non-negative, meaning that this packet is newer
        // than the previously observed maximum absolute send time.
        if (packet->header().extension.absoluteSendTime < max_abs_send_time) {
          // Wrap detected.
          cycles++;
        }
        max_abs_send_time = packet->header().extension.absoluteSendTime;
      }
      // Abs sender time is 24 bit 6.18 fixed point. Divide by 2^18 to convert
      // to floating point representation.
      double send_time_seconds =
          static_cast<double>(packet->header().extension.absoluteSendTime) /
              262144 +
          64.0 * cycles;
      fprintf(out_file, " %11f", send_time_seconds);
    }
    fprintf(out_file, "\n");
    // For RED packets, print one extra line per embedded redundant header.
    // ExtractRedHeaders transfers ownership of the headers; delete them here.
    if (packet->header().payloadType == absl::GetFlag(FLAGS_red)) {
      std::list<webrtc::RTPHeader*> red_headers;
      packet->ExtractRedHeaders(&red_headers);
      while (!red_headers.empty()) {
        webrtc::RTPHeader* red = red_headers.front();
        RTC_DCHECK(red);
        fprintf(out_file, "* %5u %10u %10u %5i\n", red->sequenceNumber,
                red->timestamp, static_cast<unsigned int>(packet->time_ms()),
                red->payloadType);
        red_headers.pop_front();
        delete red;
      }
    }
  }
  // Note: this also fclose()s stdout when no output file was given; that is
  // harmless at program exit.
  fclose(out_file);
  return 0;
}

View file

@ -0,0 +1,354 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#ifdef WIN32
#include <winsock2.h>
#endif
#if defined(WEBRTC_LINUX) || defined(WEBRTC_FUCHSIA)
#include <netinet/in.h>
#endif
#include <iostream>
#include <map>
#include <string>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/memory/memory.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/L16/audio_encoder_L16.h"
#include "api/audio_codecs/g711/audio_encoder_g711.h"
#include "api/audio_codecs/g722/audio_encoder_g722.h"
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
#include "modules/audio_coding/include/audio_coding_module.h"
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "rtc_base/numerics/safe_conversions.h"
// Command-line flags configuring codec selection, encoder parameters and the
// RTP output stream.
ABSL_FLAG(bool, list_codecs, false, "Enumerate all codecs");
ABSL_FLAG(std::string, codec, "opus", "Codec to use");
ABSL_FLAG(int,
          frame_len,
          0,
          "Frame length in ms; 0 indicates codec default value");
ABSL_FLAG(int, bitrate, 0, "Bitrate in bps; 0 indicates codec default value");
ABSL_FLAG(int,
          payload_type,
          -1,
          "RTP payload type; -1 indicates codec default value");
ABSL_FLAG(int,
          cng_payload_type,
          -1,
          "RTP payload type for CNG; -1 indicates default value");
ABSL_FLAG(int, ssrc, 0, "SSRC to write to the RTP header");
ABSL_FLAG(bool, dtx, false, "Use DTX/CNG");
ABSL_FLAG(int, sample_rate, 48000, "Sample rate of the input file");
ABSL_FLAG(bool, fec, false, "Use Opus FEC");
ABSL_FLAG(int, expected_loss, 0, "Expected packet loss percentage");
namespace webrtc {
namespace test {
namespace {
// Add new codecs here, and to the map below.
enum class CodecType {
  kOpus,
  kPcmU,      // G.711 mu-law (see CreateEncoder).
  kPcmA,      // G.711 A-law (see CreateEncoder).
  kG722,
  kPcm16b8,   // L16 at 8 kHz (see Pcm16bConfig).
  kPcm16b16,  // L16 at 16 kHz.
  kPcm16b32,  // L16 at 32 kHz.
  kPcm16b48,  // L16 at 48 kHz.
  kIlbc,
};
// Per-codec properties stored as the mapped value in CodecList().
struct CodecTypeAndInfo {
  CodecType type;
  int default_payload_type;  // Used when --payload_type is not given.
  bool internal_dtx;         // True if the codec has built-in DTX (Opus); if
                             // false, --dtx wraps the codec in external CNG.
};
// List all supported codecs here. This map defines the command-line parameter
// value (the key string) for selecting each codec, together with its default
// RTP payload type and whether it uses internal or external DTX/CNG.
const std::map<std::string, CodecTypeAndInfo>& CodecList() {
  // Lazily constructed and intentionally leaked singleton.
  static const auto* const kCodecs =
      new std::map<std::string, CodecTypeAndInfo>{
          {"g722", {CodecType::kG722, 9, false}},
          {"ilbc", {CodecType::kIlbc, 102, false}},
          {"opus", {CodecType::kOpus, 111, true}},
          {"pcma", {CodecType::kPcmA, 8, false}},
          {"pcmu", {CodecType::kPcmU, 0, false}},
          {"pcm16b_8", {CodecType::kPcm16b8, 93, false}},
          {"pcm16b_16", {CodecType::kPcm16b16, 94, false}},
          {"pcm16b_32", {CodecType::kPcm16b32, 95, false}},
          {"pcm16b_48", {CodecType::kPcm16b48, 96, false}}};
  return *kCodecs;
}
// This class will receive callbacks from ACM when a packet is ready, and write
// it to the output file.
class Packetizer : public AudioPacketizationCallback {
 public:
  // `out_file` is not owned; `timestamp_rate_hz` converts RTP timestamps into
  // the millisecond arrival-time offsets stored in the rtpdump record header.
  Packetizer(FILE* out_file, uint32_t ssrc, int timestamp_rate_hz)
      : out_file_(out_file),
        ssrc_(ssrc),
        timestamp_rate_hz_(timestamp_rate_hz) {}

  // Called by ACM for each encoded frame. Writes one rtpdump record (8-byte
  // rtpdump header + 12-byte RTP header + payload) to `out_file_`. Empty
  // payloads (e.g. DTX gaps) are skipped. Always returns 0.
  int32_t SendData(AudioFrameType frame_type,
                   uint8_t payload_type,
                   uint32_t timestamp,
                   const uint8_t* payload_data,
                   size_t payload_len_bytes,
                   int64_t absolute_capture_timestamp_ms) override {
    if (payload_len_bytes == 0) {
      return 0;
    }
    constexpr size_t kRtpHeaderLength = 12;
    constexpr size_t kRtpDumpHeaderLength = 8;
    // rtpdump record header: total record length, packet length, and the
    // arrival-time offset in ms derived from the RTP timestamp. All fields
    // are written big-endian (network byte order).
    const uint16_t length = htons(rtc::checked_cast<uint16_t>(
        kRtpHeaderLength + kRtpDumpHeaderLength + payload_len_bytes));
    const uint16_t plen = htons(
        rtc::checked_cast<uint16_t>(kRtpHeaderLength + payload_len_bytes));
    const uint32_t offset = htonl(timestamp / (timestamp_rate_hz_ / 1000));
    RTC_CHECK_EQ(fwrite(&length, sizeof(uint16_t), 1, out_file_), 1);
    RTC_CHECK_EQ(fwrite(&plen, sizeof(uint16_t), 1, out_file_), 1);
    RTC_CHECK_EQ(fwrite(&offset, sizeof(uint32_t), 1, out_file_), 1);
    // Minimal 12-byte RTP header: first byte 0x80 (version 2, no padding,
    // no extension, no CSRCs), then payload type, and big-endian sequence
    // number, timestamp and SSRC.
    const uint8_t rtp_header[] = {0x80,
                                  static_cast<uint8_t>(payload_type & 0x7F),
                                  static_cast<uint8_t>(sequence_number_ >> 8),
                                  static_cast<uint8_t>(sequence_number_),
                                  static_cast<uint8_t>(timestamp >> 24),
                                  static_cast<uint8_t>(timestamp >> 16),
                                  static_cast<uint8_t>(timestamp >> 8),
                                  static_cast<uint8_t>(timestamp),
                                  static_cast<uint8_t>(ssrc_ >> 24),
                                  static_cast<uint8_t>(ssrc_ >> 16),
                                  static_cast<uint8_t>(ssrc_ >> 8),
                                  static_cast<uint8_t>(ssrc_)};
    static_assert(sizeof(rtp_header) == kRtpHeaderLength, "");
    RTC_CHECK_EQ(
        fwrite(rtp_header, sizeof(uint8_t), kRtpHeaderLength, out_file_),
        kRtpHeaderLength);
    ++sequence_number_;  // Intended to wrap on overflow.
    RTC_CHECK_EQ(
        fwrite(payload_data, sizeof(uint8_t), payload_len_bytes, out_file_),
        payload_len_bytes);
    return 0;
  }

 private:
  FILE* const out_file_;  // Not owned; caller is responsible for closing.
  const uint32_t ssrc_;
  const int timestamp_rate_hz_;
  uint16_t sequence_number_ = 0;  // Wraps naturally at 2^16.
};
// Overwrites `*config_frame_len` with the --frame_len flag value, but only
// when the flag was set to a positive number.
void SetFrameLenIfFlagIsPositive(int* config_frame_len) {
  const int requested_frame_len = absl::GetFlag(FLAGS_frame_len);
  if (requested_frame_len > 0) {
    *config_frame_len = requested_frame_len;
  }
}
// Builds a default T::Config for encoder type T, applies the --frame_len flag
// (when positive) to its frame_size_ms field, and validates the result.
template <typename T>
typename T::Config GetCodecConfig() {
  typename T::Config config;
  SetFrameLenIfFlagIsPositive(&config.frame_size_ms);
  RTC_CHECK(config.IsOk());
  return config;
}
// Builds an L16 (PCM16b) encoder config with the sample rate implied by
// `codec_type`. For non-PCM16b codec types this DCHECK-fails and returns the
// unmodified default config.
AudioEncoderL16::Config Pcm16bConfig(CodecType codec_type) {
  auto config = GetCodecConfig<AudioEncoderL16>();
  int sample_rate_hz;
  switch (codec_type) {
    case CodecType::kPcm16b8:
      sample_rate_hz = 8000;
      break;
    case CodecType::kPcm16b16:
      sample_rate_hz = 16000;
      break;
    case CodecType::kPcm16b32:
      sample_rate_hz = 32000;
      break;
    case CodecType::kPcm16b48:
      sample_rate_hz = 48000;
      break;
    default:
      RTC_DCHECK_NOTREACHED();
      return config;
  }
  config.sample_rate_hz = sample_rate_hz;
  return config;
}
// Creates the speech encoder selected by `codec_type`, configured from the
// command-line flags (--frame_len via GetCodecConfig, plus --bitrate, --dtx
// and --fec for Opus) and bound to the given RTP payload type.
std::unique_ptr<AudioEncoder> CreateEncoder(CodecType codec_type,
                                            int payload_type) {
  switch (codec_type) {
    case CodecType::kOpus: {
      AudioEncoderOpus::Config config = GetCodecConfig<AudioEncoderOpus>();
      // Only override the codec's default bitrate when --bitrate is set.
      if (absl::GetFlag(FLAGS_bitrate) > 0) {
        config.bitrate_bps = absl::GetFlag(FLAGS_bitrate);
      }
      config.dtx_enabled = absl::GetFlag(FLAGS_dtx);
      config.fec_enabled = absl::GetFlag(FLAGS_fec);
      RTC_CHECK(config.IsOk());
      return AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
    }
    case CodecType::kPcmU:
    case CodecType::kPcmA: {
      AudioEncoderG711::Config config = GetCodecConfig<AudioEncoderG711>();
      config.type = codec_type == CodecType::kPcmU
                        ? AudioEncoderG711::Config::Type::kPcmU
                        : AudioEncoderG711::Config::Type::kPcmA;
      RTC_CHECK(config.IsOk());
      return AudioEncoderG711::MakeAudioEncoder(config, payload_type);
    }
    case CodecType::kG722: {
      return AudioEncoderG722::MakeAudioEncoder(
          GetCodecConfig<AudioEncoderG722>(), payload_type);
    }
    case CodecType::kPcm16b8:
    case CodecType::kPcm16b16:
    case CodecType::kPcm16b32:
    case CodecType::kPcm16b48: {
      // Pcm16bConfig() selects the sample rate matching the codec type.
      return AudioEncoderL16::MakeAudioEncoder(Pcm16bConfig(codec_type),
                                               payload_type);
    }
    case CodecType::kIlbc: {
      return AudioEncoderIlbc::MakeAudioEncoder(
          GetCodecConfig<AudioEncoderIlbc>(), payload_type);
    }
  }
  // All enum values are handled above; reaching here indicates a bug.
  RTC_DCHECK_NOTREACHED();
  return nullptr;
}
// Builds a CNG encoder config. The payload type comes from the
// --cng_payload_type flag when set, otherwise from the conventional default
// for the given sample rate.
AudioEncoderCngConfig GetCngConfig(int sample_rate_hz) {
  AudioEncoderCngConfig cng_config;
  int payload_type = absl::GetFlag(FLAGS_cng_payload_type);
  if (payload_type == -1) {
    switch (sample_rate_hz) {
      case 8000:
        payload_type = 13;
        break;
      case 16000:
        payload_type = 98;
        break;
      case 32000:
        payload_type = 99;
        break;
      case 48000:
        payload_type = 100;
        break;
      default:
        RTC_DCHECK_NOTREACHED();
        payload_type = 0;
    }
  }
  cng_config.payload_type = payload_type;
  return cng_config;
}
// Main routine of the rtp_encode tool: reads 10 ms frames of mono PCM from
// args[1], encodes them with the codec selected on the command line, and
// writes the resulting packets as an rtpdump file to args[2]. Returns 0 on
// success, 1 on usage errors.
int RunRtpEncode(int argc, char* argv[]) {
  std::vector<char*> args = absl::ParseCommandLine(argc, argv);
  const std::string usage =
      "Tool for generating an RTP dump file from audio input.\n"
      "Example usage:\n"
      "./rtp_encode input.pcm output.rtp --codec=[codec] "
      "--frame_len=[frame_len] --bitrate=[bitrate]\n\n";
  if (!absl::GetFlag(FLAGS_list_codecs) && args.size() != 3) {
    printf("%s", usage.c_str());
    return 1;
  }
  // --list_codecs prints the valid codec names and exits.
  if (absl::GetFlag(FLAGS_list_codecs)) {
    printf("The following arguments are valid --codec parameters:\n");
    for (const auto& c : CodecList()) {
      printf(" %s\n", c.first.c_str());
    }
    return 0;
  }
  const auto codec_it = CodecList().find(absl::GetFlag(FLAGS_codec));
  if (codec_it == CodecList().end()) {
    printf("%s is not a valid codec name.\n",
           absl::GetFlag(FLAGS_codec).c_str());
    printf("Use argument --list_codecs to see all valid codec names.\n");
    return 1;
  }
  // Create the codec.
  const int payload_type = absl::GetFlag(FLAGS_payload_type) == -1
                               ? codec_it->second.default_payload_type
                               : absl::GetFlag(FLAGS_payload_type);
  std::unique_ptr<AudioEncoder> codec =
      CreateEncoder(codec_it->second.type, payload_type);
  // Create an external VAD/CNG encoder if needed.
  if (absl::GetFlag(FLAGS_dtx) && !codec_it->second.internal_dtx) {
    AudioEncoderCngConfig cng_config = GetCngConfig(codec->SampleRateHz());
    RTC_DCHECK(codec);
    // The CNG encoder wraps (takes ownership of) the speech encoder.
    cng_config.speech_encoder = std::move(codec);
    codec = CreateComfortNoiseEncoder(std::move(cng_config));
  }
  RTC_DCHECK(codec);
  // Set up ACM.
  const int timestamp_rate_hz = codec->RtpTimestampRateHz();
  auto acm(AudioCodingModule::Create());
  acm->SetEncoder(std::move(codec));
  acm->SetPacketLossRate(absl::GetFlag(FLAGS_expected_loss));
  // Open files.
  printf("Input file: %s\n", args[1]);
  InputAudioFile input_file(args[1], false);  // Open input in non-looping mode.
  FILE* out_file = fopen(args[2], "wb");
  RTC_CHECK(out_file) << "Could not open file " << args[2] << " for writing";
  printf("Output file: %s\n", args[2]);
  // rtpdump magic first line, followed by the binary file header below.
  fprintf(out_file, "#!rtpplay1.0 \n");  //,
  // Write 3 32-bit values followed by 2 16-bit values, all set to 0. This means
  // a total of 16 bytes.
  const uint8_t file_header[16] = {0};
  RTC_CHECK_EQ(fwrite(file_header, sizeof(file_header), 1, out_file), 1);
  // Create and register the packetizer, which will write the packets to file.
  Packetizer packetizer(out_file, absl::GetFlag(FLAGS_ssrc), timestamp_rate_hz);
  RTC_DCHECK_EQ(acm->RegisterTransportCallback(&packetizer), 0);
  // Feed 10 ms frames to the ACM until the input is exhausted; the packetizer
  // callback writes each encoded packet as it becomes ready.
  AudioFrame audio_frame;
  audio_frame.samples_per_channel_ =
      absl::GetFlag(FLAGS_sample_rate) / 100;  // 10 ms
  audio_frame.sample_rate_hz_ = absl::GetFlag(FLAGS_sample_rate);
  audio_frame.num_channels_ = 1;
  while (input_file.Read(audio_frame.samples_per_channel_,
                         audio_frame.mutable_data())) {
    RTC_CHECK_GE(acm->Add10MsData(audio_frame), 0);
    audio_frame.timestamp_ += audio_frame.samples_per_channel_;
  }
  // NOTE(review): out_file is never fclose()d; buffered data is flushed at
  // normal process exit, but an explicit fclose would be more robust.
  return 0;
}
} // namespace
} // namespace test
} // namespace webrtc
// Entry point; all logic lives in webrtc::test::RunRtpEncode.
int main(int argc, char* argv[]) {
  return webrtc::test::RunRtpEncode(argc, argv);
}

View file

@ -0,0 +1,99 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
#include <string.h>
#include "absl/strings/string_view.h"
#ifndef WIN32
#include <netinet/in.h>
#endif
#include <memory>
#include "modules/audio_coding/neteq/tools/packet.h"
#include "rtc_base/checks.h"
#include "test/rtp_file_reader.h"
namespace webrtc {
namespace test {
// Factory: constructs an RtpFileSource and opens `file_name`. RTC_CHECK-fails
// (crashes) if the file cannot be opened as rtpdump or pcap; otherwise returns
// an owning raw pointer. `ssrc_filter`, when set, restricts NextPacket() to
// packets with that SSRC.
RtpFileSource* RtpFileSource::Create(absl::string_view file_name,
                                     absl::optional<uint32_t> ssrc_filter) {
  RtpFileSource* source = new RtpFileSource(ssrc_filter);
  RTC_CHECK(source->OpenFile(file_name));
  return source;
}
// Returns true if `file_name` can be opened as an rtpdump file.
bool RtpFileSource::ValidRtpDump(absl::string_view file_name) {
  return std::unique_ptr<RtpFileReader>(RtpFileReader::Create(
             RtpFileReader::kRtpDump, file_name)) != nullptr;
}
// Returns true if `file_name` can be opened as a pcap (Wireshark) file.
bool RtpFileSource::ValidPcap(absl::string_view file_name) {
  return std::unique_ptr<RtpFileReader>(RtpFileReader::Create(
             RtpFileReader::kPcap, file_name)) != nullptr;
}
// Out-of-line destructor; default member destruction suffices.
RtpFileSource::~RtpFileSource() = default;
// Binds extension `type` to header-extension ID `id` so that NextPacket() can
// parse it. Returns the result of the underlying map registration.
bool RtpFileSource::RegisterRtpHeaderExtension(RTPExtensionType type,
                                               uint8_t id) {
  return rtp_header_extension_map_.RegisterByType(id, type);
}
std::unique_ptr<Packet> RtpFileSource::NextPacket() {
while (true) {
RtpPacket temp_packet;
if (!rtp_reader_->NextPacket(&temp_packet)) {
return NULL;
}
if (temp_packet.original_length == 0) {
// May be an RTCP packet.
// Read the next one.
continue;
}
auto packet = std::make_unique<Packet>(
rtc::CopyOnWriteBuffer(temp_packet.data, temp_packet.length),
temp_packet.original_length, temp_packet.time_ms,
&rtp_header_extension_map_);
if (!packet->valid_header()) {
continue;
}
if (filter_.test(packet->header().payloadType) ||
(ssrc_filter_ && packet->header().ssrc != *ssrc_filter_)) {
// This payload type should be filtered out. Continue to the next packet.
continue;
}
return packet;
}
}
// Private constructor; use Create(). Stores the optional SSRC filter.
RtpFileSource::RtpFileSource(absl::optional<uint32_t> ssrc_filter)
    : PacketSource(), ssrc_filter_(ssrc_filter) {}
// Opens `file_name`, trying the rtpdump format first and falling back to
// pcap. Dies (RTC_FATAL) if neither format matches; otherwise returns true.
bool RtpFileSource::OpenFile(absl::string_view file_name) {
  rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
  if (!rtp_reader_) {
    rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kPcap, file_name));
  }
  if (!rtp_reader_) {
    RTC_FATAL()
        << "Couldn't open input file as either a rtpdump or .pcap. Note "
        << "that .pcapng is not supported.";
  }
  return true;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
#include <stdio.h>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "modules/audio_coding/neteq/tools/packet_source.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
namespace test {
class RtpFileReader;
// Packet source that reads RTP packets from an rtpdump or pcap file.
class RtpFileSource : public PacketSource {
 public:
  // Creates an RtpFileSource reading from `file_name`. If the file cannot be
  // opened, or has the wrong format, NULL will be returned.
  static RtpFileSource* Create(
      absl::string_view file_name,
      absl::optional<uint32_t> ssrc_filter = absl::nullopt);

  // Checks whether a file is a valid RTP dump or PCAP (Wireshark) file.
  static bool ValidRtpDump(absl::string_view file_name);
  static bool ValidPcap(absl::string_view file_name);

  ~RtpFileSource() override;

  RtpFileSource(const RtpFileSource&) = delete;
  RtpFileSource& operator=(const RtpFileSource&) = delete;

  // Registers an RTP header extension and binds it to `id`.
  virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);

  std::unique_ptr<Packet> NextPacket() override;

 private:
  static const int kFirstLineLength = 40;
  static const int kRtpFileHeaderSize = 4 + 4 + 4 + 2 + 2;
  static const size_t kPacketHeaderSize = 8;

  explicit RtpFileSource(absl::optional<uint32_t> ssrc_filter);
  bool OpenFile(absl::string_view file_name);

  std::unique_ptr<RtpFileReader> rtp_reader_;
  const absl::optional<uint32_t> ssrc_filter_;  // Unset means accept any SSRC.
  RtpHeaderExtensionMap rtp_header_extension_map_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_

View file

@ -0,0 +1,59 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/tools/rtp_generator.h"
namespace webrtc {
namespace test {
// Fills `rtp_header` with the next sequence number, the current timestamp and
// the configured SSRC, and returns this packet's send time in ms. Advances the
// internal state: the timestamp by `payload_length_samples` and the send time
// by the (drift-adjusted) packet duration.
uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type,
                                    size_t payload_length_samples,
                                    RTPHeader* rtp_header) {
  RTC_DCHECK(rtp_header);
  if (!rtp_header) {
    // Defensive fallback for release builds where RTC_DCHECK is a no-op.
    return 0;
  }
  rtp_header->sequenceNumber = seq_number_++;
  rtp_header->timestamp = timestamp_;
  timestamp_ += static_cast<uint32_t>(payload_length_samples);
  rtp_header->payloadType = payload_type;
  rtp_header->markerBit = false;
  rtp_header->ssrc = ssrc_;
  rtp_header->numCSRCs = 0;

  uint32_t this_send_time = next_send_time_ms_;
  RTC_DCHECK_GT(samples_per_ms_, 0);
  // Advance the send time by the packet duration in ms, scaled by the drift
  // factor. The double-valued expression is truncated when added to the
  // uint32 member.
  next_send_time_ms_ +=
      ((1.0 + drift_factor_) * payload_length_samples) / samples_per_ms_;
  return this_send_time;
}
// Sets the clock-drift factor applied to future send-time deltas. Values
// <= -1.0 are silently ignored (they would make the per-packet send-time
// increment in GetRtpHeader() zero or negative).
void RtpGenerator::set_drift_factor(double factor) {
  if (factor > -1.0) {
    drift_factor_ = factor;
  }
}
// Generates a header via the base class, then checks whether the base class
// just advanced timestamp_ across `jump_from_timestamp_` (i.e. the previous
// timestamp was <= the jump point and the new one is beyond it). If so, the
// next packet's timestamp is moved to `jump_to_timestamp_`.
uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type,
                                                 size_t payload_length_samples,
                                                 RTPHeader* rtp_header) {
  uint32_t ret = RtpGenerator::GetRtpHeader(payload_type,
                                            payload_length_samples, rtp_header);
  // timestamp_ minus the payload length reconstructs the timestamp that was
  // just written into `rtp_header`.
  if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
          jump_from_timestamp_ &&
      timestamp_ > jump_from_timestamp_) {
    // We just moved across the `jump_from_timestamp_` timestamp. Do the jump.
    timestamp_ = jump_to_timestamp_;
  }
  return ret;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
#include "api/rtp_headers.h"
namespace webrtc {
namespace test {
// Class for generating RTP headers.
class RtpGenerator {
 public:
  // `samples_per_ms` converts payload lengths (in samples) into send-time
  // deltas (in ms). The remaining arguments seed the first generated header.
  RtpGenerator(int samples_per_ms,
               uint16_t start_seq_number = 0,
               uint32_t start_timestamp = 0,
               uint32_t start_send_time_ms = 0,
               uint32_t ssrc = 0x12345678)
      : seq_number_(start_seq_number),
        timestamp_(start_timestamp),
        next_send_time_ms_(start_send_time_ms),
        ssrc_(ssrc),
        samples_per_ms_(samples_per_ms),
        drift_factor_(0.0) {}

  virtual ~RtpGenerator() {}

  RtpGenerator(const RtpGenerator&) = delete;
  RtpGenerator& operator=(const RtpGenerator&) = delete;

  // Writes the next RTP header to `rtp_header`, which will be of type
  // `payload_type`. Returns the send time for this packet (in ms). The value of
  // `payload_length_samples` determines the send time for the next packet.
  virtual uint32_t GetRtpHeader(uint8_t payload_type,
                                size_t payload_length_samples,
                                RTPHeader* rtp_header);

  // Sets the clock-drift factor applied to send-time deltas; values <= -1.0
  // are ignored.
  void set_drift_factor(double factor);

 protected:
  uint16_t seq_number_;         // Sequence number of the next header.
  uint32_t timestamp_;          // Timestamp of the next header.
  uint32_t next_send_time_ms_;  // Send time of the next packet.
  const uint32_t ssrc_;
  const int samples_per_ms_;
  double drift_factor_;  // 0.0 means no clock drift.
};
// RtpGenerator that moves the timestamp to `jump_to_timestamp` once the
// generated timestamps advance past `jump_from_timestamp`.
class TimestampJumpRtpGenerator : public RtpGenerator {
 public:
  TimestampJumpRtpGenerator(int samples_per_ms,
                            uint16_t start_seq_number,
                            uint32_t start_timestamp,
                            uint32_t jump_from_timestamp,
                            uint32_t jump_to_timestamp)
      : RtpGenerator(samples_per_ms, start_seq_number, start_timestamp),
        jump_from_timestamp_(jump_from_timestamp),
        jump_to_timestamp_(jump_to_timestamp) {}

  TimestampJumpRtpGenerator(const TimestampJumpRtpGenerator&) = delete;
  TimestampJumpRtpGenerator& operator=(const TimestampJumpRtpGenerator&) =
      delete;

  uint32_t GetRtpHeader(uint8_t payload_type,
                        size_t payload_length_samples,
                        RTPHeader* rtp_header) override;

 private:
  uint32_t jump_from_timestamp_;  // Jump after crossing this timestamp.
  uint32_t jump_to_timestamp_;    // Timestamp to continue from after the jump.
};
} // namespace test
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_

View file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>
#include "api/array_view.h"
#include "modules/rtp_rtcp/source/byte_io.h"
#include "rtc_base/buffer.h"
namespace webrtc {
namespace test {
namespace {
constexpr size_t kRtpDumpHeaderLength = 8;
// Returns the next packet or an empty buffer if end of file was encountered.
// The returned buffer contains the full rtpdump record, including its 8-byte
// record header.
rtc::Buffer ReadNextPacket(FILE* file) {
  // Read the rtpdump header for the next packet.
  rtc::Buffer buffer;
  buffer.SetData(kRtpDumpHeaderLength, [&](rtc::ArrayView<uint8_t> x) {
    return fread(x.data(), 1, x.size(), file);
  });
  if (buffer.size() != kRtpDumpHeaderLength) {
    // Short read: end of file (or truncated header).
    return rtc::Buffer();
  }
  // Get length field. This is the total length for this packet written to file,
  // including the kRtpDumpHeaderLength bytes already read.
  const uint16_t len = ByteReader<uint16_t>::ReadBigEndian(buffer.data());
  RTC_CHECK_GE(len, kRtpDumpHeaderLength);
  // Read remaining data from file directly into buffer.
  buffer.AppendData(len - kRtpDumpHeaderLength, [&](rtc::ArrayView<uint8_t> x) {
    return fread(x.data(), 1, x.size(), file);
  });
  if (buffer.size() != len) {
    // Truncated record; treat it as end of file.
    buffer.Clear();
  }
  return buffer;
}
// An rtpdump record paired with its new arrival time.
struct PacketAndTime {
  rtc::Buffer packet;  // Full record, including the 8-byte rtpdump header.
  int time;            // New arrival-time offset in milliseconds.
};
// Writes `packet` to `file`, replacing the 4-byte arrival-time field of the
// rtpdump record header (bytes 4-7) with the packet's new time.
void WritePacket(const PacketAndTime& packet, FILE* file) {
  // Write the first 4 bytes from the original packet.
  const auto* payload_ptr = packet.packet.data();
  RTC_CHECK_EQ(fwrite(payload_ptr, 4, 1, file), 1);
  payload_ptr += 4;
  // Convert the new time offset to network endian, and write to file.
  uint8_t time[sizeof(uint32_t)];
  ByteWriter<uint32_t, sizeof(uint32_t)>::WriteBigEndian(time, packet.time);
  RTC_CHECK_EQ(fwrite(time, sizeof(uint32_t), 1, file), 1);
  payload_ptr += 4;  // Skip the old time in the original payload.
  // Write the remaining part of the payload.
  RTC_DCHECK_EQ(payload_ptr - packet.packet.data(), kRtpDumpHeaderLength);
  RTC_CHECK_EQ(
      fwrite(payload_ptr, packet.packet.size() - kRtpDumpHeaderLength, 1, file),
      1);
}
// Main routine of the rtp_jitter tool: reads an RTP dump from argv[1], gives
// each packet a new arrival time read from the timing file argv[2], sorts the
// packets on the new times, and writes the result to the RTP dump argv[3].
// Returns 0 on success, 1 on usage error.
int RunRtpJitter(int argc, char* argv[]) {
  const std::string program_name = argv[0];
  const std::string usage =
      "Tool for alternating the arrival times in an RTP dump file.\n"
      "Example usage:\n" +
      program_name + " input.rtp arrival_times_ms.txt output.rtp\n\n";
  if (argc != 4) {
    printf("%s", usage.c_str());
    return 1;
  }
  printf("Input RTP file: %s\n", argv[1]);
  FILE* in_file = fopen(argv[1], "rb");
  RTC_CHECK(in_file) << "Could not open file " << argv[1] << " for reading";
  printf("Timing file: %s\n", argv[2]);
  // NOTE(review): a missing or unreadable timing file is not diagnosed; it
  // silently yields an empty packet list. Verify this best-effort behavior is
  // intended.
  std::ifstream timing_file(argv[2]);
  printf("Output file: %s\n", argv[3]);
  FILE* out_file = fopen(argv[3], "wb");
  // Bug fix: report the output file name (argv[3]) on failure; the message
  // previously printed the timing file name (argv[2]).
  RTC_CHECK(out_file) << "Could not open file " << argv[3] << " for writing";
  // Copy the RTP file header (magic first line + 16-byte binary header)
  // verbatim to the output file.
  char header_string[30];
  RTC_CHECK(fgets(header_string, 30, in_file));
  fprintf(out_file, "%s", header_string);
  uint8_t file_header[16];
  RTC_CHECK_EQ(fread(file_header, sizeof(file_header), 1, in_file), 1);
  RTC_CHECK_EQ(fwrite(file_header, sizeof(file_header), 1, out_file), 1);
  // Read all time values from the timing file. Store in a vector.
  std::vector<int> new_arrival_times;
  int new_time;
  while (timing_file >> new_time) {
    new_arrival_times.push_back(new_time);
  }
  // Read all packets from the input RTP file, but no more than the number of
  // new time values. Store RTP packets together with new time values.
  auto time_it = new_arrival_times.begin();
  std::vector<PacketAndTime> packets;
  while (true) {
    auto packet = ReadNextPacket(in_file);
    if (packet.empty() || time_it == new_arrival_times.end()) {
      break;
    }
    packets.push_back({std::move(packet), *time_it});
    ++time_it;
  }
  // Sort on new time values. Note: std::sort is not stable, so packets with
  // equal arrival times may be emitted in any relative order.
  std::sort(packets.begin(), packets.end(),
            [](const PacketAndTime& a, const PacketAndTime& b) {
              return a.time < b.time;
            });
  // Write packets to output file.
  for (const auto& p : packets) {
    WritePacket(p, out_file);
  }
  fclose(in_file);
  fclose(out_file);
  return 0;
}
} // namespace
} // namespace test
} // namespace webrtc
// Entry point; all logic lives in webrtc::test::RunRtpJitter.
int main(int argc, char* argv[]) {
  return webrtc::test::RunRtpJitter(argc, argv);
}

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <memory>
#include "rtc_base/checks.h"
#include "test/rtp_file_reader.h"
#include "test/rtp_file_writer.h"
using webrtc::test::RtpFileReader;
using webrtc::test::RtpFileWriter;
// rtpcat: concatenates one or more rtpdump files (the leading arguments) into
// a single output rtpdump file (the last argument). Crashes via RTC_CHECK if
// any file cannot be opened or written.
int main(int argc, char* argv[]) {
  if (argc < 3) {
    printf("Concatenates multiple rtpdump files into one.\n");
    // Fixed usage-string typo: "int2.rtp" -> "in2.rtp".
    printf("Usage: rtpcat in1.rtp in2.rtp [...] out.rtp\n");
    exit(1);
  }
  std::unique_ptr<RtpFileWriter> output(
      RtpFileWriter::Create(RtpFileWriter::kRtpDump, argv[argc - 1]));
  RTC_CHECK(output.get() != nullptr) << "Cannot open output file.";
  printf("Output RTP file: %s\n", argv[argc - 1]);
  for (int i = 1; i < argc - 1; i++) {
    std::unique_ptr<RtpFileReader> input(
        RtpFileReader::Create(RtpFileReader::kRtpDump, argv[i]));
    RTC_CHECK(input.get() != nullptr) << "Cannot open input file " << argv[i];
    printf("Input RTP file: %s\n", argv[i]);
    webrtc::test::RtpPacket packet;
    // Copy every packet from the current input into the output.
    while (input->NextPacket(&packet))
      RTC_CHECK(output->WritePacket(&packet));
  }
  return 0;
}