Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,78 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
#include <jni.h>
#include <pthread.h>
#include <stddef.h>
#include <memory>
#include "modules/utility/include/jvm_android.h"
#include "rtc_base/checks.h"
#include "sdk/android/native_api/codecs/wrapper.h"
#include "sdk/android/native_api/jni/class_loader.h"
#include "sdk/android/native_api/jni/jvm.h"
#include "sdk/android/native_api/jni/scoped_java_ref.h"
#include "sdk/android/src/jni/jvm.h"
namespace webrtc {
namespace test {
namespace {
// Guards the one-time JVM initialization below across all threads.
static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;

// One-shot initializer run via pthread_once(): attaches the current thread to
// the already-loaded JVM and hands the JavaVM pointer to the global
// webrtc::JVM helper.
void EnsureInitializedOnce() {
  // A JVM must already have been created/loaded by the host process.
  RTC_CHECK(::webrtc::jni::GetJVM() != nullptr);

  JNIEnv* jni = ::webrtc::jni::AttachCurrentThreadIfNeeded();
  JavaVM* jvm = NULL;
  RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));

  // Initialize the Java environment (currently only used by the audio manager).
  webrtc::JVM::Initialize(jvm);
}
} // namespace
// Idempotent entry point: performs the JVM setup above exactly once, even if
// called concurrently from multiple threads (pthread_once guarantees this).
void InitializeAndroidObjects() {
  RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
}
// Instantiates the Java org.webrtc.HardwareVideoEncoderFactory via JNI and
// wraps it into a native VideoEncoderFactory.
std::unique_ptr<VideoEncoderFactory> CreateAndroidEncoderFactory() {
  JNIEnv* jni = AttachCurrentThreadIfNeeded();
  ScopedJavaLocalRef<jclass> java_factory_class =
      GetClass(jni, "org/webrtc/HardwareVideoEncoderFactory");
  jmethodID ctor = jni->GetMethodID(
      java_factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;ZZ)V");
  ScopedJavaLocalRef<jobject> java_factory(
      jni, jni->NewObject(java_factory_class.obj(), ctor,
                          nullptr /* shared_context */,
                          false /* enable_intel_vp8_encoder */,
                          true /* enable_h264_high_profile */));
  return JavaToNativeVideoEncoderFactory(jni, java_factory.obj());
}
// Instantiates the Java org.webrtc.HardwareVideoDecoderFactory via JNI and
// wraps it into a native VideoDecoderFactory.
std::unique_ptr<VideoDecoderFactory> CreateAndroidDecoderFactory() {
  JNIEnv* jni = AttachCurrentThreadIfNeeded();
  ScopedJavaLocalRef<jclass> java_factory_class =
      GetClass(jni, "org/webrtc/HardwareVideoDecoderFactory");
  jmethodID ctor = jni->GetMethodID(
      java_factory_class.obj(), "<init>", "(Lorg/webrtc/EglBase$Context;)V");
  ScopedJavaLocalRef<jobject> java_factory(
      jni, jni->NewObject(java_factory_class.obj(), ctor,
                          nullptr /* shared_context */));
  return JavaToNativeVideoDecoderFactory(jni, java_factory.obj());
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,30 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_

#include <memory>

#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"

namespace webrtc {
namespace test {

// Performs the one-time JVM/Java setup required before the factories below
// can be created. Safe to call multiple times.
void InitializeAndroidObjects();

// Create codec factories backed by the Java
// org.webrtc.HardwareVideo{Encoder,Decoder}Factory classes.
std::unique_ptr<VideoEncoderFactory> CreateAndroidEncoderFactory();
std::unique_ptr<VideoDecoderFactory> CreateAndroidDecoderFactory();

}  // namespace test
}  // namespace webrtc

#endif  // MODULES_VIDEO_CODING_CODECS_TEST_ANDROID_CODEC_FACTORY_HELPER_H_

View file

@ -0,0 +1,56 @@
#!/bin/bash
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# Runs every *InstantiationTest* in modules_tests on an Android device and
# appends the output to <device-id>/instantiation-tests.log.

if [ $# -ne 1 ]; then
  echo "Usage: run-instantiation-tests.sh ADB-DEVICE-ID"
  exit 1
fi

# Paths: update these based on your git checkout and gn output folder names.
WEBRTC_DIR=$HOME/src/webrtc/src
BUILD_DIR=$WEBRTC_DIR/out/Android_Release

# Other settings.
# $(...) instead of legacy backticks; fail early if adb is not installed.
ADB=$(command -v adb)
if [ -z "$ADB" ]; then
  echo "Error: adb not found in PATH."
  exit 1
fi
SERIAL=$1
TIMEOUT=7200

# Ensure we are using the latest version.
ninja -C "$BUILD_DIR" modules_tests

# Transfer the required files by trying to run a test that doesn't exist.
# All expansions are quoted so paths/ids with spaces don't word-split.
echo "===> Transferring required resources to device $1."
"$WEBRTC_DIR/build/android/test_runner.py" gtest \
  --output-directory "$BUILD_DIR" \
  --suite modules_tests \
  --gtest_filter "DoesNotExist" \
  --shard-timeout "$TIMEOUT" \
  --runtime-deps-path "$BUILD_DIR/gen.runtime/modules/modules_tests__test_runner_script.runtime_deps" \
  --adb-path "$ADB" \
  --device "$SERIAL" \
  --verbose

# Run all tests as separate test invocations.
# -p: don't fail when the per-device log directory already exists (re-runs).
mkdir -p "$SERIAL"
pushd "$SERIAL" || exit 1
"$WEBRTC_DIR/build/android/test_runner.py" gtest \
  --output-directory "$BUILD_DIR" \
  --suite modules_tests \
  --gtest_filter "*InstantiationTest*" \
  --gtest_also_run_disabled_tests \
  --shard-timeout "$TIMEOUT" \
  --runtime-deps-path ../empty-runtime-deps \
  --test-launcher-retry-limit 0 \
  --adb-path "$ADB" \
  --device "$SERIAL" \
  --verbose \
  --num-retries 0 \
  2>&1 | tee -a instantiation-tests.log
popd || exit 1

View file

@ -0,0 +1,70 @@
#!/bin/bash
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# Runs the CodecSettings tests for every (clip, resolution, framerate)
# combination on an Android device, one test-runner invocation per combination,
# logging each run to <device-id>/<clip>_<resolution>_<framerate>.log.

if [ $# -ne 1 ]; then
  echo "Usage: run.sh ADB-DEVICE-ID"
  exit 1
fi

# Paths: update these based on your git checkout and gn output folder names.
WEBRTC_DIR=$HOME/src/webrtc/src
BUILD_DIR=$WEBRTC_DIR/out/Android_Release

# Clips: update these to encode/decode other content.
CLIPS=('Foreman')
RESOLUTIONS=('128x96' '160x120' '176x144' '320x240' '352x288')
FRAMERATES=(30)

# Other settings.
# $(...) instead of legacy backticks; fail early if adb is not installed.
ADB=$(command -v adb)
if [ -z "$ADB" ]; then
  echo "Error: adb not found in PATH."
  exit 1
fi
SERIAL=$1
TIMEOUT=7200

# Ensure we are using the latest version.
ninja -C "$BUILD_DIR" modules_tests

# Transfer the required files by trying to run a test that doesn't exist.
# All expansions are quoted so paths/ids with spaces don't word-split.
echo "===> Transferring required resources to device $1."
"$WEBRTC_DIR/build/android/test_runner.py" gtest \
  --output-directory "$BUILD_DIR" \
  --suite modules_tests \
  --gtest_filter "DoesNotExist" \
  --shard-timeout "$TIMEOUT" \
  --runtime-deps-path "$BUILD_DIR/gen.runtime/modules/modules_tests__test_runner_script.runtime_deps" \
  --adb-path "$ADB" \
  --device "$SERIAL" \
  --verbose

# Run all tests as separate test invocations.
# -p: don't fail when the per-device log directory already exists (re-runs).
mkdir -p "$SERIAL"
pushd "$SERIAL" || exit 1
for clip in "${CLIPS[@]}"; do
  for resolution in "${RESOLUTIONS[@]}"; do
    for framerate in "${FRAMERATES[@]}"; do
      test_name="${clip}_${resolution}_${framerate}"
      log_name="${test_name}.log"
      echo "===> Running ${test_name} on device $1."
      "$WEBRTC_DIR/build/android/test_runner.py" gtest \
        --output-directory "$BUILD_DIR" \
        --suite modules_tests \
        --gtest_filter "CodecSettings/*${test_name}*" \
        --shard-timeout "$TIMEOUT" \
        --runtime-deps-path ../empty-runtime-deps \
        --test-launcher-retry-limit 0 \
        --adb-path "$ADB" \
        --device "$SERIAL" \
        --verbose \
        2>&1 | tee -a "${log_name}"
    done
  done
done
popd || exit 1

View file

@ -0,0 +1,78 @@
/*
* Copyright 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h"
#include <memory>
#include <vector>
#include "api/test/create_frame_generator.h"
#include "api/test/frame_generator_interface.h"
#include "api/transport/rtp/dependency_descriptor.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_type.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
namespace webrtc {
namespace {
// Capturing EncodedImageCallback: appends every frame the encoder delivers to
// a caller-owned vector supplied at construction time.
class EncoderCallback : public EncodedImageCallback {
 public:
  explicit EncoderCallback(
      std::vector<EncodedVideoFrameProducer::EncodedFrame>& sink)
      : sink_(sink) {}

 private:
  Result OnEncodedImage(const EncodedImage& encoded_image,
                        const CodecSpecificInfo* codec_specific_info) override {
    sink_.push_back({encoded_image, *codec_specific_info});
    return Result(Result::Error::OK);
  }

  // Not owned; must outlive this callback.
  std::vector<EncodedVideoFrameProducer::EncodedFrame>& sink_;
};
} // namespace
// Feeds `num_input_frames_` generated square-pattern frames into the wrapped
// encoder and returns every frame the encoder emitted through its callback.
std::vector<EncodedVideoFrameProducer::EncodedFrame>
EncodedVideoFrameProducer::Encode() {
  std::unique_ptr<test::FrameGeneratorInterface> frame_source =
      test::CreateSquareFrameGenerator(
          resolution_.Width(), resolution_.Height(),
          test::FrameGeneratorInterface::OutputType::kI420, absl::nullopt);

  std::vector<EncodedFrame> encoded_frames;
  EncoderCallback sink(encoded_frames);
  RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(&sink),
               WEBRTC_VIDEO_CODEC_OK);

  // RTP uses a 90 kHz clock; advance by one frame interval per input frame.
  const uint32_t rtp_tick = 90000 / framerate_fps_;
  for (int frame_idx = 0; frame_idx < num_input_frames_; ++frame_idx) {
    VideoFrame input_frame =
        VideoFrame::Builder()
            .set_video_frame_buffer(frame_source->NextFrame().buffer)
            .set_timestamp_rtp(rtp_timestamp_)
            .set_capture_time_identifier(capture_time_identifier_)
            .build();
    rtp_timestamp_ += rtp_tick;
    RTC_CHECK_EQ(encoder_.Encode(input_frame, &next_frame_type_),
                 WEBRTC_VIDEO_CODEC_OK);
    // After the first (possibly forced-key) frame, request delta frames only.
    next_frame_type_[0] = VideoFrameType::kVideoFrameDelta;
  }

  RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(nullptr),
               WEBRTC_VIDEO_CODEC_OK);
  return encoded_frames;
}
} // namespace webrtc

View file

@ -0,0 +1,108 @@
/*
* Copyright 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_
#include <stdint.h>
#include <vector>
#include "api/transport/rtp/dependency_descriptor.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
// Wrapper around VideoEncoder::Encode for convenient input (generates frames)
// and output (returns encoded frames instead of passing them to callback)
// Wrapper around VideoEncoder::Encode for convenient input (generates frames)
// and output (returns encoded frames instead of passing them to a callback).
class EncodedVideoFrameProducer {
 public:
  // One encoder output: the encoded bitstream plus its codec-specific
  // metadata, as delivered to EncodedImageCallback::OnEncodedImage.
  struct EncodedFrame {
    EncodedImage encoded_image;
    CodecSpecificInfo codec_specific_info;
  };

  // `encoder` should be initialized, but shouldn't have `EncoderCallback` set.
  explicit EncodedVideoFrameProducer(VideoEncoder& encoder)
      : encoder_(encoder) {}
  EncodedVideoFrameProducer(const EncodedVideoFrameProducer&) = delete;
  EncodedVideoFrameProducer& operator=(const EncodedVideoFrameProducer&) =
      delete;

  // Number of the input frames to pass to the encoder.
  EncodedVideoFrameProducer& SetNumInputFrames(int value);
  // Encode next frame as key frame.
  EncodedVideoFrameProducer& ForceKeyFrame();
  // Resolution of the input frames.
  EncodedVideoFrameProducer& SetResolution(RenderResolution value);
  // Frame rate used to derive the RTP timestamp step (90 kHz clock).
  EncodedVideoFrameProducer& SetFramerateFps(int value);
  // RTP timestamp of the first generated frame.
  EncodedVideoFrameProducer& SetRtpTimestamp(uint32_t value);
  // Capture-time identifier attached to every generated frame.
  EncodedVideoFrameProducer& SetCaptureTimeIdentifier(Timestamp value);

  // Generates input video frames and encodes them with `encoder` provided
  // in the constructor. Returns frames passed to `OnEncodedImage` by
  // wrapping `EncodedImageCallback` underneath.
  std::vector<EncodedFrame> Encode();

 private:
  VideoEncoder& encoder_;
  uint32_t rtp_timestamp_ = 1000;
  Timestamp capture_time_identifier_ = Timestamp::Micros(1000);
  int num_input_frames_ = 1;
  int framerate_fps_ = 30;
  RenderResolution resolution_ = {320, 180};
  // Frame type requested for the next Encode() call; Encode() resets it to
  // delta after the first encoded frame.
  std::vector<VideoFrameType> next_frame_type_ = {
      VideoFrameType::kVideoFrameKey};
};
// --- Inline implementations of the fluent configuration setters. ---
// Each returns *this so calls can be chained.

inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::ForceKeyFrame() {
  next_frame_type_ = {VideoFrameType::kVideoFrameKey};
  return *this;
}

inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetNumInputFrames(
    int value) {
  RTC_DCHECK_GT(value, 0);
  num_input_frames_ = value;
  return *this;
}

inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetFramerateFps(
    int value) {
  RTC_DCHECK_GT(value, 0);
  framerate_fps_ = value;
  return *this;
}

inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetResolution(
    RenderResolution value) {
  resolution_ = value;
  return *this;
}

inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetRtpTimestamp(
    uint32_t value) {
  rtp_timestamp_ = value;
  return *this;
}

inline EncodedVideoFrameProducer&
EncodedVideoFrameProducer::SetCaptureTimeIdentifier(Timestamp value) {
  capture_time_identifier_ = value;
  return *this;
}
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_

View file

@ -0,0 +1,28 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_

#include <memory>

#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"

namespace webrtc {
namespace test {

// Create codec factories backed by the Objective-C
// RTCVideo{Encoder,Decoder}FactoryH264 classes from sdk/objc.
std::unique_ptr<VideoEncoderFactory> CreateObjCEncoderFactory();
std::unique_ptr<VideoDecoderFactory> CreateObjCDecoderFactory();

}  // namespace test
}  // namespace webrtc

#endif  // MODULES_VIDEO_CODING_CODECS_TEST_OBJC_CODEC_FACTORY_HELPER_H_

View file

@ -0,0 +1,30 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
#import "sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h"
#import "sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h"
#include "sdk/objc/native/api/video_decoder_factory.h"
#include "sdk/objc/native/api/video_encoder_factory.h"
namespace webrtc {
namespace test {
// Wraps a freshly-allocated Objective-C H264 encoder factory into a native
// VideoEncoderFactory.
std::unique_ptr<VideoEncoderFactory> CreateObjCEncoderFactory() {
  return ObjCToNativeVideoEncoderFactory([[RTC_OBJC_TYPE(RTCVideoEncoderFactoryH264) alloc] init]);
}
// Wraps a freshly-allocated Objective-C H264 decoder factory into a native
// VideoDecoderFactory.
std::unique_ptr<VideoDecoderFactory> CreateObjCDecoderFactory() {
  return ObjCToNativeVideoDecoderFactory([[RTC_OBJC_TYPE(RTCVideoDecoderFactoryH264) alloc] init]);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,438 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots statistics from WebRTC integration test logs.
Usage: $ python plot_webrtc_test_logs.py filename.txt
"""
import numpy
import sys
import re
import matplotlib.pyplot as plt
# Log events: markers printed by gtest around each parameterized test case.
EVENT_START = 'RUN ] CodecSettings/VideoCodecTestParameterized.'
EVENT_END = 'OK ] CodecSettings/VideoCodecTestParameterized.'

# Metrics to plot, tuple: (name to parse in file, label to use when plotting).
WIDTH = ('width', 'width')
HEIGHT = ('height', 'height')
FILENAME = ('filename', 'clip')
CODEC_TYPE = ('codec_type', 'Codec')
ENCODER_IMPLEMENTATION_NAME = ('enc_impl_name', 'enc name')
DECODER_IMPLEMENTATION_NAME = ('dec_impl_name', 'dec name')
CODEC_IMPLEMENTATION_NAME = ('codec_impl_name', 'codec name')
CORES = ('num_cores', 'CPU cores used')
DENOISING = ('denoising', 'denoising')
RESILIENCE = ('resilience', 'resilience')
ERROR_CONCEALMENT = ('error_concealment', 'error concealment')
CPU_USAGE = ('cpu_usage_percent', 'CPU usage (%)')
BITRATE = ('target_bitrate_kbps', 'target bitrate (kbps)')
FRAMERATE = ('input_framerate_fps', 'fps')
QP = ('avg_qp', 'QP avg')
PSNR = ('avg_psnr', 'PSNR (dB)')
SSIM = ('avg_ssim', 'SSIM')
ENC_BITRATE = ('bitrate_kbps', 'encoded bitrate (kbps)')
NUM_FRAMES = ('num_input_frames', 'num frames')
NUM_DROPPED_FRAMES = ('num_dropped_frames', 'num dropped frames')
TIME_TO_TARGET = ('time_to_reach_target_bitrate_sec',
                  'time to reach target rate (sec)')
ENCODE_SPEED_FPS = ('enc_speed_fps', 'encode speed (fps)')
DECODE_SPEED_FPS = ('dec_speed_fps', 'decode speed (fps)')
AVG_KEY_FRAME_SIZE = ('avg_key_frame_size_bytes', 'avg key frame size (bytes)')
AVG_DELTA_FRAME_SIZE = ('avg_delta_frame_size_bytes',
                        'avg delta frame size (bytes)')

# Settings.
SETTINGS = [
    WIDTH,
    HEIGHT,
    FILENAME,
    NUM_FRAMES,
]

# Settings, options for x-axis.
X_SETTINGS = [
    CORES,
    FRAMERATE,
    DENOISING,
    RESILIENCE,
    ERROR_CONCEALMENT,
    BITRATE,  # TODO(asapersson): Needs to be last.
]

# Settings, options for subplots.
SUBPLOT_SETTINGS = [
    CODEC_TYPE,
    ENCODER_IMPLEMENTATION_NAME,
    DECODER_IMPLEMENTATION_NAME,
    CODEC_IMPLEMENTATION_NAME,
] + X_SETTINGS

# Results.
RESULTS = [
    PSNR,
    SSIM,
    ENC_BITRATE,
    NUM_DROPPED_FRAMES,
    TIME_TO_TARGET,
    ENCODE_SPEED_FPS,
    DECODE_SPEED_FPS,
    QP,
    CPU_USAGE,
    AVG_KEY_FRAME_SIZE,
    AVG_DELTA_FRAME_SIZE,
]

# Everything the parser looks for in a log line, and the plottable labels.
METRICS_TO_PARSE = SETTINGS + SUBPLOT_SETTINGS + RESULTS
Y_METRICS = [res[1] for res in RESULTS]

# Parameters for plotting.
FIG_SIZE_SCALE_FACTOR_X = 1.6
FIG_SIZE_SCALE_FACTOR_Y = 1.8
GRID_COLOR = [0.45, 0.45, 0.45]
def ParseSetting(filename, setting):
  """Parses all distinct values of one setting from a test log.

  Args:
    filename: The name of the file.
    setting: Name of setting to parse (e.g. width).

  Returns:
    A list of unique parsed settings, e.g. ['width: 128.0', 'width: 160.0'].
  """
  settings = []

  # `with` guarantees the file handle is closed even if parsing raises;
  # the original leaked the handle on any exception.
  with open(filename) as settings_file:
    while True:
      line = settings_file.readline()
      if not line:
        break
      if re.search(r'%s' % EVENT_START, line):
        # Parse one test-case event: collect metrics until the end marker.
        parsed = {}
        while True:
          line = settings_file.readline()
          if not line:
            break
          if re.search(r'%s' % EVENT_END, line):
            # Add parsed setting to list (deduplicated).
            if setting in parsed:
              s = setting + ': ' + str(parsed[setting])
              if s not in settings:
                settings.append(s)
            break
          TryFindMetric(parsed, line)

  return settings
def ParseMetrics(filename, setting1, setting2):
  """Parses metrics from file.

  Args:
    filename: The name of the file.
    setting1: First setting for sorting metrics (e.g. width).
    setting2: Second setting for sorting metrics (e.g. CPU cores used).

  Returns:
    A dictionary holding parsed metrics, keyed first by `setting1`, then by
    `setting2`: metrics[key1][key2][measurement].

    For example:
    metrics = {
      "width: 352": {
        "CPU cores used: 1.0": {
          "encode time (us)": [0.718005, 0.806925, 0.909726],
          "PSNR (dB)": [25.546029, 29.465518, 34.723535],
          "bitrate (kbps)": [50, 100, 300]
        },
      },
      "width: 176": {
        ...
      }
    }
  """
  metrics = {}

  # Parse events. `with` guarantees the file handle is closed even if parsing
  # raises; the original leaked the handle on any exception.
  with open(filename) as settings_file:
    while True:
      line = settings_file.readline()
      if not line:
        break
      if re.search(r'%s' % EVENT_START, line):
        # Parse one test-case event: collect metrics until the end marker.
        parsed = {}
        while True:
          line = settings_file.readline()
          if not line:
            break
          if re.search(r'%s' % EVENT_END, line):
            # Add parsed values to metrics, creating nested dicts on demand.
            key1 = setting1 + ': ' + str(parsed[setting1])
            key2 = setting2 + ': ' + str(parsed[setting2])
            event_metrics = metrics.setdefault(key1, {}).setdefault(key2, {})
            for label in parsed:
              event_metrics.setdefault(label, []).append(parsed[label])
            break
          TryFindMetric(parsed, line)

  return metrics
def TryFindMetric(parsed, line):
  """Checks `line` against every known metric; stores the first match.

  On a name match, records the value under the metric's plot label in
  `parsed` (only if the value could be extracted) and stops searching.
  """
  for name, label in METRICS_TO_PARSE:
    if re.search(r'%s' % name, line):
      found, value = GetMetric(name, line)
      if found:
        parsed[label] = value
      return
def GetMetric(name, string):
  """Extracts the value of metric `name` from `string`.

  Returns:
    (True, value) with a float or string value on success; (False, -1) when
    the metric is not present.
  """
  # Numeric value (e.g. bitrate = 98.8253).
  float_pattern = r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name
  match = re.search(float_pattern, string)
  if match:
    return StringToFloat(match.group(1))

  # Alphanumeric value (e.g. codec type : VP8).
  word_pattern = r'%s\s*[:=]\s*(\w+)' % name
  match = re.search(word_pattern, string)
  if match:
    return True, match.group(1)

  return False, -1
def StringToFloat(value):
  """Converts `value` to float.

  Returns:
    (True, float_value) on success, (False, -1) if `value` is not numeric.
  """
  try:
    value = float(value)
  except ValueError:
    # Parenthesized call works under both Python 2 and 3; the original bare
    # `print` statement is a syntax error under Python 3.
    print("Not a float, skipped %s" % value)
    return False, -1
  return True, value
def Plot(y_metric, x_metric, metrics):
  """Plots y_metric vs x_metric per key in metrics.

  For example:
  y_metric = 'PSNR (dB)'
  x_metric = 'bitrate (kbps)'
  metrics = {
    "CPU cores used: 1.0": {
      "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
      "bitrate (kbps)": [50, 100, 300, 500, 1000]
    },
    "CPU cores used: 2.0": {
      "PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
      "bitrate (kbps)": [50, 100, 300, 500, 1000]
    },
  }
  """
  for key in sorted(metrics):
    data = metrics[key]
    if y_metric not in data:
      # Parenthesized print: portable to Python 3 (bare statement is not).
      print("Failed to find metric: %s" % y_metric)
      continue

    y = numpy.array(data[y_metric])
    x = numpy.array(data[x_metric])
    if len(y) != len(x):
      print("Length mismatch for %s, %s" % (y, x))
      continue

    label = y_metric + ' - ' + str(key)
    plt.plot(x,
             y,
             label=label,
             linewidth=1.5,
             marker='o',
             markersize=5,
             markeredgewidth=0.0)
def PlotFigure(settings, y_metrics, x_metric, metrics, title):
  """Plots metrics in y_metrics list. One figure is plotted and each entry
  in the list is plotted in a subplot (and sorted per settings).

  For example:
  settings = ['width: 128.0', 'width: 160.0']. Sort subplot per setting.
  y_metrics = ['PSNR (dB)', 'PSNR (dB)']. Metric to plot per subplot.
  x_metric = 'bitrate (kbps)'
  """
  plt.figure()
  plt.suptitle(title, fontsize='large', fontweight='bold')
  settings.sort()  # In place, matching the original behavior.
  num_rows = len(settings)
  num_cols = 1

  # One subplot per setting, stacked vertically; `pos` is 1-based.
  for pos in range(1, num_rows + 1):
    plt.rc('grid', color=GRID_COLOR)
    ax = plt.subplot(num_rows, num_cols, pos)
    plt.grid()
    # Only the bottom subplot shows x tick labels.
    plt.setp(ax.get_xticklabels(), visible=(pos == num_rows), fontsize='large')
    plt.setp(ax.get_yticklabels(), fontsize='large')
    setting = settings[pos - 1]
    Plot(y_metrics[pos - 1], x_metric, metrics[setting])
    if setting.startswith(WIDTH[1]):
      plt.title(setting, fontsize='medium')
    plt.legend(fontsize='large', loc='best')

  plt.xlabel(x_metric, fontsize='large')
  plt.subplots_adjust(left=0.06,
                      right=0.98,
                      bottom=0.05,
                      top=0.94,
                      hspace=0.08)
def GetTitle(filename, setting):
  """Builds a figure title from settings parsed out of `filename`.

  Settings that are already used for subplot grouping (i.e. equal to
  `setting`) are omitted from the title.
  """
  title = ''
  if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]:
    for codec_type in ParseSetting(filename, CODEC_TYPE[1]):
      title += codec_type + ', '

  if setting != CORES[1]:
    for cores in ParseSetting(filename, CORES[1]):
      # Drop the '.0' suffix from the parsed float.
      title += cores.split('.')[0] + ', '

  if setting != FRAMERATE[1]:
    for framerate in ParseSetting(filename, FRAMERATE[1]):
      title += framerate.split('.')[0] + ', '

  if (setting != CODEC_IMPLEMENTATION_NAME[1]
      and setting != ENCODER_IMPLEMENTATION_NAME[1]):
    for enc_name in ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1]):
      title += enc_name + ', '

  if (setting != CODEC_IMPLEMENTATION_NAME[1]
      and setting != DECODER_IMPLEMENTATION_NAME[1]):
    for dec_name in ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1]):
      title += dec_name + ', '

  # Clip name (first '_'-separated token) and frame counts.
  title += ParseSetting(filename, FILENAME[1])[0].split('_')[0]
  for num_frames in ParseSetting(filename, NUM_FRAMES[1]):
    title += ' (' + num_frames.split('.')[0] + ')'

  return title
def ToString(input_list):
  """Formats `input_list` as a numbered menu, one entry per line."""
  no_metric = ('', '')  # Sentinel that matches nothing, so nothing is skipped.
  return ToStringWithoutMetric(input_list, no_metric)
def ToStringWithoutMetric(input_list, metric):
  """Numbered menu of the labels in `input_list`, skipping `metric`.

  Numbering starts at 1 and only counts entries that are kept.
  """
  labels = [m[1] for m in input_list if m != metric]
  return ''.join('%s. %s\n' % (index, label)
                 for index, label in enumerate(labels, 1))
def GetIdx(text_list):
  """Prompts the user with `text_list` and returns the chosen 0-based index.

  Returns -1 when the user picks option 0 ('All').
  """
  try:
    answer = raw_input(text_list)  # Python 2.
  except NameError:
    # Python 3: raw_input was renamed to input; the original raised here.
    answer = input(text_list)
  return int(answer) - 1
def main():
  """Entry point: parses the log given on argv[1] and shows the plots."""
  filename = sys.argv[1]

  # Setup.
  idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS))
  if idx_metric == -1:
    # Plot all metrics. One subplot for each metric.
    # Per subplot: metric vs bitrate (per resolution).
    cores = ParseSetting(filename, CORES[1])
    setting1 = CORES[1]
    setting2 = WIDTH[1]
    sub_keys = [cores[0]] * len(Y_METRICS)
    y_metrics = Y_METRICS
    x_metric = BITRATE[1]
  else:
    resolutions = ParseSetting(filename, WIDTH[1])
    idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS))
    if X_SETTINGS[idx] == BITRATE:
      idx = GetIdx("Plot per:\n%s" %
                   ToStringWithoutMetric(SUBPLOT_SETTINGS, BITRATE))
      idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx])
      # Plot one metric. One subplot for each resolution.
      # Per subplot: metric vs bitrate (per setting).
      setting1 = WIDTH[1]
      setting2 = METRICS_TO_PARSE[idx_setting][1]
      sub_keys = resolutions
      y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
      x_metric = BITRATE[1]
    else:
      # Plot one metric. One subplot for each resolution.
      # Per subplot: metric vs setting (per bitrate).
      setting1 = WIDTH[1]
      setting2 = BITRATE[1]
      sub_keys = resolutions
      y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
      x_metric = X_SETTINGS[idx][1]

  metrics = ParseMetrics(filename, setting1, setting2)

  # Stretch fig size.
  figsize = plt.rcParams["figure.figsize"]
  figsize[0] *= FIG_SIZE_SCALE_FACTOR_X
  figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y
  plt.rcParams["figure.figsize"] = figsize

  PlotFigure(sub_keys, y_metrics, x_metric, metrics,
             GetTitle(filename, setting2))
  plt.show()


if __name__ == '__main__':
  main()

View file

@ -0,0 +1,597 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <string>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/functional/any_invocable.h"
#include "api/environment/environment.h"
#include "api/environment/environment_factory.h"
#include "api/test/metrics/global_metrics_logger_and_exporter.h"
#include "api/units/data_rate.h"
#include "api/units/frequency.h"
#include "api/video/resolution.h"
#include "api/video_codecs/builtin_video_decoder_factory.h"
#include "api/video_codecs/builtin_video_encoder_factory.h"
#if defined(WEBRTC_ANDROID)
#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
#endif
#include "modules/video_coding/svc/scalability_mode_util.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "test/explicit_key_value_config.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/test_flags.h"
#include "test/testsupport/file_utils.h"
#include "test/video_codec_tester.h"
// Command-line flags controlling the encode/decode test configuration.
ABSL_FLAG(std::string,
          video_name,
          "FourPeople_1280x720_30",
          "Name of input video sequence.");
ABSL_FLAG(std::string,
          encoder,
          "libaom-av1",
          "Encoder: libaom-av1, libvpx-vp9, libvpx-vp8, openh264, hw-vp8, "
          "hw-vp9, hw-av1, hw-h264, hw-h265");
ABSL_FLAG(std::string,
          decoder,
          "dav1d",
          "Decoder: dav1d, libvpx-vp9, libvpx-vp8, ffmpeg-h264, hw-vp8, "
          "hw-vp9, hw-av1, hw-h264, hw-h265");
ABSL_FLAG(std::string, scalability_mode, "L1T1", "Scalability mode.");
ABSL_FLAG(int, width, 1280, "Width.");
ABSL_FLAG(int, height, 720, "Height.");
ABSL_FLAG(std::vector<std::string>,
          bitrate_kbps,
          {"1024"},
          "Encode target bitrate per layer (l0t0,l0t1,...l1t0,l1t1 and so on) "
          "in kbps.");
ABSL_FLAG(double,
          framerate_fps,
          30.0,
          "Encode target frame rate of the top temporal layer in fps.");
ABSL_FLAG(int, num_frames, 300, "Number of frames to encode and/or decode.");
ABSL_FLAG(std::string, field_trials, "", "Field trials to apply.");
ABSL_FLAG(std::string, test_name, "", "Test name.");
// Dump flags: write intermediate streams/frames next to the test output.
ABSL_FLAG(bool, dump_decoder_input, false, "Dump decoder input.");
ABSL_FLAG(bool, dump_decoder_output, false, "Dump decoder output.");
ABSL_FLAG(bool, dump_encoder_input, false, "Dump encoder input.");
ABSL_FLAG(bool, dump_encoder_output, false, "Dump encoder output.");
ABSL_FLAG(bool, write_csv, false, "Write metrics to a CSV file.");
namespace webrtc {
namespace test {
namespace {
using ::testing::Combine;
using ::testing::Values;
using VideoSourceSettings = VideoCodecTester::VideoSourceSettings;
using EncodingSettings = VideoCodecTester::EncodingSettings;
using VideoCodecStats = VideoCodecTester::VideoCodecStats;
using Filter = VideoCodecStats::Filter;
using PacingMode = VideoCodecTester::PacingSettings::PacingMode;
// Describes a raw (.yuv) input clip bundled with the test resources.
struct VideoInfo {
  std::string name;        // Resource file base name (without extension).
  Resolution resolution;   // Frame size of the raw clip.
  Frequency framerate;     // Native frame rate of the clip.
};
// Known input clips, keyed by clip name (matches the --video_name flag).
// All bundled clips are 1280x720 @ 30 fps.
const std::map<std::string, VideoInfo> kRawVideos = {
    {"FourPeople_1280x720_30",
     {.name = "FourPeople_1280x720_30",
      .resolution = {.width = 1280, .height = 720},
      .framerate = Frequency::Hertz(30)}},
    {"vidyo1_1280x720_30",
     {.name = "vidyo1_1280x720_30",
      .resolution = {.width = 1280, .height = 720},
      .framerate = Frequency::Hertz(30)}},
    {"vidyo4_1280x720_30",
     {.name = "vidyo4_1280x720_30",
      .resolution = {.width = 1280, .height = 720},
      .framerate = Frequency::Hertz(30)}},
    {"KristenAndSara_1280x720_30",
     {.name = "KristenAndSara_1280x720_30",
      .resolution = {.width = 1280, .height = 720},
      .framerate = Frequency::Hertz(30)}},
    {"Johnny_1280x720_30",
     {.name = "Johnny_1280x720_30",
      .resolution = {.width = 1280, .height = 720},
      .framerate = Frequency::Hertz(30)}}};

// RTP video clock rate.
static constexpr Frequency k90kHz = Frequency::Hertz(90000);
// Maps an encoder/decoder name (e.g. "libaom-av1", "hw-vp9") to the payload
// codec type it implements. Crashes on an unrecognized name.
std::string CodecNameToCodecType(std::string name) {
  struct Mapping {
    const char* token;  // Substring to look for in `name`.
    const char* type;   // Canonical codec type.
  };
  static constexpr Mapping kMappings[] = {{"av1", "AV1"},
                                          {"vp9", "VP9"},
                                          {"vp8", "VP8"},
                                          {"h264", "H264"},
                                          {"h265", "H265"}};
  for (const Mapping& mapping : kMappings) {
    if (name.find(mapping.token) != std::string::npos) {
      return mapping.type;
    }
  }
  RTC_CHECK_NOTREACHED();
}
// TODO(webrtc:14852): Make Create[Encoder,Decoder]Factory to work with codec
// name directly.
// Maps a codec name to the factory implementation that provides it:
// hardware ("hw-*") names use MediaCodec, everything else the built-in
// software factory.
std::string CodecNameToCodecImpl(std::string name) {
  const bool is_hardware = name.find("hw") != std::string::npos;
  return is_hardware ? "mediacodec" : "builtin";
}
// Returns the encoder factory selected by `impl`: the built-in software
// factory for "builtin", otherwise the Android MediaCodec factory (after
// one-time JVM setup). On non-Android platforms a non-"builtin" request
// yields nullptr.
std::unique_ptr<VideoEncoderFactory> CreateEncoderFactory(std::string impl) {
  if (impl == "builtin") {
    return CreateBuiltinVideoEncoderFactory();
  }
#if defined(WEBRTC_ANDROID)
  InitializeAndroidObjects();
  return CreateAndroidEncoderFactory();
#else
  return nullptr;
#endif
}
// Returns the decoder factory selected by `impl`: the built-in software
// factory for "builtin", otherwise the Android MediaCodec factory (after
// one-time JVM setup). On non-Android platforms a non-"builtin" request
// yields nullptr.
std::unique_ptr<VideoDecoderFactory> CreateDecoderFactory(std::string impl) {
  if (impl == "builtin") {
    return CreateBuiltinVideoDecoderFactory();
  }
#if defined(WEBRTC_ANDROID)
  InitializeAndroidObjects();
  return CreateAndroidDecoderFactory();
#else
  return nullptr;
#endif
}
// Returns the name used for logged metrics: the --test_name flag when set,
// otherwise the name of the currently running gtest.
std::string TestName() {
  const std::string flag_name = absl::GetFlag(FLAGS_test_name);
  if (!flag_name.empty()) {
    return flag_name;
  }
  return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
std::string TestOutputPath() {
std::string output_path =
(rtc::StringBuilder() << OutputPath() << TestName()).str();
std::string output_dir = DirName(output_path);
bool result = CreateDir(output_dir);
RTC_CHECK(result) << "Cannot create " << output_dir;
return output_path;
}
} // namespace
// Runs a full encode+decode pass over `video_info` with the per-frame
// `encoding_settings` and returns the collected stats. Returns nullptr when
// the requested encoder implementation, or both the requested and the
// built-in fallback decoder, do not support the SDP video format.
std::unique_ptr<VideoCodecStats> RunEncodeDecodeTest(
    const Environment& env,
    std::string encoder_impl,
    std::string decoder_impl,
    const VideoInfo& video_info,
    const std::map<uint32_t, EncodingSettings>& encoding_settings) {
  VideoSourceSettings source_settings{
      .file_path = ResourcePath(video_info.name, "yuv"),
      .resolution = video_info.resolution,
      .framerate = video_info.framerate};
  // All frames share the same format; probe support with the first one.
  const SdpVideoFormat& sdp_video_format =
      encoding_settings.begin()->second.sdp_video_format;
  std::unique_ptr<VideoEncoderFactory> encoder_factory =
      CreateEncoderFactory(encoder_impl);
  if (!encoder_factory
           ->QueryCodecSupport(sdp_video_format,
                               /*scalability_mode=*/absl::nullopt)
           .is_supported) {
    RTC_LOG(LS_WARNING) << "No " << encoder_impl << " encoder for video format "
                        << sdp_video_format.ToString();
    return nullptr;
  }
  std::unique_ptr<VideoDecoderFactory> decoder_factory =
      CreateDecoderFactory(decoder_impl);
  if (!decoder_factory
           ->QueryCodecSupport(sdp_video_format,
                               /*reference_scaling=*/false)
           .is_supported) {
    RTC_LOG(LS_WARNING) << "No " << decoder_impl << " decoder for video format "
                        << sdp_video_format.ToString()
                        << ". Trying built-in decoder.";
    // TODO(ssilkin): No H264 support in ffmpeg on ARM. Consider trying HW
    // decoder.
    decoder_factory = CreateDecoderFactory("builtin");
    if (!decoder_factory
             ->QueryCodecSupport(sdp_video_format,
                                 /*reference_scaling=*/false)
             .is_supported) {
      RTC_LOG(LS_WARNING) << "No " << decoder_impl
                          << " decoder for video format "
                          << sdp_video_format.ToString();
      return nullptr;
    }
  }
  std::string output_path = TestOutputPath();
  VideoCodecTester::EncoderSettings encoder_settings;
  // SW codecs are fed as fast as possible; HW codecs are paced in real time
  // to match how the platform pipeline is driven in production.
  encoder_settings.pacing_settings.mode =
      encoder_impl == "builtin" ? PacingMode::kNoPacing : PacingMode::kRealTime;
  if (absl::GetFlag(FLAGS_dump_encoder_input)) {
    encoder_settings.encoder_input_base_path = output_path + "_enc_input";
  }
  if (absl::GetFlag(FLAGS_dump_encoder_output)) {
    encoder_settings.encoder_output_base_path = output_path + "_enc_output";
  }
  VideoCodecTester::DecoderSettings decoder_settings;
  decoder_settings.pacing_settings.mode =
      decoder_impl == "builtin" ? PacingMode::kNoPacing : PacingMode::kRealTime;
  if (absl::GetFlag(FLAGS_dump_decoder_input)) {
    decoder_settings.decoder_input_base_path = output_path + "_dec_input";
  }
  if (absl::GetFlag(FLAGS_dump_decoder_output)) {
    decoder_settings.decoder_output_base_path = output_path + "_dec_output";
  }
  return VideoCodecTester::RunEncodeDecodeTest(
      env, source_settings, encoder_factory.get(), decoder_factory.get(),
      encoder_settings, decoder_settings, encoding_settings);
}
// Runs an encode-only pass over `video_info` with the per-frame
// `encoding_settings` and returns the collected stats, or nullptr when the
// requested encoder implementation does not support the SDP video format.
std::unique_ptr<VideoCodecStats> RunEncodeTest(
    std::string codec_type,
    std::string codec_impl,
    const VideoInfo& video_info,
    const std::map<uint32_t, EncodingSettings>& encoding_settings) {
  VideoSourceSettings source_settings{
      .file_path = ResourcePath(video_info.name, "yuv"),
      .resolution = video_info.resolution,
      .framerate = video_info.framerate};
  // All frames share the same format; probe support with the first one.
  const SdpVideoFormat& sdp_video_format =
      encoding_settings.begin()->second.sdp_video_format;
  std::unique_ptr<VideoEncoderFactory> encoder_factory =
      CreateEncoderFactory(codec_impl);
  if (!encoder_factory
           ->QueryCodecSupport(sdp_video_format,
                               /*scalability_mode=*/absl::nullopt)
           .is_supported) {
    RTC_LOG(LS_WARNING) << "No encoder for video format "
                        << sdp_video_format.ToString();
    return nullptr;
  }
  std::string output_path = TestOutputPath();
  VideoCodecTester::EncoderSettings encoder_settings;
  // SW codecs run unpaced; HW codecs are paced in real time.
  encoder_settings.pacing_settings.mode =
      codec_impl == "builtin" ? PacingMode::kNoPacing : PacingMode::kRealTime;
  if (absl::GetFlag(FLAGS_dump_encoder_input)) {
    encoder_settings.encoder_input_base_path = output_path + "_enc_input";
  }
  if (absl::GetFlag(FLAGS_dump_encoder_output)) {
    encoder_settings.encoder_output_base_path = output_path + "_enc_output";
  }
  return VideoCodecTester::RunEncodeTest(source_settings, encoder_factory.get(),
                                         encoder_settings, encoding_settings);
}
// Parameterized fixture for the encode+decode quality tests: parameters are
// codec type, implementation, input clip and the coding settings tuple
// (resolution, framerate, bitrate, expected minimum PSNR).
class SpatialQualityTest : public ::testing::TestWithParam<std::tuple<
                               /*codec_type=*/std::string,
                               /*codec_impl=*/std::string,
                               VideoInfo,
                               std::tuple</*width=*/int,
                                          /*height=*/int,
                                          /*framerate_fps=*/double,
                                          /*bitrate_kbps=*/int,
                                          /*expected_min_psnr=*/double>>> {
 public:
  // Builds a unique, filesystem/gtest-safe name for each parameter combo,
  // e.g. "AV1builtinFourPeople_1280x720_30320x180p30000mhz32kbps".
  // Framerate is encoded in millihertz to keep the name integer-only.
  static std::string TestParamsToString(
      const ::testing::TestParamInfo<SpatialQualityTest::ParamType>& info) {
    auto [codec_type, codec_impl, video_info, coding_settings] = info.param;
    auto [width, height, framerate_fps, bitrate_kbps, psnr] = coding_settings;
    return std::string(codec_type + codec_impl + video_info.name +
                       std::to_string(width) + "x" + std::to_string(height) +
                       "p" +
                       std::to_string(static_cast<int>(1000 * framerate_fps)) +
                       "mhz" + std::to_string(bitrate_kbps) + "kbps");
  }
};
// Encodes and decodes 10 s of the clip at a fixed rate and checks that the
// average luma PSNR reaches the expected minimum. Metrics are logged even
// when the codec is unsupported (stats == nullptr), so dashboards keep a
// (zero-valued) data point for every configuration.
TEST_P(SpatialQualityTest, SpatialQuality) {
  const Environment env = CreateEnvironment();
  auto [codec_type, codec_impl, video_info, coding_settings] = GetParam();
  auto [width, height, framerate_fps, bitrate_kbps, expected_min_psnr] =
      coding_settings;
  int duration_s = 10;
  int num_frames = duration_s * framerate_fps;
  std::map<uint32_t, EncodingSettings> frames_settings =
      VideoCodecTester::CreateEncodingSettings(
          codec_type, /*scalability_mode=*/"L1T1", width, height,
          {bitrate_kbps}, framerate_fps, num_frames);
  std::unique_ptr<VideoCodecStats> stats = RunEncodeDecodeTest(
      env, codec_impl, codec_impl, video_info, frames_settings);
  VideoCodecStats::Stream stream;
  if (stats != nullptr) {
    stream = stats->Aggregate(Filter{});
    // PSNR is only asserted in quick-perf-test mode; full runs just log.
    if (absl::GetFlag(FLAGS_webrtc_quick_perf_test)) {
      EXPECT_GE(stream.psnr.y.GetAverage(), expected_min_psnr);
    }
  }
  stream.LogMetrics(
      GetGlobalMetricsLogger(),
      ::testing::UnitTest::GetInstance()->current_test_info()->name(),
      /*prefix=*/"",
      /*metadata=*/
      {{"video_name", video_info.name},
       {"codec_type", codec_type},
       {"codec_impl", codec_impl}});
}
// All codec types crossed with all available implementations (MediaCodec is
// only offered on Android) over a matrix of resolution/bitrate operating
// points; the last tuple element is the expected minimum PSNR.
INSTANTIATE_TEST_SUITE_P(
    All,
    SpatialQualityTest,
    Combine(Values("AV1", "VP9", "VP8", "H264", "H265"),
#if defined(WEBRTC_ANDROID)
            Values("builtin", "mediacodec"),
#else
            Values("builtin"),
#endif
            Values(kRawVideos.at("FourPeople_1280x720_30")),
            Values(std::make_tuple(320, 180, 30, 32, 26),
                   std::make_tuple(320, 180, 30, 64, 29),
                   std::make_tuple(320, 180, 30, 128, 32),
                   std::make_tuple(320, 180, 30, 256, 36),
                   std::make_tuple(640, 360, 30, 128, 29),
                   std::make_tuple(640, 360, 30, 256, 33),
                   std::make_tuple(640, 360, 30, 384, 35),
                   std::make_tuple(640, 360, 30, 512, 36),
                   std::make_tuple(1280, 720, 30, 256, 30),
                   std::make_tuple(1280, 720, 30, 512, 34),
                   std::make_tuple(1280, 720, 30, 1024, 37),
                   std::make_tuple(1280, 720, 30, 2048, 39))),
    SpatialQualityTest::TestParamsToString);
// Parameterized fixture for bitrate-adaptation tests: the pair holds the
// bitrate before and after the mid-test rate switch, in kbps.
class BitrateAdaptationTest
    : public ::testing::TestWithParam<
          std::tuple</*codec_type=*/std::string,
                     /*codec_impl=*/std::string,
                     VideoInfo,
                     std::pair</*bitrate_kbps=*/int, /*bitrate_kbps=*/int>>> {
 public:
  // Builds a unique test name, e.g. "AV1builtinFourPeople...1024kbps512kbps".
  static std::string TestParamsToString(
      const ::testing::TestParamInfo<BitrateAdaptationTest::ParamType>& info) {
    auto [codec_type, codec_impl, video_info, bitrate_kbps] = info.param;
    return std::string(codec_type + codec_impl + video_info.name +
                       std::to_string(bitrate_kbps.first) + "kbps" +
                       std::to_string(bitrate_kbps.second) + "kbps");
  }
};
// Encodes two back-to-back 10 s intervals at different target bitrates and
// verifies (in quick-perf-test mode) that the encoder tracks the new rate:
// only the second interval, after the switch, is aggregated.
TEST_P(BitrateAdaptationTest, BitrateAdaptation) {
  auto [codec_type, codec_impl, video_info, bitrate_kbps] = GetParam();
  int duration_s = 10;  // Duration of fixed rate interval.
  int num_frames =
      static_cast<int>(duration_s * video_info.framerate.hertz<double>());
  std::map<uint32_t, EncodingSettings> encoding_settings =
      VideoCodecTester::CreateEncodingSettings(
          codec_type, /*scalability_mode=*/"L1T1",
          /*width=*/640, /*height=*/360, {bitrate_kbps.first},
          /*framerate_fps=*/30, num_frames);
  // First RTP timestamp of the second interval: one 30 fps frame interval
  // past the last frame of the first interval.
  uint32_t initial_timestamp_rtp =
      encoding_settings.rbegin()->first + k90kHz / Frequency::Hertz(30);
  std::map<uint32_t, EncodingSettings> encoding_settings2 =
      VideoCodecTester::CreateEncodingSettings(
          codec_type, /*scalability_mode=*/"L1T1",
          /*width=*/640, /*height=*/360, {bitrate_kbps.second},
          /*framerate_fps=*/30, num_frames, initial_timestamp_rtp);
  encoding_settings.merge(encoding_settings2);
  std::unique_ptr<VideoCodecStats> stats =
      RunEncodeTest(codec_type, codec_impl, video_info, encoding_settings);
  VideoCodecStats::Stream stream;
  if (stats != nullptr) {
    // Aggregate only frames from the post-switch interval.
    stream = stats->Aggregate({.min_timestamp_rtp = initial_timestamp_rtp});
    if (absl::GetFlag(FLAGS_webrtc_quick_perf_test)) {
      EXPECT_NEAR(stream.bitrate_mismatch_pct.GetAverage(), 0, 10);
      EXPECT_NEAR(stream.framerate_mismatch_pct.GetAverage(), 0, 10);
    }
  }
  stream.LogMetrics(
      GetGlobalMetricsLogger(),
      ::testing::UnitTest::GetInstance()->current_test_info()->name(),
      /*prefix=*/"",
      /*metadata=*/
      {{"codec_type", codec_type},
       {"codec_impl", codec_impl},
       {"video_name", video_info.name},
       {"rate_profile", std::to_string(bitrate_kbps.first) + "," +
                            std::to_string(bitrate_kbps.second)}});
}
// Both rate-switch directions (down and up) for every codec/implementation.
INSTANTIATE_TEST_SUITE_P(
    All,
    BitrateAdaptationTest,
    Combine(Values("AV1", "VP9", "VP8", "H264", "H265"),
#if defined(WEBRTC_ANDROID)
            Values("builtin", "mediacodec"),
#else
            Values("builtin"),
#endif
            Values(kRawVideos.at("FourPeople_1280x720_30")),
            Values(std::pair(1024, 512), std::pair(512, 1024))),
    BitrateAdaptationTest::TestParamsToString);
// Parameterized fixture for framerate-adaptation tests: the pair holds the
// framerate (fps) before and after the mid-test switch.
class FramerateAdaptationTest
    : public ::testing::TestWithParam<std::tuple</*codec_type=*/std::string,
                                                 /*codec_impl=*/std::string,
                                                 VideoInfo,
                                                 std::pair<double, double>>> {
 public:
  // Builds a unique test name; framerates are encoded in millihertz so the
  // name stays integer-only.
  static std::string TestParamsToString(
      const ::testing::TestParamInfo<FramerateAdaptationTest::ParamType>&
          info) {
    auto [codec_type, codec_impl, video_info, framerate_fps] = info.param;
    return std::string(
        codec_type + codec_impl + video_info.name +
        std::to_string(static_cast<int>(1000 * framerate_fps.first)) + "mhz" +
        std::to_string(static_cast<int>(1000 * framerate_fps.second)) + "mhz");
  }
};
// Encodes two back-to-back 10 s intervals at different framerates and
// verifies (in quick-perf-test mode) that the encoder tracks the new rate:
// only the second interval, after the switch, is aggregated.
TEST_P(FramerateAdaptationTest, FramerateAdaptation) {
  auto [codec_type, codec_impl, video_info, framerate_fps] = GetParam();
  int duration_s = 10;  // Duration of fixed rate interval.
  std::map<uint32_t, EncodingSettings> encoding_settings =
      VideoCodecTester::CreateEncodingSettings(
          codec_type, /*scalability_mode=*/"L1T1",
          /*width=*/640, /*height=*/360,
          /*layer_bitrates_kbps=*/{512}, framerate_fps.first,
          static_cast<int>(duration_s * framerate_fps.first));
  // First RTP timestamp of the second interval: one frame interval (at the
  // first framerate) past the last frame of the first interval.
  uint32_t initial_timestamp_rtp =
      encoding_settings.rbegin()->first +
      k90kHz / Frequency::Hertz(framerate_fps.first);
  std::map<uint32_t, EncodingSettings> encoding_settings2 =
      VideoCodecTester::CreateEncodingSettings(
          codec_type, /*scalability_mode=*/"L1T1", /*width=*/640,
          /*height=*/360,
          /*layer_bitrates_kbps=*/{512}, framerate_fps.second,
          static_cast<int>(duration_s * framerate_fps.second),
          initial_timestamp_rtp);
  encoding_settings.merge(encoding_settings2);
  std::unique_ptr<VideoCodecStats> stats =
      RunEncodeTest(codec_type, codec_impl, video_info, encoding_settings);
  VideoCodecStats::Stream stream;
  if (stats != nullptr) {
    // Aggregate only frames from the post-switch interval.
    stream = stats->Aggregate({.min_timestamp_rtp = initial_timestamp_rtp});
    if (absl::GetFlag(FLAGS_webrtc_quick_perf_test)) {
      EXPECT_NEAR(stream.bitrate_mismatch_pct.GetAverage(), 0, 10);
      EXPECT_NEAR(stream.framerate_mismatch_pct.GetAverage(), 0, 10);
    }
  }
  stream.LogMetrics(
      GetGlobalMetricsLogger(),
      ::testing::UnitTest::GetInstance()->current_test_info()->name(),
      /*prefix=*/"",
      /*metadata=*/
      {{"codec_type", codec_type},
       {"codec_impl", codec_impl},
       {"video_name", video_info.name},
       {"rate_profile", std::to_string(framerate_fps.first) + "," +
                            std::to_string(framerate_fps.second)}});
}
// Both framerate-switch directions (30->15 and 15->30 fps) for every
// codec/implementation.
INSTANTIATE_TEST_SUITE_P(
    All,
    FramerateAdaptationTest,
    Combine(Values("AV1", "VP9", "VP8", "H264", "H265"),
#if defined(WEBRTC_ANDROID)
            Values("builtin", "mediacodec"),
#else
            Values("builtin"),
#endif
            Values(kRawVideos.at("FourPeople_1280x720_30")),
            Values(std::pair(30, 15), std::pair(15, 30))),
    FramerateAdaptationTest::TestParamsToString);
// Manually-run (DISABLED_) flag-driven encode/decode test. Reads the codec,
// clip and rate configuration from command-line flags, runs one
// encode+decode pass, and logs metrics both unsliced and sliced per
// spatial/temporal layer; optionally writes per-frame stats to CSV.
TEST(VideoCodecTest, DISABLED_EncodeDecode) {
  // Field trials are applied both globally (ScopedFieldTrials) and through
  // the environment's key-value config.
  ScopedFieldTrials field_trials(absl::GetFlag(FLAGS_field_trials));
  const Environment env =
      CreateEnvironment(std::make_unique<ExplicitKeyValueConfig>(
          absl::GetFlag(FLAGS_field_trials)));
  // --bitrate_kbps is a list of per-layer bitrates given as strings.
  std::vector<std::string> bitrate_str = absl::GetFlag(FLAGS_bitrate_kbps);
  std::vector<int> bitrate_kbps;
  std::transform(bitrate_str.begin(), bitrate_str.end(),
                 std::back_inserter(bitrate_kbps),
                 [](const std::string& str) { return std::stoi(str); });
  std::map<uint32_t, EncodingSettings> frames_settings =
      VideoCodecTester::CreateEncodingSettings(
          CodecNameToCodecType(absl::GetFlag(FLAGS_encoder)),
          absl::GetFlag(FLAGS_scalability_mode), absl::GetFlag(FLAGS_width),
          absl::GetFlag(FLAGS_height), {bitrate_kbps},
          absl::GetFlag(FLAGS_framerate_fps), absl::GetFlag(FLAGS_num_frames));
  // TODO(webrtc:14852): Pass encoder and decoder names directly, and update
  // logged test name (implies lossing history in the chromeperf dashboard).
  // Sync with changes in Stream::LogMetrics (see TODOs there).
  std::unique_ptr<VideoCodecStats> stats = RunEncodeDecodeTest(
      env, CodecNameToCodecImpl(absl::GetFlag(FLAGS_encoder)),
      CodecNameToCodecImpl(absl::GetFlag(FLAGS_decoder)),
      kRawVideos.at(absl::GetFlag(FLAGS_video_name)), frames_settings);
  ASSERT_NE(nullptr, stats);
  // Log unsliced metrics.
  VideoCodecStats::Stream stream = stats->Aggregate(Filter{});
  stream.LogMetrics(GetGlobalMetricsLogger(), TestName(), /*prefix=*/"",
                    /*metadata=*/{});
  // Log metrics sliced on spatial and temporal layer.
  ScalabilityMode scalability_mode =
      *ScalabilityModeFromString(absl::GetFlag(FLAGS_scalability_mode));
  int num_spatial_layers = ScalabilityModeToNumSpatialLayers(scalability_mode);
  int num_temporal_layers =
      ScalabilityModeToNumTemporalLayers(scalability_mode);
  for (int sidx = 0; sidx < num_spatial_layers; ++sidx) {
    for (int tidx = 0; tidx < num_temporal_layers; ++tidx) {
      // Metric names are prefixed "s<sidx>t<tidx>_" per layer slice.
      std::string metric_name_prefix =
          (rtc::StringBuilder() << "s" << sidx << "t" << tidx << "_").str();
      stream = stats->Aggregate(
          {.layer_id = {{.spatial_idx = sidx, .temporal_idx = tidx}}});
      stream.LogMetrics(GetGlobalMetricsLogger(), TestName(),
                        metric_name_prefix,
                        /*metadata=*/{});
    }
  }
  if (absl::GetFlag(FLAGS_write_csv)) {
    stats->LogMetrics(
        (rtc::StringBuilder() << TestOutputPath() << ".csv").str(),
        stats->Slice(Filter{}, /*merge=*/false), /*metadata=*/
        {{"test_name", TestName()}});
  }
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,183 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/video_codec_unittest.h"
#include <utility>
#include "api/test/create_frame_generator.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "test/video_codec_settings.h"
// How long WaitForEncodedFrames()/WaitForDecodedFrame() block before the
// test is considered to have timed out.
static constexpr webrtc::TimeDelta kEncodeTimeout =
    webrtc::TimeDelta::Millis(100);
static constexpr webrtc::TimeDelta kDecodeTimeout =
    webrtc::TimeDelta::Millis(25);
// Set bitrate to get higher quality.
static const int kStartBitrate = 300;
static const int kMaxBitrate = 4000;
static const int kWidth = 176;        // Width of the input image.
static const int kHeight = 144;       // Height of the input image.
static const int kMaxFramerate = 30;  // Arbitrary value.
namespace webrtc {
namespace {
// Encoder capabilities with loss notification disabled.
const VideoEncoder::Capabilities kCapabilities(false);
}
// Collects encoded frames on behalf of the test fixture. Signals the
// fixture's event either on every frame (threshold unset/0) or only once
// the configured number of frames has accumulated, after which the
// threshold resets to 1 so subsequent frames signal immediately.
EncodedImageCallback::Result
VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(
    const EncodedImage& frame,
    const CodecSpecificInfo* codec_specific_info) {
  MutexLock lock(&test_->encoded_frame_section_);
  test_->encoded_frames_.push_back(frame);
  RTC_DCHECK(codec_specific_info);
  test_->codec_specific_infos_.push_back(*codec_specific_info);
  if (!test_->wait_for_encoded_frames_threshold_) {
    test_->encoded_frame_event_.Set();
    return Result(Result::OK);
  }
  if (test_->encoded_frames_.size() ==
      test_->wait_for_encoded_frames_threshold_) {
    test_->wait_for_encoded_frames_threshold_ = 1;
    test_->encoded_frame_event_.Set();
  }
  return Result(Result::OK);
}
// Stores the decoded frame and its QP on the fixture (overwriting any frame
// not yet consumed) and signals the fixture's decode event.
void VideoCodecUnitTest::FakeDecodeCompleteCallback::Decoded(
    VideoFrame& frame,
    absl::optional<int32_t> decode_time_ms,
    absl::optional<uint8_t> qp) {
  MutexLock lock(&test_->decoded_frame_section_);
  test_->decoded_frame_.emplace(frame);
  test_->decoded_qp_ = qp;
  test_->decoded_frame_event_.Set();
}
// Builds the default VP8 codec settings (subclasses may adjust them via
// ModifyCodecSettings), creates the frame generator, encoder and decoder,
// wires up the fake callbacks, and initializes/configures both codecs.
void VideoCodecUnitTest::SetUp() {
  webrtc::test::CodecSettings(kVideoCodecVP8, &codec_settings_);
  codec_settings_.startBitrate = kStartBitrate;
  codec_settings_.maxBitrate = kMaxBitrate;
  codec_settings_.maxFramerate = kMaxFramerate;
  codec_settings_.width = kWidth;
  codec_settings_.height = kHeight;
  // Give subclasses a chance to override the defaults before init.
  ModifyCodecSettings(&codec_settings_);
  input_frame_generator_ = test::CreateSquareFrameGenerator(
      codec_settings_.width, codec_settings_.height,
      test::FrameGeneratorInterface::OutputType::kI420, absl::optional<int>());
  encoder_ = CreateEncoder();
  decoder_ = CreateDecoder();
  encoder_->RegisterEncodeCompleteCallback(&encode_complete_callback_);
  decoder_->RegisterDecodeCompleteCallback(&decode_complete_callback_);
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->InitEncode(
                &codec_settings_,
                VideoEncoder::Settings(kCapabilities, 1 /* number of cores */,
                                       0 /* max payload size (unused) */)));
  VideoDecoder::Settings decoder_settings;
  decoder_settings.set_codec_type(codec_settings_.codecType);
  decoder_settings.set_max_render_resolution(
      {codec_settings_.width, codec_settings_.height});
  EXPECT_TRUE(decoder_->Configure(decoder_settings));
}
// Default implementation leaves the settings untouched; subclasses override
// this to customize the codec configuration before InitEncode().
void VideoCodecUnitTest::ModifyCodecSettings(VideoCodec* codec_settings) {}
// Pulls the next frame from the frame generator and stamps it with an RTP
// timestamp advanced by one frame interval at the configured framerate,
// plus the matching wallclock timestamp in microseconds.
VideoFrame VideoCodecUnitTest::NextInputFrame() {
  test::FrameGeneratorInterface::VideoFrameData frame_data =
      input_frame_generator_->NextFrame();
  VideoFrame input_frame = VideoFrame::Builder()
                               .set_video_frame_buffer(frame_data.buffer)
                               .set_update_rect(frame_data.update_rect)
                               .build();
  // kVideoPayloadTypeFrequency is the 90 kHz RTP video clock.
  const uint32_t timestamp =
      last_input_frame_timestamp_ +
      kVideoPayloadTypeFrequency / codec_settings_.maxFramerate;
  input_frame.set_timestamp(timestamp);
  // Convert 90 kHz ticks to microseconds: multiply before dividing, in
  // 64 bits, to avoid the truncation of `timestamp * (1000 / 90)`, which
  // rounded the per-tick factor down from 11.1 to 11 us and also risked
  // 32-bit overflow.
  input_frame.set_timestamp_us(static_cast<int64_t>(timestamp) * 1000 / 90);
  last_input_frame_timestamp_ = timestamp;
  return input_frame;
}
// Waits for exactly one encoded frame and copies it (and its codec-specific
// info) to the out-params. Returns false on timeout; EXPECTs exactly one
// pending frame.
bool VideoCodecUnitTest::WaitForEncodedFrame(
    EncodedImage* frame,
    CodecSpecificInfo* codec_specific_info) {
  std::vector<EncodedImage> frames;
  std::vector<CodecSpecificInfo> codec_specific_infos;
  if (!WaitForEncodedFrames(&frames, &codec_specific_infos))
    return false;
  EXPECT_EQ(frames.size(), static_cast<size_t>(1));
  EXPECT_EQ(frames.size(), codec_specific_infos.size());
  *frame = frames[0];
  *codec_specific_info = codec_specific_infos[0];
  return true;
}
// Sets how many encoded frames must accumulate before the encode event is
// signaled; must be called before Encode() for multi-frame waits.
void VideoCodecUnitTest::SetWaitForEncodedFramesThreshold(size_t num_frames) {
  MutexLock lock(&encoded_frame_section_);
  wait_for_encoded_frames_threshold_ = num_frames;
}
// Waits (up to kEncodeTimeout) for the encoded-frame event, then moves all
// accumulated frames and codec-specific infos to the out-params, clearing
// the internal buffers. Returns false if no frames were collected.
bool VideoCodecUnitTest::WaitForEncodedFrames(
    std::vector<EncodedImage>* frames,
    std::vector<CodecSpecificInfo>* codec_specific_info) {
  EXPECT_TRUE(encoded_frame_event_.Wait(kEncodeTimeout))
      << "Timed out while waiting for encoded frame.";
  // This becomes unsafe if there are multiple threads waiting for frames.
  MutexLock lock(&encoded_frame_section_);
  EXPECT_FALSE(encoded_frames_.empty());
  EXPECT_FALSE(codec_specific_infos_.empty());
  EXPECT_EQ(encoded_frames_.size(), codec_specific_infos_.size());
  if (!encoded_frames_.empty()) {
    *frames = encoded_frames_;
    encoded_frames_.clear();
    RTC_DCHECK(!codec_specific_infos_.empty());
    *codec_specific_info = codec_specific_infos_;
    codec_specific_infos_.clear();
    return true;
  } else {
    return false;
  }
}
// Waits (up to kDecodeTimeout) for a decoded frame, then moves it and its QP
// to the out-params, consuming the stored frame. Returns false if no frame
// arrived.
bool VideoCodecUnitTest::WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
                                             absl::optional<uint8_t>* qp) {
  bool ret = decoded_frame_event_.Wait(kDecodeTimeout);
  EXPECT_TRUE(ret) << "Timed out while waiting for a decoded frame.";
  // This becomes unsafe if there are multiple threads waiting for frames.
  MutexLock lock(&decoded_frame_section_);
  EXPECT_TRUE(decoded_frame_);
  if (decoded_frame_) {
    frame->reset(new VideoFrame(std::move(*decoded_frame_)));
    *qp = decoded_qp_;
    decoded_frame_.reset();
    return true;
  } else {
    return false;
  }
}
// Returns the number of encoded frames currently buffered (not yet consumed
// by WaitForEncodedFrames()).
size_t VideoCodecUnitTest::GetNumEncodedFrames() {
  MutexLock lock(&encoded_frame_section_);
  return encoded_frames_.size();
}
} // namespace webrtc

View file

@ -0,0 +1,128 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_
#include <memory>
#include <vector>
#include "api/test/frame_generator_interface.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/utility/vp8_header_parser.h"
#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
#include "rtc_base/event.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
#include "test/gtest.h"
namespace webrtc {
// Base fixture for encoder/decoder unit tests. Subclasses provide the codec
// instances via CreateEncoder()/CreateDecoder(); the fixture supplies a
// square-pattern frame generator, fake encode/decode callbacks, and helpers
// that block until encoded/decoded frames arrive.
class VideoCodecUnitTest : public ::testing::Test {
 public:
  VideoCodecUnitTest()
      : encode_complete_callback_(this),
        decode_complete_callback_(this),
        wait_for_encoded_frames_threshold_(1),
        last_input_frame_timestamp_(0) {}
 protected:
  // Forwards encoded frames from the encoder into the fixture's buffers.
  class FakeEncodeCompleteCallback : public webrtc::EncodedImageCallback {
   public:
    explicit FakeEncodeCompleteCallback(VideoCodecUnitTest* test)
        : test_(test) {}
    Result OnEncodedImage(const EncodedImage& frame,
                          const CodecSpecificInfo* codec_specific_info);
   private:
    VideoCodecUnitTest* const test_;
  };
  // Forwards decoded frames from the decoder into the fixture. Only the
  // three-argument overload is expected to be called.
  class FakeDecodeCompleteCallback : public webrtc::DecodedImageCallback {
   public:
    explicit FakeDecodeCompleteCallback(VideoCodecUnitTest* test)
        : test_(test) {}
    int32_t Decoded(VideoFrame& frame) override {
      RTC_DCHECK_NOTREACHED();
      return -1;
    }
    int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
      RTC_DCHECK_NOTREACHED();
      return -1;
    }
    void Decoded(VideoFrame& frame,
                 absl::optional<int32_t> decode_time_ms,
                 absl::optional<uint8_t> qp) override;
   private:
    VideoCodecUnitTest* const test_;
  };
  virtual std::unique_ptr<VideoEncoder> CreateEncoder() = 0;
  virtual std::unique_ptr<VideoDecoder> CreateDecoder() = 0;
  void SetUp() override;
  // Hook for subclasses to adjust codec settings before InitEncode().
  virtual void ModifyCodecSettings(VideoCodec* codec_settings);
  // Returns the next generated frame, timestamped at the configured rate.
  VideoFrame NextInputFrame();
  // Helper method for waiting for a single encoded frame.
  bool WaitForEncodedFrame(EncodedImage* frame,
                           CodecSpecificInfo* codec_specific_info);
  // Helper methods for waiting for multiple encoded frames. Caller must
  // define how many frames are to be waited for via `num_frames` before calling
  // Encode(). Then, they can expect to retrieve them via WaitForEncodedFrames().
  void SetWaitForEncodedFramesThreshold(size_t num_frames);
  bool WaitForEncodedFrames(
      std::vector<EncodedImage>* frames,
      std::vector<CodecSpecificInfo>* codec_specific_info);
  // Helper method for waiting for a single decoded frame.
  bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
                           absl::optional<uint8_t>* qp);
  // Number of encoded frames buffered and not yet consumed.
  size_t GetNumEncodedFrames();
  VideoCodec codec_settings_;
  std::unique_ptr<VideoEncoder> encoder_;
  std::unique_ptr<VideoDecoder> decoder_;
  std::unique_ptr<test::FrameGeneratorInterface> input_frame_generator_;
 private:
  FakeEncodeCompleteCallback encode_complete_callback_;
  FakeDecodeCompleteCallback decode_complete_callback_;
  // Signaled by the encode callback; paired with encoded_frame_section_.
  rtc::Event encoded_frame_event_;
  Mutex encoded_frame_section_;
  size_t wait_for_encoded_frames_threshold_;
  std::vector<EncodedImage> encoded_frames_
      RTC_GUARDED_BY(encoded_frame_section_);
  std::vector<CodecSpecificInfo> codec_specific_infos_
      RTC_GUARDED_BY(encoded_frame_section_);
  // Signaled by the decode callback; paired with decoded_frame_section_.
  rtc::Event decoded_frame_event_;
  Mutex decoded_frame_section_;
  absl::optional<VideoFrame> decoded_frame_
      RTC_GUARDED_BY(decoded_frame_section_);
  absl::optional<uint8_t> decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_);
  // Last RTP timestamp handed out by NextInputFrame() (90 kHz clock).
  uint32_t last_input_frame_timestamp_;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_UNITTEST_H_

View file

@ -0,0 +1,159 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <vector>
#include "api/environment/environment.h"
#include "api/environment/environment_factory.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#if defined(WEBRTC_ANDROID)
#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
#elif defined(WEBRTC_IOS)
#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
#endif
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/video_codec_settings.h"
namespace webrtc {
namespace test {
namespace {
using ::testing::NotNull;
// Encoder capabilities with loss notification disabled.
const VideoEncoder::Capabilities kCapabilities(false);
// Initializes `encoder` with fixed 640x480@30 settings for the given codec
// type and returns the InitEncode() result code.
int32_t InitEncoder(VideoCodecType codec_type, VideoEncoder* encoder) {
  VideoCodec codec;
  CodecSettings(codec_type, &codec);
  codec.width = 640;
  codec.height = 480;
  codec.maxFramerate = 30;
  RTC_CHECK(encoder);
  return encoder->InitEncode(
      &codec, VideoEncoder::Settings(kCapabilities, 1 /* number_of_cores */,
                                     1200 /* max_payload_size */));
}
// Builds fixed 640x480 decoder settings for the given codec type.
VideoDecoder::Settings DecoderSettings(VideoCodecType codec_type) {
  VideoDecoder::Settings settings;
  settings.set_max_render_resolution({640, 480});
  settings.set_codec_type(codec_type);
  return settings;
}
}  // namespace
// Fixture that instantiates <num_encoders_, num_decoders_> codec instances
// (from the test parameters) using the platform factories. Only Android and
// iOS provide platform factories; other platforms hit the DCHECK.
class VideoEncoderDecoderInstantiationTest
    : public ::testing::Test,
      public ::testing::WithParamInterface<::testing::tuple<int, int>> {
 protected:
  VideoEncoderDecoderInstantiationTest()
      : vp8_format_("VP8"),
        vp9_format_("VP9"),
        h264cbp_format_("H264"),
        num_encoders_(::testing::get<0>(GetParam())),
        num_decoders_(::testing::get<1>(GetParam())) {
#if defined(WEBRTC_ANDROID)
    InitializeAndroidObjects();
    encoder_factory_ = CreateAndroidEncoderFactory();
    decoder_factory_ = CreateAndroidDecoderFactory();
#elif defined(WEBRTC_IOS)
    encoder_factory_ = CreateObjCEncoderFactory();
    decoder_factory_ = CreateObjCDecoderFactory();
#else
    RTC_DCHECK_NOTREACHED() << "Only support Android and iOS.";
#endif
  }
  // Release all instantiated codecs before the factories are destroyed.
  ~VideoEncoderDecoderInstantiationTest() {
    for (auto& encoder : encoders_) {
      encoder->Release();
    }
    for (auto& decoder : decoders_) {
      decoder->Release();
    }
  }
  const Environment env_ = CreateEnvironment();
  const SdpVideoFormat vp8_format_;
  const SdpVideoFormat vp9_format_;
  const SdpVideoFormat h264cbp_format_;
  std::unique_ptr<VideoEncoderFactory> encoder_factory_;
  std::unique_ptr<VideoDecoderFactory> decoder_factory_;
  const int num_encoders_;
  const int num_decoders_;
  std::vector<std::unique_ptr<VideoEncoder>> encoders_;
  std::vector<std::unique_ptr<VideoDecoder>> decoders_;
};
// Sweep encoder count with one decoder, decoder count with one encoder, and
// the full cross product of both.
INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
                         VideoEncoderDecoderInstantiationTest,
                         ::testing::Combine(::testing::Range(1, 4),
                                            ::testing::Range(1, 2)));
INSTANTIATE_TEST_SUITE_P(MultipleDecoders,
                         VideoEncoderDecoderInstantiationTest,
                         ::testing::Combine(::testing::Range(1, 2),
                                            ::testing::Range(1, 9)));
INSTANTIATE_TEST_SUITE_P(MultipleEncodersDecoders,
                         VideoEncoderDecoderInstantiationTest,
                         ::testing::Combine(::testing::Range(1, 4),
                                            ::testing::Range(1, 9)));
// TODO(brandtr): Check that the factories actually support the codecs before
// trying to instantiate. Currently, we will just crash with a Java exception
// if the factory does not support the codec.
// Instantiates and initializes the parameterized number of VP8 encoders and
// decoders; release happens in the fixture destructor.
TEST_P(VideoEncoderDecoderInstantiationTest, DISABLED_InstantiateVp8Codecs) {
  for (int i = 0; i < num_encoders_; ++i) {
    std::unique_ptr<VideoEncoder> encoder =
        encoder_factory_->CreateVideoEncoder(vp8_format_);
    EXPECT_EQ(0, InitEncoder(kVideoCodecVP8, encoder.get()));
    encoders_.emplace_back(std::move(encoder));
  }
  for (int i = 0; i < num_decoders_; ++i) {
    std::unique_ptr<VideoDecoder> decoder =
        decoder_factory_->Create(env_, vp8_format_);
    ASSERT_THAT(decoder, NotNull());
    EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecVP8)));
    decoders_.emplace_back(std::move(decoder));
  }
}
// Same as the VP8 test above, but for H264 constrained-baseline codecs.
TEST_P(VideoEncoderDecoderInstantiationTest,
       DISABLED_InstantiateH264CBPCodecs) {
  for (int i = 0; i < num_encoders_; ++i) {
    std::unique_ptr<VideoEncoder> encoder =
        encoder_factory_->CreateVideoEncoder(h264cbp_format_);
    EXPECT_EQ(0, InitEncoder(kVideoCodecH264, encoder.get()));
    encoders_.emplace_back(std::move(encoder));
  }
  for (int i = 0; i < num_decoders_; ++i) {
    std::unique_ptr<VideoDecoder> decoder =
        decoder_factory_->Create(env_, h264cbp_format_);
    ASSERT_THAT(decoder, NotNull());
    EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecH264)));
    decoders_.push_back(std::move(decoder));
  }
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,101 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <vector>
#include "api/test/create_videocodec_test_fixture.h"
#include "api/test/video/function_video_encoder_factory.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/media_constants.h"
#include "media/engine/internal_decoder_factory.h"
#include "media/engine/internal_encoder_factory.h"
#include "media/engine/simulcast_encoder_adapter.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace test {
namespace {
// Test clips settings.
constexpr int kCifWidth = 352;
constexpr int kCifHeight = 288;
// Number of frames processed in the long-running tests (~10 s at 30 fps).
constexpr int kNumFramesLong = 300;
// Builds the common fixture config for a clip: resolves the YUV resource
// path and requests a 300-frame single-core run.
VideoCodecTestFixture::Config CreateConfig(std::string filename) {
  VideoCodecTestFixture::Config config;
  config.filename = std::move(filename);
  config.filepath = ResourcePath(config.filename, "yuv");
  config.num_frames = kNumFramesLong;
  config.use_single_core = true;
  return config;
}
// AV1 L1T1 at 500 kbps on foreman CIF: checks rate-control and quality
// thresholds at a comfortable bitrate.
TEST(VideoCodecTestAv1, HighBitrate) {
  auto config = CreateConfig("foreman_cif");
  config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
                          kCifWidth, kCifHeight);
  config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  config.num_frames = kNumFramesLong;
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  std::vector<RateControlThresholds> rc_thresholds = {
      {12, 1, 0, 1, 0.3, 0.1, 0, 1}};
  std::vector<QualityThresholds> quality_thresholds = {{37, 34, 0.94, 0.91}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// AV1 L1T1 at 50 kbps on foreman CIF: the rate-control thresholds are much
// looser since the encoder is heavily starved at this rate.
TEST(VideoCodecTestAv1, VeryLowBitrate) {
  auto config = CreateConfig("foreman_cif");
  config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
                          kCifWidth, kCifHeight);
  config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {{50, 30, 0}};
  std::vector<RateControlThresholds> rc_thresholds = {
      {15, 8, 75, 2, 2, 2, 2, 1}};
  std::vector<QualityThresholds> quality_thresholds = {{28, 24.8, 0.70, 0.55}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// The HD test is compiled out on Android.
#if !defined(WEBRTC_ANDROID)
constexpr int kHdWidth = 1280;
constexpr int kHdHeight = 720;
// Checks AV1 rate control and quality on a 720p conference clip at 1 Mbps.
TEST(VideoCodecTestAv1, Hd) {
  auto config = CreateConfig("ConferenceMotion_1280_720_50");
  config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true,
                          kHdWidth, kHdHeight);
  config.codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
  config.num_frames = kNumFramesLong;
  auto fixture = CreateVideoCodecTestFixture(config);
  // One rate profile: 1000 kbps @ 50 fps, applied from frame 0.
  std::vector<RateProfile> rate_profiles = {{1000, 50, 0}};
  std::vector<RateControlThresholds> rc_thresholds = {
      {13, 3, 0, 1, 0.3, 0.1, 0, 1}};
  std::vector<QualityThresholds> quality_thresholds = {
      {35.9, 31.5, 0.925, 0.865}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
#endif
} // namespace
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,63 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stddef.h>
#include "api/test/videocodec_test_fixture.h"
#include "api/video_codecs/video_codec.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/video_codec_settings.h"
using ::testing::ElementsAre;
namespace webrtc {
namespace test {
using Config = VideoCodecTestFixture::Config;
namespace {
const size_t kNumTemporalLayers = 2;
} // namespace
// With use_single_core set, NumberOfCores() must report exactly one core.
TEST(Config, NumberOfCoresWithUseSingleCore) {
  Config config;
  config.use_single_core = true;
  EXPECT_EQ(1u, config.NumberOfCores());
}
// Without use_single_core, the detected core count is machine-dependent, so
// only a lower bound of one core can be asserted.
TEST(Config, NumberOfCoresWithoutUseSingleCore) {
  Config config;
  config.use_single_core = false;
  EXPECT_GE(config.NumberOfCores(), 1u);
}
// Default H.264 codec settings should report a single temporal layer.
TEST(Config, NumberOfTemporalLayersIsOne) {
  Config config;
  webrtc::test::CodecSettings(kVideoCodecH264, &config.codec_settings);
  EXPECT_EQ(1u, config.NumberOfTemporalLayers());
}
// NumberOfTemporalLayers() should reflect the VP8-specific layer count.
TEST(Config, NumberOfTemporalLayers_Vp8) {
  Config config;
  webrtc::test::CodecSettings(kVideoCodecVP8, &config.codec_settings);
  config.codec_settings.VP8()->numberOfTemporalLayers = kNumTemporalLayers;
  EXPECT_EQ(kNumTemporalLayers, config.NumberOfTemporalLayers());
}
// NumberOfTemporalLayers() should reflect the VP9-specific layer count.
TEST(Config, NumberOfTemporalLayers_Vp9) {
  Config config;
  webrtc::test::CodecSettings(kVideoCodecVP9, &config.codec_settings);
  config.codec_settings.VP9()->numberOfTemporalLayers = kNumTemporalLayers;
  EXPECT_EQ(kNumTemporalLayers, config.NumberOfTemporalLayers());
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,868 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
#include <stdint.h>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/environment/environment.h"
#include "api/environment/environment_factory.h"
#include "api/test/metrics/global_metrics_logger_and_exporter.h"
#include "api/test/metrics/metric.h"
#include "api/transport/field_trial_based_config.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video_codecs/h264_profile_level_id.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory_template.h"
#include "api/video_codecs/video_decoder_factory_template_dav1d_adapter.h"
#include "api/video_codecs/video_decoder_factory_template_libvpx_vp8_adapter.h"
#include "api/video_codecs/video_decoder_factory_template_libvpx_vp9_adapter.h"
#include "api/video_codecs/video_decoder_factory_template_open_h264_adapter.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_encoder_factory_template.h"
#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h"
#include "common_video/h264/h264_common.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/h264/include/h264_globals.h"
#include "modules/video_coding/codecs/vp9/svc_config.h"
#include "modules/video_coding/utility/ivf_file_writer.h"
#include "rtc_base/checks.h"
#include "rtc_base/cpu_time.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/cpu_info.h"
#include "system_wrappers/include/sleep.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
#include "test/testsupport/frame_writer.h"
#include "test/video_codec_settings.h"
#include "video/config/simulcast.h"
#include "video/config/video_encoder_config.h"
namespace webrtc {
namespace test {
namespace {
using VideoStatistics = VideoCodecTestStats::VideoStatistics;
const int kBaseKeyFrameInterval = 3000;
const double kBitratePriority = 1.0;
const int kDefaultMaxFramerateFps = 30;
const int kMaxQp = 56;
// Fills codec_settings->simulcastStream[] from the default simulcast stream
// layout returned by cricket::GetSimulcastConfig() for the configured
// resolution and numberOfSimulcastStreams.
void ConfigureSimulcast(VideoCodec* codec_settings) {
  FieldTrialBasedConfig trials;
  const std::vector<webrtc::VideoStream> streams = cricket::GetSimulcastConfig(
      /*min_layer=*/1, codec_settings->numberOfSimulcastStreams,
      codec_settings->width, codec_settings->height, kBitratePriority, kMaxQp,
      /* is_screenshare = */ false, true, trials);
  for (size_t i = 0; i < streams.size(); ++i) {
    SimulcastStream* ss = &codec_settings->simulcastStream[i];
    ss->width = static_cast<uint16_t>(streams[i].width);
    ss->height = static_cast<uint16_t>(streams[i].height);
    ss->numberOfTemporalLayers =
        static_cast<unsigned char>(*streams[i].num_temporal_layers);
    // VideoStream carries bitrates in bps; SimulcastStream stores kbps.
    ss->maxBitrate = streams[i].max_bitrate_bps / 1000;
    ss->targetBitrate = streams[i].target_bitrate_bps / 1000;
    ss->minBitrate = streams[i].min_bitrate_bps / 1000;
    ss->qpMax = streams[i].max_qp;
    ss->active = true;
  }
}
// Fills codec_settings->spatialLayers[] with the default VP9 SVC layering for
// the configured resolution and spatial/temporal layer counts.
// Must only be called for VP9 (checked below).
void ConfigureSvc(VideoCodec* codec_settings) {
  RTC_CHECK_EQ(kVideoCodecVP9, codec_settings->codecType);
  const std::vector<SpatialLayer> layers = GetSvcConfig(
      codec_settings->width, codec_settings->height, kDefaultMaxFramerateFps,
      /*first_active_layer=*/0, codec_settings->VP9()->numberOfSpatialLayers,
      codec_settings->VP9()->numberOfTemporalLayers,
      /* is_screen_sharing = */ false);
  // GetSvcConfig may produce fewer layers than requested (e.g. for small
  // resolutions); in that case the test setup is invalid.
  ASSERT_EQ(codec_settings->VP9()->numberOfSpatialLayers, layers.size())
      << "GetSvcConfig returned fewer spatial layers than configured.";
  for (size_t i = 0; i < layers.size(); ++i) {
    codec_settings->spatialLayers[i] = layers[i];
  }
}
// Renders the codec-type-specific settings of `codec` as a newline-separated
// key/value string, for inclusion in Config::ToString() logs. Codec types
// without specific settings produce an empty string.
std::string CodecSpecificToString(const VideoCodec& codec) {
  char buf[1024];
  rtc::SimpleStringBuilder ss(buf);
  switch (codec.codecType) {
    case kVideoCodecVP8:
      ss << "\nnum_temporal_layers: "
         << static_cast<int>(codec.VP8().numberOfTemporalLayers);
      ss << "\ndenoising: " << codec.VP8().denoisingOn;
      ss << "\nautomatic_resize: " << codec.VP8().automaticResizeOn;
      ss << "\nkey_frame_interval: " << codec.VP8().keyFrameInterval;
      break;
    case kVideoCodecVP9:
      ss << "\nnum_temporal_layers: "
         << static_cast<int>(codec.VP9().numberOfTemporalLayers);
      ss << "\nnum_spatial_layers: "
         << static_cast<int>(codec.VP9().numberOfSpatialLayers);
      ss << "\ndenoising: " << codec.VP9().denoisingOn;
      ss << "\nkey_frame_interval: " << codec.VP9().keyFrameInterval;
      ss << "\nadaptive_qp_mode: " << codec.VP9().adaptiveQpMode;
      ss << "\nautomatic_resize: " << codec.VP9().automaticResizeOn;
      ss << "\nflexible_mode: " << codec.VP9().flexibleMode;
      break;
    case kVideoCodecH264:
      ss << "\nkey_frame_interval: " << codec.H264().keyFrameInterval;
      ss << "\nnum_temporal_layers: "
         << static_cast<int>(codec.H264().numberOfTemporalLayers);
      break;
    case kVideoCodecH265:
      // TODO(bugs.webrtc.org/13485)
      break;
    default:
      break;
  }
  return ss.str();
}
// Returns true if frames should be fed to the encoder paced at (roughly) the
// configured framerate instead of as fast as possible. Pacing is required
// both when CPU usage is measured and when real-time encoding is requested.
bool RunEncodeInRealTime(const VideoCodecTestFixtureImpl::Config& config) {
  // Direct boolean return instead of the redundant if/return-true pattern.
  return config.measure_cpu || config.encode_in_real_time;
}
// Builds the "<clip>_<codec>_<start bitrate kbps>" stem used to name output
// artifacts (e.g. saved .ivf/.y4m files) for this test configuration.
std::string FilenameWithParams(
    const VideoCodecTestFixtureImpl::Config& config) {
  std::string stem = config.filename;
  stem += "_";
  stem += config.CodecName();
  stem += "_";
  stem += std::to_string(config.codec_settings.startBitrate);
  return stem;
}
// Builds the SdpVideoFormat used to instantiate encoders/decoders for this
// config. H.264 gets explicit profile-level-id / packetization-mode FMTP
// parameters derived from the config; VP9 explicitly requests profile 0;
// all other codecs use the bare codec name.
SdpVideoFormat CreateSdpVideoFormat(
    const VideoCodecTestFixtureImpl::Config& config) {
  if (config.codec_settings.codecType == kVideoCodecH264) {
    // "1" = NonInterleaved, "0" = SingleNalUnit packetization.
    const char* packetization_mode =
        config.h264_codec_settings.packetization_mode ==
                H264PacketizationMode::NonInterleaved
            ? "1"
            : "0";
    CodecParameterMap codec_params = {
        {cricket::kH264FmtpProfileLevelId,
         *H264ProfileLevelIdToString(H264ProfileLevelId(
             config.h264_codec_settings.profile, H264Level::kLevel3_1))},
        {cricket::kH264FmtpPacketizationMode, packetization_mode},
        {cricket::kH264FmtpLevelAsymmetryAllowed, "1"}};
    return SdpVideoFormat(config.codec_name, codec_params);
  } else if (config.codec_settings.codecType == kVideoCodecVP9) {
    return SdpVideoFormat(config.codec_name, {{"profile-id", "0"}});
  }
  return SdpVideoFormat(config.codec_name);
}
} // namespace
// Default-constructed config; fields are populated by SetCodecSettings() and
// by individual tests.
VideoCodecTestFixtureImpl::Config::Config() = default;
// Initializes codec_settings for the named codec, applying per-codec defaults
// and the given layering/denoising/frame-drop/resize options. Validates the
// requested layer counts and the codec's layering capabilities (simulcast is
// VP8-only, spatial SVC is VP9-only), then derives the simulcast or SVC
// stream layout when more than one stream/layer is requested.
void VideoCodecTestFixtureImpl::Config::SetCodecSettings(
    std::string codec_name,
    size_t num_simulcast_streams,
    size_t num_spatial_layers,
    size_t num_temporal_layers,
    bool denoising_on,
    bool frame_dropper_on,
    bool spatial_resize_on,
    size_t width,
    size_t height) {
  this->codec_name = codec_name;
  VideoCodecType codec_type = PayloadStringToCodecType(codec_name);
  webrtc::test::CodecSettings(codec_type, &codec_settings);

  // TODO(brandtr): Move the setting of `width` and `height` to the tests, and
  // DCHECK that they are set before initializing the codec instead.
  codec_settings.width = static_cast<uint16_t>(width);
  codec_settings.height = static_cast<uint16_t>(height);

  RTC_CHECK(num_simulcast_streams >= 1 &&
            num_simulcast_streams <= kMaxSimulcastStreams);
  RTC_CHECK(num_spatial_layers >= 1 && num_spatial_layers <= kMaxSpatialLayers);
  RTC_CHECK(num_temporal_layers >= 1 &&
            num_temporal_layers <= kMaxTemporalStreams);

  // Simulcast is only available with VP8.
  RTC_CHECK(num_simulcast_streams < 2 || codec_type == kVideoCodecVP8);

  // Spatial scalability is only available with VP9.
  RTC_CHECK(num_spatial_layers < 2 || codec_type == kVideoCodecVP9);

  // Some base code requires numberOfSimulcastStreams to be set to zero
  // when simulcast is not used.
  codec_settings.numberOfSimulcastStreams =
      num_simulcast_streams <= 1 ? 0
                                 : static_cast<uint8_t>(num_simulcast_streams);

  codec_settings.SetFrameDropEnabled(frame_dropper_on);
  switch (codec_settings.codecType) {
    case kVideoCodecVP8:
      codec_settings.VP8()->numberOfTemporalLayers =
          static_cast<uint8_t>(num_temporal_layers);
      codec_settings.VP8()->denoisingOn = denoising_on;
      codec_settings.VP8()->automaticResizeOn = spatial_resize_on;
      codec_settings.VP8()->keyFrameInterval = kBaseKeyFrameInterval;
      break;
    case kVideoCodecVP9:
      codec_settings.VP9()->numberOfTemporalLayers =
          static_cast<uint8_t>(num_temporal_layers);
      codec_settings.VP9()->denoisingOn = denoising_on;
      codec_settings.VP9()->keyFrameInterval = kBaseKeyFrameInterval;
      codec_settings.VP9()->automaticResizeOn = spatial_resize_on;
      codec_settings.VP9()->numberOfSpatialLayers =
          static_cast<uint8_t>(num_spatial_layers);
      break;
    case kVideoCodecAV1:
      codec_settings.qpMax = 63;
      break;
    case kVideoCodecH264:
      codec_settings.H264()->keyFrameInterval = kBaseKeyFrameInterval;
      codec_settings.H264()->numberOfTemporalLayers =
          static_cast<uint8_t>(num_temporal_layers);
      break;
    case kVideoCodecH265:
      // TODO(bugs.webrtc.org/13485)
      break;
    default:
      break;
  }

  // Derive the per-stream/per-layer layout once the base settings are in.
  if (codec_settings.numberOfSimulcastStreams > 1) {
    ConfigureSimulcast(&codec_settings);
  } else if (codec_settings.codecType == kVideoCodecVP9 &&
             codec_settings.VP9()->numberOfSpatialLayers > 1) {
    ConfigureSvc(&codec_settings);
  }
}
// Number of cores the codecs are allowed to use: one when pinned by
// use_single_core, otherwise whatever the machine reports.
size_t VideoCodecTestFixtureImpl::Config::NumberOfCores() const {
  if (use_single_core) {
    return 1;
  }
  return CpuInfo::DetectNumberOfCores();
}
// Temporal layer count from the codec-specific settings; codecs without a
// temporal-layer field report a single layer.
size_t VideoCodecTestFixtureImpl::Config::NumberOfTemporalLayers() const {
  switch (codec_settings.codecType) {
    case kVideoCodecVP8:
      return codec_settings.VP8().numberOfTemporalLayers;
    case kVideoCodecVP9:
      return codec_settings.VP9().numberOfTemporalLayers;
    case kVideoCodecH264:
      return codec_settings.H264().numberOfTemporalLayers;
    default:
      return 1;
  }
}
// Spatial layer count; only VP9 supports spatial scalability, every other
// codec reports a single layer.
size_t VideoCodecTestFixtureImpl::Config::NumberOfSpatialLayers() const {
  return codec_settings.codecType == kVideoCodecVP9
             ? static_cast<size_t>(codec_settings.VP9().numberOfSpatialLayers)
             : 1u;
}
// Simulcast stream count as stored in the codec settings (zero when simulcast
// is not used, per SetCodecSettings()).
size_t VideoCodecTestFixtureImpl::Config::NumberOfSimulcastStreams() const {
  return codec_settings.numberOfSimulcastStreams;
}
// Renders the whole test configuration (general, codec-level, codec-specific
// and per-simulcast-stream settings) as a multi-line string for logging.
std::string VideoCodecTestFixtureImpl::Config::ToString() const {
  std::string codec_type = CodecTypeToPayloadString(codec_settings.codecType);
  rtc::StringBuilder ss;
  ss << "test_name: " << test_name;
  ss << "\nfilename: " << filename;
  ss << "\nnum_frames: " << num_frames;
  ss << "\nmax_payload_size_bytes: " << max_payload_size_bytes;
  ss << "\ndecode: " << decode;
  ss << "\nuse_single_core: " << use_single_core;
  ss << "\nmeasure_cpu: " << measure_cpu;
  ss << "\nnum_cores: " << NumberOfCores();
  ss << "\ncodec_type: " << codec_type;
  ss << "\n\n--> codec_settings";
  ss << "\nwidth: " << codec_settings.width;
  ss << "\nheight: " << codec_settings.height;
  ss << "\nmax_framerate_fps: " << codec_settings.maxFramerate;
  ss << "\nstart_bitrate_kbps: " << codec_settings.startBitrate;
  ss << "\nmax_bitrate_kbps: " << codec_settings.maxBitrate;
  ss << "\nmin_bitrate_kbps: " << codec_settings.minBitrate;
  ss << "\nmax_qp: " << codec_settings.qpMax;
  ss << "\nnum_simulcast_streams: "
     << static_cast<int>(codec_settings.numberOfSimulcastStreams);
  ss << "\n\n--> codec_settings." << codec_type;
  // Fix: prefix with "\n" like every other field; previously "complexity"
  // was fused onto the "--> codec_settings.<type>" header line.
  ss << "\ncomplexity: "
     << static_cast<int>(codec_settings.GetVideoEncoderComplexity());
  ss << "\nframe_dropping: " << codec_settings.GetFrameDropEnabled();
  ss << "\n" << CodecSpecificToString(codec_settings);
  if (codec_settings.numberOfSimulcastStreams > 1) {
    for (int i = 0; i < codec_settings.numberOfSimulcastStreams; ++i) {
      ss << "\n\n--> codec_settings.simulcastStream[" << i << "]";
      const SimulcastStream& simulcast_stream =
          codec_settings.simulcastStream[i];
      ss << "\nwidth: " << simulcast_stream.width;
      ss << "\nheight: " << simulcast_stream.height;
      ss << "\nnum_temporal_layers: "
         << static_cast<int>(simulcast_stream.numberOfTemporalLayers);
      ss << "\nmin_bitrate_kbps: " << simulcast_stream.minBitrate;
      ss << "\ntarget_bitrate_kbps: " << simulcast_stream.targetBitrate;
      ss << "\nmax_bitrate_kbps: " << simulcast_stream.maxBitrate;
      ss << "\nmax_qp: " << simulcast_stream.qpMax;
      ss << "\nactive: " << simulcast_stream.active;
    }
  }
  ss << "\n";
  return ss.Release();
}
// Human-readable codec name. Falls back to the payload name of the configured
// codec type when no explicit name is set. H.264 names are suffixed with the
// profile: "-CHP" (Constrained High) or "-CBP" (Constrained Baseline).
std::string VideoCodecTestFixtureImpl::Config::CodecName() const {
  std::string name = codec_name;
  if (name.empty()) {
    name = CodecTypeToPayloadString(codec_settings.codecType);
  }
  if (codec_settings.codecType != kVideoCodecH264) {
    return name;
  }
  if (h264_codec_settings.profile == H264Profile::kProfileConstrainedHigh) {
    return name + "-CHP";
  }
  RTC_DCHECK_EQ(h264_codec_settings.profile,
                H264Profile::kProfileConstrainedBaseline);
  return name + "-CBP";
}
// TODO(kthelgason): Move this out of the test fixture impl and
// make available as a shared utility class.
// Verifies H.264 bitstream invariants per frame type: keyframes must carry
// SPS, PPS and IDR NAL units; delta frames must carry none of them.
void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
    webrtc::VideoCodecType codec,
    const EncodedImage& encoded_frame) const {
  EXPECT_EQ(kVideoCodecH264, codec);
  bool contains_sps = false;
  bool contains_pps = false;
  bool contains_idr = false;
  // Scan every NAL unit in the encoded frame and record which of the
  // keyframe-defining types are present.
  const std::vector<webrtc::H264::NaluIndex> nalu_indices =
      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
  for (const webrtc::H264::NaluIndex& index : nalu_indices) {
    webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
        encoded_frame.data()[index.payload_start_offset]);
    if (nalu_type == webrtc::H264::NaluType::kSps) {
      contains_sps = true;
    } else if (nalu_type == webrtc::H264::NaluType::kPps) {
      contains_pps = true;
    } else if (nalu_type == webrtc::H264::NaluType::kIdr) {
      contains_idr = true;
    }
  }
  if (encoded_frame._frameType == VideoFrameType::kVideoFrameKey) {
    EXPECT_TRUE(contains_sps) << "Keyframe should contain SPS.";
    EXPECT_TRUE(contains_pps) << "Keyframe should contain PPS.";
    EXPECT_TRUE(contains_idr) << "Keyframe should contain IDR.";
  } else if (encoded_frame._frameType == VideoFrameType::kVideoFrameDelta) {
    EXPECT_FALSE(contains_sps) << "Delta frame should not contain SPS.";
    EXPECT_FALSE(contains_pps) << "Delta frame should not contain PPS.";
    EXPECT_FALSE(contains_idr) << "Delta frame should not contain IDR.";
  } else {
    // Only key and delta frames are expected from the encoder here.
    RTC_DCHECK_NOTREACHED();
  }
}
// Accumulates process CPU time and wall-clock time across Start()/Stop()
// intervals and logs the estimated CPU usage per core. All methods are no-ops
// unless config.measure_cpu is set.
class VideoCodecTestFixtureImpl::CpuProcessTime final {
 public:
  explicit CpuProcessTime(const Config& config) : config_(config) {}
  // Defaulted instead of an empty user-provided destructor (modernize idiom;
  // keeps the class trivially destructible apart from its members).
  ~CpuProcessTime() = default;

  // Opens a measurement interval: subtracts the current readings so that a
  // matching Stop() leaves the elapsed deltas in the accumulators.
  void Start() {
    if (config_.measure_cpu) {
      cpu_time_ -= rtc::GetProcessCpuTimeNanos();
      wallclock_time_ -= rtc::SystemTimeNanos();
    }
  }
  // Closes the interval opened by Start().
  void Stop() {
    if (config_.measure_cpu) {
      cpu_time_ += rtc::GetProcessCpuTimeNanos();
      wallclock_time_ += rtc::SystemTimeNanos();
    }
  }
  // Logs the accumulated CPU usage, normalized by the core count used.
  void Print() const {
    if (config_.measure_cpu) {
      RTC_LOG(LS_INFO) << "cpu_usage_percent: "
                       << GetUsagePercent() / config_.NumberOfCores();
    }
  }

 private:
  // CPU time as a percentage of wall-clock time over the measured intervals.
  double GetUsagePercent() const {
    return static_cast<double>(cpu_time_) / wallclock_time_ * 100.0;
  }

  const Config config_;
  int64_t cpu_time_ = 0;
  int64_t wallclock_time_ = 0;
};
// Default-factory constructor: uses the built-in software encoder/decoder
// factories (VP8/VP9/H.264/AV1 encode; VP8/VP9/H.264/dav1d decode).
VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(Config config)
    : encoder_factory_(std::make_unique<webrtc::VideoEncoderFactoryTemplate<
                           webrtc::LibvpxVp8EncoderTemplateAdapter,
                           webrtc::LibvpxVp9EncoderTemplateAdapter,
                           webrtc::OpenH264EncoderTemplateAdapter,
                           webrtc::LibaomAv1EncoderTemplateAdapter>>()),
      decoder_factory_(std::make_unique<webrtc::VideoDecoderFactoryTemplate<
                           webrtc::LibvpxVp8DecoderTemplateAdapter,
                           webrtc::LibvpxVp9DecoderTemplateAdapter,
                           webrtc::OpenH264DecoderTemplateAdapter,
                           webrtc::Dav1dDecoderTemplateAdapter>>()),
      config_(config) {}
// Injectable-factory constructor: lets tests supply their own encoder and
// decoder factories (e.g. hardware factories on Android).
VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(
    Config config,
    std::unique_ptr<VideoDecoderFactory> decoder_factory,
    std::unique_ptr<VideoEncoderFactory> encoder_factory)
    : encoder_factory_(std::move(encoder_factory)),
      decoder_factory_(std::move(decoder_factory)),
      config_(config) {}

VideoCodecTestFixtureImpl::~VideoCodecTestFixtureImpl() = default;
// Processes all frames in the clip and verifies the result.
// Orchestration: set up codecs and I/O on a task queue, encode/decode every
// frame, tear down, then analyze the collected statistics against the given
// thresholds (each threshold set may be null to skip that check).
void VideoCodecTestFixtureImpl::RunTest(
    const std::vector<RateProfile>& rate_profiles,
    const std::vector<RateControlThresholds>* rc_thresholds,
    const std::vector<QualityThresholds>* quality_thresholds,
    const BitstreamThresholds* bs_thresholds) {
  RTC_DCHECK(!rate_profiles.empty());

  // To emulate operation on a production VideoStreamEncoder, we call the
  // codecs on a task queue.
  TaskQueueForTest task_queue("VidProc TQ");

  bool is_setup_succeeded = SetUpAndInitObjects(
      &task_queue, rate_profiles[0].target_kbps, rate_profiles[0].input_fps);
  EXPECT_TRUE(is_setup_succeeded);
  if (!is_setup_succeeded) {
    // Still tear down whatever was partially created before bailing out.
    ReleaseAndCloseObjects(&task_queue);
    return;
  }

  PrintSettings(&task_queue);
  ProcessAllFrames(&task_queue, rate_profiles);
  ReleaseAndCloseObjects(&task_queue);

  AnalyzeAllFrames(rate_profiles, rc_thresholds, quality_thresholds,
                   bs_thresholds);
}
// Feeds every frame to the VideoProcessor on the task queue, switching rates
// when the current rate profile's successor becomes active at its frame
// number. Optionally paces frames in real time (see RunEncodeInRealTime()).
void VideoCodecTestFixtureImpl::ProcessAllFrames(
    TaskQueueForTest* task_queue,
    const std::vector<RateProfile>& rate_profiles) {
  // Set initial rates.
  auto rate_profile = rate_profiles.begin();
  task_queue->PostTask([this, rate_profile] {
    processor_->SetRates(rate_profile->target_kbps, rate_profile->input_fps);
  });

  cpu_process_time_->Start();

  for (size_t frame_num = 0; frame_num < config_.num_frames; ++frame_num) {
    // Advance to the next rate profile when its start frame is reached.
    auto next_rate_profile = std::next(rate_profile);
    if (next_rate_profile != rate_profiles.end() &&
        frame_num == next_rate_profile->frame_num) {
      rate_profile = next_rate_profile;
      task_queue->PostTask([this, rate_profile] {
        processor_->SetRates(rate_profile->target_kbps,
                             rate_profile->input_fps);
      });
    }

    task_queue->PostTask([this] { processor_->ProcessFrame(); });

    if (RunEncodeInRealTime(config_)) {
      // Roughly pace the frames.
      const int frame_duration_ms =
          std::ceil(rtc::kNumMillisecsPerSec / rate_profile->input_fps);
      SleepMs(frame_duration_ms);
    }
  }

  task_queue->PostTask([this] { processor_->Finalize(); });

  // Wait until we know that the last frame has been sent for encode.
  task_queue->SendTask([] {});

  // Give the VideoProcessor pipeline some time to process the last frame,
  // and then release the codecs.
  SleepMs(1 * rtc::kNumMillisecsPerSec);
  cpu_process_time_->Stop();
}
// Slices the collected per-frame statistics by rate profile, logs aggregated
// send stats and per-layer receive stats, exports per-layer metrics to the
// global metrics logger (perf dashboard), and verifies each slice against the
// corresponding rate-control/quality/bitstream thresholds.
void VideoCodecTestFixtureImpl::AnalyzeAllFrames(
    const std::vector<RateProfile>& rate_profiles,
    const std::vector<RateControlThresholds>* rc_thresholds,
    const std::vector<QualityThresholds>* quality_thresholds,
    const BitstreamThresholds* bs_thresholds) {
  for (size_t rate_profile_idx = 0; rate_profile_idx < rate_profiles.size();
       ++rate_profile_idx) {
    // A profile's slice runs from its start frame up to (but excluding) the
    // next profile's start frame, or to the end of the clip for the last one.
    const size_t first_frame_num = rate_profiles[rate_profile_idx].frame_num;
    const size_t last_frame_num =
        rate_profile_idx + 1 < rate_profiles.size()
            ? rate_profiles[rate_profile_idx + 1].frame_num - 1
            : config_.num_frames - 1;
    RTC_CHECK(last_frame_num >= first_frame_num);

    VideoStatistics send_stat = stats_.SliceAndCalcAggregatedVideoStatistic(
        first_frame_num, last_frame_num);
    RTC_LOG(LS_INFO) << "==> Send stats";
    RTC_LOG(LS_INFO) << send_stat.ToString("send_") << "\n";

    std::vector<VideoStatistics> layer_stats =
        stats_.SliceAndCalcLayerVideoStatistic(first_frame_num, last_frame_num);
    RTC_LOG(LS_INFO) << "==> Receive stats";
    for (const auto& layer_stat : layer_stats) {
      RTC_LOG(LS_INFO) << layer_stat.ToString("recv_") << "\n";

      // For perf dashboard.
      char modifier_buf[256];
      rtc::SimpleStringBuilder modifier(modifier_buf);
      // Metric-name suffix encoding rate profile and spatial layer; the
      // temporal layer suffix is appended further down.
      modifier << "_r" << rate_profile_idx << "_sl" << layer_stat.spatial_idx;

      // Logs one metric under "<measurement><modifier><suffix>".
      auto PrintResultHelper = [&modifier, this](
                                   absl::string_view measurement, double value,
                                   Unit unit,
                                   absl::string_view non_standard_unit_suffix,
                                   ImprovementDirection improvement_direction) {
        rtc::StringBuilder metric_name(measurement);
        metric_name << modifier.str() << non_standard_unit_suffix;
        GetGlobalMetricsLogger()->LogSingleValueMetric(
            metric_name.str(), config_.test_name, value, unit,
            improvement_direction);
      };

      // Stream-level metrics are only emitted once, on the top temporal
      // layer (which aggregates all lower layers).
      if (layer_stat.temporal_idx == config_.NumberOfTemporalLayers() - 1) {
        PrintResultHelper("enc_speed", layer_stat.enc_speed_fps,
                          Unit::kUnitless, /*non_standard_unit_suffix=*/"_fps",
                          ImprovementDirection::kBiggerIsBetter);
        PrintResultHelper("avg_key_frame_size",
                          layer_stat.avg_key_frame_size_bytes, Unit::kBytes,
                          /*non_standard_unit_suffix=*/"",
                          ImprovementDirection::kNeitherIsBetter);
        PrintResultHelper("num_key_frames", layer_stat.num_key_frames,
                          Unit::kCount,
                          /*non_standard_unit_suffix=*/"",
                          ImprovementDirection::kNeitherIsBetter);
        printf("\n");
      }

      modifier << "tl" << layer_stat.temporal_idx;
      PrintResultHelper("dec_speed", layer_stat.dec_speed_fps, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"_fps",
                        ImprovementDirection::kBiggerIsBetter);
      PrintResultHelper("avg_delta_frame_size",
                        layer_stat.avg_delta_frame_size_bytes, Unit::kBytes,
                        /*non_standard_unit_suffix=*/"",
                        ImprovementDirection::kNeitherIsBetter);
      PrintResultHelper("bitrate", layer_stat.bitrate_kbps,
                        Unit::kKilobitsPerSecond,
                        /*non_standard_unit_suffix=*/"",
                        ImprovementDirection::kNeitherIsBetter);
      PrintResultHelper("framerate", layer_stat.framerate_fps, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"_fps",
                        ImprovementDirection::kNeitherIsBetter);
      PrintResultHelper("avg_psnr_y", layer_stat.avg_psnr_y, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"_dB",
                        ImprovementDirection::kBiggerIsBetter);
      PrintResultHelper("avg_psnr_u", layer_stat.avg_psnr_u, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"_dB",
                        ImprovementDirection::kBiggerIsBetter);
      PrintResultHelper("avg_psnr_v", layer_stat.avg_psnr_v, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"_dB",
                        ImprovementDirection::kBiggerIsBetter);
      PrintResultHelper("min_psnr_yuv", layer_stat.min_psnr, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"_dB",
                        ImprovementDirection::kBiggerIsBetter);
      PrintResultHelper("avg_qp", layer_stat.avg_qp, Unit::kUnitless,
                        /*non_standard_unit_suffix=*/"",
                        ImprovementDirection::kSmallerIsBetter);
      printf("\n");
      if (layer_stat.temporal_idx == config_.NumberOfTemporalLayers() - 1) {
        printf("\n");
      }
    }

    const RateControlThresholds* rc_threshold =
        rc_thresholds ? &(*rc_thresholds)[rate_profile_idx] : nullptr;
    const QualityThresholds* quality_threshold =
        quality_thresholds ? &(*quality_thresholds)[rate_profile_idx] : nullptr;
    VerifyVideoStatistic(send_stat, rc_threshold, quality_threshold,
                         bs_thresholds,
                         rate_profiles[rate_profile_idx].target_kbps,
                         rate_profiles[rate_profile_idx].input_fps);
  }

  if (config_.print_frame_level_stats) {
    RTC_LOG(LS_INFO) << "==> Frame stats";
    std::vector<VideoCodecTestStats::FrameStatistics> frame_stats =
        stats_.GetFrameStatistics();
    for (const auto& frame_stat : frame_stats) {
      RTC_LOG(LS_INFO) << frame_stat.ToString();
    }
  }

  cpu_process_time_->Print();
}
// Asserts the aggregated statistics of one rate-profile slice against the
// optional threshold sets. Each of rc/quality/bitstream thresholds is skipped
// when the corresponding pointer is null.
void VideoCodecTestFixtureImpl::VerifyVideoStatistic(
    const VideoStatistics& video_stat,
    const RateControlThresholds* rc_thresholds,
    const QualityThresholds* quality_thresholds,
    const BitstreamThresholds* bs_thresholds,
    size_t target_bitrate_kbps,
    double input_framerate_fps) {
  if (rc_thresholds) {
    // Relative deviation of achieved bitrate/framerate from the targets.
    const float bitrate_mismatch_percent =
        100 * std::fabs(1.0f * video_stat.bitrate_kbps - target_bitrate_kbps) /
        target_bitrate_kbps;
    const float framerate_mismatch_percent =
        100 * std::fabs(video_stat.framerate_fps - input_framerate_fps) /
        input_framerate_fps;
    EXPECT_LE(bitrate_mismatch_percent,
              rc_thresholds->max_avg_bitrate_mismatch_percent);
    EXPECT_LE(video_stat.time_to_reach_target_bitrate_sec,
              rc_thresholds->max_time_to_reach_target_bitrate_sec);
    EXPECT_LE(framerate_mismatch_percent,
              rc_thresholds->max_avg_framerate_mismatch_percent);
    EXPECT_LE(video_stat.avg_delay_sec,
              rc_thresholds->max_avg_buffer_level_sec);
    EXPECT_LE(video_stat.max_key_frame_delay_sec,
              rc_thresholds->max_max_key_frame_delay_sec);
    EXPECT_LE(video_stat.max_delta_frame_delay_sec,
              rc_thresholds->max_max_delta_frame_delay_sec);
    EXPECT_LE(video_stat.num_spatial_resizes,
              rc_thresholds->max_num_spatial_resizes);
    EXPECT_LE(video_stat.num_key_frames, rc_thresholds->max_num_key_frames);
  }

  if (quality_thresholds) {
    EXPECT_GT(video_stat.avg_psnr, quality_thresholds->min_avg_psnr);
    EXPECT_GT(video_stat.min_psnr, quality_thresholds->min_min_psnr);

    // SSIM calculation is not optimized and thus it is disabled in real-time
    // mode.
    if (!config_.encode_in_real_time) {
      EXPECT_GT(video_stat.avg_ssim, quality_thresholds->min_avg_ssim);
      EXPECT_GT(video_stat.min_ssim, quality_thresholds->min_min_ssim);
    }
  }

  if (bs_thresholds) {
    EXPECT_LE(video_stat.max_nalu_size_bytes,
              bs_thresholds->max_max_nalu_size_bytes);
  }
}
// Creates one encoder and one decoder per simulcast stream / spatial layer
// from the configured factories. Returns false (after EXPECT failure) if any
// codec could not be created.
bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() {
  const Environment env = CreateEnvironment();
  SdpVideoFormat encoder_format(CreateSdpVideoFormat(config_));
  SdpVideoFormat decoder_format = encoder_format;

  // Override encoder and decoder formats with explicitly provided ones.
  if (config_.encoder_format) {
    RTC_DCHECK_EQ(config_.encoder_format->name, config_.codec_name);
    encoder_format = *config_.encoder_format;
  }

  if (config_.decoder_format) {
    RTC_DCHECK_EQ(config_.decoder_format->name, config_.codec_name);
    decoder_format = *config_.decoder_format;
  }

  // NOTE(review): the encoder uses CreateVideoEncoder() while the decoder
  // uses Create(env, ...) — confirm whether the encoder path should also be
  // migrated to the Environment-based factory API.
  encoder_ = encoder_factory_->CreateVideoEncoder(encoder_format);
  EXPECT_TRUE(encoder_) << "Encoder not successfully created.";

  if (encoder_ == nullptr) {
    return false;
  }

  const size_t num_simulcast_or_spatial_layers = std::max(
      config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
  for (size_t i = 0; i < num_simulcast_or_spatial_layers; ++i) {
    std::unique_ptr<VideoDecoder> decoder =
        decoder_factory_->Create(env, decoder_format);
    EXPECT_TRUE(decoder) << "Decoder not successfully created.";
    if (decoder == nullptr) {
      return false;
    }
    decoders_.push_back(std::move(decoder));
  }

  return true;
}
// Destroys all decoders, then the encoder. Callers must destroy the
// VideoProcessor first (see ReleaseAndCloseObjects()).
void VideoCodecTestFixtureImpl::DestroyEncoderAndDecoder() {
  decoders_.clear();
  encoder_.reset();
}
// Exposes the statistics collected during the run (mutable reference; the
// stats object is owned by the fixture).
VideoCodecTestStats& VideoCodecTestFixtureImpl::GetStats() {
  return stats_;
}
// Prepares the run: applies initial rates to the codec settings, opens the
// input clip, creates encoder/decoders on the task queue, optionally opens
// .ivf/.y4m visualization writers, and constructs the VideoProcessor.
// Returns false if codec creation fails.
bool VideoCodecTestFixtureImpl::SetUpAndInitObjects(
    TaskQueueForTest* task_queue,
    size_t initial_bitrate_kbps,
    double initial_framerate_fps) {
  config_.codec_settings.minBitrate = 0;
  config_.codec_settings.startBitrate = static_cast<int>(initial_bitrate_kbps);
  config_.codec_settings.maxFramerate = std::ceil(initial_framerate_fps);

  int clip_width = config_.clip_width.value_or(config_.codec_settings.width);
  int clip_height = config_.clip_height.value_or(config_.codec_settings.height);

  // Create file objects for quality analysis.
  source_frame_reader_ = CreateYuvFrameReader(
      config_.filepath,
      Resolution({.width = clip_width, .height = clip_height}),
      YuvFrameReaderImpl::RepeatMode::kPingPong);

  RTC_DCHECK(encoded_frame_writers_.empty());
  RTC_DCHECK(decoded_frame_writers_.empty());

  stats_.Clear();

  // std::make_unique instead of reset(new ...), consistent with the rest of
  // this file.
  cpu_process_time_ = std::make_unique<CpuProcessTime>(config_);

  bool is_codec_created = false;
  task_queue->SendTask([this, &is_codec_created]() {
    is_codec_created = CreateEncoderAndDecoder();
  });

  if (!is_codec_created) {
    return false;
  }

  if (config_.visualization_params.save_encoded_ivf ||
      config_.visualization_params.save_decoded_y4m) {
    std::string encoder_name = GetCodecName(task_queue, /*is_encoder=*/true);
    encoder_name = absl::StrReplaceAll(encoder_name, {{":", ""}, {" ", "-"}});

    const size_t num_simulcast_or_spatial_layers = std::max(
        config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
    const size_t num_temporal_layers = config_.NumberOfTemporalLayers();
    for (size_t simulcast_svc_idx = 0;
         simulcast_svc_idx < num_simulcast_or_spatial_layers;
         ++simulcast_svc_idx) {
      const std::string output_filename_base =
          JoinFilename(config_.output_path,
                       FilenameWithParams(config_) + "_" + encoder_name +
                           "_sl" + std::to_string(simulcast_svc_idx));

      if (config_.visualization_params.save_encoded_ivf) {
        // One .ivf writer per (spatial, temporal) layer pair.
        for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers;
             ++temporal_idx) {
          const std::string output_file_path = output_filename_base + "tl" +
                                               std::to_string(temporal_idx) +
                                               ".ivf";
          FileWrapper ivf_file = FileWrapper::OpenWriteOnly(output_file_path);

          const VideoProcessor::LayerKey layer_key(simulcast_svc_idx,
                                                   temporal_idx);
          encoded_frame_writers_[layer_key] =
              IvfFileWriter::Wrap(std::move(ivf_file), /*byte_limit=*/0);
        }
      }

      if (config_.visualization_params.save_decoded_y4m) {
        FrameWriter* decoded_frame_writer = new Y4mFrameWriterImpl(
            output_filename_base + ".y4m", config_.codec_settings.width,
            config_.codec_settings.height, config_.codec_settings.maxFramerate);
        EXPECT_TRUE(decoded_frame_writer->Init());
        decoded_frame_writers_.push_back(
            std::unique_ptr<FrameWriter>(decoded_frame_writer));
      }
    }
  }

  task_queue->SendTask([this]() {
    processor_ = std::make_unique<VideoProcessor>(
        encoder_.get(), &decoders_, source_frame_reader_.get(), config_,
        &stats_, &encoded_frame_writers_,
        decoded_frame_writers_.empty() ? nullptr : &decoded_frame_writers_);
  });
  return true;
}
// Tears down the processing pipeline in dependency order: the VideoProcessor
// first (on the task queue), then the codecs, frame reader and any open
// visualization writers.
void VideoCodecTestFixtureImpl::ReleaseAndCloseObjects(
    TaskQueueForTest* task_queue) {
  task_queue->SendTask([this]() {
    processor_.reset();
    // The VideoProcessor must be destroyed before the codecs.
    DestroyEncoderAndDecoder();
  });

  source_frame_reader_.reset();

  // Close visualization files.
  for (auto& encoded_frame_writer : encoded_frame_writers_) {
    EXPECT_TRUE(encoded_frame_writer.second->Close());
  }
  encoded_frame_writers_.clear();
  for (auto& decoded_frame_writer : decoded_frame_writers_) {
    decoded_frame_writer->Close();
  }
  decoded_frame_writers_.clear();
}
// Queries the implementation name of the encoder (or of the first decoder)
// on the task queue, synchronously.
std::string VideoCodecTestFixtureImpl::GetCodecName(
    TaskQueueForTest* task_queue,
    bool is_encoder) const {
  std::string codec_name;
  task_queue->SendTask([this, is_encoder, &codec_name] {
    if (is_encoder) {
      codec_name = encoder_->GetEncoderInfo().implementation_name;
    } else {
      codec_name = decoders_.at(0)->ImplementationName();
    }
  });
  return codec_name;
}
void VideoCodecTestFixtureImpl::PrintSettings(
    TaskQueueForTest* task_queue) const {
  // Log the test configuration followed by the actual codec implementation
  // names reported by the encoder and decoder.
  RTC_LOG(LS_INFO) << "==> Config";
  RTC_LOG(LS_INFO) << config_.ToString();
  RTC_LOG(LS_INFO) << "==> Codec names";
  const std::string encoder_name =
      GetCodecName(task_queue, /*is_encoder=*/true);
  RTC_LOG(LS_INFO) << "enc_impl_name: " << encoder_name;
  const std::string decoder_name =
      GetCodecName(task_queue, /*is_encoder=*/false);
  RTC_LOG(LS_INFO) << "dec_impl_name: " << decoder_name;
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,107 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_
#include <memory>
#include <string>
#include <vector>
#include "api/test/videocodec_test_fixture.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "common_video/h264/h264_common.h"
#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
#include "modules/video_coding/codecs/test/videoprocessor.h"
#include "modules/video_coding/utility/ivf_file_writer.h"
#include "rtc_base/task_queue_for_test.h"
#include "test/testsupport/frame_reader.h"
#include "test/testsupport/frame_writer.h"
namespace webrtc {
namespace test {
// Integration test for video processor. It does rate control and frame quality
// analysis using frame statistics collected by video processor and logs the
// results. If thresholds are specified it checks that corresponding metrics
// are in desirable range.
class VideoCodecTestFixtureImpl : public VideoCodecTestFixture {
 public:
  // Verifies that all H.264 keyframes contain SPS/PPS/IDR NALUs.
  class H264KeyframeChecker : public EncodedFrameChecker {
   public:
    void CheckEncodedFrame(webrtc::VideoCodecType codec,
                           const EncodedImage& encoded_frame) const override;
  };

  explicit VideoCodecTestFixtureImpl(Config config);
  // Takes ownership of the given factories, which are used to create the
  // encoder and decoder(s) under test.
  VideoCodecTestFixtureImpl(
      Config config,
      std::unique_ptr<VideoDecoderFactory> decoder_factory,
      std::unique_ptr<VideoEncoderFactory> encoder_factory);
  ~VideoCodecTestFixtureImpl() override;

  // Runs the encode/decode loop over the configured clip, applying each rate
  // profile in turn, then verifies the collected statistics against the given
  // thresholds. Any threshold pointer may be null to skip that category.
  void RunTest(const std::vector<RateProfile>& rate_profiles,
               const std::vector<RateControlThresholds>* rc_thresholds,
               const std::vector<QualityThresholds>* quality_thresholds,
               const BitstreamThresholds* bs_thresholds) override;

  VideoCodecTestStats& GetStats() override;

 private:
  class CpuProcessTime;

  bool CreateEncoderAndDecoder();
  void DestroyEncoderAndDecoder();
  bool SetUpAndInitObjects(TaskQueueForTest* task_queue,
                           size_t initial_bitrate_kbps,
                           double initial_framerate_fps);
  void ReleaseAndCloseObjects(TaskQueueForTest* task_queue);
  void ProcessAllFrames(TaskQueueForTest* task_queue,
                        const std::vector<RateProfile>& rate_profiles);
  void AnalyzeAllFrames(
      const std::vector<RateProfile>& rate_profiles,
      const std::vector<RateControlThresholds>* rc_thresholds,
      const std::vector<QualityThresholds>* quality_thresholds,
      const BitstreamThresholds* bs_thresholds);
  // Checks one layer's aggregated statistics against the (possibly null)
  // per-category thresholds.
  void VerifyVideoStatistic(
      const VideoCodecTestStats::VideoStatistics& video_stat,
      const RateControlThresholds* rc_thresholds,
      const QualityThresholds* quality_thresholds,
      const BitstreamThresholds* bs_thresholds,
      size_t target_bitrate_kbps,
      double input_framerate_fps);
  // Returns the implementation name of the encoder (is_encoder == true) or
  // of the first decoder, queried on the codec task queue.
  std::string GetCodecName(TaskQueueForTest* task_queue, bool is_encoder) const;
  void PrintSettings(TaskQueueForTest* task_queue) const;

  // Codecs.
  const std::unique_ptr<VideoEncoderFactory> encoder_factory_;
  std::unique_ptr<VideoEncoder> encoder_;
  const std::unique_ptr<VideoDecoderFactory> decoder_factory_;
  VideoProcessor::VideoDecoderList decoders_;

  // Helper objects.
  Config config_;
  VideoCodecTestStatsImpl stats_;
  std::unique_ptr<FrameReader> source_frame_reader_;
  // Per-layer IVF bitstream dumps; populated only when
  // visualization_params.save_encoded_ivf is set.
  VideoProcessor::IvfFileWriterMap encoded_frame_writers_;
  // Y4M dumps of decoded frames; populated only when
  // visualization_params.save_decoded_y4m is set.
  VideoProcessor::FrameWriterList decoded_frame_writers_;
  std::unique_ptr<VideoProcessor> processor_;
  std::unique_ptr<CpuProcessTime> cpu_process_time_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_FIXTURE_IMPL_H_

View file

@ -0,0 +1,465 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <vector>
#include "api/test/create_videocodec_test_fixture.h"
#include "api/test/video/function_video_encoder_factory.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/media_constants.h"
#include "media/engine/internal_decoder_factory.h"
#include "media/engine/internal_encoder_factory.h"
#include "media/engine/simulcast_encoder_adapter.h"
#include "modules/video_coding/utility/vp8_header_parser.h"
#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace test {
using VideoStatistics = VideoCodecTestStats::VideoStatistics;
namespace {
// Codec settings.
const int kCifWidth = 352;
const int kCifHeight = 288;
// Shorter run used by tests that only need a quick check.
const int kNumFramesShort = 100;
const int kNumFramesLong = 300;
// Bitrate sweep used by the (manually run) rate-distortion perf tests below.
const size_t kBitrateRdPerfKbps[] = {100, 200, 300, 400, 500, 600,
                                     700, 800, 1000, 1250, 1400, 1600,
                                     1800, 2000, 2200, 2500};
// Frames skipped at the start of each R-D run before statistics are sliced.
const size_t kNumFirstFramesToSkipAtRdPerfAnalysis = 60;
class QpFrameChecker : public VideoCodecTestFixture::EncodedFrameChecker {
 public:
  // Parses the QP out of the encoded bitstream and verifies that it matches
  // the QP reported by the encoder.
  void CheckEncodedFrame(webrtc::VideoCodecType codec,
                         const EncodedImage& encoded_frame) const override {
    int parsed_qp;
    switch (codec) {
      case kVideoCodecVP8:
        EXPECT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(),
                               &parsed_qp));
        break;
      case kVideoCodecVP9:
        EXPECT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(),
                               &parsed_qp));
        break;
      default:
        RTC_DCHECK_NOTREACHED();
        break;
    }
    EXPECT_EQ(encoded_frame.qp_, parsed_qp)
        << "Encoder QP != parsed bitstream QP.";
  }
};
// Builds the default test configuration: foreman_cif clip, long run, and
// single-core codecs.
VideoCodecTestFixture::Config CreateConfig() {
  VideoCodecTestFixture::Config cfg;
  cfg.filename = "foreman_cif";
  cfg.filepath = ResourcePath(cfg.filename, "yuv");
  cfg.num_frames = kNumFramesLong;
  cfg.use_single_core = true;
  return cfg;
}
// Prints a rate-distortion summary table: one row per (uplink bitrate,
// encoded layer). Takes the stats by const reference to avoid copying the
// whole map on every call.
void PrintRdPerf(
    const std::map<size_t, std::vector<VideoStatistics>>& rd_stats) {
  printf("--> Summary\n");
  printf("%11s %5s %6s %11s %12s %11s %13s %13s %5s %7s %7s %7s %13s %13s\n",
         "uplink_kbps", "width", "height", "spatial_idx", "temporal_idx",
         "target_kbps", "downlink_kbps", "framerate_fps", "psnr", "psnr_y",
         "psnr_u", "psnr_v", "enc_speed_fps", "dec_speed_fps");
  for (const auto& rd_stat : rd_stats) {
    const size_t bitrate_kbps = rd_stat.first;
    for (const auto& layer_stat : rd_stat.second) {
      // Note the space after the second "%7.2f": without it the psnr_v and
      // enc_speed_fps columns run together and misalign with the header.
      printf(
          "%11zu %5zu %6zu %11zu %12zu %11zu %13zu %13.2f %5.2f %7.2f %7.2f "
          "%7.2f "
          "%13.2f %13.2f\n",
          bitrate_kbps, layer_stat.width, layer_stat.height,
          layer_stat.spatial_idx, layer_stat.temporal_idx,
          layer_stat.target_bitrate_kbps, layer_stat.bitrate_kbps,
          layer_stat.framerate_fps, layer_stat.avg_psnr, layer_stat.avg_psnr_y,
          layer_stat.avg_psnr_u, layer_stat.avg_psnr_v,
          layer_stat.enc_speed_fps, layer_stat.dec_speed_fps);
    }
  }
}
} // namespace
#if defined(RTC_ENABLE_VP9)
// VP9, single stream/layer: rate control and quality at a bitrate that is
// comfortably high for CIF content.
TEST(VideoCodecTestLibvpx, HighBitrateVP9) {
  auto cfg = CreateConfig();
  cfg.num_frames = kNumFramesShort;
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
                       kCifWidth, kCifHeight);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  const std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  const std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 0, 1, 0.3, 0.1, 0, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {37, 36, 0.94, 0.92}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// VP9: ramp the target bitrate 200 -> 700 -> 500 kbps and verify the rate
// controller tracks each step with acceptable quality.
TEST(VideoCodecTestLibvpx, ChangeBitrateVP9) {
  auto cfg = CreateConfig();
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
                       kCifWidth, kCifHeight);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  // {target_kbps, input_fps, frame_num}
  const std::vector<RateProfile> rate_profiles = {
      {200, 30, 0}, {700, 30, 100}, {500, 30, 200}};
  const std::vector<RateControlThresholds> rc_thresholds = {
      {5, 2, 0, 1, 0.5, 0.1, 0, 1},
      {15, 3, 0, 1, 0.5, 0.1, 0, 0},
      {11, 2, 0, 1, 0.5, 0.1, 0, 0}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {34, 33, 0.90, 0.88}, {38, 35, 0.95, 0.91}, {35, 34, 0.93, 0.90}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// VP9: keep the target bitrate fixed at 100 kbps while stepping the input
// framerate 24 -> 15 -> 10 fps.
TEST(VideoCodecTestLibvpx, ChangeFramerateVP9) {
  auto cfg = CreateConfig();
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, false,
                       kCifWidth, kCifHeight);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  // {target_kbps, input_fps, frame_num}
  const std::vector<RateProfile> rate_profiles = {
      {100, 24, 0}, {100, 15, 100}, {100, 10, 200}};
  // Framerate mismatch should be lower for lower framerate.
  const std::vector<RateControlThresholds> rc_thresholds = {
      {10, 2, 40, 1, 0.5, 0.2, 0, 1},
      {8, 2, 5, 1, 0.5, 0.2, 0, 0},
      {5, 2, 0, 1, 0.5, 0.3, 0, 0}};
  // Quality should be higher for lower framerates for the same content.
  const std::vector<QualityThresholds> quality_thresholds = {
      {33, 32, 0.88, 0.86}, {33.5, 32, 0.90, 0.86}, {33.5, 31.5, 0.90, 0.85}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// VP9 with the denoiser toggled on (per the test name) on a short run.
TEST(VideoCodecTestLibvpx, DenoiserOnVP9) {
  auto cfg = CreateConfig();
  cfg.num_frames = kNumFramesShort;
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, true, true, false,
                       kCifWidth, kCifHeight);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  const std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  const std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 0, 1, 0.3, 0.1, 0, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {37.5, 36, 0.94, 0.93}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// VP9 at a starvation-level 50 kbps target; thresholds are accordingly loose.
TEST(VideoCodecTestLibvpx, VeryLowBitrateVP9) {
  auto cfg = CreateConfig();
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 1, 1, false, true, true,
                       kCifWidth, kCifHeight);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  const std::vector<RateProfile> rate_profiles = {{50, 30, 0}};
  const std::vector<RateControlThresholds> rc_thresholds = {
      {15, 3, 75, 1, 0.5, 0.4, 2, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {28, 25, 0.80, 0.65}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// TODO(marpan): Add temporal layer test for VP9, once changes are in
// vp9 wrapper for this.
#endif // defined(RTC_ENABLE_VP9)
// VP8, single stream/layer, short run at a comfortable 500 kbps.
TEST(VideoCodecTestLibvpx, HighBitrateVP8) {
  auto cfg = CreateConfig();
  cfg.num_frames = kNumFramesShort;
  cfg.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
                       kCifWidth, kCifHeight);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  const std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  const std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 0, 1, 0.2, 0.1, 0, 1}};
  // ARM builds use relaxed quality thresholds.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  const std::vector<QualityThresholds> quality_thresholds = {
      {35, 33, 0.91, 0.89}};
#else
  const std::vector<QualityThresholds> quality_thresholds = {
      {37, 35, 0.93, 0.91}};
#endif
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// The MAYBE_ mapping was missing here: without a #define the test registers
// under the literal name "MAYBE_ChangeBitrateVP8" instead of being mapped to
// its DISABLED_/plain form. Follow the pattern used by the other MAYBE_
// tests in this file (disabled on Android).
#if defined(WEBRTC_ANDROID)
#define MAYBE_ChangeBitrateVP8 DISABLED_ChangeBitrateVP8
#else
#define MAYBE_ChangeBitrateVP8 ChangeBitrateVP8
#endif
// VP8: ramp the target bitrate 200 -> 800 -> 500 kbps and verify rate
// tracking and quality at each step.
TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) {
  auto config = CreateConfig();
  config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
                          kCifWidth, kCifHeight);
  const auto frame_checker = std::make_unique<QpFrameChecker>();
  config.encoded_frame_checker = frame_checker.get();
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {
      {200, 30, 0},  // target_kbps, input_fps, frame_num
      {800, 30, 100},
      {500, 30, 200}};
  std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 0, 1, 0.2, 0.1, 0, 1},
      {15.5, 1, 0, 1, 0.2, 0.1, 0, 0},
      {15, 1, 0, 1, 0.2, 0.1, 0, 0}};
  // ARM builds use relaxed quality thresholds.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  std::vector<QualityThresholds> quality_thresholds = {
      {31.8, 31, 0.86, 0.85}, {36, 34.8, 0.92, 0.90}, {33.5, 32, 0.90, 0.88}};
#else
  std::vector<QualityThresholds> quality_thresholds = {
      {33, 32, 0.89, 0.88}, {38, 36, 0.94, 0.93}, {35, 34, 0.92, 0.91}};
#endif
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// The MAYBE_ mapping was missing here: without a #define the test registers
// under the literal name "MAYBE_ChangeFramerateVP8". Follow the pattern used
// by the other MAYBE_ tests in this file (disabled on Android).
#if defined(WEBRTC_ANDROID)
#define MAYBE_ChangeFramerateVP8 DISABLED_ChangeFramerateVP8
#else
#define MAYBE_ChangeFramerateVP8 ChangeFramerateVP8
#endif
// VP8: fixed 80 kbps target while the input framerate steps 24 -> 15 -> 10.
TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) {
  auto config = CreateConfig();
  config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false,
                          kCifWidth, kCifHeight);
  const auto frame_checker = std::make_unique<QpFrameChecker>();
  config.encoded_frame_checker = frame_checker.get();
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {
      {80, 24, 0},  // target_kbps, input_fps, frame_index_rate_update
      {80, 15, 100},
      {80, 10, 200}};
  // ARM builds use relaxed rate-control thresholds.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  std::vector<RateControlThresholds> rc_thresholds = {
      {10, 2.42, 60, 1, 0.3, 0.3, 0, 1},
      {10, 2, 30, 1, 0.3, 0.3, 0, 0},
      {10, 2, 10, 1, 0.3, 0.2, 0, 0}};
#else
  std::vector<RateControlThresholds> rc_thresholds = {
      {10, 2, 20, 1, 0.3, 0.15, 0, 1},
      {5, 2, 5, 1, 0.3, 0.15, 0, 0},
      {4, 2, 1, 1, 0.3, 0.2, 0, 0}};
#endif
  // ARM builds use relaxed quality thresholds.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  std::vector<QualityThresholds> quality_thresholds = {
      {31, 30, 0.85, 0.84}, {31.4, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}};
#else
  std::vector<QualityThresholds> quality_thresholds = {
      {31, 30, 0.87, 0.85}, {32, 31, 0.88, 0.85}, {32, 30, 0.87, 0.82}};
#endif
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
#if defined(WEBRTC_ANDROID)
#define MAYBE_TemporalLayersVP8 DISABLED_TemporalLayersVP8
#else
#define MAYBE_TemporalLayersVP8 TemporalLayersVP8
#endif
// VP8 temporal-layer test; the target bitrate is raised 200 -> 400 kbps at
// frame 150.
TEST(VideoCodecTestLibvpx, MAYBE_TemporalLayersVP8) {
  auto config = CreateConfig();
  config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 3, true, true, false,
                          kCifWidth, kCifHeight);
  const auto frame_checker = std::make_unique<QpFrameChecker>();
  config.encoded_frame_checker = frame_checker.get();
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {{200, 30, 0}, {400, 30, 150}};
  // ARM builds use relaxed rate-control thresholds.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  std::vector<RateControlThresholds> rc_thresholds = {
      {10, 1, 2.1, 1, 0.2, 0.1, 0, 1}, {12, 2, 3, 1, 0.2, 0.1, 0, 1}};
#else
  std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 0, 1, 0.2, 0.1, 0, 1}, {10, 2, 0, 1, 0.2, 0.1, 0, 1}};
#endif
  // Min SSIM drops because of high motion scene with complex background (trees).
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  std::vector<QualityThresholds> quality_thresholds = {{31, 30, 0.85, 0.83},
                                                       {31, 28, 0.85, 0.75}};
#else
  std::vector<QualityThresholds> quality_thresholds = {{32, 30, 0.88, 0.85},
                                                       {33, 30, 0.89, 0.83}};
#endif
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
#if defined(WEBRTC_ANDROID)
#define MAYBE_MultiresVP8 DISABLED_MultiresVP8
#else
#define MAYBE_MultiresVP8 MultiresVP8
#endif
// VP8 multi-stream encoding on a 720p conference clip at 1500 kbps.
TEST(VideoCodecTestLibvpx, MAYBE_MultiresVP8) {
  auto config = CreateConfig();
  config.filename = "ConferenceMotion_1280_720_50";
  config.filepath = ResourcePath(config.filename, "yuv");
  config.num_frames = 100;
  config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
                          1280, 720);
  const auto frame_checker = std::make_unique<QpFrameChecker>();
  config.encoded_frame_checker = frame_checker.get();
  auto fixture = CreateVideoCodecTestFixture(config);
  std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
  // ARM builds use relaxed rate-control thresholds.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
  std::vector<RateControlThresholds> rc_thresholds = {
      {4.1, 1.04, 7, 0.18, 0.14, 0.08, 0, 1}};
#else
  std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 5, 1, 0.3, 0.1, 0, 1}};
#endif
  std::vector<QualityThresholds> quality_thresholds = {{34, 32, 0.90, 0.88}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
#if defined(WEBRTC_ANDROID)
#define MAYBE_SimulcastVP8 DISABLED_SimulcastVP8
#else
#define MAYBE_SimulcastVP8 SimulcastVP8
#endif
// VP8 simulcast on a 720p conference clip, encoding through the
// SimulcastEncoderAdapter wrapped around the internal encoder factory.
TEST(VideoCodecTestLibvpx, MAYBE_SimulcastVP8) {
  auto config = CreateConfig();
  config.filename = "ConferenceMotion_1280_720_50";
  config.filepath = ResourcePath(config.filename, "yuv");
  config.num_frames = 100;
  config.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
                          1280, 720);
  const auto frame_checker = std::make_unique<QpFrameChecker>();
  config.encoded_frame_checker = frame_checker.get();
  // Captured by reference in the factory lambda below; declared before
  // `fixture` so it outlives the encoders the fixture creates.
  InternalEncoderFactory internal_encoder_factory;
  std::unique_ptr<VideoEncoderFactory> adapted_encoder_factory =
      std::make_unique<FunctionVideoEncoderFactory>([&]() {
        return std::make_unique<SimulcastEncoderAdapter>(
            &internal_encoder_factory, SdpVideoFormat(cricket::kVp8CodecName));
      });
  // make_unique instead of naked new, matching the rest of the file.
  auto internal_decoder_factory = std::make_unique<InternalDecoderFactory>();
  auto fixture =
      CreateVideoCodecTestFixture(config, std::move(internal_decoder_factory),
                                  std::move(adapted_encoder_factory));
  std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
  std::vector<RateControlThresholds> rc_thresholds = {
      {20, 5, 90, 1, 0.5, 0.3, 0, 1}};
  std::vector<QualityThresholds> quality_thresholds = {{34, 32, 0.90, 0.88}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
#if defined(WEBRTC_ANDROID)
#define MAYBE_SvcVP9 DISABLED_SvcVP9
#else
#define MAYBE_SvcVP9 SvcVP9
#endif
// VP9 SVC on a 720p conference clip at 1500 kbps.
TEST(VideoCodecTestLibvpx, MAYBE_SvcVP9) {
  auto cfg = CreateConfig();
  cfg.filename = "ConferenceMotion_1280_720_50";
  cfg.filepath = ResourcePath(cfg.filename, "yuv");
  cfg.num_frames = 100;
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 3, 3, true, true, false,
                       1280, 720);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  const std::vector<RateProfile> rate_profiles = {{1500, 30, 0}};
  const std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 5, 1, 0.3, 0.1, 0, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {36, 34, 0.93, 0.90}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// Sweeps a set of uplink bitrates and prints a rate-distortion summary for
// multi-stream VP8. Disabled: intended for manual runs.
TEST(VideoCodecTestLibvpx, DISABLED_MultiresVP8RdPerf) {
  auto cfg = CreateConfig();
  cfg.filename = "FourPeople_1280x720_30";
  cfg.filepath = ResourcePath(cfg.filename, "yuv");
  cfg.num_frames = 300;
  cfg.print_frame_level_stats = true;
  cfg.SetCodecSettings(cricket::kVp8CodecName, 3, 1, 3, true, true, false,
                       1280, 720);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  std::map<size_t, std::vector<VideoStatistics>> rd_stats;
  for (const size_t bitrate_kbps : kBitrateRdPerfKbps) {
    const std::vector<RateProfile> rate_profiles = {{bitrate_kbps, 30, 0}};
    fixture->RunTest(rate_profiles, nullptr, nullptr, nullptr);
    // Skip the first frames so startup transients do not skew the stats.
    rd_stats[bitrate_kbps] =
        fixture->GetStats().SliceAndCalcLayerVideoStatistic(
            kNumFirstFramesToSkipAtRdPerfAnalysis, cfg.num_frames - 1);
  }
  PrintRdPerf(rd_stats);
}
// Sweeps a set of uplink bitrates and prints a rate-distortion summary for
// VP9 SVC. Disabled: intended for manual runs.
TEST(VideoCodecTestLibvpx, DISABLED_SvcVP9RdPerf) {
  auto cfg = CreateConfig();
  cfg.filename = "FourPeople_1280x720_30";
  cfg.filepath = ResourcePath(cfg.filename, "yuv");
  cfg.num_frames = 300;
  cfg.print_frame_level_stats = true;
  cfg.SetCodecSettings(cricket::kVp9CodecName, 1, 3, 3, true, true, false,
                       1280, 720);
  const auto bitstream_checker = std::make_unique<QpFrameChecker>();
  cfg.encoded_frame_checker = bitstream_checker.get();
  auto fixture = CreateVideoCodecTestFixture(cfg);

  std::map<size_t, std::vector<VideoStatistics>> rd_stats;
  for (const size_t bitrate_kbps : kBitrateRdPerfKbps) {
    const std::vector<RateProfile> rate_profiles = {{bitrate_kbps, 30, 0}};
    fixture->RunTest(rate_profiles, nullptr, nullptr, nullptr);
    // Skip the first frames so startup transients do not skew the stats.
    rd_stats[bitrate_kbps] =
        fixture->GetStats().SliceAndCalcLayerVideoStatistic(
            kNumFirstFramesToSkipAtRdPerfAnalysis, cfg.num_frames - 1);
  }
  PrintRdPerf(rd_stats);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,267 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include "api/test/create_videocodec_test_fixture.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/test/android_codec_factory_helper.h"
#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
#include "rtc_base/strings/string_builder.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace test {
namespace {
const int kForemanNumFrames = 300;
const int kForemanFramerateFps = 30;

// A named sequence of rate updates used to parameterize the rate-adaptation
// tests; `name` becomes part of the generated test name (see ParamInfoToStr).
struct RateProfileData {
  std::string name;
  std::vector<webrtc::test::RateProfile> rate_profile;
};

// Each rate step in the profiles below is held for this many seconds.
const size_t kConstRateIntervalSec = 10;

// Bitrate steps down from 3000 kbps and back up, at a constant 30 fps.
const RateProfileData kBitRateHighLowHigh = {
    /*name=*/"BitRateHighLowHigh",
    /*rate_profile=*/{
        {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/0},
        {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/300},
        {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/600},
        {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/900},
        {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/1200}}};

// Bitrate steps up from 750 kbps and back down, at a constant 30 fps.
const RateProfileData kBitRateLowHighLow = {
    /*name=*/"BitRateLowHighLow",
    /*rate_profile=*/{
        {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/0},
        {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/300},
        {/*target_kbps=*/3000, /*input_fps=*/30, /*frame_num=*/600},
        {/*target_kbps=*/1500, /*input_fps=*/30, /*frame_num=*/900},
        {/*target_kbps=*/750, /*input_fps=*/30, /*frame_num=*/1200}}};

// Framerate steps down from 30 fps and back up, at a constant 2000 kbps.
const RateProfileData kFrameRateHighLowHigh = {
    /*name=*/"FrameRateHighLowHigh",
    /*rate_profile=*/{
        {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/0},
        {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/300},
        {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/450},
        {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/525},
        {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/675}}};

// Framerate steps up from 7.5 fps and back down, at a constant 2000 kbps.
const RateProfileData kFrameRateLowHighLow = {
    /*name=*/"FrameRateLowHighLow",
    /*rate_profile=*/{
        {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/0},
        {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/75},
        {/*target_kbps=*/2000, /*input_fps=*/30, /*frame_num=*/225},
        {/*target_kbps=*/2000, /*input_fps=*/15, /*frame_num=*/525},
        {/*target_kbps=*/2000, /*input_fps=*/7.5, /*frame_num=*/775}}};
// Default config: foreman_cif clip, encoded in real time so the OpenMAX
// buffers in the Android MediaCodec are not overwhelmed.
VideoCodecTestFixture::Config CreateConfig() {
  VideoCodecTestFixture::Config cfg;
  cfg.filename = "foreman_cif";
  cfg.filepath = ResourcePath(cfg.filename, "yuv");
  cfg.num_frames = kForemanNumFrames;
  cfg.encode_in_real_time = true;
  return cfg;
}
// Builds a fixture backed by the Android HW encoder/decoder factories.
std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
    VideoCodecTestFixture::Config config) {
  InitializeAndroidObjects();  // Safe to call more than once.
  auto hw_encoder_factory = CreateAndroidEncoderFactory();
  auto hw_decoder_factory = CreateAndroidDecoderFactory();
  return CreateVideoCodecTestFixture(config, std::move(hw_decoder_factory),
                                     std::move(hw_encoder_factory));
}
} // namespace
// VP8 on HW MediaCodec: CIF clip at 500 kbps.
TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsVp8) {
  auto cfg = CreateConfig();
  cfg.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false, false,
                       352, 288);
  auto fixture = CreateTestFixtureWithConfig(cfg);

  const std::vector<RateProfile> rate_profiles = {
      {500, kForemanFramerateFps, 0}};
  // The thresholds below may have to be tweaked to let even poor MediaCodec
  // implementations pass. If this test fails on the bots, disable it and
  // ping brandtr@.
  const std::vector<RateControlThresholds> rc_thresholds = {
      {10, 1, 1, 0.1, 0.2, 0.1, 0, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {36, 31, 0.92, 0.86}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// H.264 (constrained baseline) on HW MediaCodec: CIF clip at 500 kbps.
TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsH264CBP) {
  auto cfg = CreateConfig();
  const auto keyframe_checker =
      std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
  cfg.encoded_frame_checker = keyframe_checker.get();
  cfg.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
                       352, 288);
  auto fixture = CreateTestFixtureWithConfig(cfg);

  const std::vector<RateProfile> rate_profiles = {
      {500, kForemanFramerateFps, 0}};
  // The thresholds below may have to be tweaked to let even poor MediaCodec
  // implementations pass. If this test fails on the bots, disable it and
  // ping brandtr@.
  const std::vector<RateControlThresholds> rc_thresholds = {
      {10, 1, 1, 0.1, 0.2, 0.1, 0, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {36, 31, 0.92, 0.86}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// TODO(brandtr): Enable this test when we have trybots/buildbots with
// HW encoders that support CHP.
// H.264 Constrained High Profile variant. Kept disabled until trybots and
// buildbots have HW encoders that support CHP.
TEST(VideoCodecTestMediaCodec, DISABLED_ForemanCif500kbpsH264CHP) {
  auto cfg = CreateConfig();
  const auto keyframe_checker =
      std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
  cfg.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
  cfg.encoded_frame_checker = keyframe_checker.get();
  cfg.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
                       352, 288);
  auto fixture = CreateTestFixtureWithConfig(cfg);

  const std::vector<RateProfile> rate_profiles = {
      {500, kForemanFramerateFps, 0}};
  // The thresholds below may have to be tweaked to let even poor MediaCodec
  // implementations pass. If this test fails on the bots, disable it and
  // ping brandtr@.
  const std::vector<RateControlThresholds> rc_thresholds = {
      {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {37, 35, 0.93, 0.91}};
  fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr);
}
// Sweeps VP8 and H.264 over several foreman resolutions at 100 kbps and
// checks quality only (no rate-control or bitstream thresholds).
TEST(VideoCodecTestMediaCodec, ForemanMixedRes100kbpsVp8H264) {
  auto cfg = CreateConfig();
  const int kNumFrames = 30;
  const std::vector<std::string> codecs = {cricket::kVp8CodecName,
                                           cricket::kH264CodecName};
  const std::vector<std::tuple<int, int>> resolutions = {
      {128, 96}, {176, 144}, {320, 240}, {480, 272}};
  const std::vector<RateProfile> rate_profiles = {
      {100, kForemanFramerateFps, 0}};
  const std::vector<QualityThresholds> quality_thresholds = {
      {29, 26, 0.8, 0.75}};

  for (const auto& codec : codecs) {
    for (const auto& resolution : resolutions) {
      const int width = std::get<0>(resolution);
      const int height = std::get<1>(resolution);
      cfg.filename = std::string("foreman_") + std::to_string(width) + "x" +
                     std::to_string(height);
      cfg.filepath = ResourcePath(cfg.filename, "yuv");
      cfg.num_frames = kNumFrames;
      cfg.SetCodecSettings(codec, 1, 1, 1, false, false, false, width, height);

      auto fixture = CreateTestFixtureWithConfig(cfg);
      fixture->RunTest(rate_profiles, /*rc_thresholds=*/nullptr,
                       &quality_thresholds, /*bs_thresholds=*/nullptr);
    }
  }
}
// Parameterized over a (rate profile, codec name) pair.
class VideoCodecTestMediaCodecRateAdaptation
    : public ::testing::TestWithParam<
          std::tuple<RateProfileData, std::string>> {
 public:
  // Builds a readable test-instance name, e.g. "BitRateHighLowHigh_VP8".
  static std::string ParamInfoToStr(
      const ::testing::TestParamInfo<
          VideoCodecTestMediaCodecRateAdaptation::ParamType>& info) {
    return std::get<0>(info.param).name + "_" + std::get<1>(info.param);
  }
};
// Runs the parameterized rate profile end to end on HW MediaCodec and checks,
// for each rate step (held kConstRateIntervalSec seconds), that the encoder
// tracks the target rate without frame drops, extra keyframes, excessive
// latency, or poor quality.
TEST_P(VideoCodecTestMediaCodecRateAdaptation, DISABLED_RateAdaptation) {
  const std::vector<webrtc::test::RateProfile> rate_profile =
      std::get<0>(GetParam()).rate_profile;
  const std::string codec_name = std::get<1>(GetParam());

  VideoCodecTestFixture::Config config;
  config.filename = "FourPeople_1280x720_30";
  config.filepath = ResourcePath(config.filename, "yuv");
  // Run long enough that the final rate step also gets a full interval.
  config.num_frames = rate_profile.back().frame_num +
                      static_cast<size_t>(kConstRateIntervalSec *
                                          rate_profile.back().input_fps);
  config.encode_in_real_time = true;
  config.SetCodecSettings(codec_name, 1, 1, 1, false, false, false, 1280, 720);

  auto fixture = CreateTestFixtureWithConfig(config);
  fixture->RunTest(rate_profile, nullptr, nullptr, nullptr);

  // Verify the statistics of each rate step separately.
  for (size_t i = 0; i < rate_profile.size(); ++i) {
    const size_t num_frames =
        static_cast<size_t>(rate_profile[i].input_fps * kConstRateIntervalSec);
    auto stats = fixture->GetStats().SliceAndCalcLayerVideoStatistic(
        rate_profile[i].frame_num, rate_profile[i].frame_num + num_frames - 1);
    ASSERT_EQ(stats.size(), 1u);
    // Bitrate mismatch is <= 10%.
    EXPECT_LE(stats[0].avg_bitrate_mismatch_pct, 10);
    EXPECT_GE(stats[0].avg_bitrate_mismatch_pct, -10);
    // Avg frame transmission delay and processing latency is <=100..250ms
    // depending on frame rate.
    const double expected_delay_sec =
        std::min(std::max(1 / rate_profile[i].input_fps, 0.1), 0.25);
    EXPECT_LE(stats[0].avg_delay_sec, expected_delay_sec);
    EXPECT_LE(stats[0].avg_encode_latency_sec, expected_delay_sec);
    EXPECT_LE(stats[0].avg_decode_latency_sec, expected_delay_sec);
    // Frame drops are not expected.
    EXPECT_EQ(stats[0].num_encoded_frames, num_frames);
    EXPECT_EQ(stats[0].num_decoded_frames, num_frames);
    // Periodic keyframes are not expected.
    EXPECT_EQ(stats[0].num_key_frames, i == 0 ? 1u : 0);
    // Ensure codec delivers a reasonable spatial quality.
    EXPECT_GE(stats[0].avg_psnr_y, 35);
  }
}
// Instantiates the rate-adaptation suite for every (rate profile, codec)
// combination; instance names are produced by ParamInfoToStr.
INSTANTIATE_TEST_SUITE_P(
    RateAdaptation,
    VideoCodecTestMediaCodecRateAdaptation,
    ::testing::Combine(::testing::Values(kBitRateLowHighLow,
                                         kBitRateHighLowHigh,
                                         kFrameRateLowHighLow,
                                         kFrameRateHighLowHigh),
                       ::testing::Values(cricket::kVp8CodecName,
                                         cricket::kVp9CodecName,
                                         cricket::kH264CodecName)),
    VideoCodecTestMediaCodecRateAdaptation::ParamInfoToStr);
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <vector>
#include "api/test/create_videocodec_test_fixture.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace test {
namespace {
// Codec settings: CIF resolution, 100-frame runs.
const int kCifWidth = 352;
const int kCifHeight = 288;
const int kNumFrames = 100;
VideoCodecTestFixture::Config CreateConfig() {
VideoCodecTestFixture::Config config;
config.filename = "foreman_cif";
config.filepath = ResourcePath(config.filename, "yuv");
config.num_frames = kNumFrames;
// Only allow encoder/decoder to use single core, for predictability.
config.use_single_core = true;
return config;
}
} // namespace
// Encodes foreman_cif at a constant 500 kbps / 30 fps target and verifies
// rate control and quality against the configured thresholds.
TEST(VideoCodecTestOpenH264, ConstantHighBitrate) {
  const auto keyframe_checker =
      std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
  auto test_config = CreateConfig();
  test_config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true,
                               false, kCifWidth, kCifHeight);
  test_config.encoded_frame_checker = keyframe_checker.get();
  auto test_fixture = CreateVideoCodecTestFixture(test_config);
  std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  std::vector<RateControlThresholds> rc_limits = {
      {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
  std::vector<QualityThresholds> quality_limits = {{37, 35, 0.93, 0.91}};
  test_fixture->RunTest(rate_profiles, &rc_limits, &quality_limits,
                        /*bs_thresholds=*/nullptr);
}
// H264: With SingleNalUnit packetization the encoder must split large frames
// into multiple slices so that no NAL unit exceeds the payload limit; the
// bitstream thresholds verify the size cap.
TEST(VideoCodecTestOpenH264, SingleNalUnit) {
  const auto keyframe_checker =
      std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
  auto test_config = CreateConfig();
  test_config.h264_codec_settings.packetization_mode =
      H264PacketizationMode::SingleNalUnit;
  test_config.max_payload_size_bytes = 500;
  test_config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true,
                               false, kCifWidth, kCifHeight);
  test_config.encoded_frame_checker = keyframe_checker.get();
  auto test_fixture = CreateVideoCodecTestFixture(test_config);
  std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  std::vector<RateControlThresholds> rc_limits = {
      {5, 1, 0, 0.1, 0.2, 0.1, 0, 1}};
  std::vector<QualityThresholds> quality_limits = {{37, 35, 0.93, 0.91}};
  BitstreamThresholds bs_limits = {test_config.max_payload_size_bytes};
  test_fixture->RunTest(rate_profiles, &rc_limits, &quality_limits,
                        &bs_limits);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,441 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
#include <algorithm>
#include <cmath>
#include <iterator>
#include <limits>
#include <numeric>
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/running_statistics.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace test {
// Convenience aliases for the nested statistics types.
using FrameStatistics = VideoCodecTestStats::FrameStatistics;
using VideoStatistics = VideoCodecTestStats::VideoStatistics;

namespace {
// The measured bitrate may deviate from the target by at most this much
// before the stream is considered to not yet have reached the target.
const int kMaxBitrateMismatchPercent = 20;
}
// Out-of-line defaulted special members; the bookkeeping maps need no
// explicit setup or teardown.
VideoCodecTestStatsImpl::VideoCodecTestStatsImpl() = default;
VideoCodecTestStatsImpl::~VideoCodecTestStatsImpl() = default;
// Appends `frame_stat` to its spatial layer's frame list and records the
// RTP-timestamp -> frame-number mapping. A timestamp may be registered at
// most once per layer.
void VideoCodecTestStatsImpl::AddFrame(const FrameStatistics& frame_stat) {
  const size_t rtp_ts = frame_stat.rtp_timestamp;
  const size_t spatial_idx = frame_stat.spatial_idx;
  auto& ts_to_num = rtp_timestamp_to_frame_num_[spatial_idx];
  RTC_DCHECK(ts_to_num.find(rtp_ts) == ts_to_num.end());
  ts_to_num[rtp_ts] = frame_stat.frame_number;
  layer_stats_[spatial_idx].push_back(frame_stat);
}
// Returns a pointer to the stats of frame `frame_num` in layer `layer_idx`.
// The frame must already have been added.
FrameStatistics* VideoCodecTestStatsImpl::GetFrame(size_t frame_num,
                                                   size_t layer_idx) {
  std::vector<FrameStatistics>& frames = layer_stats_[layer_idx];
  RTC_CHECK_LT(frame_num, frames.size());
  return &frames[frame_num];
}
// Looks a frame up by its RTP timestamp within the given layer. The
// timestamp must have been registered via AddFrame().
FrameStatistics* VideoCodecTestStatsImpl::GetFrameWithTimestamp(
    size_t timestamp,
    size_t layer_idx) {
  auto& ts_to_num = rtp_timestamp_to_frame_num_[layer_idx];
  RTC_DCHECK(ts_to_num.find(timestamp) != ts_to_num.end());
  return GetFrame(ts_to_num[timestamp], layer_idx);
}
// Returns the stats entry for (`timestamp_rtp`, `spatial_idx`), creating a
// fresh one if the timestamp has not been seen in this layer yet.
FrameStatistics* VideoCodecTestStatsImpl::GetOrAddFrame(size_t timestamp_rtp,
                                                        size_t spatial_idx) {
  // Single lookup instead of the former count() + repeated find(): behavior
  // is identical, but each map is consulted only once on the hit path.
  auto& ts_to_num = rtp_timestamp_to_frame_num_[spatial_idx];
  auto it = ts_to_num.find(timestamp_rtp);
  if (it != ts_to_num.end()) {
    return GetFrame(it->second, spatial_idx);
  }

  // New timestamp: the next frame number is the current size of the layer.
  const size_t frame_num = layer_stats_[spatial_idx].size();
  AddFrame(FrameStatistics(frame_num, timestamp_rtp, spatial_idx));
  return GetFrame(frame_num, spatial_idx);
}
std::vector<FrameStatistics> VideoCodecTestStatsImpl::GetFrameStatistics()
const {
size_t capacity = 0;
for (const auto& layer_stat : layer_stats_) {
capacity += layer_stat.second.size();
}
std::vector<FrameStatistics> frame_statistics;
frame_statistics.reserve(capacity);
for (const auto& layer_stat : layer_stats_) {
std::copy(layer_stat.second.cbegin(), layer_stat.second.cend(),
std::back_inserter(frame_statistics));
}
return frame_statistics;
}
std::vector<VideoStatistics>
VideoCodecTestStatsImpl::SliceAndCalcLayerVideoStatistic(
size_t first_frame_num,
size_t last_frame_num) {
std::vector<VideoStatistics> layer_stats;
size_t num_spatial_layers = 0;
size_t num_temporal_layers = 0;
GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
&num_temporal_layers);
RTC_CHECK_GT(num_spatial_layers, 0);
RTC_CHECK_GT(num_temporal_layers, 0);
for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers;
++spatial_idx) {
for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers;
++temporal_idx) {
VideoStatistics layer_stat = SliceAndCalcVideoStatistic(
first_frame_num, last_frame_num, spatial_idx, temporal_idx, false,
/*target_bitrate=*/absl::nullopt, /*target_framerate=*/absl::nullopt);
layer_stats.push_back(layer_stat);
}
}
return layer_stats;
}
// Computes one aggregated VideoStatistics for the frame range, covering the
// topmost encoded spatial and temporal layer with all lower layers included.
VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcAggregatedVideoStatistic(
    size_t first_frame_num,
    size_t last_frame_num) {
  size_t num_spatial_layers = 0;
  size_t num_temporal_layers = 0;
  GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
                           &num_temporal_layers);
  RTC_CHECK_GT(num_spatial_layers, 0);
  RTC_CHECK_GT(num_temporal_layers, 0);

  const size_t top_spatial_idx = num_spatial_layers - 1;
  const size_t top_temporal_idx = num_temporal_layers - 1;
  return SliceAndCalcVideoStatistic(
      first_frame_num, last_frame_num, top_spatial_idx, top_temporal_idx,
      /*aggregate_independent_layers=*/true, /*target_bitrate=*/absl::nullopt,
      /*target_framerate=*/absl::nullopt);
}
// Computes one aggregated VideoStatistics for the frame range using
// externally supplied target bitrate and framerate.
VideoStatistics VideoCodecTestStatsImpl::CalcVideoStatistic(
    size_t first_frame_num,
    size_t last_frame_num,
    DataRate target_bitrate,
    Frequency target_framerate) {
  size_t num_spatial_layers = 0;
  size_t num_temporal_layers = 0;
  GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers,
                           &num_temporal_layers);
  // Guard against size_t underflow in the `- 1` below when no frame in the
  // range was successfully encoded; mirrors the checks in the sibling
  // SliceAndCalc* methods.
  RTC_CHECK_GT(num_spatial_layers, 0);
  RTC_CHECK_GT(num_temporal_layers, 0);
  return SliceAndCalcVideoStatistic(
      first_frame_num, last_frame_num, num_spatial_layers - 1,
      num_temporal_layers - 1, true, target_bitrate, target_framerate);
}
// Returns the number of frames recorded for `spatial_idx`. Note: the map
// access is non-const, so querying a layer that has no frames inserts an
// empty entry into `layer_stats_`.
size_t VideoCodecTestStatsImpl::Size(size_t spatial_idx) {
  return layer_stats_[spatial_idx].size();
}
// Drops all collected frame statistics and timestamp bookkeeping.
void VideoCodecTestStatsImpl::Clear() {
  layer_stats_.clear();
  rtp_timestamp_to_frame_num_.clear();
}
// Returns the stats of frame `frame_num` in layer `spatial_idx` with size and
// target bitrate accumulated over the lower spatial layers it depends on.
// When `aggregate_independent_layers` is false, only layers connected through
// inter-layer prediction are included.
FrameStatistics VideoCodecTestStatsImpl::AggregateFrameStatistic(
    size_t frame_num,
    size_t spatial_idx,
    bool aggregate_independent_layers) {
  FrameStatistics frame_stat = *GetFrame(frame_num, spatial_idx);
  bool inter_layer_predicted = frame_stat.inter_layer_predicted;
  // Walk down the spatial layers, accumulating each base layer's size and
  // target bitrate into the returned copy.
  while (spatial_idx-- > 0) {
    if (aggregate_independent_layers || inter_layer_predicted) {
      FrameStatistics* base_frame_stat = GetFrame(frame_num, spatial_idx);
      frame_stat.length_bytes += base_frame_stat->length_bytes;
      frame_stat.target_bitrate_kbps += base_frame_stat->target_bitrate_kbps;

      // Follow the prediction chain: once a layer does not predict from the
      // layer below it, lower layers stop contributing (unless aggregating
      // independent layers).
      inter_layer_predicted = base_frame_stat->inter_layer_predicted;
    }
  }
  return frame_stat;
}
// Returns the cumulative target bitrate (kbps) for the given spatial/temporal
// layer over frames [first_frame_num, last_frame_num]. Must be > 0 for the
// range (enforced by the DCHECK at the end).
size_t VideoCodecTestStatsImpl::CalcLayerTargetBitrateKbps(
    size_t first_frame_num,
    size_t last_frame_num,
    size_t spatial_idx,
    size_t temporal_idx,
    bool aggregate_independent_layers) {
  size_t target_bitrate_kbps = 0;

  // We don't know if superframe includes all required spatial layers because
  // of possible frame drops. Run through all frames in specified range, find
  // and return maximum target bitrate. Assume that target bitrate in frame
  // statistic is specified per temporal layer.
  for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
       ++frame_num) {
    FrameStatistics superframe = AggregateFrameStatistic(
        frame_num, spatial_idx, aggregate_independent_layers);

    // Frames of higher temporal layers carry a larger cumulative target and
    // must not be counted for this layer.
    if (superframe.temporal_idx <= temporal_idx) {
      target_bitrate_kbps =
          std::max(target_bitrate_kbps, superframe.target_bitrate_kbps);
    }
  }

  RTC_DCHECK_GT(target_bitrate_kbps, 0);
  return target_bitrate_kbps;
}
// Computes aggregated video statistics over frames
// [first_frame_num, last_frame_num] for the given spatial/temporal layer.
// Frames of higher temporal layers are skipped. If `target_bitrate` or
// `target_framerate` are not provided, they are derived from the collected
// frame statistics.
VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
    size_t first_frame_num,
    size_t last_frame_num,
    size_t spatial_idx,
    size_t temporal_idx,
    bool aggregate_independent_layers,
    absl::optional<DataRate> target_bitrate,
    absl::optional<Frequency> target_framerate) {
  VideoStatistics video_stat;

  // Leaky-bucket model used to estimate transmission delay: bits drain at the
  // target bitrate and every processed frame adds its encoded size.
  float buffer_level_bits = 0.0f;
  webrtc_impl::RunningStatistics<float> buffer_level_sec;

  // Per-frame running aggregates.
  webrtc_impl::RunningStatistics<size_t> key_frame_size_bytes;
  webrtc_impl::RunningStatistics<size_t> delta_frame_size_bytes;
  webrtc_impl::RunningStatistics<size_t> frame_encoding_time_us;
  webrtc_impl::RunningStatistics<size_t> frame_decoding_time_us;
  webrtc_impl::RunningStatistics<float> psnr_y;
  webrtc_impl::RunningStatistics<float> psnr_u;
  webrtc_impl::RunningStatistics<float> psnr_v;
  webrtc_impl::RunningStatistics<float> psnr;
  webrtc_impl::RunningStatistics<float> ssim;
  webrtc_impl::RunningStatistics<int> qp;

  size_t rtp_timestamp_first_frame = 0;
  size_t rtp_timestamp_prev_frame = 0;

  FrameStatistics last_successfully_decoded_frame(0, 0, 0);

  // Use the explicit target bitrate when provided, otherwise derive it from
  // the per-frame targets recorded for this layer.
  const size_t target_bitrate_kbps =
      target_bitrate.has_value()
          ? target_bitrate->kbps()
          : CalcLayerTargetBitrateKbps(first_frame_num, last_frame_num,
                                       spatial_idx, temporal_idx,
                                       aggregate_independent_layers);
  const size_t target_bitrate_bps = 1000 * target_bitrate_kbps;
  RTC_CHECK_GT(target_bitrate_kbps, 0);  // We divide by `target_bitrate_kbps`.

  for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
       ++frame_num) {
    FrameStatistics frame_stat = AggregateFrameStatistic(
        frame_num, spatial_idx, aggregate_independent_layers);

    float time_since_first_frame_sec =
        1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_first_frame) /
        kVideoPayloadTypeFrequency;
    float time_since_prev_frame_sec =
        1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_prev_frame) /
        kVideoPayloadTypeFrequency;

    // Frames of higher temporal layers do not belong to this slice.
    if (frame_stat.temporal_idx > temporal_idx) {
      continue;
    }

    // Drain the leaky bucket at the target rate, then add this frame's bits.
    buffer_level_bits -= time_since_prev_frame_sec * 1000 * target_bitrate_kbps;
    buffer_level_bits = std::max(0.0f, buffer_level_bits);
    buffer_level_bits += 8.0 * frame_stat.length_bytes;
    buffer_level_sec.AddSample(buffer_level_bits /
                               (1000 * target_bitrate_kbps));

    video_stat.length_bytes += frame_stat.length_bytes;

    if (frame_stat.encoding_successful) {
      ++video_stat.num_encoded_frames;

      if (frame_stat.frame_type == VideoFrameType::kVideoFrameKey) {
        key_frame_size_bytes.AddSample(frame_stat.length_bytes);
        ++video_stat.num_key_frames;
      } else {
        delta_frame_size_bytes.AddSample(frame_stat.length_bytes);
      }

      frame_encoding_time_us.AddSample(frame_stat.encode_time_us);
      qp.AddSample(frame_stat.qp);

      video_stat.max_nalu_size_bytes = std::max(video_stat.max_nalu_size_bytes,
                                                frame_stat.max_nalu_size_bytes);
    }

    if (frame_stat.decoding_successful) {
      ++video_stat.num_decoded_frames;

      video_stat.width = std::max(video_stat.width, frame_stat.decoded_width);
      video_stat.height =
          std::max(video_stat.height, frame_stat.decoded_height);

      // Count resolution changes between consecutively decoded frames.
      if (video_stat.num_decoded_frames > 1) {
        if (last_successfully_decoded_frame.decoded_width !=
                frame_stat.decoded_width ||
            last_successfully_decoded_frame.decoded_height !=
                frame_stat.decoded_height) {
          ++video_stat.num_spatial_resizes;
        }
      }

      frame_decoding_time_us.AddSample(frame_stat.decode_time_us);
      last_successfully_decoded_frame = frame_stat;
    }

    if (frame_stat.quality_analysis_successful) {
      psnr_y.AddSample(frame_stat.psnr_y);
      psnr_u.AddSample(frame_stat.psnr_u);
      psnr_v.AddSample(frame_stat.psnr_v);
      psnr.AddSample(frame_stat.psnr);
      ssim.AddSample(frame_stat.ssim);
    }

    // Record how long it took until the running average bitrate came within
    // kMaxBitrateMismatchPercent of the target (skipped for the first frame,
    // since there is no elapsed time yet).
    if (video_stat.num_input_frames > 0) {
      if (video_stat.time_to_reach_target_bitrate_sec == 0.0f) {
        RTC_CHECK_GT(time_since_first_frame_sec, 0);
        const float curr_kbps =
            8.0 * video_stat.length_bytes / 1000 / time_since_first_frame_sec;
        const float bitrate_mismatch_percent =
            100 * std::fabs(curr_kbps - target_bitrate_kbps) /
            target_bitrate_kbps;
        if (bitrate_mismatch_percent < kMaxBitrateMismatchPercent) {
          video_stat.time_to_reach_target_bitrate_sec =
              time_since_first_frame_sec;
        }
      }
    }

    rtp_timestamp_prev_frame = frame_stat.rtp_timestamp;
    if (video_stat.num_input_frames == 0) {
      rtp_timestamp_first_frame = frame_stat.rtp_timestamp;
    }
    ++video_stat.num_input_frames;
  }

  const size_t num_frames = last_frame_num - first_frame_num + 1;
  // Derive the input frame rate from the RTP timestamp delta of the first two
  // frames. NOTE(review): assumes the slice contains at least two frames and
  // a constant input frame rate — confirm with callers.
  const size_t timestamp_delta =
      GetFrame(first_frame_num + 1, spatial_idx)->rtp_timestamp -
      GetFrame(first_frame_num, spatial_idx)->rtp_timestamp;
  RTC_CHECK_GT(timestamp_delta, 0);
  const float input_framerate_fps =
      target_framerate.has_value()
          ? target_framerate->millihertz() / 1000.0
          : 1.0 * kVideoPayloadTypeFrequency / timestamp_delta;
  RTC_CHECK_GT(input_framerate_fps, 0);
  const float duration_sec = num_frames / input_framerate_fps;

  video_stat.target_bitrate_kbps = target_bitrate_kbps;
  video_stat.input_framerate_fps = input_framerate_fps;

  video_stat.spatial_idx = spatial_idx;
  video_stat.temporal_idx = temporal_idx;

  RTC_CHECK_GT(duration_sec, 0);
  const float bitrate_bps = 8 * video_stat.length_bytes / duration_sec;
  // Round to the nearest kbps.
  video_stat.bitrate_kbps = static_cast<size_t>((bitrate_bps + 500) / 1000);
  video_stat.framerate_fps = video_stat.num_encoded_frames / duration_sec;

  // http://bugs.webrtc.org/10400: On Windows, we only get millisecond
  // granularity in the frame encode/decode timing measurements.
  // So we need to softly avoid a div-by-zero here.
  const float mean_encode_time_us =
      frame_encoding_time_us.GetMean().value_or(0);
  video_stat.enc_speed_fps = mean_encode_time_us > 0.0f
                                 ? 1000000.0f / mean_encode_time_us
                                 : std::numeric_limits<float>::max();
  const float mean_decode_time_us =
      frame_decoding_time_us.GetMean().value_or(0);
  video_stat.dec_speed_fps = mean_decode_time_us > 0.0f
                                 ? 1000000.0f / mean_decode_time_us
                                 : std::numeric_limits<float>::max();

  video_stat.avg_encode_latency_sec =
      frame_encoding_time_us.GetMean().value_or(0) / 1000000.0f;
  video_stat.max_encode_latency_sec =
      frame_encoding_time_us.GetMax().value_or(0) / 1000000.0f;

  video_stat.avg_decode_latency_sec =
      frame_decoding_time_us.GetMean().value_or(0) / 1000000.0f;
  video_stat.max_decode_latency_sec =
      frame_decoding_time_us.GetMax().value_or(0) / 1000000.0f;

  // Worst-case transmission delay for a frame of the observed maximum size at
  // the target bitrate.
  auto MaxDelaySec = [target_bitrate_kbps](
                         const webrtc_impl::RunningStatistics<size_t>& stats) {
    return 8 * stats.GetMax().value_or(0) / 1000 / target_bitrate_kbps;
  };

  video_stat.avg_delay_sec = buffer_level_sec.GetMean().value_or(0);
  video_stat.max_key_frame_delay_sec = MaxDelaySec(key_frame_size_bytes);
  video_stat.max_delta_frame_delay_sec = MaxDelaySec(delta_frame_size_bytes);

  video_stat.avg_bitrate_mismatch_pct =
      100 * (bitrate_bps - target_bitrate_bps) / target_bitrate_bps;
  video_stat.avg_framerate_mismatch_pct =
      100 * (video_stat.framerate_fps - input_framerate_fps) /
      input_framerate_fps;

  video_stat.avg_key_frame_size_bytes =
      key_frame_size_bytes.GetMean().value_or(0);
  video_stat.avg_delta_frame_size_bytes =
      delta_frame_size_bytes.GetMean().value_or(0);
  video_stat.avg_qp = qp.GetMean().value_or(0);

  video_stat.avg_psnr_y = psnr_y.GetMean().value_or(0);
  video_stat.avg_psnr_u = psnr_u.GetMean().value_or(0);
  video_stat.avg_psnr_v = psnr_v.GetMean().value_or(0);
  video_stat.avg_psnr = psnr.GetMean().value_or(0);
  video_stat.min_psnr =
      psnr.GetMin().value_or(std::numeric_limits<float>::max());
  video_stat.avg_ssim = ssim.GetMean().value_or(0);
  video_stat.min_ssim =
      ssim.GetMin().value_or(std::numeric_limits<float>::max());

  return video_stat;
}
void VideoCodecTestStatsImpl::GetNumberOfEncodedLayers(
size_t first_frame_num,
size_t last_frame_num,
size_t* num_encoded_spatial_layers,
size_t* num_encoded_temporal_layers) {
*num_encoded_spatial_layers = 0;
*num_encoded_temporal_layers = 0;
const size_t num_spatial_layers = layer_stats_.size();
for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
++frame_num) {
for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers;
++spatial_idx) {
FrameStatistics* frame_stat = GetFrame(frame_num, spatial_idx);
if (frame_stat->encoding_successful) {
*num_encoded_spatial_layers =
std::max(*num_encoded_spatial_layers, frame_stat->spatial_idx + 1);
*num_encoded_temporal_layers = std::max(*num_encoded_temporal_layers,
frame_stat->temporal_idx + 1);
}
}
}
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,95 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_
#include <stddef.h>
#include <map>
#include <string>
#include <vector>
#include "api/test/videocodec_test_stats.h" // NOLINT(build/include)
namespace webrtc {
namespace test {
// Statistics for a sequence of processed frames. This class is not thread
// safe.
class VideoCodecTestStatsImpl : public VideoCodecTestStats {
 public:
  VideoCodecTestStatsImpl();
  ~VideoCodecTestStatsImpl() override;

  // Creates a FrameStatistics for the next frame to be processed.
  void AddFrame(const FrameStatistics& frame_stat);

  // Returns the FrameStatistics corresponding to `frame_number` or
  // `timestamp` in the given spatial layer.
  FrameStatistics* GetFrame(size_t frame_number, size_t spatial_idx);
  FrameStatistics* GetFrameWithTimestamp(size_t timestamp, size_t spatial_idx);

  // Creates a FrameStatistics if one doesn't exist and/or returns the
  // created/existing FrameStatistics.
  FrameStatistics* GetOrAddFrame(size_t timestamp_rtp, size_t spatial_idx);

  // Implements VideoCodecTestStats.
  std::vector<FrameStatistics> GetFrameStatistics() const override;
  std::vector<VideoStatistics> SliceAndCalcLayerVideoStatistic(
      size_t first_frame_num,
      size_t last_frame_num) override;

  // Aggregated statistics over all layers for the given frame range.
  VideoStatistics SliceAndCalcAggregatedVideoStatistic(size_t first_frame_num,
                                                       size_t last_frame_num);

  // Aggregated statistics with externally supplied rate targets.
  VideoStatistics CalcVideoStatistic(size_t first_frame,
                                     size_t last_frame,
                                     DataRate target_bitrate,
                                     Frequency target_framerate) override;

  // Number of frames recorded for the given spatial layer.
  size_t Size(size_t spatial_idx);

  // Drops all collected statistics.
  void Clear();

 private:
  // Frame stats with size/bitrate aggregated over dependent lower spatial
  // layers.
  VideoCodecTestStats::FrameStatistics AggregateFrameStatistic(
      size_t frame_num,
      size_t spatial_idx,
      bool aggregate_independent_layers);

  // Cumulative target bitrate (kbps) of the given layer over the frame range.
  size_t CalcLayerTargetBitrateKbps(size_t first_frame_num,
                                    size_t last_frame_num,
                                    size_t spatial_idx,
                                    size_t temporal_idx,
                                    bool aggregate_independent_layers);

  // Aggregated statistics for one (spatial, temporal) layer slice.
  VideoCodecTestStats::VideoStatistics SliceAndCalcVideoStatistic(
      size_t first_frame_num,
      size_t last_frame_num,
      size_t spatial_idx,
      size_t temporal_idx,
      bool aggregate_independent_layers,
      absl::optional<DataRate> target_bitrate,
      absl::optional<Frequency> target_framerate);

  // Number of spatial/temporal layers actually encoded in the frame range.
  void GetNumberOfEncodedLayers(size_t first_frame_num,
                                size_t last_frame_num,
                                size_t* num_encoded_spatial_layers,
                                size_t* num_encoded_temporal_layers);

  // layer_idx -> stats.
  std::map<size_t, std::vector<FrameStatistics>> layer_stats_;
  // layer_idx -> rtp_timestamp -> frame_num.
  std::map<size_t, std::map<size_t, size_t>> rtp_timestamp_to_frame_num_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOCODEC_TEST_STATS_IMPL_H_

View file

@ -0,0 +1,105 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
#include <vector>
#include "test/gmock.h"
#include "test/gtest.h"
namespace webrtc {
namespace test {
using FrameStatistics = VideoCodecTestStatsImpl::FrameStatistics;

namespace {
// Arbitrary RTP timestamp used as a base value throughout the tests.
const size_t kTimestamp = 12345;

using ::testing::AllOf;
using ::testing::Contains;
using ::testing::Field;
}  // namespace
// An added frame is retrievable by frame number with its fields intact.
TEST(StatsTest, AddAndGetFrame) {
  VideoCodecTestStatsImpl stats;
  stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
  const FrameStatistics* stored = stats.GetFrame(0u, 0);
  EXPECT_EQ(0u, stored->frame_number);
  EXPECT_EQ(kTimestamp, stored->rtp_timestamp);
}
// GetOrAddFrame creates a new entry for an unknown timestamp.
TEST(StatsTest, GetOrAddFrame_noFrame_createsNewFrameStat) {
  VideoCodecTestStatsImpl stats;
  stats.GetOrAddFrame(kTimestamp, 0);
  const FrameStatistics* created = stats.GetFrameWithTimestamp(kTimestamp, 0);
  EXPECT_EQ(kTimestamp, created->rtp_timestamp);
}
// GetOrAddFrame returns the existing entry for a known timestamp.
TEST(StatsTest, GetOrAddFrame_frameExists_returnsExistingFrameStat) {
  VideoCodecTestStatsImpl stats;
  stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
  const FrameStatistics* first_lookup =
      stats.GetFrameWithTimestamp(kTimestamp, 0);
  const FrameStatistics* second_lookup = stats.GetOrAddFrame(kTimestamp, 0);
  EXPECT_EQ(first_lookup, second_lookup);
}
// Frames added in sequence are retrievable both by frame number and by RTP
// timestamp.
TEST(StatsTest, AddAndGetFrames) {
  VideoCodecTestStatsImpl stats;
  const size_t kNumFrames = 1000;
  for (size_t frame_num = 0; frame_num < kNumFrames; ++frame_num) {
    stats.AddFrame(FrameStatistics(frame_num, kTimestamp + frame_num, 0));
    const FrameStatistics* added = stats.GetFrame(frame_num, 0);
    EXPECT_EQ(frame_num, added->frame_number);
    EXPECT_EQ(kTimestamp + frame_num, added->rtp_timestamp);
  }
  EXPECT_EQ(kNumFrames, stats.Size(0));
  // Look a single frame up by its timestamp.
  const size_t kQueryFrameNum = 22;
  const FrameStatistics* looked_up =
      stats.GetFrameWithTimestamp(kTimestamp + kQueryFrameNum, 0);
  EXPECT_EQ(kQueryFrameNum, looked_up->frame_number);
  EXPECT_EQ(kTimestamp + kQueryFrameNum, looked_up->rtp_timestamp);
}
// The same frame number may be added once per spatial layer; every layer
// keeps its own list.
TEST(StatsTest, AddFrameLayering) {
  VideoCodecTestStatsImpl stats;
  for (size_t layer = 0; layer < 3; ++layer) {
    stats.AddFrame(FrameStatistics(0, kTimestamp + layer, layer));
    const FrameStatistics* stored = stats.GetFrame(0u, layer);
    EXPECT_EQ(0u, stored->frame_number);
    EXPECT_EQ(kTimestamp, stored->rtp_timestamp - layer);
    EXPECT_EQ(1u, stats.Size(layer));
  }
}
// GetFrameStatistics returns an entry for every (frame, layer) combination
// that was added.
TEST(StatsTest, GetFrameStatistics) {
  VideoCodecTestStatsImpl stats;
  stats.AddFrame(FrameStatistics(0, kTimestamp, 0));
  stats.AddFrame(FrameStatistics(0, kTimestamp, 1));
  stats.AddFrame(FrameStatistics(1, kTimestamp + 3000, 0));
  stats.AddFrame(FrameStatistics(1, kTimestamp + 3000, 1));

  const std::vector<FrameStatistics> frame_stats = stats.GetFrameStatistics();

  auto matches = [](size_t frame_number, size_t spatial_idx) {
    return AllOf(Field(&FrameStatistics::frame_number, frame_number),
                 Field(&FrameStatistics::spatial_idx, spatial_idx));
  };
  EXPECT_THAT(frame_stats, Contains(matches(0, 0)));
  EXPECT_THAT(frame_stats, Contains(matches(0, 1)));
  EXPECT_THAT(frame_stats, Contains(matches(1, 0)));
  EXPECT_THAT(frame_stats, Contains(matches(1, 1)));
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include <vector>
#include "api/test/create_videocodec_test_fixture.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/test/objc_codec_factory_helper.h"
#include "modules/video_coding/codecs/test/videocodec_test_fixture_impl.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"
namespace webrtc {
namespace test {
namespace {
// Number of foreman_cif frames to process.
const int kForemanNumFrames = 300;

// Builds the common test configuration for the foreman_cif clip.
VideoCodecTestFixture::Config CreateConfig() {
  VideoCodecTestFixture::Config cfg;
  cfg.filename = "foreman_cif";
  cfg.filepath = ResourcePath(cfg.filename, "yuv");
  cfg.num_frames = kForemanNumFrames;
  return cfg;
}

// Wraps `config` in a test fixture that uses the ObjC (VideoToolbox) codec
// factories.
std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
    VideoCodecTestFixture::Config config) {
  auto decoders = CreateObjCDecoderFactory();
  auto encoders = CreateObjCEncoderFactory();
  return CreateVideoCodecTestFixture(config, std::move(decoders),
                                     std::move(encoders));
}
}  // namespace
// TODO(webrtc:9099): Disabled until the issue is fixed.
// HW codecs don't work on simulators. Only run these tests on device.
// #if TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR
// #define MAYBE_TEST TEST
// #else
#define MAYBE_TEST(s, name) TEST(s, DISABLED_##name)
// #endif
// TODO(kthelgason): Use RC Thresholds when the internal bitrateAdjuster is no
// longer in use.
// Encodes foreman_cif at 500 kbps with the H.264 Constrained Baseline
// profile and checks decoded quality only (no rate-control thresholds).
MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CBP) {
  const auto keyframe_checker =
      std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
  auto test_config = CreateConfig();
  test_config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false,
                               false, 352, 288);
  test_config.encoded_frame_checker = keyframe_checker.get();
  auto test_fixture = CreateTestFixtureWithConfig(test_config);
  std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  std::vector<QualityThresholds> quality_limits = {{33, 29, 0.9, 0.82}};
  test_fixture->RunTest(rate_profiles, /*rc_thresholds=*/nullptr,
                        &quality_limits, /*bs_thresholds=*/nullptr);
}
// Same as the CBP test above, but with the Constrained High profile and
// slightly tighter quality thresholds.
MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CHP) {
  const auto keyframe_checker =
      std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
  auto test_config = CreateConfig();
  test_config.h264_codec_settings.profile =
      H264Profile::kProfileConstrainedHigh;
  test_config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false,
                               false, 352, 288);
  test_config.encoded_frame_checker = keyframe_checker.get();
  auto test_fixture = CreateTestFixtureWithConfig(test_config);
  std::vector<RateProfile> rate_profiles = {{500, 30, 0}};
  std::vector<QualityThresholds> quality_limits = {{33, 30, 0.91, 0.83}};
  test_fixture->RunTest(rate_profiles, /*rc_thresholds=*/nullptr,
                        &quality_limits, /*bs_thresholds=*/nullptr);
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,726 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/videoprocessor.h"
#include <string.h>
#include <algorithm>
#include <cstddef>
#include <limits>
#include <memory>
#include <utility>
#include "api/scoped_refptr.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/h264/h264_common.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/codecs/interface/common_constants.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
#include "test/gtest.h"
#include "third_party/libyuv/include/libyuv/compare.h"
#include "third_party/libyuv/include/libyuv/scale.h"
namespace webrtc {
namespace test {
namespace {
// Conversion factor from milliseconds to the RTP clock
// (kVideoPayloadTypeFrequency ticks per second).
const int kMsToRtpTimestamp = kVideoPayloadTypeFrequency / 1000;
// Upper bound on frames buffered while awaiting encode/decode completion;
// exceeding it indicates a stalled pipeline.
const int kMaxBufferedInputFrames = 20;

const VideoEncoder::Capabilities kCapabilities(false);

// Returns the size of the largest NAL unit in `encoded_frame`, or 0 for
// non-H.264 codecs.
size_t GetMaxNaluSizeBytes(const EncodedImage& encoded_frame,
                           const VideoCodecTestFixture::Config& config) {
  if (config.codec_settings.codecType != kVideoCodecH264)
    return 0;
  std::vector<webrtc::H264::NaluIndex> nalu_indices =
      webrtc::H264::FindNaluIndices(encoded_frame.data(),
                                    encoded_frame.size());
  RTC_CHECK(!nalu_indices.empty());
  size_t max_size = 0;
  for (const webrtc::H264::NaluIndex& index : nalu_indices)
    max_size = std::max(max_size, index.payload_size);
  return max_size;
}

// Extracts the temporal layer index from codec-specific info. Falls back to
// 0 for codecs that do not report one (or report kNoTemporalIdx).
size_t GetTemporalLayerIndex(const CodecSpecificInfo& codec_specific) {
  size_t temporal_idx = 0;
  if (codec_specific.codecType == kVideoCodecVP8) {
    temporal_idx = codec_specific.codecSpecific.VP8.temporalIdx;
  } else if (codec_specific.codecType == kVideoCodecVP9) {
    temporal_idx = codec_specific.codecSpecific.VP9.temporal_idx;
  }
  if (temporal_idx == kNoTemporalIdx) {
    temporal_idx = 0;
  }
  return temporal_idx;
}

// Returns stop - start converted from nanoseconds to microseconds; the
// difference must fit in an int.
int GetElapsedTimeMicroseconds(int64_t start_ns, int64_t stop_ns) {
  int64_t diff_us = (stop_ns - start_ns) / rtc::kNumNanosecsPerMicrosec;
  RTC_DCHECK_GE(diff_us, std::numeric_limits<int>::min());
  RTC_DCHECK_LE(diff_us, std::numeric_limits<int>::max());
  return static_cast<int>(diff_us);
}

// Fills `frame_stat` with per-plane and combined PSNR (and optionally SSIM)
// of `dec_buffer` measured against `ref_buffer`. If the decoded frame is
// smaller than the reference, the reference is downscaled to match first.
void CalculateFrameQuality(const I420BufferInterface& ref_buffer,
                           const I420BufferInterface& dec_buffer,
                           VideoCodecTestStats::FrameStatistics* frame_stat,
                           bool calc_ssim) {
  if (ref_buffer.width() != dec_buffer.width() ||
      ref_buffer.height() != dec_buffer.height()) {
    RTC_CHECK_GE(ref_buffer.width(), dec_buffer.width());
    RTC_CHECK_GE(ref_buffer.height(), dec_buffer.height());
    // Downscale reference frame.
    rtc::scoped_refptr<I420Buffer> scaled_buffer =
        I420Buffer::Create(dec_buffer.width(), dec_buffer.height());
    I420Scale(ref_buffer.DataY(), ref_buffer.StrideY(), ref_buffer.DataU(),
              ref_buffer.StrideU(), ref_buffer.DataV(), ref_buffer.StrideV(),
              ref_buffer.width(), ref_buffer.height(),
              scaled_buffer->MutableDataY(), scaled_buffer->StrideY(),
              scaled_buffer->MutableDataU(), scaled_buffer->StrideU(),
              scaled_buffer->MutableDataV(), scaled_buffer->StrideV(),
              scaled_buffer->width(), scaled_buffer->height(),
              libyuv::kFilterBox);

    // Recurse once with matching dimensions.
    CalculateFrameQuality(*scaled_buffer, dec_buffer, frame_stat, calc_ssim);
  } else {
    const uint64_t sse_y = libyuv::ComputeSumSquareErrorPlane(
        dec_buffer.DataY(), dec_buffer.StrideY(), ref_buffer.DataY(),
        ref_buffer.StrideY(), dec_buffer.width(), dec_buffer.height());

    const uint64_t sse_u = libyuv::ComputeSumSquareErrorPlane(
        dec_buffer.DataU(), dec_buffer.StrideU(), ref_buffer.DataU(),
        ref_buffer.StrideU(), dec_buffer.width() / 2, dec_buffer.height() / 2);

    const uint64_t sse_v = libyuv::ComputeSumSquareErrorPlane(
        dec_buffer.DataV(), dec_buffer.StrideV(), ref_buffer.DataV(),
        ref_buffer.StrideV(), dec_buffer.width() / 2, dec_buffer.height() / 2);

    const size_t num_y_samples = dec_buffer.width() * dec_buffer.height();
    // Chroma planes are subsampled 2x in each dimension (I420).
    const size_t num_u_samples =
        dec_buffer.width() / 2 * dec_buffer.height() / 2;

    frame_stat->psnr_y = libyuv::SumSquareErrorToPsnr(sse_y, num_y_samples);
    frame_stat->psnr_u = libyuv::SumSquareErrorToPsnr(sse_u, num_u_samples);
    frame_stat->psnr_v = libyuv::SumSquareErrorToPsnr(sse_v, num_u_samples);
    frame_stat->psnr = libyuv::SumSquareErrorToPsnr(
        sse_y + sse_u + sse_v, num_y_samples + 2 * num_u_samples);

    if (calc_ssim) {
      frame_stat->ssim = I420SSIM(ref_buffer, dec_buffer);
    }
  }
}

}  // namespace
// Wires up the encoder, per-layer decoders, frame reader and output writers,
// then initializes the codecs so they are ready to process frames. Must be
// constructed on a task queue.
VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
                               VideoDecoderList* decoders,
                               FrameReader* input_frame_reader,
                               const VideoCodecTestFixture::Config& config,
                               VideoCodecTestStatsImpl* stats,
                               IvfFileWriterMap* encoded_frame_writers,
                               FrameWriterList* decoded_frame_writers)
    : config_(config),
      // One logical stream per simulcast stream or spatial layer, whichever
      // the codec configuration uses.
      num_simulcast_or_spatial_layers_(
          std::max(config_.NumberOfSimulcastStreams(),
                   config_.NumberOfSpatialLayers())),
      // Quality analysis is skipped while measuring CPU usage to avoid
      // polluting the measurement.
      analyze_frame_quality_(!config_.measure_cpu),
      stats_(stats),
      encoder_(encoder),
      decoders_(decoders),
      bitrate_allocator_(
          CreateBuiltinVideoBitrateAllocatorFactory()
              ->CreateVideoBitrateAllocator(config_.codec_settings)),
      encode_callback_(this),
      input_frame_reader_(input_frame_reader),
      merged_encoded_frames_(num_simulcast_or_spatial_layers_),
      encoded_frame_writers_(encoded_frame_writers),
      decoded_frame_writers_(decoded_frame_writers),
      last_inputed_frame_num_(0),
      last_inputed_timestamp_(0),
      first_encoded_frame_(num_simulcast_or_spatial_layers_, true),
      last_encoded_frame_num_(num_simulcast_or_spatial_layers_),
      first_decoded_frame_(num_simulcast_or_spatial_layers_, true),
      last_decoded_frame_num_(num_simulcast_or_spatial_layers_),
      last_decoded_frame_buffer_(num_simulcast_or_spatial_layers_),
      post_encode_time_ns_(0),
      is_finalized_(false) {
  // Sanity checks.
  RTC_CHECK(TaskQueueBase::Current())
      << "VideoProcessor must be run on a task queue.";
  RTC_CHECK(stats_);
  RTC_CHECK(encoder_);
  RTC_CHECK(decoders_);
  RTC_CHECK_EQ(decoders_->size(), num_simulcast_or_spatial_layers_);
  RTC_CHECK(input_frame_reader_);
  RTC_CHECK(encoded_frame_writers_);
  RTC_CHECK(!decoded_frame_writers ||
            decoded_frame_writers->size() == num_simulcast_or_spatial_layers_);

  // Setup required callbacks for the encoder and decoder and initialize them.
  RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(&encode_callback_),
               WEBRTC_VIDEO_CODEC_OK);

  // Initialize codecs so that they are ready to receive frames.
  RTC_CHECK_EQ(encoder_->InitEncode(
                   &config_.codec_settings,
                   VideoEncoder::Settings(
                       kCapabilities, static_cast<int>(config_.NumberOfCores()),
                       config_.max_payload_size_bytes)),
               WEBRTC_VIDEO_CODEC_OK);

  // One decoder per layer, each with a decode-complete callback that carries
  // the layer index.
  for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
    decode_callback_.push_back(
        std::make_unique<VideoProcessorDecodeCompleteCallback>(this, i));
    VideoDecoder::Settings decoder_settings;
    decoder_settings.set_max_render_resolution(
        {config_.codec_settings.width, config_.codec_settings.height});
    decoder_settings.set_codec_type(config_.codec_settings.codecType);
    decoder_settings.set_number_of_cores(config_.NumberOfCores());
    RTC_CHECK(decoders_->at(i)->Configure(decoder_settings));
    RTC_CHECK_EQ(decoders_->at(i)->RegisterDecodeCompleteCallback(
                     decode_callback_.at(i).get()),
                 WEBRTC_VIDEO_CODEC_OK);
  }
}
// Tears down the processor: runs Finalize() if the caller did not, then
// releases the codecs and deregisters the callbacks.
VideoProcessor::~VideoProcessor() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  if (!is_finalized_) {
    Finalize();
  }
  // Explicitly reset codecs, in case they don't do that themselves when they
  // go out of scope.
  RTC_CHECK_EQ(encoder_->Release(), WEBRTC_VIDEO_CODEC_OK);
  encoder_->RegisterEncodeCompleteCallback(nullptr);
  for (auto& decoder : *decoders_) {
    RTC_CHECK_EQ(decoder->Release(), WEBRTC_VIDEO_CODEC_OK);
    decoder->RegisterDecodeCompleteCallback(nullptr);
  }
  // Sanity check: the reference-frame buffer must never have grown beyond
  // its bound.
  RTC_CHECK_LE(input_frames_.size(), kMaxBufferedInputFrames);
}
// Reads one frame from the input, timestamps it according to the currently
// active rate profile, optionally stores a (possibly scaled) copy as a
// quality reference, and sends it to the encoder. Encoded output arrives
// asynchronously via FrameEncoded(). Requires SetRates() to have been called
// at least once before the first frame.
void VideoProcessor::ProcessFrame() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK(!is_finalized_);
  RTC_DCHECK_GT(target_rates_.size(), 0u);
  RTC_DCHECK_EQ(target_rates_.begin()->first, 0u);
  // Pick the rate profile whose start frame is the latest one not after the
  // current frame number.
  RateProfile target_rate =
      std::prev(target_rates_.upper_bound(last_inputed_frame_num_))->second;
  const size_t frame_number = last_inputed_frame_num_++;
  // Get input frame and store for future quality calculation.
  Resolution resolution = Resolution({.width = config_.codec_settings.width,
                                      .height = config_.codec_settings.height});
  // Ratio of clip framerate to target framerate; the reader uses it to skip
  // or repeat source frames.
  FrameReader::Ratio framerate_scale = FrameReader::Ratio(
      {.num = config_.clip_fps.value_or(config_.codec_settings.maxFramerate),
       .den = static_cast<int>(config_.codec_settings.maxFramerate)});
  rtc::scoped_refptr<I420BufferInterface> buffer =
      input_frame_reader_->PullFrame(
          /*frame_num*/ nullptr, resolution, framerate_scale);
  RTC_CHECK(buffer) << "Tried to read too many frames from the file.";
  // Advance the RTP timestamp by one frame interval at the current target
  // input fps (kVideoPayloadTypeFrequency ticks per second).
  const size_t timestamp =
      last_inputed_timestamp_ +
      static_cast<size_t>(kVideoPayloadTypeFrequency / target_rate.input_fps);
  VideoFrame input_frame =
      VideoFrame::Builder()
          .set_video_frame_buffer(buffer)
          .set_timestamp_rtp(static_cast<uint32_t>(timestamp))
          .set_timestamp_ms(static_cast<int64_t>(timestamp / kMsToRtpTimestamp))
          .set_rotation(webrtc::kVideoRotation_0)
          .build();
  // Store input frame as a reference for quality calculations.
  if (config_.decode && !config_.measure_cpu) {
    // Bound the reference buffer; evict the oldest frame when full.
    if (input_frames_.size() == kMaxBufferedInputFrames) {
      input_frames_.erase(input_frames_.begin());
    }
    if (config_.reference_width != -1 && config_.reference_height != -1 &&
        (input_frame.width() != config_.reference_width ||
         input_frame.height() != config_.reference_height)) {
      rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
          config_.codec_settings.width, config_.codec_settings.height);
      scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
      VideoFrame scaled_reference_frame = input_frame;
      scaled_reference_frame.set_video_frame_buffer(scaled_buffer);
      input_frames_.emplace(frame_number, scaled_reference_frame);
      if (config_.reference_width == config_.codec_settings.width &&
          config_.reference_height == config_.codec_settings.height) {
        // Both encoding and comparison uses the same down-scale factor, reuse
        // it for encoder below.
        input_frame = scaled_reference_frame;
      }
    } else {
      input_frames_.emplace(frame_number, input_frame);
    }
  }
  last_inputed_timestamp_ = timestamp;
  // Reset the encode-callback time accumulator for this superframe.
  post_encode_time_ns_ = 0;
  // Create frame statistics object for all simulcast/spatial layers.
  for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
    FrameStatistics frame_stat(frame_number, timestamp, i);
    stats_->AddFrame(frame_stat);
  }
  // For the highest measurement accuracy of the encode time, the start/stop
  // time recordings should wrap the Encode call as tightly as possible.
  const int64_t encode_start_ns = rtc::TimeNanos();
  for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
    FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
    frame_stat->encode_start_ns = encode_start_ns;
  }
  // Scale the frame to the encode resolution if the source differs.
  if (input_frame.width() != config_.codec_settings.width ||
      input_frame.height() != config_.codec_settings.height) {
    rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
        config_.codec_settings.width, config_.codec_settings.height);
    scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
    input_frame.set_video_frame_buffer(scaled_buffer);
  }
  // Encode. Force a key frame for the very first frame on all layers.
  const std::vector<VideoFrameType> frame_types =
      (frame_number == 0)
          ? std::vector<VideoFrameType>(num_simulcast_or_spatial_layers_,
                                        VideoFrameType::kVideoFrameKey)
          : std::vector<VideoFrameType>(num_simulcast_or_spatial_layers_,
                                        VideoFrameType::kVideoFrameDelta);
  const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
  for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
    FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
    frame_stat->encode_return_code = encode_return_code;
  }
}
// Records a new target rate profile, effective from the next input frame,
// and pushes the corresponding per-layer bitrate allocation to the encoder.
// Must be called at least once before ProcessFrame().
void VideoProcessor::SetRates(size_t bitrate_kbps, double framerate_fps) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK(!is_finalized_);
  // Remember the profile keyed by the frame number it takes effect at.
  RateProfile profile;
  profile.target_kbps = bitrate_kbps;
  profile.input_fps = framerate_fps;
  target_rates_[last_inputed_frame_num_] = profile;
  // Translate the aggregate target into a per-layer allocation.
  const uint32_t bitrate_bps = static_cast<uint32_t>(bitrate_kbps * 1000);
  auto allocation = bitrate_allocator_->Allocate(
      VideoBitrateAllocationParameters(bitrate_bps, framerate_fps));
  encoder_->SetRates(
      VideoEncoder::RateControlParameters(allocation, framerate_fps));
}
// Decode-complete callback. Forwards the decoded frame to
// VideoProcessor::FrameDecoded() on the processor's task queue, hopping
// queues (with a deep copy of the pixel data) when invoked from a decoder
// thread.
int32_t VideoProcessor::VideoProcessorDecodeCompleteCallback::Decoded(
    VideoFrame& image) {
  // Post the callback to the right task queue, if needed.
  if (!task_queue_->IsCurrent()) {
    // There might be a limited amount of output buffers, make a copy to make
    // sure we don't block the decoder.
    VideoFrame copy = VideoFrame::Builder()
                          .set_video_frame_buffer(I420Buffer::Copy(
                              *image.video_frame_buffer()->ToI420()))
                          .set_rotation(image.rotation())
                          .set_timestamp_us(image.timestamp_us())
                          .set_id(image.id())
                          .build();
    // The builder does not carry the RTP timestamp; copy it separately since
    // FrameDecoded() uses it to look up frame statistics.
    copy.set_timestamp(image.timestamp());
    task_queue_->PostTask([this, copy]() {
      video_processor_->FrameDecoded(copy, simulcast_svc_idx_);
    });
    return 0;
  }
  video_processor_->FrameDecoded(image, simulcast_svc_idx_);
  return 0;
}
// Encode-complete handler for one layer frame. Updates per-frame statistics
// (encode time, target rates, size, QP, ...), verifies encode ordering
// across layers, optionally merges SVC layers into a superframe, feeds the
// result to the matching decoder, and dumps the bitstream to the configured
// IVF writers.
void VideoProcessor::FrameEncoded(
    const webrtc::EncodedImage& encoded_image,
    const webrtc::CodecSpecificInfo& codec_specific) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // For the highest measurement accuracy of the encode time, the start/stop
  // time recordings should wrap the Encode call as tightly as possible.
  const int64_t encode_stop_ns = rtc::TimeNanos();
  const VideoCodecType codec_type = codec_specific.codecType;
  if (config_.encoded_frame_checker) {
    config_.encoded_frame_checker->CheckEncodedFrame(codec_type, encoded_image);
  }
  // Layer metadata.
  // We could either have simulcast layers or spatial layers.
  // TODO(https://crbug.com/webrtc/14891): If we want to support a mix of
  // simulcast and SVC we'll also need to consider the case where we have both
  // simulcast and spatial indices.
  size_t stream_idx = encoded_image.SpatialIndex().value_or(
      encoded_image.SimulcastIndex().value_or(0));
  size_t temporal_idx = GetTemporalLayerIndex(codec_specific);
  FrameStatistics* frame_stat =
      stats_->GetFrameWithTimestamp(encoded_image.RtpTimestamp(), stream_idx);
  const size_t frame_number = frame_stat->frame_number;
  // Ensure that the encode order is monotonically increasing, within this
  // simulcast/spatial layer.
  RTC_CHECK(first_encoded_frame_[stream_idx] ||
            last_encoded_frame_num_[stream_idx] < frame_number);
  // Ensure SVC spatial layers are delivered in ascending order.
  const size_t num_spatial_layers = config_.NumberOfSpatialLayers();
  if (!first_encoded_frame_[stream_idx] && num_spatial_layers > 1) {
    for (size_t i = 0; i < stream_idx; ++i) {
      RTC_CHECK_LE(last_encoded_frame_num_[i], frame_number);
    }
    for (size_t i = stream_idx + 1; i < num_simulcast_or_spatial_layers_; ++i) {
      RTC_CHECK_GT(frame_number, last_encoded_frame_num_[i]);
    }
  }
  first_encoded_frame_[stream_idx] = false;
  last_encoded_frame_num_[stream_idx] = frame_number;
  // Look up the rate profile that was active when this frame was inputed.
  RateProfile target_rate =
      std::prev(target_rates_.upper_bound(frame_number))->second;
  auto bitrate_allocation =
      bitrate_allocator_->Allocate(VideoBitrateAllocationParameters(
          static_cast<uint32_t>(target_rate.target_kbps * 1000),
          target_rate.input_fps));
  // Update frame statistics.
  frame_stat->encoding_successful = true;
  // Subtract accumulated encode-callback time so that per-layer encode time
  // excludes the time spent in this callback for earlier layers.
  frame_stat->encode_time_us = GetElapsedTimeMicroseconds(
      frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
  frame_stat->target_bitrate_kbps =
      bitrate_allocation.GetTemporalLayerSum(stream_idx, temporal_idx) / 1000;
  frame_stat->target_framerate_fps = target_rate.input_fps;
  frame_stat->length_bytes = encoded_image.size();
  frame_stat->frame_type = encoded_image._frameType;
  frame_stat->temporal_idx = temporal_idx;
  frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_);
  frame_stat->qp = encoded_image.qp_;
  if (codec_type == kVideoCodecVP9) {
    const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9;
    frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted;
    frame_stat->non_ref_for_inter_layer_pred =
        vp9_info.non_ref_for_inter_layer_pred;
  } else {
    frame_stat->inter_layer_predicted = false;
    frame_stat->non_ref_for_inter_layer_pred = true;
  }
  const webrtc::EncodedImage* encoded_image_for_decode = &encoded_image;
  if (config_.decode || !encoded_frame_writers_->empty()) {
    if (num_spatial_layers > 1) {
      // SVC layers are decoded by dedicated decoders; merge lower layers into
      // a superframe for this layer's decoder.
      encoded_image_for_decode = BuildAndStoreSuperframe(
          encoded_image, codec_type, frame_number, stream_idx,
          frame_stat->inter_layer_predicted);
    }
  }
  if (config_.decode) {
    DecodeFrame(*encoded_image_for_decode, stream_idx);
    if (codec_specific.end_of_picture && num_spatial_layers > 1) {
      // If inter-layer prediction is enabled and upper layer was dropped then
      // base layer should be passed to upper layer decoder. Otherwise decoder
      // won't be able to decode next superframe.
      const EncodedImage* base_image = nullptr;
      const FrameStatistics* base_stat = nullptr;
      for (size_t i = 0; i < num_spatial_layers; ++i) {
        const bool layer_dropped = (first_decoded_frame_[i] ||
                                    last_decoded_frame_num_[i] < frame_number);
        // Ensure current layer was decoded.
        RTC_CHECK(layer_dropped == false || i != stream_idx);
        if (!layer_dropped) {
          base_image = &merged_encoded_frames_[i];
          base_stat =
              stats_->GetFrameWithTimestamp(encoded_image.RtpTimestamp(), i);
        } else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
          DecodeFrame(*base_image, i);
        }
      }
    }
  } else {
    frame_stat->decode_return_code = WEBRTC_VIDEO_CODEC_NO_OUTPUT;
  }
  // Since frames in higher TLs typically depend on frames in lower TLs,
  // write out frames in lower TLs to bitstream dumps of higher TLs.
  for (size_t write_temporal_idx = temporal_idx;
       write_temporal_idx < config_.NumberOfTemporalLayers();
       ++write_temporal_idx) {
    const VideoProcessor::LayerKey layer_key(stream_idx, write_temporal_idx);
    auto it = encoded_frame_writers_->find(layer_key);
    if (it != encoded_frame_writers_->cend()) {
      RTC_CHECK(it->second->WriteFrame(*encoded_image_for_decode,
                                       config_.codec_settings.codecType));
    }
  }
  if (!config_.encode_in_real_time) {
    // To get pure encode time for next layers, measure time spent in encode
    // callback and subtract it from encode time of next layers.
    post_encode_time_ns_ += rtc::TimeNanos() - encode_stop_ns;
  }
}
void VideoProcessor::CalcFrameQuality(const I420BufferInterface& decoded_frame,
FrameStatistics* frame_stat) {
RTC_DCHECK_RUN_ON(&sequence_checker_);
const auto reference_frame = input_frames_.find(frame_stat->frame_number);
RTC_CHECK(reference_frame != input_frames_.cend())
<< "The codecs are either buffering too much, dropping too much, or "
"being too slow relative to the input frame rate.";
// SSIM calculation is not optimized. Skip it in real-time mode.
const bool calc_ssim = !config_.encode_in_real_time;
CalculateFrameQuality(*reference_frame->second.video_frame_buffer()->ToI420(),
decoded_frame, frame_stat, calc_ssim);
frame_stat->quality_analysis_successful = true;
}
// Writes `decoded_frame` to `frame_writer`, scaling it up/down to the
// configured codec resolution first if needed. Requires the frame to be
// tightly packed (strides equal to plane widths) so all three planes can be
// written in a single contiguous write starting at DataY().
void VideoProcessor::WriteDecodedFrame(const I420BufferInterface& decoded_frame,
                                       FrameWriter& frame_writer) {
  int input_video_width = config_.codec_settings.width;
  int input_video_height = config_.codec_settings.height;
  rtc::scoped_refptr<I420Buffer> scaled_buffer;
  const I420BufferInterface* scaled_frame;
  if (decoded_frame.width() == input_video_width &&
      decoded_frame.height() == input_video_height) {
    scaled_frame = &decoded_frame;
  } else {
    // Scaling must not change the aspect ratio.
    EXPECT_DOUBLE_EQ(
        static_cast<double>(input_video_width) / input_video_height,
        static_cast<double>(decoded_frame.width()) / decoded_frame.height());
    scaled_buffer = I420Buffer::Create(input_video_width, input_video_height);
    scaled_buffer->ScaleFrom(decoded_frame);
    scaled_frame = scaled_buffer.get();
  }
  // Ensure there is no padding, so the planes are contiguous in memory.
  RTC_CHECK_EQ(scaled_frame->StrideY(), input_video_width);
  RTC_CHECK_EQ(scaled_frame->StrideU(), input_video_width / 2);
  RTC_CHECK_EQ(scaled_frame->StrideV(), input_video_width / 2);
  // I420 frame size: Y plane plus two quarter-size chroma planes.
  RTC_CHECK_EQ(3 * input_video_width * input_video_height / 2,
               frame_writer.FrameLength());
  RTC_CHECK(frame_writer.WriteFrame(scaled_frame->DataY()));
}
// Decode-complete handler for one layer. Back-fills statistics/output for any
// frames dropped since the previous decode on this layer, records decode time
// and resolution, runs quality analysis, writes the decoded frame out, and
// prunes reference input frames that no layer can need anymore.
void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame,
                                  size_t spatial_idx) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  // For the highest measurement accuracy of the decode time, the start/stop
  // time recordings should wrap the Decode call as tightly as possible.
  const int64_t decode_stop_ns = rtc::TimeNanos();
  FrameStatistics* frame_stat =
      stats_->GetFrameWithTimestamp(decoded_frame.timestamp(), spatial_idx);
  const size_t frame_number = frame_stat->frame_number;
  // Handle frames dropped between the last decoded frame and this one.
  if (!first_decoded_frame_[spatial_idx]) {
    for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1;
         dropped_frame_number < frame_number; ++dropped_frame_number) {
      FrameStatistics* dropped_frame_stat =
          stats_->GetFrame(dropped_frame_number, spatial_idx);
      if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) {
        // Calculate frame quality comparing input frame with last decoded one.
        CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx],
                         dropped_frame_stat);
      }
      if (decoded_frame_writers_ != nullptr) {
        // Fill drops with last decoded frame to make them look like freeze at
        // playback and to keep decoded layers in sync.
        WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
                          *decoded_frame_writers_->at(spatial_idx));
      }
    }
  }
  // Ensure that the decode order is monotonically increasing, within this
  // simulcast/spatial layer.
  RTC_CHECK(first_decoded_frame_[spatial_idx] ||
            last_decoded_frame_num_[spatial_idx] < frame_number);
  first_decoded_frame_[spatial_idx] = false;
  last_decoded_frame_num_[spatial_idx] = frame_number;
  // Update frame statistics.
  frame_stat->decoding_successful = true;
  frame_stat->decode_time_us =
      GetElapsedTimeMicroseconds(frame_stat->decode_start_ns, decode_stop_ns);
  frame_stat->decoded_width = decoded_frame.width();
  frame_stat->decoded_height = decoded_frame.height();
  // Skip quality metrics calculation to not affect CPU usage.
  if (analyze_frame_quality_ || decoded_frame_writers_) {
    // Save last decoded frame to handle possible future drops.
    rtc::scoped_refptr<I420BufferInterface> i420buffer =
        decoded_frame.video_frame_buffer()->ToI420();
    // Copy decoded frame to a buffer without padding/stride such that we can
    // dump Y, U and V planes into a file in one shot.
    last_decoded_frame_buffer_[spatial_idx] = I420Buffer::Copy(
        i420buffer->width(), i420buffer->height(), i420buffer->DataY(),
        i420buffer->StrideY(), i420buffer->DataU(), i420buffer->StrideU(),
        i420buffer->DataV(), i420buffer->StrideV());
  }
  if (analyze_frame_quality_) {
    CalcFrameQuality(*decoded_frame.video_frame_buffer()->ToI420(), frame_stat);
  }
  if (decoded_frame_writers_ != nullptr) {
    WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
                      *decoded_frame_writers_->at(spatial_idx));
  }
  // Erase all buffered input frames that we have moved past for all
  // simulcast/spatial layers. Never buffer more than
  // `kMaxBufferedInputFrames` frames, to protect against long runs of
  // consecutive frame drops for a particular layer.
  const auto min_last_decoded_frame_num = std::min_element(
      last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend());
  const size_t min_buffered_frame_num =
      std::max(0, static_cast<int>(frame_number) - kMaxBufferedInputFrames + 1);
  RTC_CHECK(min_last_decoded_frame_num != last_decoded_frame_num_.cend());
  const auto input_frames_erase_before = input_frames_.lower_bound(
      std::max(*min_last_decoded_frame_num, min_buffered_frame_num));
  input_frames_.erase(input_frames_.cbegin(), input_frames_erase_before);
}
void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
size_t spatial_idx) {
RTC_DCHECK_RUN_ON(&sequence_checker_);
FrameStatistics* frame_stat =
stats_->GetFrameWithTimestamp(encoded_image.RtpTimestamp(), spatial_idx);
frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code =
decoders_->at(spatial_idx)->Decode(encoded_image, 0);
}
// Concatenates the current layer frame with the most recent non-dropped base
// layer frame (when inter-layer prediction is used) into a superframe, stores
// it in `merged_encoded_frames_` for this spatial layer, and returns a
// pointer to the stored image. Only valid for SVC configurations.
const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
    const EncodedImage& encoded_image,
    const VideoCodecType codec,
    size_t frame_number,
    size_t spatial_idx,
    bool inter_layer_predicted) {
  // Should only be called for SVC.
  RTC_CHECK_GT(config_.NumberOfSpatialLayers(), 1);
  EncodedImage base_image;
  RTC_CHECK_EQ(base_image.size(), 0);
  // Each SVC layer is decoded with dedicated decoder. Find the nearest
  // non-dropped base frame and merge it and current frame into superframe.
  if (inter_layer_predicted) {
    for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
         --base_idx) {
      EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
      // A matching RTP timestamp means the lower layer belongs to the same
      // picture, i.e. it was not dropped.
      if (lower_layer.RtpTimestamp() == encoded_image.RtpTimestamp()) {
        base_image = lower_layer;
        break;
      }
    }
  }
  // Concatenate base layer payload (if any) followed by this layer's payload.
  const size_t payload_size_bytes = base_image.size() + encoded_image.size();
  auto buffer = EncodedImageBuffer::Create(payload_size_bytes);
  if (base_image.size()) {
    RTC_CHECK(base_image.data());
    memcpy(buffer->data(), base_image.data(), base_image.size());
  }
  memcpy(buffer->data() + base_image.size(), encoded_image.data(),
         encoded_image.size());
  EncodedImage copied_image = encoded_image;
  copied_image.SetEncodedData(buffer);
  // The superframe inherits the frame type of its base layer, if present.
  if (base_image.size())
    copied_image._frameType = base_image._frameType;
  // Replace previous EncodedImage for this spatial layer.
  merged_encoded_frames_.at(spatial_idx) = std::move(copied_image);
  return &merged_encoded_frames_.at(spatial_idx);
}
// Finalizes processing: handles "tail drops", i.e. frames that were inputed
// but never decoded before the run ended, by analyzing their quality against
// the last decoded frame and/or repeating that frame in the output dumps.
// Called explicitly by the owner or implicitly from the destructor.
void VideoProcessor::Finalize() {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  RTC_DCHECK(!is_finalized_);
  is_finalized_ = true;
  // Nothing to do unless dropped-frame quality analysis or decoded-frame
  // dumping is enabled.
  if (!(analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) &&
      decoded_frame_writers_ == nullptr) {
    return;
  }
  for (size_t spatial_idx = 0; spatial_idx < num_simulcast_or_spatial_layers_;
       ++spatial_idx) {
    if (first_decoded_frame_[spatial_idx]) {
      continue;  // No decoded frames on this spatial layer.
    }
    for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1;
         dropped_frame_number < last_inputed_frame_num_;
         ++dropped_frame_number) {
      FrameStatistics* frame_stat =
          stats_->GetFrame(dropped_frame_number, spatial_idx);
      RTC_DCHECK(!frame_stat->decoding_successful);
      if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) {
        CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx], frame_stat);
      }
      if (decoded_frame_writers_ != nullptr) {
        WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx],
                          *decoded_frame_writers_->at(spatial_idx));
      }
    }
  }
}
} // namespace test
} // namespace webrtc

View file

@ -0,0 +1,264 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/sequence_checker.h"
#include "api/task_queue/task_queue_base.h"
#include "api/test/videocodec_test_fixture.h"
#include "api/video/encoded_image.h"
#include "api/video/i420_buffer.h"
#include "api/video/resolution.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video/video_bitrate_allocator.h"
#include "api/video/video_frame.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/utility/ivf_file_writer.h"
#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/no_unique_address.h"
#include "rtc_base/thread_annotations.h"
#include "test/testsupport/frame_reader.h"
#include "test/testsupport/frame_writer.h"
namespace webrtc {
namespace test {
// Handles encoding/decoding of video using the VideoEncoder/VideoDecoder
// interfaces. This is done in a sequential manner in order to be able to
// measure times properly.
// The class processes a frame at the time for the configured input file.
// It maintains state of where in the source input file the processing is at.
// TODO(webrtc:14852): Deprecated in favor VideoCodecTester.
class VideoProcessor {
 public:
  using VideoDecoderList = std::vector<std::unique_ptr<VideoDecoder>>;
  using LayerKey = std::pair<int /* spatial_idx */, int /* temporal_idx */>;
  using IvfFileWriterMap = std::map<LayerKey, std::unique_ptr<IvfFileWriter>>;
  // TODO(brandtr): Consider changing FrameWriterList to be a FrameWriterMap,
  // to be able to save different TLs separately.
  using FrameWriterList = std::vector<std::unique_ptr<FrameWriter>>;
  using FrameStatistics = VideoCodecTestStats::FrameStatistics;

  // All pointer arguments are borrowed and must outlive this object; one
  // decoder per simulcast/spatial layer is expected in `decoders`.
  VideoProcessor(webrtc::VideoEncoder* encoder,
                 VideoDecoderList* decoders,
                 FrameReader* input_frame_reader,
                 const VideoCodecTestFixture::Config& config,
                 VideoCodecTestStatsImpl* stats,
                 IvfFileWriterMap* encoded_frame_writers,
                 FrameWriterList* decoded_frame_writers);
  ~VideoProcessor();

  VideoProcessor(const VideoProcessor&) = delete;
  VideoProcessor& operator=(const VideoProcessor&) = delete;

  // Reads a frame and sends it to the encoder. When the encode callback
  // is received, the encoded frame is buffered. After encoding is finished
  // buffered frame is sent to decoder. Quality evaluation is done in
  // the decode callback.
  void ProcessFrame();

  // Updates the encoder with target rates. Must be called at least once.
  void SetRates(size_t bitrate_kbps, double framerate_fps);

  // Signals processor to finalize frame processing and handle possible tail
  // drops. If not called explicitly, this will be called in dtor. It is
  // unexpected to get ProcessFrame() or SetRates() calls after Finalize().
  void Finalize();

 private:
  // Adapter that forwards encoder callbacks to FrameEncoded() on the
  // processor's task queue.
  class VideoProcessorEncodeCompleteCallback
      : public webrtc::EncodedImageCallback {
   public:
    explicit VideoProcessorEncodeCompleteCallback(
        VideoProcessor* video_processor)
        : video_processor_(video_processor),
          task_queue_(TaskQueueBase::Current()) {
      RTC_DCHECK(video_processor_);
      RTC_DCHECK(task_queue_);
    }
    Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info) override {
      RTC_CHECK(codec_specific_info);
      // Post the callback to the right task queue, if needed.
      if (!task_queue_->IsCurrent()) {
        VideoProcessor* video_processor = video_processor_;
        task_queue_->PostTask([video_processor, encoded_image,
                               codec_specific_info = *codec_specific_info] {
          video_processor->FrameEncoded(encoded_image, codec_specific_info);
        });
        return Result(Result::OK, 0);
      }
      video_processor_->FrameEncoded(encoded_image, *codec_specific_info);
      return Result(Result::OK, 0);
    }

   private:
    VideoProcessor* const video_processor_;
    TaskQueueBase* const task_queue_;
  };

  // Adapter that forwards decoder callbacks, tagged with the layer index, to
  // FrameDecoded() on the processor's task queue.
  class VideoProcessorDecodeCompleteCallback
      : public webrtc::DecodedImageCallback {
   public:
    explicit VideoProcessorDecodeCompleteCallback(
        VideoProcessor* video_processor,
        size_t simulcast_svc_idx)
        : video_processor_(video_processor),
          simulcast_svc_idx_(simulcast_svc_idx),
          task_queue_(TaskQueueBase::Current()) {
      RTC_DCHECK(video_processor_);
      RTC_DCHECK(task_queue_);
    }
    int32_t Decoded(webrtc::VideoFrame& image) override;
    int32_t Decoded(webrtc::VideoFrame& image,
                    int64_t decode_time_ms) override {
      return Decoded(image);
    }
    void Decoded(webrtc::VideoFrame& image,
                 absl::optional<int32_t> decode_time_ms,
                 absl::optional<uint8_t> qp) override {
      Decoded(image);
    }

   private:
    VideoProcessor* const video_processor_;
    const size_t simulcast_svc_idx_;
    TaskQueueBase* const task_queue_;
  };

  // Invoked by the callback adapter when a frame has completed encoding.
  void FrameEncoded(const webrtc::EncodedImage& encoded_image,
                    const webrtc::CodecSpecificInfo& codec_specific);

  // Invoked by the callback adapter when a frame has completed decoding.
  void FrameDecoded(const webrtc::VideoFrame& image, size_t simulcast_svc_idx);

  // Sends an encoded frame to the decoder of the given layer, recording the
  // decode start time and return code.
  void DecodeFrame(const EncodedImage& encoded_image, size_t simulcast_svc_idx);

  // In order to supply the SVC decoders with super frames containing all
  // lower layer frames, we merge and store the layer frames in this method.
  const webrtc::EncodedImage* BuildAndStoreSuperframe(
      const EncodedImage& encoded_image,
      VideoCodecType codec,
      size_t frame_number,
      size_t simulcast_svc_idx,
      bool inter_layer_predicted) RTC_RUN_ON(sequence_checker_);

  // Compares `decoded_frame` against the buffered input frame and fills the
  // quality fields of `frame_stat`.
  void CalcFrameQuality(const I420BufferInterface& decoded_frame,
                        FrameStatistics* frame_stat);

  // Writes `decoded_frame` (scaled to the configured resolution if needed) to
  // `frame_writer`.
  void WriteDecodedFrame(const I420BufferInterface& decoded_frame,
                         FrameWriter& frame_writer);

  void HandleTailDrops();

  // Test config.
  const VideoCodecTestFixture::Config config_;
  const size_t num_simulcast_or_spatial_layers_;
  const bool analyze_frame_quality_;

  // Frame statistics.
  VideoCodecTestStatsImpl* const stats_;

  // Codecs.
  webrtc::VideoEncoder* const encoder_;
  VideoDecoderList* const decoders_;
  const std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;

  // Target bitrate and framerate per frame.
  std::map<size_t, RateProfile> target_rates_ RTC_GUARDED_BY(sequence_checker_);

  // Adapters for the codec callbacks.
  VideoProcessorEncodeCompleteCallback encode_callback_;
  // Assign separate callback object to each decoder. This allows us to identify
  // decoded layer in frame decode callback.
  // simulcast_svc_idx -> decode callback.
  std::vector<std::unique_ptr<VideoProcessorDecodeCompleteCallback>>
      decode_callback_;

  // Each call to ProcessFrame() will read one frame from `input_frame_reader_`.
  FrameReader* const input_frame_reader_;

  // Input frames are used as reference for frame quality evaluations.
  // Async codecs might queue frames. To handle that we keep input frame
  // and release it after corresponding coded frame is decoded and quality
  // measurement is done.
  // frame_number -> frame.
  std::map<size_t, VideoFrame> input_frames_ RTC_GUARDED_BY(sequence_checker_);

  // Encoder delivers coded frame layer-by-layer. We store coded frames and
  // then, after all layers are encoded, decode them. Such separation of
  // frame processing on superframe level simplifies encoding/decoding time
  // measurement.
  // simulcast_svc_idx -> merged SVC encoded frame.
  std::vector<EncodedImage> merged_encoded_frames_
      RTC_GUARDED_BY(sequence_checker_);

  // These (optional) file writers are used to persistently store the encoded
  // and decoded bitstreams. Each frame writer is enabled by being non-null.
  IvfFileWriterMap* const encoded_frame_writers_;
  FrameWriterList* const decoded_frame_writers_;

  // Metadata for inputed/encoded/decoded frames. Used for frame identification,
  // frame drop detection, etc. We assume that encoded/decoded frames are
  // ordered within each simulcast/spatial layer, but we do not make any
  // assumptions of frame ordering between layers.
  size_t last_inputed_frame_num_ RTC_GUARDED_BY(sequence_checker_);
  size_t last_inputed_timestamp_ RTC_GUARDED_BY(sequence_checker_);
  // simulcast_svc_idx -> encode status.
  std::vector<bool> first_encoded_frame_ RTC_GUARDED_BY(sequence_checker_);
  // simulcast_svc_idx -> frame_number.
  std::vector<size_t> last_encoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
  // simulcast_svc_idx -> decode status.
  std::vector<bool> first_decoded_frame_ RTC_GUARDED_BY(sequence_checker_);
  // simulcast_svc_idx -> frame_number.
  std::vector<size_t> last_decoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
  // simulcast_svc_idx -> buffer.
  std::vector<rtc::scoped_refptr<I420Buffer>> last_decoded_frame_buffer_
      RTC_GUARDED_BY(sequence_checker_);

  // Time spent in frame encode callback. It is accumulated for layers and
  // reset when frame encode starts. When next layer is encoded post-encode time
  // is subtracted from measured encode time. Thus we get pure encode time.
  int64_t post_encode_time_ns_ RTC_GUARDED_BY(sequence_checker_);

  // Indicates whether Finalize() was called or not.
  bool is_finalized_ RTC_GUARDED_BY(sequence_checker_);

  // This class must be operated on a TaskQueue.
  RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
};
} // namespace test
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_TEST_VIDEOPROCESSOR_H_

View file

@ -0,0 +1,196 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/test/videoprocessor.h"
#include <memory>
#include "api/scoped_refptr.h"
#include "api/test/mock_video_decoder.h"
#include "api/test/mock_video_encoder.h"
#include "api/test/videocodec_test_fixture.h"
#include "api/video/i420_buffer.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/test/videocodec_test_stats_impl.h"
#include "rtc_base/task_queue_for_test.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/testsupport/mock/mock_frame_reader.h"
using ::testing::_;
using ::testing::AllOf;
using ::testing::Field;
using ::testing::Property;
using ::testing::ResultOf;
using ::testing::Return;
namespace webrtc {
namespace test {
namespace {

// Frame dimensions used for all tests in this file (352x288, i.e. CIF).
const int kWidth = 352;
const int kHeight = 288;

}  // namespace
// Fixture that constructs a VideoProcessor on a dedicated task queue with a
// mocked encoder, decoder, and frame reader. The processor is created in the
// fixture constructor and destroyed (on the same queue) in the destructor.
class VideoProcessorTest : public ::testing::Test {
 protected:
  VideoProcessorTest() : q_("VP queue") {
    config_.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false,
                             false, kWidth, kHeight);
    // Ownership of the mock decoder is transferred to `decoders_`; the raw
    // pointer is kept for setting expectations.
    decoder_mock_ = new MockVideoDecoder();
    decoders_.push_back(std::unique_ptr<VideoDecoder>(decoder_mock_));
    ExpectInit();
    // VideoProcessor must be constructed on its task queue.
    q_.SendTask([this] {
      video_processor_ = std::make_unique<VideoProcessor>(
          &encoder_mock_, &decoders_, &frame_reader_mock_, config_, &stats_,
          &encoded_frame_writers_, /*decoded_frame_writers=*/nullptr);
    });
  }

  ~VideoProcessorTest() {
    // Destruction must also happen on the task queue.
    q_.SendTask([this] { video_processor_.reset(); });
  }

  // Expectations for codec setup performed by the VideoProcessor constructor.
  void ExpectInit() {
    EXPECT_CALL(encoder_mock_, InitEncode(_, _));
    EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback);
    EXPECT_CALL(*decoder_mock_, Configure);
    EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback);
  }

  // Expectations for codec teardown performed by the VideoProcessor dtor.
  void ExpectRelease() {
    EXPECT_CALL(encoder_mock_, Release()).Times(1);
    EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_)).Times(1);
    EXPECT_CALL(*decoder_mock_, Release()).Times(1);
    EXPECT_CALL(*decoder_mock_, RegisterDecodeCompleteCallback(_)).Times(1);
  }

  TaskQueueForTest q_;

  VideoCodecTestFixture::Config config_;

  MockVideoEncoder encoder_mock_;
  MockVideoDecoder* decoder_mock_;
  std::vector<std::unique_ptr<VideoDecoder>> decoders_;
  MockFrameReader frame_reader_mock_;
  VideoCodecTestStatsImpl stats_;
  VideoProcessor::IvfFileWriterMap encoded_frame_writers_;
  std::unique_ptr<VideoProcessor> video_processor_;
};
// Construct-and-destroy smoke test: the fixture ctor arms the init
// expectations, this arms the release ones, and the fixture dtor tears the
// processor down — verifying init/release happen exactly once each.
TEST_F(VideoProcessorTest, InitRelease) {
  ExpectRelease();
}
// With a fixed framerate, consecutive frames must carry RTP timestamps that
// advance by 90000/fps per frame (90 kHz RTP clock). Each EXPECT_CALL is set
// before the SendTask that triggers it, as gmock requires.
TEST_F(VideoProcessorTest, ProcessFrames_FixedFramerate) {
  const int kBitrateKbps = 456;
  const int kFramerateFps = 31;
  EXPECT_CALL(
      encoder_mock_,
      SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
                     static_cast<double>(kFramerateFps))))
      .Times(1);
  q_.SendTask([=] { video_processor_->SetRates(kBitrateKbps, kFramerateFps); });

  // Every pulled frame is a blank I420 buffer of the configured size.
  EXPECT_CALL(frame_reader_mock_, PullFrame(_, _, _))
      .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
  // First frame: timestamp = 1 * 90000 / fps.
  EXPECT_CALL(
      encoder_mock_,
      Encode(Property(&VideoFrame::timestamp, 1 * 90000 / kFramerateFps), _))
      .Times(1);
  q_.SendTask([this] { video_processor_->ProcessFrame(); });

  // Second frame: timestamp = 2 * 90000 / fps.
  EXPECT_CALL(
      encoder_mock_,
      Encode(Property(&VideoFrame::timestamp, 2 * 90000 / kFramerateFps), _))
      .Times(1);
  q_.SendTask([this] { video_processor_->ProcessFrame(); });

  ExpectRelease();
}
// When the framerate changes between frames, the per-frame RTP timestamp
// increment (90000/fps) must follow the framerate in effect for that frame,
// accumulating on top of the previous frame's timestamp.
TEST_F(VideoProcessorTest, ProcessFrames_VariableFramerate) {
  const int kBitrateKbps = 456;
  const int kStartFramerateFps = 27;
  // Timestamp of the first frame at the initial framerate.
  const int kStartTimestamp = 90000 / kStartFramerateFps;
  EXPECT_CALL(
      encoder_mock_,
      SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
                     static_cast<double>(kStartFramerateFps))))
      .Times(1);
  q_.SendTask(
      [=] { video_processor_->SetRates(kBitrateKbps, kStartFramerateFps); });

  EXPECT_CALL(frame_reader_mock_, PullFrame(_, _, _))
      .WillRepeatedly(Return(I420Buffer::Create(kWidth, kHeight)));
  EXPECT_CALL(encoder_mock_,
              Encode(Property(&VideoFrame::timestamp, kStartTimestamp), _))
      .Times(1);
  q_.SendTask([this] { video_processor_->ProcessFrame(); });

  // Drop the framerate; the next frame's timestamp step must use the new fps.
  const int kNewFramerateFps = 13;
  EXPECT_CALL(
      encoder_mock_,
      SetRates(Field(&VideoEncoder::RateControlParameters::framerate_fps,
                     static_cast<double>(kNewFramerateFps))))
      .Times(1);
  q_.SendTask(
      [=] { video_processor_->SetRates(kBitrateKbps, kNewFramerateFps); });

  EXPECT_CALL(encoder_mock_,
              Encode(Property(&VideoFrame::timestamp,
                              kStartTimestamp + 90000 / kNewFramerateFps),
                     _))
      .Times(1);
  q_.SendTask([this] { video_processor_->ProcessFrame(); });

  ExpectRelease();
}
// Verifies that VideoProcessor::SetRates forwards both the target bitrate
// (summed across layers, checked via ResultOf on get_sum_kbps) and the
// framerate to the encoder, including on a subsequent rate update.
TEST_F(VideoProcessorTest, SetRates) {
  const uint32_t kBitrateKbps = 123;
  const int kFramerateFps = 17;

  EXPECT_CALL(
      encoder_mock_,
      SetRates(AllOf(ResultOf(
                         [](const VideoEncoder::RateControlParameters& params) {
                           return params.bitrate.get_sum_kbps();
                         },
                         kBitrateKbps),
                     Field(&VideoEncoder::RateControlParameters::framerate_fps,
                           static_cast<double>(kFramerateFps)))))
      .Times(1);
  q_.SendTask([=] { video_processor_->SetRates(kBitrateKbps, kFramerateFps); });

  // A second SetRates with different values must reach the encoder too.
  const uint32_t kNewBitrateKbps = 456;
  const int kNewFramerateFps = 34;
  EXPECT_CALL(
      encoder_mock_,
      SetRates(AllOf(ResultOf(
                         [](const VideoEncoder::RateControlParameters& params) {
                           return params.bitrate.get_sum_kbps();
                         },
                         kNewBitrateKbps),
                     Field(&VideoEncoder::RateControlParameters::framerate_fps,
                           static_cast<double>(kNewFramerateFps)))))
      .Times(1);
  q_.SendTask(
      [=] { video_processor_->SetRates(kNewBitrateKbps, kNewFramerateFps); });

  ExpectRelease();
}
} // namespace test
} // namespace webrtc