Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,4 @@
ilnik@webrtc.org
mflodman@webrtc.org
perkj@webrtc.org
tkchin@webrtc.org

View file

@ -0,0 +1,211 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/device_info_impl.h"
#include <stdlib.h>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "rtc_base/logging.h"
// Fallback for toolchains that do not provide an `abs` macro/function here.
// NOTE: the argument and each expansion are fully parenthesized; the previous
// form `(a >= 0 ? a : -a)` mis-evaluated expression arguments, e.g.
// abs(x - y) expanded its negative branch to `-x - y` instead of `-(x - y)`.
// (Beware: as with any function-like macro, the argument is still evaluated
// twice, so do not pass expressions with side effects.)
#ifndef abs
#define abs(a) (((a) >= 0) ? (a) : -(a))
#endif
namespace webrtc {
namespace videocapturemodule {
// Starts with no cached device; the capability map is built lazily by
// CreateCapabilityMap() on the first per-device query.
// (Modernized: nullptr instead of NULL for the pointer member.)
DeviceInfoImpl::DeviceInfoImpl()
    : _lastUsedDeviceName(nullptr), _lastUsedDeviceNameLength(0) {}
// Releases the cached device-name buffer. The buffer is allocated with
// realloc() by subclasses' CreateCapabilityMap(), so free() is the matching
// deallocator; free(nullptr) is a no-op if no device was ever queried.
DeviceInfoImpl::~DeviceInfoImpl(void) {
  MutexLock lock(&_apiLock);
  free(_lastUsedDeviceName);
}
// Returns the number of capabilities for `deviceUniqueIdUTF8`, or -1 on a
// null id or if the capability map could not be built. The result of the
// previous query is cached, so asking about the same device repeatedly is
// cheap.
int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
  if (!deviceUniqueIdUTF8)
    return -1;
  MutexLock lock(&_apiLock);
  // Same device as the previous query? Reuse the cached capability list.
  const absl::string_view cached_name(_lastUsedDeviceName,
                                      _lastUsedDeviceNameLength);
  if (absl::EqualsIgnoreCase(deviceUniqueIdUTF8, cached_name))
    return static_cast<int32_t>(_captureCapabilities.size());
  // Different device: rebuild the map; the subclass returns the new count.
  return CreateCapabilityMap(deviceUniqueIdUTF8);
}
// Copies the `deviceCapabilityNumber`-th capability of the given device into
// `capability`. Rebuilds the capability map first when the device differs
// from the previously queried one. Returns 0 on success, -1 on an unknown
// device or an out-of-range capability index.
int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8,
                                      const uint32_t deviceCapabilityNumber,
                                      VideoCaptureCapability& capability) {
  RTC_DCHECK(deviceUniqueIdUTF8);
  MutexLock lock(&_apiLock);
  // A different device than the cached one invalidates the capability list.
  if (!absl::EqualsIgnoreCase(
          deviceUniqueIdUTF8,
          absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) {
    if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
      return -1;
    }
  }
  // Make sure the requested index is within the rebuilt list.
  if (deviceCapabilityNumber >= (unsigned int)_captureCapabilities.size()) {
    RTC_LOG(LS_ERROR) << "Invalid deviceCapabilityNumber "
                      << deviceCapabilityNumber << ">= number of capabilities ("
                      << _captureCapabilities.size() << ").";
    return -1;
  }
  capability = _captureCapabilities[deviceCapabilityNumber];
  return 0;
}
// Picks the capability of `deviceUniqueIdUTF8` that best matches `requested`
// and copies it into `resulting`. Returns the index of the chosen capability
// (>= 0) or -1 if the device is unknown or nothing matched. Selection is
// hierarchical: height first, then width, then frame rate, then pixel format.
// Note: `abs` here is the file-local macro defined above; arguments are plain
// variables, so the macro's double evaluation is harmless in this function.
int32_t DeviceInfoImpl::GetBestMatchedCapability(
    const char* deviceUniqueIdUTF8,
    const VideoCaptureCapability& requested,
    VideoCaptureCapability& resulting) {
  if (!deviceUniqueIdUTF8)
    return -1;
  MutexLock lock(&_apiLock);
  // Rebuild the capability cache when a different device is queried.
  if (!absl::EqualsIgnoreCase(
          deviceUniqueIdUTF8,
          absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) {
    if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
      return -1;
    }
  }
  int32_t bestformatIndex = -1;
  int32_t bestWidth = 0;
  int32_t bestHeight = 0;
  int32_t bestFrameRate = 0;
  VideoType bestVideoType = VideoType::kUnknown;
  const int32_t numberOfCapabilies =
      static_cast<int32_t>(_captureCapabilities.size());
  for (int32_t tmp = 0; tmp < numberOfCapabilies;
       ++tmp)  // Loop through all capabilities.
  {
    VideoCaptureCapability& capability = _captureCapabilities[tmp];
    // Positive diff: capability exceeds the request; negative: falls short.
    const int32_t diffWidth = capability.width - requested.width;
    const int32_t diffHeight = capability.height - requested.height;
    const int32_t diffFrameRate = capability.maxFPS - requested.maxFPS;
    const int32_t currentbestDiffWith = bestWidth - requested.width;
    const int32_t currentbestDiffHeight = bestHeight - requested.height;
    const int32_t currentbestDiffFrameRate = bestFrameRate - requested.maxFPS;
    // Height at least as good as the current best: either we meet the request
    // with no more overshoot than before, or both fall short and this one
    // falls short by less.
    if ((diffHeight >= 0 &&
         diffHeight <= abs(currentbestDiffHeight))
        || (currentbestDiffHeight < 0 && diffHeight >= currentbestDiffHeight)) {
      if (diffHeight ==
          currentbestDiffHeight)  // Height ties the best; decide on width.
      {
        // Same better-or-equal rule applied to the width diff.
        if ((diffWidth >= 0 &&
             diffWidth <= abs(currentbestDiffWith))
            || (currentbestDiffWith < 0 && diffWidth >= currentbestDiffWith)) {
          if (diffWidth == currentbestDiffWith &&
              diffHeight == currentbestDiffHeight)  // Size ties the best too.
          {
            // Size fully tied: decide on frame rate.
            if (((diffFrameRate >= 0 &&
                  diffFrameRate <=
                      currentbestDiffFrameRate)  // Rate overshoots the request
                                                 // but by no more than the
                                                 // current best.
                 || (currentbestDiffFrameRate < 0 &&
                     diffFrameRate >=
                         currentbestDiffFrameRate))  // Both fall short of the
                                                     // requested rate; this
                                                     // one by less.
                ) {
              if ((currentbestDiffFrameRate ==
                   diffFrameRate)  // Rate ties the best, or the best already
                                   // meets the requested rate...
                  || (currentbestDiffFrameRate >= 0)) {
                // ...so break the full tie on pixel format: prefer the
                // requested format, or one of the cheaply-convertible YUV
                // formats (I420/YUY2/YV12/NV12).
                if (bestVideoType != requested.videoType &&
                    requested.videoType != VideoType::kUnknown &&
                    (capability.videoType == requested.videoType ||
                     capability.videoType == VideoType::kI420 ||
                     capability.videoType == VideoType::kYUY2 ||
                     capability.videoType == VideoType::kYV12 ||
                     capability.videoType == VideoType::kNV12)) {
                  bestVideoType = capability.videoType;
                  bestformatIndex = tmp;
                }
                // An exact size match that also satisfies the frame rate
                // always wins, regardless of format.
                if (capability.height == requested.height &&
                    capability.width == requested.width &&
                    capability.maxFPS >= requested.maxFPS) {
                  bestformatIndex = tmp;
                }
              } else  // Strictly better frame rate: adopt this capability.
              {
                bestWidth = capability.width;
                bestHeight = capability.height;
                bestFrameRate = capability.maxFPS;
                bestVideoType = capability.videoType;
                bestformatIndex = tmp;
              }
            }
          } else  // Strictly better width: adopt this capability.
          {
            bestWidth = capability.width;
            bestHeight = capability.height;
            bestFrameRate = capability.maxFPS;
            bestVideoType = capability.videoType;
            bestformatIndex = tmp;
          }
        }  // else: width is worse; keep the current best.
      } else  // Strictly better height: adopt this capability.
      {
        bestWidth = capability.width;
        bestHeight = capability.height;
        bestFrameRate = capability.maxFPS;
        bestVideoType = capability.videoType;
        bestformatIndex = tmp;
      }
    }  // else: height is worse; keep the current best.
  }  // end for
  RTC_LOG(LS_VERBOSE) << "Best camera format: " << bestWidth << "x"
                      << bestHeight << "@" << bestFrameRate
                      << "fps, color format: "
                      << static_cast<int>(bestVideoType);
  // Copy the winning capability out, if any was found.
  if (bestformatIndex < 0)
    return -1;
  resulting = _captureCapabilities[bestformatIndex];
  return bestformatIndex;
}
// Default implementation. This should be overridden by Mobile implementations.
// Reports kVideoRotation_0 and returns -1 to signal that orientation is not
// supported by this (desktop) device info.
int32_t DeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
                                       VideoRotation& orientation) {
  orientation = kVideoRotation_0;
  return -1;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,62 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_DEVICE_INFO_IMPL_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_DEVICE_INFO_IMPL_H_
#include <stdint.h>
#include <vector>
#include "api/video/video_rotation.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
namespace videocapturemodule {
// Shared base for platform DeviceInfo implementations. Owns a cache of the
// capabilities of the most recently queried device (`_captureCapabilities`,
// keyed by `_lastUsedDeviceName`) so repeated queries for the same device
// avoid re-probing hardware. All public entry points serialize on `_apiLock`.
class DeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
 public:
  DeviceInfoImpl();
  ~DeviceInfoImpl(void) override;
  // Number of capabilities for the device, or -1 on error.
  int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) override;
  // Fetches one capability by index; 0 on success, -1 on error.
  int32_t GetCapability(const char* deviceUniqueIdUTF8,
                        uint32_t deviceCapabilityNumber,
                        VideoCaptureCapability& capability) override;
  // Picks the capability closest to `requested`; returns its index or -1.
  int32_t GetBestMatchedCapability(const char* deviceUniqueIdUTF8,
                                   const VideoCaptureCapability& requested,
                                   VideoCaptureCapability& resulting) override;
  // Desktop default reports kVideoRotation_0 and returns -1 (unsupported).
  int32_t GetOrientation(const char* deviceUniqueIdUTF8,
                         VideoRotation& orientation) override;

 protected:
  /* Initialize this object*/
  virtual int32_t Init() = 0;
  /*
   * Fills the member variable _captureCapabilities with capabilities for the
   * given device name.
   */
  virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock) = 0;

 protected:
  // Data members
  typedef std::vector<VideoCaptureCapability> VideoCaptureCapabilities;
  // Capabilities of the device named by `_lastUsedDeviceName`.
  VideoCaptureCapabilities _captureCapabilities RTC_GUARDED_BY(_apiLock);
  Mutex _apiLock;
  // realloc()-managed copy of the last queried device's unique id; freed in
  // the destructor.
  char* _lastUsedDeviceName RTC_GUARDED_BY(_apiLock);
  uint32_t _lastUsedDeviceNameLength RTC_GUARDED_BY(_apiLock);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_DEVICE_INFO_IMPL_H_

View file

@ -0,0 +1,256 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/camera_portal.h"
#include <gio/gio.h>
#include <gio/gunixfdlist.h>
#include "modules/portal/pipewire_utils.h"
#include "modules/portal/xdg_desktop_portal_utils.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
using xdg_portal::RequestResponse;
using xdg_portal::RequestResponseFromPortalResponse;
using xdg_portal::RequestSessionProxy;
constexpr char kCameraInterfaceName[] = "org.freedesktop.portal.Camera";
// Implementation object behind CameraPortal. Drives the asynchronous
// org.freedesktop.portal.Camera D-Bus flow: proxy creation -> AccessCamera ->
// response signal -> OpenPipeWireRemote -> fd delivery, reporting the final
// outcome through the notifier exactly once via OnPortalDone().
class CameraPortalPrivate {
 public:
  explicit CameraPortalPrivate(CameraPortal::PortalNotifier* notifier);
  ~CameraPortalPrivate();

  // Kicks off the asynchronous portal request chain.
  void Start();

 private:
  // Final step: forwards the result (and PipeWire fd on success) to the
  // notifier, at most once.
  void OnPortalDone(xdg_portal::RequestResponse result,
                    int fd = kInvalidPipeWireFd);
  // GAsync callback for proxy creation; `user_data` is `this`.
  static void OnProxyRequested(GObject* object,
                               GAsyncResult* result,
                               gpointer user_data);
  // Continues on the instance once the proxy exists: subscribes to the
  // response signal and calls AccessCamera.
  void ProxyRequested(GDBusProxy* proxy);
  // GAsync callback for the AccessCamera call itself (not its result signal).
  static void OnAccessResponse(GDBusProxy* proxy,
                               GAsyncResult* result,
                               gpointer user_data);
  // Portal Request/Response signal handler carrying the user's decision.
  static void OnResponseSignalEmitted(GDBusConnection* connection,
                                      const char* sender_name,
                                      const char* object_path,
                                      const char* interface_name,
                                      const char* signal_name,
                                      GVariant* parameters,
                                      gpointer user_data);
  // GAsync callback for OpenPipeWireRemote; extracts the PipeWire fd.
  static void OnOpenResponse(GDBusProxy* proxy,
                             GAsyncResult* result,
                             gpointer user_data);

  webrtc::Mutex notifier_lock_;
  // Cleared in the destructor (and after delivery) so late async callbacks
  // cannot notify a dead owner.
  CameraPortal::PortalNotifier* notifier_ RTC_GUARDED_BY(&notifier_lock_) =
      nullptr;

  // Borrowed from proxy_; not separately referenced.
  GDBusConnection* connection_ = nullptr;
  GDBusProxy* proxy_ = nullptr;
  GCancellable* cancellable_ = nullptr;
  guint access_request_signal_id_ = 0;
};
CameraPortalPrivate::CameraPortalPrivate(CameraPortal::PortalNotifier* notifier)
    : notifier_(notifier) {}

// Tears down in a safe order: first detach the notifier (so in-flight async
// callbacks cannot call back into a destroyed owner), then unsubscribe the
// signal, cancel pending GIO operations, and drop the proxy reference.
CameraPortalPrivate::~CameraPortalPrivate() {
  {
    webrtc::MutexLock lock(&notifier_lock_);
    notifier_ = nullptr;
  }

  if (access_request_signal_id_) {
    g_dbus_connection_signal_unsubscribe(connection_,
                                         access_request_signal_id_);
    access_request_signal_id_ = 0;
  }
  if (cancellable_) {
    g_cancellable_cancel(cancellable_);
    g_object_unref(cancellable_);
    cancellable_ = nullptr;
  }
  if (proxy_) {
    g_object_unref(proxy_);
    proxy_ = nullptr;
    // connection_ was borrowed from proxy_, so it is just cleared, not unref'd.
    connection_ = nullptr;
  }
}
// Begins the portal handshake by asynchronously requesting a proxy for the
// Camera interface; the flow continues in OnProxyRequested().
// (Removed an unused local `Scoped<GError> error;` — RequestSessionProxy
// reports failures through the OnProxyRequested callback, not an out-param.)
void CameraPortalPrivate::Start() {
  cancellable_ = g_cancellable_new();
  RequestSessionProxy(kCameraInterfaceName, OnProxyRequested, cancellable_,
                      this);
}
// static
// Completion callback for the proxy request. On success hands the proxy to
// the instance; on failure reports kError — unless the failure is our own
// cancellation from the destructor, which must be ignored because `this` may
// already be gone conceptually (the request owner cancelled it).
void CameraPortalPrivate::OnProxyRequested(GObject* gobject,
                                           GAsyncResult* result,
                                           gpointer user_data) {
  CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
  Scoped<GError> error;
  GDBusProxy* proxy = g_dbus_proxy_new_finish(result, error.receive());
  if (!proxy) {
    // Ignore the error caused by user cancelling the request via `cancellable_`
    if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
      return;
    RTC_LOG(LS_ERROR) << "Failed to get a proxy for the portal: "
                      << error->message;
    that->OnPortalDone(RequestResponse::kError);
    return;
  }

  RTC_LOG(LS_VERBOSE) << "Successfully created proxy for the portal.";
  that->ProxyRequested(proxy);
}
// Takes ownership of the freshly created proxy, subscribes to the portal
// Request/Response signal under a random handle token, and issues the
// AccessCamera call. The actual grant/deny arrives via
// OnResponseSignalEmitted; OnAccessResponse only sees call-level failures.
void CameraPortalPrivate::ProxyRequested(GDBusProxy* proxy) {
  GVariantBuilder builder;
  Scoped<char> variant_string;
  std::string access_handle;

  proxy_ = proxy;
  // Borrow the proxy's connection; not separately referenced (see dtor).
  connection_ = g_dbus_proxy_get_connection(proxy);

  g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
  // Random token ties our AccessCamera call to its response signal handle.
  variant_string =
      g_strdup_printf("capture%d", g_random_int_range(0, G_MAXINT));
  g_variant_builder_add(&builder, "{sv}", "handle_token",
                        g_variant_new_string(variant_string.get()));
  access_handle =
      xdg_portal::PrepareSignalHandle(variant_string.get(), connection_);
  access_request_signal_id_ = xdg_portal::SetupRequestResponseSignal(
      access_handle.c_str(), OnResponseSignalEmitted, this, connection_);

  RTC_LOG(LS_VERBOSE) << "Requesting camera access from the portal.";
  g_dbus_proxy_call(proxy_, "AccessCamera", g_variant_new("(a{sv})", &builder),
                    G_DBUS_CALL_FLAGS_NONE, /*timeout_msec=*/-1, cancellable_,
                    reinterpret_cast<GAsyncReadyCallback>(OnAccessResponse),
                    this);
}
// static
// Completion callback for the AccessCamera D-Bus call. A successful call
// means only that the request was accepted; the user's decision arrives later
// through OnResponseSignalEmitted, so nothing happens here on success. On
// failure (other than our own cancellation) the response-signal subscription
// is dropped and kError is reported.
void CameraPortalPrivate::OnAccessResponse(GDBusProxy* proxy,
                                           GAsyncResult* result,
                                           gpointer user_data) {
  CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
  RTC_DCHECK(that);

  Scoped<GError> error;
  Scoped<GVariant> variant(
      g_dbus_proxy_call_finish(proxy, result, error.receive()));
  if (!variant) {
    if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
      return;
    RTC_LOG(LS_ERROR) << "Failed to access portal:" << error->message;
    if (that->access_request_signal_id_) {
      g_dbus_connection_signal_unsubscribe(that->connection_,
                                           that->access_request_signal_id_);
      that->access_request_signal_id_ = 0;
    }
    that->OnPortalDone(RequestResponse::kError);
  }
}
// static
// Handles the portal's Request/Response signal carrying the user's decision
// for AccessCamera. A non-zero response code means denial/failure and ends
// the flow; zero means access was granted, in which case OpenPipeWireRemote
// is called to obtain the PipeWire connection fd (continued in
// OnOpenResponse).
void CameraPortalPrivate::OnResponseSignalEmitted(GDBusConnection* connection,
                                                  const char* sender_name,
                                                  const char* object_path,
                                                  const char* interface_name,
                                                  const char* signal_name,
                                                  GVariant* parameters,
                                                  gpointer user_data) {
  CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
  RTC_DCHECK(that);

  uint32_t portal_response;
  // Signal payload is (u, a{sv}); only the response code is needed.
  g_variant_get(parameters, "(u@a{sv})", &portal_response, nullptr);
  if (portal_response) {
    RTC_LOG(LS_INFO) << "Camera access denied by the XDG portal.";
    that->OnPortalDone(RequestResponseFromPortalResponse(portal_response));
    return;
  }

  RTC_LOG(LS_VERBOSE) << "Camera access granted by the XDG portal.";
  GVariantBuilder builder;
  g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);

  g_dbus_proxy_call(
      that->proxy_, "OpenPipeWireRemote", g_variant_new("(a{sv})", &builder),
      G_DBUS_CALL_FLAGS_NONE, /*timeout_msec=*/-1, that->cancellable_,
      reinterpret_cast<GAsyncReadyCallback>(OnOpenResponse), that);
}
// static
// Completion callback for OpenPipeWireRemote. The reply carries a handle
// index into an attached unix fd list; on success the extracted PipeWire fd
// is handed to the notifier via OnPortalDone(kSuccess, fd). Ownership of the
// fd transfers to the notifier.
void CameraPortalPrivate::OnOpenResponse(GDBusProxy* proxy,
                                         GAsyncResult* result,
                                         gpointer user_data) {
  CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
  RTC_DCHECK(that);

  Scoped<GError> error;
  Scoped<GUnixFDList> outlist;
  Scoped<GVariant> variant(g_dbus_proxy_call_with_unix_fd_list_finish(
      proxy, outlist.receive(), result, error.receive()));
  if (!variant) {
    // Our own cancellation (from the destructor) is not an error to report.
    if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
      return;
    RTC_LOG(LS_ERROR) << "Failed to open PipeWire remote:" << error->message;
    if (that->access_request_signal_id_) {
      g_dbus_connection_signal_unsubscribe(that->connection_,
                                           that->access_request_signal_id_);
      that->access_request_signal_id_ = 0;
    }
    that->OnPortalDone(RequestResponse::kError);
    return;
  }

  int32_t index;
  // Reply format "(h)": an index into the returned fd list, not the fd itself.
  g_variant_get(variant.get(), "(h)", &index);

  int fd = g_unix_fd_list_get(outlist.get(), index, error.receive());

  if (fd == kInvalidPipeWireFd) {
    RTC_LOG(LS_ERROR) << "Failed to get file descriptor from the list: "
                      << error->message;
    that->OnPortalDone(RequestResponse::kError);
    return;
  }

  that->OnPortalDone(RequestResponse::kSuccess, fd);
}
// Delivers the final portal outcome (and PipeWire fd on success) to the
// notifier. Guarded so the notifier is invoked at most once and never after
// the destructor has detached it.
void CameraPortalPrivate::OnPortalDone(RequestResponse result, int fd) {
  webrtc::MutexLock lock(&notifier_lock_);
  if (!notifier_)
    return;
  notifier_->OnCameraRequestResult(result, fd);
  notifier_ = nullptr;
}
// Thin public facade: all portal logic lives in CameraPortalPrivate, which is
// destroyed (and its async work cancelled) when this object is destroyed.
CameraPortal::CameraPortal(PortalNotifier* notifier)
    : private_(std::make_unique<CameraPortalPrivate>(notifier)) {}

CameraPortal::~CameraPortal() {}

// Starts the asynchronous camera-access request; the result is reported to
// the PortalNotifier passed to the constructor.
void CameraPortal::Start() {
  private_->Start();
}
} // namespace webrtc

View file

@ -0,0 +1,47 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_CAMERA_PORTAL_H_
#define MODULES_VIDEO_CAPTURE_LINUX_CAMERA_PORTAL_H_
#include <memory>
#include <string>
#include "modules/portal/portal_request_response.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class CameraPortalPrivate;
// Requests camera access through the org.freedesktop.portal.Camera XDG
// desktop portal and, when granted, delivers a PipeWire remote fd to the
// given PortalNotifier. The request is asynchronous; destruction cancels any
// in-flight request.
class RTC_EXPORT CameraPortal {
 public:
  class PortalNotifier {
   public:
    // Called once with the portal outcome; on success `fd` is the PipeWire
    // remote descriptor and ownership passes to the notifier.
    virtual void OnCameraRequestResult(xdg_portal::RequestResponse result,
                                       int fd) = 0;

   protected:
    PortalNotifier() = default;
    virtual ~PortalNotifier() = default;
  };

  explicit CameraPortal(PortalNotifier* notifier);
  ~CameraPortal();

  // Begins the asynchronous access request.
  void Start();

 private:
  std::unique_ptr<CameraPortalPrivate> private_;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_CAMERA_PORTAL_H_

View file

@ -0,0 +1,52 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
// v4l includes
#include <linux/videodev2.h>
#include <vector>
#if defined(WEBRTC_USE_PIPEWIRE)
#include "modules/video_capture/linux/device_info_pipewire.h"
#endif
#include "modules/video_capture/linux/device_info_v4l2.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "modules/video_capture/video_capture_options.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
// Legacy factory without options: always returns the V4L2 device enumerator.
// Caller takes ownership of the returned object.
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
  return new videocapturemodule::DeviceInfoV4l2();
}
// Options-aware factory. Prefers the PipeWire backend when it was compiled in
// and the options allow it, falls back to V4L2 when allowed, and returns
// nullptr when no backend is permitted. Caller takes ownership.
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo(
    VideoCaptureOptions* options) {
#if defined(WEBRTC_USE_PIPEWIRE)
  // PipeWire takes precedence over V4L2 when both are allowed.
  if (options->allow_pipewire())
    return new videocapturemodule::DeviceInfoPipeWire(options);
#endif
  if (options->allow_v4l2())
    return new videocapturemodule::DeviceInfoV4l2();
  // Neither backend permitted by the supplied options.
  return nullptr;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,117 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/device_info_pipewire.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <vector>
#include "modules/video_capture/linux/pipewire_session.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
// Holds a reference to the shared PipeWireSession owned by the options; all
// device/capability queries are answered from that session's node list.
DeviceInfoPipeWire::DeviceInfoPipeWire(VideoCaptureOptions* options)
    : DeviceInfoImpl(), pipewire_session_(options->pipewire_session()) {}

// Nothing to initialize beyond the constructor; enumeration is done by the
// PipeWire session itself.
int32_t DeviceInfoPipeWire::Init() {
  return 0;
}

DeviceInfoPipeWire::~DeviceInfoPipeWire() = default;
// Number of camera nodes currently known to the PipeWire session.
uint32_t DeviceInfoPipeWire::NumberOfDevices() {
  RTC_CHECK(pipewire_session_);
  return pipewire_session_->nodes().size();
}
// Copies the display name, unique id and (optionally) model id of device
// number `deviceNumber` into the caller-provided buffers. Returns 0 on
// success, -1 on a bad index or any buffer too small to hold its string plus
// a terminating NUL. Buffers are zeroed before std::string::copy() (which
// does not NUL-terminate), and the length checks above guarantee room for the
// terminator.
int32_t DeviceInfoPipeWire::GetDeviceName(uint32_t deviceNumber,
                                          char* deviceNameUTF8,
                                          uint32_t deviceNameLength,
                                          char* deviceUniqueIdUTF8,
                                          uint32_t deviceUniqueIdUTF8Length,
                                          char* productUniqueIdUTF8,
                                          uint32_t productUniqueIdUTF8Length) {
  RTC_CHECK(pipewire_session_);
  if (deviceNumber >= NumberOfDevices())
    return -1;

  const PipeWireNode& node = pipewire_session_->nodes().at(deviceNumber);

  // Strict '<' via '<=' on length: reserve one byte for the NUL terminator.
  if (deviceNameLength <= node.display_name().length()) {
    RTC_LOG(LS_INFO) << "deviceNameUTF8 buffer passed is too small";
    return -1;
  }
  if (deviceUniqueIdUTF8Length <= node.unique_id().length()) {
    RTC_LOG(LS_INFO) << "deviceUniqueIdUTF8 buffer passed is too small";
    return -1;
  }
  if (productUniqueIdUTF8 &&
      productUniqueIdUTF8Length <= node.model_id().length()) {
    RTC_LOG(LS_INFO) << "productUniqueIdUTF8 buffer passed is too small";
    return -1;
  }

  memset(deviceNameUTF8, 0, deviceNameLength);
  node.display_name().copy(deviceNameUTF8, deviceNameLength);

  memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
  node.unique_id().copy(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length);

  if (productUniqueIdUTF8) {
    memset(productUniqueIdUTF8, 0, productUniqueIdUTF8Length);
    node.model_id().copy(productUniqueIdUTF8, productUniqueIdUTF8Length);
  }
  return 0;
}
// Fills _captureCapabilities from the PipeWire node whose unique id matches
// `deviceUniqueIdUTF8`. Returns the number of capabilities, or -1 if no node
// matches. Also caches the unique id in `_lastUsedDeviceName` so the base
// class can short-circuit repeated queries for the same device.
// BUG FIX: the cached length was previously taken from the node's *display
// name* while the bytes copied came from the *unique id*; a display name
// longer than the id caused an out-of-bounds read of `deviceUniqueIdUTF8`,
// and a mismatched length made the base class's cache comparison never hit.
int32_t DeviceInfoPipeWire::CreateCapabilityMap(
    const char* deviceUniqueIdUTF8) {
  RTC_CHECK(pipewire_session_);
  for (auto& node : pipewire_session_->nodes()) {
    if (node.unique_id().compare(deviceUniqueIdUTF8) != 0)
      continue;
    _captureCapabilities = node.capabilities();
    // Cache the unique id (the key the base class compares against). Since
    // the compare above succeeded, unique_id() and deviceUniqueIdUTF8 have
    // identical contents and length, so the +1 below also copies the NUL.
    _lastUsedDeviceNameLength = node.unique_id().length();
    _lastUsedDeviceName = static_cast<char*>(
        realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1));
    memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
           _lastUsedDeviceNameLength + 1);
    return _captureCapabilities.size();
  }
  return -1;
}
// Settings dialogs are not supported on the PipeWire backend; always -1.
int32_t DeviceInfoPipeWire::DisplayCaptureSettingsDialogBox(
    const char* /*deviceUniqueIdUTF8*/,
    const char* /*dialogTitleUTF8*/,
    void* /*parentWindow*/,
    uint32_t /*positionX*/,
    uint32_t /*positionY*/) {
  return -1;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_PIPEWIRE_H_
#define MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_PIPEWIRE_H_
#include <stdint.h>
#include "modules/video_capture/device_info_impl.h"
#include "modules/video_capture/video_capture_options.h"
namespace webrtc {
namespace videocapturemodule {
// DeviceInfo backend that answers all queries from a shared PipeWireSession's
// node list (no device probing of its own).
class DeviceInfoPipeWire : public DeviceInfoImpl {
 public:
  explicit DeviceInfoPipeWire(VideoCaptureOptions* options);
  ~DeviceInfoPipeWire() override;
  // Count of camera nodes known to the PipeWire session.
  uint32_t NumberOfDevices() override;
  // Copies display name / unique id / model id into the caller's buffers;
  // -1 if the index is bad or any buffer is too small.
  int32_t GetDeviceName(uint32_t deviceNumber,
                        char* deviceNameUTF8,
                        uint32_t deviceNameLength,
                        char* deviceUniqueIdUTF8,
                        uint32_t deviceUniqueIdUTF8Length,
                        char* productUniqueIdUTF8 = nullptr,
                        uint32_t productUniqueIdUTF8Length = 0) override;
  /*
   * Fills the membervariable _captureCapabilities with capabilites for the
   * given device name.
   */
  int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
      RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
  // Unsupported on this backend; always returns -1.
  int32_t DisplayCaptureSettingsDialogBox(const char* /*deviceUniqueIdUTF8*/,
                                          const char* /*dialogTitleUTF8*/,
                                          void* /*parentWindow*/,
                                          uint32_t /*positionX*/,
                                          uint32_t /*positionY*/) override;
  int32_t Init() override;

 private:
  // Shared with VideoCaptureOptions; keeps the PipeWire connection alive.
  rtc::scoped_refptr<PipeWireSession> pipewire_session_;
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_PIPEWIRE_H_

View file

@ -0,0 +1,354 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/device_info_v4l2.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
// v4l includes
#include <linux/videodev2.h>
#include <vector>
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "rtc_base/logging.h"
// These defines are here to support building on kernel 3.16 which some
// downstream projects, e.g. Firefox, use.
// TODO(apehrson): Remove them and their undefs when no longer needed.
#ifndef V4L2_PIX_FMT_ABGR32
#define ABGR32_OVERRIDE 1
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_ARGB32
#define ARGB32_OVERRIDE 1
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_RGBA32
#define RGBA32_OVERRIDE 1
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4')
#endif
namespace webrtc {
namespace videocapturemodule {
// Stateless beyond the base class; devices are probed on demand by opening
// /dev/video* nodes, so there is nothing to set up or tear down here.
DeviceInfoV4l2::DeviceInfoV4l2() : DeviceInfoImpl() {}

int32_t DeviceInfoV4l2::Init() {
  return 0;
}

DeviceInfoV4l2::~DeviceInfoV4l2() {}
// Counts V4L2 capture devices by probing /dev/video0 .. /dev/video63 and
// keeping only nodes that report V4L2_CAP_VIDEO_CAPTURE in device_caps.
uint32_t DeviceInfoV4l2::NumberOfDevices() {
  uint32_t count = 0;
  char device[20];
  struct v4l2_capability cap;

  /* detect /dev/video [0-63]VideoCaptureModule entries */
  for (int n = 0; n < 64; n++) {
    snprintf(device, sizeof(device), "/dev/video%d", n);
    const int fd = open(device, O_RDONLY);
    if (fd == -1)
      continue;
    // Only nodes that answer QUERYCAP and advertise video capture count.
    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) >= 0 &&
        (cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
      count++;
    }
    close(fd);
  }

  return count;
}
// Finds the `deviceNumber`-th V4L2 capture node under /dev/video[0-63] and
// copies its card name into `deviceNameUTF8` and its bus_info (when present)
// into `deviceUniqueIdUTF8`. Returns 0 on success, -1 if the device is not
// found, an ioctl fails, or a buffer is too small. productUniqueIdUTF8 is
// ignored on this backend.
int32_t DeviceInfoV4l2::GetDeviceName(uint32_t deviceNumber,
                                      char* deviceNameUTF8,
                                      uint32_t deviceNameLength,
                                      char* deviceUniqueIdUTF8,
                                      uint32_t deviceUniqueIdUTF8Length,
                                      char* /*productUniqueIdUTF8*/,
                                      uint32_t /*productUniqueIdUTF8Length*/) {
  // Travel through /dev/video [0-63]
  uint32_t count = 0;
  char device[20];
  int fd = -1;
  bool found = false;
  struct v4l2_capability cap;
  for (int n = 0; n < 64; n++) {
    snprintf(device, sizeof(device), "/dev/video%d", n);
    if ((fd = open(device, O_RDONLY)) != -1) {
      // Skip nodes without the video-capture capability; only capture nodes
      // contribute to the device numbering (matches NumberOfDevices()).
      if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 ||
          !(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
        close(fd);
        continue;
      }
      if (count == deviceNumber) {
        // Found the device; keep fd open for the QUERYCAP below.
        found = true;
        break;
      } else {
        close(fd);
        count++;
      }
    }
  }

  if (!found)
    return -1;

  // query device capabilities
  if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
    RTC_LOG(LS_INFO) << "error in querying the device capability for device "
                     << device << ". errno = " << errno;
    close(fd);
    return -1;
  }

  close(fd);

  char cameraName[64];
  memset(deviceNameUTF8, 0, deviceNameLength);
  // cap.card is a 32-byte array; the V4L2 spec documents it as a
  // NUL-terminated string, which the strlen calls below rely on.
  memcpy(cameraName, cap.card, sizeof(cap.card));

  if (deviceNameLength > strlen(cameraName)) {
    memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
  } else {
    RTC_LOG(LS_INFO) << "buffer passed is too small";
    return -1;
  }

  if (cap.bus_info[0] != 0) {  // may not available in all drivers
    // copy device id
    size_t len = strlen(reinterpret_cast<const char*>(cap.bus_info));
    if (deviceUniqueIdUTF8Length > len) {
      memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
      memcpy(deviceUniqueIdUTF8, cap.bus_info, len);
    } else {
      RTC_LOG(LS_INFO) << "buffer passed is too small";
      return -1;
    }
  }

  return 0;
}
// Locates the /dev/video* node matching `deviceUniqueIdUTF8` (by bus_info, or
// by card-name prefix when bus_info is empty), rebuilds _captureCapabilities
// for it via FillCapabilities(), and caches the unique id for the base
// class's repeat-query shortcut. Returns the capability count or -1.
// BUG FIX: the `continue` taken for nodes without V4L2_CAP_VIDEO_CAPTURE
// skipped the close(fd) at the bottom of the loop, leaking one fd per
// non-capture node (e.g. metadata nodes) on every call.
int32_t DeviceInfoV4l2::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
  int fd;
  char device[32];
  bool found = false;

  const int32_t deviceUniqueIdUTF8Length = strlen(deviceUniqueIdUTF8);
  if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
    RTC_LOG(LS_INFO) << "Device name too long";
    return -1;
  }
  RTC_LOG(LS_INFO) << "CreateCapabilityMap called for device "
                   << deviceUniqueIdUTF8;

  /* detect /dev/video [0-63] entries */
  for (int n = 0; n < 64; ++n) {
    snprintf(device, sizeof(device), "/dev/video%d", n);
    fd = open(device, O_RDONLY);
    if (fd == -1)
      continue;

    // query device capabilities
    struct v4l2_capability cap;
    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
      // Skip devices without video capture capability. Close the fd before
      // continuing — previously this path leaked the descriptor.
      if (!(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
        close(fd);
        continue;
      }

      if (cap.bus_info[0] != 0) {
        if (strncmp(reinterpret_cast<const char*>(cap.bus_info),
                    deviceUniqueIdUTF8,
                    strlen(deviceUniqueIdUTF8)) == 0) {  // match with device id
          found = true;
          break;  // fd matches with device unique id supplied
        }
      } else {  // match for device name
        if (IsDeviceNameMatches(reinterpret_cast<const char*>(cap.card),
                                deviceUniqueIdUTF8)) {
          found = true;
          break;
        }
      }
    }
    close(fd);  // close since this is not the matching device
  }

  if (!found) {
    RTC_LOG(LS_INFO) << "no matching device found";
    return -1;
  }

  // now fd will point to the matching device
  // reset old capability list.
  _captureCapabilities.clear();

  int size = FillCapabilities(fd);
  close(fd);

  // Store the new used device name
  _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
  _lastUsedDeviceName = reinterpret_cast<char*>(
      realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1));
  memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
         _lastUsedDeviceNameLength + 1);

  RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();

  return size;
}
// Settings dialogs are not supported on the V4L2 backend; always -1.
int32_t DeviceInfoV4l2::DisplayCaptureSettingsDialogBox(
    const char* /*deviceUniqueIdUTF8*/,
    const char* /*dialogTitleUTF8*/,
    void* /*parentWindow*/,
    uint32_t /*positionX*/,
    uint32_t /*positionY*/) {
  return -1;
}
// True when `name` (the driver's card string) is a prefix of
// `deviceUniqueIdUTF8` — only the first strlen(name) bytes are compared.
bool DeviceInfoV4l2::IsDeviceNameMatches(const char* name,
                                         const char* deviceUniqueIdUTF8) {
  return strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0;
}
// Probes which (pixel format, resolution) combinations the already-open
// device `fd` accepts by issuing non-destructive VIDIOC_TRY_FMT calls over a
// fixed format list and a fixed table of 13 common sizes. Each accepted exact
// match is appended to _captureCapabilities with a guessed frame rate (V4L2
// has no reliable generic rate query here). Returns the capability count.
int32_t DeviceInfoV4l2::FillCapabilities(int fd) {
  // set image format
  struct v4l2_format video_fmt;
  memset(&video_fmt, 0, sizeof(struct v4l2_format));
  video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  video_fmt.fmt.pix.sizeimage = 0;

  // Formats to probe, roughly in order of preference.
  unsigned int videoFormats[] = {
      V4L2_PIX_FMT_MJPEG,  V4L2_PIX_FMT_JPEG,   V4L2_PIX_FMT_YUV420,
      V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_YUYV,   V4L2_PIX_FMT_UYVY,
      V4L2_PIX_FMT_NV12,   V4L2_PIX_FMT_BGR24,  V4L2_PIX_FMT_RGB24,
      V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_ABGR32, V4L2_PIX_FMT_ARGB32,
      V4L2_PIX_FMT_RGBA32, V4L2_PIX_FMT_BGR32,  V4L2_PIX_FMT_RGB32,
  };
  constexpr int totalFmts = sizeof(videoFormats) / sizeof(unsigned int);

  int sizes = 13;
  unsigned int size[][2] = {{128, 96},   {160, 120},  {176, 144},  {320, 240},
                            {352, 288},  {640, 480},  {704, 576},  {800, 600},
                            {960, 720},  {1280, 720}, {1024, 768}, {1440, 1080},
                            {1920, 1080}};

  for (int fmts = 0; fmts < totalFmts; fmts++) {
    for (int i = 0; i < sizes; i++) {
      video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
      video_fmt.fmt.pix.width = size[i][0];
      video_fmt.fmt.pix.height = size[i][1];

      if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0) {
        // TRY_FMT may succeed while adjusting the size; only record exact
        // matches so the advertised capability equals what was requested.
        if ((video_fmt.fmt.pix.width == size[i][0]) &&
            (video_fmt.fmt.pix.height == size[i][1])) {
          VideoCaptureCapability cap;
          cap.width = video_fmt.fmt.pix.width;
          cap.height = video_fmt.fmt.pix.height;
          if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV) {
            cap.videoType = VideoType::kYUY2;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420) {
            cap.videoType = VideoType::kI420;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_YVU420) {
            cap.videoType = VideoType::kYV12;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG ||
                     videoFormats[fmts] == V4L2_PIX_FMT_JPEG) {
            cap.videoType = VideoType::kMJPEG;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY) {
            cap.videoType = VideoType::kUYVY;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_NV12) {
            cap.videoType = VideoType::kNV12;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_BGR24) {
            // NB that for RGB formats, `VideoType` follows naming conventions
            // of libyuv[1], where e.g. the format for FOURCC "ARGB" stores
            // pixels in BGRA order in memory. V4L2[2] on the other hand names
            // its formats based on the order of the RGB components as stored in
            // memory. Applies to all RGB formats below.
            // [1]https://chromium.googlesource.com/libyuv/libyuv/+/refs/heads/main/docs/formats.md#the-argb-fourcc
            // [2]https://www.kernel.org/doc/html/v6.2/userspace-api/media/v4l/pixfmt-rgb.html#bits-per-component
            cap.videoType = VideoType::kRGB24;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_RGB24) {
            cap.videoType = VideoType::kBGR24;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_RGB565) {
            cap.videoType = VideoType::kRGB565;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_ABGR32) {
            cap.videoType = VideoType::kARGB;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_ARGB32) {
            cap.videoType = VideoType::kBGRA;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_BGR32) {
            cap.videoType = VideoType::kARGB;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_RGB32) {
            cap.videoType = VideoType::kBGRA;
          } else if (videoFormats[fmts] == V4L2_PIX_FMT_RGBA32) {
            cap.videoType = VideoType::kABGR;
          } else {
            // Unreachable: every entry of videoFormats is handled above.
            RTC_DCHECK_NOTREACHED();
          }

          // get fps of current camera mode
          // V4l2 does not have a stable method of knowing so we just guess.
          if (cap.width >= 800 && cap.videoType != VideoType::kMJPEG) {
            cap.maxFPS = 15;
          } else {
            cap.maxFPS = 30;
          }

          _captureCapabilities.push_back(cap);
          RTC_LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
                              << " height:" << cap.height
                              << " type:" << static_cast<int32_t>(cap.videoType)
                              << " fps:" << cap.maxFPS;
        }
      }
    }
  }

  RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
  return _captureCapabilities.size();
}
} // namespace videocapturemodule
} // namespace webrtc
#ifdef ABGR32_OVERRIDE
#undef ABGR32_OVERRIDE
#undef V4L2_PIX_FMT_ABGR32
#endif
#ifdef ARGB32_OVERRIDE
#undef ARGB32_OVERRIDE
#undef V4L2_PIX_FMT_ARGB32
#endif
#ifdef RGBA32_OVERRIDE
#undef RGBA32_OVERRIDE
#undef V4L2_PIX_FMT_RGBA32
#endif

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
#define MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
#include <stdint.h>
#include "modules/video_capture/device_info_impl.h"
namespace webrtc {
namespace videocapturemodule {
// V4L2 implementation of the capture-device enumeration interface.
// Enumerates /dev/video* style devices and probes their capabilities.
class DeviceInfoV4l2 : public DeviceInfoImpl {
 public:
  DeviceInfoV4l2();
  ~DeviceInfoV4l2() override;
  // Number of capture devices currently available.
  uint32_t NumberOfDevices() override;
  // Copies human-readable and unique ids of device `deviceNumber` into the
  // caller-provided buffers; optional product-id buffer may be null/0.
  int32_t GetDeviceName(uint32_t deviceNumber,
                        char* deviceNameUTF8,
                        uint32_t deviceNameLength,
                        char* deviceUniqueIdUTF8,
                        uint32_t deviceUniqueIdUTF8Length,
                        char* productUniqueIdUTF8 = 0,
                        uint32_t productUniqueIdUTF8Length = 0) override;
  /*
   * Fills the membervariable _captureCapabilities with capabilites for the
   * given device name.
   */
  int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
      RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
  // No settings dialog exists on Linux; implementation is a stub.
  int32_t DisplayCaptureSettingsDialogBox(const char* /*deviceUniqueIdUTF8*/,
                                          const char* /*dialogTitleUTF8*/,
                                          void* /*parentWindow*/,
                                          uint32_t /*positionX*/,
                                          uint32_t /*positionY*/) override;
  // Probes an already-open device fd; caller must hold _apiLock.
  int32_t FillCapabilities(int fd) RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
  int32_t Init() override;

 private:
  // True when `name` identifies the device with the given unique id.
  bool IsDeviceNameMatches(const char* name, const char* deviceUniqueIdUTF8);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_

View file

@ -0,0 +1,400 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/pipewire_session.h"
#include <spa/monitor/device.h>
#include <spa/param/format-utils.h>
#include <spa/param/format.h>
#include <spa/param/video/raw.h>
#include <spa/pod/parser.h>
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_capture/device_info_impl.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace videocapturemodule {
// Maps a PipeWire SPA raw video format id onto the matching WebRTC
// VideoType. Formats without a WebRTC equivalent yield VideoType::kUnknown.
VideoType PipeWireRawFormatToVideoType(uint32_t id) {
  static constexpr struct {
    uint32_t spa_format;
    VideoType video_type;
  } kFormatMap[] = {
      {SPA_VIDEO_FORMAT_I420, VideoType::kI420},
      {SPA_VIDEO_FORMAT_NV12, VideoType::kNV12},
      {SPA_VIDEO_FORMAT_YUY2, VideoType::kYUY2},
      {SPA_VIDEO_FORMAT_UYVY, VideoType::kUYVY},
      {SPA_VIDEO_FORMAT_RGB, VideoType::kRGB24},
  };
  for (const auto& entry : kFormatMap) {
    if (entry.spa_format == id)
      return entry.video_type;
  }
  return VideoType::kUnknown;
}
// Represents one camera node from the PipeWire registry. Binds a proxy to the
// node and registers for info/param events so its capabilities can be
// enumerated asynchronously.
PipeWireNode::PipeWireNode(PipeWireSession* session,
                           uint32_t id,
                           const spa_dict* props)
    : session_(session),
      id_(id),
      display_name_(spa_dict_lookup(props, PW_KEY_NODE_DESCRIPTION)),
      unique_id_(rtc::ToString(id)) {
  RTC_LOG(LS_VERBOSE) << "Found Camera: " << display_name_;

  // Proxy is held for the node's lifetime; released in the destructor.
  proxy_ = static_cast<pw_proxy*>(pw_registry_bind(
      session_->pw_registry_, id, PW_TYPE_INTERFACE_Node, PW_VERSION_NODE, 0));

  // The event table is shared by all instances; per-instance state is passed
  // via the `this` user-data pointer.
  static const pw_node_events node_events{
      .version = PW_VERSION_NODE_EVENTS,
      .info = OnNodeInfo,
      .param = OnNodeParam,
  };
  pw_node_add_listener(proxy_, &node_listener_, &node_events, this);
}
PipeWireNode::~PipeWireNode() {
  // Release the registry proxy and detach the listener hook.
  // NOTE(review): the hook is removed after the proxy is destroyed — confirm
  // no destroy-time callbacks can fire into the listener during
  // pw_proxy_destroy(), or swap the order.
  pw_proxy_destroy(proxy_);
  spa_hook_remove(&node_listener_);
}
// static
// Registry callback with updated node info. Extracts the USB vendor/product
// id into model_id_ and kicks off format enumeration when parameters are
// readable.
void PipeWireNode::OnNodeInfo(void* data, const pw_node_info* info) {
  PipeWireNode* that = static_cast<PipeWireNode*>(data);

  // change_mask is a bitmask and several bits may be set in one callback, so
  // test each flag independently. The previous `else if` silently skipped
  // parameter enumeration whenever the PROPS bit was also set.
  if (info->change_mask & PW_NODE_CHANGE_MASK_PROPS) {
    const char* vid_str;
    const char* pid_str;
    absl::optional<int> vid;
    absl::optional<int> pid;

    vid_str = spa_dict_lookup(info->props, SPA_KEY_DEVICE_VENDOR_ID);
    pid_str = spa_dict_lookup(info->props, SPA_KEY_DEVICE_PRODUCT_ID);
    // NOTE(review): StringToNumber parses base-10 while USB ids are usually
    // hex strings; ids containing [a-f] fail to parse and model_id_ stays
    // empty — confirm the format PipeWire reports here.
    vid = vid_str ? rtc::StringToNumber<int>(vid_str) : absl::nullopt;
    pid = pid_str ? rtc::StringToNumber<int>(pid_str) : absl::nullopt;

    if (vid && pid) {
      // "vvvv:pppp" + NUL fits exactly in 10 chars.
      char model_str[10];
      snprintf(model_str, sizeof(model_str), "%04x:%04x", vid.value(),
               pid.value());
      that->model_id_ = model_str;
    }
  }
  if (info->change_mask & PW_NODE_CHANGE_MASK_PARAMS) {
    for (uint32_t i = 0; i < info->n_params; i++) {
      uint32_t id = info->params[i].id;
      if (id == SPA_PARAM_EnumFormat &&
          info->params[i].flags & SPA_PARAM_INFO_READ) {
        // Each enumerated format arrives via OnNodeParam.
        pw_node_enum_params(that->proxy_, 0, id, 0, UINT32_MAX, nullptr);
        break;
      }
    }
    // Round-trip sync so the session knows when enumeration has drained.
    that->session_->PipeWireSync();
  }
}
// static
// Invoked once per enumerated format pod. Extracts frame rate, size and pixel
// format and records them as one VideoCaptureCapability on the node.
void PipeWireNode::OnNodeParam(void* data,
                               int seq,
                               uint32_t id,
                               uint32_t index,
                               uint32_t next,
                               const spa_pod* param) {
  PipeWireNode* that = static_cast<PipeWireNode*>(data);
  auto* obj = reinterpret_cast<const spa_pod_object*>(param);
  const spa_pod_prop* prop = nullptr;
  VideoCaptureCapability cap;
  spa_pod* val;
  uint32_t n_items, choice;

  cap.videoType = VideoType::kUnknown;
  cap.maxFPS = 0;

  // Frame rate is optional: a fixed fraction (SPA_CHOICE_None) is used as-is;
  // for a range, fract[1] holds the maximum and is used instead.
  prop = spa_pod_object_find_prop(obj, prop, SPA_FORMAT_VIDEO_framerate);
  if (prop) {
    val = spa_pod_get_values(&prop->value, &n_items, &choice);
    if (val->type == SPA_TYPE_Fraction) {
      spa_fraction* fract;
      fract = static_cast<spa_fraction*>(SPA_POD_BODY(val));
      if (choice == SPA_CHOICE_None)
        cap.maxFPS = 1.0 * fract[0].num / fract[0].denom;
      else if (choice == SPA_CHOICE_Range && fract[1].num > 0)
        cap.maxFPS = 1.0 * fract[1].num / fract[1].denom;
    }
  }

  // Size is mandatory and must be one concrete rectangle, not a choice.
  prop = spa_pod_object_find_prop(obj, prop, SPA_FORMAT_VIDEO_size);
  if (!prop)
    return;
  val = spa_pod_get_values(&prop->value, &n_items, &choice);
  if (val->type != SPA_TYPE_Rectangle)
    return;
  if (choice != SPA_CHOICE_None)
    return;
  // Pixel format must map to something WebRTC can consume.
  if (!ParseFormat(param, &cap))
    return;
  spa_rectangle* rect;
  rect = static_cast<spa_rectangle*>(SPA_POD_BODY(val));
  cap.width = rect[0].width;
  cap.height = rect[0].height;
  RTC_LOG(LS_VERBOSE) << "Found Format(" << that->display_name_
                      << "): " << static_cast<int>(cap.videoType) << "("
                      << cap.width << "x" << cap.height << "@" << cap.maxFPS
                      << ")";
  that->capabilities_.push_back(cap);
}
// static
// Extracts the video type from a format pod into `cap->videoType`.
// Returns true only when the pod describes a video format WebRTC supports:
// a raw format with a single, known pixel format, or MJPEG.
bool PipeWireNode::ParseFormat(const spa_pod* param,
                               VideoCaptureCapability* cap) {
  auto* obj = reinterpret_cast<const spa_pod_object*>(param);
  uint32_t media_type, media_subtype;
  if (spa_format_parse(param, &media_type, &media_subtype) < 0) {
    RTC_LOG(LS_ERROR) << "Failed to parse video format.";
    return false;
  }
  if (media_type != SPA_MEDIA_TYPE_video)
    return false;
  if (media_subtype == SPA_MEDIA_SUBTYPE_raw) {
    const spa_pod_prop* prop = nullptr;
    uint32_t n_items, choice;
    spa_pod* val;
    uint32_t* id;
    // The pixel format must be a single concrete id, not a choice list.
    prop = spa_pod_object_find_prop(obj, prop, SPA_FORMAT_VIDEO_format);
    if (!prop)
      return false;
    val = spa_pod_get_values(&prop->value, &n_items, &choice);
    if (val->type != SPA_TYPE_Id)
      return false;
    if (choice != SPA_CHOICE_None)
      return false;
    id = static_cast<uint32_t*>(SPA_POD_BODY(val));
    cap->videoType = PipeWireRawFormatToVideoType(id[0]);
    if (cap->videoType == VideoType::kUnknown) {
      RTC_LOG(LS_INFO) << "Unsupported PipeWire pixel format " << id[0];
      return false;
    }
  } else if (media_subtype == SPA_MEDIA_SUBTYPE_mjpg) {
    cap->videoType = VideoType::kMJPEG;
  } else {
    RTC_LOG(LS_INFO) << "Unsupported PipeWire media subtype " << media_subtype;
  }
  // Unsupported subtypes fall through here with videoType still kUnknown.
  return cap->videoType != VideoType::kUnknown;
}
// Forwards camera-portal results to the owning PipeWireSession.
CameraPortalNotifier::CameraPortalNotifier(PipeWireSession* session)
    : session_(session) {}
// Portal callback: on success continue initialization with the received
// PipeWire fd; otherwise finish the session with the matching status.
void CameraPortalNotifier::OnCameraRequestResult(
    xdg_portal::RequestResponse result,
    int fd) {
  switch (result) {
    case xdg_portal::RequestResponse::kSuccess:
      session_->InitPipeWire(fd);
      break;
    case xdg_portal::RequestResponse::kUserCancelled:
      session_->Finish(VideoCaptureOptions::Status::DENIED);
      break;
    default:
      session_->Finish(VideoCaptureOptions::Status::ERROR);
      break;
  }
}
// Session starts idle; Init() must be called before any PipeWire activity.
PipeWireSession::PipeWireSession()
    : status_(VideoCaptureOptions::Status::UNINITIALIZED) {}
PipeWireSession::~PipeWireSession() {
  // Drops the callback and tears down the PipeWire loop/context.
  Cleanup();
}
// Starts session initialization. With a valid fd the session connects to
// PipeWire directly; otherwise camera access is requested via the xdg camera
// portal and InitPipeWire() runs later from OnCameraRequestResult().
// `callback` is notified exactly once via Finish().
void PipeWireSession::Init(VideoCaptureOptions::Callback* callback, int fd) {
  {
    webrtc::MutexLock lock(&callback_lock_);
    callback_ = callback;
  }
  if (fd != kInvalidPipeWireFd) {
    InitPipeWire(fd);
  } else {
    // Notifier must outlive the portal that calls into it.
    portal_notifier_ = std::make_unique<CameraPortalNotifier>(this);
    portal_ = std::make_unique<CameraPortal>(portal_notifier_.get());
    portal_->Start();
  }
}
// Loads the PipeWire library and connects to the daemon over `fd`,
// reporting the outcome through Finish().
void PipeWireSession::InitPipeWire(int fd) {
  // Without the library loaded none of the pw_* entry points used by
  // StartPipeWire() are usable, so bail out early instead of falling
  // through after reporting failure.
  if (!InitializePipeWire()) {
    Finish(VideoCaptureOptions::Status::UNAVAILABLE);
    return;
  }
  if (!StartPipeWire(fd))
    Finish(VideoCaptureOptions::Status::ERROR);
}
// Builds the PipeWire thread loop, context, core connection and registry,
// hooks up listeners, queues an initial sync and starts the loop thread.
// Returns false on any setup failure (partially-created objects are released
// later by StopPipeWire()).
bool PipeWireSession::StartPipeWire(int fd) {
  pw_init(/*argc=*/nullptr, /*argv=*/nullptr);
  pw_main_loop_ = pw_thread_loop_new("pipewire-main-loop", nullptr);
  pw_context_ =
      pw_context_new(pw_thread_loop_get_loop(pw_main_loop_), nullptr, 0);
  if (!pw_context_) {
    RTC_LOG(LS_ERROR) << "Failed to create PipeWire context";
    return false;
  }
  // Connect over the fd handed to us (e.g. by the camera portal).
  pw_core_ = pw_context_connect_fd(pw_context_, fd, nullptr, 0);
  if (!pw_core_) {
    RTC_LOG(LS_ERROR) << "Failed to connect PipeWire context";
    return false;
  }
  // Event tables are static: shared between instances, state rides in `this`.
  static const pw_core_events core_events{
      .version = PW_VERSION_CORE_EVENTS,
      .done = &OnCoreDone,
      .error = &OnCoreError,
  };
  pw_core_add_listener(pw_core_, &core_listener_, &core_events, this);
  static const pw_registry_events registry_events{
      .version = PW_VERSION_REGISTRY_EVENTS,
      .global = OnRegistryGlobal,
      .global_remove = OnRegistryGlobalRemove,
  };
  pw_registry_ = pw_core_get_registry(pw_core_, PW_VERSION_REGISTRY, 0);
  pw_registry_add_listener(pw_registry_, &registry_listener_, &registry_events,
                           this);
  // Sync before starting the loop so OnCoreDone fires after the initial
  // registry enumeration has been delivered.
  PipeWireSync();
  if (pw_thread_loop_start(pw_main_loop_) < 0) {
    RTC_LOG(LS_ERROR) << "Failed to start main PipeWire loop";
    return false;
  }
  return true;
}
// Tears down PipeWire objects in reverse creation order: stop the loop
// thread first so no callbacks run during disconnect/destroy. Safe to call
// with partially-initialized state (each step is null-checked).
void PipeWireSession::StopPipeWire() {
  if (pw_main_loop_)
    pw_thread_loop_stop(pw_main_loop_);
  if (pw_core_) {
    pw_core_disconnect(pw_core_);
    pw_core_ = nullptr;
  }
  if (pw_context_) {
    pw_context_destroy(pw_context_);
    pw_context_ = nullptr;
  }
  if (pw_main_loop_) {
    pw_thread_loop_destroy(pw_main_loop_);
    pw_main_loop_ = nullptr;
  }
}
// Issues a core sync; the OnCoreDone callback carrying the same sequence
// number signals that all previously queued server events were delivered.
void PipeWireSession::PipeWireSync() {
  sync_seq_ = pw_core_sync(pw_core_, PW_ID_CORE, sync_seq_);
}
// static
// Core-level error callback; currently log-only (errors do not abort the
// enumeration in progress).
void PipeWireSession::OnCoreError(void* data,
                                  uint32_t id,
                                  int seq,
                                  int res,
                                  const char* message) {
  RTC_LOG(LS_ERROR) << "PipeWire remote error: " << message;
}
// static
// Fires when a core sync round-trip completes. Only the sync we issued last
// (matching sync_seq_ on the core object) marks enumeration as finished.
void PipeWireSession::OnCoreDone(void* data, uint32_t id, int seq) {
  PipeWireSession* that = static_cast<PipeWireSession*>(data);

  if (id != PW_ID_CORE || seq != that->sync_seq_)
    return;

  RTC_LOG(LS_VERBOSE) << "Enumerating PipeWire camera devices complete.";
  that->Finish(VideoCaptureOptions::Status::SUCCESS);
}
// static
// Registry callback for every advertised global. Filters for camera nodes
// (interface Node, a description, media role "Camera") and tracks each one
// as a PipeWireNode, then syncs so completion can be detected.
void PipeWireSession::OnRegistryGlobal(void* data,
                                       uint32_t id,
                                       uint32_t permissions,
                                       const char* type,
                                       uint32_t version,
                                       const spa_dict* props) {
  PipeWireSession* that = static_cast<PipeWireSession*>(data);

  if (type != absl::string_view(PW_TYPE_INTERFACE_Node))
    return;
  // A node without a description cannot be presented to the user.
  if (!spa_dict_lookup(props, PW_KEY_NODE_DESCRIPTION))
    return;
  auto node_role = spa_dict_lookup(props, PW_KEY_MEDIA_ROLE);
  if (!node_role || strcmp(node_role, "Camera"))
    return;
  // Constructed in place; the node binds its own proxy/listeners.
  that->nodes_.emplace_back(that, id, props);
  that->PipeWireSync();
}
// static
// Registry callback for a removed global: drop the corresponding node from
// our list, if we were tracking it.
void PipeWireSession::OnRegistryGlobalRemove(void* data, uint32_t id) {
  PipeWireSession* that = static_cast<PipeWireSession*>(data);

  // Use nodes_ consistently on both ends of the iteration (the original
  // mixed the member with the public nodes() accessor).
  for (auto it = that->nodes_.begin(); it != that->nodes_.end(); ++it) {
    if (it->id() == id) {
      that->nodes_.erase(it);
      break;
    }
  }
}
// Delivers the one-shot initialization result to the registered callback,
// then clears it so subsequent calls become no-ops.
void PipeWireSession::Finish(VideoCaptureOptions::Status status) {
  webrtc::MutexLock lock(&callback_lock_);

  if (!callback_)
    return;

  callback_->OnInitialized(status);
  callback_ = nullptr;
}
// Drops any pending callback and tears down PipeWire.
// NOTE(review): StopPipeWire() stops the loop thread while callback_lock_ is
// held; Finish() (run from loop callbacks) takes the same lock — confirm
// this cannot deadlock during shutdown.
void PipeWireSession::Cleanup() {
  webrtc::MutexLock lock(&callback_lock_);
  callback_ = nullptr;

  StopPipeWire();
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,145 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_PIPEWIRE_SESSION_H_
#define MODULES_VIDEO_CAPTURE_LINUX_PIPEWIRE_SESSION_H_
#include <pipewire/core.h>
#include <pipewire/pipewire.h>
#include <deque>
#include <string>
#include <vector>
#include "api/ref_counted_base.h"
#include "api/scoped_refptr.h"
#include "modules/portal/pipewire_utils.h"
#include "modules/video_capture/linux/camera_portal.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_options.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
namespace videocapturemodule {
class PipeWireSession;
class VideoCaptureModulePipeWire;
// PipeWireNode objects are the local representation of PipeWire node objects.
// The portal API ensured that only camera nodes are visible to the client.
// So they all represent one camera that is available via PipeWire.
class PipeWireNode {
 public:
  PipeWireNode(PipeWireSession* session, uint32_t id, const spa_dict* props);
  ~PipeWireNode();

  // PipeWire global id of the node.
  uint32_t id() const { return id_; }
  // Human-readable name taken from PW_KEY_NODE_DESCRIPTION.
  std::string display_name() const { return display_name_; }
  // Stable id (the stringified node id).
  std::string unique_id() const { return unique_id_; }
  // "vvvv:pppp" USB ids when available, otherwise empty.
  std::string model_id() const { return model_id_; }
  // Formats enumerated asynchronously via node param events.
  std::vector<VideoCaptureCapability> capabilities() const {
    return capabilities_;
  }

 private:
  // PipeWire node event handlers; `data` is the owning PipeWireNode.
  static void OnNodeInfo(void* data, const pw_node_info* info);
  static void OnNodeParam(void* data,
                          int seq,
                          uint32_t id,
                          uint32_t index,
                          uint32_t next,
                          const spa_pod* param);
  // Extracts cap->videoType from a format pod; false if unsupported.
  static bool ParseFormat(const spa_pod* param, VideoCaptureCapability* cap);

  pw_proxy* proxy_;           // bound node proxy, released in dtor
  spa_hook node_listener_;
  PipeWireSession* session_;  // owning session; outlives this node
  uint32_t id_;
  std::string display_name_;
  std::string unique_id_;
  std::string model_id_;
  std::vector<VideoCaptureCapability> capabilities_;
};
// Bridges xdg camera-portal results into the owning PipeWireSession.
class CameraPortalNotifier : public CameraPortal::PortalNotifier {
 public:
  CameraPortalNotifier(PipeWireSession* session);
  ~CameraPortalNotifier() = default;

  // Portal result: on success `fd` is the PipeWire connection fd.
  void OnCameraRequestResult(xdg_portal::RequestResponse result,
                             int fd) override;

 private:
  PipeWireSession* session_;  // owning session; outlives the notifier
};
// Owns the PipeWire connection used for camera enumeration and capture.
// Shared (ref-counted) between device info and capture module instances.
class PipeWireSession : public rtc::RefCountedNonVirtual<PipeWireSession> {
 public:
  PipeWireSession();
  ~PipeWireSession();

  // Begins initialization; `callback` receives the result exactly once.
  // Without an fd, camera access is requested through the xdg portal.
  void Init(VideoCaptureOptions::Callback* callback,
            int fd = kInvalidPipeWireFd);

  // Camera nodes discovered so far.
  const std::deque<PipeWireNode>& nodes() const { return nodes_; }

  friend class CameraPortalNotifier;
  friend class PipeWireNode;
  friend class VideoCaptureModulePipeWire;

 private:
  // Loads the library and connects over `fd`; reports via Finish().
  void InitPipeWire(int fd);
  // Creates loop/context/core/registry and starts the loop thread.
  bool StartPipeWire(int fd);
  void StopPipeWire();
  // Round-trip sync used to detect when enumeration has drained.
  void PipeWireSync();

  // PipeWire core/registry callbacks; `data` is the session.
  static void OnCoreError(void* data,
                          uint32_t id,
                          int seq,
                          int res,
                          const char* message);
  static void OnCoreDone(void* data, uint32_t id, int seq);
  static void OnRegistryGlobal(void* data,
                               uint32_t id,
                               uint32_t permissions,
                               const char* type,
                               uint32_t version,
                               const spa_dict* props);
  static void OnRegistryGlobalRemove(void* data, uint32_t id);

  // Delivers the one-shot status to callback_ and clears it.
  void Finish(VideoCaptureOptions::Status status);
  void Cleanup();

  webrtc::Mutex callback_lock_;
  VideoCaptureOptions::Callback* callback_ RTC_GUARDED_BY(&callback_lock_) =
      nullptr;

  VideoCaptureOptions::Status status_;

  struct pw_thread_loop* pw_main_loop_ = nullptr;
  struct pw_context* pw_context_ = nullptr;
  struct pw_core* pw_core_ = nullptr;
  struct spa_hook core_listener_;

  struct pw_registry* pw_registry_ = nullptr;
  struct spa_hook registry_listener_;

  // Sequence number of the most recent core sync (see PipeWireSync()).
  int sync_seq_ = 0;

  std::deque<PipeWireNode> nodes_;
  std::unique_ptr<CameraPortal> portal_;
  std::unique_ptr<CameraPortalNotifier> portal_notifier_;
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_PIPEWIRE_SESSION_H_

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <errno.h>
#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <time.h>
#include <unistd.h>
#include <new>
#include <string>
#include "api/scoped_refptr.h"
#include "media/base/video_common.h"
#if defined(WEBRTC_USE_PIPEWIRE)
#include "modules/video_capture/linux/video_capture_pipewire.h"
#endif
#include "modules/video_capture/linux/video_capture_v4l2.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_options.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
// Default Linux factory: without VideoCaptureOptions only the V4L2 backend
// is available. Returns nullptr when the device cannot be initialized.
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
    const char* deviceUniqueId) {
  auto capture_module = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
  if (capture_module->Init(deviceUniqueId) == 0)
    return capture_module;
  return nullptr;
}
// Options-aware factory: prefers the PipeWire backend when compiled in and
// allowed, then falls back to V4L2. Returns nullptr if no allowed backend
// initializes successfully.
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
    VideoCaptureOptions* options,
    const char* deviceUniqueId) {
#if defined(WEBRTC_USE_PIPEWIRE)
  if (options->allow_pipewire()) {
    auto implementation =
        rtc::make_ref_counted<VideoCaptureModulePipeWire>(options);
    if (implementation->Init(deviceUniqueId) == 0)
      return implementation;
  }
#endif
  if (options->allow_v4l2()) {
    auto implementation = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
    if (implementation->Init(deviceUniqueId) == 0)
      return implementation;
  }
  return nullptr;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,414 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/video_capture_pipewire.h"
#include <spa/param/format.h>
#include <spa/param/video/format-utils.h>
#include <spa/pod/builder.h>
#include <spa/utils/result.h>
#include <vector>
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/portal/pipewire_utils.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace videocapturemodule {
// Mapping between SPA raw pixel formats and the WebRTC VideoType delivered
// to consumers. Order matters: the first entry is also advertised as the
// preferred/default raw format when building stream params (see BuildFormat).
struct {
  uint32_t spa_format;
  VideoType video_type;
} constexpr kSupportedFormats[] = {
    {SPA_VIDEO_FORMAT_I420, VideoType::kI420},
    {SPA_VIDEO_FORMAT_NV12, VideoType::kNV12},
    {SPA_VIDEO_FORMAT_YUY2, VideoType::kYUY2},
    {SPA_VIDEO_FORMAT_UYVY, VideoType::kUYVY},
    {SPA_VIDEO_FORMAT_RGB, VideoType::kRGB24},
};
// Looks up the WebRTC VideoType for an SPA raw format id via the
// kSupportedFormats table; unknown ids are logged and map to kUnknown.
VideoType VideoCaptureModulePipeWire::PipeWireRawFormatToVideoType(
    uint32_t spa_format) {
  for (const auto& [spa, video_type] : kSupportedFormats) {
    if (spa == spa_format)
      return video_type;
  }
  RTC_LOG(LS_INFO) << "Unsupported pixel format: " << spa_format;
  return VideoType::kUnknown;
}
// Construction only stores the shared PipeWire session; stream setup is
// deferred to Init()/StartCapture().
VideoCaptureModulePipeWire::VideoCaptureModulePipeWire(
    VideoCaptureOptions* options)
    : VideoCaptureImpl(),
      session_(options->pipewire_session()),
      initialized_(false),
      started_(false) {}
VideoCaptureModulePipeWire::~VideoCaptureModulePipeWire() {
  RTC_DCHECK_RUN_ON(&api_checker_);

  // Ensure the stream is destroyed before members are torn down.
  StopCapture();
}
// Parses `deviceUniqueId` (the stringified PipeWire node id) and stores a
// copy of it. Returns 0 on success, -1 on an unparsable id or allocation
// failure.
int32_t VideoCaptureModulePipeWire::Init(const char* deviceUniqueId) {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  RTC_DCHECK_RUN_ON(&api_checker_);

  absl::optional<int> id;
  id = rtc::StringToNumber<int>(deviceUniqueId);
  if (id == absl::nullopt)
    return -1;

  node_id_ = id.value();

  const int len = strlen(deviceUniqueId);
  _deviceUniqueId = new (std::nothrow) char[len + 1];
  // std::nothrow returns nullptr on failure; the original memcpy'd into it
  // unchecked.
  if (!_deviceUniqueId)
    return -1;
  memcpy(_deviceUniqueId, deviceUniqueId, len + 1);

  return 0;
}
// Builds one SPA EnumFormat pod for stream negotiation: media subtype
// `format` (raw or mjpg), preferred size/frame-rate plus permissive ranges
// so the server can pick a close match. The returned pod lives in
// `builder`'s buffer, so it is only valid while that buffer is.
static spa_pod* BuildFormat(spa_pod_builder* builder,
                            uint32_t format,
                            uint32_t width,
                            uint32_t height,
                            float frame_rate) {
  spa_pod_frame frames[2];

  spa_pod_builder_push_object(builder, &frames[0], SPA_TYPE_OBJECT_Format,
                              SPA_PARAM_EnumFormat);
  spa_pod_builder_add(builder, SPA_FORMAT_mediaType,
                      SPA_POD_Id(SPA_MEDIA_TYPE_video), SPA_FORMAT_mediaSubtype,
                      SPA_POD_Id(format), 0);

  if (format == SPA_MEDIA_SUBTYPE_raw) {
    // Choice list of pixel formats: the first id is the default/preferred
    // entry, so kSupportedFormats[0] intentionally appears twice.
    spa_pod_builder_prop(builder, SPA_FORMAT_VIDEO_format, 0);
    spa_pod_builder_push_choice(builder, &frames[1], SPA_CHOICE_Enum, 0);
    spa_pod_builder_id(builder, kSupportedFormats[0].spa_format);
    for (const auto& spa_and_pixel_format : kSupportedFormats)
      spa_pod_builder_id(builder, spa_and_pixel_format.spa_format);
    spa_pod_builder_pop(builder, &frames[1]);
  }

  // Requested size is the preferred value inside a wide acceptable range.
  spa_rectangle preferred_size = spa_rectangle{width, height};
  spa_rectangle min_size = spa_rectangle{1, 1};
  spa_rectangle max_size = spa_rectangle{4096, 4096};
  spa_pod_builder_add(
      builder, SPA_FORMAT_VIDEO_size,
      SPA_POD_CHOICE_RANGE_Rectangle(&preferred_size, &min_size, &max_size), 0);

  // Same pattern for frame rate (preferred value within [0, INT32_MAX]).
  spa_fraction preferred_frame_rate =
      spa_fraction{static_cast<uint32_t>(frame_rate), 1};
  spa_fraction min_frame_rate = spa_fraction{0, 1};
  spa_fraction max_frame_rate = spa_fraction{INT32_MAX, 1};
  spa_pod_builder_add(
      builder, SPA_FORMAT_VIDEO_framerate,
      SPA_POD_CHOICE_RANGE_Fraction(&preferred_frame_rate, &min_frame_rate,
                                    &max_frame_rate),
      0);

  return static_cast<spa_pod*>(spa_pod_builder_pop(builder, &frames[0]));
}
// Creates and connects a PipeWire input stream for node_id_ negotiating
// `capability`. Re-calling with the same capability is a no-op; a different
// capability restarts the stream. Returns 0 on success, -1 on failure.
int32_t VideoCaptureModulePipeWire::StartCapture(
    const VideoCaptureCapability& capability) {
  RTC_DCHECK_RUN_ON(&api_checker_);

  if (initialized_) {
    if (capability == _requestedCapability) {
      return 0;
    } else {
      StopCapture();
    }
  }

  // Scratch buffer backing all SPA pods built below; must stay alive until
  // pw_stream_connect() has consumed `params`.
  uint8_t buffer[1024] = {};

  // We don't want members above to be guarded by capture_checker_ as
  // it's meant to be for members that are accessed on the API thread
  // only when we are not capturing. The code above can be called many
  // times while sharing instance of VideoCapturePipeWire between
  // websites and therefore it would not follow the requirements of this
  // checker.
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  PipeWireThreadLoopLock thread_loop_lock(session_->pw_main_loop_);

  RTC_LOG(LS_VERBOSE) << "Creating new PipeWire stream for node " << node_id_;

  pw_properties* reuse_props =
      pw_properties_new_string("pipewire.client.reuse=1");
  stream_ = pw_stream_new(session_->pw_core_, "camera-stream", reuse_props);

  if (!stream_) {
    RTC_LOG(LS_ERROR) << "Failed to create camera stream!";
    return -1;
  }

  static const pw_stream_events stream_events{
      .version = PW_VERSION_STREAM_EVENTS,
      .state_changed = &OnStreamStateChanged,
      .param_changed = &OnStreamParamChanged,
      .process = &OnStreamProcess,
  };

  pw_stream_add_listener(stream_, &stream_listener_, &stream_events, this);

  spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};
  std::vector<const spa_pod*> params;
  uint32_t width = capability.width;
  uint32_t height = capability.height;
  uint32_t frame_rate = capability.maxFPS;
  // Above 640x480, MJPEG is usually the cheaper transport, so list it first
  // (param order expresses preference to the server).
  bool prefer_jpeg = (width > 640) || (height > 480);

  params.push_back(
      BuildFormat(&builder, SPA_MEDIA_SUBTYPE_raw, width, height, frame_rate));
  params.insert(
      prefer_jpeg ? params.begin() : params.end(),
      BuildFormat(&builder, SPA_MEDIA_SUBTYPE_mjpg, width, height, frame_rate));

  int res = pw_stream_connect(
      stream_, PW_DIRECTION_INPUT, node_id_,
      static_cast<enum pw_stream_flags>(PW_STREAM_FLAG_AUTOCONNECT |
                                        PW_STREAM_FLAG_DONT_RECONNECT |
                                        PW_STREAM_FLAG_MAP_BUFFERS),
      params.data(), params.size());
  if (res != 0) {
    RTC_LOG(LS_ERROR) << "Could not connect to camera stream: "
                      << spa_strerror(res);
    return -1;
  }

  _requestedCapability = capability;
  initialized_ = true;

  return 0;
}
// Destroys the stream (if any) under the PipeWire loop lock and resets the
// requested capability. Always returns 0.
int32_t VideoCaptureModulePipeWire::StopCapture() {
  RTC_DCHECK_RUN_ON(&api_checker_);

  PipeWireThreadLoopLock thread_loop_lock(session_->pw_main_loop_);
  // PipeWireSession is guarded by API checker so just make sure we do
  // race detection when the PipeWire loop is locked/stopped to not run
  // any callback at this point.
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  if (stream_) {
    pw_stream_destroy(stream_);
    stream_ = nullptr;
  }

  _requestedCapability = VideoCaptureCapability();
  return 0;
}
// True while the stream is in the STREAMING state (maintained by
// OnStreamStateChanged); started_ is guarded by api_lock_.
bool VideoCaptureModulePipeWire::CaptureStarted() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  MutexLock lock(&api_lock_);

  return started_;
}
// Reports the capability last requested via StartCapture() (not necessarily
// the format the server actually negotiated). Always returns 0.
int32_t VideoCaptureModulePipeWire::CaptureSettings(
    VideoCaptureCapability& settings) {
  RTC_DCHECK_RUN_ON(&api_checker_);

  settings = _requestedCapability;
  return 0;
}
// static
// Stream param-changed callback; only a non-null Format pod is of interest
// and is forwarded to OnFormatChanged().
void VideoCaptureModulePipeWire::OnStreamParamChanged(
    void* data,
    uint32_t id,
    const struct spa_pod* format) {
  VideoCaptureModulePipeWire* that =
      static_cast<VideoCaptureModulePipeWire*>(data);
  RTC_DCHECK(that);
  RTC_CHECK_RUNS_SERIALIZED(&that->capture_checker_);

  if (id != SPA_PARAM_Format || format == nullptr)
    return;

  that->OnFormatChanged(format);
}
// Handles the negotiated stream format: records it in
// configured_capability_, then answers with buffer/meta parameters (stride,
// buffer count, memory types, header + video-transform metadata).
void VideoCaptureModulePipeWire::OnFormatChanged(const struct spa_pod* format) {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  uint32_t media_type, media_subtype;

  if (spa_format_parse(format, &media_type, &media_subtype) < 0) {
    RTC_LOG(LS_ERROR) << "Failed to parse video format.";
    return;
  }

  switch (media_subtype) {
    case SPA_MEDIA_SUBTYPE_raw: {
      struct spa_video_info_raw f;
      spa_format_video_raw_parse(format, &f);
      configured_capability_.width = f.size.width;
      configured_capability_.height = f.size.height;
      configured_capability_.videoType = PipeWireRawFormatToVideoType(f.format);
      // Integer division truncates fractional frame rates (e.g. 30000/1001).
      configured_capability_.maxFPS = f.framerate.num / f.framerate.denom;
      break;
    }
    case SPA_MEDIA_SUBTYPE_mjpg: {
      struct spa_video_info_mjpg f;
      spa_format_video_mjpg_parse(format, &f);
      configured_capability_.width = f.size.width;
      configured_capability_.height = f.size.height;
      configured_capability_.videoType = VideoType::kMJPEG;
      configured_capability_.maxFPS = f.framerate.num / f.framerate.denom;
      break;
    }
    default:
      configured_capability_.videoType = VideoType::kUnknown;
  }

  if (configured_capability_.videoType == VideoType::kUnknown) {
    RTC_LOG(LS_ERROR) << "Unsupported video format.";
    return;
  }
  RTC_LOG(LS_VERBOSE) << "Configured capture format = "
                      << static_cast<int>(configured_capability_.videoType);

  // Scratch buffer backing the reply pods; valid until
  // pw_stream_update_params() below has consumed them.
  uint8_t buffer[1024] = {};
  auto builder = spa_pod_builder{buffer, sizeof(buffer)};

  // Setup buffers and meta header for new format.
  std::vector<const spa_pod*> params;
  spa_pod_frame frame;
  spa_pod_builder_push_object(&builder, &frame, SPA_TYPE_OBJECT_ParamBuffers,
                              SPA_PARAM_Buffers);

  if (media_subtype == SPA_MEDIA_SUBTYPE_raw) {
    // Enforce stride without padding.
    size_t stride;
    switch (configured_capability_.videoType) {
      case VideoType::kI420:
      case VideoType::kNV12:
        stride = configured_capability_.width;
        break;
      case VideoType::kYUY2:
      case VideoType::kUYVY:
        stride = configured_capability_.width * 2;
        break;
      case VideoType::kRGB24:
        stride = configured_capability_.width * 3;
        break;
      default:
        RTC_LOG(LS_ERROR) << "Unsupported video format.";
        return;
    }
    spa_pod_builder_add(&builder, SPA_PARAM_BUFFERS_stride, SPA_POD_Int(stride),
                        0);
  }
  // 8 buffers preferred (1..32 acceptable); memfd or mapped pointers only.
  spa_pod_builder_add(
      &builder, SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 1, 32),
      SPA_PARAM_BUFFERS_dataType,
      SPA_POD_CHOICE_FLAGS_Int((1 << SPA_DATA_MemFd) | (1 << SPA_DATA_MemPtr)),
      0);
  params.push_back(
      static_cast<spa_pod*>(spa_pod_builder_pop(&builder, &frame)));

  // Request per-frame header (flags/timestamps) and rotation metadata.
  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
      SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_size,
      SPA_POD_Int(sizeof(struct spa_meta_header)))));
  params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
      &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
      SPA_POD_Id(SPA_META_VideoTransform), SPA_PARAM_META_size,
      SPA_POD_Int(sizeof(struct spa_meta_videotransform)))));
  pw_stream_update_params(stream_, params.data(), params.size());
}
// static
// Tracks stream state transitions; started_ (guarded by api_lock_) is true
// only in the STREAMING state. Errors are logged and treated as not-started.
void VideoCaptureModulePipeWire::OnStreamStateChanged(
    void* data,
    pw_stream_state old_state,
    pw_stream_state state,
    const char* error_message) {
  VideoCaptureModulePipeWire* that =
      static_cast<VideoCaptureModulePipeWire*>(data);
  RTC_DCHECK(that);

  MutexLock lock(&that->api_lock_);
  switch (state) {
    case PW_STREAM_STATE_STREAMING:
      that->started_ = true;
      break;
    case PW_STREAM_STATE_ERROR:
      RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message;
      [[fallthrough]];
    case PW_STREAM_STATE_PAUSED:
    case PW_STREAM_STATE_UNCONNECTED:
    case PW_STREAM_STATE_CONNECTING:
      that->started_ = false;
      break;
  }
  RTC_LOG(LS_VERBOSE) << "PipeWire stream state change: "
                      << pw_stream_state_as_string(old_state) << " -> "
                      << pw_stream_state_as_string(state);
}
// static
// Stream process callback: new buffers are ready; drain them on the loop
// thread via ProcessBuffers().
void VideoCaptureModulePipeWire::OnStreamProcess(void* data) {
  VideoCaptureModulePipeWire* that =
      static_cast<VideoCaptureModulePipeWire*>(data);
  RTC_DCHECK(that);
  RTC_CHECK_RUNS_SERIALIZED(&that->capture_checker_);
  that->ProcessBuffers();
}
// Translates the SPA video-transform value into a WebRTC rotation.
// Transforms other than plain 90/180/270 rotation (including flips) map to
// no rotation.
static VideoRotation VideorotationFromPipeWireTransform(uint32_t transform) {
  if (transform == SPA_META_TRANSFORMATION_90)
    return kVideoRotation_90;
  if (transform == SPA_META_TRANSFORMATION_180)
    return kVideoRotation_180;
  if (transform == SPA_META_TRANSFORMATION_270)
    return kVideoRotation_270;
  return kVideoRotation_0;
}
// Drains all queued stream buffers: applies rotation metadata when present,
// drops frames flagged corrupted, forwards the rest via IncomingFrame(), and
// returns every buffer to the stream.
void VideoCaptureModulePipeWire::ProcessBuffers() {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);

  while (pw_buffer* buffer = pw_stream_dequeue_buffer(stream_)) {
    // Per-frame header metadata (flags); may be absent for some producers,
    // so the dereference below is guarded.
    struct spa_meta_header* h;
    h = static_cast<struct spa_meta_header*>(
        spa_buffer_find_meta_data(buffer->buffer, SPA_META_Header, sizeof(*h)));

    // Optional rotation metadata attached by the producer.
    struct spa_meta_videotransform* videotransform;
    videotransform =
        static_cast<struct spa_meta_videotransform*>(spa_buffer_find_meta_data(
            buffer->buffer, SPA_META_VideoTransform, sizeof(*videotransform)));
    if (videotransform) {
      VideoRotation rotation =
          VideorotationFromPipeWireTransform(videotransform->transform);
      SetCaptureRotation(rotation);
      SetApplyRotation(rotation != kVideoRotation_0);
    }

    // The original dereferenced `h` unconditionally; a missing header meta
    // would crash. Also fixes the "corruped" typo in the log message.
    if (h && (h->flags & SPA_META_HEADER_FLAG_CORRUPTED)) {
      RTC_LOG(LS_INFO) << "Dropping corrupted frame.";
    } else {
      IncomingFrame(static_cast<unsigned char*>(buffer->buffer->datas[0].data),
                    buffer->buffer->datas[0].chunk->size,
                    configured_capability_);
    }
    pw_stream_queue_buffer(stream_, buffer);
  }
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,60 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_PIPEWIRE_H_
#define MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_PIPEWIRE_H_
#include "modules/video_capture/linux/pipewire_session.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
namespace webrtc {
namespace videocapturemodule {
// Video capture backend that pulls frames from a PipeWire camera stream.
// Lifecycle: construct with options holding a PipeWireSession, Init() with
// the device unique id, then StartCapture()/StopCapture().
class VideoCaptureModulePipeWire : public VideoCaptureImpl {
 public:
  explicit VideoCaptureModulePipeWire(VideoCaptureOptions* options);
  ~VideoCaptureModulePipeWire() override;
  int32_t Init(const char* deviceUniqueId);
  int32_t StartCapture(const VideoCaptureCapability& capability) override;
  int32_t StopCapture() override;
  bool CaptureStarted() override;
  int32_t CaptureSettings(VideoCaptureCapability& settings) override;
  // Maps a SPA raw video format id to the corresponding WebRTC VideoType.
  static VideoType PipeWireRawFormatToVideoType(uint32_t format);

 private:
  // PipeWire stream event callbacks; `data` is the module instance.
  static void OnStreamParamChanged(void* data,
                                   uint32_t id,
                                   const struct spa_pod* format);
  static void OnStreamStateChanged(void* data,
                                   pw_stream_state old_state,
                                   pw_stream_state state,
                                   const char* error_message);
  static void OnStreamProcess(void* data);
  void OnFormatChanged(const struct spa_pod* format);
  void ProcessBuffers();

  // Shared PipeWire connection; owned for the lifetime of this module.
  const rtc::scoped_refptr<PipeWireSession> session_
      RTC_GUARDED_BY(api_checker_);
  bool initialized_ RTC_GUARDED_BY(api_checker_);
  bool started_ RTC_GUARDED_BY(api_lock_);
  // PipeWire node id of the selected camera.
  int node_id_ RTC_GUARDED_BY(capture_checker_);
  // Capability actually negotiated with the stream (may differ from the
  // one requested by the caller).
  VideoCaptureCapability configured_capability_
      RTC_GUARDED_BY(capture_checker_);
  struct pw_stream* stream_ RTC_GUARDED_BY(capture_checker_) = nullptr;
  struct spa_hook stream_listener_ RTC_GUARDED_BY(capture_checker_);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_PIPEWIRE_H_

View file

@ -0,0 +1,496 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/video_capture_v4l2.h"
#include <errno.h>
#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <time.h>
#include <unistd.h>
#include <new>
#include <string>
#include "api/scoped_refptr.h"
#include "media/base/video_common.h"
#include "modules/video_capture/video_capture.h"
#include "rtc_base/logging.h"
// These defines are here to support building on kernel 3.16 which some
// downstream projects, e.g. Firefox, use.
// TODO(apehrson): Remove them and their undefs when no longer needed.
#ifndef V4L2_PIX_FMT_ABGR32
#define ABGR32_OVERRIDE 1
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_ARGB32
#define ARGB32_OVERRIDE 1
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_RGBA32
#define RGBA32_OVERRIDE 1
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4')
#endif
namespace webrtc {
namespace videocapturemodule {
// Constructs with no device selected and no buffers allocated; Init()
// selects the device and StartCapture() opens it.
VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
    : VideoCaptureImpl(),
      _deviceId(-1),
      _deviceFd(-1),
      _buffersAllocatedByDevice(-1),
      _captureStarted(false),
      _pool(NULL) {}
// Records the device unique id and resolves it to a /dev/videoN index by
// matching the V4L2 bus info of each node. Returns 0 on success, -1 when
// no device matches or allocation fails.
int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  const size_t len = strlen(deviceUniqueIdUTF8);
  // Release any id from a previous Init() call before storing the new one;
  // the base class destructor frees the final copy.
  delete[] _deviceUniqueId;
  _deviceUniqueId = new (std::nothrow) char[len + 1];
  if (!_deviceUniqueId) {
    return -1;
  }
  memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);

  int fd;
  char device[32];
  bool found = false;

  // Detect /dev/video [0-63] entries and find the node whose bus info
  // matches the requested unique id.
  int n;
  for (n = 0; n < 64; n++) {
    snprintf(device, sizeof(device), "/dev/video%d", n);
    if ((fd = open(device, O_RDONLY)) != -1) {
      // Query device capabilities to obtain the bus info.
      struct v4l2_capability cap;
      if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
        if (cap.bus_info[0] != 0 &&
            strncmp(reinterpret_cast<const char*>(cap.bus_info),
                    deviceUniqueIdUTF8, len) == 0) {
          close(fd);
          found = true;
          break;  // fd matches with device unique id supplied
        }
      }
      close(fd);  // close since this is not the matching device
    }
  }
  if (!found) {
    RTC_LOG(LS_INFO) << "no matching device found";
    return -1;
  }
  _deviceId = n;  // store the device id
  return 0;
}
// Stops any ongoing capture and closes the device file descriptor.
VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  StopCapture();
  // StopCapture() closes the fd only when streaming; close any fd that is
  // still open (e.g. a failed StartCapture() left it behind).
  if (_deviceFd != -1)
    close(_deviceFd);
}
// Opens the device, negotiates a pixel format and frame size as close as
// possible to `capability`, configures the frame rate, maps the capture
// buffers, turns streaming on and spawns the capture thread.
// Returns 0 on success, -1 on any failure.
int32_t VideoCaptureModuleV4L2::StartCapture(
    const VideoCaptureCapability& capability) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  // Restarting with the same capability is a no-op; a different capability
  // requires a full stop/start cycle.
  if (_captureStarted) {
    if (capability == _requestedCapability) {
      return 0;
    } else {
      StopCapture();
    }
  }
  // We don't want members above to be guarded by capture_checker_ as
  // it's meant to be for members that are accessed on the API thread
  // only when we are not capturing. The code above can be called many
  // times while sharing instance of VideoCaptureV4L2 between websites
  // and therefore it would not follow the requirements of this checker.
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  // Set a baseline of configured parameters. It is updated here during
  // configuration, then read from the capture thread.
  configured_capability_ = capability;
  MutexLock lock(&capture_lock_);
  // First open the /dev/video device selected by Init().
  char device[20];
  snprintf(device, sizeof(device), "/dev/video%d", _deviceId);
  if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0) {
    RTC_LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
    return -1;
  }
  // Supported video formats in preferred order.
  // If the requested resolution is larger than VGA, we prefer MJPEG. Go for
  // I420 otherwise.
  unsigned int hdFmts[] = {
      V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420,
      V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_NV12,
      V4L2_PIX_FMT_ABGR32, V4L2_PIX_FMT_ARGB32, V4L2_PIX_FMT_RGBA32,
      V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_BGR24,
      V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_JPEG,
  };
  unsigned int sdFmts[] = {
      V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_YUYV,
      V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_ABGR32,
      V4L2_PIX_FMT_ARGB32, V4L2_PIX_FMT_RGBA32, V4L2_PIX_FMT_BGR32,
      V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_BGR24, V4L2_PIX_FMT_RGB24,
      V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_JPEG,
  };
  const bool isHd = capability.width > 640 || capability.height > 480;
  unsigned int* fmts = isHd ? hdFmts : sdFmts;
  static_assert(sizeof(hdFmts) == sizeof(sdFmts));
  constexpr int nFormats = sizeof(hdFmts) / sizeof(unsigned int);
  // Enumerate image formats and pick the one the driver supports that
  // ranks highest in our preference list (lowest index in fmts).
  struct v4l2_fmtdesc fmt;
  int fmtsIdx = nFormats;
  memset(&fmt, 0, sizeof(fmt));
  fmt.index = 0;
  fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  RTC_LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
  while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
    RTC_LOG(LS_INFO) << "  { pixelformat = "
                     << cricket::GetFourccName(fmt.pixelformat)
                     << ", description = '" << fmt.description << "' }";
    // Match the preferred order.
    for (int i = 0; i < nFormats; i++) {
      if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
        fmtsIdx = i;
    }
    // Keep enumerating.
    fmt.index++;
  }
  if (fmtsIdx == nFormats) {
    RTC_LOG(LS_INFO) << "no supporting video formats found";
    return -1;
  } else {
    RTC_LOG(LS_INFO) << "We prefer format "
                     << cricket::GetFourccName(fmts[fmtsIdx]);
  }
  // Translate the chosen fourcc into the internal VideoType so the capture
  // thread knows how to convert frames later.
  struct v4l2_format video_fmt;
  memset(&video_fmt, 0, sizeof(struct v4l2_format));
  video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  video_fmt.fmt.pix.sizeimage = 0;
  video_fmt.fmt.pix.width = capability.width;
  video_fmt.fmt.pix.height = capability.height;
  video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
  if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
    configured_capability_.videoType = VideoType::kYUY2;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
    configured_capability_.videoType = VideoType::kI420;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420)
    configured_capability_.videoType = VideoType::kYV12;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
    configured_capability_.videoType = VideoType::kUYVY;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
    configured_capability_.videoType = VideoType::kNV12;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
    configured_capability_.videoType = VideoType::kRGB24;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
    configured_capability_.videoType = VideoType::kBGR24;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
    configured_capability_.videoType = VideoType::kRGB565;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_ABGR32 ||
           video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
    configured_capability_.videoType = VideoType::kARGB;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_ARGB32 ||
           video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
    configured_capability_.videoType = VideoType::kBGRA;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGBA32)
    configured_capability_.videoType = VideoType::kABGR;
  else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
           video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
    configured_capability_.videoType = VideoType::kMJPEG;
  else
    RTC_DCHECK_NOTREACHED();
  // Set format and frame size now.
  if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0) {
    RTC_LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
    return -1;
  }
  // Initialize current width and height from what the driver actually
  // accepted (it may adjust the requested size).
  configured_capability_.width = video_fmt.fmt.pix.width;
  configured_capability_.height = video_fmt.fmt.pix.height;
  // Trying to set frame rate, before check driver capability.
  bool driver_framerate_support = true;
  struct v4l2_streamparm streamparms;
  memset(&streamparms, 0, sizeof(streamparms));
  streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
    RTC_LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
    driver_framerate_support = false;
    // continue
  } else {
    // check the capability flag is set to V4L2_CAP_TIMEPERFRAME.
    if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
      // driver supports the feature. Set required framerate.
      memset(&streamparms, 0, sizeof(streamparms));
      streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      streamparms.parm.capture.timeperframe.numerator = 1;
      streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
      if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
        RTC_LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
        driver_framerate_support = false;
      }
    }
  }
  // If driver doesn't support framerate control, need to hardcode.
  // Hardcoding the value based on the frame size.
  if (!driver_framerate_support) {
    if (configured_capability_.width >= 800 &&
        configured_capability_.videoType != VideoType::kMJPEG) {
      configured_capability_.maxFPS = 15;
    } else {
      configured_capability_.maxFPS = 30;
    }
  }
  if (!AllocateVideoBuffers()) {
    RTC_LOG(LS_INFO) << "failed to allocate video capture buffers";
    return -1;
  }
  // Needed to start UVC camera - from the uvcview application.
  enum v4l2_buf_type type;
  type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1) {
    RTC_LOG(LS_INFO) << "Failed to turn on stream";
    return -1;
  }
  _requestedCapability = capability;
  _captureStarted = true;
  _streaming = true;
  // Start the capture thread; it loops in CaptureProcess() until quit_ is
  // set by StopCapture().
  if (_captureThread.empty()) {
    quit_ = false;
    _captureThread = rtc::PlatformThread::SpawnJoinable(
        [this] {
          while (CaptureProcess()) {
          }
        },
        "CaptureThread",
        rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kHigh));
  }
  return 0;
}
// Signals the capture thread to exit, joins it, then tears down streaming
// state: deallocates buffers, closes the device and resets capabilities.
// Safe to call when capture was never started.
int32_t VideoCaptureModuleV4L2::StopCapture() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  if (!_captureThread.empty()) {
    {
      // quit_ is set under the lock so the thread observes it on its next
      // CaptureProcess() iteration.
      MutexLock lock(&capture_lock_);
      quit_ = true;
    }
    // Make sure the capture thread stops using the mutex.
    _captureThread.Finalize();
  }
  _captureStarted = false;
  // With the capture thread joined, capture-state members may be touched.
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  MutexLock lock(&capture_lock_);
  if (_streaming) {
    _streaming = false;
    DeAllocateVideoBuffers();
    close(_deviceFd);
    _deviceFd = -1;
    _requestedCapability = configured_capability_ = VideoCaptureCapability();
  }
  return 0;
}
// critical section protected by the caller
bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
struct v4l2_requestbuffers rbuffer;
memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
rbuffer.memory = V4L2_MEMORY_MMAP;
rbuffer.count = kNoOfV4L2Bufffers;
if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0) {
RTC_LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
return false;
}
if (rbuffer.count > kNoOfV4L2Bufffers)
rbuffer.count = kNoOfV4L2Bufffers;
_buffersAllocatedByDevice = rbuffer.count;
// Map the buffers
_pool = new Buffer[rbuffer.count];
for (unsigned int i = 0; i < rbuffer.count; i++) {
struct v4l2_buffer buffer;
memset(&buffer, 0, sizeof(v4l2_buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.index = i;
if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0) {
return false;
}
_pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
MAP_SHARED, _deviceFd, buffer.m.offset);
if (MAP_FAILED == _pool[i].start) {
for (unsigned int j = 0; j < i; j++)
munmap(_pool[j].start, _pool[j].length);
return false;
}
_pool[i].length = buffer.length;
if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0) {
return false;
}
}
return true;
}
// Unmaps and frees the capture buffer pool, then turns streaming off.
// Nulls _pool after delete so a stale pointer cannot be unmapped or
// double-freed by a later call (the original left _pool dangling).
bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  // Unmap buffers.
  for (int i = 0; i < _buffersAllocatedByDevice; i++)
    munmap(_pool[i].start, _pool[i].length);
  delete[] _pool;
  _pool = nullptr;
  _buffersAllocatedByDevice = 0;
  // Turn off stream.
  enum v4l2_buf_type type;
  type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0) {
    RTC_LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
  }
  return true;
}
// Returns whether StartCapture() has succeeded and StopCapture() has not
// yet been called. API-thread only.
bool VideoCaptureModuleV4L2::CaptureStarted() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  return _captureStarted;
}
// One iteration of the capture-thread loop: waits (up to 1 s) for the
// device fd to become readable, dequeues a filled buffer, hands the raw
// data to IncomingFrame() and re-queues the buffer.
// Returns false when the loop should terminate (quit_ set or select error).
bool VideoCaptureModuleV4L2::CaptureProcess() {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  int retVal = 0;
  fd_set rSet;
  struct timeval timeout;
  FD_ZERO(&rSet);
  FD_SET(_deviceFd, &rSet);
  timeout.tv_sec = 1;
  timeout.tv_usec = 0;
  // _deviceFd written only in StartCapture, when this thread isn't running.
  retVal = select(_deviceFd + 1, &rSet, NULL, NULL, &timeout);
  {
    MutexLock lock(&capture_lock_);
    if (quit_) {
      return false;
    }
    if (retVal < 0 && errno != EINTR) {  // continue if interrupted
      // select failed
      return false;
    } else if (retVal == 0) {
      // select timed out
      return true;
    } else if (!FD_ISSET(_deviceFd, &rSet)) {
      // not event on camera handle
      return true;
    }
    if (_streaming) {
      struct v4l2_buffer buf;
      memset(&buf, 0, sizeof(struct v4l2_buffer));
      buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      buf.memory = V4L2_MEMORY_MMAP;
      // Dequeue a buffer - repeat until dequeued properly!
      while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0) {
        if (errno != EINTR) {
          RTC_LOG(LS_INFO) << "could not sync on a buffer on device "
                           << strerror(errno);
          return true;
        }
      }
      // Convert to I420 if needed.
      IncomingFrame(reinterpret_cast<uint8_t*>(_pool[buf.index].start),
                    buf.bytesused, configured_capability_);
      // Enqueue the buffer again.
      if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1) {
        RTC_LOG(LS_INFO) << "Failed to enqueue capture buffer";
      }
    }
  }
  // Yield before the next iteration.
  usleep(0);
  return true;
}
// Reports the capability the caller originally requested (not the one the
// driver actually configured). Always succeeds.
int32_t VideoCaptureModuleV4L2::CaptureSettings(
    VideoCaptureCapability& settings) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  settings = _requestedCapability;
  return 0;
}
} // namespace videocapturemodule
} // namespace webrtc
#ifdef ABGR32_OVERRIDE
#undef ABGR32_OVERRIDE
#undef V4L2_PIX_FMT_ABGR32
#endif
#ifdef ARGB32_OVERRIDE
#undef ARGB32_OVERRIDE
#undef V4L2_PIX_FMT_ARGB32
#endif
#ifdef RGBA32_OVERRIDE
#undef RGBA32_OVERRIDE
#undef V4L2_PIX_FMT_RGBA32
#endif

View file

@ -0,0 +1,64 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
#define MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
namespace videocapturemodule {
// Video capture backend using the Video4Linux2 (V4L2) mmap streaming API.
// A dedicated capture thread polls the device and delivers frames through
// the VideoCaptureImpl base class.
class VideoCaptureModuleV4L2 : public VideoCaptureImpl {
 public:
  VideoCaptureModuleV4L2();
  ~VideoCaptureModuleV4L2() override;
  int32_t Init(const char* deviceUniqueId);
  int32_t StartCapture(const VideoCaptureCapability& capability) override;
  int32_t StopCapture() override;
  bool CaptureStarted() override;
  int32_t CaptureSettings(VideoCaptureCapability& settings) override;

 private:
  // Number of driver buffers requested for the mmap queue.
  enum { kNoOfV4L2Bufffers = 4 };

  static void CaptureThread(void*);
  // One iteration of the capture loop; returns false to stop the thread.
  bool CaptureProcess();
  bool AllocateVideoBuffers() RTC_EXCLUSIVE_LOCKS_REQUIRED(capture_lock_);
  bool DeAllocateVideoBuffers() RTC_EXCLUSIVE_LOCKS_REQUIRED(capture_lock_);

  rtc::PlatformThread _captureThread RTC_GUARDED_BY(api_checker_);
  Mutex capture_lock_ RTC_ACQUIRED_BEFORE(api_lock_);
  // Capture-thread exit flag, checked each CaptureProcess() iteration.
  bool quit_ RTC_GUARDED_BY(capture_lock_);
  // Index N of the matched /dev/videoN node (set by Init()).
  int32_t _deviceId RTC_GUARDED_BY(api_checker_);
  int32_t _deviceFd RTC_GUARDED_BY(capture_checker_);
  int32_t _buffersAllocatedByDevice RTC_GUARDED_BY(capture_lock_);
  // Capability actually accepted by the driver (may differ from requested).
  VideoCaptureCapability configured_capability_
      RTC_GUARDED_BY(capture_checker_);
  bool _streaming RTC_GUARDED_BY(capture_checker_);
  bool _captureStarted RTC_GUARDED_BY(api_checker_);
  // One mmap-ed driver buffer.
  struct Buffer {
    void* start;
    size_t length;
  };
  Buffer* _pool RTC_GUARDED_BY(capture_lock_);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains interfaces used for creating the VideoCaptureModule
// and DeviceInfo.
#ifndef MODULES_VIDEO_CAPTURE_RAW_VIDEO_SINK_INTERFACE_H_
#define MODULES_VIDEO_CAPTURE_RAW_VIDEO_SINK_INTERFACE_H_
#include "modules/video_capture/video_capture_defines.h"
namespace webrtc {
// Callback interface for receiving unconverted (raw) captured frames,
// as an alternative to the converted VideoFrame sink.
class RawVideoSinkInterface {
 public:
  virtual ~RawVideoSinkInterface() = default;

  // Called for each captured frame.
  // videoFrame       - pointer to the raw frame bytes.
  // videoFrameLength - size of the frame data in bytes.
  // frameInfo        - dimensions and pixel format of the frame.
  // rotation         - rotation that must be applied for upright display.
  // captureTime      - capture timestamp.
  virtual int32_t OnRawFrame(uint8_t* videoFrame,
                             size_t videoFrameLength,
                             const webrtc::VideoCaptureCapability& frameInfo,
                             VideoRotation rotation,
                             int64_t captureTime) = 0;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_RAW_VIDEO_SINK_INTERFACE_H_

View file

@ -0,0 +1,118 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
#include "api/video/video_rotation.h"
#include "api/video/video_sink_interface.h"
#include "modules/video_capture/raw_video_sink_interface.h"
#include "modules/video_capture/video_capture_defines.h"
namespace webrtc {
// Abstract interface for a platform video capture module. Instances are
// created via VideoCaptureFactory and are reference counted.
class VideoCaptureModule : public RefCountInterface {
 public:
  // Interface for receiving information about available camera devices.
  class DeviceInfo {
   public:
    virtual uint32_t NumberOfDevices() = 0;
    // Returns the available capture devices.
    // deviceNumber - Index of capture device.
    // deviceNameUTF8 - Friendly name of the capture device.
    // deviceUniqueIdUTF8 - Unique name of the capture device if it exist.
    //                      Otherwise same as deviceNameUTF8.
    // productUniqueIdUTF8 - Unique product id if it exist.
    //                       Null terminated otherwise.
    virtual int32_t GetDeviceName(uint32_t deviceNumber,
                                  char* deviceNameUTF8,
                                  uint32_t deviceNameLength,
                                  char* deviceUniqueIdUTF8,
                                  uint32_t deviceUniqueIdUTF8Length,
                                  char* productUniqueIdUTF8 = 0,
                                  uint32_t productUniqueIdUTF8Length = 0) = 0;

    // Returns the number of capabilities this device.
    virtual int32_t NumberOfCapabilities(const char* deviceUniqueIdUTF8) = 0;

    // Gets the capabilities of the named device.
    virtual int32_t GetCapability(const char* deviceUniqueIdUTF8,
                                  uint32_t deviceCapabilityNumber,
                                  VideoCaptureCapability& capability) = 0;

    // Gets clockwise angle the captured frames should be rotated in order
    // to be displayed correctly on a normally rotated display.
    virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
                                   VideoRotation& orientation) = 0;

    // Gets the capability that best matches the requested width, height and
    // frame rate.
    // Returns the deviceCapabilityNumber on success.
    virtual int32_t GetBestMatchedCapability(
        const char* deviceUniqueIdUTF8,
        const VideoCaptureCapability& requested,
        VideoCaptureCapability& resulting) = 0;

    // Display OS /capture device specific settings dialog
    virtual int32_t DisplayCaptureSettingsDialogBox(
        const char* deviceUniqueIdUTF8,
        const char* dialogTitleUTF8,
        void* parentWindow,
        uint32_t positionX,
        uint32_t positionY) = 0;

    virtual ~DeviceInfo() {}
  };

  // Register capture data callback (converted VideoFrame sink).
  virtual void RegisterCaptureDataCallback(
      rtc::VideoSinkInterface<VideoFrame>* dataCallback) = 0;
  // Register capture data callback (raw, unconverted frames). Only one of
  // the two callback kinds may be registered at a time.
  virtual void RegisterCaptureDataCallback(
      RawVideoSinkInterface* dataCallback) = 0;

  //  Remove capture data callback
  virtual void DeRegisterCaptureDataCallback() = 0;

  // Start capture device
  virtual int32_t StartCapture(const VideoCaptureCapability& capability) = 0;

  virtual int32_t StopCapture() = 0;

  // Returns the name of the device used by this module.
  virtual const char* CurrentDeviceName() const = 0;

  // Returns true if the capture device is running
  virtual bool CaptureStarted() = 0;

  // Gets the current configuration.
  virtual int32_t CaptureSettings(VideoCaptureCapability& settings) = 0;

  // Set the rotation of the captured frames.
  // If the rotation is set to the same as returned by
  // DeviceInfo::GetOrientation the captured frames are
  // displayed correctly if rendered.
  virtual int32_t SetCaptureRotation(VideoRotation rotation) = 0;

  // Tells the capture module whether to apply the pending rotation. By default,
  // the rotation is applied and the generated frame is up right. When set to
  // false, generated frames will carry the rotation information from
  // SetCaptureRotation. Return value indicates whether this operation succeeds.
  virtual bool SetApplyRotation(bool enable) = 0;

  // Return whether the rotation is applied or left pending.
  virtual bool GetApplyRotation() = 0;

 protected:
  ~VideoCaptureModule() override {}
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_

View file

@ -0,0 +1,33 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_CONFIG_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_CONFIG_H_
namespace webrtc {
namespace videocapturemodule {
enum { kDefaultWidth = 640 };   // Start width
enum { kDefaultHeight = 480 };  // Start height
enum { kDefaultFrameRate = 30 };  // Start frame rate
enum { kMaxFrameRate = 60 };  // Max allowed frame rate of the start image
enum { kDefaultCaptureDelay = 120 };  // Default capture delay value.
enum {
  kMaxCaptureDelay = 270
};  // Max capture delay allowed in the precompiled capture delay values.
// Frame-rate statistics parameters used by the capture implementation.
enum { kFrameRateCallbackInterval = 1000 };
enum { kFrameRateCountHistorySize = 90 };
enum { kFrameRateHistoryWindowMs = 2000 };
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_CONFIG_H_

View file

@ -0,0 +1,59 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
#include "api/video/video_frame.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
namespace webrtc {
enum {
  kVideoCaptureUniqueNameLength = 1024
};  // Max unique capture device name length
enum { kVideoCaptureDeviceNameLength = 256 };  // Max capture device name length
enum { kVideoCaptureProductIdLength = 128 };   // Max product id length
struct VideoCaptureCapability {
int32_t width;
int32_t height;
int32_t maxFPS;
VideoType videoType;
bool interlaced;
VideoCaptureCapability() {
width = 0;
height = 0;
maxFPS = 0;
videoType = VideoType::kUnknown;
interlaced = false;
}
bool operator!=(const VideoCaptureCapability& other) const {
if (width != other.width)
return true;
if (height != other.height)
return true;
if (maxFPS != other.maxFPS)
return true;
if (videoType != other.videoType)
return true;
if (interlaced != other.interlaced)
return true;
return false;
}
bool operator==(const VideoCaptureCapability& other) const {
return !operator!=(other);
}
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/video_capture_factory.h"
#include "modules/video_capture/video_capture_impl.h"
namespace webrtc {
// Creates a capture module for the given device unique id, or nullptr on
// platforms (Android, Mac) where internal capture is not implemented.
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
    const char* deviceUniqueIdUTF8) {
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC)
  return nullptr;
#else
  return videocapturemodule::VideoCaptureImpl::Create(deviceUniqueIdUTF8);
#endif
}
// Creates a capture module configured by `options` (e.g. PipeWire vs V4L2
// backend selection), or nullptr on unsupported platforms.
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
    VideoCaptureOptions* options,
    const char* deviceUniqueIdUTF8) {
// This is only implemented on pure Linux and WEBRTC_LINUX is defined for
// Android as well
#if !defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
  return nullptr;
#else
  return videocapturemodule::VideoCaptureImpl::Create(options,
                                                      deviceUniqueIdUTF8);
#endif
}
// Creates a device enumerator; the caller takes ownership. Returns nullptr
// on platforms (Android, Mac) where internal capture is not implemented.
VideoCaptureModule::DeviceInfo* VideoCaptureFactory::CreateDeviceInfo() {
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC)
  return nullptr;
#else
  return videocapturemodule::VideoCaptureImpl::CreateDeviceInfo();
#endif
}
// Creates a device enumerator configured by `options`; the caller takes
// ownership. Returns nullptr on unsupported platforms.
VideoCaptureModule::DeviceInfo* VideoCaptureFactory::CreateDeviceInfo(
    VideoCaptureOptions* options) {
// This is only implemented on pure Linux and WEBRTC_LINUX is defined for
// Android as well
#if !defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
  return nullptr;
#else
  return videocapturemodule::VideoCaptureImpl::CreateDeviceInfo(options);
#endif
}
} // namespace webrtc

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains interfaces used for creating the VideoCaptureModule
// and DeviceInfo.
#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
#include "api/scoped_refptr.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class VideoCaptureOptions;
// Static factory for creating VideoCaptureModule instances and their
// DeviceInfo enumerators. Not instantiable.
class RTC_EXPORT VideoCaptureFactory {
 public:
  // Create a video capture module object
  // id - unique identifier of this video capture module object.
  // deviceUniqueIdUTF8 - name of the device.
  //                      Available names can be found by using GetDeviceName
  static rtc::scoped_refptr<VideoCaptureModule> Create(
      const char* deviceUniqueIdUTF8);
  // As above, with explicit capture options (e.g. backend selection).
  static rtc::scoped_refptr<VideoCaptureModule> Create(
      VideoCaptureOptions* options,
      const char* deviceUniqueIdUTF8);

  // Device enumerators; the caller takes ownership of the returned object.
  static VideoCaptureModule::DeviceInfo* CreateDeviceInfo();
  static VideoCaptureModule::DeviceInfo* CreateDeviceInfo(
      VideoCaptureOptions* options);

 private:
  // Private destructor: this class only has static members.
  ~VideoCaptureFactory();
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/video_capture_impl.h"
namespace webrtc {
namespace videocapturemodule {
// static
// Null implementation for platforms without internal capture: device
// enumeration is unavailable, so always returns nullptr.
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
  return nullptr;
}
// Null implementation for platforms without internal capture: no module
// can be created, so always returns nullptr.
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
    const char* device_id) {
  return nullptr;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,314 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/video_capture_impl.h"
#include <stdlib.h>
#include <string.h>
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_capture/video_capture_config.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "third_party/libyuv/include/libyuv.h"
namespace webrtc {
namespace videocapturemodule {
// Returns the unique id of the device this module was initialized with
// (may be null before Init). API-thread only.
const char* VideoCaptureImpl::CurrentDeviceName() const {
  RTC_DCHECK_RUN_ON(&api_checker_);
  return _deviceUniqueId;
}
// static
// Maps a rotation given in degrees onto the VideoRotation enum. Only the
// four right-angle rotations are representable; any other angle fails.
// Returns 0 on success (writing *rotation), -1 for unsupported angles.
// (Also removes a stray empty statement after the default return.)
int32_t VideoCaptureImpl::RotationFromDegrees(int degrees,
                                              VideoRotation* rotation) {
  switch (degrees) {
    case 0:
      *rotation = kVideoRotation_0;
      return 0;
    case 90:
      *rotation = kVideoRotation_90;
      return 0;
    case 180:
      *rotation = kVideoRotation_180;
      return 0;
    case 270:
      *rotation = kVideoRotation_270;
      return 0;
    default:
      return -1;
  }
}
// static
// Inverse of RotationFromDegrees(): writes the angle corresponding to
// `rotation` into `degrees` and returns 0; returns -1 (leaving `degrees`
// untouched) if the enum value is not recognized.
int32_t VideoCaptureImpl::RotationInDegrees(VideoRotation rotation,
                                            int* degrees) {
  int angle;
  switch (rotation) {
    case kVideoRotation_0:
      angle = 0;
      break;
    case kVideoRotation_90:
      angle = 90;
      break;
    case kVideoRotation_180:
      angle = 180;
      break;
    case kVideoRotation_270:
      angle = 270;
      break;
    default:
      return -1;
  }
  *degrees = angle;
  return 0;
}
VideoCaptureImpl::VideoCaptureImpl()
    : _deviceUniqueId(NULL),
      _requestedCapability(),
      _lastProcessTimeNanos(rtc::TimeNanos()),
      _lastFrameRateCallbackTimeNanos(rtc::TimeNanos()),
      _dataCallBack(NULL),
      _rawDataCallBack(NULL),
      _lastProcessFrameTimeNanos(rtc::TimeNanos()),
      _rotateFrame(kVideoRotation_0),
      apply_rotation_(false) {
  // Seed the requested capability with usable defaults so StartCapture has
  // sane values even if the caller never sets a capability explicitly.
  _requestedCapability.width = kDefaultWidth;
  _requestedCapability.height = kDefaultHeight;
  _requestedCapability.maxFPS = 30;
  _requestedCapability.videoType = VideoType::kI420;
  // Zero the timestamp history used for frame-rate estimation; a zero entry
  // marks an unused slot (see UpdateFrameCount/CalculateFrameRate).
  memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
}
VideoCaptureImpl::~VideoCaptureImpl() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  // Drop both sinks so no frame delivery can race destruction.
  DeRegisterCaptureDataCallback();
  // delete[] on a null pointer is a no-op, so the previous null check was
  // redundant.
  delete[] _deviceUniqueId;
}
// Registers a VideoFrame sink that receives converted I420 frames.
// Only one sink kind may be active at a time: this is mutually exclusive
// with the raw-frame sink (enforced by the DCHECK below).
void VideoCaptureImpl::RegisterCaptureDataCallback(
    rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
  MutexLock lock(&api_lock_);
  RTC_DCHECK(!_rawDataCallBack);
  _dataCallBack = dataCallBack;
}
// Registers a raw sink that receives the unconverted capture buffers.
// Mutually exclusive with the VideoFrame sink (enforced by the DCHECK).
void VideoCaptureImpl::RegisterCaptureDataCallback(
    RawVideoSinkInterface* dataCallBack) {
  MutexLock lock(&api_lock_);
  RTC_DCHECK(!_dataCallBack);
  _rawDataCallBack = dataCallBack;
}
// Clears both sink pointers; after this call no frames are delivered.
// (NULL was replaced with nullptr for consistency with the rest of the file.)
void VideoCaptureImpl::DeRegisterCaptureDataCallback() {
  MutexLock lock(&api_lock_);
  _dataCallBack = nullptr;
  _rawDataCallBack = nullptr;
}
// Records the frame's arrival time for frame-rate statistics and hands it to
// the registered VideoFrame sink, if any. Always returns 0. Requires
// api_lock_ (see the RTC_EXCLUSIVE_LOCKS_REQUIRED annotation in the header).
int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  UpdateFrameCount();  // frame count used for local frame rate callback.
  if (_dataCallBack) {
    _dataCallBack->OnFrame(captureFrame);
  }
  return 0;
}
// Forwards an unconverted buffer to the raw sink together with the current
// rotation. Callers must only invoke this when a raw sink is registered
// (IncomingFrame checks `_rawDataCallBack` before calling). Requires
// api_lock_ per the header annotation.
void VideoCaptureImpl::DeliverRawFrame(uint8_t* videoFrame,
                                       size_t videoFrameLength,
                                       const VideoCaptureCapability& frameInfo,
                                       int64_t captureTime) {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  UpdateFrameCount();
  _rawDataCallBack->OnRawFrame(videoFrame, videoFrameLength, frameInfo,
                               _rotateFrame, captureTime);
}
// Entry point for platform backends delivering a captured buffer.
// If a raw sink is registered the buffer is forwarded untouched; otherwise it
// is converted (and optionally rotated) to I420 and delivered as a
// VideoFrame. `captureTime` is in NTP milliseconds (0 means "unknown").
// Returns 0 on success, -1 on a short buffer or conversion failure.
// The only behavioral change in this revision is the error log below, which
// previously read "...<type>to I420." with a missing space.
int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,
                                        size_t videoFrameLength,
                                        const VideoCaptureCapability& frameInfo,
                                        int64_t captureTime /*=0*/) {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  MutexLock lock(&api_lock_);

  const int32_t width = frameInfo.width;
  const int32_t height = frameInfo.height;

  TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);

  if (_rawDataCallBack) {
    DeliverRawFrame(videoFrame, videoFrameLength, frameInfo, captureTime);
    return 0;
  }

  // Not encoded, convert to I420.
  if (frameInfo.videoType != VideoType::kMJPEG) {
    // Allow buffers larger than expected. On linux gstreamer allocates buffers
    // page-aligned and v4l2loopback passes us the buffer size verbatim which
    // for most cases is larger than expected.
    // See https://github.com/umlaeute/v4l2loopback/issues/190.
    if (auto size = CalcBufferSize(frameInfo.videoType, width, abs(height));
        videoFrameLength < size) {
      RTC_LOG(LS_ERROR) << "Wrong incoming frame length. Expected " << size
                        << ", Got " << videoFrameLength << ".";
      return -1;
    }
  }

  int stride_y = width;
  int stride_uv = (width + 1) / 2;
  int target_width = width;
  int target_height = abs(height);

  if (apply_rotation_) {
    // Rotating resolution when for 90/270 degree rotations.
    if (_rotateFrame == kVideoRotation_90 ||
        _rotateFrame == kVideoRotation_270) {
      target_width = abs(height);
      target_height = width;
    }
  }

  // Setting absolute height (in case it was negative).
  // In Windows, the image starts bottom left, instead of top left.
  // Setting a negative source height, inverts the image (within LibYuv).
  rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
      target_width, target_height, stride_y, stride_uv, stride_uv);

  libyuv::RotationMode rotation_mode = libyuv::kRotate0;
  if (apply_rotation_) {
    switch (_rotateFrame) {
      case kVideoRotation_0:
        rotation_mode = libyuv::kRotate0;
        break;
      case kVideoRotation_90:
        rotation_mode = libyuv::kRotate90;
        break;
      case kVideoRotation_180:
        rotation_mode = libyuv::kRotate180;
        break;
      case kVideoRotation_270:
        rotation_mode = libyuv::kRotate270;
        break;
    }
  }

  const int conversionResult = libyuv::ConvertToI420(
      videoFrame, videoFrameLength, buffer.get()->MutableDataY(),
      buffer.get()->StrideY(), buffer.get()->MutableDataU(),
      buffer.get()->StrideU(), buffer.get()->MutableDataV(),
      buffer.get()->StrideV(), 0, 0,  // No Cropping
      width, height, target_width, target_height, rotation_mode,
      ConvertVideoType(frameInfo.videoType));
  if (conversionResult != 0) {
    RTC_LOG(LS_ERROR) << "Failed to convert capture frame from type "
                      << static_cast<int>(frameInfo.videoType) << " to I420.";
    return -1;
  }

  // If rotation was applied during conversion the delivered frame is upright
  // (kVideoRotation_0); otherwise the pending rotation is attached as
  // metadata for downstream consumers.
  VideoFrame captureFrame =
      VideoFrame::Builder()
          .set_video_frame_buffer(buffer)
          .set_timestamp_rtp(0)
          .set_timestamp_ms(rtc::TimeMillis())
          .set_rotation(!apply_rotation_ ? _rotateFrame : kVideoRotation_0)
          .build();
  captureFrame.set_ntp_time_ms(captureTime);

  DeliverCapturedFrame(captureFrame);

  return 0;
}
// Base-class fallback: records the requested capability and reports failure.
// Platform subclasses override this to actually start the device.
int32_t VideoCaptureImpl::StartCapture(
    const VideoCaptureCapability& capability) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  _requestedCapability = capability;
  return -1;
}
// Base-class fallback; overridden by platform implementations.
int32_t VideoCaptureImpl::StopCapture() {
  return -1;
}
// Base-class fallback; overridden by platform implementations.
bool VideoCaptureImpl::CaptureStarted() {
  return false;
}
// Base-class fallback; overridden by platform implementations.
int32_t VideoCaptureImpl::CaptureSettings(
    VideoCaptureCapability& /*settings*/) {
  return -1;
}
// Stores the rotation of incoming frames. IncomingFrame either applies it
// during I420 conversion (when apply_rotation_ is set) or attaches it to the
// delivered VideoFrame as metadata. Always returns 0.
int32_t VideoCaptureImpl::SetCaptureRotation(VideoRotation rotation) {
  MutexLock lock(&api_lock_);
  _rotateFrame = rotation;
  return 0;
}
// Selects whether rotation is baked into the pixels during conversion
// (true) or merely reported as frame metadata (false). Always succeeds.
bool VideoCaptureImpl::SetApplyRotation(bool enable) {
  MutexLock lock(&api_lock_);
  apply_rotation_ = enable;
  return true;
}
// Returns whether rotation is applied to pixels before delivery.
bool VideoCaptureImpl::GetApplyRotation() {
  MutexLock lock(&api_lock_);
  return apply_rotation_;
}
// Pushes the current time onto the front of the frame-timestamp history used
// by CalculateFrameRate(); older samples shift one slot down and the oldest
// falls off the end.
void VideoCaptureImpl::UpdateFrameCount() {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
  // A zero first entry (at microsecond resolution) means the history is still
  // empty, so there is nothing to shift.
  const bool history_populated =
      _incomingFrameTimesNanos[0] / rtc::kNumNanosecsPerMicrosec != 0;
  if (history_populated) {
    // Age every sample by one slot (overlap-safe with memmove).
    memmove(&_incomingFrameTimesNanos[1], &_incomingFrameTimesNanos[0],
            (kFrameRateCountHistorySize - 1) *
                sizeof(_incomingFrameTimesNanos[0]));
  }
  // The newest sample always lands in slot 0.
  _incomingFrameTimesNanos[0] = rtc::TimeNanos();
}
// Counts the history samples that fall within the trailing
// kFrameRateHistoryWindowMs window and converts that count into frames per
// second, rounded to the nearest integer. Falls back to returning the raw
// count when no positive time span is available.
uint32_t VideoCaptureImpl::CalculateFrameRate(int64_t now_ns) {
  RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);

  int32_t num = 0;
  int32_t nrOfFrames = 0;
  for (num = 1; num < (kFrameRateCountHistorySize - 1); ++num) {
    // A non-positive timestamp marks an unused slot; stop there or at the
    // first sample older than the window.
    if (_incomingFrameTimesNanos[num] <= 0 ||
        (now_ns - _incomingFrameTimesNanos[num]) /
                rtc::kNumNanosecsPerMillisec >
            kFrameRateHistoryWindowMs) {  // don't use data older than 2sec
      break;
    } else {
      nrOfFrames++;
    }
  }
  if (num > 1) {
    // Span between now and the oldest in-window sample, in milliseconds.
    int64_t diff = (now_ns - _incomingFrameTimesNanos[num - 1]) /
                   rtc::kNumNanosecsPerMillisec;
    if (diff > 0) {
      // +0.5f rounds to the nearest integer fps.
      return uint32_t((nrOfFrames * 1000.0f / diff) + 0.5f);
    }
  }
  return nrOfFrames;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,133 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_IMPL_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_IMPL_H_
/*
* video_capture_impl.h
*/
#include <stddef.h>
#include <stdint.h>
#include "api/scoped_refptr.h"
#include "api/sequence_checker.h"
#include "api/video/video_frame.h"
#include "api/video/video_rotation.h"
#include "api/video/video_sink_interface.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_config.h"
#include "modules/video_capture/video_capture_defines.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class VideoCaptureOptions;
namespace videocapturemodule {
// Class definitions
// Platform-independent base for VideoCaptureModule implementations: handles
// sink registration, rotation bookkeeping, frame-rate statistics and the
// conversion of incoming buffers to I420. Platform backends override the
// Start/Stop/settings methods. The only code change in this revision drops a
// redundant `virtual` from a declaration already marked `override`, matching
// the sibling overload.
class RTC_EXPORT VideoCaptureImpl : public VideoCaptureModule {
 public:
  /*
   * Create a video capture module object
   *
   * id - unique identifier of this video capture module object
   * deviceUniqueIdUTF8 - name of the device. Available names can be found by
   * using GetDeviceName
   */
  static rtc::scoped_refptr<VideoCaptureModule> Create(
      const char* deviceUniqueIdUTF8);
  static rtc::scoped_refptr<VideoCaptureModule> Create(
      VideoCaptureOptions* options,
      const char* deviceUniqueIdUTF8);

  static DeviceInfo* CreateDeviceInfo();
  static DeviceInfo* CreateDeviceInfo(VideoCaptureOptions* options);

  // Helpers for converting between (integral) degrees and
  // VideoRotation values. Return 0 on success.
  static int32_t RotationFromDegrees(int degrees, VideoRotation* rotation);
  static int32_t RotationInDegrees(VideoRotation rotation, int* degrees);

  // Call backs. Registering one sink kind excludes the other (checked with
  // RTC_DCHECK in the implementation).
  void RegisterCaptureDataCallback(
      rtc::VideoSinkInterface<VideoFrame>* dataCallback) override;
  void RegisterCaptureDataCallback(
      RawVideoSinkInterface* dataCallback) override;
  void DeRegisterCaptureDataCallback() override;

  int32_t SetCaptureRotation(VideoRotation rotation) override;
  bool SetApplyRotation(bool enable) override;
  bool GetApplyRotation() override;

  const char* CurrentDeviceName() const override;

  // `capture_time` must be specified in NTP time format in milliseconds.
  int32_t IncomingFrame(uint8_t* videoFrame,
                        size_t videoFrameLength,
                        const VideoCaptureCapability& frameInfo,
                        int64_t captureTime = 0);

  // Platform dependent
  int32_t StartCapture(const VideoCaptureCapability& capability) override;
  int32_t StopCapture() override;
  bool CaptureStarted() override;
  int32_t CaptureSettings(VideoCaptureCapability& /*settings*/) override;

 protected:
  VideoCaptureImpl();
  ~VideoCaptureImpl() override;

  // Calls to the public API must happen on a single thread.
  SequenceChecker api_checker_;
  // RaceChecker for members that can be accessed on the API thread while
  // capture is not happening, and on a callback thread otherwise.
  rtc::RaceChecker capture_checker_;
  // current Device unique name;
  char* _deviceUniqueId RTC_GUARDED_BY(api_checker_);
  Mutex api_lock_;
  // Should be set by platform dependent code in StartCapture.
  VideoCaptureCapability _requestedCapability RTC_GUARDED_BY(api_checker_);

 private:
  void UpdateFrameCount();
  uint32_t CalculateFrameRate(int64_t now_ns);
  // Delivers a converted frame to the registered VideoFrame sink.
  int32_t DeliverCapturedFrame(VideoFrame& captureFrame)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(api_lock_);
  // Delivers an unconverted buffer to the registered raw sink.
  void DeliverRawFrame(uint8_t* videoFrame,
                       size_t videoFrameLength,
                       const VideoCaptureCapability& frameInfo,
                       int64_t captureTime)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(api_lock_);

  // last time the module process function was called.
  int64_t _lastProcessTimeNanos RTC_GUARDED_BY(capture_checker_);
  // last time the frame rate callback function was called.
  int64_t _lastFrameRateCallbackTimeNanos RTC_GUARDED_BY(capture_checker_);

  rtc::VideoSinkInterface<VideoFrame>* _dataCallBack RTC_GUARDED_BY(api_lock_);
  RawVideoSinkInterface* _rawDataCallBack RTC_GUARDED_BY(api_lock_);

  int64_t _lastProcessFrameTimeNanos RTC_GUARDED_BY(capture_checker_);
  // timestamp for local captured frames
  int64_t _incomingFrameTimesNanos[kFrameRateCountHistorySize] RTC_GUARDED_BY(
      capture_checker_);
  // Set if the frame should be rotated by the capture module.
  VideoRotation _rotateFrame RTC_GUARDED_BY(api_lock_);
  // Indicate whether rotation should be applied before delivered externally.
  bool apply_rotation_ RTC_GUARDED_BY(api_lock_);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_VIDEO_CAPTURE_IMPL_H_

View file

@ -0,0 +1,55 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/video_capture_options.h"
#if defined(WEBRTC_USE_PIPEWIRE)
#include "modules/video_capture/linux/pipewire_session.h"
#endif
namespace webrtc {
// All special members are compiler-generated: the class is a plain option
// bag whose members manage themselves, so `= default` (the idiomatic form,
// replacing the previous empty `{}` bodies on the constructor/destructor) is
// used throughout. They stay defined out-of-line — presumably because
// scoped_refptr<PipeWireSession> needs the complete PipeWireSession type,
// which the header only forward-declares; confirm before moving them
// in-class.
VideoCaptureOptions::VideoCaptureOptions() = default;
VideoCaptureOptions::VideoCaptureOptions(const VideoCaptureOptions& options) =
    default;
VideoCaptureOptions::VideoCaptureOptions(VideoCaptureOptions&& options) =
    default;
VideoCaptureOptions::~VideoCaptureOptions() = default;

VideoCaptureOptions& VideoCaptureOptions::operator=(
    const VideoCaptureOptions& options) = default;
VideoCaptureOptions& VideoCaptureOptions::operator=(
    VideoCaptureOptions&& options) = default;
// Kicks off backend initialization and reports the outcome through
// `callback`. On the PipeWire path the session performs its own init and
// invokes the callback when done; on the other paths the callback is invoked
// synchronously before this function returns.
void VideoCaptureOptions::Init(Callback* callback) {
#if defined(WEBRTC_USE_PIPEWIRE)
  if (allow_pipewire_) {
    pipewire_session_ =
        rtc::make_ref_counted<videocapturemodule::PipeWireSession>();
    pipewire_session_->Init(callback, pipewire_fd_);
    return;
  }
#endif
#if defined(WEBRTC_LINUX)
  // On Linux, V4L2 is the only non-PipeWire backend; report UNAVAILABLE when
  // it is disallowed.
  if (!allow_v4l2_)
    callback->OnInitialized(Status::UNAVAILABLE);
  else
#endif
    callback->OnInitialized(Status::SUCCESS);
}
#if defined(WEBRTC_USE_PIPEWIRE)
// Returns the session created by Init() on the PipeWire path; null if Init()
// has not run (or took a different path).
rtc::scoped_refptr<videocapturemodule::PipeWireSession>
VideoCaptureOptions::pipewire_session() {
  return pipewire_session_;
}
#endif
} // namespace webrtc

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_OPTIONS_H_
#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_OPTIONS_H_
#include "api/scoped_refptr.h"
#include "rtc_base/system/rtc_export.h"
#if defined(WEBRTC_USE_PIPEWIRE)
#include "modules/portal/pipewire_utils.h"
#endif
namespace webrtc {
#if defined(WEBRTC_USE_PIPEWIRE)
namespace videocapturemodule {
class PipeWireSession;
}
#endif
// An object that stores initialization parameters for video capturers
class RTC_EXPORT VideoCaptureOptions {
 public:
  VideoCaptureOptions();
  VideoCaptureOptions(const VideoCaptureOptions& options);
  VideoCaptureOptions(VideoCaptureOptions&& options);
  ~VideoCaptureOptions();

  VideoCaptureOptions& operator=(const VideoCaptureOptions& options);
  VideoCaptureOptions& operator=(VideoCaptureOptions&& options);

  // Outcome reported by Init() through Callback::OnInitialized().
  enum class Status {
    SUCCESS,
    UNINITIALIZED,
    UNAVAILABLE,
    DENIED,
    ERROR,
    MAX_VALUE = ERROR
  };

  // Receives the (possibly asynchronous) result of Init(). The destructor is
  // protected: ownership stays with the implementer.
  class Callback {
   public:
    virtual void OnInitialized(Status status) = 0;

   protected:
    virtual ~Callback() = default;
  };

  // Starts backend initialization; `callback` is invoked with the outcome
  // (asynchronously on the PipeWire path, synchronously otherwise).
  void Init(Callback* callback);

#if defined(WEBRTC_LINUX)
  bool allow_v4l2() const { return allow_v4l2_; }
  void set_allow_v4l2(bool allow) { allow_v4l2_ = allow; }
#endif

#if defined(WEBRTC_USE_PIPEWIRE)
  bool allow_pipewire() const { return allow_pipewire_; }
  void set_allow_pipewire(bool allow) { allow_pipewire_ = allow; }
  // File descriptor of an externally established PipeWire connection;
  // kInvalidPipeWireFd (the default) presumably makes the session open its
  // own connection — confirm against PipeWireSession::Init.
  void set_pipewire_fd(int fd) { pipewire_fd_ = fd; }
  // Session created by Init() on the PipeWire path; null before then.
  rtc::scoped_refptr<videocapturemodule::PipeWireSession> pipewire_session();
#endif

 private:
#if defined(WEBRTC_LINUX)
  bool allow_v4l2_ = false;
#endif
#if defined(WEBRTC_USE_PIPEWIRE)
  bool allow_pipewire_ = false;
  int pipewire_fd_ = kInvalidPipeWireFd;
  rtc::scoped_refptr<videocapturemodule::PipeWireSession> pipewire_session_;
#endif
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_OPTIONS_H_

View file

@ -0,0 +1,647 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/windows/device_info_ds.h"
#include <dvdmedia.h>
#include "modules/video_capture/video_capture_config.h"
#include "modules/video_capture/windows/help_functions_ds.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_utils.h"
namespace webrtc {
namespace videocapturemodule {
// static
// Factory: allocates a DeviceInfoDS and runs Init(); returns NULL if the
// DirectShow enumerator could not be created. The previous `!dsInfo` check
// was dead code — plain `new` throws on failure, it never returns null.
DeviceInfoDS* DeviceInfoDS::Create() {
  DeviceInfoDS* dsInfo = new DeviceInfoDS();
  if (dsInfo->Init() != 0) {
    delete dsInfo;
    dsInfo = NULL;
  }
  return dsInfo;
}
DeviceInfoDS::DeviceInfoDS()
    : _dsDevEnum(NULL),
      _dsMonikerDevEnum(NULL),
      _CoUninitializeIsRequired(true) {
  // 1) Initialize the COM library (make Windows load the DLLs).
  //
  // CoInitializeEx must be called at least once, and is usually called only
  // once, for each thread that uses the COM library. Multiple calls to
  // CoInitializeEx by the same thread are allowed as long as they pass the
  // same concurrency flag, but subsequent valid calls return S_FALSE. To
  // close the COM library gracefully on a thread, each successful call to
  // CoInitializeEx, including any call that returns S_FALSE, must be balanced
  // by a corresponding call to CoUninitialize.
  HRESULT hr = CoInitializeEx(
      NULL, COINIT_MULTITHREADED);  // Use COINIT_MULTITHREADED since Voice
                                    // Engine uses COINIT_MULTITHREADED
  if (FAILED(hr)) {
    // Avoid calling CoUninitialize() since CoInitializeEx() failed.
    _CoUninitializeIsRequired = FALSE;

    if (hr == RPC_E_CHANGED_MODE) {
      // Calling thread has already initialized COM to be used in a
      // single-threaded apartment (STA); requesting MTA then fails.
      // Details: hr = 0x80010106 <=> "Cannot change thread mode after it is
      // set".
      //
      // Log message fixed: it previously claimed COINIT_APARTMENTTHREADED,
      // but COINIT_MULTITHREADED is what this code actually passes.
      RTC_DLOG(LS_INFO) << __FUNCTION__
                        << ": CoInitializeEx(NULL, COINIT_MULTITHREADED)"
                           " => RPC_E_CHANGED_MODE, error 0x"
                        << rtc::ToHex(hr);
    }
  }
}
DeviceInfoDS::~DeviceInfoDS() {
  // Release the COM enumerators before tearing down COM itself.
  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
  RELEASE_AND_CLEAR(_dsDevEnum);
  if (_CoUninitializeIsRequired) {
    // Balance the successful CoInitializeEx made in the constructor.
    CoUninitialize();
  }
}
// Creates the DirectShow system device enumerator used by all subsequent
// device queries. Returns 0 on success, -1 if the COM object could not be
// created.
int32_t DeviceInfoDS::Init() {
  HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
                                IID_ICreateDevEnum, (void**)&_dsDevEnum);
  if (hr != NOERROR) {
    RTC_LOG(LS_INFO) << "Failed to create CLSID_SystemDeviceEnum, error 0x"
                     << rtc::ToHex(hr);
    return -1;
  }
  return 0;
}
uint32_t DeviceInfoDS::NumberOfDevices() {
  MutexLock lock(&_apiLock);
  // GetDeviceInfo with null output buffers only counts the valid devices.
  return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0);
}
// Copies the name, unique id and product id of device `deviceNumber` into
// the caller-supplied buffers. Returns 0 if the device exists, -1 otherwise.
int32_t DeviceInfoDS::GetDeviceName(uint32_t deviceNumber,
                                    char* deviceNameUTF8,
                                    uint32_t deviceNameLength,
                                    char* deviceUniqueIdUTF8,
                                    uint32_t deviceUniqueIdUTF8Length,
                                    char* productUniqueIdUTF8,
                                    uint32_t productUniqueIdUTF8Length) {
  MutexLock lock(&_apiLock);
  const int32_t result = GetDeviceInfo(
      deviceNumber, deviceNameUTF8, deviceNameLength, deviceUniqueIdUTF8,
      deviceUniqueIdUTF8Length, productUniqueIdUTF8, productUniqueIdUTF8Length);
  // GetDeviceInfo returns the total number of valid devices; success requires
  // the requested index to be within that count.
  return result > (int32_t)deviceNumber ? 0 : -1;
}
// Enumerates DirectShow video input devices, skipping VFW drivers and the
// "Google Camera Adapter". When `deviceNumber` matches the index of a valid
// device and the corresponding buffer lengths are non-zero, copies its
// friendly name, device path (unique id) and product id into the caller's
// buffers. Returns the total number of valid devices found, 0 when
// enumeration fails, or -1 on a UTF-8 conversion failure.
int32_t DeviceInfoDS::GetDeviceInfo(uint32_t deviceNumber,
                                    char* deviceNameUTF8,
                                    uint32_t deviceNameLength,
                                    char* deviceUniqueIdUTF8,
                                    uint32_t deviceUniqueIdUTF8Length,
                                    char* productUniqueIdUTF8,
                                    uint32_t productUniqueIdUTF8Length) {
  // enumerate all video capture devices
  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
  HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                                 &_dsMonikerDevEnum, 0);
  if (hr != NOERROR) {
    RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
                     << rtc::ToHex(hr) << ". No webcam exist?";
    return 0;
  }

  _dsMonikerDevEnum->Reset();
  ULONG cFetched;
  IMoniker* pM;
  int index = 0;
  while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched)) {
    IPropertyBag* pBag;
    hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
    if (S_OK == hr) {
      // Find the description or friendly name.
      VARIANT varName;
      VariantInit(&varName);
      hr = pBag->Read(L"Description", &varName, 0);
      if (FAILED(hr)) {
        hr = pBag->Read(L"FriendlyName", &varName, 0);
      }
      if (SUCCEEDED(hr)) {
        // ignore all VFW drivers
        if ((wcsstr(varName.bstrVal, (L"(VFW)")) == NULL) &&
            (_wcsnicmp(varName.bstrVal, (L"Google Camera Adapter"), 21) != 0)) {
          // Found a valid device.
          if (index == static_cast<int>(deviceNumber)) {
            int convResult = 0;
            if (deviceNameLength > 0) {
              convResult = WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
                                               (char*)deviceNameUTF8,
                                               deviceNameLength, NULL, NULL);
              if (convResult == 0) {
                RTC_LOG(LS_INFO) << "Failed to convert device name to UTF8, "
                                    "error = "
                                 << GetLastError();
                return -1;
              }
            }
            if (deviceUniqueIdUTF8Length > 0) {
              // NOTE(review): `varName` is reused here without an intermediate
              // VariantClear; the BSTR read above may leak — confirm.
              hr = pBag->Read(L"DevicePath", &varName, 0);
              if (FAILED(hr)) {
                // No DevicePath (e.g. non-PnP device): fall back to the
                // friendly name as the unique id.
                strncpy_s((char*)deviceUniqueIdUTF8, deviceUniqueIdUTF8Length,
                          (char*)deviceNameUTF8, convResult);
                RTC_LOG(LS_INFO) << "Failed to get "
                                    "deviceUniqueIdUTF8 using "
                                    "deviceNameUTF8";
              } else {
                convResult = WideCharToMultiByte(
                    CP_UTF8, 0, varName.bstrVal, -1, (char*)deviceUniqueIdUTF8,
                    deviceUniqueIdUTF8Length, NULL, NULL);
                if (convResult == 0) {
                  RTC_LOG(LS_INFO) << "Failed to convert device "
                                      "name to UTF8, error = "
                                   << GetLastError();
                  return -1;
                }
                if (productUniqueIdUTF8 && productUniqueIdUTF8Length > 0) {
                  GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
                               productUniqueIdUTF8Length);
                }
              }
            }
          }
          ++index;  // increase the number of valid devices
        }
      }
      VariantClear(&varName);
      pBag->Release();
      // NOTE(review): pM is only released when BindToStorage succeeded;
      // GetDeviceFilter releases it unconditionally — possible moniker leak
      // on the failure path; confirm.
      pM->Release();
    }
  }
  if (deviceNameLength) {
    RTC_DLOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8;
  }
  return index;
}
// Walks the DirectShow video input devices looking for the one whose
// DevicePath (or, failing that, Description/FriendlyName) matches
// `deviceUniqueIdUTF8`, and binds it to an IBaseFilter. Optionally fills
// `productUniqueIdUTF8` from the device path. Returns the (caller-released)
// filter, or NULL/0 when the name is too long, enumeration fails, or no
// device matches.
IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8,
                                           char* productUniqueIdUTF8,
                                           uint32_t productUniqueIdUTF8Length) {
  const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen(
      (char*)deviceUniqueIdUTF8);  // UTF8 is also NULL terminated
  if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
    RTC_LOG(LS_INFO) << "Device name too long";
    return NULL;
  }

  // enumerate all video capture devices
  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
  HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                                 &_dsMonikerDevEnum, 0);
  if (hr != NOERROR) {
    RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
                     << rtc::ToHex(hr) << ". No webcam exist?";
    return 0;
  }
  _dsMonikerDevEnum->Reset();
  ULONG cFetched;
  IMoniker* pM;

  IBaseFilter* captureFilter = NULL;
  bool deviceFound = false;
  while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched) && !deviceFound) {
    IPropertyBag* pBag;
    hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
    if (S_OK == hr) {
      // Find the description or friendly name.
      VARIANT varName;
      VariantInit(&varName);
      if (deviceUniqueIdUTF8Length > 0) {
        hr = pBag->Read(L"DevicePath", &varName, 0);
        if (FAILED(hr)) {
          hr = pBag->Read(L"Description", &varName, 0);
          if (FAILED(hr)) {
            hr = pBag->Read(L"FriendlyName", &varName, 0);
          }
        }
        if (SUCCEEDED(hr)) {
          char tempDevicePathUTF8[256];
          tempDevicePathUTF8[0] = 0;
          WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
                              tempDevicePathUTF8, sizeof(tempDevicePathUTF8),
                              NULL, NULL);
          // Prefix match against the requested unique id.
          if (strncmp(tempDevicePathUTF8, (const char*)deviceUniqueIdUTF8,
                      deviceUniqueIdUTF8Length) == 0) {
            // We have found the requested device
            deviceFound = true;
            hr =
                pM->BindToObject(0, 0, IID_IBaseFilter, (void**)&captureFilter);
            if FAILED (hr) {
              RTC_LOG(LS_ERROR) << "Failed to bind to the selected "
                                   "capture device "
                                << hr;
            }

            if (productUniqueIdUTF8 &&
                productUniqueIdUTF8Length > 0)  // Get the device name
            {
              GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
                           productUniqueIdUTF8Length);
            }
          }
        }
      }
      VariantClear(&varName);
      pBag->Release();
    }
    pM->Release();
  }
  return captureFilter;
}
// Copies the Windows-specific capability at `capabilityIndex` (populated by
// CreateCapabilityMap) into `windowsCapability`. Returns 0 on success, -1
// when the index is out of range.
int32_t DeviceInfoDS::GetWindowsCapability(
    const int32_t capabilityIndex,
    VideoCaptureCapabilityWindows& windowsCapability) {
  MutexLock lock(&_apiLock);

  const bool index_in_range =
      capabilityIndex >= 0 &&
      static_cast<size_t>(capabilityIndex) < _captureCapabilitiesWindows.size();
  if (!index_in_range)
    return -1;

  windowsCapability = _captureCapabilitiesWindows[capabilityIndex];
  return 0;
}
// Queries the device named by `deviceUniqueIdUTF8` for all stream
// capabilities (resolution, frame rate, pixel format, interlacing) and
// rebuilds `_captureCapabilities` / `_captureCapabilitiesWindows`.
// Returns the number of capabilities found, or -1 on failure.
// Fixes in this revision:
//  * the early exit on a failed IID_IAMStreamConfig query no longer leaks
//    `outputCapturePin` and `captureDevice`;
//  * the FORMAT_VideoInfo branch logged "FORMAT_VideoInfo2" (copy-paste);
//  * the inner `SIZE` local was renamed so it no longer shadows the outer
//    `size` returned by GetNumberOfCapabilities.
int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
  // Reset old capability list
  _captureCapabilities.clear();

  const int32_t deviceUniqueIdUTF8Length =
      (int32_t)strlen((char*)deviceUniqueIdUTF8);
  if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
    RTC_LOG(LS_INFO) << "Device name too long";
    return -1;
  }
  RTC_LOG(LS_INFO) << "CreateCapabilityMap called for device "
                   << deviceUniqueIdUTF8;

  char productId[kVideoCaptureProductIdLength];
  IBaseFilter* captureDevice = DeviceInfoDS::GetDeviceFilter(
      deviceUniqueIdUTF8, productId, kVideoCaptureProductIdLength);
  if (!captureDevice)
    return -1;
  IPin* outputCapturePin = GetOutputPin(captureDevice, GUID_NULL);
  if (!outputCapturePin) {
    RTC_LOG(LS_INFO) << "Failed to get capture device output pin";
    RELEASE_AND_CLEAR(captureDevice);
    return -1;
  }
  IAMExtDevice* extDevice = NULL;
  HRESULT hr =
      captureDevice->QueryInterface(IID_IAMExtDevice, (void**)&extDevice);
  if (SUCCEEDED(hr) && extDevice) {
    RTC_LOG(LS_INFO) << "This is an external device";
    extDevice->Release();
  }

  IAMStreamConfig* streamConfig = NULL;
  hr = outputCapturePin->QueryInterface(IID_IAMStreamConfig,
                                        (void**)&streamConfig);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to get IID_IAMStreamConfig interface "
                        "from capture device";
    // Release the references taken above (this path previously leaked them).
    RELEASE_AND_CLEAR(outputCapturePin);
    RELEASE_AND_CLEAR(captureDevice);
    return -1;
  }

  // this gets the FPS
  IAMVideoControl* videoControlConfig = NULL;
  HRESULT hrVC = captureDevice->QueryInterface(IID_IAMVideoControl,
                                               (void**)&videoControlConfig);
  if (FAILED(hrVC)) {
    RTC_LOG(LS_INFO) << "IID_IAMVideoControl Interface NOT SUPPORTED";
  }

  AM_MEDIA_TYPE* pmt = NULL;
  VIDEO_STREAM_CONFIG_CAPS caps;
  int count, size;

  hr = streamConfig->GetNumberOfCapabilities(&count, &size);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to GetNumberOfCapabilities";
    RELEASE_AND_CLEAR(videoControlConfig);
    RELEASE_AND_CLEAR(streamConfig);
    RELEASE_AND_CLEAR(outputCapturePin);
    RELEASE_AND_CLEAR(captureDevice);
    return -1;
  }

  // Check if the device support formattype == FORMAT_VideoInfo2 and
  // FORMAT_VideoInfo. Prefer FORMAT_VideoInfo since some cameras (ZureCam) has
  // been seen having problem with MJPEG and FORMAT_VideoInfo2 Interlace flag is
  // only supported in FORMAT_VideoInfo2
  bool supportFORMAT_VideoInfo2 = false;
  bool supportFORMAT_VideoInfo = false;
  bool foundInterlacedFormat = false;
  GUID preferedVideoFormat = FORMAT_VideoInfo;
  for (int32_t tmp = 0; tmp < count; ++tmp) {
    hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
    if (hr == S_OK) {
      if (pmt->majortype == MEDIATYPE_Video &&
          pmt->formattype == FORMAT_VideoInfo2) {
        RTC_LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
        supportFORMAT_VideoInfo2 = true;
        VIDEOINFOHEADER2* h =
            reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
        RTC_DCHECK(h);
        foundInterlacedFormat |=
            h->dwInterlaceFlags &
            (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
      }
      if (pmt->majortype == MEDIATYPE_Video &&
          pmt->formattype == FORMAT_VideoInfo) {
        // Fixed copy-paste: this branch used to log "FORMAT_VideoInfo2".
        RTC_LOG(LS_INFO) << "Device support FORMAT_VideoInfo";
        supportFORMAT_VideoInfo = true;
      }
      FreeMediaType(pmt);
      pmt = NULL;
    }
  }
  if (supportFORMAT_VideoInfo2) {
    if (supportFORMAT_VideoInfo && !foundInterlacedFormat) {
      preferedVideoFormat = FORMAT_VideoInfo;
    } else {
      preferedVideoFormat = FORMAT_VideoInfo2;
    }
  }

  for (int32_t tmp = 0; tmp < count; ++tmp) {
    hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
    if (hr != S_OK) {
      RTC_LOG(LS_INFO) << "Failed to GetStreamCaps";
      RELEASE_AND_CLEAR(videoControlConfig);
      RELEASE_AND_CLEAR(streamConfig);
      RELEASE_AND_CLEAR(outputCapturePin);
      RELEASE_AND_CLEAR(captureDevice);
      return -1;
    }

    if (pmt->majortype == MEDIATYPE_Video &&
        pmt->formattype == preferedVideoFormat) {
      VideoCaptureCapabilityWindows capability;
      int64_t avgTimePerFrame = 0;

      if (pmt->formattype == FORMAT_VideoInfo) {
        VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
        RTC_DCHECK(h);
        capability.directShowCapabilityIndex = tmp;
        capability.width = h->bmiHeader.biWidth;
        capability.height = h->bmiHeader.biHeight;
        avgTimePerFrame = h->AvgTimePerFrame;
      }
      if (pmt->formattype == FORMAT_VideoInfo2) {
        VIDEOINFOHEADER2* h =
            reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
        RTC_DCHECK(h);
        capability.directShowCapabilityIndex = tmp;
        capability.width = h->bmiHeader.biWidth;
        capability.height = h->bmiHeader.biHeight;
        capability.interlaced =
            h->dwInterlaceFlags &
            (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
        avgTimePerFrame = h->AvgTimePerFrame;
      }

      if (hrVC == S_OK) {
        LONGLONG* frameDurationList = NULL;
        LONGLONG maxFPS = 0;
        long listSize = 0;
        // Renamed from `size` to avoid shadowing the capability-struct size
        // returned by GetNumberOfCapabilities above.
        SIZE frameSize;
        frameSize.cx = capability.width;
        frameSize.cy = capability.height;

        // GetMaxAvailableFrameRate doesn't return max frame rate always
        // eg: Logitech Notebook. This may be due to a bug in that API
        // because GetFrameRateList array is reversed in the above camera. So
        // a util method written. Can't assume the first value will return
        // the max fps.
        hrVC = videoControlConfig->GetFrameRateList(
            outputCapturePin, tmp, frameSize, &listSize, &frameDurationList);

        if (hrVC == S_OK) {
          maxFPS = GetMaxOfFrameArray(frameDurationList, listSize);
        }

        CoTaskMemFree(frameDurationList);
        frameDurationList = NULL;
        listSize = 0;

        // On some odd cameras, you may get a 0 for duration. Some others may
        // not update the out vars. GetMaxOfFrameArray returns the lowest
        // duration (highest FPS), or 0 if there was no list with elements.
        if (0 != maxFPS) {
          capability.maxFPS = static_cast<int>(10000000 / maxFPS);
          capability.supportFrameRateControl = true;
        } else  // use existing method
        {
          RTC_LOG(LS_INFO) << "GetMaxAvailableFrameRate NOT SUPPORTED";
          if (avgTimePerFrame > 0)
            capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
          else
            capability.maxFPS = 0;
        }
      } else  // use existing method in case IAMVideoControl is not supported
      {
        if (avgTimePerFrame > 0)
          capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
        else
          capability.maxFPS = 0;
      }

      // can't switch MEDIATYPE :~(
      if (pmt->subtype == MEDIASUBTYPE_I420) {
        capability.videoType = VideoType::kI420;
      } else if (pmt->subtype == MEDIASUBTYPE_IYUV) {
        capability.videoType = VideoType::kIYUV;
      } else if (pmt->subtype == MEDIASUBTYPE_RGB24) {
        capability.videoType = VideoType::kRGB24;
      } else if (pmt->subtype == MEDIASUBTYPE_YUY2) {
        capability.videoType = VideoType::kYUY2;
      } else if (pmt->subtype == MEDIASUBTYPE_RGB565) {
        capability.videoType = VideoType::kRGB565;
      } else if (pmt->subtype == MEDIASUBTYPE_MJPG) {
        capability.videoType = VideoType::kMJPEG;
      } else if (pmt->subtype == MEDIASUBTYPE_dvsl ||
                 pmt->subtype == MEDIASUBTYPE_dvsd ||
                 pmt->subtype ==
                     MEDIASUBTYPE_dvhd)  // If this is an external DV camera
      {
        capability.videoType =
            VideoType::kYUY2;  // MS DV filter seems to create this type
      } else if (pmt->subtype ==
                 MEDIASUBTYPE_UYVY)  // Seen used by Declink capture cards
      {
        capability.videoType = VideoType::kUYVY;
      } else if (pmt->subtype ==
                 MEDIASUBTYPE_HDYC)  // Seen used by Declink capture cards. Uses
                                     // BT. 709 color. Not entiry correct to use
                                     // UYVY. http://en.wikipedia.org/wiki/YCbCr
      {
        RTC_LOG(LS_INFO) << "Device support HDYC.";
        capability.videoType = VideoType::kUYVY;
      } else {
        WCHAR strGuid[39];
        StringFromGUID2(pmt->subtype, strGuid, 39);
        RTC_LOG(LS_WARNING)
            << "Device support unknown media type " << strGuid << ", width "
            << capability.width << ", height " << capability.height;
        continue;
      }

      _captureCapabilities.push_back(capability);
      _captureCapabilitiesWindows.push_back(capability);
      RTC_LOG(LS_INFO) << "Camera capability, width:" << capability.width
                       << " height:" << capability.height
                       << " type:" << static_cast<int>(capability.videoType)
                       << " fps:" << capability.maxFPS;
    }
    FreeMediaType(pmt);
    pmt = NULL;
  }
  RELEASE_AND_CLEAR(streamConfig);
  RELEASE_AND_CLEAR(videoControlConfig);
  RELEASE_AND_CLEAR(outputCapturePin);
  RELEASE_AND_CLEAR(captureDevice);  // Release the capture device

  // Store the new used device name
  _lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
  _lastUsedDeviceName =
      (char*)realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1);
  memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
         _lastUsedDeviceNameLength + 1);

  RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
  return static_cast<int32_t>(_captureCapabilities.size());
}
// Constructs a product ID from the Windows DevicePath. on a USB device the
// devicePath contains product id and vendor id. This seems to work for firewire
// as well.
// Example of device path:
// "\\?\usb#vid_0408&pid_2010&mi_00#7&258e7aaf&0&0000#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
// "\\?\avc#sony&dv-vcr&camcorder&dv#65b2d50301460008#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
// Extracts a product id (vid/pid section) from a Windows device path into
// `productUniqueIdUTF8`. On any parse failure the output is set to "".
// The id is the span between "\\?\" and the second '&' in the path.
void DeviceInfoDS::GetProductId(const char* devicePath,
                                char* productUniqueIdUTF8,
                                uint32_t productUniqueIdUTF8Length) {
  *productUniqueIdUTF8 = '\0';
  char* startPos = strstr((char*)devicePath, "\\\\?\\");
  if (!startPos) {
    strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
    RTC_LOG(LS_INFO) << "Failed to get the product Id";
    return;
  }
  startPos += 4;

  char* pos = strchr(startPos, '&');
  if (!pos || pos >= (char*)devicePath + strlen((char*)devicePath)) {
    strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
    RTC_LOG(LS_INFO) << "Failed to get the product Id";
    return;
  }
  // Find the second occurrence.
  pos = strchr(pos + 1, '&');
  // Bug fix: the original computed `pos - startPos` before checking `pos`
  // for null, which is undefined behavior when no second '&' exists.
  if (pos) {
    uint32_t bytesToCopy = (uint32_t)(pos - startPos);
    if (bytesToCopy < productUniqueIdUTF8Length &&
        bytesToCopy <= kVideoCaptureProductIdLength) {
      strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length,
                (char*)startPos, bytesToCopy);
      return;
    }
  }
  strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, "", 1);
  RTC_LOG(LS_INFO) << "Failed to get the product Id";
}
// Shows the device's own property-page dialog (blocking) at the given
// position. Returns 0 on success, -1 if the device filter or its property
// pages cannot be obtained.
int32_t DeviceInfoDS::DisplayCaptureSettingsDialogBox(
    const char* deviceUniqueIdUTF8,
    const char* dialogTitleUTF8,
    void* parentWindow,
    uint32_t positionX,
    uint32_t positionY) {
  MutexLock lock(&_apiLock);
  HWND window = (HWND)parentWindow;

  // Create the filter graph representing the capture device.
  IBaseFilter* filter = GetDeviceFilter(deviceUniqueIdUTF8, NULL, 0);
  if (!filter)
    return -1;

  ISpecifyPropertyPages* pPages = NULL;
  CAUUID uuid;
  HRESULT hr = S_OK;

  hr = filter->QueryInterface(IID_ISpecifyPropertyPages, (LPVOID*)&pPages);
  if (!SUCCEEDED(hr)) {
    filter->Release();
    return -1;
  }
  hr = pPages->GetPages(&uuid);
  if (!SUCCEEDED(hr)) {
    // Bug fix: pPages was previously leaked on this path and on success.
    pPages->Release();
    filter->Release();
    return -1;
  }

  WCHAR tempDialogTitleWide[256];
  tempDialogTitleWide[0] = 0;
  int size = 255;

  // UTF-8 to wide char
  MultiByteToWideChar(CP_UTF8, 0, (char*)dialogTitleUTF8, -1,
                      tempDialogTitleWide, size);

  // Invoke a dialog box to display.
  hr = OleCreatePropertyFrame(
      window,               // You must create the parent window.
      positionX,            // Horizontal position for the dialog box.
      positionY,            // Vertical position for the dialog box.
      tempDialogTitleWide,  // String used for the dialog box caption.
      1,                    // Number of pointers passed in pPlugin.
      (LPUNKNOWN*)&filter,  // Pointer to the filter.
      uuid.cElems,          // Number of property pages.
      uuid.pElems,          // Array of property page CLSIDs.
      LOCALE_USER_DEFAULT,  // Locale ID for the dialog box.
      0, NULL);             // Reserved
  // Release memory. The CAUUID element array is caller-owned per COM rules.
  if (uuid.pElems) {
    CoTaskMemFree(uuid.pElems);
  }
  pPages->Release();
  filter->Release();
  return 0;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,99 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
#include <dshow.h>
#include "modules/video_capture/device_info_impl.h"
#include "modules/video_capture/video_capture_impl.h"
namespace webrtc {
namespace videocapturemodule {
// A VideoCaptureCapability extended with DirectShow-specific bookkeeping.
struct VideoCaptureCapabilityWindows : public VideoCaptureCapability {
  // Index into the device's DirectShow capability enumeration
  // (presumably the IAMStreamConfig index — confirm against the .cc).
  uint32_t directShowCapabilityIndex = 0;
  // True if the capability allows adjusting the frame rate.
  bool supportFrameRateControl = false;
};
// DirectShow-backed DeviceInfo implementation for Windows: enumerates
// capture devices and their capabilities.
class DeviceInfoDS : public DeviceInfoImpl {
 public:
  // Factory function.
  static DeviceInfoDS* Create();

  DeviceInfoDS();
  ~DeviceInfoDS() override;

  int32_t Init() override;
  uint32_t NumberOfDevices() override;

  /*
   * Returns the available capture devices.
   */
  int32_t GetDeviceName(uint32_t deviceNumber,
                        char* deviceNameUTF8,
                        uint32_t deviceNameLength,
                        char* deviceUniqueIdUTF8,
                        uint32_t deviceUniqueIdUTF8Length,
                        char* productUniqueIdUTF8,
                        uint32_t productUniqueIdUTF8Length) override;

  /*
   * Display OS /capture device specific settings dialog
   */
  int32_t DisplayCaptureSettingsDialogBox(const char* deviceUniqueIdUTF8,
                                          const char* dialogTitleUTF8,
                                          void* parentWindow,
                                          uint32_t positionX,
                                          uint32_t positionY) override;

  // Windows specific

  /* Gets a capture device filter
   The user of this API is responsible for releasing the filter when it not
   needed.
   */
  IBaseFilter* GetDeviceFilter(const char* deviceUniqueIdUTF8,
                               char* productUniqueIdUTF8 = NULL,
                               uint32_t productUniqueIdUTF8Length = 0);

  // Returns the DirectShow-specific data for a capability cached by the
  // last CreateCapabilityMap() call.
  int32_t GetWindowsCapability(
      int32_t capabilityIndex,
      VideoCaptureCapabilityWindows& windowsCapability);

  // Extracts a product id from a Windows device path (e.g. the vid/pid
  // section of a USB device path). Writes "" on failure.
  static void GetProductId(const char* devicePath,
                           char* productUniqueIdUTF8,
                           uint32_t productUniqueIdUTF8Length);

 protected:
  int32_t GetDeviceInfo(uint32_t deviceNumber,
                        char* deviceNameUTF8,
                        uint32_t deviceNameLength,
                        char* deviceUniqueIdUTF8,
                        uint32_t deviceUniqueIdUTF8Length,
                        char* productUniqueIdUTF8,
                        uint32_t productUniqueIdUTF8Length);

  // Enumerates and caches the capabilities of the named device; the caller
  // must hold _apiLock (enforced by the thread annotation).
  int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
      RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);

 private:
  ICreateDevEnum* _dsDevEnum;       // COM device enumerator.
  IEnumMoniker* _dsMonikerDevEnum;  // COM moniker enumerator.
  // Presumably set when this class performed COM init itself — confirm
  // against Init() in the .cc file.
  bool _CoUninitializeIsRequired;
  std::vector<VideoCaptureCapabilityWindows> _captureCapabilitiesWindows;
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_

View file

@ -0,0 +1,158 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <initguid.h> // Must come before the help_functions_ds.h include so
// that DEFINE_GUID() entries will be defined in this
// object file.
#include <cguid.h>
#include "modules/video_capture/windows/help_functions_ds.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
// This returns minimum :), which will give max frame rate...
// Returns the smallest value in `maxFps` — the entries are frame intervals,
// so the minimum corresponds to the highest frame rate (hence the name).
// Returns 0 for a null/empty array.
LONGLONG GetMaxOfFrameArray(LONGLONG* maxFps, long size) {
  if (maxFps == nullptr || size <= 0)
    return 0;
  LONGLONG smallest = maxFps[0];
  for (long i = 1; i < size; ++i) {
    if (maxFps[i] < smallest)
      smallest = maxFps[i];
  }
  return smallest;
}
// Returns the first unconnected input pin of `filter`, or NULL if none.
// The caller owns the returned reference and must Release() it.
IPin* GetInputPin(IBaseFilter* filter) {
  IPin* pin = NULL;
  IEnumPins* pPinEnum = NULL;
  filter->EnumPins(&pPinEnum);
  if (pPinEnum == NULL) {
    return NULL;
  }

  // get first unconnected pin
  pPinEnum->Reset();  // set to first pin
  while (S_OK == pPinEnum->Next(1, &pin, NULL)) {
    PIN_DIRECTION pPinDir;
    pin->QueryDirection(&pPinDir);
    if (PINDIR_INPUT == pPinDir)  // This is an input pin
    {
      IPin* tempPin = NULL;
      if (S_OK != pin->ConnectedTo(&tempPin))  // The pin is not connected
      {
        pPinEnum->Release();
        return pin;  // Caller keeps the reference handed out by Next().
      }
      // Bug fix: on success ConnectedTo() returns a referenced pin which
      // was previously leaked; release it before moving on.
      tempPin->Release();
    }
    pin->Release();
  }
  pPinEnum->Release();
  return NULL;
}
// Returns the first output pin of `filter` that matches `Category`
// (GUID_NULL matches any category), or NULL. The caller owns the returned
// reference and must Release() it.
IPin* GetOutputPin(IBaseFilter* filter, REFGUID Category) {
  IEnumPins* pin_enum = NULL;
  filter->EnumPins(&pin_enum);
  if (pin_enum == NULL)
    return NULL;

  pin_enum->Reset();  // Start from the first pin.
  IPin* pin = NULL;
  while (pin_enum->Next(1, &pin, NULL) == S_OK) {
    PIN_DIRECTION direction;
    pin->QueryDirection(&direction);
    if (direction == PINDIR_OUTPUT &&
        (Category == GUID_NULL || PinMatchesCategory(pin, Category))) {
      // Found a matching output pin; hand its reference to the caller.
      pin_enum->Release();
      return pin;
    }
    pin->Release();
    pin = NULL;
  }
  pin_enum->Release();
  return NULL;
}
// Returns TRUE when `pPin` reports `Category` as its pin category via the
// IKsPropertySet interface; FALSE otherwise (including when the pin does
// not expose IKsPropertySet).
BOOL PinMatchesCategory(IPin* pPin, REFGUID Category) {
  IKsPropertySet* property_set = NULL;
  if (FAILED(pPin->QueryInterface(IID_PPV_ARGS(&property_set))))
    return FALSE;

  BOOL matches = FALSE;
  GUID pin_category;
  DWORD bytes_returned = 0;
  HRESULT hr = property_set->Get(AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
                                 NULL, 0, &pin_category, sizeof(GUID),
                                 &bytes_returned);
  if (SUCCEEDED(hr) && bytes_returned == sizeof(GUID))
    matches = (pin_category == Category);
  property_set->Release();
  return matches;
}
// Frees the format block and releases the pUnk of `media_type`, leaving the
// struct itself intact (it is not freed — see FreeMediaType for that).
void ResetMediaType(AM_MEDIA_TYPE* media_type) {
  if (media_type == nullptr)
    return;
  if (media_type->cbFormat) {
    CoTaskMemFree(media_type->pbFormat);
    media_type->pbFormat = nullptr;
    media_type->cbFormat = 0;
  }
  if (media_type->pUnk != nullptr) {
    media_type->pUnk->Release();
    media_type->pUnk = nullptr;
  }
}
// Releases everything `media_type` owns, then frees the struct itself.
// Safe to call with nullptr.
void FreeMediaType(AM_MEDIA_TYPE* media_type) {
  if (media_type != nullptr) {
    ResetMediaType(media_type);
    CoTaskMemFree(media_type);
  }
}
// Deep-copies `source` into `target`: the format block is duplicated and an
// extra reference is taken on pUnk. Returns E_OUTOFMEMORY if the format
// buffer cannot be allocated (target->cbFormat is then zeroed).
HRESULT CopyMediaType(AM_MEDIA_TYPE* target, const AM_MEDIA_TYPE* source) {
  RTC_DCHECK_NE(source, target);
  // Shallow copy of all fields first.
  *target = *source;
  if (source->cbFormat != 0) {
    RTC_DCHECK(source->pbFormat);
    BYTE* format_copy =
        reinterpret_cast<BYTE*>(CoTaskMemAlloc(source->cbFormat));
    if (format_copy == nullptr) {
      target->cbFormat = 0;
      return E_OUTOFMEMORY;
    }
    CopyMemory(format_copy, source->pbFormat, source->cbFormat);
    target->pbFormat = format_copy;
  }
  if (target->pUnk != nullptr)
    target->pUnk->AddRef();
  return S_OK;
}
// Returns a CoTaskMemAlloc'd copy of `str`, or nullptr on allocation
// failure. The caller frees the result with CoTaskMemFree.
wchar_t* DuplicateWideString(const wchar_t* str) {
  size_t len = lstrlenW(str);
  wchar_t* ret =
      reinterpret_cast<LPWSTR>(CoTaskMemAlloc((len + 1) * sizeof(wchar_t)));
  // Bug fix: the allocation was previously copied into without a null check.
  if (ret != nullptr)
    lstrcpyW(ret, str);
  return ret;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,118 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_HELP_FUNCTIONS_DS_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_HELP_FUNCTIONS_DS_H_
#include <dshow.h>
#include <type_traits>
#include <utility>
#include "api/scoped_refptr.h"
#include "rtc_base/ref_counter.h"
// FOURCC 'I420' (0x30323449 little-endian) in the standard
// XXXXXXXX-0000-0010-8000-00AA00389B71 media-subtype GUID template.
DEFINE_GUID(MEDIASUBTYPE_I420,
            0x30323449,
            0x0000,
            0x0010,
            0x80,
            0x00,
            0x00,
            0xAA,
            0x00,
            0x38,
            0x9B,
            0x71);
// FOURCC 'HDYC' (0x43594448 little-endian) — UYVY with BT.709 color, seen
// on Blackmagic/Decklink capture cards.
DEFINE_GUID(MEDIASUBTYPE_HDYC,
            0x43594448,
            0x0000,
            0x0010,
            0x80,
            0x00,
            0x00,
            0xAA,
            0x00,
            0x38,
            0x9B,
            0x71);

// Releases a COM pointer (if set) and nulls it.
#define RELEASE_AND_CLEAR(p) \
  if (p) {                   \
    (p)->Release();          \
    (p) = NULL;              \
  }
namespace webrtc {
namespace videocapturemodule {
// Returns the smallest entry of `maxFps` (entries are frame intervals, so
// the minimum corresponds to the maximum frame rate). 0 on empty input.
LONGLONG GetMaxOfFrameArray(LONGLONG* maxFps, long size);

// Returns the first unconnected input pin of `filter` (caller releases).
IPin* GetInputPin(IBaseFilter* filter);
// Returns the first output pin matching `Category` (caller releases);
// GUID_NULL matches any category.
IPin* GetOutputPin(IBaseFilter* filter, REFGUID Category);
// TRUE if `pPin` reports `Category` via IKsPropertySet.
BOOL PinMatchesCategory(IPin* pPin, REFGUID Category);

// Frees the contents of an AM_MEDIA_TYPE without freeing the struct.
void ResetMediaType(AM_MEDIA_TYPE* media_type);
// Frees the contents and the struct itself.
void FreeMediaType(AM_MEDIA_TYPE* media_type);
// Deep copy; returns E_OUTOFMEMORY if the format block can't be duplicated.
HRESULT CopyMediaType(AM_MEDIA_TYPE* target, const AM_MEDIA_TYPE* source);
// Helper function to make using scoped_refptr with COM interface pointers
// a little less awkward. rtc::scoped_refptr doesn't support the & operator
// or a way to receive values via an out ptr.
// The function is intentionally not called QueryInterface to make things less
// confusing for the compiler to figure out what the caller wants to do when
// called from within the context of a class that also implements COM
// interfaces.
// QI helper: queries `object` for interface T and stores the result (with
// its reference) in `*ptr`. `*ptr` must be empty on entry. Returns the
// HRESULT from QueryInterface.
template <class T>
HRESULT GetComInterface(IUnknown* object, rtc::scoped_refptr<T>* ptr) {
  // This helper function is not meant to magically free ptr. If we do that
  // we add code bloat to most places where it's not needed and make the code
  // less readable since it's not clear at the call site that the pointer
  // would get freed even inf QI() fails.
  RTC_DCHECK(!ptr->get());
  void* new_ptr = nullptr;
  HRESULT hr = object->QueryInterface(__uuidof(T), &new_ptr);
  if (SUCCEEDED(hr))
    ptr->swap(reinterpret_cast<T**>(&new_ptr));  // Adopt the QI reference.
  return hr;
}
// Provides a reference count implementation for COM (IUnknown derived) classes.
// The implementation uses atomics for managing the ref count.
// Provides a reference count implementation for COM (IUnknown derived)
// classes. The implementation uses atomics for managing the ref count.
// Note: AddRef/Release return dummy values (1/0), not the actual count;
// COM only requires the return value to be usable for diagnostics.
template <class T>
class ComRefCount : public T {
 public:
  ComRefCount() {}

  // Forwards a single constructor argument to T.
  template <class P0>
  explicit ComRefCount(P0&& p0) : T(std::forward<P0>(p0)) {}

  STDMETHOD_(ULONG, AddRef)() override {
    ref_count_.IncRef();
    return 1;
  }

  STDMETHOD_(ULONG, Release)() override {
    const auto status = ref_count_.DecRef();
    if (status == rtc::RefCountReleaseStatus::kDroppedLastRef) {
      delete this;
      return 0;
    }
    return 1;
  }

 protected:
  ~ComRefCount() {}

 private:
  // Starts at zero; the creator must AddRef() once (COM convention here).
  webrtc::webrtc_impl::RefCounter ref_count_{0};
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_HELP_FUNCTIONS_DS_H_

View file

@ -0,0 +1,961 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/windows/sink_filter_ds.h"
#include <dvdmedia.h> // VIDEOINFOHEADER2
#include <initguid.h>
#include <algorithm>
#include <list>
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/string_utils.h"
// Class id of the WebRTC capture sink filter:
// {88CDBBDC-A73B-4AFA-ACBF-15D5E2CE12C3}.
DEFINE_GUID(CLSID_SINKFILTER,
            0x88cdbbdc,
            0xa73b,
            0x4afa,
            0xac,
            0xbf,
            0x15,
            0xd5,
            0xe2,
            0xce,
            0x12,
            0xc3);
namespace webrtc {
namespace videocapturemodule {
namespace {
// Simple enumeration implementation that enumerates over a single pin :-/
class EnumPins : public IEnumPins {
 public:
  // Holds a reference to `pin` via scoped_refptr for the enum's lifetime.
  EnumPins(IPin* pin) : pin_(pin) {}

 protected:
  virtual ~EnumPins() {}

 private:
  STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override {
    if (riid == IID_IUnknown || riid == IID_IEnumPins) {
      *ppv = static_cast<IEnumPins*>(this);
      AddRef();
      return S_OK;
    }
    return E_NOINTERFACE;
  }

  // Cloning is never used by this capture graph; left unimplemented.
  STDMETHOD(Clone)(IEnumPins** pins) {
    RTC_DCHECK_NOTREACHED();
    return E_NOTIMPL;
  }

  STDMETHOD(Next)(ULONG count, IPin** pins, ULONG* fetched) {
    RTC_DCHECK(count > 0);
    RTC_DCHECK(pins);
    // fetched may be NULL.
    if (pos_ > 0) {
      // The single pin has already been returned.
      if (fetched)
        *fetched = 0;
      return S_FALSE;
    }
    ++pos_;
    pins[0] = pin_.get();
    pins[0]->AddRef();  // The caller receives its own reference.
    if (fetched)
      *fetched = 1;
    // S_FALSE signals that fewer than `count` items were returned.
    return count == 1 ? S_OK : S_FALSE;
  }

  STDMETHOD(Skip)(ULONG count) {
    RTC_DCHECK_NOTREACHED();
    return E_NOTIMPL;
  }

  STDMETHOD(Reset)() {
    pos_ = 0;
    return S_OK;
  }

  rtc::scoped_refptr<IPin> pin_;
  int pos_ = 0;  // 0 until the pin has been handed out once.
};
// Returns true if `a` matches every field that `b` actually specifies;
// GUID_NULL fields in `b` act as wildcards. When `b` names a format type,
// the format block must match byte-for-byte.
bool IsMediaTypePartialMatch(const AM_MEDIA_TYPE& a, const AM_MEDIA_TYPE& b) {
  const bool major_matches =
      (b.majortype == GUID_NULL || a.majortype == b.majortype);
  const bool sub_matches = (b.subtype == GUID_NULL || a.subtype == b.subtype);
  if (!major_matches || !sub_matches)
    return false;

  if (b.formattype == GUID_NULL)
    return true;

  // If the format block is specified then it must match exactly.
  if (a.formattype != b.formattype || a.cbFormat != b.cbFormat)
    return false;
  return a.cbFormat == 0 || memcmp(a.pbFormat, b.pbFormat, a.cbFormat) == 0;
}
// A type is fully specified when both its major type and its format type
// are concrete (neither is the GUID_NULL wildcard).
bool IsMediaTypeFullySpecified(const AM_MEDIA_TYPE& type) {
  if (type.majortype == GUID_NULL)
    return false;
  return type.formattype != GUID_NULL;
}
// Ensures `media_type` owns a format buffer of exactly `length` bytes and
// returns it (nullptr on allocation failure). An existing buffer of the
// right size is reused; otherwise a new one replaces the old.
BYTE* AllocMediaTypeFormatBuffer(AM_MEDIA_TYPE* media_type, ULONG length) {
  RTC_DCHECK(length);
  if (media_type->cbFormat == length)
    return media_type->pbFormat;

  BYTE* new_buffer = static_cast<BYTE*>(CoTaskMemAlloc(length));
  if (new_buffer == nullptr)
    return nullptr;

  if (media_type->pbFormat != nullptr) {
    RTC_DCHECK(media_type->cbFormat);
    CoTaskMemFree(media_type->pbFormat);
    media_type->pbFormat = nullptr;
  }

  media_type->cbFormat = length;
  media_type->pbFormat = new_buffer;
  return new_buffer;
}
// Fills `props` from `sample`, preferring the IMediaSample2 fast path and
// falling back to assembling the properties from individual IMediaSample
// calls when that interface is unavailable.
void GetSampleProperties(IMediaSample* sample, AM_SAMPLE2_PROPERTIES* props) {
  rtc::scoped_refptr<IMediaSample2> sample2;
  if (SUCCEEDED(GetComInterface(sample, &sample2))) {
    sample2->GetProperties(sizeof(*props), reinterpret_cast<BYTE*>(props));
    return;
  }

  //  Get the properties the hard way.
  props->cbData = sizeof(*props);
  props->dwTypeSpecificFlags = 0;
  props->dwStreamId = AM_STREAM_MEDIA;
  props->dwSampleFlags = 0;

  if (sample->IsDiscontinuity() == S_OK)
    props->dwSampleFlags |= AM_SAMPLE_DATADISCONTINUITY;

  if (sample->IsPreroll() == S_OK)
    props->dwSampleFlags |= AM_SAMPLE_PREROLL;

  if (sample->IsSyncPoint() == S_OK)
    props->dwSampleFlags |= AM_SAMPLE_SPLICEPOINT;

  if (SUCCEEDED(sample->GetTime(&props->tStart, &props->tStop)))
    props->dwSampleFlags |= AM_SAMPLE_TIMEVALID | AM_SAMPLE_STOPVALID;

  // GetMediaType returns S_OK only when the type changed for this sample.
  if (sample->GetMediaType(&props->pMediaType) == S_OK)
    props->dwSampleFlags |= AM_SAMPLE_TYPECHANGED;

  sample->GetPointer(&props->pbBuffer);
  props->lActual = sample->GetActualDataLength();
  props->cbBuffer = sample->GetSize();
}
// Returns true if the media type is supported, false otherwise.
// For supported types, the `capability` will be populated accordingly.
bool TranslateMediaTypeToVideoCaptureCapability(
const AM_MEDIA_TYPE* media_type,
VideoCaptureCapability* capability) {
RTC_DCHECK(capability);
if (!media_type || media_type->majortype != MEDIATYPE_Video ||
!media_type->pbFormat) {
return false;
}
const BITMAPINFOHEADER* bih = nullptr;
if (media_type->formattype == FORMAT_VideoInfo) {
bih = &reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat)->bmiHeader;
} else if (media_type->formattype != FORMAT_VideoInfo2) {
bih = &reinterpret_cast<VIDEOINFOHEADER2*>(media_type->pbFormat)->bmiHeader;
} else {
return false;
}
RTC_LOG(LS_INFO) << "TranslateMediaTypeToVideoCaptureCapability width:"
<< bih->biWidth << " height:" << bih->biHeight
<< " Compression:0x" << rtc::ToHex(bih->biCompression);
const GUID& sub_type = media_type->subtype;
if (sub_type == MEDIASUBTYPE_MJPG &&
bih->biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
capability->videoType = VideoType::kMJPEG;
} else if (sub_type == MEDIASUBTYPE_I420 &&
bih->biCompression == MAKEFOURCC('I', '4', '2', '0')) {
capability->videoType = VideoType::kI420;
} else if (sub_type == MEDIASUBTYPE_YUY2 &&
bih->biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
capability->videoType = VideoType::kYUY2;
} else if (sub_type == MEDIASUBTYPE_UYVY &&
bih->biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
capability->videoType = VideoType::kUYVY;
} else if (sub_type == MEDIASUBTYPE_HDYC) {
capability->videoType = VideoType::kUYVY;
} else if (sub_type == MEDIASUBTYPE_RGB24 && bih->biCompression == BI_RGB) {
capability->videoType = VideoType::kRGB24;
} else {
return false;
}
// Store the incoming width and height
capability->width = bih->biWidth;
// Store the incoming height,
// for RGB24 we assume the frame to be upside down
if (sub_type == MEDIASUBTYPE_RGB24 && bih->biHeight > 0) {
capability->height = -(bih->biHeight);
} else {
capability->height = abs(bih->biHeight);
}
return true;
}
// Enumerates the media types this sink can accept for a requested
// capability, one AM_MEDIA_TYPE per supported VideoType, ordered by
// preference (cheapest-to-convert-to-I420 first, with the requested type
// moved to the front).
class MediaTypesEnum : public IEnumMediaTypes {
 public:
  MediaTypesEnum(const VideoCaptureCapability& capability)
      : capability_(capability),
        format_preference_order_(
            {// Default preferences, sorted by cost-to-convert-to-i420.
             VideoType::kI420, VideoType::kYUY2, VideoType::kRGB24,
             VideoType::kUYVY, VideoType::kMJPEG}) {
    // Use the preferred video type, if supported.
    auto it = std::find(format_preference_order_.begin(),
                        format_preference_order_.end(), capability_.videoType);
    if (it != format_preference_order_.end()) {
      RTC_LOG(LS_INFO) << "Selected video type: " << *it;
      // Move it to the front of the list, if it isn't already there.
      if (it != format_preference_order_.begin()) {
        format_preference_order_.splice(format_preference_order_.begin(),
                                        format_preference_order_, it,
                                        std::next(it));
      }
    } else {
      RTC_LOG(LS_WARNING) << "Unsupported video type: "
                          << rtc::ToString(
                                 static_cast<int>(capability_.videoType))
                          << ", using default preference list.";
    }
  }

 protected:
  virtual ~MediaTypesEnum() {}

 private:
  STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override {
    if (riid == IID_IUnknown || riid == IID_IEnumMediaTypes) {
      *ppv = static_cast<IEnumMediaTypes*>(this);
      AddRef();
      return S_OK;
    }
    return E_NOINTERFACE;
  }

  // IEnumMediaTypes
  STDMETHOD(Clone)(IEnumMediaTypes** pins) {
    RTC_DCHECK_NOTREACHED();
    return E_NOTIMPL;
  }

  // Builds one CoTaskMemAlloc'd AM_MEDIA_TYPE (with a VIDEOINFOHEADER
  // format block) per remaining preferred format; the caller frees them.
  STDMETHOD(Next)(ULONG count, AM_MEDIA_TYPE** types, ULONG* fetched) {
    RTC_DCHECK(count > 0);
    RTC_DCHECK(types);
    // fetched may be NULL.
    if (fetched)
      *fetched = 0;

    for (ULONG i = 0;
         i < count && pos_ < static_cast<int>(format_preference_order_.size());
         ++i) {
      AM_MEDIA_TYPE* media_type = reinterpret_cast<AM_MEDIA_TYPE*>(
          CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
      ZeroMemory(media_type, sizeof(*media_type));
      types[i] = media_type;
      VIDEOINFOHEADER* vih = reinterpret_cast<VIDEOINFOHEADER*>(
          AllocMediaTypeFormatBuffer(media_type, sizeof(VIDEOINFOHEADER)));
      ZeroMemory(vih, sizeof(*vih));
      vih->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
      vih->bmiHeader.biPlanes = 1;
      vih->bmiHeader.biClrImportant = 0;
      vih->bmiHeader.biClrUsed = 0;
      // AvgTimePerFrame is in 100 ns units.
      if (capability_.maxFPS != 0)
        vih->AvgTimePerFrame = 10000000 / capability_.maxFPS;

      SetRectEmpty(&vih->rcSource);  // we want the whole image area rendered.
      SetRectEmpty(&vih->rcTarget);  // no particular destination rectangle

      media_type->majortype = MEDIATYPE_Video;
      media_type->formattype = FORMAT_VideoInfo;
      media_type->bTemporalCompression = FALSE;

      // Set format information.
      auto format_it = std::next(format_preference_order_.begin(), pos_++);
      SetMediaInfoFromVideoType(*format_it, &vih->bmiHeader, media_type);

      vih->bmiHeader.biWidth = capability_.width;
      vih->bmiHeader.biHeight = capability_.height;
      // (biBitCount / 4) * w * h / 2 == biBitCount * w * h / 8 bytes.
      vih->bmiHeader.biSizeImage = ((vih->bmiHeader.biBitCount / 4) *
                                    capability_.height * capability_.width) /
                                   2;
      RTC_DCHECK(vih->bmiHeader.biSizeImage);
      media_type->lSampleSize = vih->bmiHeader.biSizeImage;
      media_type->bFixedSizeSamples = true;
      if (fetched)
        ++(*fetched);
    }
    return pos_ == static_cast<int>(format_preference_order_.size()) ? S_FALSE
                                                                     : S_OK;
  }

  // Fills the bitmap FOURCC/bit-depth and matching media subtype for
  // `video_type`.
  static void SetMediaInfoFromVideoType(VideoType video_type,
                                        BITMAPINFOHEADER* bitmap_header,
                                        AM_MEDIA_TYPE* media_type) {
    switch (video_type) {
      case VideoType::kI420:
        bitmap_header->biCompression = MAKEFOURCC('I', '4', '2', '0');
        bitmap_header->biBitCount = 12;  // bit per pixel
        media_type->subtype = MEDIASUBTYPE_I420;
        break;
      case VideoType::kYUY2:
        bitmap_header->biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
        bitmap_header->biBitCount = 16;  // bit per pixel
        media_type->subtype = MEDIASUBTYPE_YUY2;
        break;
      case VideoType::kRGB24:
        bitmap_header->biCompression = BI_RGB;
        bitmap_header->biBitCount = 24;  // bit per pixel
        media_type->subtype = MEDIASUBTYPE_RGB24;
        break;
      case VideoType::kUYVY:
        bitmap_header->biCompression = MAKEFOURCC('U', 'Y', 'V', 'Y');
        bitmap_header->biBitCount = 16;  // bit per pixel
        media_type->subtype = MEDIASUBTYPE_UYVY;
        break;
      case VideoType::kMJPEG:
        bitmap_header->biCompression = MAKEFOURCC('M', 'J', 'P', 'G');
        bitmap_header->biBitCount = 12;  // bit per pixel
        media_type->subtype = MEDIASUBTYPE_MJPG;
        break;
      default:
        RTC_DCHECK_NOTREACHED();
    }
  }

  STDMETHOD(Skip)(ULONG count) {
    RTC_DCHECK_NOTREACHED();
    return E_NOTIMPL;
  }

  STDMETHOD(Reset)() {
    pos_ = 0;
    return S_OK;
  }

  int pos_ = 0;  // Index of the next format to hand out.
  const VideoCaptureCapability capability_;
  std::list<VideoType> format_preference_order_;
};
} // namespace
// Constructs the pin belonging to `filter`. Ownership/back-pointer only;
// the filter outlives the pin.
CaptureInputPin::CaptureInputPin(CaptureSinkFilter* filter) {
  capture_checker_.Detach();
  // No reference held to avoid circular references.
  info_.pFilter = filter;
  info_.dir = PINDIR_INPUT;
}
// Frees the format block / pUnk held by the cached media type.
CaptureInputPin::~CaptureInputPin() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  ResetMediaType(&media_type_);
}
// Records the capability the client asked for and clears the negotiated
// result; must only be called while the filter is stopped.
HRESULT CaptureInputPin::SetRequestedCapability(
    const VideoCaptureCapability& capability) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  RTC_DCHECK(Filter()->IsStopped());
  requested_capability_ = capability;
  resulting_capability_ = VideoCaptureCapability();
  return S_OK;
}
// Resets per-run state when the filter starts; the capture thread checker
// is re-attached by the first Receive() call on the new capture thread.
void CaptureInputPin::OnFilterActivated() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  runtime_error_ = false;
  flushing_ = false;
  capture_checker_.Detach();
  capture_thread_id_ = 0;
}
void CaptureInputPin::OnFilterDeactivated() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // Expedite shutdown by raising the flushing flag so no further processing
  // on the capture thread occurs. When the graph is stopped and all filters
  // have been told to stop, the media controller (graph) will wait for the
  // capture thread to stop.
  flushing_ = true;
  if (allocator_)
    allocator_->Decommit();
}
// Returns the owning filter (non-owning back-pointer stored in info_).
CaptureSinkFilter* CaptureInputPin::Filter() const {
  return static_cast<CaptureSinkFilter*>(info_.pFilter);
}
// Tries to connect to `receive_pin` using `media_type`. On success the
// translated capability and a deep copy of the media type are cached.
HRESULT CaptureInputPin::AttemptConnection(IPin* receive_pin,
                                           const AM_MEDIA_TYPE* media_type) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  RTC_DCHECK(Filter()->IsStopped());

  // Check that the connection is valid -- need to do this for every
  // connect attempt since BreakConnect will undo it.
  HRESULT hr = CheckDirection(receive_pin);
  if (FAILED(hr))
    return hr;

  if (!TranslateMediaTypeToVideoCaptureCapability(media_type,
                                                  &resulting_capability_)) {
    ClearAllocator(true);
    return VFW_E_TYPE_NOT_ACCEPTED;
  }

  // See if the other pin will accept this type.
  hr = receive_pin->ReceiveConnection(static_cast<IPin*>(this), media_type);
  if (FAILED(hr)) {
    receive_pin_ = nullptr;  // Should already be null, but just in case.
    return hr;
  }

  // Should have been set as part of the connect process.
  RTC_DCHECK_EQ(receive_pin_, receive_pin);
  ResetMediaType(&media_type_);
  CopyMediaType(&media_type_, media_type);

  return S_OK;
}
// Collects every media type that partially matches `media_type`, first from
// the downstream pin's enumerator and then from our own. The caller owns
// the returned AM_MEDIA_TYPEs and must FreeMediaType() each one.
std::vector<AM_MEDIA_TYPE*> CaptureInputPin::DetermineCandidateFormats(
    IPin* receive_pin,
    const AM_MEDIA_TYPE* media_type) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  RTC_DCHECK(receive_pin);
  RTC_DCHECK(media_type);

  std::vector<AM_MEDIA_TYPE*> candidates;
  for (int pass = 0; pass < 2; ++pass) {
    IEnumMediaTypes* enumerator = nullptr;
    if (pass == 0) {
      // First pass: types offered by the downstream pin.
      receive_pin->EnumMediaTypes(&enumerator);
    } else {
      // Second pass: our own preferred types.
      EnumMediaTypes(&enumerator);
    }
    if (!enumerator)
      continue;

    while (true) {
      ULONG fetched = 0;
      AM_MEDIA_TYPE* candidate = nullptr;
      if (enumerator->Next(1, &candidate, &fetched) != S_OK)
        break;
      if (IsMediaTypePartialMatch(*candidate, *media_type)) {
        candidates.push_back(candidate);
      } else {
        FreeMediaType(candidate);
      }
    }
    enumerator->Release();
  }

  return candidates;
}
// Drops our allocator reference, optionally decommitting its buffers first.
void CaptureInputPin::ClearAllocator(bool decommit) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (allocator_) {
    if (decommit)
      allocator_->Decommit();
    allocator_ = nullptr;
  }
}
// Rejects a peer pin that has the same direction as us (input-to-input
// connections are invalid).
HRESULT CaptureInputPin::CheckDirection(IPin* pin) const {
  RTC_DCHECK_RUN_ON(&main_checker_);
  PIN_DIRECTION pd;
  pin->QueryDirection(&pd);
  // Fairly basic check, make sure we don't pair input with input etc.
  return pd == info_.dir ? VFW_E_INVALID_DIRECTION : S_OK;
}
// Exposes IMemInputPin (also used for IUnknown) and IPin.
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::QueryInterface(REFIID riid,
                                                                  void** ppv) {
  (*ppv) = nullptr;
  if (riid == IID_IUnknown || riid == IID_IMemInputPin) {
    *ppv = static_cast<IMemInputPin*>(this);
  } else if (riid == IID_IPin) {
    *ppv = static_cast<IPin*>(this);
  }

  if (!(*ppv))
    return E_NOINTERFACE;

  // AddRef through a concrete base to disambiguate the call.
  static_cast<IMemInputPin*>(this)->AddRef();
  return S_OK;
}
// Initiates a connection to `receive_pin`. A fully specified type is tried
// directly; a partial type is expanded into concrete candidates which are
// attempted in order until one succeeds.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::Connect(IPin* receive_pin, const AM_MEDIA_TYPE* media_type) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (!media_type || !receive_pin)
    return E_POINTER;

  if (!Filter()->IsStopped())
    return VFW_E_NOT_STOPPED;

  if (receive_pin_) {
    RTC_DCHECK_NOTREACHED();
    return VFW_E_ALREADY_CONNECTED;
  }

  if (IsMediaTypeFullySpecified(*media_type))
    return AttemptConnection(receive_pin, media_type);

  auto types = DetermineCandidateFormats(receive_pin, media_type);
  bool connected = false;
  for (auto* type : types) {
    // Bug fix: attempt each concrete candidate `type`; the original passed
    // the partial `media_type`, so the candidates were never tried.
    if (!connected && AttemptConnection(receive_pin, type) == S_OK)
      connected = true;
    // All candidates were allocated by DetermineCandidateFormats().
    FreeMediaType(type);
  }

  return connected ? S_OK : VFW_E_NO_ACCEPTABLE_TYPES;
}
// Called by the upstream pin to complete a connection it initiated.
// Validates direction and media type, then caches both the peer pin and a
// deep copy of the agreed media type.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::ReceiveConnection(IPin* connector,
                                   const AM_MEDIA_TYPE* media_type) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  RTC_DCHECK(Filter()->IsStopped());

  if (receive_pin_) {
    RTC_DCHECK_NOTREACHED();
    return VFW_E_ALREADY_CONNECTED;
  }

  HRESULT hr = CheckDirection(connector);
  if (FAILED(hr))
    return hr;

  if (!TranslateMediaTypeToVideoCaptureCapability(media_type,
                                                  &resulting_capability_))
    return VFW_E_TYPE_NOT_ACCEPTED;

  // Complete the connection

  receive_pin_ = connector;
  ResetMediaType(&media_type_);
  CopyMediaType(&media_type_, media_type);

  return S_OK;
}
// Breaks the current connection (only legal while stopped); S_FALSE if the
// pin was not connected.
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::Disconnect() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (!Filter()->IsStopped())
    return VFW_E_NOT_STOPPED;

  if (!receive_pin_)
    return S_FALSE;

  ClearAllocator(true);
  receive_pin_ = nullptr;

  return S_OK;
}
// Returns the connected peer pin (caller receives a reference), or
// VFW_E_NOT_CONNECTED.
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::ConnectedTo(IPin** pin) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (!receive_pin_)
    return VFW_E_NOT_CONNECTED;

  *pin = receive_pin_.get();
  receive_pin_->AddRef();

  return S_OK;
}
// Deep-copies the connected media type into `media_type` (caller frees its
// contents); VFW_E_NOT_CONNECTED when no connection exists.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::ConnectionMediaType(AM_MEDIA_TYPE* media_type) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (!receive_pin_)
    return VFW_E_NOT_CONNECTED;

  CopyMediaType(media_type, &media_type_);

  return S_OK;
}
// Copies the pin info; per COM rules the returned pFilter carries a
// reference the caller must release.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::QueryPinInfo(PIN_INFO* info) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  *info = info_;
  if (info_.pFilter)
    info_.pFilter->AddRef();
  return S_OK;
}
// Reports this pin's direction (always PINDIR_INPUT, set in the ctor).
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::QueryDirection(PIN_DIRECTION* pin_dir) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  *pin_dir = info_.dir;
  return S_OK;
}
// Returns a CoTaskMemAlloc'd copy of the pin name; the caller frees it with
// CoTaskMemFree.
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::QueryId(LPWSTR* id) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  size_t len = lstrlenW(info_.achName);
  *id = reinterpret_cast<LPWSTR>(CoTaskMemAlloc((len + 1) * sizeof(wchar_t)));
  // Bug fix: the allocation was previously copied into without a null check.
  if (!*id)
    return E_OUTOFMEMORY;
  lstrcpyW(*id, info_.achName);
  return S_OK;
}
// Returns S_OK when `media_type` is one this pin can capture, S_FALSE
// otherwise. Probes into a local copy so resulting_capability_ is untouched.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::QueryAccept(const AM_MEDIA_TYPE* media_type) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  RTC_DCHECK(Filter()->IsStopped());
  VideoCaptureCapability capability(resulting_capability_);
  // Bug fix: the return values were inverted — a successfully translated
  // (i.e. supported) type must yield S_OK (accept), not S_FALSE.
  return TranslateMediaTypeToVideoCaptureCapability(media_type, &capability)
             ? S_OK
             : S_FALSE;
}
// Hands out an enumerator over our preferred media types. ComRefCount
// starts at zero, so the AddRef() below gives the caller its reference.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::EnumMediaTypes(IEnumMediaTypes** types) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  *types = new ComRefCount<MediaTypesEnum>(requested_capability_);
  (*types)->AddRef();
  return S_OK;
}
// Not supported; this sink has no internal pin-to-pin routing to report.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::QueryInternalConnections(IPin** pins, ULONG* count) {
  return E_NOTIMPL;
}
// End-of-stream is acknowledged but requires no action for a live capture.
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::EndOfStream() {
  return S_OK;
}
// Raises the flushing flag so Receive() drops samples until EndFlush().
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::BeginFlush() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  flushing_ = true;
  return S_OK;
}
// Clears the flushing flag and any pending runtime error so delivery can
// resume.
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::EndFlush() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  flushing_ = false;
  runtime_error_ = false;
  return S_OK;
}
// Segment boundaries carry no meaning for this sink; accepted and ignored.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::NewSegment(REFERENCE_TIME start,
                            REFERENCE_TIME stop,
                            double rate) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  return S_OK;
}
// Returns our sample allocator (creating the default DirectShow memory
// allocator on first use). The caller receives its own reference.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::GetAllocator(IMemAllocator** allocator) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (allocator_ == nullptr) {
    HRESULT hr = CoCreateInstance(CLSID_MemoryAllocator, 0,
                                  CLSCTX_INPROC_SERVER, IID_IMemAllocator,
                                  reinterpret_cast<void**>(allocator));
    if (FAILED(hr))
      return hr;
    // Adopt the reference returned by CoCreateInstance.
    allocator_.swap(allocator);
  }
  *allocator = allocator_.get();
  allocator_->AddRef();
  return S_OK;
}
// Adopts the allocator chosen by the upstream pin: swap exchanges the raw
// pointers without touching refcounts, then we AddRef the new allocator and
// release the reference previously held on the old one.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::NotifyAllocator(IMemAllocator* allocator, BOOL read_only) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  allocator_.swap(&allocator);
  if (allocator_)
    allocator_->AddRef();
  if (allocator)
    allocator->Release();
  return S_OK;
}
// No special buffer requirements; the upstream default is fine.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::GetAllocatorRequirements(ALLOCATOR_PROPERTIES* props) {
  return E_NOTIMPL;
}
// Called on the capture thread for every delivered sample. Drops samples
// while flushing, surfaces sticky runtime errors, handles in-band format
// changes, and forwards the raw buffer to the filter's observer.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::Receive(IMediaSample* media_sample) {
  RTC_DCHECK_RUN_ON(&capture_checker_);

  CaptureSinkFilter* const filter = static_cast<CaptureSinkFilter*>(Filter());

  // flushing_/runtime_error_ are written on the main thread; relaxed reads
  // are sufficient for this drop-sample check.
  if (flushing_.load(std::memory_order_relaxed))
    return S_FALSE;

  if (runtime_error_.load(std::memory_order_relaxed))
    return VFW_E_RUNTIME_ERROR;

  if (!capture_thread_id_) {
    // Make sure we set the thread name only once.
    capture_thread_id_ = GetCurrentThreadId();
    rtc::SetCurrentThreadName("webrtc_video_capture");
  }

  AM_SAMPLE2_PROPERTIES sample_props = {};
  GetSampleProperties(media_sample, &sample_props);
  // Has the format changed in this sample?
  if (sample_props.dwSampleFlags & AM_SAMPLE_TYPECHANGED) {
    // Check the derived class accepts the new format.
    // This shouldn't fail as the source must call QueryAccept first.

    // Note: This will modify resulting_capability_.
    // That should be OK as long as resulting_capability_ is only modified
    // on this thread while it is running (filter is not stopped), and only
    // modified on the main thread when the filter is stopped (i.e. this thread
    // is not running).
    if (!TranslateMediaTypeToVideoCaptureCapability(sample_props.pMediaType,
                                                    &resulting_capability_)) {
      // Raise a runtime error if we fail the media type
      runtime_error_ = true;
      EndOfStream();
      Filter()->NotifyEvent(EC_ERRORABORT, VFW_E_TYPE_NOT_ACCEPTED, 0);
      return VFW_E_INVALIDMEDIATYPE;
    }
  }

  filter->ProcessCapturedFrame(sample_props.pbBuffer, sample_props.lActual,
                               resulting_capability_);

  return S_OK;
}
// Delivers a batch of samples in order via Receive(). Stops at the first
// sample that is not accepted, reports via `processed` how many samples were
// delivered successfully, and returns the last HRESULT from Receive().
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureInputPin::ReceiveMultiple(IMediaSample** samples,
                                 long count,
                                 long* processed) {
  *processed = 0;
  for (long i = 0; i < count; ++i) {
    HRESULT hr = Receive(samples[i]);
    if (hr != S_OK)
      return hr;
    *processed = i + 1;
  }
  return S_OK;
}
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureInputPin::ReceiveCanBlock() {
  // Receive() never blocks waiting for resources.
  return S_FALSE;
}
// ----------------------------------------------------------------------------
// CaptureSinkFilter: renderer filter that owns the single CaptureInputPin and
// forwards captured frames to the VideoCaptureImpl observer.
CaptureSinkFilter::CaptureSinkFilter(VideoCaptureImpl* capture_observer)
    : input_pin_(new ComRefCount<CaptureInputPin>(this)),
      capture_observer_(capture_observer) {}

CaptureSinkFilter::~CaptureSinkFilter() {
  RTC_DCHECK_RUN_ON(&main_checker_);
}
// Forwards the requested capture capability to the input pin.
HRESULT CaptureSinkFilter::SetRequestedCapability(
    const VideoCaptureCapability& capability) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // Called on the same thread as capture is started on.
  return input_pin_->SetRequestedCapability(capability);
}
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::GetState(DWORD msecs, FILTER_STATE* state) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // State changes complete synchronously here, so the timeout is unused.
  *state = state_;
  return S_OK;
}

COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::SetSyncSource(IReferenceClock* clock) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // This filter does not use a reference clock; accept and ignore it.
  return S_OK;
}

COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::GetSyncSource(IReferenceClock** clock) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // No reference clock is held (see SetSyncSource above).
  return E_NOTIMPL;
}
COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::Pause() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  state_ = State_Paused;
  return S_OK;
}

COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::Run(REFERENCE_TIME tStart) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // Pass through the paused state when starting from stopped, matching the
  // DirectShow Stopped -> Paused -> Running state model.
  if (state_ == State_Stopped)
    Pause();
  state_ = State_Running;
  // Tell the pin the filter is now active.
  input_pin_->OnFilterActivated();
  return S_OK;
}

COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::Stop() {
  RTC_DCHECK_RUN_ON(&main_checker_);
  if (state_ == State_Stopped)
    return S_OK;
  state_ = State_Stopped;
  // Tell the pin the filter is no longer active.
  input_pin_->OnFilterDeactivated();
  return S_OK;
}
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::EnumPins(IEnumPins** pins) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // The filter has exactly one (input) pin; return a fresh enumerator over
  // it, with a reference for the caller.
  *pins = new ComRefCount<class EnumPins>(input_pin_.get());
  (*pins)->AddRef();
  return S_OK;
}

COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::FindPin(LPCWSTR id,
                                                             IPin** pin) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // There's no ID assigned to our input pin, so looking it up based on one
  // is pointless (and in practice, this method isn't being used).
  return VFW_E_NOT_FOUND;
}
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::QueryFilterInfo(FILTER_INFO* info) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  // Copy the cached filter info; the caller gets its own reference to the
  // graph pointer embedded in the struct.
  *info = info_;
  if (info->pGraph)
    info->pGraph->AddRef();
  return S_OK;
}
// Called by the graph manager when the filter is added to (graph != null) or
// removed from (graph == null) a filter graph. Caches a media event sink for
// NotifyEvent() and stores the filter's display name.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::JoinFilterGraph(IFilterGraph* graph, LPCWSTR name) {
  RTC_DCHECK_RUN_ON(&main_checker_);
  RTC_DCHECK(IsStopped());
  // Note, since a reference to the filter is held by the graph manager,
  // filters must not hold a reference to the graph. If they would, we'd have
  // a circular reference. Instead, a pointer to the graph can be held without
  // reference. See documentation for IBaseFilter::JoinFilterGraph for more.
  info_.pGraph = graph;  // No AddRef().
  sink_ = nullptr;
  if (info_.pGraph) {
    // make sure we don't hold on to the reference we may receive.
    // Note that this assumes the same object identity, but so be it.
    rtc::scoped_refptr<IMediaEventSink> sink;
    GetComInterface(info_.pGraph, &sink);
    sink_ = sink.get();
  }
  info_.achName[0] = L'\0';
  if (name)
    lstrcpynW(info_.achName, name, arraysize(info_.achName));
  return S_OK;
}
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::QueryVendorInfo(LPWSTR* vendor_info) {
  // No vendor string is exposed.
  return E_NOTIMPL;
}
// Hands a raw captured frame buffer to the registered VideoCaptureImpl.
void CaptureSinkFilter::ProcessCapturedFrame(
    unsigned char* buffer,
    size_t length,
    const VideoCaptureCapability& frame_info) {
  // Called on the capture thread.
  capture_observer_->IncomingFrame(buffer, length, frame_info);
}

// Forwards a filter-graph event to the cached event sink, if there is one.
void CaptureSinkFilter::NotifyEvent(long code,
                                    LONG_PTR param1,
                                    LONG_PTR param2) {
  // Called on the capture thread.
  if (!sink_)
    return;
  // Per the DirectShow event convention, param2 of EC_COMPLETE carries the
  // IBaseFilter* of the filter signaling completion.
  if (EC_COMPLETE == code)
    param2 = reinterpret_cast<LONG_PTR>(static_cast<IBaseFilter*>(this));
  sink_->Notify(code, param1, param2);
}
bool CaptureSinkFilter::IsStopped() const {
  RTC_DCHECK_RUN_ON(&main_checker_);
  return state_ == State_Stopped;
}

// Minimal QueryInterface: serves IUnknown, IPersist and IBaseFilter through
// the single IBaseFilter vtable; all other IIDs are rejected.
COM_DECLSPEC_NOTHROW STDMETHODIMP
CaptureSinkFilter::QueryInterface(REFIID riid, void** ppv) {
  if (riid == IID_IUnknown || riid == IID_IPersist || riid == IID_IBaseFilter) {
    *ppv = static_cast<IBaseFilter*>(this);
    AddRef();
    return S_OK;
  }
  return E_NOINTERFACE;
}

COM_DECLSPEC_NOTHROW STDMETHODIMP CaptureSinkFilter::GetClassID(CLSID* clsid) {
  // Report the sink filter's own CLSID (IPersist requirement).
  *clsid = CLSID_SINKFILTER;
  return S_OK;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,162 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
#include <dshow.h>
#include <atomic>
#include <memory>
#include <vector>
#include "api/sequence_checker.h"
#include "modules/video_capture/video_capture_impl.h"
#include "modules/video_capture/windows/help_functions_ds.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
namespace videocapturemodule {
// forward declarations
class CaptureSinkFilter;
// Input pin for camera input
// Implements IMemInputPin, IPin.
class CaptureInputPin : public IMemInputPin, public IPin {
 public:
  CaptureInputPin(CaptureSinkFilter* filter);

  // Sets the capability the pin negotiates for when connecting.
  HRESULT SetRequestedCapability(const VideoCaptureCapability& capability);

  // Notifications from the filter.
  void OnFilterActivated();
  void OnFilterDeactivated();

 protected:
  virtual ~CaptureInputPin();

 private:
  // Returns the owning CaptureSinkFilter (stored in info_.pFilter).
  CaptureSinkFilter* Filter() const;
  HRESULT AttemptConnection(IPin* receive_pin, const AM_MEDIA_TYPE* media_type);
  std::vector<AM_MEDIA_TYPE*> DetermineCandidateFormats(
      IPin* receive_pin,
      const AM_MEDIA_TYPE* media_type);
  void ClearAllocator(bool decommit);
  HRESULT CheckDirection(IPin* pin) const;

  // IUnknown
  STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override;

  // clang-format off
  // clang isn't sure what to do with the longer STDMETHOD() function
  // declarations.

  // IPin
  STDMETHOD(Connect)(IPin* receive_pin,
                     const AM_MEDIA_TYPE* media_type) override;
  STDMETHOD(ReceiveConnection)(IPin* connector,
                               const AM_MEDIA_TYPE* media_type) override;
  STDMETHOD(Disconnect)() override;
  STDMETHOD(ConnectedTo)(IPin** pin) override;
  STDMETHOD(ConnectionMediaType)(AM_MEDIA_TYPE* media_type) override;
  STDMETHOD(QueryPinInfo)(PIN_INFO* info) override;
  STDMETHOD(QueryDirection)(PIN_DIRECTION* pin_dir) override;
  STDMETHOD(QueryId)(LPWSTR* id) override;
  STDMETHOD(QueryAccept)(const AM_MEDIA_TYPE* media_type) override;
  STDMETHOD(EnumMediaTypes)(IEnumMediaTypes** types) override;
  STDMETHOD(QueryInternalConnections)(IPin** pins, ULONG* count) override;
  STDMETHOD(EndOfStream)() override;
  STDMETHOD(BeginFlush)() override;
  STDMETHOD(EndFlush)() override;
  STDMETHOD(NewSegment)(REFERENCE_TIME start, REFERENCE_TIME stop,
                        double rate) override;

  // IMemInputPin
  STDMETHOD(GetAllocator)(IMemAllocator** allocator) override;
  STDMETHOD(NotifyAllocator)(IMemAllocator* allocator, BOOL read_only) override;
  STDMETHOD(GetAllocatorRequirements)(ALLOCATOR_PROPERTIES* props) override;
  STDMETHOD(Receive)(IMediaSample* sample) override;
  STDMETHOD(ReceiveMultiple)(IMediaSample** samples, long count,
                             long* processed) override;
  STDMETHOD(ReceiveCanBlock)() override;
  // clang-format on

  // Main (API) thread vs. the DirectShow streaming/capture thread.
  SequenceChecker main_checker_;
  SequenceChecker capture_checker_;

  VideoCaptureCapability requested_capability_ RTC_GUARDED_BY(main_checker_);
  // Accessed on the main thread when Filter()->IsStopped() (capture thread not
  // running), otherwise accessed on the capture thread.
  VideoCaptureCapability resulting_capability_;
  // Id of the streaming thread; set on first Receive(), used for naming it.
  DWORD capture_thread_id_ = 0;
  rtc::scoped_refptr<IMemAllocator> allocator_ RTC_GUARDED_BY(main_checker_);
  rtc::scoped_refptr<IPin> receive_pin_ RTC_GUARDED_BY(main_checker_);
  // Checked on every Receive(); toggled by BeginFlush/EndFlush and on errors.
  std::atomic_bool flushing_{false};
  std::atomic_bool runtime_error_{false};
  // Holds a referenceless pointer to the owning filter, the name and
  // direction of the pin. The filter pointer can be considered const.
  PIN_INFO info_ = {};
  AM_MEDIA_TYPE media_type_ RTC_GUARDED_BY(main_checker_) = {};
};
// Implement IBaseFilter (including IPersist and IMediaFilter).
class CaptureSinkFilter : public IBaseFilter {
 public:
  CaptureSinkFilter(VideoCaptureImpl* capture_observer);

  // Forwards the requested capability to the input pin.
  HRESULT SetRequestedCapability(const VideoCaptureCapability& capability);

  // Called on the capture thread.
  void ProcessCapturedFrame(unsigned char* buffer,
                            size_t length,
                            const VideoCaptureCapability& frame_info);

  // Forwards a graph event to the cached media event sink, if any.
  void NotifyEvent(long code, LONG_PTR param1, LONG_PTR param2);
  bool IsStopped() const;

  // IUnknown
  STDMETHOD(QueryInterface)(REFIID riid, void** ppv) override;

  // IPersist
  STDMETHOD(GetClassID)(CLSID* clsid) override;

  // IMediaFilter.
  STDMETHOD(GetState)(DWORD msecs, FILTER_STATE* state) override;
  STDMETHOD(SetSyncSource)(IReferenceClock* clock) override;
  STDMETHOD(GetSyncSource)(IReferenceClock** clock) override;
  STDMETHOD(Pause)() override;
  STDMETHOD(Run)(REFERENCE_TIME start) override;
  STDMETHOD(Stop)() override;

  // IBaseFilter
  STDMETHOD(EnumPins)(IEnumPins** pins) override;
  STDMETHOD(FindPin)(LPCWSTR id, IPin** pin) override;
  STDMETHOD(QueryFilterInfo)(FILTER_INFO* info) override;
  STDMETHOD(JoinFilterGraph)(IFilterGraph* graph, LPCWSTR name) override;
  STDMETHOD(QueryVendorInfo)(LPWSTR* vendor_info) override;

 protected:
  virtual ~CaptureSinkFilter();

 private:
  SequenceChecker main_checker_;
  // The filter's single (input) pin, created in the constructor.
  const rtc::scoped_refptr<ComRefCount<CaptureInputPin>> input_pin_;
  // Receiver of captured frames. Raw, non-owning pointer.
  VideoCaptureImpl* const capture_observer_;
  FILTER_INFO info_ RTC_GUARDED_BY(main_checker_) = {};
  // Set/cleared in JoinFilterGraph. The filter must be stopped (no capture)
  // at that time, so no lock is required. While the state is not stopped,
  // the sink will be used from the capture thread.
  IMediaEventSink* sink_ = nullptr;
  FILTER_STATE state_ RTC_GUARDED_BY(main_checker_) = State_Stopped;
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_

View file

@ -0,0 +1,337 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/windows/video_capture_ds.h"
#include <dvdmedia.h> // VIDEOINFOHEADER2
#include "modules/video_capture/video_capture_config.h"
#include "modules/video_capture/windows/help_functions_ds.h"
#include "modules/video_capture/windows/sink_filter_ds.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
// All DirectShow objects start out null; the capture graph is built in
// Init().
VideoCaptureDS::VideoCaptureDS()
    : _captureFilter(NULL),
      _graphBuilder(NULL),
      _mediaControl(NULL),
      _inputSendPin(NULL),
      _outputCapturePin(NULL),
      _dvFilter(NULL),
      _inputDvPin(NULL),
      _outputDvPin(NULL) {}
// Stops the graph, removes the filters from it, and then releases every
// COM interface acquired in Init()/ConnectDVCamera().
VideoCaptureDS::~VideoCaptureDS() {
  if (_mediaControl) {
    _mediaControl->Stop();
  }
  if (_graphBuilder) {
    if (sink_filter_)
      _graphBuilder->RemoveFilter(sink_filter_.get());
    if (_captureFilter)
      _graphBuilder->RemoveFilter(_captureFilter);
    if (_dvFilter)
      _graphBuilder->RemoveFilter(_dvFilter);
  }
  RELEASE_AND_CLEAR(_inputSendPin);
  RELEASE_AND_CLEAR(_outputCapturePin);
  RELEASE_AND_CLEAR(_captureFilter);  // release the capture device
  RELEASE_AND_CLEAR(_dvFilter);
  RELEASE_AND_CLEAR(_mediaControl);
  RELEASE_AND_CLEAR(_inputDvPin);
  RELEASE_AND_CLEAR(_outputDvPin);
  RELEASE_AND_CLEAR(_graphBuilder);
}
// Builds the DirectShow capture graph for the device identified by
// `deviceUniqueIdUTF8`: capture filter, graph builder, media control and the
// sink filter, connected per the default requested capability.
// Returns 0 on success, -1 on any failure.
int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  const int32_t nameLength = (int32_t)strlen((char*)deviceUniqueIdUTF8);
  if (nameLength >= kVideoCaptureUniqueNameLength)
    return -1;
  // Store the device name. new(std::nothrow) returns nullptr on allocation
  // failure, so check before copying into the buffer.
  _deviceUniqueId = new (std::nothrow) char[nameLength + 1];
  if (!_deviceUniqueId) {
    RTC_LOG(LS_INFO) << "Failed to allocate device name buffer.";
    return -1;
  }
  memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
  if (_dsInfo.Init() != 0)
    return -1;
  _captureFilter = _dsInfo.GetDeviceFilter(deviceUniqueIdUTF8);
  if (!_captureFilter) {
    RTC_LOG(LS_INFO) << "Failed to create capture filter.";
    return -1;
  }
  // Get the interface for DirectShow's GraphBuilder
  HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
                                IID_IGraphBuilder, (void**)&_graphBuilder);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to create graph builder.";
    return -1;
  }
  hr = _graphBuilder->QueryInterface(IID_IMediaControl, (void**)&_mediaControl);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to create media control builder.";
    return -1;
  }
  hr = _graphBuilder->AddFilter(_captureFilter, CAPTURE_FILTER_NAME);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to add the capture device to the graph.";
    return -1;
  }
  _outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
  if (!_outputCapturePin) {
    RTC_LOG(LS_INFO) << "Failed to get output capture pin";
    return -1;
  }
  // Create the sink filter used for receiving captured frames.
  sink_filter_ = new ComRefCount<CaptureSinkFilter>(this);
  hr = _graphBuilder->AddFilter(sink_filter_.get(), SINK_FILTER_NAME);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to add the send filter to the graph.";
    return -1;
  }
  _inputSendPin = GetInputPin(sink_filter_.get());
  if (!_inputSendPin) {
    RTC_LOG(LS_INFO) << "Failed to get input send pin";
    return -1;
  }
  // Configure the device output and connect the graph with the default
  // requested capability.
  if (SetCameraOutput(_requestedCapability) != 0) {
    return -1;
  }
  RTC_LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8
                   << "' initialized.";
  return 0;
}
// Starts capturing with `capability`. If it differs from the currently
// requested capability, the graph is disconnected and reconfigured first.
// Returns 0 on success, -1 on failure.
int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  if (capability != _requestedCapability) {
    DisconnectGraph();
    if (SetCameraOutput(capability) != 0) {
      return -1;
    }
  }
  // Pause before Run; a failure here typically means the device is busy.
  HRESULT hr = _mediaControl->Pause();
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO)
        << "Failed to Pause the Capture device. Is it already occupied? " << hr;
    return -1;
  }
  hr = _mediaControl->Run();
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to start the Capture device.";
    return -1;
  }
  return 0;
}
// Stops the capture graph via IMediaControl::StopWhenReady.
// Returns 0 on success, -1 on failure.
int32_t VideoCaptureDS::StopCapture() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  HRESULT hr = _mediaControl->StopWhenReady();
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
    return -1;
  }
  return 0;
}
// Returns true if the DirectShow graph currently reports State_Running.
bool VideoCaptureDS::CaptureStarted() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  OAFilterState state = 0;
  // VFW_S_CANT_CUE is treated as an acceptable (success) result here.
  HRESULT hr = _mediaControl->GetState(1000, &state);
  if (hr != S_OK && hr != VFW_S_CANT_CUE) {
    RTC_LOG(LS_INFO) << "Failed to get the CaptureStarted status";
  }
  RTC_LOG(LS_INFO) << "CaptureStarted " << state;
  return state == State_Running;
}
// Returns (by out-parameter) the capability most recently requested via
// Init()/StartCapture(). Always succeeds.
int32_t VideoCaptureDS::CaptureSettings(VideoCaptureCapability& settings) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  settings = _requestedCapability;
  return 0;
}
// Configures the capture device output to the best match for
// `requestedCapability`, tells the sink filter about the chosen capability,
// and connects the graph (directly, or via the MS DV decoder for DV
// cameras). Returns 0 on success, -1 on failure.
int32_t VideoCaptureDS::SetCameraOutput(
    const VideoCaptureCapability& requestedCapability) {
  RTC_DCHECK_RUN_ON(&api_checker_);
  // Get the best matching capability
  VideoCaptureCapability capability;
  int32_t capabilityIndex;
  // Store the new requested size
  _requestedCapability = requestedCapability;
  // Match the requested capability with the supported.
  if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(
           _deviceUniqueId, _requestedCapability, capability)) < 0) {
    return -1;
  }
  // Reduce the frame rate if possible.
  if (capability.maxFPS > requestedCapability.maxFPS) {
    capability.maxFPS = requestedCapability.maxFPS;
  } else if (capability.maxFPS <= 0) {
    capability.maxFPS = 30;
  }
  // Convert it to the windows capability index since they are not necessarily
  // the same
  VideoCaptureCapabilityWindows windowsCapability;
  if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0) {
    return -1;
  }
  IAMStreamConfig* streamConfig = NULL;
  AM_MEDIA_TYPE* pmt = NULL;
  VIDEO_STREAM_CONFIG_CAPS caps;
  HRESULT hr = _outputCapturePin->QueryInterface(IID_IAMStreamConfig,
                                                 (void**)&streamConfig);
  if (hr) {
    RTC_LOG(LS_INFO) << "Can't get the Capture format settings.";
    return -1;
  }
  // Get the windows capability from the capture device
  bool isDVCamera = false;
  hr = streamConfig->GetStreamCaps(windowsCapability.directShowCapabilityIndex,
                                   &pmt, reinterpret_cast<BYTE*>(&caps));
  if (hr == S_OK) {
    // Adjust the average time per frame in whichever format header the
    // media type uses, when the device supports frame rate control.
    if (pmt->formattype == FORMAT_VideoInfo2) {
      VIDEOINFOHEADER2* h = reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
      if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
        h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
      }
    } else {
      VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
      if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
        h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
      }
    }
    // Set the sink filter to request this capability
    sink_filter_->SetRequestedCapability(capability);
    // Order the capture device to use this capability
    // NOTE(review): the HRESULT is accumulated with += and checked with
    // FAILED() below; hr is S_OK (0) at this point, so a failing SetFormat
    // keeps the sum negative.
    hr += streamConfig->SetFormat(pmt);
    // Check if this is a DV camera and we need to add MS DV Filter
    if (pmt->subtype == MEDIASUBTYPE_dvsl ||
        pmt->subtype == MEDIASUBTYPE_dvsd ||
        pmt->subtype == MEDIASUBTYPE_dvhd) {
      isDVCamera = true;  // This is a DV camera. Use MS DV filter
    }
    FreeMediaType(pmt);
    pmt = NULL;
  }
  RELEASE_AND_CLEAR(streamConfig);
  if (FAILED(hr)) {
    RTC_LOG(LS_INFO) << "Failed to set capture device output format";
    return -1;
  }
  if (isDVCamera) {
    hr = ConnectDVCamera();
  } else {
    hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputSendPin, NULL);
  }
  if (hr != S_OK) {
    RTC_LOG(LS_INFO) << "Failed to connect the Capture graph " << hr;
    return -1;
  }
  return 0;
}
// Stops the graph and disconnects the capture/sink (and, if present, DV)
// pins so the graph can be reconfigured. Returns 0 on success, -1 on error.
int32_t VideoCaptureDS::DisconnectGraph() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  // NOTE(review): HRESULTs are accumulated with +=; any failing call makes
  // the sum differ from S_OK and is reported below.
  HRESULT hr = _mediaControl->Stop();
  hr += _graphBuilder->Disconnect(_outputCapturePin);
  hr += _graphBuilder->Disconnect(_inputSendPin);
  // if the DV camera filter exist
  if (_dvFilter) {
    _graphBuilder->Disconnect(_inputDvPin);
    _graphBuilder->Disconnect(_outputDvPin);
  }
  if (hr != S_OK) {
    RTC_LOG(LS_ERROR)
        << "Failed to Stop the Capture device for reconfiguration " << hr;
    return -1;
  }
  return 0;
}
// Inserts the Microsoft DV decoder between the capture device and the sink
// filter and connects capture -> decoder -> sink. The decoder filter and its
// pins are created lazily on first use and reused afterwards.
// Returns S_OK on success or a failure HRESULT.
HRESULT VideoCaptureDS::ConnectDVCamera() {
  RTC_DCHECK_RUN_ON(&api_checker_);
  HRESULT hr = S_OK;
  if (!_dvFilter) {
    hr = CoCreateInstance(CLSID_DVVideoCodec, NULL, CLSCTX_INPROC,
                          IID_IBaseFilter, (void**)&_dvFilter);
    if (hr != S_OK) {
      RTC_LOG(LS_INFO) << "Failed to create the dv decoder: " << hr;
      return hr;
    }
    hr = _graphBuilder->AddFilter(_dvFilter, L"VideoDecoderDV");
    if (hr != S_OK) {
      RTC_LOG(LS_INFO) << "Failed to add the dv decoder to the graph: " << hr;
      return hr;
    }
    _inputDvPin = GetInputPin(_dvFilter);
    if (_inputDvPin == NULL) {
      RTC_LOG(LS_INFO) << "Failed to get input pin from DV decoder";
      // Return a proper failure HRESULT instead of -1, which is not a
      // defined HRESULT value. Callers only check `hr != S_OK`.
      return E_FAIL;
    }
    _outputDvPin = GetOutputPin(_dvFilter, GUID_NULL);
    if (_outputDvPin == NULL) {
      RTC_LOG(LS_INFO) << "Failed to get output pin from DV decoder";
      return E_FAIL;
    }
  }
  hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputDvPin, NULL);
  if (hr != S_OK) {
    RTC_LOG(LS_INFO) << "Failed to connect capture device to the dv decoder: "
                     << hr;
    return hr;
  }
  hr = _graphBuilder->ConnectDirect(_outputDvPin, _inputSendPin, NULL);
  if (hr != S_OK) {
    if (hr == HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES)) {
      RTC_LOG(LS_INFO) << "Failed to connect the capture device, busy";
    } else {
      RTC_LOG(LS_INFO) << "Failed to connect capture device to the send graph: "
                       << hr;
    }
  }
  return hr;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,75 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_VIDEO_CAPTURE_DS_H_
#define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_VIDEO_CAPTURE_DS_H_
#include "api/scoped_refptr.h"
#include "modules/video_capture/video_capture_impl.h"
#include "modules/video_capture/windows/device_info_ds.h"
#define CAPTURE_FILTER_NAME L"VideoCaptureFilter"
#define SINK_FILTER_NAME L"SinkFilter"
namespace webrtc {
namespace videocapturemodule {
// Forward declaraion
class CaptureSinkFilter;
// DirectShow-based implementation of VideoCaptureImpl. Owns the capture
// graph (capture filter -> [optional DV decoder] -> sink filter).
class VideoCaptureDS : public VideoCaptureImpl {
 public:
  VideoCaptureDS();

  // Builds the DirectShow capture graph for the given device.
  // Returns 0 on success, -1 on failure.
  virtual int32_t Init(const char* deviceUniqueIdUTF8);

  /*************************************************************************
   *
   *   Start/Stop
   *
   *************************************************************************/
  int32_t StartCapture(const VideoCaptureCapability& capability) override;
  int32_t StopCapture() override;

  /**************************************************************************
   *
   *   Properties of the set device
   *
   **************************************************************************/
  bool CaptureStarted() override;
  int32_t CaptureSettings(VideoCaptureCapability& settings) override;

 protected:
  ~VideoCaptureDS() override;

  // Help functions
  int32_t SetCameraOutput(const VideoCaptureCapability& requestedCapability);
  int32_t DisconnectGraph();
  HRESULT ConnectDVCamera();

  DeviceInfoDS _dsInfo RTC_GUARDED_BY(api_checker_);

  IBaseFilter* _captureFilter RTC_GUARDED_BY(api_checker_);
  IGraphBuilder* _graphBuilder RTC_GUARDED_BY(api_checker_);
  IMediaControl* _mediaControl RTC_GUARDED_BY(api_checker_);
  // Sink filter receiving the captured frames.
  rtc::scoped_refptr<CaptureSinkFilter> sink_filter_
      RTC_GUARDED_BY(api_checker_);
  IPin* _inputSendPin RTC_GUARDED_BY(api_checker_);
  IPin* _outputCapturePin RTC_GUARDED_BY(api_checker_);

  // Microsoft DV interface (external DV cameras)
  IBaseFilter* _dvFilter RTC_GUARDED_BY(api_checker_);
  IPin* _inputDvPin RTC_GUARDED_BY(api_checker_);
  IPin* _outputDvPin RTC_GUARDED_BY(api_checker_);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_VIDEO_CAPTURE_DS_H_

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/scoped_refptr.h"
#include "modules/video_capture/windows/video_capture_ds.h"
namespace webrtc {
namespace videocapturemodule {
// static
// Factory for the DirectShow-based DeviceInfo implementation.
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
  // TODO(tommi): Use the Media Foundation version on Vista and up.
  return DeviceInfoDS::Create();
}
// Creates and initializes a DirectShow capture module for `device_id`.
// Returns nullptr if the id is null or initialization fails.
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
    const char* device_id) {
  if (!device_id)
    return nullptr;
  // TODO(tommi): Use Media Foundation implementation for Vista and up.
  auto module = rtc::make_ref_counted<VideoCaptureDS>();
  if (module->Init(device_id) == 0)
    return module;
  return nullptr;
}
} // namespace videocapturemodule
} // namespace webrtc