Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,256 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/camera_portal.h"
#include <gio/gio.h>
#include <gio/gunixfdlist.h>
#include "modules/portal/pipewire_utils.h"
#include "modules/portal/xdg_desktop_portal_utils.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
using xdg_portal::RequestResponse;
using xdg_portal::RequestResponseFromPortalResponse;
using xdg_portal::RequestSessionProxy;
constexpr char kCameraInterfaceName[] = "org.freedesktop.portal.Camera";
class CameraPortalPrivate {
public:
explicit CameraPortalPrivate(CameraPortal::PortalNotifier* notifier);
~CameraPortalPrivate();
void Start();
private:
void OnPortalDone(xdg_portal::RequestResponse result,
int fd = kInvalidPipeWireFd);
static void OnProxyRequested(GObject* object,
GAsyncResult* result,
gpointer user_data);
void ProxyRequested(GDBusProxy* proxy);
static void OnAccessResponse(GDBusProxy* proxy,
GAsyncResult* result,
gpointer user_data);
static void OnResponseSignalEmitted(GDBusConnection* connection,
const char* sender_name,
const char* object_path,
const char* interface_name,
const char* signal_name,
GVariant* parameters,
gpointer user_data);
static void OnOpenResponse(GDBusProxy* proxy,
GAsyncResult* result,
gpointer user_data);
webrtc::Mutex notifier_lock_;
CameraPortal::PortalNotifier* notifier_ RTC_GUARDED_BY(&notifier_lock_) =
nullptr;
GDBusConnection* connection_ = nullptr;
GDBusProxy* proxy_ = nullptr;
GCancellable* cancellable_ = nullptr;
guint access_request_signal_id_ = 0;
};
CameraPortalPrivate::CameraPortalPrivate(CameraPortal::PortalNotifier* notifier)
: notifier_(notifier) {}
CameraPortalPrivate::~CameraPortalPrivate() {
{
webrtc::MutexLock lock(&notifier_lock_);
notifier_ = nullptr;
}
if (access_request_signal_id_) {
g_dbus_connection_signal_unsubscribe(connection_,
access_request_signal_id_);
access_request_signal_id_ = 0;
}
if (cancellable_) {
g_cancellable_cancel(cancellable_);
g_object_unref(cancellable_);
cancellable_ = nullptr;
}
if (proxy_) {
g_object_unref(proxy_);
proxy_ = nullptr;
connection_ = nullptr;
}
}
void CameraPortalPrivate::Start() {
cancellable_ = g_cancellable_new();
Scoped<GError> error;
RequestSessionProxy(kCameraInterfaceName, OnProxyRequested, cancellable_,
this);
}
// static
void CameraPortalPrivate::OnProxyRequested(GObject* gobject,
GAsyncResult* result,
gpointer user_data) {
CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
Scoped<GError> error;
GDBusProxy* proxy = g_dbus_proxy_new_finish(result, error.receive());
if (!proxy) {
// Ignore the error caused by user cancelling the request via `cancellable_`
if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
return;
RTC_LOG(LS_ERROR) << "Failed to get a proxy for the portal: "
<< error->message;
that->OnPortalDone(RequestResponse::kError);
return;
}
RTC_LOG(LS_VERBOSE) << "Successfully created proxy for the portal.";
that->ProxyRequested(proxy);
}
void CameraPortalPrivate::ProxyRequested(GDBusProxy* proxy) {
GVariantBuilder builder;
Scoped<char> variant_string;
std::string access_handle;
proxy_ = proxy;
connection_ = g_dbus_proxy_get_connection(proxy);
g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
variant_string =
g_strdup_printf("capture%d", g_random_int_range(0, G_MAXINT));
g_variant_builder_add(&builder, "{sv}", "handle_token",
g_variant_new_string(variant_string.get()));
access_handle =
xdg_portal::PrepareSignalHandle(variant_string.get(), connection_);
access_request_signal_id_ = xdg_portal::SetupRequestResponseSignal(
access_handle.c_str(), OnResponseSignalEmitted, this, connection_);
RTC_LOG(LS_VERBOSE) << "Requesting camera access from the portal.";
g_dbus_proxy_call(proxy_, "AccessCamera", g_variant_new("(a{sv})", &builder),
G_DBUS_CALL_FLAGS_NONE, /*timeout_msec=*/-1, cancellable_,
reinterpret_cast<GAsyncReadyCallback>(OnAccessResponse),
this);
}
// static
void CameraPortalPrivate::OnAccessResponse(GDBusProxy* proxy,
GAsyncResult* result,
gpointer user_data) {
CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
RTC_DCHECK(that);
Scoped<GError> error;
Scoped<GVariant> variant(
g_dbus_proxy_call_finish(proxy, result, error.receive()));
if (!variant) {
if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
return;
RTC_LOG(LS_ERROR) << "Failed to access portal:" << error->message;
if (that->access_request_signal_id_) {
g_dbus_connection_signal_unsubscribe(that->connection_,
that->access_request_signal_id_);
that->access_request_signal_id_ = 0;
}
that->OnPortalDone(RequestResponse::kError);
}
}
// static
void CameraPortalPrivate::OnResponseSignalEmitted(GDBusConnection* connection,
const char* sender_name,
const char* object_path,
const char* interface_name,
const char* signal_name,
GVariant* parameters,
gpointer user_data) {
CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
RTC_DCHECK(that);
uint32_t portal_response;
g_variant_get(parameters, "(u@a{sv})", &portal_response, nullptr);
if (portal_response) {
RTC_LOG(LS_INFO) << "Camera access denied by the XDG portal.";
that->OnPortalDone(RequestResponseFromPortalResponse(portal_response));
return;
}
RTC_LOG(LS_VERBOSE) << "Camera access granted by the XDG portal.";
GVariantBuilder builder;
g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT);
g_dbus_proxy_call(
that->proxy_, "OpenPipeWireRemote", g_variant_new("(a{sv})", &builder),
G_DBUS_CALL_FLAGS_NONE, /*timeout_msec=*/-1, that->cancellable_,
reinterpret_cast<GAsyncReadyCallback>(OnOpenResponse), that);
}
void CameraPortalPrivate::OnOpenResponse(GDBusProxy* proxy,
GAsyncResult* result,
gpointer user_data) {
CameraPortalPrivate* that = static_cast<CameraPortalPrivate*>(user_data);
RTC_DCHECK(that);
Scoped<GError> error;
Scoped<GUnixFDList> outlist;
Scoped<GVariant> variant(g_dbus_proxy_call_with_unix_fd_list_finish(
proxy, outlist.receive(), result, error.receive()));
if (!variant) {
if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED))
return;
RTC_LOG(LS_ERROR) << "Failed to open PipeWire remote:" << error->message;
if (that->access_request_signal_id_) {
g_dbus_connection_signal_unsubscribe(that->connection_,
that->access_request_signal_id_);
that->access_request_signal_id_ = 0;
}
that->OnPortalDone(RequestResponse::kError);
return;
}
int32_t index;
g_variant_get(variant.get(), "(h)", &index);
int fd = g_unix_fd_list_get(outlist.get(), index, error.receive());
if (fd == kInvalidPipeWireFd) {
RTC_LOG(LS_ERROR) << "Failed to get file descriptor from the list: "
<< error->message;
that->OnPortalDone(RequestResponse::kError);
return;
}
that->OnPortalDone(RequestResponse::kSuccess, fd);
}
void CameraPortalPrivate::OnPortalDone(RequestResponse result, int fd) {
webrtc::MutexLock lock(&notifier_lock_);
if (notifier_) {
notifier_->OnCameraRequestResult(result, fd);
notifier_ = nullptr;
}
}
CameraPortal::CameraPortal(PortalNotifier* notifier)
: private_(std::make_unique<CameraPortalPrivate>(notifier)) {}
CameraPortal::~CameraPortal() {}
void CameraPortal::Start() {
private_->Start();
}
} // namespace webrtc

View file

@ -0,0 +1,47 @@
/*
* Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_CAMERA_PORTAL_H_
#define MODULES_VIDEO_CAPTURE_LINUX_CAMERA_PORTAL_H_
#include <memory>
#include <string>
#include "modules/portal/portal_request_response.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class CameraPortalPrivate;
class RTC_EXPORT CameraPortal {
public:
class PortalNotifier {
public:
virtual void OnCameraRequestResult(xdg_portal::RequestResponse result,
int fd) = 0;
protected:
PortalNotifier() = default;
virtual ~PortalNotifier() = default;
};
explicit CameraPortal(PortalNotifier* notifier);
~CameraPortal();
void Start();
private:
std::unique_ptr<CameraPortalPrivate> private_;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_CAMERA_PORTAL_H_

View file

@ -0,0 +1,52 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
// v4l includes
#include <linux/videodev2.h>
#include <vector>
#if defined(WEBRTC_USE_PIPEWIRE)
#include "modules/video_capture/linux/device_info_pipewire.h"
#endif
#include "modules/video_capture/linux/device_info_v4l2.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "modules/video_capture/video_capture_options.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
return new videocapturemodule::DeviceInfoV4l2();
}
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo(
VideoCaptureOptions* options) {
#if defined(WEBRTC_USE_PIPEWIRE)
if (options->allow_pipewire()) {
return new videocapturemodule::DeviceInfoPipeWire(options);
}
#endif
if (options->allow_v4l2())
return new videocapturemodule::DeviceInfoV4l2();
return nullptr;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,117 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/device_info_pipewire.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <vector>
#include "modules/video_capture/linux/pipewire_session.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
DeviceInfoPipeWire::DeviceInfoPipeWire(VideoCaptureOptions* options)
: DeviceInfoImpl(), pipewire_session_(options->pipewire_session()) {}
int32_t DeviceInfoPipeWire::Init() {
return 0;
}
DeviceInfoPipeWire::~DeviceInfoPipeWire() = default;
uint32_t DeviceInfoPipeWire::NumberOfDevices() {
RTC_CHECK(pipewire_session_);
return pipewire_session_->nodes().size();
}
int32_t DeviceInfoPipeWire::GetDeviceName(uint32_t deviceNumber,
char* deviceNameUTF8,
uint32_t deviceNameLength,
char* deviceUniqueIdUTF8,
uint32_t deviceUniqueIdUTF8Length,
char* productUniqueIdUTF8,
uint32_t productUniqueIdUTF8Length) {
RTC_CHECK(pipewire_session_);
if (deviceNumber >= NumberOfDevices())
return -1;
const PipeWireNode& node = pipewire_session_->nodes().at(deviceNumber);
if (deviceNameLength <= node.display_name().length()) {
RTC_LOG(LS_INFO) << "deviceNameUTF8 buffer passed is too small";
return -1;
}
if (deviceUniqueIdUTF8Length <= node.unique_id().length()) {
RTC_LOG(LS_INFO) << "deviceUniqueIdUTF8 buffer passed is too small";
return -1;
}
if (productUniqueIdUTF8 &&
productUniqueIdUTF8Length <= node.model_id().length()) {
RTC_LOG(LS_INFO) << "productUniqueIdUTF8 buffer passed is too small";
return -1;
}
memset(deviceNameUTF8, 0, deviceNameLength);
node.display_name().copy(deviceNameUTF8, deviceNameLength);
memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
node.unique_id().copy(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length);
if (productUniqueIdUTF8) {
memset(productUniqueIdUTF8, 0, productUniqueIdUTF8Length);
node.model_id().copy(productUniqueIdUTF8, productUniqueIdUTF8Length);
}
return 0;
}
int32_t DeviceInfoPipeWire::CreateCapabilityMap(
const char* deviceUniqueIdUTF8) {
RTC_CHECK(pipewire_session_);
for (auto& node : pipewire_session_->nodes()) {
if (node.unique_id().compare(deviceUniqueIdUTF8) != 0)
continue;
_captureCapabilities = node.capabilities();
_lastUsedDeviceNameLength = node.display_name().length();
_lastUsedDeviceName = static_cast<char*>(
realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1));
memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
_lastUsedDeviceNameLength + 1);
return _captureCapabilities.size();
}
return -1;
}
int32_t DeviceInfoPipeWire::DisplayCaptureSettingsDialogBox(
const char* /*deviceUniqueIdUTF8*/,
const char* /*dialogTitleUTF8*/,
void* /*parentWindow*/,
uint32_t /*positionX*/,
uint32_t /*positionY*/) {
return -1;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_PIPEWIRE_H_
#define MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_PIPEWIRE_H_
#include <stdint.h>
#include "modules/video_capture/device_info_impl.h"
#include "modules/video_capture/video_capture_options.h"
namespace webrtc {
namespace videocapturemodule {
class DeviceInfoPipeWire : public DeviceInfoImpl {
public:
explicit DeviceInfoPipeWire(VideoCaptureOptions* options);
~DeviceInfoPipeWire() override;
uint32_t NumberOfDevices() override;
int32_t GetDeviceName(uint32_t deviceNumber,
char* deviceNameUTF8,
uint32_t deviceNameLength,
char* deviceUniqueIdUTF8,
uint32_t deviceUniqueIdUTF8Length,
char* productUniqueIdUTF8 = nullptr,
uint32_t productUniqueIdUTF8Length = 0) override;
/*
* Fills the membervariable _captureCapabilities with capabilites for the
* given device name.
*/
int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
int32_t DisplayCaptureSettingsDialogBox(const char* /*deviceUniqueIdUTF8*/,
const char* /*dialogTitleUTF8*/,
void* /*parentWindow*/,
uint32_t /*positionX*/,
uint32_t /*positionY*/) override;
int32_t Init() override;
private:
rtc::scoped_refptr<PipeWireSession> pipewire_session_;
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_PIPEWIRE_H_

View file

@ -0,0 +1,354 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/device_info_v4l2.h"
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
// v4l includes
#include <linux/videodev2.h>
#include <vector>
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "rtc_base/logging.h"
// These defines are here to support building on kernel 3.16 which some
// downstream projects, e.g. Firefox, use.
// TODO(apehrson): Remove them and their undefs when no longer needed.
#ifndef V4L2_PIX_FMT_ABGR32
#define ABGR32_OVERRIDE 1
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_ARGB32
#define ARGB32_OVERRIDE 1
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_RGBA32
#define RGBA32_OVERRIDE 1
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4')
#endif
namespace webrtc {
namespace videocapturemodule {
DeviceInfoV4l2::DeviceInfoV4l2() : DeviceInfoImpl() {}
int32_t DeviceInfoV4l2::Init() {
return 0;
}
DeviceInfoV4l2::~DeviceInfoV4l2() {}
uint32_t DeviceInfoV4l2::NumberOfDevices() {
uint32_t count = 0;
char device[20];
int fd = -1;
struct v4l2_capability cap;
/* detect /dev/video [0-63]VideoCaptureModule entries */
for (int n = 0; n < 64; n++) {
snprintf(device, sizeof(device), "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1) {
// query device capabilities and make sure this is a video capture device
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 ||
!(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
close(fd);
continue;
}
close(fd);
count++;
}
}
return count;
}
int32_t DeviceInfoV4l2::GetDeviceName(uint32_t deviceNumber,
char* deviceNameUTF8,
uint32_t deviceNameLength,
char* deviceUniqueIdUTF8,
uint32_t deviceUniqueIdUTF8Length,
char* /*productUniqueIdUTF8*/,
uint32_t /*productUniqueIdUTF8Length*/) {
// Travel through /dev/video [0-63]
uint32_t count = 0;
char device[20];
int fd = -1;
bool found = false;
struct v4l2_capability cap;
for (int n = 0; n < 64; n++) {
snprintf(device, sizeof(device), "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1) {
// query device capabilities and make sure this is a video capture device
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 ||
!(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
close(fd);
continue;
}
if (count == deviceNumber) {
// Found the device
found = true;
break;
} else {
close(fd);
count++;
}
}
}
if (!found)
return -1;
// query device capabilities
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
RTC_LOG(LS_INFO) << "error in querying the device capability for device "
<< device << ". errno = " << errno;
close(fd);
return -1;
}
close(fd);
char cameraName[64];
memset(deviceNameUTF8, 0, deviceNameLength);
memcpy(cameraName, cap.card, sizeof(cap.card));
if (deviceNameLength > strlen(cameraName)) {
memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
} else {
RTC_LOG(LS_INFO) << "buffer passed is too small";
return -1;
}
if (cap.bus_info[0] != 0) { // may not available in all drivers
// copy device id
size_t len = strlen(reinterpret_cast<const char*>(cap.bus_info));
if (deviceUniqueIdUTF8Length > len) {
memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
memcpy(deviceUniqueIdUTF8, cap.bus_info, len);
} else {
RTC_LOG(LS_INFO) << "buffer passed is too small";
return -1;
}
}
return 0;
}
int32_t DeviceInfoV4l2::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
int fd;
char device[32];
bool found = false;
const int32_t deviceUniqueIdUTF8Length = strlen(deviceUniqueIdUTF8);
if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) {
RTC_LOG(LS_INFO) << "Device name too long";
return -1;
}
RTC_LOG(LS_INFO) << "CreateCapabilityMap called for device "
<< deviceUniqueIdUTF8;
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; ++n) {
snprintf(device, sizeof(device), "/dev/video%d", n);
fd = open(device, O_RDONLY);
if (fd == -1)
continue;
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
// skip devices without video capture capability
if (!(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
continue;
}
if (cap.bus_info[0] != 0) {
if (strncmp(reinterpret_cast<const char*>(cap.bus_info),
deviceUniqueIdUTF8,
strlen(deviceUniqueIdUTF8)) == 0) { // match with device id
found = true;
break; // fd matches with device unique id supplied
}
} else { // match for device name
if (IsDeviceNameMatches(reinterpret_cast<const char*>(cap.card),
deviceUniqueIdUTF8)) {
found = true;
break;
}
}
}
close(fd); // close since this is not the matching device
}
if (!found) {
RTC_LOG(LS_INFO) << "no matching device found";
return -1;
}
// now fd will point to the matching device
// reset old capability list.
_captureCapabilities.clear();
int size = FillCapabilities(fd);
close(fd);
// Store the new used device name
_lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
_lastUsedDeviceName = reinterpret_cast<char*>(
realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1));
memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
_lastUsedDeviceNameLength + 1);
RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
return size;
}
int32_t DeviceInfoV4l2::DisplayCaptureSettingsDialogBox(
const char* /*deviceUniqueIdUTF8*/,
const char* /*dialogTitleUTF8*/,
void* /*parentWindow*/,
uint32_t /*positionX*/,
uint32_t /*positionY*/) {
return -1;
}
bool DeviceInfoV4l2::IsDeviceNameMatches(const char* name,
const char* deviceUniqueIdUTF8) {
if (strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0)
return true;
return false;
}
int32_t DeviceInfoV4l2::FillCapabilities(int fd) {
// set image format
struct v4l2_format video_fmt;
memset(&video_fmt, 0, sizeof(struct v4l2_format));
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
unsigned int videoFormats[] = {
V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY,
V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_BGR24, V4L2_PIX_FMT_RGB24,
V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_ABGR32, V4L2_PIX_FMT_ARGB32,
V4L2_PIX_FMT_RGBA32, V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_RGB32,
};
constexpr int totalFmts = sizeof(videoFormats) / sizeof(unsigned int);
int sizes = 13;
unsigned int size[][2] = {{128, 96}, {160, 120}, {176, 144}, {320, 240},
{352, 288}, {640, 480}, {704, 576}, {800, 600},
{960, 720}, {1280, 720}, {1024, 768}, {1440, 1080},
{1920, 1080}};
for (int fmts = 0; fmts < totalFmts; fmts++) {
for (int i = 0; i < sizes; i++) {
video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
video_fmt.fmt.pix.width = size[i][0];
video_fmt.fmt.pix.height = size[i][1];
if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0) {
if ((video_fmt.fmt.pix.width == size[i][0]) &&
(video_fmt.fmt.pix.height == size[i][1])) {
VideoCaptureCapability cap;
cap.width = video_fmt.fmt.pix.width;
cap.height = video_fmt.fmt.pix.height;
if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV) {
cap.videoType = VideoType::kYUY2;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420) {
cap.videoType = VideoType::kI420;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_YVU420) {
cap.videoType = VideoType::kYV12;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG ||
videoFormats[fmts] == V4L2_PIX_FMT_JPEG) {
cap.videoType = VideoType::kMJPEG;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY) {
cap.videoType = VideoType::kUYVY;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_NV12) {
cap.videoType = VideoType::kNV12;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_BGR24) {
// NB that for RGB formats, `VideoType` follows naming conventions
// of libyuv[1], where e.g. the format for FOURCC "ARGB" stores
// pixels in BGRA order in memory. V4L2[2] on the other hand names
// its formats based on the order of the RGB components as stored in
// memory. Applies to all RGB formats below.
// [1]https://chromium.googlesource.com/libyuv/libyuv/+/refs/heads/main/docs/formats.md#the-argb-fourcc
// [2]https://www.kernel.org/doc/html/v6.2/userspace-api/media/v4l/pixfmt-rgb.html#bits-per-component
cap.videoType = VideoType::kRGB24;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_RGB24) {
cap.videoType = VideoType::kBGR24;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_RGB565) {
cap.videoType = VideoType::kRGB565;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_ABGR32) {
cap.videoType = VideoType::kARGB;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_ARGB32) {
cap.videoType = VideoType::kBGRA;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_BGR32) {
cap.videoType = VideoType::kARGB;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_RGB32) {
cap.videoType = VideoType::kBGRA;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_RGBA32) {
cap.videoType = VideoType::kABGR;
} else {
RTC_DCHECK_NOTREACHED();
}
// get fps of current camera mode
// V4l2 does not have a stable method of knowing so we just guess.
if (cap.width >= 800 && cap.videoType != VideoType::kMJPEG) {
cap.maxFPS = 15;
} else {
cap.maxFPS = 30;
}
_captureCapabilities.push_back(cap);
RTC_LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
<< " height:" << cap.height
<< " type:" << static_cast<int32_t>(cap.videoType)
<< " fps:" << cap.maxFPS;
}
}
}
}
RTC_LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
return _captureCapabilities.size();
}
} // namespace videocapturemodule
} // namespace webrtc
#ifdef ABGR32_OVERRIDE
#undef ABGR32_OVERRIDE
#undef V4L2_PIX_FMT_ABGR32
#endif
#ifdef ARGB32_OVERRIDE
#undef ARGB32_OVERRIDE
#undef V4L2_PIX_FMT_ARGB32
#endif
#ifdef RGBA32_OVERRIDE
#undef RGBA32_OVERRIDE
#undef V4L2_PIX_FMT_RGBA32
#endif

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
#define MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_
#include <stdint.h>
#include "modules/video_capture/device_info_impl.h"
namespace webrtc {
namespace videocapturemodule {
class DeviceInfoV4l2 : public DeviceInfoImpl {
public:
DeviceInfoV4l2();
~DeviceInfoV4l2() override;
uint32_t NumberOfDevices() override;
int32_t GetDeviceName(uint32_t deviceNumber,
char* deviceNameUTF8,
uint32_t deviceNameLength,
char* deviceUniqueIdUTF8,
uint32_t deviceUniqueIdUTF8Length,
char* productUniqueIdUTF8 = 0,
uint32_t productUniqueIdUTF8Length = 0) override;
/*
* Fills the membervariable _captureCapabilities with capabilites for the
* given device name.
*/
int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override
RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
int32_t DisplayCaptureSettingsDialogBox(const char* /*deviceUniqueIdUTF8*/,
const char* /*dialogTitleUTF8*/,
void* /*parentWindow*/,
uint32_t /*positionX*/,
uint32_t /*positionY*/) override;
int32_t FillCapabilities(int fd) RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock);
int32_t Init() override;
private:
bool IsDeviceNameMatches(const char* name, const char* deviceUniqueIdUTF8);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_DEVICE_INFO_V4L2_H_

View file

@ -0,0 +1,400 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/pipewire_session.h"
#include <spa/monitor/device.h>
#include <spa/param/format-utils.h>
#include <spa/param/format.h>
#include <spa/param/video/raw.h>
#include <spa/pod/parser.h>
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_capture/device_info_impl.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_encode.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace videocapturemodule {
VideoType PipeWireRawFormatToVideoType(uint32_t id) {
switch (id) {
case SPA_VIDEO_FORMAT_I420:
return VideoType::kI420;
case SPA_VIDEO_FORMAT_NV12:
return VideoType::kNV12;
case SPA_VIDEO_FORMAT_YUY2:
return VideoType::kYUY2;
case SPA_VIDEO_FORMAT_UYVY:
return VideoType::kUYVY;
case SPA_VIDEO_FORMAT_RGB:
return VideoType::kRGB24;
default:
return VideoType::kUnknown;
}
}
PipeWireNode::PipeWireNode(PipeWireSession* session,
uint32_t id,
const spa_dict* props)
: session_(session),
id_(id),
display_name_(spa_dict_lookup(props, PW_KEY_NODE_DESCRIPTION)),
unique_id_(rtc::ToString(id)) {
RTC_LOG(LS_VERBOSE) << "Found Camera: " << display_name_;
proxy_ = static_cast<pw_proxy*>(pw_registry_bind(
session_->pw_registry_, id, PW_TYPE_INTERFACE_Node, PW_VERSION_NODE, 0));
static const pw_node_events node_events{
.version = PW_VERSION_NODE_EVENTS,
.info = OnNodeInfo,
.param = OnNodeParam,
};
pw_node_add_listener(proxy_, &node_listener_, &node_events, this);
}
PipeWireNode::~PipeWireNode() {
pw_proxy_destroy(proxy_);
spa_hook_remove(&node_listener_);
}
// static
void PipeWireNode::OnNodeInfo(void* data, const pw_node_info* info) {
PipeWireNode* that = static_cast<PipeWireNode*>(data);
if (info->change_mask & PW_NODE_CHANGE_MASK_PROPS) {
const char* vid_str;
const char* pid_str;
absl::optional<int> vid;
absl::optional<int> pid;
vid_str = spa_dict_lookup(info->props, SPA_KEY_DEVICE_VENDOR_ID);
pid_str = spa_dict_lookup(info->props, SPA_KEY_DEVICE_PRODUCT_ID);
vid = vid_str ? rtc::StringToNumber<int>(vid_str) : absl::nullopt;
pid = pid_str ? rtc::StringToNumber<int>(pid_str) : absl::nullopt;
if (vid && pid) {
char model_str[10];
snprintf(model_str, sizeof(model_str), "%04x:%04x", vid.value(),
pid.value());
that->model_id_ = model_str;
}
} else if (info->change_mask & PW_NODE_CHANGE_MASK_PARAMS) {
for (uint32_t i = 0; i < info->n_params; i++) {
uint32_t id = info->params[i].id;
if (id == SPA_PARAM_EnumFormat &&
info->params[i].flags & SPA_PARAM_INFO_READ) {
pw_node_enum_params(that->proxy_, 0, id, 0, UINT32_MAX, nullptr);
break;
}
}
that->session_->PipeWireSync();
}
}
// static
void PipeWireNode::OnNodeParam(void* data,
int seq,
uint32_t id,
uint32_t index,
uint32_t next,
const spa_pod* param) {
PipeWireNode* that = static_cast<PipeWireNode*>(data);
auto* obj = reinterpret_cast<const spa_pod_object*>(param);
const spa_pod_prop* prop = nullptr;
VideoCaptureCapability cap;
spa_pod* val;
uint32_t n_items, choice;
cap.videoType = VideoType::kUnknown;
cap.maxFPS = 0;
prop = spa_pod_object_find_prop(obj, prop, SPA_FORMAT_VIDEO_framerate);
if (prop) {
val = spa_pod_get_values(&prop->value, &n_items, &choice);
if (val->type == SPA_TYPE_Fraction) {
spa_fraction* fract;
fract = static_cast<spa_fraction*>(SPA_POD_BODY(val));
if (choice == SPA_CHOICE_None)
cap.maxFPS = 1.0 * fract[0].num / fract[0].denom;
else if (choice == SPA_CHOICE_Range && fract[1].num > 0)
cap.maxFPS = 1.0 * fract[1].num / fract[1].denom;
}
}
prop = spa_pod_object_find_prop(obj, prop, SPA_FORMAT_VIDEO_size);
if (!prop)
return;
val = spa_pod_get_values(&prop->value, &n_items, &choice);
if (val->type != SPA_TYPE_Rectangle)
return;
if (choice != SPA_CHOICE_None)
return;
if (!ParseFormat(param, &cap))
return;
spa_rectangle* rect;
rect = static_cast<spa_rectangle*>(SPA_POD_BODY(val));
cap.width = rect[0].width;
cap.height = rect[0].height;
RTC_LOG(LS_VERBOSE) << "Found Format(" << that->display_name_
<< "): " << static_cast<int>(cap.videoType) << "("
<< cap.width << "x" << cap.height << "@" << cap.maxFPS
<< ")";
that->capabilities_.push_back(cap);
}
// static
bool PipeWireNode::ParseFormat(const spa_pod* param,
VideoCaptureCapability* cap) {
auto* obj = reinterpret_cast<const spa_pod_object*>(param);
uint32_t media_type, media_subtype;
if (spa_format_parse(param, &media_type, &media_subtype) < 0) {
RTC_LOG(LS_ERROR) << "Failed to parse video format.";
return false;
}
if (media_type != SPA_MEDIA_TYPE_video)
return false;
if (media_subtype == SPA_MEDIA_SUBTYPE_raw) {
const spa_pod_prop* prop = nullptr;
uint32_t n_items, choice;
spa_pod* val;
uint32_t* id;
prop = spa_pod_object_find_prop(obj, prop, SPA_FORMAT_VIDEO_format);
if (!prop)
return false;
val = spa_pod_get_values(&prop->value, &n_items, &choice);
if (val->type != SPA_TYPE_Id)
return false;
if (choice != SPA_CHOICE_None)
return false;
id = static_cast<uint32_t*>(SPA_POD_BODY(val));
cap->videoType = PipeWireRawFormatToVideoType(id[0]);
if (cap->videoType == VideoType::kUnknown) {
RTC_LOG(LS_INFO) << "Unsupported PipeWire pixel format " << id[0];
return false;
}
} else if (media_subtype == SPA_MEDIA_SUBTYPE_mjpg) {
cap->videoType = VideoType::kMJPEG;
} else {
RTC_LOG(LS_INFO) << "Unsupported PipeWire media subtype " << media_subtype;
}
return cap->videoType != VideoType::kUnknown;
}
CameraPortalNotifier::CameraPortalNotifier(PipeWireSession* session)
: session_(session) {}
void CameraPortalNotifier::OnCameraRequestResult(
xdg_portal::RequestResponse result,
int fd) {
if (result == xdg_portal::RequestResponse::kSuccess) {
session_->InitPipeWire(fd);
} else if (result == xdg_portal::RequestResponse::kUserCancelled) {
session_->Finish(VideoCaptureOptions::Status::DENIED);
} else {
session_->Finish(VideoCaptureOptions::Status::ERROR);
}
}
PipeWireSession::PipeWireSession()
: status_(VideoCaptureOptions::Status::UNINITIALIZED) {}
PipeWireSession::~PipeWireSession() {
Cleanup();
}
void PipeWireSession::Init(VideoCaptureOptions::Callback* callback, int fd) {
{
webrtc::MutexLock lock(&callback_lock_);
callback_ = callback;
}
if (fd != kInvalidPipeWireFd) {
InitPipeWire(fd);
} else {
portal_notifier_ = std::make_unique<CameraPortalNotifier>(this);
portal_ = std::make_unique<CameraPortal>(portal_notifier_.get());
portal_->Start();
}
}
void PipeWireSession::InitPipeWire(int fd) {
if (!InitializePipeWire())
Finish(VideoCaptureOptions::Status::UNAVAILABLE);
if (!StartPipeWire(fd))
Finish(VideoCaptureOptions::Status::ERROR);
}
bool PipeWireSession::StartPipeWire(int fd) {
pw_init(/*argc=*/nullptr, /*argv=*/nullptr);
pw_main_loop_ = pw_thread_loop_new("pipewire-main-loop", nullptr);
pw_context_ =
pw_context_new(pw_thread_loop_get_loop(pw_main_loop_), nullptr, 0);
if (!pw_context_) {
RTC_LOG(LS_ERROR) << "Failed to create PipeWire context";
return false;
}
pw_core_ = pw_context_connect_fd(pw_context_, fd, nullptr, 0);
if (!pw_core_) {
RTC_LOG(LS_ERROR) << "Failed to connect PipeWire context";
return false;
}
static const pw_core_events core_events{
.version = PW_VERSION_CORE_EVENTS,
.done = &OnCoreDone,
.error = &OnCoreError,
};
pw_core_add_listener(pw_core_, &core_listener_, &core_events, this);
static const pw_registry_events registry_events{
.version = PW_VERSION_REGISTRY_EVENTS,
.global = OnRegistryGlobal,
.global_remove = OnRegistryGlobalRemove,
};
pw_registry_ = pw_core_get_registry(pw_core_, PW_VERSION_REGISTRY, 0);
pw_registry_add_listener(pw_registry_, &registry_listener_, &registry_events,
this);
PipeWireSync();
if (pw_thread_loop_start(pw_main_loop_) < 0) {
RTC_LOG(LS_ERROR) << "Failed to start main PipeWire loop";
return false;
}
return true;
}
void PipeWireSession::StopPipeWire() {
if (pw_main_loop_)
pw_thread_loop_stop(pw_main_loop_);
if (pw_core_) {
pw_core_disconnect(pw_core_);
pw_core_ = nullptr;
}
if (pw_context_) {
pw_context_destroy(pw_context_);
pw_context_ = nullptr;
}
if (pw_main_loop_) {
pw_thread_loop_destroy(pw_main_loop_);
pw_main_loop_ = nullptr;
}
}
void PipeWireSession::PipeWireSync() {
sync_seq_ = pw_core_sync(pw_core_, PW_ID_CORE, sync_seq_);
}
// static
void PipeWireSession::OnCoreError(void* data,
uint32_t id,
int seq,
int res,
const char* message) {
RTC_LOG(LS_ERROR) << "PipeWire remote error: " << message;
}
// static
void PipeWireSession::OnCoreDone(void* data, uint32_t id, int seq) {
PipeWireSession* that = static_cast<PipeWireSession*>(data);
if (id == PW_ID_CORE) {
if (seq == that->sync_seq_) {
RTC_LOG(LS_VERBOSE) << "Enumerating PipeWire camera devices complete.";
that->Finish(VideoCaptureOptions::Status::SUCCESS);
}
}
}
// static
void PipeWireSession::OnRegistryGlobal(void* data,
uint32_t id,
uint32_t permissions,
const char* type,
uint32_t version,
const spa_dict* props) {
PipeWireSession* that = static_cast<PipeWireSession*>(data);
if (type != absl::string_view(PW_TYPE_INTERFACE_Node))
return;
if (!spa_dict_lookup(props, PW_KEY_NODE_DESCRIPTION))
return;
auto node_role = spa_dict_lookup(props, PW_KEY_MEDIA_ROLE);
if (!node_role || strcmp(node_role, "Camera"))
return;
that->nodes_.emplace_back(that, id, props);
that->PipeWireSync();
}
// static
void PipeWireSession::OnRegistryGlobalRemove(void* data, uint32_t id) {
PipeWireSession* that = static_cast<PipeWireSession*>(data);
for (auto it = that->nodes_.begin(); it != that->nodes().end(); ++it) {
if ((*it).id() == id) {
that->nodes_.erase(it);
break;
}
}
}
void PipeWireSession::Finish(VideoCaptureOptions::Status status) {
webrtc::MutexLock lock(&callback_lock_);
if (callback_) {
callback_->OnInitialized(status);
callback_ = nullptr;
}
}
void PipeWireSession::Cleanup() {
webrtc::MutexLock lock(&callback_lock_);
callback_ = nullptr;
StopPipeWire();
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,145 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_PIPEWIRE_SESSION_H_
#define MODULES_VIDEO_CAPTURE_LINUX_PIPEWIRE_SESSION_H_
#include <pipewire/core.h>
#include <pipewire/pipewire.h>
#include <deque>
#include <string>
#include <vector>
#include "api/ref_counted_base.h"
#include "api/scoped_refptr.h"
#include "modules/portal/pipewire_utils.h"
#include "modules/video_capture/linux/camera_portal.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_options.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
namespace videocapturemodule {
class PipeWireSession;
class VideoCaptureModulePipeWire;
// PipeWireNode objects are the local representation of PipeWire node objects.
// The portal API ensured that only camera nodes are visible to the client.
// So they all represent one camera that is available via PipeWire.
class PipeWireNode {
public:
PipeWireNode(PipeWireSession* session, uint32_t id, const spa_dict* props);
~PipeWireNode();
uint32_t id() const { return id_; }
std::string display_name() const { return display_name_; }
std::string unique_id() const { return unique_id_; }
std::string model_id() const { return model_id_; }
std::vector<VideoCaptureCapability> capabilities() const {
return capabilities_;
}
private:
static void OnNodeInfo(void* data, const pw_node_info* info);
static void OnNodeParam(void* data,
int seq,
uint32_t id,
uint32_t index,
uint32_t next,
const spa_pod* param);
static bool ParseFormat(const spa_pod* param, VideoCaptureCapability* cap);
pw_proxy* proxy_;
spa_hook node_listener_;
PipeWireSession* session_;
uint32_t id_;
std::string display_name_;
std::string unique_id_;
std::string model_id_;
std::vector<VideoCaptureCapability> capabilities_;
};
class CameraPortalNotifier : public CameraPortal::PortalNotifier {
public:
CameraPortalNotifier(PipeWireSession* session);
~CameraPortalNotifier() = default;
void OnCameraRequestResult(xdg_portal::RequestResponse result,
int fd) override;
private:
PipeWireSession* session_;
};
class PipeWireSession : public rtc::RefCountedNonVirtual<PipeWireSession> {
public:
PipeWireSession();
~PipeWireSession();
void Init(VideoCaptureOptions::Callback* callback,
int fd = kInvalidPipeWireFd);
const std::deque<PipeWireNode>& nodes() const { return nodes_; }
friend class CameraPortalNotifier;
friend class PipeWireNode;
friend class VideoCaptureModulePipeWire;
private:
void InitPipeWire(int fd);
bool StartPipeWire(int fd);
void StopPipeWire();
void PipeWireSync();
static void OnCoreError(void* data,
uint32_t id,
int seq,
int res,
const char* message);
static void OnCoreDone(void* data, uint32_t id, int seq);
static void OnRegistryGlobal(void* data,
uint32_t id,
uint32_t permissions,
const char* type,
uint32_t version,
const spa_dict* props);
static void OnRegistryGlobalRemove(void* data, uint32_t id);
void Finish(VideoCaptureOptions::Status status);
void Cleanup();
webrtc::Mutex callback_lock_;
VideoCaptureOptions::Callback* callback_ RTC_GUARDED_BY(&callback_lock_) =
nullptr;
VideoCaptureOptions::Status status_;
struct pw_thread_loop* pw_main_loop_ = nullptr;
struct pw_context* pw_context_ = nullptr;
struct pw_core* pw_core_ = nullptr;
struct spa_hook core_listener_;
struct pw_registry* pw_registry_ = nullptr;
struct spa_hook registry_listener_;
int sync_seq_ = 0;
std::deque<PipeWireNode> nodes_;
std::unique_ptr<CameraPortal> portal_;
std::unique_ptr<CameraPortalNotifier> portal_notifier_;
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_PIPEWIRE_SESSION_H_

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <errno.h>
#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <time.h>
#include <unistd.h>
#include <new>
#include <string>
#include "api/scoped_refptr.h"
#include "media/base/video_common.h"
#if defined(WEBRTC_USE_PIPEWIRE)
#include "modules/video_capture/linux/video_capture_pipewire.h"
#endif
#include "modules/video_capture/linux/video_capture_v4l2.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_options.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
const char* deviceUniqueId) {
auto implementation = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
if (implementation->Init(deviceUniqueId) != 0)
return nullptr;
return implementation;
}
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
VideoCaptureOptions* options,
const char* deviceUniqueId) {
#if defined(WEBRTC_USE_PIPEWIRE)
if (options->allow_pipewire()) {
auto implementation =
rtc::make_ref_counted<VideoCaptureModulePipeWire>(options);
if (implementation->Init(deviceUniqueId) == 0)
return implementation;
}
#endif
if (options->allow_v4l2()) {
auto implementation = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
if (implementation->Init(deviceUniqueId) == 0)
return implementation;
}
return nullptr;
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,414 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/video_capture_pipewire.h"
#include <spa/param/format.h>
#include <spa/param/video/format-utils.h>
#include <spa/pod/builder.h>
#include <spa/utils/result.h>
#include <vector>
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/portal/pipewire_utils.h"
#include "rtc_base/logging.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace videocapturemodule {
struct {
uint32_t spa_format;
VideoType video_type;
} constexpr kSupportedFormats[] = {
{SPA_VIDEO_FORMAT_I420, VideoType::kI420},
{SPA_VIDEO_FORMAT_NV12, VideoType::kNV12},
{SPA_VIDEO_FORMAT_YUY2, VideoType::kYUY2},
{SPA_VIDEO_FORMAT_UYVY, VideoType::kUYVY},
{SPA_VIDEO_FORMAT_RGB, VideoType::kRGB24},
};
VideoType VideoCaptureModulePipeWire::PipeWireRawFormatToVideoType(
uint32_t spa_format) {
for (const auto& spa_and_pixel_format : kSupportedFormats) {
if (spa_and_pixel_format.spa_format == spa_format)
return spa_and_pixel_format.video_type;
}
RTC_LOG(LS_INFO) << "Unsupported pixel format: " << spa_format;
return VideoType::kUnknown;
}
VideoCaptureModulePipeWire::VideoCaptureModulePipeWire(
VideoCaptureOptions* options)
: VideoCaptureImpl(),
session_(options->pipewire_session()),
initialized_(false),
started_(false) {}
VideoCaptureModulePipeWire::~VideoCaptureModulePipeWire() {
RTC_DCHECK_RUN_ON(&api_checker_);
StopCapture();
}
int32_t VideoCaptureModulePipeWire::Init(const char* deviceUniqueId) {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
RTC_DCHECK_RUN_ON(&api_checker_);
absl::optional<int> id;
id = rtc::StringToNumber<int>(deviceUniqueId);
if (id == absl::nullopt)
return -1;
node_id_ = id.value();
const int len = strlen(deviceUniqueId);
_deviceUniqueId = new (std::nothrow) char[len + 1];
memcpy(_deviceUniqueId, deviceUniqueId, len + 1);
return 0;
}
static spa_pod* BuildFormat(spa_pod_builder* builder,
uint32_t format,
uint32_t width,
uint32_t height,
float frame_rate) {
spa_pod_frame frames[2];
spa_pod_builder_push_object(builder, &frames[0], SPA_TYPE_OBJECT_Format,
SPA_PARAM_EnumFormat);
spa_pod_builder_add(builder, SPA_FORMAT_mediaType,
SPA_POD_Id(SPA_MEDIA_TYPE_video), SPA_FORMAT_mediaSubtype,
SPA_POD_Id(format), 0);
if (format == SPA_MEDIA_SUBTYPE_raw) {
spa_pod_builder_prop(builder, SPA_FORMAT_VIDEO_format, 0);
spa_pod_builder_push_choice(builder, &frames[1], SPA_CHOICE_Enum, 0);
spa_pod_builder_id(builder, kSupportedFormats[0].spa_format);
for (const auto& spa_and_pixel_format : kSupportedFormats)
spa_pod_builder_id(builder, spa_and_pixel_format.spa_format);
spa_pod_builder_pop(builder, &frames[1]);
}
spa_rectangle preferred_size = spa_rectangle{width, height};
spa_rectangle min_size = spa_rectangle{1, 1};
spa_rectangle max_size = spa_rectangle{4096, 4096};
spa_pod_builder_add(
builder, SPA_FORMAT_VIDEO_size,
SPA_POD_CHOICE_RANGE_Rectangle(&preferred_size, &min_size, &max_size), 0);
spa_fraction preferred_frame_rate =
spa_fraction{static_cast<uint32_t>(frame_rate), 1};
spa_fraction min_frame_rate = spa_fraction{0, 1};
spa_fraction max_frame_rate = spa_fraction{INT32_MAX, 1};
spa_pod_builder_add(
builder, SPA_FORMAT_VIDEO_framerate,
SPA_POD_CHOICE_RANGE_Fraction(&preferred_frame_rate, &min_frame_rate,
&max_frame_rate),
0);
return static_cast<spa_pod*>(spa_pod_builder_pop(builder, &frames[0]));
}
int32_t VideoCaptureModulePipeWire::StartCapture(
const VideoCaptureCapability& capability) {
RTC_DCHECK_RUN_ON(&api_checker_);
if (initialized_) {
if (capability == _requestedCapability) {
return 0;
} else {
StopCapture();
}
}
uint8_t buffer[1024] = {};
// We don't want members above to be guarded by capture_checker_ as
// it's meant to be for members that are accessed on the API thread
// only when we are not capturing. The code above can be called many
// times while sharing instance of VideoCapturePipeWire between
// websites and therefore it would not follow the requirements of this
// checker.
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
PipeWireThreadLoopLock thread_loop_lock(session_->pw_main_loop_);
RTC_LOG(LS_VERBOSE) << "Creating new PipeWire stream for node " << node_id_;
pw_properties* reuse_props =
pw_properties_new_string("pipewire.client.reuse=1");
stream_ = pw_stream_new(session_->pw_core_, "camera-stream", reuse_props);
if (!stream_) {
RTC_LOG(LS_ERROR) << "Failed to create camera stream!";
return -1;
}
static const pw_stream_events stream_events{
.version = PW_VERSION_STREAM_EVENTS,
.state_changed = &OnStreamStateChanged,
.param_changed = &OnStreamParamChanged,
.process = &OnStreamProcess,
};
pw_stream_add_listener(stream_, &stream_listener_, &stream_events, this);
spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)};
std::vector<const spa_pod*> params;
uint32_t width = capability.width;
uint32_t height = capability.height;
uint32_t frame_rate = capability.maxFPS;
bool prefer_jpeg = (width > 640) || (height > 480);
params.push_back(
BuildFormat(&builder, SPA_MEDIA_SUBTYPE_raw, width, height, frame_rate));
params.insert(
prefer_jpeg ? params.begin() : params.end(),
BuildFormat(&builder, SPA_MEDIA_SUBTYPE_mjpg, width, height, frame_rate));
int res = pw_stream_connect(
stream_, PW_DIRECTION_INPUT, node_id_,
static_cast<enum pw_stream_flags>(PW_STREAM_FLAG_AUTOCONNECT |
PW_STREAM_FLAG_DONT_RECONNECT |
PW_STREAM_FLAG_MAP_BUFFERS),
params.data(), params.size());
if (res != 0) {
RTC_LOG(LS_ERROR) << "Could not connect to camera stream: "
<< spa_strerror(res);
return -1;
}
_requestedCapability = capability;
initialized_ = true;
return 0;
}
int32_t VideoCaptureModulePipeWire::StopCapture() {
RTC_DCHECK_RUN_ON(&api_checker_);
PipeWireThreadLoopLock thread_loop_lock(session_->pw_main_loop_);
// PipeWireSession is guarded by API checker so just make sure we do
// race detection when the PipeWire loop is locked/stopped to not run
// any callback at this point.
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
if (stream_) {
pw_stream_destroy(stream_);
stream_ = nullptr;
}
_requestedCapability = VideoCaptureCapability();
return 0;
}
bool VideoCaptureModulePipeWire::CaptureStarted() {
RTC_DCHECK_RUN_ON(&api_checker_);
MutexLock lock(&api_lock_);
return started_;
}
int32_t VideoCaptureModulePipeWire::CaptureSettings(
VideoCaptureCapability& settings) {
RTC_DCHECK_RUN_ON(&api_checker_);
settings = _requestedCapability;
return 0;
}
void VideoCaptureModulePipeWire::OnStreamParamChanged(
void* data,
uint32_t id,
const struct spa_pod* format) {
VideoCaptureModulePipeWire* that =
static_cast<VideoCaptureModulePipeWire*>(data);
RTC_DCHECK(that);
RTC_CHECK_RUNS_SERIALIZED(&that->capture_checker_);
if (format && id == SPA_PARAM_Format)
that->OnFormatChanged(format);
}
void VideoCaptureModulePipeWire::OnFormatChanged(const struct spa_pod* format) {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
uint32_t media_type, media_subtype;
if (spa_format_parse(format, &media_type, &media_subtype) < 0) {
RTC_LOG(LS_ERROR) << "Failed to parse video format.";
return;
}
switch (media_subtype) {
case SPA_MEDIA_SUBTYPE_raw: {
struct spa_video_info_raw f;
spa_format_video_raw_parse(format, &f);
configured_capability_.width = f.size.width;
configured_capability_.height = f.size.height;
configured_capability_.videoType = PipeWireRawFormatToVideoType(f.format);
configured_capability_.maxFPS = f.framerate.num / f.framerate.denom;
break;
}
case SPA_MEDIA_SUBTYPE_mjpg: {
struct spa_video_info_mjpg f;
spa_format_video_mjpg_parse(format, &f);
configured_capability_.width = f.size.width;
configured_capability_.height = f.size.height;
configured_capability_.videoType = VideoType::kMJPEG;
configured_capability_.maxFPS = f.framerate.num / f.framerate.denom;
break;
}
default:
configured_capability_.videoType = VideoType::kUnknown;
}
if (configured_capability_.videoType == VideoType::kUnknown) {
RTC_LOG(LS_ERROR) << "Unsupported video format.";
return;
}
RTC_LOG(LS_VERBOSE) << "Configured capture format = "
<< static_cast<int>(configured_capability_.videoType);
uint8_t buffer[1024] = {};
auto builder = spa_pod_builder{buffer, sizeof(buffer)};
// Setup buffers and meta header for new format.
std::vector<const spa_pod*> params;
spa_pod_frame frame;
spa_pod_builder_push_object(&builder, &frame, SPA_TYPE_OBJECT_ParamBuffers,
SPA_PARAM_Buffers);
if (media_subtype == SPA_MEDIA_SUBTYPE_raw) {
// Enforce stride without padding.
size_t stride;
switch (configured_capability_.videoType) {
case VideoType::kI420:
case VideoType::kNV12:
stride = configured_capability_.width;
break;
case VideoType::kYUY2:
case VideoType::kUYVY:
stride = configured_capability_.width * 2;
break;
case VideoType::kRGB24:
stride = configured_capability_.width * 3;
break;
default:
RTC_LOG(LS_ERROR) << "Unsupported video format.";
return;
}
spa_pod_builder_add(&builder, SPA_PARAM_BUFFERS_stride, SPA_POD_Int(stride),
0);
}
spa_pod_builder_add(
&builder, SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 1, 32),
SPA_PARAM_BUFFERS_dataType,
SPA_POD_CHOICE_FLAGS_Int((1 << SPA_DATA_MemFd) | (1 << SPA_DATA_MemPtr)),
0);
params.push_back(
static_cast<spa_pod*>(spa_pod_builder_pop(&builder, &frame)));
params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
&builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_size,
SPA_POD_Int(sizeof(struct spa_meta_header)))));
params.push_back(reinterpret_cast<spa_pod*>(spa_pod_builder_add_object(
&builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type,
SPA_POD_Id(SPA_META_VideoTransform), SPA_PARAM_META_size,
SPA_POD_Int(sizeof(struct spa_meta_videotransform)))));
pw_stream_update_params(stream_, params.data(), params.size());
}
void VideoCaptureModulePipeWire::OnStreamStateChanged(
void* data,
pw_stream_state old_state,
pw_stream_state state,
const char* error_message) {
VideoCaptureModulePipeWire* that =
static_cast<VideoCaptureModulePipeWire*>(data);
RTC_DCHECK(that);
MutexLock lock(&that->api_lock_);
switch (state) {
case PW_STREAM_STATE_STREAMING:
that->started_ = true;
break;
case PW_STREAM_STATE_ERROR:
RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message;
[[fallthrough]];
case PW_STREAM_STATE_PAUSED:
case PW_STREAM_STATE_UNCONNECTED:
case PW_STREAM_STATE_CONNECTING:
that->started_ = false;
break;
}
RTC_LOG(LS_VERBOSE) << "PipeWire stream state change: "
<< pw_stream_state_as_string(old_state) << " -> "
<< pw_stream_state_as_string(state);
}
void VideoCaptureModulePipeWire::OnStreamProcess(void* data) {
VideoCaptureModulePipeWire* that =
static_cast<VideoCaptureModulePipeWire*>(data);
RTC_DCHECK(that);
RTC_CHECK_RUNS_SERIALIZED(&that->capture_checker_);
that->ProcessBuffers();
}
static VideoRotation VideorotationFromPipeWireTransform(uint32_t transform) {
switch (transform) {
case SPA_META_TRANSFORMATION_90:
return kVideoRotation_90;
case SPA_META_TRANSFORMATION_180:
return kVideoRotation_180;
case SPA_META_TRANSFORMATION_270:
return kVideoRotation_270;
default:
return kVideoRotation_0;
}
}
void VideoCaptureModulePipeWire::ProcessBuffers() {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
while (pw_buffer* buffer = pw_stream_dequeue_buffer(stream_)) {
struct spa_meta_header* h;
h = static_cast<struct spa_meta_header*>(
spa_buffer_find_meta_data(buffer->buffer, SPA_META_Header, sizeof(*h)));
struct spa_meta_videotransform* videotransform;
videotransform =
static_cast<struct spa_meta_videotransform*>(spa_buffer_find_meta_data(
buffer->buffer, SPA_META_VideoTransform, sizeof(*videotransform)));
if (videotransform) {
VideoRotation rotation =
VideorotationFromPipeWireTransform(videotransform->transform);
SetCaptureRotation(rotation);
SetApplyRotation(rotation != kVideoRotation_0);
}
if (h->flags & SPA_META_HEADER_FLAG_CORRUPTED) {
RTC_LOG(LS_INFO) << "Dropping corruped frame.";
} else {
IncomingFrame(static_cast<unsigned char*>(buffer->buffer->datas[0].data),
buffer->buffer->datas[0].chunk->size,
configured_capability_);
}
pw_stream_queue_buffer(stream_, buffer);
}
}
} // namespace videocapturemodule
} // namespace webrtc

View file

@ -0,0 +1,60 @@
/*
* Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_PIPEWIRE_H_
#define MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_PIPEWIRE_H_
#include "modules/video_capture/linux/pipewire_session.h"
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
namespace webrtc {
namespace videocapturemodule {
class VideoCaptureModulePipeWire : public VideoCaptureImpl {
public:
explicit VideoCaptureModulePipeWire(VideoCaptureOptions* options);
~VideoCaptureModulePipeWire() override;
int32_t Init(const char* deviceUniqueId);
int32_t StartCapture(const VideoCaptureCapability& capability) override;
int32_t StopCapture() override;
bool CaptureStarted() override;
int32_t CaptureSettings(VideoCaptureCapability& settings) override;
static VideoType PipeWireRawFormatToVideoType(uint32_t format);
private:
static void OnStreamParamChanged(void* data,
uint32_t id,
const struct spa_pod* format);
static void OnStreamStateChanged(void* data,
pw_stream_state old_state,
pw_stream_state state,
const char* error_message);
static void OnStreamProcess(void* data);
void OnFormatChanged(const struct spa_pod* format);
void ProcessBuffers();
const rtc::scoped_refptr<PipeWireSession> session_
RTC_GUARDED_BY(api_checker_);
bool initialized_ RTC_GUARDED_BY(api_checker_);
bool started_ RTC_GUARDED_BY(api_lock_);
int node_id_ RTC_GUARDED_BY(capture_checker_);
VideoCaptureCapability configured_capability_
RTC_GUARDED_BY(capture_checker_);
struct pw_stream* stream_ RTC_GUARDED_BY(capture_checker_) = nullptr;
struct spa_hook stream_listener_ RTC_GUARDED_BY(capture_checker_);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_PIPEWIRE_H_

View file

@ -0,0 +1,496 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_capture/linux/video_capture_v4l2.h"
#include <errno.h>
#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <time.h>
#include <unistd.h>
#include <new>
#include <string>
#include "api/scoped_refptr.h"
#include "media/base/video_common.h"
#include "modules/video_capture/video_capture.h"
#include "rtc_base/logging.h"
// These defines are here to support building on kernel 3.16 which some
// downstream projects, e.g. Firefox, use.
// TODO(apehrson): Remove them and their undefs when no longer needed.
#ifndef V4L2_PIX_FMT_ABGR32
#define ABGR32_OVERRIDE 1
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_ARGB32
#define ARGB32_OVERRIDE 1
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4')
#endif
#ifndef V4L2_PIX_FMT_RGBA32
#define RGBA32_OVERRIDE 1
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4')
#endif
namespace webrtc {
namespace videocapturemodule {
VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
: VideoCaptureImpl(),
_deviceId(-1),
_deviceFd(-1),
_buffersAllocatedByDevice(-1),
_captureStarted(false),
_pool(NULL) {}
int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
RTC_DCHECK_RUN_ON(&api_checker_);
int len = strlen((const char*)deviceUniqueIdUTF8);
_deviceUniqueId = new (std::nothrow) char[len + 1];
if (_deviceUniqueId) {
memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
}
int fd;
char device[32];
bool found = false;
/* detect /dev/video [0-63] entries */
int n;
for (n = 0; n < 64; n++) {
snprintf(device, sizeof(device), "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1) {
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
if (cap.bus_info[0] != 0) {
if (strncmp((const char*)cap.bus_info,
(const char*)deviceUniqueIdUTF8,
strlen((const char*)deviceUniqueIdUTF8)) ==
0) { // match with device id
close(fd);
found = true;
break; // fd matches with device unique id supplied
}
}
}
close(fd); // close since this is not the matching device
}
}
if (!found) {
RTC_LOG(LS_INFO) << "no matching device found";
return -1;
}
_deviceId = n; // store the device id
return 0;
}
VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2() {
RTC_DCHECK_RUN_ON(&api_checker_);
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
StopCapture();
if (_deviceFd != -1)
close(_deviceFd);
}
int32_t VideoCaptureModuleV4L2::StartCapture(
const VideoCaptureCapability& capability) {
RTC_DCHECK_RUN_ON(&api_checker_);
if (_captureStarted) {
if (capability == _requestedCapability) {
return 0;
} else {
StopCapture();
}
}
// We don't want members above to be guarded by capture_checker_ as
// it's meant to be for members that are accessed on the API thread
// only when we are not capturing. The code above can be called many
// times while sharing instance of VideoCaptureV4L2 between websites
// and therefore it would not follow the requirements of this checker.
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
// Set a baseline of configured parameters. It is updated here during
// configuration, then read from the capture thread.
configured_capability_ = capability;
MutexLock lock(&capture_lock_);
// first open /dev/video device
char device[20];
snprintf(device, sizeof(device), "/dev/video%d", _deviceId);
if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0) {
RTC_LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
return -1;
}
// Supported video formats in preferred order.
// If the requested resolution is larger than VGA, we prefer MJPEG. Go for
// I420 otherwise.
unsigned int hdFmts[] = {
V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420,
V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_ABGR32, V4L2_PIX_FMT_ARGB32, V4L2_PIX_FMT_RGBA32,
V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_BGR24,
V4L2_PIX_FMT_RGB24, V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_JPEG,
};
unsigned int sdFmts[] = {
V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_YVU420, V4L2_PIX_FMT_YUYV,
V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_ABGR32,
V4L2_PIX_FMT_ARGB32, V4L2_PIX_FMT_RGBA32, V4L2_PIX_FMT_BGR32,
V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_BGR24, V4L2_PIX_FMT_RGB24,
V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_JPEG,
};
const bool isHd = capability.width > 640 || capability.height > 480;
unsigned int* fmts = isHd ? hdFmts : sdFmts;
static_assert(sizeof(hdFmts) == sizeof(sdFmts));
constexpr int nFormats = sizeof(hdFmts) / sizeof(unsigned int);
// Enumerate image formats.
struct v4l2_fmtdesc fmt;
int fmtsIdx = nFormats;
memset(&fmt, 0, sizeof(fmt));
fmt.index = 0;
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
RTC_LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
RTC_LOG(LS_INFO) << " { pixelformat = "
<< cricket::GetFourccName(fmt.pixelformat)
<< ", description = '" << fmt.description << "' }";
// Match the preferred order.
for (int i = 0; i < nFormats; i++) {
if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
fmtsIdx = i;
}
// Keep enumerating.
fmt.index++;
}
if (fmtsIdx == nFormats) {
RTC_LOG(LS_INFO) << "no supporting video formats found";
return -1;
} else {
RTC_LOG(LS_INFO) << "We prefer format "
<< cricket::GetFourccName(fmts[fmtsIdx]);
}
struct v4l2_format video_fmt;
memset(&video_fmt, 0, sizeof(struct v4l2_format));
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
video_fmt.fmt.pix.width = capability.width;
video_fmt.fmt.pix.height = capability.height;
video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
configured_capability_.videoType = VideoType::kYUY2;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
configured_capability_.videoType = VideoType::kI420;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420)
configured_capability_.videoType = VideoType::kYV12;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
configured_capability_.videoType = VideoType::kUYVY;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
configured_capability_.videoType = VideoType::kNV12;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
configured_capability_.videoType = VideoType::kRGB24;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
configured_capability_.videoType = VideoType::kBGR24;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
configured_capability_.videoType = VideoType::kRGB565;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_ABGR32 ||
video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
configured_capability_.videoType = VideoType::kARGB;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_ARGB32 ||
video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
configured_capability_.videoType = VideoType::kBGRA;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_RGBA32)
configured_capability_.videoType = VideoType::kABGR;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
configured_capability_.videoType = VideoType::kMJPEG;
else
RTC_DCHECK_NOTREACHED();
// set format and frame size now
if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0) {
RTC_LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
return -1;
}
// initialize current width and height
configured_capability_.width = video_fmt.fmt.pix.width;
configured_capability_.height = video_fmt.fmt.pix.height;
// Trying to set frame rate, before check driver capability.
bool driver_framerate_support = true;
struct v4l2_streamparm streamparms;
memset(&streamparms, 0, sizeof(streamparms));
streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
RTC_LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
driver_framerate_support = false;
// continue
} else {
// check the capability flag is set to V4L2_CAP_TIMEPERFRAME.
if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
// driver supports the feature. Set required framerate.
memset(&streamparms, 0, sizeof(streamparms));
streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
streamparms.parm.capture.timeperframe.numerator = 1;
streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
RTC_LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
driver_framerate_support = false;
}
}
}
// If driver doesn't support framerate control, need to hardcode.
// Hardcoding the value based on the frame size.
if (!driver_framerate_support) {
if (configured_capability_.width >= 800 &&
configured_capability_.videoType != VideoType::kMJPEG) {
configured_capability_.maxFPS = 15;
} else {
configured_capability_.maxFPS = 30;
}
}
if (!AllocateVideoBuffers()) {
RTC_LOG(LS_INFO) << "failed to allocate video capture buffers";
return -1;
}
// Needed to start UVC camera - from the uvcview application
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1) {
RTC_LOG(LS_INFO) << "Failed to turn on stream";
return -1;
}
_requestedCapability = capability;
_captureStarted = true;
_streaming = true;
// start capture thread;
if (_captureThread.empty()) {
quit_ = false;
_captureThread = rtc::PlatformThread::SpawnJoinable(
[this] {
while (CaptureProcess()) {
}
},
"CaptureThread",
rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kHigh));
}
return 0;
}
int32_t VideoCaptureModuleV4L2::StopCapture() {
RTC_DCHECK_RUN_ON(&api_checker_);
if (!_captureThread.empty()) {
{
MutexLock lock(&capture_lock_);
quit_ = true;
}
// Make sure the capture thread stops using the mutex.
_captureThread.Finalize();
}
_captureStarted = false;
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
MutexLock lock(&capture_lock_);
if (_streaming) {
_streaming = false;
DeAllocateVideoBuffers();
close(_deviceFd);
_deviceFd = -1;
_requestedCapability = configured_capability_ = VideoCaptureCapability();
}
return 0;
}
// critical section protected by the caller
bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
struct v4l2_requestbuffers rbuffer;
memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
rbuffer.memory = V4L2_MEMORY_MMAP;
rbuffer.count = kNoOfV4L2Bufffers;
if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0) {
RTC_LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
return false;
}
if (rbuffer.count > kNoOfV4L2Bufffers)
rbuffer.count = kNoOfV4L2Bufffers;
_buffersAllocatedByDevice = rbuffer.count;
// Map the buffers
_pool = new Buffer[rbuffer.count];
for (unsigned int i = 0; i < rbuffer.count; i++) {
struct v4l2_buffer buffer;
memset(&buffer, 0, sizeof(v4l2_buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.index = i;
if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0) {
return false;
}
_pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
MAP_SHARED, _deviceFd, buffer.m.offset);
if (MAP_FAILED == _pool[i].start) {
for (unsigned int j = 0; j < i; j++)
munmap(_pool[j].start, _pool[j].length);
return false;
}
_pool[i].length = buffer.length;
if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0) {
return false;
}
}
return true;
}
bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
// unmap buffers
for (int i = 0; i < _buffersAllocatedByDevice; i++)
munmap(_pool[i].start, _pool[i].length);
delete[] _pool;
// turn off stream
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0) {
RTC_LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
}
return true;
}
bool VideoCaptureModuleV4L2::CaptureStarted() {
RTC_DCHECK_RUN_ON(&api_checker_);
return _captureStarted;
}
bool VideoCaptureModuleV4L2::CaptureProcess() {
RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
int retVal = 0;
fd_set rSet;
struct timeval timeout;
FD_ZERO(&rSet);
FD_SET(_deviceFd, &rSet);
timeout.tv_sec = 1;
timeout.tv_usec = 0;
// _deviceFd written only in StartCapture, when this thread isn't running.
retVal = select(_deviceFd + 1, &rSet, NULL, NULL, &timeout);
{
MutexLock lock(&capture_lock_);
if (quit_) {
return false;
}
if (retVal < 0 && errno != EINTR) { // continue if interrupted
// select failed
return false;
} else if (retVal == 0) {
// select timed out
return true;
} else if (!FD_ISSET(_deviceFd, &rSet)) {
// not event on camera handle
return true;
}
if (_streaming) {
struct v4l2_buffer buf;
memset(&buf, 0, sizeof(struct v4l2_buffer));
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
// dequeue a buffer - repeat until dequeued properly!
while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0) {
if (errno != EINTR) {
RTC_LOG(LS_INFO) << "could not sync on a buffer on device "
<< strerror(errno);
return true;
}
}
// convert to to I420 if needed
IncomingFrame(reinterpret_cast<uint8_t*>(_pool[buf.index].start),
buf.bytesused, configured_capability_);
// enqueue the buffer again
if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1) {
RTC_LOG(LS_INFO) << "Failed to enqueue capture buffer";
}
}
}
usleep(0);
return true;
}
int32_t VideoCaptureModuleV4L2::CaptureSettings(
VideoCaptureCapability& settings) {
RTC_DCHECK_RUN_ON(&api_checker_);
settings = _requestedCapability;
return 0;
}
} // namespace videocapturemodule
} // namespace webrtc
#ifdef ABGR32_OVERRIDE
#undef ABGR32_OVERRIDE
#undef V4L2_PIX_FMT_ABGR32
#endif
#ifdef ARGB32_OVERRIDE
#undef ARGB32_OVERRIDE
#undef V4L2_PIX_FMT_ARGB32
#endif
#ifdef RGBA32_OVERRIDE
#undef RGBA32_OVERRIDE
#undef V4L2_PIX_FMT_RGBA32
#endif

View file

@ -0,0 +1,64 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
#define MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include "modules/video_capture/video_capture_defines.h"
#include "modules/video_capture/video_capture_impl.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
namespace videocapturemodule {
class VideoCaptureModuleV4L2 : public VideoCaptureImpl {
public:
VideoCaptureModuleV4L2();
~VideoCaptureModuleV4L2() override;
int32_t Init(const char* deviceUniqueId);
int32_t StartCapture(const VideoCaptureCapability& capability) override;
int32_t StopCapture() override;
bool CaptureStarted() override;
int32_t CaptureSettings(VideoCaptureCapability& settings) override;
private:
enum { kNoOfV4L2Bufffers = 4 };
static void CaptureThread(void*);
bool CaptureProcess();
bool AllocateVideoBuffers() RTC_EXCLUSIVE_LOCKS_REQUIRED(capture_lock_);
bool DeAllocateVideoBuffers() RTC_EXCLUSIVE_LOCKS_REQUIRED(capture_lock_);
rtc::PlatformThread _captureThread RTC_GUARDED_BY(api_checker_);
Mutex capture_lock_ RTC_ACQUIRED_BEFORE(api_lock_);
bool quit_ RTC_GUARDED_BY(capture_lock_);
int32_t _deviceId RTC_GUARDED_BY(api_checker_);
int32_t _deviceFd RTC_GUARDED_BY(capture_checker_);
int32_t _buffersAllocatedByDevice RTC_GUARDED_BY(capture_lock_);
VideoCaptureCapability configured_capability_
RTC_GUARDED_BY(capture_checker_);
bool _streaming RTC_GUARDED_BY(capture_checker_);
bool _captureStarted RTC_GUARDED_BY(api_checker_);
struct Buffer {
void* start;
size_t length;
};
Buffer* _pool RTC_GUARDED_BY(capture_lock_);
};
} // namespace videocapturemodule
} // namespace webrtc
#endif // MODULES_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_V4L2_H_