Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-15 18:08:00 +01:00
parent d327c31227
commit 0b2aca0925
638 changed files with 76461 additions and 0 deletions

8
native/.gitignore vendored Normal file
View file

@ -0,0 +1,8 @@
# NDK / gradle build output directories
/build
obj
libs
/.externalNativeBuild
/.cxx
# Generated cxx-rs bridge sources (the *-rs.cpp files referenced in Android.mk)
*-rs.cpp
*-rs.hpp
# clangd compilation database
/compile_commands.json

View file

@ -0,0 +1,12 @@
[build]
# Set arm64 as the default target
# The actual compilation will have the target overridden by command-line.
target = "aarch64-linux-android"
# Enable cross language LTO, and explicitly set dwarf-version for ThinLTO
rustflags = ["-Z", "dwarf-version=4", "-C", "linker-plugin-lto"]
# Keep cargo artifacts alongside the other native build outputs
target-dir = "../out/rust"

[unstable]
# Build libstd from source so it also gets the size-oriented settings below
build-std = ["std", "panic_abort"]
build-std-features = ["panic_immediate_abort", "optimize_for_size"]
# Allow per-profile rustflags overrides
profile-rustflags = true

46
native/src/Android-rs.mk Normal file
View file

@ -0,0 +1,46 @@
LOCAL_PATH := $(call my-dir)

###########################
# Rust compilation outputs
###########################

# Each module below wraps a static library produced by the cargo build under
# ../out/<abi>/. If the archive already exists, register it as a prebuilt;
# otherwise declare an empty placeholder static library so the module name
# still resolves before the Rust side has been compiled.

include $(CLEAR_VARS)
LOCAL_MODULE := magisk-rs
# Headers exported to every consumer of the Rust core library
LOCAL_EXPORT_C_INCLUDES := src/core/include
LOCAL_LIB = ../out/$(TARGET_ARCH_ABI)/libmagisk-rs.a
ifneq (,$(wildcard $(LOCAL_PATH)/$(LOCAL_LIB)))
LOCAL_SRC_FILES := $(LOCAL_LIB)
include $(PREBUILT_STATIC_LIBRARY)
else
include $(BUILD_STATIC_LIBRARY)
endif

include $(CLEAR_VARS)
LOCAL_MODULE := boot-rs
LOCAL_LIB = ../out/$(TARGET_ARCH_ABI)/libmagiskboot-rs.a
ifneq (,$(wildcard $(LOCAL_PATH)/$(LOCAL_LIB)))
LOCAL_SRC_FILES := $(LOCAL_LIB)
include $(PREBUILT_STATIC_LIBRARY)
else
include $(BUILD_STATIC_LIBRARY)
endif

include $(CLEAR_VARS)
LOCAL_MODULE := init-rs
LOCAL_LIB = ../out/$(TARGET_ARCH_ABI)/libmagiskinit-rs.a
ifneq (,$(wildcard $(LOCAL_PATH)/$(LOCAL_LIB)))
LOCAL_SRC_FILES := $(LOCAL_LIB)
include $(PREBUILT_STATIC_LIBRARY)
else
include $(BUILD_STATIC_LIBRARY)
endif

include $(CLEAR_VARS)
LOCAL_MODULE := policy-rs
LOCAL_LIB = ../out/$(TARGET_ARCH_ABI)/libmagiskpolicy-rs.a
ifneq (,$(wildcard $(LOCAL_PATH)/$(LOCAL_LIB)))
LOCAL_SRC_FILES := $(LOCAL_LIB)
include $(PREBUILT_STATIC_LIBRARY)
else
include $(BUILD_STATIC_LIBRARY)
endif

153
native/src/Android.mk Normal file
View file

@ -0,0 +1,153 @@
LOCAL_PATH := $(call my-dir)

########################
# Binaries
########################

# Each binary is only declared when its B_* variable is defined, so a build
# invocation targets one output at a time.

ifdef B_MAGISK

# The main magisk binary (daemon, applets, zygisk, denylist)
include $(CLEAR_VARS)
LOCAL_MODULE := magisk
LOCAL_STATIC_LIBRARIES := \
    libbase \
    libsystemproperties \
    liblsplt \
    libmagisk-rs
LOCAL_SRC_FILES := \
    core/applets.cpp \
    core/scripting.cpp \
    core/sqlite.cpp \
    core/utils.cpp \
    core/core-rs.cpp \
    core/resetprop/sys.cpp \
    core/su/su.cpp \
    core/zygisk/entry.cpp \
    core/zygisk/module.cpp \
    core/zygisk/hook.cpp \
    core/deny/cli.cpp \
    core/deny/utils.cpp \
    core/deny/logcat.cpp
LOCAL_LDLIBS := -llog
# Keep the symbols listed in exported_sym.txt in the dynamic symbol table
LOCAL_LDFLAGS := -Wl,--dynamic-list=src/exported_sym.txt
include $(BUILD_EXECUTABLE)

endif

ifdef B_PRELOAD

# Small shared library (init/preload.c) loaded during early init
include $(CLEAR_VARS)
LOCAL_MODULE := init-ld
LOCAL_SRC_FILES := init/preload.c
LOCAL_LDFLAGS := -Wl,--strip-all
include $(BUILD_SHARED_LIBRARY)

endif

ifdef B_INIT

# magiskinit: statically linked early-boot binary
include $(CLEAR_VARS)
LOCAL_MODULE := magiskinit
LOCAL_STATIC_LIBRARIES := \
    libbase \
    libpolicy \
    libxz \
    libinit-rs
LOCAL_SRC_FILES := \
    init/mount.cpp \
    init/rootdir.cpp \
    init/getinfo.cpp \
    init/init-rs.cpp
LOCAL_LDFLAGS := -static
ifdef B_CRT0
LOCAL_STATIC_LIBRARIES += crt0
# Redirect vfprintf to the minimal implementation shipped with crt0
LOCAL_LDFLAGS += -Wl,--defsym=vfprintf=tiny_vfprintf
endif
include $(BUILD_EXECUTABLE)

endif

ifdef B_BOOT

# magiskboot: boot image manipulation tool (static)
include $(CLEAR_VARS)
LOCAL_MODULE := magiskboot
LOCAL_STATIC_LIBRARIES := \
    libbase \
    liblz4 \
    libboot-rs
LOCAL_SRC_FILES := \
    boot/bootimg.cpp \
    boot/boot-rs.cpp
LOCAL_LDFLAGS := -static
ifdef B_CRT0
LOCAL_STATIC_LIBRARIES += crt0
# Uses the full musl vfprintf (and libm) instead of the tiny one
LOCAL_LDFLAGS += -lm -Wl,--defsym=vfprintf=musl_vfprintf
endif
include $(BUILD_EXECUTABLE)

endif

ifdef B_POLICY

# magiskpolicy: no C++ sources of its own; implementation comes entirely
# from the linked static libraries
include $(CLEAR_VARS)
LOCAL_MODULE := magiskpolicy
LOCAL_STATIC_LIBRARIES := \
    libbase \
    libpolicy \
    libpolicy-rs
include $(BUILD_EXECUTABLE)

endif

ifdef B_PROP

# Standalone resetprop built from the generic applet stub
include $(CLEAR_VARS)
LOCAL_MODULE := resetprop
LOCAL_STATIC_LIBRARIES := \
    libbase \
    libsystemproperties \
    libmagisk-rs
LOCAL_SRC_FILES := \
    core/applet_stub.cpp \
    core/resetprop/sys.cpp \
    core/core-rs.cpp
# Route the stub's main() to the resetprop applet entry point
LOCAL_CFLAGS := -DAPPLET_STUB_MAIN=resetprop_main
include $(BUILD_EXECUTABLE)

endif

########################
# Libraries
########################

# libpolicy: shared sepolicy manipulation code (C++ side)
include $(CLEAR_VARS)
LOCAL_MODULE := libpolicy
LOCAL_STATIC_LIBRARIES := \
    libbase \
    libsepol
LOCAL_C_INCLUDES := src/sepolicy/include
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_C_INCLUDES)
LOCAL_SRC_FILES := \
    sepolicy/api.cpp \
    sepolicy/sepolicy.cpp \
    sepolicy/policydb.cpp \
    sepolicy/policy-rs.cpp
include $(BUILD_STATIC_LIBRARY)

# Capture LOCAL_PATH before including sub-makefiles, since each of them
# re-invokes $(call my-dir) and resets it
CWD := $(LOCAL_PATH)
include $(CWD)/Android-rs.mk
include $(CWD)/base/Android.mk
include $(CWD)/external/Android.mk

30
native/src/Application.mk Normal file
View file

@ -0,0 +1,30 @@
# Project-wide ndk-build configuration

APP_BUILD_SCRIPT := src/Android.mk
APP_CFLAGS := -Wall -Oz -fomit-frame-pointer
APP_CPPFLAGS := -std=c++23
# No NDK STL: the project links its own libcxx (see base/Android.mk)
APP_STL := none
APP_PLATFORM := android-23
APP_THIN_ARCHIVE := true
# NOTE(review): stripping is disabled here — presumably performed by a later
# packaging step; confirm before changing
APP_STRIP_MODE := none

ifdef MAGISK_DEBUG
NDK_APP_OUT := ./obj/debug
# ThinLTO + DWARF 4 to line up with the Rust flags for cross-language LTO
APP_CFLAGS += -flto=thin -gdwarf-4
APP_LDFLAGS += -flto=thin
else
NDK_APP_OUT := ./obj/release
APP_CFLAGS += -flto
# Fold identical code sections to shrink release binaries
APP_LDFLAGS += -flto -Wl,--icf=all
endif

ifdef B_CRT0
# Disable all security and debugging features
APP_CFLAGS += -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-stack-protector -fno-threadsafe-statics -U_FORTIFY_SOURCE
# Override output folder to make sure all dependencies are rebuilt with new CFLAGS
NDK_APP_OUT := $(NDK_APP_OUT)-nolibc
endif

1251
native/src/Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

67
native/src/Cargo.toml Normal file
View file

@ -0,0 +1,67 @@
[workspace]
# Vendored third-party crates live in external/ and are consumed as path
# dependencies, not as workspace members
exclude = ["external"]
members = ["base", "base/derive", "boot", "core", "init", "sepolicy"]
resolver = "2"

[workspace.package]
version = "0.0.0"
edition = "2024"

[workspace.dependencies]
# In-tree crates
base = { path = "base" }
derive = { path = "base/derive" }
magiskpolicy = { path = "sepolicy" }
cxx = { path = "external/cxx-rs" }
cxx-gen = { path = "external/cxx-rs/gen/lib" }
# Third-party crates (versions pinned workspace-wide)
libc = "0.2.177"
cfg-if = "1.0.4"
num-traits = "0.2.19"
num-derive = "0.4.2"
thiserror = "2.0.17"
byteorder = "1.5.0"
size = "0.5.0"
bytemuck = "1.24.0"
fdt = "0.1.5"
const_format = "0.2.35"
bit-set = "0.8.0"
syn = "2.0.108"
quote = "1.0.41"
proc-macro2 = "1.0.103"
pb-rs = { version = "0.10.0", default-features = false }
quick-protobuf = "0.8.1"
flate2 = { version = "1.1.5", default-features = false }
bzip2 = "0.6.1"
zopfli = "0.8.3"
lz4 = "1.28.1"
lzma-rust2 = { version = "0.15.1", default-features = false }
nix = "0.30.1"
bitflags = "2.10.0"
# Rust crypto crates are tied together
sha1 = "0.11.0-rc.2"
sha2 = "0.11.0-rc.2"
digest = "0.11.0-rc.3"
p256 = "0.14.0-rc.0"
p384 = "0.14.0-rc.0"
p521 = "0.14.0-rc.0"
rsa = "0.10.0-rc.9"
x509-cert = "0.3.0-rc.2"
der = "0.8.0-rc.9"

[patch.crates-io]
# Replace crates.io versions with git/vendored sources
pb-rs = { git = "https://github.com/tafia/quick-protobuf.git" }
quick-protobuf = { git = "https://github.com/tafia/quick-protobuf.git" }
lz4-sys = { path = "external/lz4-sys" }

# Both profiles optimize for size and abort on panic
[profile.dev]
opt-level = "z"
lto = "thin"
panic = "abort"
debug = "none"

[profile.release]
opt-level = "z"
lto = "fat"
codegen-units = 1
panic = "abort"
strip = true

View file

@ -0,0 +1,18 @@
LOCAL_PATH := $(call my-dir)

# Magisk project-wide common code
include $(CLEAR_VARS)
LOCAL_MODULE := libbase
LOCAL_C_INCLUDES := \
    src/include \
    $(LOCAL_PATH)/include \
    out/generated
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_C_INCLUDES)
# Everything that links libbase also needs libcxx
LOCAL_EXPORT_STATIC_LIBRARIES := libcxx
LOCAL_STATIC_LIBRARIES := libcxx
LOCAL_SRC_FILES := \
    base.cpp \
    base-rs.cpp \
    ../external/cxx-rs/src/cxx.cc
include $(BUILD_STATIC_LIBRARY)

View file

@ -0,0 +1,26 @@
[package]
name = "base"
version.workspace = true
edition.workspace = true

[lib]
path = "lib.rs"

[features]
# Opt-in feature flag (pulls in no additional dependencies)
selinux = []

[build-dependencies]
# Generates the C++ side of the cxx bridge at build time (see build.rs)
cxx-gen = { workspace = true }

[dependencies]
derive = { workspace = true }
cxx = { workspace = true }
libc = { workspace = true }
cfg-if = { workspace = true }
thiserror = { workspace = true }
bytemuck = { workspace = true }
num-traits = { workspace = true }
num-derive = { workspace = true }
const_format = { workspace = true }
nix = { workspace = true, features = ["fs", "mount", "user"] }
bitflags = { workspace = true }

1226
native/src/base/argh.rs Normal file

File diff suppressed because it is too large Load diff

433
native/src/base/base.cpp Normal file
View file

@ -0,0 +1,433 @@
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/mman.h>
#include <android/log.h>
#include <fcntl.h>
#include <unistd.h>
#include <syscall.h>
#include <random>
#include <string>
#include <base.hpp>
#include <flags.h>
using namespace std;
#ifndef __call_bypassing_fortify
#define __call_bypassing_fortify(fn) (&fn)
#endif
// Override libc++ new implementation to optimize final build size:
// plain malloc/free with no failure handling or exceptions.
void* operator new(std::size_t s) { return std::malloc(s); }
void* operator new[](std::size_t s) { return std::malloc(s); }
void operator delete(void *p) { std::free(p); }
void operator delete[](void *p) { std::free(p); }
void* operator new(std::size_t s, const std::nothrow_t&) noexcept { return std::malloc(s); }
void* operator new[](std::size_t s, const std::nothrow_t&) noexcept { return std::malloc(s); }
void operator delete(void *p, const std::nothrow_t&) noexcept { std::free(p); }
void operator delete[](void *p, const std::nothrow_t&) noexcept { std::free(p); }

// True if `pattern` occurs anywhere in this view (byte-wise memmem search).
bool byte_view::contains(byte_view pattern) const {
    return _buf != nullptr && memmem(_buf, _sz, pattern._buf, pattern._sz) != nullptr;
}

// Byte-wise equality: same length and same contents.
bool byte_view::operator==(byte_view rhs) const {
    return _sz == rhs._sz && memcmp(_buf, rhs._buf, _sz) == 0;
}

// Exchange buffer ownership with another byte_data.
void byte_data::swap(byte_data &o) {
    std::swap(_buf, o._buf);
    std::swap(_sz, o._sz);
}

// Replace every occurrence of `from` with `to` in-place and return the
// offsets of all patched locations. Each match is first zero-filled over
// from.size() bytes, then overwritten with to's bytes — so `to` is assumed
// to be no longer than `from` (TODO confirm; not checked here).
rust::Vec<size_t> byte_data::patch(byte_view from, byte_view to) const {
    rust::Vec<size_t> v;
    if (_buf == nullptr)
        return v;
    auto p = _buf;
    auto eof = _buf + _sz;
    while (p < eof) {
        p = static_cast<uint8_t *>(memmem(p, eof - p, from.data(), from.size()));
        if (p == nullptr)
            return v;
        memset(p, 0, from.size());
        memcpy(p, to.data(), to.size());
        v.push_back(p - _buf);
        p += from.size();
    }
    return v;
}

// FFI entry point: patch a mutable byte slice (see byte_data::patch).
rust::Vec<size_t> mut_u8_patch(MutByteSlice buf, ByteSlice from, ByteSlice to) {
    byte_data data(buf);
    return data.patch(from, to);
}
// Double-fork so the surviving grandchild is reparented to init and needs
// no reaping. Returns the intermediate child's pid in the caller, 0 in the
// grandchild (the intermediate child exits immediately).
int fork_dont_care() {
    if (int pid = xfork()) {
        waitpid(pid, nullptr, 0);
        return pid;
    } else if (xfork()) {
        exit(0);
    }
    return 0;
}

// Fork a child that receives SIGKILL when its parent dies, so it can never
// outlive the caller. Returns the pid in the parent, 0 in the child.
int fork_no_orphan() {
    int pid = xfork();
    if (pid)
        return pid;
    prctl(PR_SET_PDEATHSIG, SIGKILL);
    // The parent may have already died between fork() and prctl();
    // detect that via reparenting to init
    if (getppid() == 1)
        exit(1);
    return 0;
}
// Fork and execve exec.argv, optionally redirecting the child's stdout
// (and stderr when exec.err is set).
//
// exec.fd semantics:
//   == -1 : create a pipe; on success exec.fd is replaced with the read end
//   >=  0 : redirect child output to the given fd
//   < -1  : leave the child's stdio untouched
//
// Returns the child's pid, or -1 on failure. Only the parent returns;
// the child either execs or exits.
int exec_command(exec_t &exec) {
    auto pipefd = std::array<int, 2>{-1, -1};
    int outfd = -1;
    if (exec.fd == -1) {
        if (xpipe2(pipefd, O_CLOEXEC) == -1)
            return -1;
        outfd = pipefd[1];
    } else if (exec.fd >= 0) {
        outfd = exec.fd;
    }
    int pid = exec.fork();
    if (pid < 0) {
        // Fix: only close fds we actually opened. The old code called
        // close(-1) twice whenever no pipe was created.
        if (pipefd[0] >= 0) close(pipefd[0]);
        if (pipefd[1] >= 0) close(pipefd[1]);
        return -1;
    } else if (pid) {
        if (exec.fd == -1) {
            // Hand the read end to the caller; the write end belongs to the child
            exec.fd = pipefd[0];
            close(pipefd[1]);
        }
        return pid;
    }

    // ----- child process from here on -----
    // Unblock all signals
    sigset_t set;
    sigfillset(&set);
    pthread_sigmask(SIG_UNBLOCK, &set, nullptr);
    if (outfd >= 0) {
        xdup2(outfd, STDOUT_FILENO);
        if (exec.err)
            xdup2(outfd, STDERR_FILENO);
        close(outfd);
    }
    // Call the pre-exec callback
    if (exec.pre_exec)
        exec.pre_exec();
    execve(exec.argv[0], (char **) exec.argv, environ);
    PLOGE("execve %s", exec.argv[0]);
    exit(-1);
}
// Run a command synchronously. Returns its exit code, or -1 if the process
// could not be spawned or did not terminate normally.
int exec_command_sync(exec_t &exec) {
    int pid = exec_command(exec);
    if (pid < 0)
        return -1;
    int status;
    waitpid(pid, &status, 0);
    // Fix: WEXITSTATUS is only meaningful when the child exited normally;
    // a signaled child previously read as exit status 0 ("success").
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

// Spawn a detached thread. Returns 0 on success, or the pthread error code
// (also stored in errno for PLOGE).
int new_daemon_thread(thread_entry entry, void *arg) {
    pthread_t thread;
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    errno = pthread_create(&thread, &attr, entry, arg);
    // Fix: release the attribute object as required by POSIX
    // (harmless no-op on bionic/glibc, but correct everywhere)
    pthread_attr_destroy(&attr);
    if (errno) {
        PLOGE("pthread_create");
    }
    return errno;
}
// Bookkeeping for set_nice_name: the visible process name is overwritten
// in-place inside the original argv memory region.
static char *argv0;
static size_t name_len;  // writable bytes spanning argv[0] .. end of argv[argc-1]

// Record argv[0] and the size of the contiguous argv string region.
// Assumes argv strings are laid out back-to-back in memory — TODO confirm
// this holds for every exec path that reaches here.
void init_argv0(int argc, char **argv) {
    argv0 = argv[0];
    name_len = (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1;
}

// Rewrite the process name: clears the whole argv region, copies the new
// name into it, and updates the kernel comm via PR_SET_NAME.
void set_nice_name(Utf8CStr name) {
    memset(argv0, 0, name_len);
    strscpy(argv0, name.c_str(), name_len);
    prctl(PR_SET_NAME, name.c_str());
}
/*
 * Bionic's atoi runs through strtol().
 * Use our own implementation for faster conversion.
 */

// Parse an unsigned numeral of the given base. Returns -1 (cast to T) on
// any invalid character; an empty string parses to 0 (kept for
// compatibility with existing callers). No overflow detection.
template<typename T, int base>
static T parse_num(std::string_view s) {
    T val = 0;
    for (char c : s) {
        // Fix: the <cctype> classifiers require a value representable as
        // unsigned char; passing a negative char (e.g. a UTF-8 byte) is
        // undefined behavior.
        auto uc = static_cast<unsigned char>(c);
        if (isdigit(uc)) {
            c -= '0';
        } else if (base > 10 && isalpha(uc)) {
            c -= isupper(uc) ? 'A' - 10 : 'a' - 10;
        } else {
            return -1;
        }
        if (c >= base) {
            return -1;
        }
        val *= base;
        val += c;
    }
    return val;
}

// Decimal string -> int; -1 on invalid input.
int parse_int(std::string_view s) {
    return parse_num<int, 10>(s);
}

// Hex string (no 0x prefix) -> uint32_t; (uint32_t)-1 on invalid input.
uint32_t parse_uint32_hex(std::string_view s) {
    return parse_num<uint32_t, 16>(s);
}
// Switch the calling thread into the mount namespace of `pid`.
// Tries pidfd_open + setns first; falls back to opening
// /proc/<pid>/ns/mnt for kernels without pidfd support.
// Returns 0 on success, non-zero on failure (1 if the process vanished).
int switch_mnt_ns(int pid) {
    int ret = -1;
    int fd = syscall(__NR_pidfd_open, pid, 0);
    if (fd > 0) {
        ret = setns(fd, CLONE_NEWNS);
        close(fd);
    }
    if (ret < 0) {
        char mnt[32];
        ssprintf(mnt, sizeof(mnt), "/proc/%d/ns/mnt", pid);
        fd = open(mnt, O_RDONLY);
        if (fd < 0) return 1; // Maybe process died..
        // Switch to its namespace
        ret = xsetns(fd, 0);
        close(fd);
    }
    return ret;
}
// Replace every occurrence of `from` with `to` in-place.
// Returns `str` to allow call chaining.
std::string &replace_all(std::string &str, std::string_view from, std::string_view to) {
    // Fix: an empty needle matches at every position without ever
    // advancing `pos`, which made this loop spin forever.
    if (from.empty())
        return str;
    size_t pos = 0;
    while ((pos = str.find(from, pos)) != std::string::npos) {
        str.replace(pos, from.length(), to);
        // Skip past the replacement so the inserted text is never rescanned
        pos += to.length();
    }
    return str;
}
// Tokenize `s` on any character contained in `delims`.
// Adjacent delimiters (and leading/trailing ones) yield empty tokens;
// an input with no delimiter produces a single-element result.
template <typename T>
static auto split_impl(std::string_view s, std::string_view delims) {
    std::vector<T> tokens;
    for (size_t start = 0;;) {
        size_t hit = s.find_first_of(delims, start);
        tokens.emplace_back(s.substr(start, hit - start));
        if (hit == std::string_view::npos)
            break;
        start = hit + 1;
    }
    return tokens;
}

// Split into owned strings.
std::vector<std::string> split(std::string_view s, std::string_view delims) {
    return split_impl<std::string>(s, delims);
}
#undef vsnprintf
// vsnprintf wrapper that always null-terminates and returns the number of
// characters actually stored (unlike vsnprintf, which returns the would-be
// length). Returns -1 when size is 0.
int vssprintf(char *dest, size_t size, const char *fmt, va_list ap) {
    if (size > 0) {
        *dest = 0;
        return std::min(vsnprintf(dest, size, fmt, ap), (int) size - 1);
    }
    return -1;
}

// printf-style variant of vssprintf.
int ssprintf(char *dest, size_t size, const char *fmt, ...) {
    va_list va;
    va_start(va, fmt);
    int r = vssprintf(dest, size, fmt, va);
    va_end(va);
    return r;
}

#undef strlcpy
// strlcpy wrapper that returns the length actually copied (at most
// size - 1) instead of strlcpy's "length it tried to create".
size_t strscpy(char *dest, const char *src, size_t size) {
    return std::min(strlcpy(dest, src, size), size - 1);
}
#undef vsnprintf
// Format into a fixed 4 KiB stack buffer and forward to the Rust logger.
// Returns the number of characters formatted (truncated to the buffer).
static int fmt_and_log_with_rs(LogLevel level, const char *fmt, va_list ap) {
    constexpr int sz = 4096;
    char buf[sz];
    buf[0] = '\0';
    // Fortify logs when a fatal error occurs. Do not run through fortify again
    int len = std::min(__call_bypassing_fortify(vsnprintf)(buf, sz, fmt, ap), sz - 1);
    log_with_rs(level, Utf8CStr(buf, len + 1));
    return len;
}

// Used to override external C library logging.
// Mirrors the __android_log_print signature; the tag is folded into the
// format string as "tag: fmt" before dispatch.
extern "C" int magisk_log_print(int prio, const char *tag, const char *fmt, ...) {
    LogLevel level;
    switch (prio) {
        case ANDROID_LOG_DEBUG:
            level = LogLevel::Debug;
            break;
        case ANDROID_LOG_INFO:
            level = LogLevel::Info;
            break;
        case ANDROID_LOG_WARN:
            level = LogLevel::Warn;
            break;
        case ANDROID_LOG_ERROR:
            level = LogLevel::Error;
            break;
        default:
            return 0;  // other priorities are silently dropped
    }
    char fmt_buf[4096];
    auto len = strscpy(fmt_buf, tag, sizeof(fmt_buf) - 1);
    // Prevent format specifications in the tag
    std::replace(fmt_buf, fmt_buf + len, '%', '_');
    len = ssprintf(fmt_buf + len, sizeof(fmt_buf) - len - 1, ": %s", fmt) + len;
    // Ensure the fmt string always ends with newline
    if (fmt_buf[len - 1] != '\n') {
        fmt_buf[len] = '\n';
        fmt_buf[len + 1] = '\0';
    }
    va_list argv;
    va_start(argv, fmt);
    int ret = fmt_and_log_with_rs(level, fmt_buf, argv);
    va_end(argv);
    return ret;
}

// Shared body of the LOG* convenience wrappers below
#define LOG_BODY(level) \
    va_list argv; \
    va_start(argv, fmt); \
    fmt_and_log_with_rs(LogLevel::level, fmt, argv); \
    va_end(argv); \

// LTO will optimize out the NOP function
#if MAGISK_DEBUG
void LOGD(const char *fmt, ...) { LOG_BODY(Debug) }
#else
void LOGD(const char *fmt, ...) {}
#endif
void LOGI(const char *fmt, ...) { LOG_BODY(Info) }
void LOGW(const char *fmt, ...) { LOG_BODY(Warn) }
void LOGE(const char *fmt, ...) { LOG_BODY(Error) }

// Export raw symbol to fortify compat
extern "C" void __vloge(const char* fmt, va_list ap) {
    fmt_and_log_with_rs(LogLevel::Error, fmt, ap);
}
// Read everything remaining on fd into a string, 4 KiB at a time.
string full_read(int fd) {
    string str;
    char buf[4096];
    for (ssize_t len; (len = xread(fd, buf, sizeof(buf))) > 0;)
        str.insert(str.end(), buf, buf + len);
    return str;
}

// Read an entire file by path; returns an empty string if it cannot be opened.
string full_read(const char *filename) {
    string str;
    if (int fd = xopen(filename, O_RDONLY | O_CLOEXEC); fd >= 0) {
        str = full_read(fd);
        close(fd);
    }
    return str;
}
// Write `size` zero bytes to fd in 4 KiB chunks.
// Fix: the previous version ignored write()'s return value entirely —
// partial writes left later bytes unwritten (but still counted), and a
// failing fd kept the loop spinning through the full size.
void write_zero(int fd, size_t size) {
    char buf[4096] = {0};
    while (size > 0) {
        size_t chunk = sizeof(buf) > size ? size : sizeof(buf);
        ssize_t written = write(fd, buf, chunk);
        if (written <= 0)
            return;  // error or no progress: stop instead of looping blindly
        size -= static_cast<size_t>(written);  // honor partial writes
    }
}
// Wrap a DIR* in an RAII handle. A null pointer is tolerated; the deleter
// reports failure (1) for it instead of calling closedir(nullptr).
sDIR make_dir(DIR *dp) {
    return sDIR(dp, [](DIR *dp){ return dp ? closedir(dp) : 1; });
}

// Same as make_dir, but for FILE* streams.
sFILE make_file(FILE *fp) {
    return sFILE(fp, [](FILE *fp){ return fp ? fclose(fp) : 1; });
}

// RAII mmap wrappers delegating to the Rust implementations.
// On mapping failure the object stays empty (_buf remains nullptr).

// Map a file by path.
mmap_data::mmap_data(const char *name, bool rw) {
    auto slice = rust::map_file(name, rw);
    if (!slice.empty()) {
        _buf = slice.data();
        _sz = slice.size();
    }
}

// Map a file resolved relative to a directory fd.
mmap_data::mmap_data(int dirfd, const char *name, bool rw) {
    auto slice = rust::map_file_at(dirfd, name, rw);
    if (!slice.empty()) {
        _buf = slice.data();
        _sz = slice.size();
    }
}

// Map `sz` bytes of an already-open fd.
mmap_data::mmap_data(int fd, size_t sz, bool rw) {
    auto slice = rust::map_fd(fd, sz, rw);
    if (!slice.empty()) {
        _buf = slice.data();
        _sz = slice.size();
    }
}

// Unmap on destruction; no-op when the mapping failed.
mmap_data::~mmap_data() {
    if (_buf)
        munmap(_buf, _sz);
}
// Determine the preinit directory under base_dir. Candidates are probed in
// priority order: <base>/unencrypted/magisk, <base>/adb,
// <base>/watchdog/magisk; otherwise fall back to <base>/magisk.
std::string resolve_preinit_dir(const char *base_dir) {
    std::string dir = base_dir;
    // Probe for a subdirectory's existence below the base path
    auto present = [&dir](const char *sub) {
        return access((dir + sub).c_str(), F_OK) == 0;
    };
    if (present("/unencrypted")) {
        dir += "/unencrypted/magisk";
    } else if (present("/adb")) {
        dir += "/adb";
    } else if (present("/watchdog")) {
        dir += "/watchdog/magisk";
    } else {
        dir += "/magisk";
    }
    return dir;
}
// FFI for Utf8CStr
// The implementations of these symbols live on the Rust side; the C++
// methods below are thin forwarders.
extern "C" void cxx$utf8str$new(Utf8CStr *self, const void *s, size_t len);
extern "C" const char *cxx$utf8str$ptr(const Utf8CStr *self);
extern "C" size_t cxx$utf8str$len(const Utf8CStr *self);

// Construct from a raw pointer + length pair (validation happens in Rust).
Utf8CStr::Utf8CStr(const char *s, size_t len) {
    cxx$utf8str$new(this, s, len);
}

const char *Utf8CStr::data() const {
    return cxx$utf8str$ptr(this);
}

size_t Utf8CStr::length() const {
    return cxx$utf8str$len(this);
}

8
native/src/base/build.rs Normal file
View file

@ -0,0 +1,8 @@
use crate::codegen::gen_cxx_binding;

// Codegen helpers shared across crates via a path include.
#[path = "../include/codegen.rs"]
mod codegen;

/// Build script entry: generate the C++ side of this crate's cxx bridge
/// under the "base-rs" name before compilation.
fn main() {
    gen_cxx_binding("base-rs");
}

671
native/src/base/cstr.rs Normal file
View file

@ -0,0 +1,671 @@
use cxx::{ExternType, type_id};
use libc::c_char;
use nix::NixPath;
use std::borrow::Borrow;
use std::cmp::{Ordering, min};
use std::ffi::{CStr, FromBytesUntilNulError, FromBytesWithNulError, OsStr};
use std::fmt::{Debug, Display, Formatter, Write};
use std::ops::Deref;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::str::{FromStr, Utf8Error};
use std::{fmt, mem, slice, str};
use thiserror::Error;
use crate::slice_from_ptr_mut;
// Utf8CStr types are UTF-8 validated and null terminated strings.
//
// Several Utf8CStr types:
//
// Utf8CStr: can only exist as reference, similar to &str
// Utf8CString: dynamically sized buffer allocated on the heap, similar to String
// Utf8CStrBufRef: reference to a fixed sized buffer
// Utf8CStrBufArr<N>: fixed sized buffer allocated on the stack
//
// For easier usage, please use the helper functions in cstr::buf.
//
// In most cases, these are the types being used
//
// &Utf8CStr: whenever a printable null terminated string is needed
// &mut dyn Utf8CStrBuf: whenever we need a buffer that needs to support appending
// strings to the end, and has to be null terminated
// &mut dyn Utf8CStrBuf: whenever we need a pre-allocated buffer that is large enough to fit
// in the result, and has to be null terminated
//
// All types dereferences to &Utf8CStr.
// Utf8CString, Utf8CStrBufRef, and Utf8CStrBufArr<N> implements Utf8CStrBuf.
// Public helper functions
pub mod buf {
    //! Preferred constructors for the various string buffer types.
    use super::{Utf8CStrBufArr, Utf8CStrBufRef, Utf8CString};

    /// Heap-allocated buffer with the given initial capacity.
    #[inline(always)]
    pub fn dynamic(capacity: usize) -> Utf8CString {
        Utf8CString::with_capacity(capacity)
    }

    /// Stack buffer with the default 4096-byte size.
    #[inline(always)]
    pub fn default() -> Utf8CStrBufArr<4096> {
        Utf8CStrBufArr::default()
    }

    /// Stack buffer with a caller-chosen size `N`.
    #[inline(always)]
    pub fn new<const N: usize>() -> Utf8CStrBufArr<N> {
        Utf8CStrBufArr::new()
    }

    /// Borrow an existing byte slice as a string buffer.
    #[inline(always)]
    pub fn wrap(buf: &mut [u8]) -> Utf8CStrBufRef<'_> {
        Utf8CStrBufRef::from(buf)
    }

    /// Wrap a raw pointer/length pair as a string buffer.
    /// SAFETY: `buf` must point to `len` writable bytes for lifetime 'a.
    #[inline(always)]
    pub unsafe fn wrap_ptr<'a>(buf: *mut u8, len: usize) -> Utf8CStrBufRef<'a> {
        unsafe { Utf8CStrBufRef::from_ptr(buf, len) }
    }
}
// Trait definitions

// Common interface of all appendable, null terminated string buffers.
pub trait Utf8CStrBuf: Display + Write + AsRef<Utf8CStr> + Deref<Target = Utf8CStr> {
    // The length of the string without the terminating null character.
    // assert_true(len <= capacity - 1)
    fn len(&self) -> usize;

    // Append `s` (truncating if necessary); returns the bytes appended.
    fn push_str(&mut self, s: &str) -> usize;

    // The capacity of the internal buffer. The maximum string length this buffer can contain
    // is capacity - 1, because the last byte is reserved for the terminating null character.
    fn capacity(&self) -> usize;

    // Reset to an empty (still null terminated) string.
    fn clear(&mut self);

    // Raw pointer for FFI writes; callers must invoke rebuild() afterwards.
    fn as_mut_ptr(&mut self) -> *mut c_char;

    // Shorten the string to new_len bytes (no-op when already shorter).
    fn truncate(&mut self, new_len: usize);

    // Rebuild the Utf8CStr based on the contents of the internal buffer. Required after any
    // unsafe modifications directly though the pointer obtained from self.as_mut_ptr().
    // If an error is returned, the internal buffer will be reset, resulting in an empty string.
    fn rebuild(&mut self) -> Result<(), StrErr>;

    #[inline(always)]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
// In-place null termination for owned string-like types: writes a '\0' one
// byte past len() (inside reserved capacity) and returns the bytes
// including that terminator, without changing the value's length.
pub trait StringExt {
    fn nul_terminate(&mut self) -> &mut [u8];
}

impl StringExt for String {
    fn nul_terminate(&mut self) -> &mut [u8] {
        self.reserve(1);
        // SAFETY: the string is reserved to have enough capacity to fit in the null byte
        // SAFETY: the null byte is explicitly added outside the string's length
        unsafe {
            let buf = slice::from_raw_parts_mut(self.as_mut_ptr(), self.len() + 1);
            *buf.get_unchecked_mut(self.len()) = b'\0';
            buf
        }
    }
}

impl StringExt for PathBuf {
    #[allow(mutable_transmutes)]
    fn nul_terminate(&mut self) -> &mut [u8] {
        self.reserve(1);
        // SAFETY: the PathBuf is reserved to have enough capacity to fit in the null byte
        // SAFETY: the null byte is explicitly added outside the PathBuf's length
        unsafe {
            let bytes: &mut [u8] = mem::transmute(self.as_mut_os_str().as_bytes());
            let buf = slice::from_raw_parts_mut(bytes.as_mut_ptr(), bytes.len() + 1);
            *buf.get_unchecked_mut(bytes.len()) = b'\0';
            buf
        }
    }
}
// Owned, heap-allocated UTF-8 C string. Invariant: the inner String is
// always null terminated one byte past its len(), maintained by calling
// StringExt::nul_terminate after every mutation.
pub struct Utf8CString(String);

impl Default for Utf8CString {
    fn default() -> Self {
        Utf8CString::with_capacity(256)
    }
}

impl Utf8CString {
    pub fn with_capacity(capacity: usize) -> Utf8CString {
        Utf8CString::from(String::with_capacity(capacity))
    }

    // Grow the backing buffer so it can hold at least `capacity` bytes.
    pub fn ensure_capacity(&mut self, capacity: usize) {
        if self.capacity() >= capacity {
            return;
        }
        self.0.reserve(capacity - self.0.len())
    }
}

impl AsRef<Utf8CStr> for Utf8CString {
    #[inline(always)]
    fn as_ref(&self) -> &Utf8CStr {
        // SAFETY: the internal string is always null terminated
        unsafe { mem::transmute(slice::from_raw_parts(self.0.as_ptr(), self.0.len() + 1)) }
    }
}

impl Utf8CStrBuf for Utf8CString {
    #[inline(always)]
    fn len(&self) -> usize {
        self.0.len()
    }

    // Unlike the fixed buffers, appends never truncate: the String grows.
    fn push_str(&mut self, s: &str) -> usize {
        self.0.push_str(s);
        self.0.nul_terminate();
        s.len()
    }

    fn capacity(&self) -> usize {
        self.0.capacity()
    }

    fn clear(&mut self) {
        self.0.clear();
        self.0.nul_terminate();
    }

    fn as_mut_ptr(&mut self) -> *mut c_char {
        self.0.as_mut_ptr().cast()
    }

    fn truncate(&mut self, new_len: usize) {
        self.0.truncate(new_len);
        self.0.nul_terminate();
    }

    // Re-derive the string length from the buffer contents after raw
    // FFI writes through as_mut_ptr(). The String is disassembled into
    // raw parts so the full capacity can be scanned for the terminator.
    fn rebuild(&mut self) -> Result<(), StrErr> {
        // Temporarily move the internal String out
        let mut tmp = String::new();
        mem::swap(&mut tmp, &mut self.0);
        let (ptr, _, capacity) = tmp.into_raw_parts();
        unsafe {
            // Validate the entire buffer, including the unused part
            let bytes = slice::from_raw_parts(ptr, capacity);
            match Utf8CStr::from_bytes_until_nul(bytes) {
                Ok(s) => {
                    // Move the String with the new length back
                    self.0 = String::from_raw_parts(ptr, s.len(), capacity);
                }
                Err(e) => {
                    // Move the String with 0 length back
                    self.0 = String::from_raw_parts(ptr, 0, capacity);
                    self.0.nul_terminate();
                    return Err(e);
                }
            }
        }
        Ok(())
    }
}

impl From<String> for Utf8CString {
    fn from(mut value: String) -> Self {
        value.nul_terminate();
        Utf8CString(value)
    }
}

impl From<&str> for Utf8CString {
    fn from(value: &str) -> Self {
        // +1 so the terminator fits without a second allocation
        let mut s = String::with_capacity(value.len() + 1);
        s.push_str(value);
        s.nul_terminate();
        Utf8CString(s)
    }
}

impl FromStr for Utf8CString {
    // Conversion from &str is infallible; Err only satisfies the trait.
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(s.into())
    }
}

impl Borrow<Utf8CStr> for Utf8CString {
    fn borrow(&self) -> &Utf8CStr {
        self.deref()
    }
}
// UTF-8 validated + null terminated reference to buffer
pub struct Utf8CStrBufRef<'a> {
    used: usize,       // bytes in use, excluding the terminating null
    buf: &'a mut [u8],
}

impl<'a> Utf8CStrBufRef<'a> {
    // SAFETY: `buf` must point to `len` writable bytes valid for 'a.
    pub unsafe fn from_ptr(buf: *mut u8, len: usize) -> Utf8CStrBufRef<'a> {
        unsafe { Self::from(slice_from_ptr_mut(buf, len)) }
    }
}

impl<'a> From<&'a mut [u8]> for Utf8CStrBufRef<'a> {
    fn from(buf: &'a mut [u8]) -> Utf8CStrBufRef<'a> {
        // Start out as an empty string (panics on a zero-sized slice)
        buf[0] = b'\0';
        Utf8CStrBufRef { used: 0, buf }
    }
}

// UTF-8 validated + null terminated buffer on the stack
pub struct Utf8CStrBufArr<const N: usize> {
    used: usize,
    buf: [u8; N],
}

impl<const N: usize> Utf8CStrBufArr<N> {
    pub fn new() -> Self {
        Utf8CStrBufArr {
            used: 0,
            buf: [0; N],
        }
    }
}

impl Default for Utf8CStrBufArr<4096> {
    fn default() -> Self {
        Utf8CStrBufArr::<4096>::new()
    }
}

// Unified error type for all Utf8CStr construction failures.
#[derive(Debug, Error)]
pub enum StrErr {
    #[error(transparent)]
    Utf8Error(#[from] Utf8Error),
    #[error(transparent)]
    CStrWithNullError(#[from] FromBytesWithNulError),
    #[error(transparent)]
    CStrUntilNullError(#[from] FromBytesUntilNulError),
    #[error("argument is null")]
    NullPointerError,
}
// UTF-8 validated + null terminated string slice.
// Representation: the raw bytes INCLUDING the terminating null.
#[repr(transparent)]
pub struct Utf8CStr([u8]);

impl Utf8CStr {
    pub fn from_cstr(cstr: &CStr) -> Result<&Utf8CStr, StrErr> {
        // Validate the buffer during construction
        str::from_utf8(cstr.to_bytes())?;
        Ok(unsafe { Self::from_bytes_unchecked(cstr.to_bytes_with_nul()) })
    }

    // Accepts trailing garbage after the first null byte.
    fn from_bytes_until_nul(bytes: &[u8]) -> Result<&Utf8CStr, StrErr> {
        Self::from_cstr(CStr::from_bytes_until_nul(bytes)?)
    }

    // Requires exactly one null byte, at the very end.
    pub fn from_bytes(bytes: &[u8]) -> Result<&Utf8CStr, StrErr> {
        Self::from_cstr(CStr::from_bytes_with_nul(bytes)?)
    }

    // Null terminate the String in-place and reborrow it as &Utf8CStr.
    pub fn from_string(s: &mut String) -> &Utf8CStr {
        let buf = s.nul_terminate();
        // SAFETY: the null byte is explicitly added to the buffer
        unsafe { mem::transmute(buf) }
    }

    // SAFETY: bytes must be valid UTF-8 and include the terminating null.
    #[inline(always)]
    pub const unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Utf8CStr {
        unsafe { mem::transmute(bytes) }
    }

    // SAFETY: ptr must be null or point to a valid null terminated string.
    pub unsafe fn from_ptr<'a>(ptr: *const c_char) -> Result<&'a Utf8CStr, StrErr> {
        if ptr.is_null() {
            return Err(StrErr::NullPointerError);
        }
        Self::from_cstr(unsafe { CStr::from_ptr(ptr) })
    }

    // SAFETY: like from_ptr, but skips both the null check and UTF-8 validation.
    pub unsafe fn from_ptr_unchecked<'a>(ptr: *const c_char) -> &'a Utf8CStr {
        unsafe {
            let cstr = CStr::from_ptr(ptr);
            Self::from_bytes_unchecked(cstr.to_bytes_with_nul())
        }
    }

    // SAFETY: `len` must include the terminating null; contents must be UTF-8.
    pub unsafe fn from_raw_parts<'a>(ptr: *const c_char, len: usize) -> &'a Utf8CStr {
        unsafe {
            let bytes = slice::from_raw_parts(ptr.cast(), len);
            Self::from_bytes_unchecked(bytes)
        }
    }

    #[inline(always)]
    pub fn as_bytes_with_nul(&self) -> &[u8] {
        &self.0
    }

    #[inline(always)]
    pub fn as_ptr(&self) -> *const c_char {
        self.0.as_ptr().cast()
    }

    #[inline(always)]
    pub fn as_cstr(&self) -> &CStr {
        // SAFETY: Already validated as null terminated during construction
        unsafe { CStr::from_bytes_with_nul_unchecked(&self.0) }
    }

    // Identity helper, useful in generic contexts.
    #[inline(always)]
    pub fn as_utf8_cstr(&self) -> &Utf8CStr {
        self
    }

    #[inline(always)]
    pub fn as_str(&self) -> &str {
        // SAFETY: Already UTF-8 validated during construction
        // SAFETY: The length of the slice is at least 1 due to null termination check
        unsafe { str::from_utf8_unchecked(self.0.get_unchecked(..self.0.len() - 1)) }
    }
}
impl Deref for Utf8CStr {
    type Target = str;

    #[inline(always)]
    fn deref(&self) -> &str {
        self.as_str()
    }
}

impl ToOwned for Utf8CStr {
    type Owned = Utf8CString;

    fn to_owned(&self) -> Utf8CString {
        let mut s = Utf8CString::with_capacity(self.len() + 1);
        s.push_str(self.as_str());
        s
    }
}

impl AsRef<Utf8CStr> for Utf8CStr {
    fn as_ref(&self) -> &Utf8CStr {
        self
    }
}

// Lets &Utf8CStr be passed to nix APIs directly without re-allocation,
// since the terminator is already present.
impl NixPath for Utf8CStr {
    #[inline(always)]
    fn is_empty(&self) -> bool {
        self.as_str().is_empty()
    }

    #[inline(always)]
    fn len(&self) -> usize {
        self.as_str().len()
    }

    #[inline(always)]
    fn with_nix_path<T, F>(&self, f: F) -> nix::Result<T>
    where
        F: FnOnce(&CStr) -> T,
    {
        Ok(f(self.as_cstr()))
    }
}

// Notice that we only implement ExternType on Utf8CStr *reference*
unsafe impl ExternType for &Utf8CStr {
    type Id = type_id!("Utf8CStr");
    type Kind = cxx::kind::Trivial;
}

// Compile-time equality: mismatched array lengths fail to compile.
macro_rules! const_assert_eq {
    ($left:expr, $right:expr $(,)?) => {
        const _: [(); $left] = [(); $right];
    };
}

// Assert ABI layout
const_assert_eq!(size_of::<&Utf8CStr>(), size_of::<[usize; 2]>());
const_assert_eq!(align_of::<&Utf8CStr>(), align_of::<[usize; 2]>());
// File system path extensions types
// Transparent wrapper marking a path value (same layout as Utf8CStr).
#[repr(transparent)]
pub struct FsPathFollow(Utf8CStr);

impl AsRef<Utf8CStr> for FsPathFollow {
    #[inline(always)]
    fn as_ref(&self) -> &Utf8CStr {
        &self.0
    }
}

// impl<T: AsRef<Utf8CStr>> Deref<Target = Utf8CStr> for T { ... }
// Each invocation entry is (Type, optional generic params).
macro_rules! impl_cstr_deref {
    ($( ($t:ty, $($g:tt)*) )*) => {$(
        impl<$($g)*> Deref for $t {
            type Target = Utf8CStr;

            #[inline(always)]
            fn deref(&self) -> &Utf8CStr {
                self.as_ref()
            }
        }
    )*}
}

impl_cstr_deref!(
    (Utf8CStrBufRef<'_>,)
    (Utf8CStrBufArr<N>, const N: usize)
    (Utf8CString,)
    (FsPathFollow,)
);
// impl<T: Deref<Target = Utf8CStr>> BoilerPlate for T { ... }
// Generates AsRef (str/CStr/OsStr/Path), Display, Debug, and the full
// comparison suite for every string type, so they all interoperate.
macro_rules! impl_cstr_misc {
    ($( ($t:ty, $($g:tt)*) )*) => {$(
        impl<$($g)*> AsRef<str> for $t {
            #[inline(always)]
            fn as_ref(&self) -> &str {
                self.as_str()
            }
        }
        impl<$($g)*> AsRef<CStr> for $t {
            #[inline(always)]
            fn as_ref(&self) -> &CStr {
                self.as_cstr()
            }
        }
        impl<$($g)*> AsRef<OsStr> for $t {
            #[inline(always)]
            fn as_ref(&self) -> &OsStr {
                OsStr::new(self.as_str())
            }
        }
        impl<$($g)*> AsRef<Path> for $t {
            #[inline(always)]
            fn as_ref(&self) -> &Path {
                Path::new(self.as_str())
            }
        }
        impl<$($g)*> Display for $t {
            #[inline(always)]
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                Display::fmt(self.as_str(), f)
            }
        }
        impl<$($g)*> Debug for $t {
            #[inline(always)]
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                Debug::fmt(self.as_str(), f)
            }
        }
        impl<$($g)*> PartialEq<str> for $t {
            #[inline(always)]
            fn eq(&self, other: &str) -> bool {
                self.as_str() == other
            }
        }
        impl<$($g)*> PartialEq<$t> for str {
            #[inline(always)]
            fn eq(&self, other: &$t) -> bool {
                self == other.as_str()
            }
        }
        impl<$($g)*> PartialEq<CStr> for $t {
            #[inline(always)]
            fn eq(&self, other: &CStr) -> bool {
                self.as_cstr() == other
            }
        }
        impl<$($g)*> PartialEq<$t> for CStr {
            #[inline(always)]
            fn eq(&self, other: &$t) -> bool {
                self == other.as_cstr()
            }
        }
        // Equality across any two Utf8CStr-backed types: compare raw bytes
        impl<T: AsRef<Utf8CStr> + ?Sized, $($g)*> PartialEq<T> for $t {
            #[inline(always)]
            fn eq(&self, other: &T) -> bool {
                self.as_bytes_with_nul() == other.as_ref().as_bytes_with_nul()
            }
        }
        impl<$($g)*> Eq for $t {}
        impl<$($g)*> PartialOrd for $t {
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                Some(self.cmp(other))
            }
        }
        impl<$($g)*> Ord for $t {
            fn cmp(&self, other: &Self) -> Ordering {
                self.as_str().cmp(other.as_str())
            }
        }
    )*}
}

impl_cstr_misc!(
    (Utf8CStr,)
    (Utf8CStrBufRef<'_>,)
    (Utf8CStrBufArr<N>, const N: usize)
    (Utf8CString,)
    (FsPathFollow,)
);
// Copy as much of `src` into `dest` as fits, always leaving one byte for
// the null terminator. Returns the number of content bytes copied.
// A destination of size 0 or 1 is left completely untouched.
fn copy_cstr_truncate(dest: &mut [u8], src: &[u8]) -> usize {
    if dest.len() < 2 {
        // Truncate
        return 0;
    }
    let n = src.len().min(dest.len() - 1);
    dest[..n].copy_from_slice(&src[..n]);
    dest[n] = b'\0';
    n
}
// impl<T> AsRef<Utf8CStr> for T { ... }
// impl<T> Utf8CStrBuf for T { ... }
//
// Generates the buffer-backed implementations for types that expose a `buf`
// byte array and a `used` length field. Invariant relied on throughout:
// `self.used <= buf.len() - 1` and `buf[used] == b'\0'`.
macro_rules! impl_cstr_buf {
    ($( ($t:ty, $($g:tt)*) )*) => {$(
        impl<$($g)*> AsRef<Utf8CStr> for $t {
            #[inline(always)]
            fn as_ref(&self) -> &Utf8CStr {
                // SAFETY: the internal buffer is always UTF-8 checked
                // SAFETY: self.used is guaranteed to always <= SIZE - 1
                unsafe { Utf8CStr::from_bytes_unchecked(self.buf.get_unchecked(..(self.used + 1))) }
            }
        }
        impl<$($g)*> Utf8CStrBuf for $t {
            #[inline(always)]
            fn len(&self) -> usize {
                self.used
            }
            // Appends `s`, truncating if it does not fit; returns bytes copied.
            #[inline(always)]
            fn push_str(&mut self, s: &str) -> usize {
                // SAFETY: self.used is guaranteed to always <= SIZE - 1
                let dest = unsafe { self.buf.get_unchecked_mut(self.used..) };
                let len = copy_cstr_truncate(dest, s.as_bytes());
                self.used += len;
                len
            }
            #[inline(always)]
            fn capacity(&self) -> usize {
                self.buf.len()
            }
            // Reset to the empty string; keeps the buffer NUL-terminated.
            #[inline(always)]
            fn clear(&mut self) {
                self.buf[0] = b'\0';
                self.used = 0;
            }
            #[inline(always)]
            fn as_mut_ptr(&mut self) -> *mut c_char {
                self.buf.as_mut_ptr().cast()
            }
            // Shorten to `new_len` bytes; no-op when already short enough.
            fn truncate(&mut self, new_len: usize) {
                if self.used <= new_len {
                    return;
                }
                self.buf[new_len] = b'\0';
                self.used = new_len;
            }
            // Re-derive `used` after the buffer was mutated externally (e.g.
            // through as_mut_ptr by C code). On invalid UTF-8, resets to empty.
            fn rebuild(&mut self) -> Result<(), StrErr> {
                // Validate the entire buffer, including the unused part
                match Utf8CStr::from_bytes_until_nul(&self.buf) {
                    Ok(s) => self.used = s.len(),
                    Err(e) => {
                        self.used = 0;
                        self.buf[0] = b'\0';
                        return Err(e);
                    }
                }
                Ok(())
            }
        }
    )*}
}
// Instantiate the buffer impls for the two fixed-storage buffer types.
impl_cstr_buf!(
    (Utf8CStrBufRef<'_>,)
    (Utf8CStrBufArr<N>, const N: usize)
);
// impl<T: Utf8CStrBuf> Write for T { ... }
//
// Bridges `core::fmt::Write` onto any buffer type providing `push_str`.
// Note: writes that overflow the buffer are silently truncated, never an Err.
macro_rules! impl_cstr_buf_write {
    ($( ($target:ty, $($gen:tt)*) )*) => {$(
        impl<$($gen)*> Write for $target {
            #[inline(always)]
            fn write_str(&mut self, s: &str) -> fmt::Result {
                // Truncating append; the byte count is intentionally ignored.
                self.push_str(s);
                Ok(())
            }
        }
    )*}
}
// `Write` is also implemented for the heap-backed Utf8CString.
impl_cstr_buf_write!(
    (Utf8CStrBufRef<'_>,)
    (Utf8CStrBufArr<N>, const N: usize)
    (Utf8CString,)
);
/// Create a `&'static Utf8CStr` from a string literal/const expression.
/// The NUL terminator is appended at compile time via `concatcp!`, so no
/// runtime validation or allocation is needed.
#[macro_export]
macro_rules! cstr {
    ($s:expr) => {{
        const WITH_NUL: &str = $crate::const_format::concatcp!($s, "\0");
        // SAFETY: WITH_NUL is valid UTF-8 and NUL-terminated by construction.
        #[allow(unused_unsafe)]
        unsafe {
            $crate::Utf8CStr::from_bytes_unchecked(WITH_NUL.as_bytes())
        }
    }};
}
/// Like `cstr!`, but lowered to a raw `*const c_char` for FFI call sites.
#[macro_export]
macro_rules! raw_cstr {
    ($s:expr) => {{
        $crate::cstr!($s).as_ptr()
    }};
}

View file

@ -0,0 +1,189 @@
// Functions in this file are only for exporting to C++, DO NOT USE IN RUST
use std::fs::File;
use std::io::BufReader;
use std::mem::ManuallyDrop;
use std::ops::DerefMut;
use std::os::fd::{BorrowedFd, FromRawFd, OwnedFd, RawFd};
use crate::ffi::{FnBoolStr, FnBoolStrStr};
use crate::files::map_file_at;
pub(crate) use crate::xwrap::*;
use crate::{
BufReadExt, Directory, LoggedResult, ResultExt, Utf8CStr, clone_attr, cstr, fclone_attr,
map_fd, map_file, slice_from_ptr,
};
use cfg_if::cfg_if;
use libc::{c_char, mode_t};
use nix::fcntl::OFlag;
/// C ABI: resolve `path` into `buf` (capacity `bufsz`).
/// Returns the resolved length, or -1 on invalid UTF-8 / realpath failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn canonical_path(path: *const c_char, buf: *mut u8, bufsz: usize) -> isize {
    unsafe {
        let Ok(path) = Utf8CStr::from_ptr(path) else {
            return -1;
        };
        let mut out = cstr::buf::wrap_ptr(buf, bufsz);
        match path.realpath(&mut out).log() {
            Ok(_) => out.len() as isize,
            Err(_) => -1,
        }
    }
}
/// C ABI `mkdirs`: create the directory chain. 0 on success, -1 on failure
/// (including a non-UTF-8 path).
#[unsafe(export_name = "mkdirs")]
unsafe extern "C" fn mkdirs_for_cxx(path: *const c_char, mode: mode_t) -> i32 {
    unsafe {
        let Ok(path) = Utf8CStr::from_ptr(path) else {
            return -1;
        };
        if path.mkdirs(mode).is_ok() { 0 } else { -1 }
    }
}
/// C ABI `rm_rf`: recursively remove `path`. False on invalid UTF-8 or error.
#[unsafe(export_name = "rm_rf")]
unsafe extern "C" fn rm_rf_for_cxx(path: *const c_char) -> bool {
    unsafe {
        Utf8CStr::from_ptr(path)
            .map(|path| path.remove_all().is_ok())
            .unwrap_or(false)
    }
}
/// C ABI: recursively remove all entries reachable from the directory `fd`.
/// Takes ownership of the fd (closed on return).
#[unsafe(no_mangle)]
unsafe extern "C" fn frm_rf(fd: OwnedFd) -> bool {
    // Helper so `?` can collect either failure into one LoggedResult.
    fn remove_recursive(fd: OwnedFd) -> LoggedResult<()> {
        let dir = Directory::try_from(fd)?;
        dir.remove_all()
    }
    remove_recursive(fd).is_ok()
}
/// Map `path` into memory for C++. Failures are logged and surface as an
/// empty slice so callers can simply test for emptiness.
pub(crate) fn map_file_for_cxx(path: &Utf8CStr, rw: bool) -> &'static mut [u8] {
    match map_file(path, rw).log() {
        Ok(map) => map,
        Err(_) => &mut [],
    }
}
/// Map `path` (relative to directory `fd`) for C++; empty slice on failure.
pub(crate) fn map_file_at_for_cxx(fd: RawFd, path: &Utf8CStr, rw: bool) -> &'static mut [u8] {
    unsafe {
        match map_file_at(BorrowedFd::borrow_raw(fd), path, rw).log() {
            Ok(map) => map,
            Err(_) => &mut [],
        }
    }
}
/// Map `sz` bytes of `fd` for C++; empty slice on failure.
pub(crate) fn map_fd_for_cxx(fd: RawFd, sz: usize, rw: bool) -> &'static mut [u8] {
    unsafe {
        match map_fd(BorrowedFd::borrow_raw(fd), sz, rw).log() {
            Ok(map) => map,
            Err(_) => &mut [],
        }
    }
}
/// `readlinkat(2)` wrapper that always NUL-terminates the result.
///
/// Returns the link length, or -1 on error (including a zero-sized buffer).
///
/// # Safety
/// `path` must be a valid NUL-terminated string and `buf` must point to at
/// least `bufsz` writable bytes.
pub(crate) unsafe fn readlinkat(
    dirfd: RawFd,
    path: *const c_char,
    buf: *mut u8,
    bufsz: usize,
) -> isize {
    // The termination logic below reserves one byte via `bufsz - 1`; with
    // bufsz == 0 that would underflow (debug panic, or a huge size handed to
    // the kernel in release). Reject such buffers up front.
    if bufsz == 0 {
        return -1;
    }
    unsafe {
        // readlinkat() may fail on x86 platform, returning random value
        // instead of number of bytes placed in buf (length of link)
        cfg_if! {
            if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
                // Zero-fill first so the strlen() recovery below is well-defined.
                libc::memset(buf.cast(), 0, bufsz);
                let mut r = libc::readlinkat(dirfd, path, buf.cast(), bufsz - 1);
                if r > 0 {
                    r = libc::strlen(buf.cast()) as isize;
                }
            } else {
                let r = libc::readlinkat(dirfd, path, buf.cast(), bufsz - 1);
                if r >= 0 {
                    // The syscall does not NUL-terminate; do it ourselves.
                    *buf.offset(r) = b'\0';
                }
            }
        }
        r
    }
}
/// C ABI `cp_afc`: recursive copy preserving attributes. False on any failure.
#[unsafe(export_name = "cp_afc")]
unsafe extern "C" fn cp_afc_for_cxx(src: *const c_char, dest: *const c_char) -> bool {
    unsafe {
        match (Utf8CStr::from_ptr(src), Utf8CStr::from_ptr(dest)) {
            (Ok(src), Ok(dest)) => src.copy_to(dest).is_ok(),
            _ => false,
        }
    }
}
/// C ABI `mv_path`: move `src` to `dest`. False on any failure.
#[unsafe(export_name = "mv_path")]
unsafe extern "C" fn mv_path_for_cxx(src: *const c_char, dest: *const c_char) -> bool {
    unsafe {
        match (Utf8CStr::from_ptr(src), Utf8CStr::from_ptr(dest)) {
            (Ok(src), Ok(dest)) => src.move_to(dest).is_ok(),
            _ => false,
        }
    }
}
/// C ABI `link_path`: hard-link `src` to `dest`. False on any failure.
#[unsafe(export_name = "link_path")]
unsafe extern "C" fn link_path_for_cxx(src: *const c_char, dest: *const c_char) -> bool {
    unsafe {
        match (Utf8CStr::from_ptr(src), Utf8CStr::from_ptr(dest)) {
            (Ok(src), Ok(dest)) => src.link_to(dest).is_ok(),
            _ => false,
        }
    }
}
/// C ABI `clone_attr`: copy file attributes from `src` to `dest`; logs errors.
#[unsafe(export_name = "clone_attr")]
unsafe extern "C" fn clone_attr_for_cxx(src: *const c_char, dest: *const c_char) -> bool {
    unsafe {
        match (Utf8CStr::from_ptr(src), Utf8CStr::from_ptr(dest)) {
            (Ok(src), Ok(dest)) => clone_attr(src, dest).log().is_ok(),
            _ => false,
        }
    }
}
/// C ABI `fclone_attr`: copy file attributes between two open fds; logs errors.
#[unsafe(export_name = "fclone_attr")]
unsafe extern "C" fn fclone_attr_for_cxx(a: RawFd, b: RawFd) -> bool {
    match fclone_attr(a, b).log() {
        Ok(_) => true,
        Err(_) => false,
    }
}
/// cxx bridge: rebind `this` to a validated view over (s, len).
/// Invalid UTF-8 (or missing NUL) falls back to the empty string.
#[unsafe(export_name = "cxx$utf8str$new")]
unsafe extern "C" fn str_new(this: &mut &Utf8CStr, s: *const u8, len: usize) {
    unsafe {
        let bytes = slice_from_ptr(s, len);
        *this = match Utf8CStr::from_bytes(bytes) {
            Ok(valid) => valid,
            Err(_) => cstr!(""),
        };
    }
}
// cxx bridge accessor: raw data pointer of the wrapped Utf8CStr.
#[unsafe(export_name = "cxx$utf8str$ptr")]
unsafe extern "C" fn str_ptr(this: &&Utf8CStr) -> *const u8 {
    this.as_ptr().cast()
}
// cxx bridge accessor: string length (excluding the NUL terminator).
#[unsafe(export_name = "cxx$utf8str$len")]
unsafe extern "C" fn str_len(this: &&Utf8CStr) -> usize {
    this.len()
}
/// Parse the prop file at `name`, invoking `f` once per key/value pair.
/// A file that cannot be opened is silently ignored (best-effort semantics).
pub(crate) fn parse_prop_file_rs(name: &Utf8CStr, f: &FnBoolStrStr) {
    let Ok(file) = name.open(OFlag::O_RDONLY) else {
        return;
    };
    BufReader::new(file).for_each_prop(|key, value| f.call(key, value));
}
/// Invoke `f` for every line read from `fd`.
///
/// The fd is wrapped in `ManuallyDrop` because ownership remains with the C++
/// caller; dropping the `File` here would close a descriptor we do not own.
pub(crate) fn file_readline_for_cxx(fd: RawFd, f: &FnBoolStr) {
    let mut fd = ManuallyDrop::new(unsafe { File::from_raw_fd(fd) });
    BufReader::new(fd.deref_mut()).for_each_line(|line| f.call(Utf8CStr::from_string(line)));
}

View file

@ -0,0 +1,13 @@
[package]
name = "derive"
version.workspace = true
edition.workspace = true
[lib]
path = "lib.rs"
proc-macro = true
[dependencies]
syn = { workspace = true }
quote = { workspace = true }
proc-macro2 = { workspace = true }

View file

@ -0,0 +1,185 @@
// Copyright (c) 2020 Google LLC All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
use proc_macro2::{Span, TokenStream};
use quote::ToTokens;
use std::cell::RefCell;
/// A type for collecting procedural macro errors.
#[derive(Default)]
pub struct Errors {
    // Interior mutability lets `&self` methods accumulate errors while the
    // derive logic walks the input without threading `&mut` everywhere.
    errors: RefCell<Vec<syn::Error>>,
}
/// Produce functions to expect particular literals in `syn::Expr`
///
/// Each generated method returns `Some(&Lit...)` when the expression is the
/// expected literal variant, otherwise records an error and returns `None`.
macro_rules! expect_lit_fn {
    ($(($fn_name:ident, $syn_type:ident, $variant:ident, $lit_name:literal),)*) => {
        $(
            pub fn $fn_name<'a>(&self, e: &'a syn::Expr) -> Option<&'a syn::$syn_type> {
                if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::$variant(inner), .. }) = e {
                    Some(inner)
                } else {
                    self.unexpected_lit($lit_name, e);
                    None
                }
            }
        )*
    }
}
/// Produce functions to expect particular variants of `syn::Meta`
///
/// Same shape as `expect_lit_fn`: returns the inner node or records an error.
macro_rules! expect_meta_fn {
    ($(($fn_name:ident, $syn_type:ident, $variant:ident, $meta_name:literal),)*) => {
        $(
            pub fn $fn_name<'a>(&self, meta: &'a syn::Meta) -> Option<&'a syn::$syn_type> {
                if let syn::Meta::$variant(inner) = meta {
                    Some(inner)
                } else {
                    self.unexpected_meta($meta_name, meta);
                    None
                }
            }
        )*
    }
}
impl Errors {
    /// Issue an error like:
    ///
    /// Duplicate foo attribute
    /// First foo attribute here
    pub fn duplicate_attrs(
        &self,
        attr_kind: &str,
        first: &impl syn::spanned::Spanned,
        second: &impl syn::spanned::Spanned,
    ) {
        self.duplicate_attrs_inner(attr_kind, first.span(), second.span())
    }
    // Emits two errors: one at the duplicate, one pointing back at the first use.
    fn duplicate_attrs_inner(&self, attr_kind: &str, first: Span, second: Span) {
        self.err_span(second, &["Duplicate ", attr_kind, " attribute"].concat());
        self.err_span(first, &["First ", attr_kind, " attribute here"].concat());
    }
    // Generated: expect_lit_str / expect_lit_char / expect_lit_int.
    expect_lit_fn![
        (expect_lit_str, LitStr, Str, "string"),
        (expect_lit_char, LitChar, Char, "character"),
        (expect_lit_int, LitInt, Int, "integer"),
    ];
    // Generated: expect_meta_word / expect_meta_list / expect_meta_name_value.
    expect_meta_fn![
        (expect_meta_word, Path, Path, "path"),
        (expect_meta_list, MetaList, List, "list"),
        (
            expect_meta_name_value,
            MetaNameValue,
            NameValue,
            "name-value pair"
        ),
    ];
    /// Record a "wrong literal kind" error, naming the kind actually found.
    fn unexpected_lit(&self, expected: &str, found: &syn::Expr) {
        // Human-readable name for each literal variant.
        fn lit_kind(lit: &syn::Lit) -> &'static str {
            use syn::Lit::{Bool, Byte, ByteStr, Char, Float, Int, Str, Verbatim};
            match lit {
                Str(_) => "string",
                ByteStr(_) => "bytestring",
                Byte(_) => "byte",
                Char(_) => "character",
                Int(_) => "integer",
                Float(_) => "float",
                Bool(_) => "boolean",
                Verbatim(_) => "unknown (possibly extra-large integer)",
                _ => "unknown literal kind",
            }
        }
        if let syn::Expr::Lit(syn::ExprLit { lit, .. }) = found {
            self.err(
                found,
                &[
                    "Expected ",
                    expected,
                    " literal, found ",
                    lit_kind(lit),
                    " literal",
                ]
                .concat(),
            )
        } else {
            self.err(
                found,
                &[
                    "Expected ",
                    expected,
                    " literal, found non-literal expression.",
                ]
                .concat(),
            )
        }
    }
    /// Record a "wrong meta variant" error, naming the variant actually found.
    fn unexpected_meta(&self, expected: &str, found: &syn::Meta) {
        fn meta_kind(meta: &syn::Meta) -> &'static str {
            use syn::Meta::{List, NameValue, Path};
            match meta {
                Path(_) => "path",
                List(_) => "list",
                NameValue(_) => "name-value pair",
            }
        }
        self.err(
            found,
            &[
                "Expected ",
                expected,
                " attribute, found ",
                meta_kind(found),
                " attribute",
            ]
            .concat(),
        )
    }
    /// Issue an error relating to a particular `Spanned` structure.
    pub fn err(&self, spanned: &impl syn::spanned::Spanned, msg: &str) {
        self.err_span(spanned.span(), msg);
    }
    /// Issue an error relating to a particular `Span`.
    pub fn err_span(&self, span: Span, msg: &str) {
        self.push(syn::Error::new(span, msg));
    }
    /// Issue an error spanning over the given syntax tree node.
    pub fn err_span_tokens<T: ToTokens>(&self, tokens: T, msg: &str) {
        self.push(syn::Error::new_spanned(tokens, msg));
    }
    /// Push a `syn::Error` onto the list of errors to issue.
    pub fn push(&self, err: syn::Error) {
        self.errors.borrow_mut().push(err);
    }
    /// Convert a `syn::Result` to an `Option`, logging the error if present.
    pub fn ok<T>(&self, r: syn::Result<T>) -> Option<T> {
        match r {
            Ok(v) => Some(v),
            Err(e) => {
                self.push(e);
                None
            }
        }
    }
}
impl ToTokens for Errors {
    /// Emit every collected error as a `compile_error!` invocation so the
    /// macro user sees them all as compiler diagnostics.
    fn to_tokens(&self, tokens: &mut TokenStream) {
        for err in self.errors.borrow().iter() {
            tokens.extend(std::iter::once(err.to_compile_error()));
        }
    }
}

View file

@ -0,0 +1,912 @@
// Copyright (c) 2020 Google LLC All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
use syn::ext::IdentExt as _;
/// Implementation of the `FromArgs` and `argh(...)` derive attributes.
///
/// For more thorough documentation, see the `argh` crate itself.
extern crate proc_macro;
use errors::Errors;
use parse_attrs::{FieldAttrs, FieldKind, TypeAttrs, check_long_name};
use proc_macro2::{Span, TokenStream};
use quote::{ToTokens, quote, quote_spanned};
use std::collections::HashMap;
use std::str::FromStr;
use syn::spanned::Spanned;
use syn::{GenericArgument, LitStr, PathArguments, Type};
mod errors;
mod parse_attrs;
/// Transform the input into a token stream containing any generated implementations,
/// as well as all errors that occurred.
pub(crate) fn impl_from_args(input: &syn::DeriveInput) -> TokenStream {
    // Errors are accumulated and appended to the output instead of aborting,
    // so the user sees every problem in one compile.
    let errors = &Errors::default();
    let type_attrs = &TypeAttrs::parse(errors, input);
    let mut output_tokens = match &input.data {
        syn::Data::Struct(ds) => {
            impl_from_args_struct(errors, &input.ident, type_attrs, &input.generics, ds)
        }
        syn::Data::Enum(de) => {
            impl_from_args_enum(errors, &input.ident, type_attrs, &input.generics, de)
        }
        syn::Data::Union(_) => {
            errors.err(input, "`#[derive(FromArgs)]` cannot be applied to unions");
            TokenStream::new()
        }
    };
    errors.to_tokens(&mut output_tokens);
    output_tokens
}
/// The kind of optionality a parameter has.
enum Optionality {
    /// Required: parsing fails when the argument is absent.
    None,
    /// Has a `default` expression (stored as the lexed token stream).
    Defaulted(TokenStream),
    /// Wrapped in `Option<T>`.
    Optional,
    /// Wrapped in `Vec<T>`; may appear any number of times.
    Repeating,
    /// A `Vec<T>` field that also carries a `default` expression.
    DefaultedRepeating(TokenStream),
}
impl PartialEq<Optionality> for Optionality {
    /// Variant-only equality: the `Defaulted*` payloads are deliberately never
    /// considered equal.
    fn eq(&self, other: &Optionality) -> bool {
        use Optionality::*;
        // NB: (Defaulted, Defaulted) can't contain the same token streams
        matches!((self, other), (Optional, Optional) | (Repeating, Repeating))
    }
}
impl Optionality {
    /// Whether or not this is `Optionality::None`
    fn is_required(&self) -> bool {
        matches!(self, Optionality::None)
    }
}
/// A field of a `#![derive(FromArgs)]` struct with attributes and some other
/// notable metadata appended.
struct StructField<'a> {
    /// The original parsed field
    field: &'a syn::Field,
    /// The parsed attributes of the field
    attrs: FieldAttrs,
    /// The field name. This is contained optionally inside `field`,
    /// but is duplicated non-optionally here to indicate that all field that
    /// have reached this point must have a field name, and it no longer
    /// needs to be unwrapped.
    name: &'a syn::Ident,
    /// Similar to `name` above, this is contained optionally inside `FieldAttrs`,
    /// but here is fully present to indicate that we only have to consider fields
    /// with a valid `kind` at this point.
    kind: FieldKind,
    // If `field.ty` is `Vec<T>` or `Option<T>`, this is `T`, otherwise it's `&field.ty`.
    // This is used to enable consistent parsing code between optional and non-optional
    // keyed and subcommand fields.
    ty_without_wrapper: &'a syn::Type,
    // Whether the field represents an optional value, such as an `Option` subcommand field
    // or an `Option` or `Vec` keyed argument, or if it has a `default`.
    optionality: Optionality,
    // The `--`-prefixed name of the option, if one exists.
    long_name: Option<String>,
}
impl<'a> StructField<'a> {
    /// Attempts to parse a field of a `#[derive(FromArgs)]` struct, pulling out the
    /// fields required for code generation.
    fn new(errors: &Errors, field: &'a syn::Field, attrs: FieldAttrs) -> Option<Self> {
        let name = field.ident.as_ref().expect("missing ident for named field");
        // Ensure that one "kind" is present (switch, option, subcommand, positional)
        let kind = if let Some(field_type) = &attrs.field_type {
            field_type.kind
        } else {
            errors.err(
                field,
                concat!(
                    "Missing `argh` field kind attribute.\n",
                    "Expected one of: `switch`, `option`, `remaining`, `subcommand`, `positional`",
                ),
            );
            return None;
        };
        // Parse out whether a field is optional (`Option` or `Vec`).
        let optionality;
        let ty_without_wrapper;
        match kind {
            FieldKind::Switch => {
                if !ty_expect_switch(errors, &field.ty) {
                    return None;
                }
                optionality = Optionality::Optional;
                ty_without_wrapper = &field.ty;
            }
            FieldKind::Option | FieldKind::Positional => {
                if let Some(default) = &attrs.default {
                    // The `default = "..."` string is re-lexed into Rust tokens.
                    let tokens = match TokenStream::from_str(&default.value()) {
                        Ok(tokens) => tokens,
                        Err(_) => {
                            errors.err(&default, "Invalid tokens: unable to lex `default` value");
                            return None;
                        }
                    };
                    // Set the span of the generated tokens to the string literal
                    let tokens: TokenStream = tokens
                        .into_iter()
                        .map(|mut tree| {
                            tree.set_span(default.span());
                            tree
                        })
                        .collect();
                    let inner = if let Some(x) = ty_inner(&["Vec"], &field.ty) {
                        optionality = Optionality::DefaultedRepeating(tokens);
                        x
                    } else {
                        optionality = Optionality::Defaulted(tokens);
                        &field.ty
                    };
                    ty_without_wrapper = inner;
                } else {
                    let mut inner = None;
                    optionality = if let Some(x) = ty_inner(&["Option"], &field.ty) {
                        inner = Some(x);
                        Optionality::Optional
                    } else if let Some(x) = ty_inner(&["Vec"], &field.ty) {
                        inner = Some(x);
                        Optionality::Repeating
                    } else {
                        Optionality::None
                    };
                    ty_without_wrapper = inner.unwrap_or(&field.ty);
                }
            }
            FieldKind::SubCommand => {
                let inner = ty_inner(&["Option"], &field.ty);
                optionality = if inner.is_some() {
                    Optionality::Optional
                } else {
                    Optionality::None
                };
                ty_without_wrapper = inner.unwrap_or(&field.ty);
            }
        }
        // Determine the "long" name of options and switches.
        // Defaults to the kebab-cased field name if `#[argh(long = "...")]` is omitted.
        // If `#[argh(long = none)]` is explicitly set, no long name will be set.
        let long_name = match kind {
            FieldKind::Switch | FieldKind::Option => {
                let long_name = match &attrs.long {
                    None => {
                        let kebab_name = to_kebab_case(&name.unraw().to_string());
                        check_long_name(errors, name, &kebab_name);
                        Some(kebab_name)
                    }
                    Some(None) => None,
                    Some(Some(long)) => Some(long.value()),
                }
                .map(|long_name| {
                    if long_name == "help" {
                        errors.err(field, "Custom `--help` flags are not supported.");
                    }
                    format!("--{}", long_name)
                });
                // A flag must be reachable somehow.
                if let (None, None) = (&attrs.short, &long_name) {
                    errors.err(field, "At least one of `short` or `long` has to be set.")
                };
                long_name
            }
            FieldKind::SubCommand | FieldKind::Positional => None,
        };
        Some(StructField {
            field,
            attrs,
            kind,
            optionality,
            ty_without_wrapper,
            name,
            long_name,
        })
    }
    /// Display name for a positional argument: explicit `arg_name` if given,
    /// otherwise the field name with surrounding underscores stripped.
    pub(crate) fn positional_arg_name(&self) -> String {
        self.attrs
            .arg_name
            .as_ref()
            .map(LitStr::value)
            .unwrap_or_else(|| self.name.to_string().trim_matches('_').to_owned())
    }
    /// Display name for an option: "-s", "--long", or "-s,--long".
    fn option_arg_name(&self) -> String {
        match (&self.attrs.short, &self.long_name) {
            (None, None) => unreachable!("short and long cannot both be None"),
            (Some(short), None) => format!("-{}", short.value()),
            (None, Some(long)) => long.clone(),
            (Some(short), Some(long)) => format!("-{},{long}", short.value()),
        }
    }
}
/// Convert a snake_case identifier to kebab-case, dropping empty segments
/// (so leading/trailing/doubled underscores never produce stray dashes).
fn to_kebab_case(s: &str) -> String {
    s.split('_')
        .filter(|word| !word.is_empty())
        .collect::<Vec<_>>()
        .join("-")
}
/// Implements `FromArgs` and `TopLevelCommand` or `SubCommand` for a `#[derive(FromArgs)]` struct.
fn impl_from_args_struct(
    errors: &Errors,
    name: &syn::Ident,
    type_attrs: &TypeAttrs,
    generic_args: &syn::Generics,
    ds: &syn::DataStruct,
) -> TokenStream {
    // Only named-field structs are supported; everything else is reported
    // and produces no code (the accumulated errors are emitted by the caller).
    let fields = match &ds.fields {
        syn::Fields::Named(fields) => fields,
        syn::Fields::Unnamed(_) => {
            errors.err(
                &ds.struct_token,
                "`#![derive(FromArgs)]` is not currently supported on tuple structs",
            );
            return TokenStream::new();
        }
        syn::Fields::Unit => {
            // Fixed: message previously lacked the opening backtick.
            errors.err(
                &ds.struct_token,
                "`#![derive(FromArgs)]` cannot be applied to unit structs",
            );
            return TokenStream::new();
        }
    };
    // Parse per-field attributes; fields that fail validation are dropped
    // (their errors are already recorded).
    let fields: Vec<_> = fields
        .named
        .iter()
        .filter_map(|field| {
            let attrs = FieldAttrs::parse(errors, field);
            StructField::new(errors, field, attrs)
        })
        .collect();
    ensure_unique_names(errors, &fields);
    ensure_only_trailing_positionals_are_optional(errors, &fields);
    let impl_span = Span::call_site();
    let from_args_method = impl_from_args_struct_from_args(errors, type_attrs, &fields);
    let top_or_sub_cmd_impl = top_or_sub_cmd_impl(errors, name, type_attrs, generic_args);
    let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
    let trait_impl = quote_spanned! { impl_span =>
        #[automatically_derived]
        impl #impl_generics argh::FromArgs for #name #ty_generics #where_clause {
            #from_args_method
        }
        #top_or_sub_cmd_impl
    };
    trait_impl
}
/// Generate the body of the `FromArgs::from_args` method for a struct:
/// per-field parse slots, the flag lookup tables, optional subcommand
/// dispatch, and the missing-requirements check.
fn impl_from_args_struct_from_args<'a>(
    errors: &Errors,
    type_attrs: &TypeAttrs,
    fields: &'a [StructField<'a>],
) -> TokenStream {
    let init_fields = declare_local_storage_for_from_args_fields(fields);
    let unwrap_fields = unwrap_from_args_fields(fields);
    let positional_fields: Vec<&StructField<'_>> = fields
        .iter()
        .filter(|field| field.kind == FieldKind::Positional)
        .collect();
    let positional_field_idents = positional_fields.iter().map(|field| &field.field.ident);
    let positional_field_names = positional_fields.iter().map(|field| field.name.to_string());
    // Only the *last* positional may repeat or be greedy.
    let last_positional_is_repeating = positional_fields
        .last()
        .map(|field| field.optionality == Optionality::Repeating)
        .unwrap_or(false);
    let last_positional_is_greedy = positional_fields
        .last()
        .map(|field| field.kind == FieldKind::Positional && field.attrs.greedy.is_some())
        .unwrap_or(false);
    let flag_output_table = fields.iter().filter_map(|field| {
        let field_name = &field.field.ident;
        match field.kind {
            FieldKind::Option => Some(quote! { argh::ParseStructOption::Value(&mut #field_name) }),
            FieldKind::Switch => Some(quote! { argh::ParseStructOption::Flag(&mut #field_name) }),
            FieldKind::SubCommand | FieldKind::Positional => None,
        }
    });
    let flag_str_to_output_table_map = flag_str_to_output_table_map_entries(fields);
    // At most one `#[argh(subcommand)]` field is allowed; report duplicates.
    let mut subcommands_iter = fields
        .iter()
        .filter(|field| field.kind == FieldKind::SubCommand)
        .fuse();
    let subcommand: Option<&StructField<'_>> = subcommands_iter.next();
    for dup_subcommand in subcommands_iter {
        errors.duplicate_attrs(
            "subcommand",
            subcommand.unwrap().field,
            dup_subcommand.field,
        );
    }
    let impl_span = Span::call_site();
    let missing_requirements_ident = syn::Ident::new("__missing_requirements", impl_span);
    let append_missing_requirements =
        append_missing_requirements(&missing_requirements_ident, fields);
    let parse_subcommands = if let Some(subcommand) = subcommand {
        let name = subcommand.name;
        let ty = subcommand.ty_without_wrapper;
        quote_spanned! { impl_span =>
            Some(argh::ParseStructSubCommand {
                subcommands: <#ty as argh::SubCommands>::COMMANDS,
                dynamic_subcommands: &<#ty as argh::SubCommands>::dynamic_commands(),
                parse_func: &mut |__command, __remaining_args| {
                    #name = Some(<#ty as argh::FromArgs>::from_args(__command, __remaining_args)?);
                    Ok(())
                },
            })
        }
    } else {
        quote_spanned! { impl_span => None }
    };
    let help_triggers = get_help_triggers(type_attrs);
    let method_impl = quote_spanned! { impl_span =>
        fn from_args(__cmd_name: &[&str], __args: &[&str])
            -> std::result::Result<Self, argh::EarlyExit>
        {
            #![allow(clippy::unwrap_in_result)]
            #( #init_fields )*
            argh::parse_struct_args(
                __cmd_name,
                __args,
                argh::ParseStructOptions {
                    arg_to_slot: &[ #( #flag_str_to_output_table_map ,)* ],
                    slots: &mut [ #( #flag_output_table, )* ],
                    help_triggers: &[ #( #help_triggers ),* ],
                },
                argh::ParseStructPositionals {
                    positionals: &mut [
                        #(
                            argh::ParseStructPositional {
                                name: #positional_field_names,
                                slot: &mut #positional_field_idents as &mut dyn argh::ParseValueSlot,
                            },
                        )*
                    ],
                    last_is_repeating: #last_positional_is_repeating,
                    last_is_greedy: #last_positional_is_greedy,
                },
                #parse_subcommands,
            )?;
            let mut #missing_requirements_ident = argh::MissingRequirements::default();
            #(
                #append_missing_requirements
            )*
            #missing_requirements_ident.err_on_any()?;
            Ok(Self {
                #( #unwrap_fields, )*
            })
        }
    };
    method_impl
}
/// get help triggers vector from type_attrs.help_triggers as a [`Vec<String>`]
///
/// Defaults to vec!["-h", "--help"] if type_attrs.help_triggers is None.
/// Subcommands get no triggers; user-supplied triggers are trimmed and
/// empty entries are discarded.
fn get_help_triggers(type_attrs: &TypeAttrs) -> Vec<String> {
    if type_attrs.is_subcommand.is_some() {
        // Subcommands should never have any help triggers
        return Vec::new();
    }
    match &type_attrs.help_triggers {
        None => vec!["-h".to_string(), "--help".to_string()],
        Some(triggers) => triggers
            .iter()
            .filter_map(|lit| {
                let trimmed = lit.value().trim().to_owned();
                if trimmed.is_empty() { None } else { Some(trimmed) }
            })
            .collect(),
    }
}
/// Ensures that only trailing positional args are non-required.
///
/// Walks positionals in declaration order; once a non-required one is seen,
/// any later *required* positional is an error (reported at both sites).
fn ensure_only_trailing_positionals_are_optional(errors: &Errors, fields: &[StructField<'_>]) {
    let mut first_non_required_span = None;
    for field in fields {
        if field.kind == FieldKind::Positional {
            if let Some(first) = first_non_required_span
                && field.optionality.is_required()
            {
                errors.err_span(
                    first,
                    "Only trailing positional arguments may be `Option`, `Vec`, or defaulted.",
                );
                errors.err(
                    &field.field,
                    "Later non-optional positional argument declared here.",
                );
                // One report is enough; stop scanning.
                return;
            }
            if !field.optionality.is_required() {
                first_non_required_span = Some(field.field.span());
            }
        }
    }
}
/// Ensures that only one short or long name is used.
///
/// Reports both the first use and the duplicate for every collision.
fn ensure_unique_names(errors: &Errors, fields: &[StructField<'_>]) {
    let mut seen_short_names = HashMap::new();
    let mut seen_long_names = HashMap::new();
    for field in fields {
        if let Some(short_name) = &field.attrs.short {
            let short_name = short_name.value();
            if let Some(first_use_field) = seen_short_names.get(&short_name) {
                errors.err_span_tokens(
                    first_use_field,
                    &format!(
                        "The short name of \"-{}\" was already used here.",
                        short_name
                    ),
                );
                errors.err_span_tokens(field.field, "Later usage here.");
            }
            seen_short_names.insert(short_name, &field.field);
        }
        if let Some(long_name) = &field.long_name {
            if let Some(first_use_field) = seen_long_names.get(&long_name) {
                errors.err_span_tokens(
                    *first_use_field,
                    &format!("The long name of \"{}\" was already used here.", long_name),
                );
                errors.err_span_tokens(field.field, "Later usage here.");
            }
            seen_long_names.insert(long_name, field.field);
        }
    }
}
/// Implement `argh::TopLevelCommand` or `argh::SubCommand` as appropriate.
///
/// Subcommands additionally require `#[argh(name = "...")]`; its absence is
/// reported and an empty name is substituted so codegen can continue.
fn top_or_sub_cmd_impl(
    errors: &Errors,
    name: &syn::Ident,
    type_attrs: &TypeAttrs,
    generic_args: &syn::Generics,
) -> TokenStream {
    // NOTE(review): description is always empty here — presumably filled in
    // elsewhere or intentionally unused; confirm against the argh runtime.
    let description = String::new();
    let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
    if type_attrs.is_subcommand.is_none() {
        // Not a subcommand
        quote! {
            #[automatically_derived]
            impl #impl_generics argh::TopLevelCommand for #name #ty_generics #where_clause {}
        }
    } else {
        let empty_str = syn::LitStr::new("", Span::call_site());
        let subcommand_name = type_attrs.name.as_ref().unwrap_or_else(|| {
            errors.err(
                name,
                "`#[argh(name = \"...\")]` attribute is required for subcommands",
            );
            &empty_str
        });
        quote! {
            #[automatically_derived]
            impl #impl_generics argh::SubCommand for #name #ty_generics #where_clause {
                const COMMAND: &'static argh::CommandInfo = &argh::CommandInfo {
                    name: #subcommand_name,
                    description: #description,
                };
            }
        }
    }
}
/// Declare a local slots to store each field in during parsing.
///
/// Most fields are stored in `Option<FieldType>` locals.
/// `argh(option)` fields are stored in a `ParseValueSlotTy` along with a
/// function that knows how to decode the appropriate value.
fn declare_local_storage_for_from_args_fields<'a>(
    fields: &'a [StructField<'a>],
) -> impl Iterator<Item = TokenStream> + 'a {
    fields.iter().map(|field| {
        let field_name = &field.field.ident;
        let field_type = &field.ty_without_wrapper;
        // Wrap field types in `Option` if they aren't already `Option` or `Vec`-wrapped.
        let field_slot_type = match field.optionality {
            Optionality::Optional | Optionality::Repeating => (&field.field.ty).into_token_stream(),
            Optionality::None | Optionality::Defaulted(_) => {
                quote! { std::option::Option<#field_type> }
            }
            Optionality::DefaultedRepeating(_) => {
                quote! { std::option::Option<std::vec::Vec<#field_type>> }
            }
        };
        match field.kind {
            FieldKind::Option | FieldKind::Positional => {
                // Custom `from_str_fn` overrides the FromArgValue impl.
                let from_str_fn = match &field.attrs.from_str_fn {
                    Some(from_str_fn) => from_str_fn.into_token_stream(),
                    None => {
                        quote! {
                            <#field_type as argh::FromArgValue>::from_arg_value
                        }
                    }
                };
                quote! {
                    let mut #field_name: argh::ParseValueSlotTy<#field_slot_type, #field_type>
                        = argh::ParseValueSlotTy {
                            slot: std::default::Default::default(),
                            parse_func: |_, value| { #from_str_fn(value) },
                        };
                }
            }
            FieldKind::SubCommand => {
                quote! { let mut #field_name: #field_slot_type = None; }
            }
            FieldKind::Switch => {
                quote! { let mut #field_name: #field_slot_type = argh::Flag::default(); }
            }
        }
    })
}
/// Unwrap non-optional fields and take options out of their tuple slots.
///
/// Produces one `field_name: <expr>` initializer per field for the final
/// `Ok(Self { ... })` expression.
fn unwrap_from_args_fields<'a>(
    fields: &'a [StructField<'a>],
) -> impl Iterator<Item = TokenStream> + 'a {
    fields.iter().map(|field| {
        let field_name = field.name;
        match field.kind {
            FieldKind::Option | FieldKind::Positional => match &field.optionality {
                // `unwrap()` is safe: MissingRequirements has already errored
                // out if a required slot is empty.
                Optionality::None => quote! {
                    #field_name: #field_name.slot.unwrap()
                },
                Optionality::Optional | Optionality::Repeating => {
                    quote! { #field_name: #field_name.slot }
                }
                Optionality::Defaulted(tokens) | Optionality::DefaultedRepeating(tokens) => {
                    quote! {
                        #field_name: #field_name.slot.unwrap_or_else(|| #tokens)
                    }
                }
            },
            FieldKind::Switch => field_name.into_token_stream(),
            FieldKind::SubCommand => match field.optionality {
                Optionality::None => quote! { #field_name: #field_name.unwrap() },
                Optionality::Optional | Optionality::Repeating => field_name.into_token_stream(),
                // Subcommands never carry defaults (see StructField::new).
                Optionality::Defaulted(_) | Optionality::DefaultedRepeating(_) => unreachable!(),
            },
        }
    })
}
/// Entries of tokens like `("--some-flag-key", 5)` that map from a flag key
/// string to an index in the output table. A field with both a short and a
/// long name contributes two entries pointing at the same slot.
fn flag_str_to_output_table_map_entries<'a>(fields: &'a [StructField<'a>]) -> Vec<TokenStream> {
    let mut entries = Vec::new();
    for (index, field) in fields.iter().enumerate() {
        if let Some(short) = &field.attrs.short {
            let short_flag = format!("-{}", short.value());
            entries.push(quote! { (#short_flag, #index) });
        }
        if let Some(long_flag) = &field.long_name {
            entries.push(quote! { (#long_flag, #index) });
        }
    }
    entries
}
/// For each non-optional field, add an entry to the `argh::MissingRequirements`.
fn append_missing_requirements<'a>(
    // missing_requirements_ident
    mri: &syn::Ident,
    fields: &'a [StructField<'a>],
) -> impl Iterator<Item = TokenStream> + 'a {
    // Clone so the returned iterator does not borrow the caller's ident.
    let mri = mri.clone();
    fields
        .iter()
        .filter(|f| f.optionality.is_required())
        .map(move |field| {
            let field_name = field.name;
            match field.kind {
                FieldKind::Switch => unreachable!("switches are always optional"),
                FieldKind::Positional => {
                    let name = field.positional_arg_name();
                    quote! {
                        if #field_name.slot.is_none() {
                            #mri.missing_positional_arg(#name)
                        }
                    }
                }
                FieldKind::Option => {
                    let name = field.option_arg_name();
                    quote! {
                        if #field_name.slot.is_none() {
                            #mri.missing_option(#name)
                        }
                    }
                }
                FieldKind::SubCommand => {
                    let ty = field.ty_without_wrapper;
                    // Report every possible subcommand (static + dynamic).
                    quote! {
                        if #field_name.is_none() {
                            #mri.missing_subcommands(
                                <#ty as argh::SubCommands>::COMMANDS
                                    .iter()
                                    .cloned()
                                    .chain(
                                        <#ty as argh::SubCommands>::dynamic_commands()
                                            .iter()
                                            .copied()
                                    ),
                            )
                        }
                    }
                }
            }
        })
}
/// Require that a type can be a `switch`.
/// Throws an error for all types except booleans and integers
fn ty_expect_switch(errors: &Errors, ty: &syn::Type) -> bool {
    fn ty_can_be_switch(ty: &syn::Type) -> bool {
        if let syn::Type::Path(path) = ty {
            // Qualified paths (`<T as Trait>::X`) and multi-segment paths are
            // rejected: only bare single-ident type names are recognized.
            if path.qself.is_some() {
                return false;
            }
            if path.path.segments.len() != 1 {
                return false;
            }
            let ident = &path.path.segments[0].ident;
            // `Option<bool>` can be used as a `switch`.
            if ident == "Option"
                && let PathArguments::AngleBracketed(args) = &path.path.segments[0].arguments
                && let GenericArgument::Type(Type::Path(p)) = &args.args[0]
                && p.path.segments[0].ident == "bool"
            {
                return true;
            }
            [
                "bool", "u8", "u16", "u32", "u64", "u128", "i8", "i16", "i32", "i64", "i128",
            ]
            .iter()
            .any(|path| ident == path)
        } else {
            false
        }
    }
    let res = ty_can_be_switch(ty);
    if !res {
        errors.err(
            ty,
            "switches must be of type `bool`, `Option<bool>`, or integer type",
        );
    }
    res
}
/// Returns `Some(T)` if a type is `wrapper_name<T>` for any `wrapper_name` in `wrapper_names`.
fn ty_inner<'a>(wrapper_names: &[&str], ty: &'a syn::Type) -> Option<&'a syn::Type> {
    if let syn::Type::Path(path) = ty {
        if path.qself.is_some() {
            return None;
        }
        // Since we only check the last path segment, it isn't necessarily the case that
        // we're referring to `std::vec::Vec` or `std::option::Option`, but there isn't
        // a fool proof way to check these since name resolution happens after macro expansion,
        // so this is likely "good enough" (so long as people don't have their own types called
        // `Option` or `Vec` that take one generic parameter they're looking to parse).
        let last_segment = path.path.segments.last()?;
        if !wrapper_names.iter().any(|name| last_segment.ident == *name) {
            return None;
        }
        // Extract the first generic type argument, e.g. `T` in `Vec<T>`.
        if let syn::PathArguments::AngleBracketed(gen_args) = &last_segment.arguments {
            let generic_arg = gen_args.args.first()?;
            if let syn::GenericArgument::Type(ty) = &generic_arg {
                return Some(ty);
            }
        }
    }
    None
}
/// Implements `FromArgs` and `SubCommands` for a `#![derive(FromArgs)]` enum.
///
/// Each variant must be a single-field tuple variant whose payload implements
/// `SubCommand`; at most one variant may be marked `dynamic`.
fn impl_from_args_enum(
    errors: &Errors,
    name: &syn::Ident,
    type_attrs: &TypeAttrs,
    generic_args: &syn::Generics,
    de: &syn::DataEnum,
) -> TokenStream {
    parse_attrs::check_enum_type_attrs(errors, type_attrs, &de.enum_token.span);
    // An enum variant like `<name>(<ty>)`
    struct SubCommandVariant<'a> {
        name: &'a syn::Ident,
        ty: &'a syn::Type,
    }
    let mut dynamic_type_and_variant = None;
    let variants: Vec<SubCommandVariant<'_>> = de
        .variants
        .iter()
        .filter_map(|variant| {
            let name = &variant.ident;
            let ty = enum_only_single_field_unnamed_variants(errors, &variant.fields)?;
            if parse_attrs::VariantAttrs::parse(errors, variant)
                .is_dynamic
                .is_some()
            {
                if dynamic_type_and_variant.is_some() {
                    errors.err(variant, "Only one variant can have the `dynamic` attribute");
                }
                // The dynamic variant is excluded from the static dispatch list.
                dynamic_type_and_variant = Some((ty, name));
                None
            } else {
                Some(SubCommandVariant { name, ty })
            }
        })
        .collect();
    let name_repeating = std::iter::repeat(name.clone());
    let variant_ty = variants.iter().map(|x| x.ty).collect::<Vec<_>>();
    let variant_names = variants.iter().map(|x| x.name).collect::<Vec<_>>();
    let dynamic_from_args =
        dynamic_type_and_variant
            .as_ref()
            .map(|(dynamic_type, dynamic_variant)| {
                quote! {
                    if let Some(result) = <#dynamic_type as argh::DynamicSubCommand>::try_from_args(
                        command_name, args) {
                        return result.map(#name::#dynamic_variant);
                    }
                }
            });
    let dynamic_commands = dynamic_type_and_variant.as_ref().map(|(dynamic_type, _)| {
        quote! {
            fn dynamic_commands() -> &'static [&'static argh::CommandInfo] {
                <#dynamic_type as argh::DynamicSubCommand>::commands()
            }
        }
    });
    let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
    quote! {
        impl #impl_generics argh::FromArgs for #name #ty_generics #where_clause {
            fn from_args(command_name: &[&str], args: &[&str])
                -> std::result::Result<Self, argh::EarlyExit>
            {
                let subcommand_name = if let Some(subcommand_name) = command_name.last() {
                    *subcommand_name
                } else {
                    return Err(argh::EarlyExit::from("no subcommand name".to_owned()));
                };
                #(
                    if subcommand_name == <#variant_ty as argh::SubCommand>::COMMAND.name {
                        return Ok(#name_repeating::#variant_names(
                            <#variant_ty as argh::FromArgs>::from_args(command_name, args)?
                        ));
                    }
                )*
                #dynamic_from_args
                Err(argh::EarlyExit::from("no subcommand matched".to_owned()))
            }
        }
        impl #impl_generics argh::SubCommands for #name #ty_generics #where_clause {
            const COMMANDS: &'static [&'static argh::CommandInfo] = &[#(
                <#variant_ty as argh::SubCommand>::COMMAND,
            )*];
            #dynamic_commands
        }
    }
}
/// Returns `Some(Bar)` if the field is a single-field unnamed variant like `Foo(Bar)`.
/// Otherwise, generates an error.
fn enum_only_single_field_unnamed_variants<'a>(
    errors: &Errors,
    variant_fields: &'a syn::Fields,
) -> Option<&'a syn::Type> {
    // Shared suggestion text appended to every error emitted below.
    macro_rules! with_enum_suggestion {
        ($help_text:literal) => {
            concat!(
                $help_text,
                "\nInstead, use a variant with a single unnamed field for each subcommand:\n",
                " enum MyCommandEnum {\n",
                " SubCommandOne(SubCommandOne),\n",
                " SubCommandTwo(SubCommandTwo),\n",
                " }",
            )
        };
    }
    match variant_fields {
        syn::Fields::Named(fields) => {
            errors.err(
                fields,
                with_enum_suggestion!(
                    "`#![derive(FromArgs)]` `enum`s do not support variants with named fields."
                ),
            );
            None
        }
        syn::Fields::Unit => {
            // Bug fix: this branch handles a *unit variant*; the previous message
            // ("does not support `enum`s with no variants") described a different,
            // unrelated condition.
            errors.err(
                variant_fields,
                with_enum_suggestion!(
                    "`#![derive(FromArgs)]` `enum`s do not support variants without fields."
                ),
            );
            None
        }
        syn::Fields::Unnamed(fields) => {
            if fields.unnamed.len() != 1 {
                errors.err(
                    fields,
                    with_enum_suggestion!(
                        "`#![derive(FromArgs)]` `enum` variants must only contain one field."
                    ),
                );
                None
            } else {
                // `unwrap` is okay because of the length check above.
                let first_field = fields.unnamed.first().unwrap();
                Some(&first_field.ty)
            }
        }
    }
}

View file

@ -0,0 +1,688 @@
// Copyright (c) 2020 Google LLC All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
use syn::parse::Parser;
use syn::punctuated::Punctuated;
use super::errors::Errors;
use proc_macro2::Span;
use std::collections::hash_map::{Entry, HashMap};
/// Attributes applied to a field of a `#![derive(FromArgs)]` struct.
#[derive(Default)]
pub struct FieldAttrs {
    /// `#[argh(default = "...")]`: expression string evaluated when the arg is absent.
    pub default: Option<syn::LitStr>,
    /// Field description from a doc comment or `description = "..."`.
    pub description: Option<Description>,
    /// `#[argh(from_str_fn(path))]`: custom parser for the value.
    pub from_str_fn: Option<syn::ExprPath>,
    /// Which kind of field this is (`switch`, `option`, `subcommand`, `positional`).
    pub field_type: Option<FieldType>,
    /// `#[argh(long = "...")]`; `Some(None)` means `long = none` (no long flag).
    pub long: Option<Option<syn::LitStr>>,
    /// `#[argh(short = 'c')]`: single-character short flag.
    pub short: Option<syn::LitChar>,
    /// `#[argh(arg_name = "...")]`: display name used in help output.
    pub arg_name: Option<syn::LitStr>,
    /// `#[argh(greedy)]`: consume all remaining args (positional only).
    pub greedy: Option<syn::Path>,
    /// `#[argh(hidden_help)]`: omit this field from help output.
    pub hidden_help: bool,
}
/// The purpose of a particular field on a `#![derive(FromArgs)]` struct.
///
/// At most one kind attribute may be applied to each field; see
/// `parse_attr_field_type` for how duplicates are rejected.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum FieldKind {
    /// Switches are booleans that are set to "true" by passing the flag.
    Switch,
    /// Options are `--key value`. They may be optional (using `Option`),
    /// or repeating (using `Vec`), or required (neither `Option` nor `Vec`)
    Option,
    /// Subcommand fields (of which there can be at most one) refer to enums
    /// containing one of several potential subcommands. They may be optional
    /// (using `Option`) or required (no `Option`).
    SubCommand,
    /// Positional arguments are parsed literally if the input
    /// does not begin with `-` or `--` and is not a subcommand.
    /// They are parsed in declaration order, and only the last positional
    /// argument in a type may be an `Option`, `Vec`, or have a default value.
    Positional,
}
/// The type of a field on a `#![derive(FromArgs)]` struct.
///
/// This is a simple wrapper around `FieldKind` which includes the `syn::Ident`
/// of the attribute containing the field kind.
pub struct FieldType {
    /// The parsed kind (switch / option / subcommand / positional).
    pub kind: FieldKind,
    /// The attribute word that declared the kind; kept for error spans.
    pub ident: syn::Ident,
}
/// A description of a `#![derive(FromArgs)]` struct.
///
/// Defaults to the docstring if one is present, or `#[argh(description = "...")]`
/// if one is provided.
pub struct Description {
    /// Whether the description was an explicit annotation or whether it was a doc string.
    pub explicit: bool,
    /// The description text (doc comments are concatenated and unescaped).
    pub content: syn::LitStr,
}
impl FieldAttrs {
    /// Parses all doc comments and `#[argh(...)]` attributes on a struct field.
    ///
    /// Unknown keys produce an error but do not stop parsing, so every
    /// diagnostic is reported in a single pass. Cross-attribute validation
    /// (`default`/`greedy` placement, description shape) runs after the loop.
    pub fn parse(errors: &Errors, field: &syn::Field) -> Self {
        let mut this = Self::default();
        for attr in &field.attrs {
            // Doc comments feed the description (unless an explicit one exists).
            if is_doc_attr(attr) {
                parse_attr_doc(errors, attr, &mut this.description);
                continue;
            }
            // Skip non-`#[argh(...)]` attributes.
            let ml = if let Some(ml) = argh_attr_to_meta_list(errors, attr) {
                ml
            } else {
                continue;
            };
            // Dispatch on each `key` / `key = value` / `key(...)` inside the attribute.
            for meta in ml {
                let name = meta.path();
                if name.is_ident("arg_name") {
                    if let Some(m) = errors.expect_meta_name_value(&meta) {
                        this.parse_attr_arg_name(errors, m);
                    }
                } else if name.is_ident("default") {
                    if let Some(m) = errors.expect_meta_name_value(&meta) {
                        this.parse_attr_default(errors, m);
                    }
                } else if name.is_ident("description") {
                    if let Some(m) = errors.expect_meta_name_value(&meta) {
                        parse_attr_description(errors, m, &mut this.description);
                    }
                } else if name.is_ident("from_str_fn") {
                    if let Some(m) = errors.expect_meta_list(&meta) {
                        this.parse_attr_from_str_fn(errors, m);
                    }
                } else if name.is_ident("long") {
                    if let Some(m) = errors.expect_meta_name_value(&meta) {
                        this.parse_attr_long(errors, m);
                    }
                } else if name.is_ident("option") {
                    parse_attr_field_type(errors, &meta, FieldKind::Option, &mut this.field_type);
                } else if name.is_ident("short") {
                    if let Some(m) = errors.expect_meta_name_value(&meta) {
                        this.parse_attr_short(errors, m);
                    }
                } else if name.is_ident("subcommand") {
                    parse_attr_field_type(
                        errors,
                        &meta,
                        FieldKind::SubCommand,
                        &mut this.field_type,
                    );
                } else if name.is_ident("switch") {
                    parse_attr_field_type(errors, &meta, FieldKind::Switch, &mut this.field_type);
                } else if name.is_ident("positional") {
                    parse_attr_field_type(
                        errors,
                        &meta,
                        FieldKind::Positional,
                        &mut this.field_type,
                    );
                } else if name.is_ident("greedy") {
                    this.greedy = Some(name.clone());
                } else if name.is_ident("hidden_help") {
                    this.hidden_help = true;
                } else {
                    errors.err(
                        &meta,
                        concat!(
                            "Invalid field-level `argh` attribute\n",
                            "Expected one of: `arg_name`, `default`, `description`, `from_str_fn`, `greedy`, ",
                            "`long`, `option`, `short`, `subcommand`, `switch`, `hidden_help`",
                        ),
                    );
                }
            }
        }
        // `default` only makes sense where a value is parsed from the command line.
        if let (Some(default), Some(field_type)) = (&this.default, &this.field_type) {
            match field_type.kind {
                FieldKind::Option | FieldKind::Positional => {}
                FieldKind::SubCommand | FieldKind::Switch => errors.err(
                    default,
                    "`default` may only be specified on `#[argh(option)]` \
                     or `#[argh(positional)]` fields",
                ),
            }
        }
        // `greedy` consumes the rest of the command line, so it is positional-only.
        match (&this.greedy, this.field_type.as_ref().map(|f| f.kind)) {
            (Some(_), Some(FieldKind::Positional)) => {}
            (Some(greedy), Some(_)) => errors.err(
                &greedy,
                "`greedy` may only be specified on `#[argh(positional)]` \
                 fields",
            ),
            _ => {}
        }
        // Enforce the lowercase-first-letter convention for help output.
        if let Some(d) = &this.description {
            check_option_description(errors, d.content.value().trim(), d.content.span());
        }
        this
    }
    /// Handles `#[argh(from_str_fn(path))]`.
    fn parse_attr_from_str_fn(&mut self, errors: &Errors, m: &syn::MetaList) {
        parse_attr_fn_name(errors, m, "from_str_fn", &mut self.from_str_fn)
    }
    /// Handles `#[argh(default = "...")]`.
    fn parse_attr_default(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        parse_attr_single_string(errors, m, "default", &mut self.default);
    }
    /// Handles `#[argh(arg_name = "...")]`.
    fn parse_attr_arg_name(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        parse_attr_single_string(errors, m, "arg_name", &mut self.arg_name);
    }
    /// Handles `#[argh(long = ...)]`.
    ///
    /// `long = none` (any letter case) explicitly disables the long flag;
    /// otherwise the value must be a string literal, validated by
    /// `check_long_name`.
    fn parse_attr_long(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        if let Some(first) = &self.long {
            errors.duplicate_attrs("long", first, m);
        } else if let syn::Expr::Path(syn::ExprPath { path, .. }) = &m.value
            && let Some(ident) = path.get_ident()
            && ident.to_string().eq_ignore_ascii_case("none")
        {
            self.long = Some(None);
        } else if let Some(lit_str) = errors.expect_lit_str(&m.value) {
            self.long = Some(Some(lit_str.clone()));
        }
        if let Some(Some(long)) = &self.long {
            let value = long.value();
            check_long_name(errors, long, &value);
        }
    }
    /// Handles `#[argh(short = 'c')]`; the character must be ASCII.
    fn parse_attr_short(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        if let Some(first) = &self.short {
            errors.duplicate_attrs("short", first, m);
        } else if let Some(lit_char) = errors.expect_lit_char(&m.value) {
            self.short = Some(lit_char.clone());
            if !lit_char.value().is_ascii() {
                errors.err(lit_char, "Short names must be ASCII");
            }
        }
    }
}
/// Validates a `--long-name`: it must be ASCII and consist only of lowercase
/// letters, digits, and dashes. Both violations are reported independently.
pub(crate) fn check_long_name(errors: &Errors, spanned: &impl syn::spanned::Spanned, value: &str) {
    if !value.is_ascii() {
        errors.err(spanned, "Long names must be ASCII");
    }
    let is_valid_char = |c: char| c.is_lowercase() || c == '-' || c.is_ascii_digit();
    if value.chars().any(|c| !is_valid_char(c)) {
        errors.err(
            spanned,
            "Long names may only contain lowercase letters, digits, and dashes",
        );
    }
}
/// Parses a function path from an attribute like `#[argh(from_str_fn(path))]`.
///
/// A duplicate is reported, but the slot is still overwritten with the latest
/// value so that parsing can continue and surface further diagnostics.
fn parse_attr_fn_name(
    errors: &Errors,
    m: &syn::MetaList,
    attr_name: &str,
    slot: &mut Option<syn::ExprPath>,
) {
    if let Some(first) = slot {
        errors.duplicate_attrs(attr_name, first, m);
    }
    *slot = errors.ok(m.parse_args());
}
/// Records the field kind declared by a bare word like `#[argh(switch)]`.
///
/// The meta must be a plain word (no value or list); a second kind attribute
/// on the same field is reported as a duplicate and ignored.
fn parse_attr_field_type(
    errors: &Errors,
    meta: &syn::Meta,
    kind: FieldKind,
    slot: &mut Option<FieldType>,
) {
    if let Some(path) = errors.expect_meta_word(meta) {
        if let Some(first) = slot {
            errors.duplicate_attrs("field kind", &first.ident, path);
        } else if let Some(word) = path.get_ident() {
            // Keep the attribute ident so later errors can point at it.
            *slot = Some(FieldType {
                kind,
                ident: word.clone(),
            });
        }
    }
}
// Whether the attribute is one like `#[<name> ...]`
fn is_matching_attr(name: &str, attr: &syn::Attribute) -> bool {
    let segments = &attr.path().segments;
    segments.len() == 1 && segments[0].ident == name
}
/// Checks for `#[doc ...]`, which is generated by doc comments
/// (rustc lowers `/// ...` into `#[doc = "..."]`).
fn is_doc_attr(attr: &syn::Attribute) -> bool {
    is_matching_attr("doc", attr)
}
/// Checks for `#[argh ...]`, the only attribute namespace this derive consumes.
fn is_argh_attr(attr: &syn::Attribute) -> bool {
    is_matching_attr("argh", attr)
}
/// Filters out non-`#[argh(...)]` attributes and converts to a sequence of `syn::Meta`.
///
/// Returns `None` for foreign attributes or when the argh attribute is
/// malformed (the malformation is recorded in `errors`).
fn argh_attr_to_meta_list(
    errors: &Errors,
    attr: &syn::Attribute,
) -> Option<impl IntoIterator<Item = syn::Meta>> {
    if !is_argh_attr(attr) {
        return None;
    }
    let ml = errors.expect_meta_list(&attr.meta)?;
    // Parse the parenthesized, comma-separated metas: `#[argh(a, b = "c", d(e))]`.
    errors.ok(ml.parse_args_with(
        syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated,
    ))
}
/// Represents a `#[derive(FromArgs)]` type's top-level attributes.
#[derive(Default)]
pub struct TypeAttrs {
    /// `#[argh(subcommand)]`: the ident of the attribute word, when present.
    pub is_subcommand: Option<syn::Ident>,
    /// `#[argh(name = "...")]`: command name override.
    pub name: Option<syn::LitStr>,
    /// Type description from a doc comment or `description = "..."`.
    pub description: Option<Description>,
    /// `#[argh(example = "...")]` entries, in declaration order.
    pub examples: Vec<syn::LitStr>,
    /// `#[argh(note = "...")]` entries, in declaration order.
    pub notes: Vec<syn::LitStr>,
    /// `#[argh(error_code(N, "msg"))]` pairs, in declaration order.
    pub error_codes: Vec<(syn::LitInt, syn::LitStr)>,
    /// Arguments that trigger printing of the help message
    pub help_triggers: Option<Vec<syn::LitStr>>,
}
impl TypeAttrs {
    /// Parse top-level `#[argh(...)]` attributes
    ///
    /// Unknown keys produce an error but parsing continues, so all diagnostics
    /// are reported in one pass. Cross-attribute validation runs after the loop.
    pub fn parse(errors: &Errors, derive_input: &syn::DeriveInput) -> Self {
        let mut this = TypeAttrs::default();
        for attr in &derive_input.attrs {
            // Doc comments feed the description (unless an explicit one exists).
            if is_doc_attr(attr) {
                parse_attr_doc(errors, attr, &mut this.description);
                continue;
            }
            let ml: Vec<syn::Meta> = if let Some(ml) = argh_attr_to_meta_list(errors, attr) {
                ml.into_iter().collect()
            } else {
                continue;
            };
            for meta in ml.iter() {
                let name = meta.path();
                if name.is_ident("description") {
                    if let Some(m) = errors.expect_meta_name_value(meta) {
                        parse_attr_description(errors, m, &mut this.description);
                    }
                } else if name.is_ident("error_code") {
                    if let Some(m) = errors.expect_meta_list(meta) {
                        this.parse_attr_error_code(errors, m);
                    }
                } else if name.is_ident("example") {
                    if let Some(m) = errors.expect_meta_name_value(meta) {
                        this.parse_attr_example(errors, m);
                    }
                } else if name.is_ident("name") {
                    if let Some(m) = errors.expect_meta_name_value(meta) {
                        this.parse_attr_name(errors, m);
                    }
                } else if name.is_ident("note") {
                    if let Some(m) = errors.expect_meta_name_value(meta) {
                        this.parse_attr_note(errors, m);
                    }
                } else if name.is_ident("subcommand") {
                    if let Some(ident) = errors.expect_meta_word(meta).and_then(|p| p.get_ident()) {
                        this.parse_attr_subcommand(errors, ident);
                    }
                } else if name.is_ident("help_triggers") {
                    if let Some(m) = errors.expect_meta_list(meta) {
                        Self::parse_help_triggers(m, errors, &mut this);
                    }
                } else {
                    errors.err(
                        meta,
                        concat!(
                            "Invalid type-level `argh` attribute\n",
                            "Expected one of: `description`, `error_code`, `example`, `name`, ",
                            "`note`, `subcommand`, `help_triggers`",
                        ),
                    );
                }
            }
        }
        // `help_triggers` is only meaningful on a top-level command. This check
        // runs once, after all attributes are processed: the previous in-loop
        // version searched only the *current* attribute's meta list for the
        // `help_triggers` entry and `unwrap`ed the result, which panicked when
        // `subcommand` and `help_triggers` were given in two separate
        // `#[argh(...)]` attributes.
        if let Some(subcommand) = &this.is_subcommand
            && this.help_triggers.is_some()
        {
            errors.err(subcommand, "Cannot use `help_triggers` on a subcommand");
        }
        this.check_error_codes(errors);
        this
    }
    /// Checks that error codes are within range for `i32` and that they are
    /// never duplicated.
    fn check_error_codes(&self, errors: &Errors) {
        // map from error code to index
        let mut map: HashMap<u64, usize> = HashMap::new();
        for (index, (lit_int, _lit_str)) in self.error_codes.iter().enumerate() {
            let value = match lit_int.base10_parse::<u64>() {
                Ok(v) => v,
                Err(e) => {
                    errors.push(e);
                    continue;
                }
            };
            if value > (i32::MAX as u64) {
                errors.err(lit_int, "Error code out of range for `i32`");
            }
            match map.entry(value) {
                Entry::Occupied(previous) => {
                    // Point at both the new and the previously-seen literal.
                    let previous_index = *previous.get();
                    let (previous_lit_int, _previous_lit_str) = &self.error_codes[previous_index];
                    errors.err(lit_int, &format!("Duplicate error code {}", value));
                    errors.err(
                        previous_lit_int,
                        &format!("Error code {} previously defined here", value),
                    );
                }
                Entry::Vacant(slot) => {
                    slot.insert(index);
                }
            }
        }
    }
    /// Handles `#[argh(error_code(N, "msg"))]`.
    fn parse_attr_error_code(&mut self, errors: &Errors, ml: &syn::MetaList) {
        errors.ok(ml.parse_args_with(|input: syn::parse::ParseStream| {
            let err_code = input.parse()?;
            input.parse::<syn::Token![,]>()?;
            let err_msg = input.parse()?;
            if let (Some(err_code), Some(err_msg)) = (
                errors.expect_lit_int(&err_code),
                errors.expect_lit_str(&err_msg),
            ) {
                self.error_codes.push((err_code.clone(), err_msg.clone()));
            }
            Ok(())
        }));
    }
    /// Handles `#[argh(example = "...")]` (repeatable).
    fn parse_attr_example(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        parse_attr_multi_string(errors, m, &mut self.examples)
    }
    /// Handles `#[argh(name = "...")]`; "help" is reserved.
    fn parse_attr_name(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        parse_attr_single_string(errors, m, "name", &mut self.name);
        if let Some(name) = &self.name
            && name.value() == "help"
        {
            errors.err(name, "Custom `help` commands are not supported.");
        }
    }
    /// Handles `#[argh(note = "...")]` (repeatable).
    fn parse_attr_note(&mut self, errors: &Errors, m: &syn::MetaNameValue) {
        parse_attr_multi_string(errors, m, &mut self.notes)
    }
    /// Handles the bare `#[argh(subcommand)]` word.
    fn parse_attr_subcommand(&mut self, errors: &Errors, ident: &syn::Ident) {
        if let Some(first) = &self.is_subcommand {
            errors.duplicate_attrs("subcommand", first, ident);
        } else {
            self.is_subcommand = Some(ident.clone());
        }
    }
    /// Collects the list of arguments that trigger printing of the help message,
    /// e.g. `#[argh(help_triggers("-h", "--help", "help"))]`.
    /// Non-string-literal arguments are silently skipped.
    fn parse_help_triggers(m: &syn::MetaList, errors: &Errors, this: &mut TypeAttrs) {
        let parser = Punctuated::<syn::Expr, syn::Token![,]>::parse_terminated;
        match parser.parse(m.tokens.clone().into()) {
            Ok(args) => {
                let mut triggers = Vec::new();
                for arg in args {
                    if let syn::Expr::Lit(syn::ExprLit {
                        lit: syn::Lit::Str(lit_str),
                        ..
                    }) = arg
                    {
                        triggers.push(lit_str);
                    }
                }
                this.help_triggers = Some(triggers);
            }
            Err(err) => errors.push(err),
        }
    }
}
/// Represents an enum variant's attributes.
#[derive(Default)]
pub struct VariantAttrs {
    /// `#[argh(dynamic)]`: marks the variant whose subcommands are resolved at runtime.
    pub is_dynamic: Option<syn::Path>,
}
impl VariantAttrs {
    /// Parse enum variant `#[argh(...)]` attributes
    ///
    /// Only the bare `dynamic` word is accepted at variant level; `argh`
    /// attributes on the variant's *fields* are rejected as unused.
    pub fn parse(errors: &Errors, variant: &syn::Variant) -> Self {
        let mut this = VariantAttrs::default();
        let fields = match &variant.fields {
            syn::Fields::Named(fields) => Some(&fields.named),
            syn::Fields::Unnamed(fields) => Some(&fields.unnamed),
            syn::Fields::Unit => None,
        };
        // Field-level argh attributes have no meaning inside a subcommand enum.
        for field in fields.into_iter().flatten() {
            for attr in &field.attrs {
                if is_argh_attr(attr) {
                    err_unused_enum_attr(errors, attr);
                }
            }
        }
        for attr in &variant.attrs {
            let ml = if let Some(ml) = argh_attr_to_meta_list(errors, attr) {
                ml
            } else {
                continue;
            };
            for meta in ml {
                let name = meta.path();
                if name.is_ident("dynamic") {
                    if let Some(prev) = this.is_dynamic.as_ref() {
                        errors.duplicate_attrs("dynamic", prev, &meta);
                    } else {
                        this.is_dynamic = errors.expect_meta_word(&meta).cloned();
                    }
                } else {
                    errors.err(
                        &meta,
                        "Invalid variant-level `argh` attribute\n\
                         Variants can only have the #[argh(dynamic)] attribute.",
                    );
                }
            }
        }
        this
    }
}
/// Enforces the "descriptions start with a lowercase letter" convention,
/// while still allowing descriptions that start with an initialism
/// (two leading non-lowercase alphanumeric characters).
fn check_option_description(errors: &Errors, desc: &str, span: Span) {
    let mut chars = desc.trim().chars();
    let first = chars.next();
    let second = chars.next();
    let acceptable = match (first, second) {
        (Some(x), _) if x.is_lowercase() => true,
        // If both the first and second letter are not lowercase,
        // this is likely an initialism which should be allowed.
        (Some(x), Some(y)) if !x.is_lowercase() && y.is_alphanumeric() && !y.is_lowercase() => true,
        _ => false,
    };
    if !acceptable {
        errors.err_span(span, "Descriptions must begin with a lowercase letter");
    }
}
/// Stores a string value for a non-repeatable attribute like `name = "..."`.
/// A second occurrence is reported as a duplicate and the first value is kept.
fn parse_attr_single_string(
    errors: &Errors,
    m: &syn::MetaNameValue,
    name: &str,
    slot: &mut Option<syn::LitStr>,
) {
    if let Some(first) = slot {
        errors.duplicate_attrs(name, first, m);
    } else if let Some(lit_str) = errors.expect_lit_str(&m.value) {
        *slot = Some(lit_str.clone());
    }
}
/// Appends a string value for a repeatable attribute like `note = "..."`.
fn parse_attr_multi_string(errors: &Errors, m: &syn::MetaNameValue, list: &mut Vec<syn::LitStr>) {
    if let Some(lit_str) = errors.expect_lit_str(&m.value) {
        list.push(lit_str.clone());
    }
}
/// Folds one `#[doc = "..."]` attribute (i.e. one doc-comment line) into `slot`.
///
/// Consecutive doc lines are concatenated (keeping the first line's span) and
/// backslash escapes are resolved via `unescape_doc`. An explicit
/// `#[argh(description = "...")]` always wins over doc comments.
fn parse_attr_doc(errors: &Errors, attr: &syn::Attribute, slot: &mut Option<Description>) {
    let nv = if let Some(nv) = errors.expect_meta_name_value(&attr.meta) {
        nv
    } else {
        return;
    };
    // Don't replace an existing explicit description.
    if slot.as_ref().map(|d| d.explicit).unwrap_or(false) {
        return;
    }
    if let Some(lit_str) = errors.expect_lit_str(&nv.value) {
        let lit_str = if let Some(previous) = slot {
            // Append to the accumulated doc text, preserving the original span.
            let previous = &previous.content;
            let previous_span = previous.span();
            syn::LitStr::new(
                &(previous.value() + &unescape_doc(lit_str.value())),
                previous_span,
            )
        } else {
            syn::LitStr::new(&unescape_doc(lit_str.value()), lit_str.span())
        };
        *slot = Some(Description {
            explicit: false,
            content: lit_str,
        });
    }
}
/// Replaces escape sequences in doc-comments with the characters they represent.
///
/// Rustdoc understands CommonMark escape sequences consisting of a backslash followed by an ASCII
/// punctuation character. Any other backslash is treated as a literal backslash.
///
/// Braces are doubled because the output is later used as a `format!` string.
fn unescape_doc(s: String) -> String {
    let mut out = String::with_capacity(s.len());
    let mut iter = s.chars().peekable();
    while let Some(current) = iter.next() {
        let mut ch = current;
        // `\<punct>` collapses to `<punct>`; any other backslash stays literal.
        if ch == '\\' {
            if let Some(&next) = iter.peek() {
                if next.is_ascii_punctuation() {
                    ch = next;
                    iter.next();
                }
            }
        }
        // Escape braces for the downstream format string by emitting them twice.
        if ch == '{' || ch == '}' {
            out.push(ch);
        }
        out.push(ch);
    }
    out
}
/// Handles an explicit `description = "..."` attribute.
///
/// A second explicit description is reported as a duplicate, but the slot is
/// still overwritten with the latest value; an explicit description always
/// replaces one accumulated from doc comments.
fn parse_attr_description(errors: &Errors, m: &syn::MetaNameValue, slot: &mut Option<Description>) {
    let lit_str = if let Some(lit_str) = errors.expect_lit_str(&m.value) {
        lit_str
    } else {
        return;
    };
    // Don't allow multiple explicit (non doc-comment) descriptions
    if let Some(description) = slot
        && description.explicit
    {
        errors.duplicate_attrs("description", &description.content, lit_str);
    }
    *slot = Some(Description {
        explicit: true,
        content: lit_str.clone(),
    });
}
/// Checks that a `#![derive(FromArgs)]` enum has an `#[argh(subcommand)]`
/// attribute and that it does not have any other type-level `#[argh(...)]` attributes.
pub fn check_enum_type_attrs(errors: &Errors, type_attrs: &TypeAttrs, type_span: &Span) {
    // Destructure so a newly added field causes a compile error here,
    // forcing this validation to be kept in sync.
    let TypeAttrs {
        is_subcommand,
        name,
        description,
        examples,
        notes,
        error_codes,
        help_triggers,
    } = type_attrs;
    // Ensure that `#[argh(subcommand)]` is present.
    if is_subcommand.is_none() {
        errors.err_span(
            *type_span,
            concat!(
                "`#![derive(FromArgs)]` on `enum`s can only be used to enumerate subcommands.\n",
                "Consider adding `#[argh(subcommand)]` to the `enum` declaration.",
            ),
        );
    }
    // Error on all other type-level attributes.
    if let Some(name) = name {
        err_unused_enum_attr(errors, name);
    }
    // Doc-comment descriptions are tolerated; only explicit ones are flagged.
    if let Some(description) = description
        && description.explicit
    {
        err_unused_enum_attr(errors, &description.content);
    }
    // For repeatable attributes, pointing at the first occurrence is enough.
    if let Some(example) = examples.first() {
        err_unused_enum_attr(errors, example);
    }
    if let Some(note) = notes.first() {
        err_unused_enum_attr(errors, note);
    }
    if let Some(err_code) = error_codes.first() {
        err_unused_enum_attr(errors, &err_code.0);
    }
    if let Some(triggers) = help_triggers
        && let Some(trigger) = triggers.first()
    {
        err_unused_enum_attr(errors, trigger);
    }
}
/// Emits the shared "unused attribute on subcommand enum" diagnostic at `location`.
fn err_unused_enum_attr(errors: &Errors, location: &impl syn::spanned::Spanned) {
    errors.err(
        location,
        concat!(
            "Unused `argh` attribute on `#![derive(FromArgs)]` enum. ",
            "Such `enum`s can only be used to dispatch to subcommands, ",
            "and should only contain the #[argh(subcommand)] attribute.",
        ),
    );
}

View file

@ -0,0 +1,94 @@
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::spanned::Spanned;
use syn::{Data, DeriveInput, Fields, GenericParam, parse_macro_input, parse_quote};
/// Expands `#[derive(Decodable)]`: generates `crate::socket::Encodable` and
/// `crate::socket::Decodable` impls that (de)serialize each named field in
/// declaration order.
pub(crate) fn derive_decodable(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = input.ident;
    // Add a bound `T: Decodable` to every type parameter T.
    let mut generics = input.generics;
    for param in &mut generics.params {
        if let GenericParam::Type(ref mut type_param) = *param {
            type_param
                .bounds
                .push(parse_quote!(crate::socket::Decodable));
        }
    }
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
    let encode = gen_encode(&input.data);
    let decode = gen_decode(&input.data);
    let expanded = quote! {
        // The generated impl.
        impl #impl_generics crate::socket::Encodable for #name #ty_generics #where_clause {
            fn encode(&self, w: &mut impl std::io::Write) -> std::io::Result<()> {
                #encode
                Ok(())
            }
        }
        impl #impl_generics crate::socket::Decodable for #name #ty_generics #where_clause {
            fn decode(r: &mut impl std::io::Read) -> std::io::Result<Self> {
                let val = #decode;
                Ok(val)
            }
        }
    };
    proc_macro::TokenStream::from(expanded)
}
// Generate an expression to encode each field.
// Only structs with named fields are supported; anything else panics at
// macro-expansion time via `unimplemented!`.
fn gen_encode(data: &Data) -> TokenStream {
    let fields = match data {
        Data::Struct(data) => match &data.fields {
            Fields::Named(fields) => fields,
            _ => unimplemented!(),
        },
        Data::Enum(_) | Data::Union(_) => unimplemented!(),
    };
    // Expands to an expression like
    //
    //     self.x.encode(w)?; self.y.encode(w)?; self.z.encode(w)?;
    let calls = fields.named.iter().map(|f| {
        let name = &f.ident;
        quote_spanned! { f.span() =>
            crate::socket::Encodable::encode(&self.#name, w)?;
        }
    });
    quote! {
        #(#calls)*
    }
}
// Generate an expression to decode each field.
// Only structs with named fields are supported; anything else panics at
// macro-expansion time via `unimplemented!`.
fn gen_decode(data: &Data) -> TokenStream {
    let fields = match data {
        Data::Struct(data) => match &data.fields {
            Fields::Named(fields) => fields,
            _ => unimplemented!(),
        },
        Data::Enum(_) | Data::Union(_) => unimplemented!(),
    };
    // Expands to an expression like
    //
    //     Self { x: Decodable::decode(r)?, y: Decodable::decode(r)?, }
    let field_inits = fields.named.iter().map(|f| {
        let name = &f.ident;
        quote_spanned! { f.span() =>
            #name: crate::socket::Decodable::decode(r)?,
        }
    });
    quote! {
        Self { #(#field_inits)* }
    }
}

View file

@ -0,0 +1,19 @@
#![recursion_limit = "256"]
use proc_macro::TokenStream;
mod argh;
mod decodable;
/// Entrypoint for `#[derive(Decodable)]`; delegates to the `decodable` module.
#[proc_macro_derive(Decodable)]
pub fn derive_decodable(input: TokenStream) -> TokenStream {
    decodable::derive_decodable(input)
}
/// Entrypoint for `#[derive(FromArgs)]`.
///
/// Parses the annotated item and delegates code generation to `argh::impl_from_args`.
#[proc_macro_derive(FromArgs, attributes(argh))]
pub fn argh_derive(input: TokenStream) -> TokenStream {
    let ast = syn::parse_macro_input!(input as syn::DeriveInput);
    let token = argh::impl_from_args(&ast);
    token.into()
}

459
native/src/base/dir.rs Normal file
View file

@ -0,0 +1,459 @@
use crate::cxx_extern::readlinkat;
use crate::{
FsPathBuilder, LibcReturn, LoggedResult, OsError, OsResult, Utf8CStr, Utf8CStrBuf, cstr, errno,
fd_path, fd_set_attr,
};
use libc::{dirent, mode_t};
use nix::errno::Errno;
use nix::fcntl::{AtFlags, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::UnlinkatFlags;
use std::fs::File;
use std::ops::Deref;
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, IntoRawFd, OwnedFd, RawFd};
use std::ptr::NonNull;
use std::slice;
/// A single entry read from a [`Directory`] stream.
pub struct DirEntry<'a> {
    // The directory this entry was read from; used for the *at() operations below.
    dir: &'a Directory,
    // Points into the DIR stream's own dirent storage (see Directory::read).
    entry: NonNull<dirent>,
    // Byte length of d_name including the NUL terminator
    // (set from `as_bytes_with_nul().len()` in Directory::read).
    d_name_len: usize,
}
impl DirEntry<'_> {
    /// Raw pointer to the underlying `dirent`.
    pub fn as_ptr(&self) -> *mut dirent {
        self.entry.as_ptr()
    }
    /// The entry's file name (without any path components).
    pub fn name(&self) -> &Utf8CStr {
        // SAFETY: Utf8CStr is already validated in Directory::read
        unsafe {
            Utf8CStr::from_bytes_unchecked(slice::from_raw_parts(
                self.d_name.as_ptr().cast(),
                self.d_name_len,
            ))
        }
    }
    /// Writes the full path of this entry (parent dir path + name) into `buf`.
    pub fn resolve_path(&self, buf: &mut dyn Utf8CStrBuf) -> OsResult<'static, ()> {
        self.dir.path_at(self.name(), buf)
    }
    // The following predicates test the dirent's d_type field.
    pub fn is_dir(&self) -> bool {
        self.d_type == libc::DT_DIR
    }
    pub fn is_file(&self) -> bool {
        self.d_type == libc::DT_REG
    }
    pub fn is_symlink(&self) -> bool {
        self.d_type == libc::DT_LNK
    }
    pub fn is_block_device(&self) -> bool {
        self.d_type == libc::DT_BLK
    }
    pub fn is_char_device(&self) -> bool {
        self.d_type == libc::DT_CHR
    }
    pub fn is_fifo(&self) -> bool {
        self.d_type == libc::DT_FIFO
    }
    pub fn is_socket(&self) -> bool {
        self.d_type == libc::DT_SOCK
    }
    /// Removes this entry, using rmdir semantics for directories and
    /// unlink semantics for everything else.
    pub fn unlink(&self) -> OsResult<'_, ()> {
        let flag = if self.is_dir() {
            UnlinkatFlags::RemoveDir
        } else {
            UnlinkatFlags::NoRemoveDir
        };
        self.dir.unlink_at(self.name(), flag)
    }
    /// Reads the symlink target of this entry into `buf`.
    pub fn read_link(&self, buf: &mut dyn Utf8CStrBuf) -> OsResult<'_, ()> {
        self.dir.read_link_at(self.name(), buf)
    }
    /// Opens this entry as a [`Directory`]; fails with ENOTDIR for non-directories.
    pub fn open_as_dir(&self) -> OsResult<'_, Directory> {
        if !self.is_dir() {
            return Err(OsError::new(
                Errno::ENOTDIR,
                "fdopendir",
                Some(self.name()),
                None,
            ));
        }
        self.dir.open_as_dir_at(self.name())
    }
    /// Opens this entry as a [`File`]; fails with EISDIR for directories.
    pub fn open_as_file(&self, flags: OFlag) -> OsResult<'_, File> {
        if self.is_dir() {
            return Err(OsError::new(
                Errno::EISDIR,
                "open_as_file",
                Some(self.name()),
                None,
            ));
        }
        self.dir.open_as_file_at(self.name(), flags, 0)
    }
    /// Renames this entry to `path` relative to `new_dir` (renameat semantics).
    pub fn rename_to<'a, 'entry: 'a>(
        &'entry self,
        new_dir: impl AsFd,
        path: &'a Utf8CStr,
    ) -> OsResult<'a, ()> {
        self.dir.rename_at(self.name(), new_dir, path)
    }
}
// Allow direct access to the underlying dirent fields (d_type, d_name, ...).
impl Deref for DirEntry<'_> {
    type Target = dirent;
    fn deref(&self) -> &dirent {
        // SAFETY: `entry` stays valid for the lifetime of this DirEntry
        // (it borrows the Directory it was read from).
        unsafe { self.entry.as_ref() }
    }
}
/// An open directory stream wrapping a `libc::DIR`.
#[repr(transparent)]
pub struct Directory {
    inner: NonNull<libc::DIR>,
}
/// Control-flow result returned by directory-walk callbacks.
pub enum WalkResult {
    /// Keep walking.
    Continue,
    /// Stop the entire walk.
    Abort,
    /// Skip further processing at the current level
    /// (see the walk impls for the exact per-order semantics).
    Skip,
}
impl Directory {
    /// openat(2) relative to this directory; O_CLOEXEC is always added.
    fn open_at<'a>(&self, name: &'a Utf8CStr, flags: OFlag, mode: mode_t) -> OsResult<'a, OwnedFd> {
        nix::fcntl::openat(
            self,
            name,
            flags | OFlag::O_CLOEXEC,
            Mode::from_bits_truncate(mode),
        )
        .into_os_result("openat", Some(name), None)
    }
    /// Writes "<this directory's path>/<name>" into `buf`.
    fn path_at(&self, name: &Utf8CStr, buf: &mut dyn Utf8CStrBuf) -> OsResult<'static, ()> {
        self.resolve_path(buf)?;
        buf.append_path(name);
        Ok(())
    }
}
// Low-level methods, we should track the caller when error occurs, so return OsResult.
impl Directory {
    /// Opens a directory stream with opendir(3).
    pub fn open(path: &Utf8CStr) -> OsResult<'_, Directory> {
        let dirp = unsafe { libc::opendir(path.as_ptr()) };
        let dirp = dirp.into_os_result("opendir", Some(path), None)?;
        Ok(Directory { inner: dirp })
    }
    /// Reads the next entry, skipping ".", "..", and entries whose names are
    /// not valid UTF-8 (skipping is done by recursing). Returns `Ok(None)` at
    /// end of stream.
    pub fn read(&mut self) -> OsResult<'static, Option<DirEntry<'_>>> {
        // readdir(3) signals errors only via errno, so reset it first to
        // distinguish "end of directory" from a real failure.
        *errno() = 0;
        let e = unsafe { libc::readdir(self.inner.as_ptr()) };
        if e.is_null() {
            return if *errno() != 0 {
                Err(OsError::last_os_error("readdir", None, None))
            } else {
                Ok(None)
            };
        }
        // Skip non UTF-8 entries, ".", and ".."
        unsafe {
            let entry = &*e;
            let Ok(name) = Utf8CStr::from_ptr(entry.d_name.as_ptr()) else {
                return self.read();
            };
            if name == "." || name == ".." {
                self.read()
            } else {
                let e = DirEntry {
                    dir: self,
                    entry: NonNull::from(entry),
                    // Length includes the NUL terminator; see DirEntry::name.
                    d_name_len: name.as_bytes_with_nul().len(),
                };
                Ok(Some(e))
            }
        }
    }
    /// Resets the stream to the beginning (rewinddir(3)).
    pub fn rewind(&mut self) {
        unsafe { libc::rewinddir(self.inner.as_ptr()) };
    }
    /// Opens the child `name` as a new, independent [`Directory`].
    pub fn open_as_dir_at<'a>(&self, name: &'a Utf8CStr) -> OsResult<'a, Directory> {
        let fd = self.open_at(name, OFlag::O_RDONLY, 0)?;
        Directory::try_from(fd).map_err(|e| e.set_args(Some(name), None))
    }
    /// Opens the child `name` as a [`File`] with the given flags and mode.
    pub fn open_as_file_at<'a>(
        &self,
        name: &'a Utf8CStr,
        flags: OFlag,
        mode: mode_t,
    ) -> OsResult<'a, File> {
        let fd = self.open_at(name, flags, mode)?;
        Ok(File::from(fd))
    }
    /// Reads the symlink target of child `name` into `buf` (readlinkat(2)).
    pub fn read_link_at<'a>(
        &self,
        name: &'a Utf8CStr,
        buf: &mut dyn Utf8CStrBuf,
    ) -> OsResult<'a, ()> {
        buf.clear();
        unsafe {
            readlinkat(
                self.as_raw_fd(),
                name.as_ptr(),
                buf.as_mut_ptr().cast(),
                buf.capacity(),
            )
            .check_os_err("readlinkat", Some(name), None)?;
        }
        // readlinkat does not NUL-terminate; rebuild the buffer's length/validity.
        buf.rebuild().ok();
        Ok(())
    }
    /// mkdirat(2); an already-existing entry is treated as success.
    pub fn mkdir_at<'a>(&self, name: &'a Utf8CStr, mode: mode_t) -> OsResult<'a, ()> {
        match nix::sys::stat::mkdirat(self, name, Mode::from_bits_truncate(mode)) {
            Ok(_) | Err(Errno::EEXIST) => Ok(()),
            Err(e) => Err(OsError::new(e, "mkdirat", Some(name), None)),
        }
    }
    // ln -s target self/name
    pub fn create_symlink_at<'a>(
        &self,
        name: &'a Utf8CStr,
        target: &'a Utf8CStr,
    ) -> OsResult<'a, ()> {
        nix::unistd::symlinkat(target, self, name).check_os_err(
            "symlinkat",
            Some(target),
            Some(name),
        )
    }
    /// unlinkat(2) with the given directory/non-directory flag.
    pub fn unlink_at<'a>(&self, name: &'a Utf8CStr, flag: UnlinkatFlags) -> OsResult<'a, ()> {
        nix::unistd::unlinkat(self, name, flag).check_os_err("unlinkat", Some(name), None)
    }
    /// Returns whether `path` exists relative to this directory
    /// (symlinks themselves count; their targets are not followed).
    pub fn contains_path(&self, path: &Utf8CStr) -> bool {
        // WARNING: Using faccessat is incorrect, because the raw linux kernel syscall
        // does not support the flag AT_SYMLINK_NOFOLLOW until 5.8 with faccessat2.
        // Use fstatat to check the existence of a file instead.
        nix::sys::stat::fstatat(self, path, AtFlags::AT_SYMLINK_NOFOLLOW).is_ok()
    }
    /// Writes this directory's absolute path (resolved from its fd) into `buf`.
    pub fn resolve_path(&self, buf: &mut dyn Utf8CStrBuf) -> OsResult<'static, ()> {
        fd_path(self.as_raw_fd(), buf)
    }
    /// renameat(2): moves child `old` to `new` relative to `new_dir`.
    pub fn rename_at<'a>(
        &self,
        old: &'a Utf8CStr,
        new_dir: impl AsFd,
        new: &'a Utf8CStr,
    ) -> OsResult<'a, ()> {
        nix::fcntl::renameat(self, old, new_dir, new).check_os_err("renameat", Some(old), Some(new))
    }
}
// High-level helper methods, composed of multiple operations.
// We should treat these as application logic and log ASAP, so return LoggedResult.
impl Directory {
    /// Depth-first walk visiting children before their parent directory.
    pub fn post_order_walk<F: FnMut(&DirEntry) -> LoggedResult<WalkResult>>(
        &mut self,
        mut f: F,
    ) -> LoggedResult<WalkResult> {
        self.post_order_walk_impl(&mut f)
    }
    /// Depth-first walk visiting a directory entry before its children.
    pub fn pre_order_walk<F: FnMut(&DirEntry) -> LoggedResult<WalkResult>>(
        &mut self,
        mut f: F,
    ) -> LoggedResult<WalkResult> {
        self.pre_order_walk_impl(&mut f)
    }
    /// Recursively deletes everything inside this directory
    /// (the directory itself is consumed but not removed).
    pub fn remove_all(mut self) -> LoggedResult<()> {
        self.post_order_walk(|e| {
            e.unlink()?;
            Ok(WalkResult::Continue)
        })?;
        Ok(())
    }
    /// Recursively copies all entries into `dir`, preserving file attributes.
    pub fn copy_into(&mut self, dir: &Directory) -> LoggedResult<()> {
        let mut buf = cstr::buf::default();
        self.copy_into_impl(dir, &mut buf)
    }
    /// Recursively moves all entries into `dir`.
    ///
    /// Entries are renamed when possible. When a same-named directory already
    /// exists at the destination (where rename(2) would fail with ENOTEMPTY),
    /// its contents are moved recursively and the emptied source directory is
    /// removed.
    pub fn move_into(&mut self, dir: &Directory) -> LoggedResult<()> {
        while let Some(ref e) = self.read()? {
            if e.is_dir() && dir.contains_path(e.name()) {
                // Destination folder exists, needs recursive move
                let mut src = e.open_as_dir()?;
                let dest = dir.open_as_dir_at(e.name())?;
                src.move_into(&dest)?;
                e.unlink()?;
                // Bug fix: the previous code `return`ed here, which stopped
                // processing after the first conflicting directory and left
                // all remaining entries behind in the source directory.
                continue;
            }
            e.rename_to(dir, e.name())?;
        }
        Ok(())
    }
    /// Recursively hard-links all entries into `dir`
    /// (directories are re-created; everything else is linked).
    pub fn link_into(&mut self, dir: &Directory) -> LoggedResult<()> {
        let mut buf = cstr::buf::default();
        self.link_into_impl(dir, &mut buf)
    }
}
impl Directory {
    /// Recursive worker for `post_order_walk`.
    ///
    /// Children are visited (recursively) before `f` is called on the entry
    /// itself. `Abort` propagates all the way up; `Skip` stops iterating the
    /// remaining entries of the *current* directory but lets the walk continue
    /// in the parent.
    fn post_order_walk_impl<F: FnMut(&DirEntry) -> LoggedResult<WalkResult>>(
        &mut self,
        f: &mut F,
    ) -> LoggedResult<WalkResult> {
        use WalkResult::*;
        loop {
            match self.read()? {
                None => return Ok(Continue),
                Some(ref e) => {
                    if e.is_dir() {
                        let mut dir = e.open_as_dir()?;
                        if let Abort = dir.post_order_walk_impl(f)? {
                            return Ok(Abort);
                        }
                    }
                    match f(e)? {
                        Abort => return Ok(Abort),
                        Skip => return Ok(Continue),
                        Continue => {}
                    }
                }
            }
        }
    }
    /// Recursive worker for `pre_order_walk`.
    ///
    /// `f` is called on each entry first; for directories, `Continue` recurses
    /// into the directory while `Skip` moves on to the next sibling without
    /// recursing. `Abort` propagates all the way up.
    fn pre_order_walk_impl<F: FnMut(&DirEntry) -> LoggedResult<WalkResult>>(
        &mut self,
        f: &mut F,
    ) -> LoggedResult<WalkResult> {
        use WalkResult::*;
        loop {
            match self.read()? {
                None => return Ok(Continue),
                Some(ref e) => match f(e)? {
                    Abort => return Ok(Abort),
                    Skip => continue,
                    Continue => {
                        if e.is_dir() {
                            let mut dir = e.open_as_dir()?;
                            if let Abort = dir.pre_order_walk_impl(f)? {
                                return Ok(Abort);
                            }
                        }
                    }
                },
            }
        }
    }
    /// Recursive worker for `copy_into`.
    ///
    /// `buf` is a scratch buffer reused across the recursion for path and
    /// symlink-target strings. Attributes of each source entry are re-applied
    /// to the copy. Entries that are neither directory, regular file, nor
    /// symlink are ignored.
    fn copy_into_impl(
        &mut self,
        dest_dir: &Directory,
        buf: &mut dyn Utf8CStrBuf,
    ) -> LoggedResult<()> {
        while let Some(ref e) = self.read()? {
            e.resolve_path(buf)?;
            let attr = buf.get_attr()?;
            if e.is_dir() {
                dest_dir.mkdir_at(e.name(), 0o777)?;
                let mut src = e.open_as_dir()?;
                let dest = dest_dir.open_as_dir_at(e.name())?;
                src.copy_into_impl(&dest, buf)?;
                // Apply attributes after the contents so timestamps/modes stick.
                fd_set_attr(dest.as_raw_fd(), &attr)?;
            } else if e.is_file() {
                let mut src = e.open_as_file(OFlag::O_RDONLY)?;
                let mut dest = dest_dir.open_as_file_at(
                    e.name(),
                    OFlag::O_WRONLY | OFlag::O_CREAT | OFlag::O_TRUNC,
                    0o777,
                )?;
                std::io::copy(&mut src, &mut dest)?;
                fd_set_attr(dest.as_raw_fd(), &attr)?;
            } else if e.is_symlink() {
                // Recreate the link, then set attributes on the link itself.
                e.read_link(buf)?;
                dest_dir.create_symlink_at(e.name(), buf)?;
                dest_dir.path_at(e.name(), buf)?;
                buf.set_attr(&attr)?;
            }
        }
        Ok(())
    }
    /// Recursive worker for `link_into`: re-creates directories and hard-links
    /// every other entry type via linkat(2).
    fn link_into_impl(
        &mut self,
        dest_dir: &Directory,
        buf: &mut dyn Utf8CStrBuf,
    ) -> LoggedResult<()> {
        while let Some(ref e) = self.read()? {
            if e.is_dir() {
                dest_dir.mkdir_at(e.name(), 0o777)?;
                e.resolve_path(buf)?;
                let attr = buf.get_attr()?;
                let mut src = e.open_as_dir()?;
                let dest = dest_dir.open_as_dir_at(e.name())?;
                src.link_into_impl(&dest, buf)?;
                fd_set_attr(dest.as_raw_fd(), &attr)?;
            } else {
                nix::unistd::linkat(e.dir, e.name(), dest_dir, e.name(), AtFlags::empty())
                    .check_os_err("linkat", Some(e.name()), None)?;
            }
        }
        Ok(())
    }
}
impl TryFrom<OwnedFd> for Directory {
    type Error = OsError<'static>;
    /// Takes ownership of `fd` and wraps it in a DIR stream; on success the
    /// fd is owned by the returned `Directory` and closed via closedir.
    fn try_from(fd: OwnedFd) -> OsResult<'static, Self> {
        // into_raw_fd relinquishes ownership; fdopendir takes it over.
        let dirp = unsafe { libc::fdopendir(fd.into_raw_fd()) };
        let dirp = dirp.into_os_result("fdopendir", None, None)?;
        Ok(Directory { inner: dirp })
    }
}
impl AsRawFd for Directory {
    fn as_raw_fd(&self) -> RawFd {
        unsafe { libc::dirfd(self.inner.as_ptr()) }
    }
}
impl AsFd for Directory {
    fn as_fd(&self) -> BorrowedFd<'_> {
        // SAFETY: the fd returned by dirfd stays valid for the DIR's lifetime,
        // which the borrow of `self` guarantees here.
        unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
    }
}
impl Drop for Directory {
    fn drop(&mut self) {
        // closedir also closes the underlying file descriptor.
        unsafe {
            libc::closedir(self.inner.as_ptr());
        }
    }
}

84
native/src/base/files.hpp Normal file
View file

@ -0,0 +1,84 @@
#pragma once
#include <sys/stat.h>
#include <linux/fs.h>
#include <functional>
#include <string_view>
#include <string>
#include "base-rs.hpp"
// A memory-mapped view of a file (or block device). Move-only: the mapping
// is owned by this object and released in the destructor.
struct mmap_data : public byte_data {
    // Compile-time check that BLKGETSIZE64 matches this pointer width; the
    // same ioctl numbers are hard-coded elsewhere (see the Rust map_file_at).
    static_assert((sizeof(void *) == 8 && BLKGETSIZE64 == 0x80081272) ||
                  (sizeof(void *) == 4 && BLKGETSIZE64 == 0x80041272));
    ALLOW_MOVE_ONLY(mmap_data)
    mmap_data() = default;
    // Map the file at `name`; rw selects a writable mapping.
    explicit mmap_data(const char *name, bool rw = false);
    // Same, resolving `name` relative to `dirfd`.
    mmap_data(int dirfd, const char *name, bool rw = false);
    // Map `sz` bytes of an already-open fd.
    mmap_data(int fd, size_t sz, bool rw = false);
    ~mmap_data();
};
// C-ABI file utilities shared across the C++/Rust boundary.
extern "C" {
int mkdirs(const char *path, mode_t mode);
ssize_t canonical_path(const char * __restrict__ path, char * __restrict__ buf, size_t bufsiz);
bool rm_rf(const char *path);
bool frm_rf(int dirfd);
bool cp_afc(const char *src, const char *dest);
bool mv_path(const char *src, const char *dest);
bool link_path(const char *src, const char *dest);
bool clone_attr(const char *src, const char *dest);
bool fclone_attr(int src, int dest);
} // extern "C"
// Read the entire content of a file into a std::string.
std::string full_read(int fd);
std::string full_read(const char *filename);
// Write `size` zero bytes to `fd`.
void write_zero(int fd, size_t size);
std::string resolve_preinit_dir(const char *base_dir);
// Functor = function<bool(Utf8CStr, Utf8CStr)>
// Parse a key=value property file, invoking fn(key, value) per entry;
// fn returning false stops iteration.
template <typename Functor>
void parse_prop_file(const char *file, Functor &&fn) {
    parse_prop_file_rs(file, [&](rust::Str key, rust::Str val) -> bool {
        // We perform the null termination here in C++ because it's very difficult to do it
        // right in Rust due to pointer provenance. Trying to dereference a pointer without
        // the correct provenance in Rust, even in unsafe code, is undefined behavior.
        // However on the C++ side, there are fewer restrictions on pointers, so the const_cast here
        // will not trigger UB in the compiler.
        // (The Rust side reserves an extra byte per line for these writes;
        //  see BufReadExt::for_each_prop.)
        *(const_cast<char *>(key.data()) + key.size()) = '\0';
        *(const_cast<char *>(val.data()) + val.size()) = '\0';
        return fn(Utf8CStr(key.data(), key.size() + 1), Utf8CStr(val.data(), val.size() + 1));
    });
}
// RAII wrappers: FILE*/DIR* handles that are closed automatically.
using sFILE = std::unique_ptr<FILE, decltype(&fclose)>;
using sDIR = std::unique_ptr<DIR, decltype(&closedir)>;
sDIR make_dir(DIR *dp);
sFILE make_file(FILE *fp);
// Variants without the x-prefix return an empty handle on failure;
// x-variants go through the xwrap logging wrappers.
static inline sDIR open_dir(const char *path) {
    return make_dir(opendir(path));
}
static inline sDIR xopen_dir(const char *path) {
    return make_dir(xopendir(path));
}
static inline sDIR xopen_dir(int dirfd) {
    return make_dir(xfdopendir(dirfd));
}
static inline sFILE open_file(const char *path, const char *mode) {
    return make_file(fopen(path, mode));
}
static inline sFILE xopen_file(const char *path, const char *mode) {
    return make_file(xfopen(path, mode));
}
static inline sFILE xopen_file(int fd, const char *mode) {
    return make_file(xfdopen(fd, mode));
}

900
native/src/base/files.rs Normal file
View file

@ -0,0 +1,900 @@
use crate::{
Directory, FsPathFollow, LibcReturn, LoggedResult, OsError, OsResult, Utf8CStr, Utf8CStrBuf,
cstr, errno, error,
};
use bytemuck::{Pod, bytes_of, bytes_of_mut};
use libc::{c_uint, makedev, mode_t};
use nix::errno::Errno;
use nix::fcntl::{AT_FDCWD, OFlag};
use nix::sys::stat::{FchmodatFlags, Mode};
use nix::unistd::{AccessFlags, Gid, Uid};
use num_traits::AsPrimitive;
use std::cmp::min;
use std::ffi::CStr;
use std::fmt::Display;
use std::fs::File;
use std::io::{BufRead, BufReader, Read, Seek, SeekFrom, Write};
use std::mem::MaybeUninit;
use std::os::fd::{AsFd, BorrowedFd};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, OwnedFd, RawFd};
use std::path::Path;
use std::{io, mem, ptr, slice};
/// Extensions for any `Read`: skip bytes by reading, and read raw POD values.
pub trait ReadExt {
    /// Consume and discard exactly `len` bytes from the stream.
    fn skip(&mut self, len: usize) -> io::Result<()>;
    /// Fill `data` by reading its exact byte representation.
    fn read_pod<F: Pod>(&mut self, data: &mut F) -> io::Result<()>;
}
impl<T: Read> ReadExt for T {
    fn skip(&mut self, mut len: usize) -> io::Result<()> {
        // Use a zero-initialized scratch buffer. The previous implementation
        // called MaybeUninit::assume_init_mut() on uninitialized memory, which
        // is undefined behavior: producing a reference to uninitialized bytes
        // violates MaybeUninit's initialization invariant. Zeroing 4 KiB once
        // is cheap and sound.
        let mut buf = [0_u8; 4096];
        while len > 0 {
            let l = min(buf.len(), len);
            self.read_exact(&mut buf[..l])?;
            len -= l;
        }
        Ok(())
    }
    fn read_pod<F: Pod>(&mut self, data: &mut F) -> io::Result<()> {
        // bytemuck::bytes_of_mut reinterprets `data` as &mut [u8] safely
        // because F: Pod guarantees any bit pattern is valid.
        self.read_exact(bytes_of_mut(data))
    }
}
/// Skip bytes on a seekable stream, preferring an actual seek.
pub trait ReadSeekExt {
    /// Advance the stream position by `len` bytes.
    fn skip(&mut self, len: usize) -> io::Result<()>;
}
impl<T: Read + Seek> ReadSeekExt for T {
    fn skip(&mut self, len: usize) -> io::Result<()> {
        // Try a relative seek first; if the underlying object is not truly
        // seekable (e.g. a pipe), fall back to draining reads.
        match self.seek(SeekFrom::Current(len as i64)) {
            Ok(_) => Ok(()),
            Err(_) => ReadExt::skip(self, len),
        }
    }
}
/// Line- and property-oriented iteration helpers for buffered readers.
pub trait BufReadExt {
    /// Call `f` on each line (newline included); `f` returning false stops.
    fn for_each_line<F: FnMut(&mut String) -> bool>(&mut self, f: F);
    /// Call `f(key, value)` for each `key=value` line; `f` returning false stops.
    fn for_each_prop<F: FnMut(&str, &str) -> bool>(&mut self, f: F);
}
impl<T: BufRead> BufReadExt for T {
    fn for_each_line<F: FnMut(&mut String) -> bool>(&mut self, mut f: F) {
        let mut buf = String::new();
        loop {
            match self.read_line(&mut buf) {
                Ok(0) => break,
                Ok(_) => {
                    if !f(&mut buf) {
                        break;
                    }
                }
                Err(e) => {
                    // Read errors terminate iteration but are only logged.
                    error!("{}", e);
                    break;
                }
            };
            // The buffer is reused across lines; clear between iterations.
            buf.clear();
        }
    }
    fn for_each_prop<F: FnMut(&str, &str) -> bool>(&mut self, mut f: F) {
        self.for_each_line(|line| {
            // Reserve an additional byte, because this string will be manually
            // null terminated on the C++ side, and it may need more space.
            line.reserve(1);
            let line = line.trim();
            // '#' starts a comment line; skip it but keep iterating.
            if line.starts_with('#') {
                return true;
            }
            if let Some((key, value)) = line.split_once('=') {
                return f(key.trim(), value.trim());
            }
            // Lines without '=' are silently ignored.
            true
        });
    }
}
/// Extensions for any `Write`: emit runs of zero bytes and raw POD values.
pub trait WriteExt {
    /// Write exactly `len` zero bytes.
    fn write_zeros(&mut self, len: usize) -> io::Result<()>;
    /// Write the exact byte representation of `data`.
    fn write_pod<F: Pod>(&mut self, data: &F) -> io::Result<()>;
}
impl<T: Write> WriteExt for T {
    fn write_zeros(&mut self, mut len: usize) -> io::Result<()> {
        // Emit zeros in fixed-size chunks from a stack buffer.
        let zeros = [0_u8; 4096];
        while len != 0 {
            let chunk = zeros.len().min(len);
            self.write_all(&zeros[..chunk])?;
            len -= chunk;
        }
        Ok(())
    }
    fn write_pod<F: Pod>(&mut self, data: &F) -> io::Result<()> {
        self.write_all(bytes_of(data))
    }
}
/// Either one of the three standard streams or an owned `File`.
pub enum FileOrStd {
    StdIn,
    StdOut,
    StdErr,
    File(File),
}
impl FileOrStd {
    /// View this as a `&File`. For the standard streams, a static fd number
    /// (0/1/2) is reinterpreted as a `File` reference; the fd is NOT owned,
    /// so the returned `&File` must never be dropped as an owned `File`.
    pub fn as_file(&self) -> &File {
        let raw_fd_ref: &'static RawFd = match self {
            FileOrStd::StdIn => &0,
            FileOrStd::StdOut => &1,
            FileOrStd::StdErr => &2,
            FileOrStd::File(file) => return file,
        };
        // SAFETY: File is guaranteed to have the same ABI as RawFd
        unsafe { mem::transmute(raw_fd_ref) }
    }
}
/// open(2) wrapper returning an OwnedFd with error context attached.
fn open_fd(path: &Utf8CStr, flags: OFlag, mode: mode_t) -> OsResult<'_, OwnedFd> {
    nix::fcntl::open(path, flags, Mode::from_bits_truncate(mode)).into_os_result(
        "open",
        Some(path),
        None,
    )
}
/// Resolve the filesystem path of an open fd by reading /proc/self/fd/<fd>.
pub fn fd_path(fd: RawFd, buf: &mut dyn Utf8CStrBuf) -> OsResult<'static, ()> {
    let path = cstr::buf::new::<64>()
        .join_path("/proc/self/fd")
        .join_path_fmt(fd);
    // Drop the /proc path from the error args; it is meaningless to callers.
    path.read_link(buf).map_err(|e| e.set_args(None, None))
}
/// File metadata: the raw stat buffer plus (with the selinux feature)
/// the file's SELinux security context.
pub struct FileAttr {
    pub st: libc::stat,
    #[cfg(feature = "selinux")]
    pub con: crate::Utf8CStrBufArr<128>,
}
impl Default for FileAttr {
    fn default() -> Self {
        Self::new()
    }
}
impl FileAttr {
    /// A zeroed stat buffer and (when enabled) an empty SELinux context.
    pub fn new() -> Self {
        FileAttr {
            st: unsafe { mem::zeroed() },
            #[cfg(feature = "selinux")]
            con: crate::Utf8CStrBufArr::new(),
        }
    }
    /// True if the file-type bits of st_mode equal `mode`.
    #[inline(always)]
    #[allow(clippy::unnecessary_cast)]
    fn is(&self, mode: mode_t) -> bool {
        // Casts keep this building across ABIs where mode_t width differs.
        (self.st.st_mode & libc::S_IFMT as c_uint) as mode_t == mode
    }
    pub fn is_dir(&self) -> bool {
        self.is(libc::S_IFDIR)
    }
    pub fn is_file(&self) -> bool {
        self.is(libc::S_IFREG)
    }
    pub fn is_symlink(&self) -> bool {
        self.is(libc::S_IFLNK)
    }
    pub fn is_block_device(&self) -> bool {
        self.is(libc::S_IFBLK)
    }
    pub fn is_char_device(&self) -> bool {
        self.is(libc::S_IFCHR)
    }
    pub fn is_fifo(&self) -> bool {
        self.is(libc::S_IFIFO)
    }
    pub fn is_socket(&self) -> bool {
        self.is(libc::S_IFSOCK)
    }
    /// Whiteout marker: a character device with device number 0
    /// (the overlayfs convention for deleted entries).
    pub fn is_whiteout(&self) -> bool {
        self.is_char_device() && self.st.st_rdev == 0
    }
}
// The xattr key under which Linux stores a file's SELinux security context.
const XATTR_NAME_SELINUX: &CStr = c"security.selinux";
// Low-level methods, we should track the caller when error occurs, so return OsResult.
impl Utf8CStr {
    /// View this path as one whose operations follow symlinks.
    pub fn follow_link(&self) -> &FsPathFollow {
        // SAFETY: presumably FsPathFollow is a transparent wrapper over
        // Utf8CStr — confirm at the type's definition.
        unsafe { mem::transmute(self) }
    }
    /// Open the file at this path with `flags` (no creation).
    pub fn open(&self, flags: OFlag) -> OsResult<'_, File> {
        Ok(File::from(open_fd(self, flags, 0)?))
    }
    /// Open with O_CREAT | `flags`, creating with `mode` when absent.
    pub fn create(&self, flags: OFlag, mode: mode_t) -> OsResult<'_, File> {
        Ok(File::from(open_fd(self, OFlag::O_CREAT | flags, mode)?))
    }
    /// True if the path exists; symlinks are not followed (lstat).
    pub fn exists(&self) -> bool {
        nix::sys::stat::lstat(self).is_ok()
    }
    /// rename(2) this path to `name` (both relative to the cwd).
    pub fn rename_to<'a>(&'a self, name: &'a Utf8CStr) -> OsResult<'a, ()> {
        nix::fcntl::renameat(AT_FDCWD, self, AT_FDCWD, name).check_os_err(
            "rename",
            Some(self),
            Some(name),
        )
    }
    /// remove(3): unlinks files, rmdirs empty directories.
    pub fn remove(&self) -> OsResult<'_, ()> {
        unsafe { libc::remove(self.as_ptr()).check_os_err("remove", Some(self), None) }
    }
    /// Read this symlink's target into `buf`, always null terminated.
    #[allow(clippy::unnecessary_cast)]
    pub fn read_link(&self, buf: &mut dyn Utf8CStrBuf) -> OsResult<'_, ()> {
        buf.clear();
        unsafe {
            // readlink(2) does not null terminate; reserve one byte and
            // terminate manually at the returned length.
            let r = libc::readlink(self.as_ptr(), buf.as_mut_ptr(), buf.capacity() - 1)
                .into_os_result("readlink", Some(self), None)? as isize;
            *(buf.as_mut_ptr().offset(r) as *mut u8) = b'\0';
        }
        buf.rebuild().ok();
        Ok(())
    }
    /// mkdir(2); an already-existing path (EEXIST) is treated as success.
    pub fn mkdir(&self, mode: mode_t) -> OsResult<'_, ()> {
        match nix::unistd::mkdir(self, Mode::from_bits_truncate(mode)) {
            Ok(_) | Err(Errno::EEXIST) => Ok(()),
            Err(e) => Err(OsError::new(e, "mkdir", Some(self), None)),
        }
    }
    // Inspired by https://android.googlesource.com/platform/bionic/+/master/libc/bionic/realpath.cpp
    /// Canonicalize this path into `buf` via /proc/self/fd, then verify with
    /// a stat pair that the result still names the same file (race check).
    pub fn realpath(&self, buf: &mut dyn Utf8CStrBuf) -> OsResult<'_, ()> {
        let fd = self.open(OFlag::O_PATH | OFlag::O_CLOEXEC)?;
        let mut skip_check = false;
        let st1 = match nix::sys::stat::fstat(&fd) {
            Ok(st) => st,
            Err(_) => {
                // This will only fail on Linux < 3.6
                skip_check = true;
                unsafe { mem::zeroed() }
            }
        };
        fd_path(fd.as_raw_fd(), buf)?;
        let st2 = nix::sys::stat::stat(buf.as_cstr()).into_os_result("stat", Some(self), None)?;
        // Different dev/ino means the path changed underneath us.
        if !skip_check && (st2.st_dev != st1.st_dev || st2.st_ino != st1.st_ino) {
            return Err(OsError::new(Errno::ENOENT, "realpath", Some(self), None));
        }
        Ok(())
    }
    /// lstat this path (plus its SELinux context when enabled).
    pub fn get_attr(&self) -> OsResult<'_, FileAttr> {
        #[allow(unused_mut)]
        let mut attr = FileAttr {
            st: nix::sys::stat::lstat(self).into_os_result("lstat", Some(self), None)?,
            #[cfg(feature = "selinux")]
            con: cstr::buf::new(),
        };
        #[cfg(feature = "selinux")]
        self.get_secontext(&mut attr.con)?;
        Ok(attr)
    }
    /// Apply mode, ownership (and SELinux context) from `attr` to this path,
    /// never following symlinks.
    pub fn set_attr<'a>(&'a self, attr: &'a FileAttr) -> OsResult<'a, ()> {
        // chmod is skipped for symlink attrs: symlink modes are meaningless.
        if !attr.is_symlink()
            && let Err(e) = self.follow_link().chmod((attr.st.st_mode & 0o777).as_())
        {
            // Double check if self is symlink before reporting error
            let self_attr = self.get_attr()?;
            if !self_attr.is_symlink() {
                return Err(e);
            }
        }
        unsafe {
            libc::lchown(self.as_ptr(), attr.st.st_uid, attr.st.st_gid).check_os_err(
                "lchown",
                Some(self),
                None,
            )?;
        }
        #[cfg(feature = "selinux")]
        if !attr.con.is_empty() {
            self.set_secontext(&attr.con)?;
        }
        Ok(())
    }
    /// Read the SELinux context xattr into `con`. A missing xattr (ENODATA)
    /// leaves `con` empty and is not an error.
    pub fn get_secontext(&self, con: &mut dyn Utf8CStrBuf) -> OsResult<'_, ()> {
        con.clear();
        let result = unsafe {
            libc::lgetxattr(
                self.as_ptr(),
                XATTR_NAME_SELINUX.as_ptr(),
                con.as_mut_ptr().cast(),
                con.capacity(),
            )
            .check_err()
        };
        match result {
            Ok(_) => {
                con.rebuild().ok();
                Ok(())
            }
            Err(Errno::ENODATA) => Ok(()),
            Err(e) => Err(OsError::new(e, "lgetxattr", Some(self), None)),
        }
    }
    /// Write the SELinux context xattr (without following symlinks).
    pub fn set_secontext<'a>(&'a self, con: &'a Utf8CStr) -> OsResult<'a, ()> {
        unsafe {
            libc::lsetxattr(
                self.as_ptr(),
                XATTR_NAME_SELINUX.as_ptr(),
                con.as_ptr().cast(),
                // +1: the null terminator is part of the stored value.
                con.len() + 1,
                0,
            )
            .check_os_err("lsetxattr", Some(self), Some(con))
        }
    }
    /// The parent directory as a borrowed substring, or None at the root.
    pub fn parent_dir(&self) -> Option<&str> {
        Path::new(self.as_str())
            .parent()
            .map(Path::as_os_str)
            // SAFETY: all substring of self is valid UTF-8
            .map(|s| unsafe { std::str::from_utf8_unchecked(s.as_bytes()) })
    }
    /// The final path component as a borrowed substring, if any.
    pub fn file_name(&self) -> Option<&str> {
        Path::new(self.as_str())
            .file_name()
            // SAFETY: all substring of self is valid UTF-8
            .map(|s| unsafe { std::str::from_utf8_unchecked(s.as_bytes()) })
    }
    // ln -s target self
    pub fn create_symlink_to<'a>(&'a self, target: &'a Utf8CStr) -> OsResult<'a, ()> {
        nix::unistd::symlinkat(target, AT_FDCWD, self).check_os_err(
            "symlink",
            Some(target),
            Some(self),
        )
    }
    /// mkfifo(3) at this path with `mode`.
    pub fn mkfifo(&self, mode: mode_t) -> OsResult<'_, ()> {
        nix::unistd::mkfifo(self, Mode::from_bits_truncate(mode)).check_os_err(
            "mkfifo",
            Some(self),
            None,
        )
    }
}
// High-level helper methods, composed of multiple operations.
// We should treat these as application logic and log ASAP, so return LoggedResult.
impl Utf8CStr {
    /// Delete this path recursively. A non-existent path is not an error.
    pub fn remove_all(&self) -> LoggedResult<()> {
        let attr = match self.get_attr() {
            Ok(attr) => attr,
            Err(e) => {
                return match e.errno {
                    // Allow calling remove_all on non-existence file
                    Errno::ENOENT => Ok(()),
                    _ => Err(e)?,
                };
            }
        };
        if attr.is_dir() {
            let dir = Directory::open(self)?;
            dir.remove_all()?;
        }
        Ok(self.remove()?)
    }
    /// mkdir -p: create this path and all missing ancestors with `mode`.
    pub fn mkdirs(&self, mode: mode_t) -> LoggedResult<()> {
        if self.is_empty() {
            return Ok(());
        }
        let mut path = cstr::buf::default();
        let mut components = self.split('/').filter(|s| !s.is_empty());
        // Preserve absoluteness: seed the buffer with the root slash.
        if self.starts_with('/') {
            path.append_path("/");
        }
        loop {
            let Some(s) = components.next() else {
                break;
            };
            path.append_path(s);
            // mkdir treats EEXIST as success, so pre-existing ancestors are fine.
            path.mkdir(mode)?;
        }
        *errno() = 0;
        Ok(())
    }
    /// cp -a: copy self to `path` (recursively for directories),
    /// preserving attributes.
    pub fn copy_to(&self, path: &Utf8CStr) -> LoggedResult<()> {
        let attr = self.get_attr()?;
        if attr.is_dir() {
            path.mkdir(0o777)?;
            let mut src = Directory::open(self)?;
            let dest = Directory::open(path)?;
            src.copy_into(&dest)?;
        } else {
            // It's OK if remove failed
            path.remove().ok();
            if attr.is_file() {
                let mut src = self.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC)?;
                let mut dest = path.create(
                    OFlag::O_WRONLY | OFlag::O_CREAT | OFlag::O_TRUNC | OFlag::O_CLOEXEC,
                    0o777,
                )?;
                std::io::copy(&mut src, &mut dest)?;
            } else if attr.is_symlink() {
                // Recreate the symlink with the same target.
                let mut buf = cstr::buf::default();
                self.read_link(&mut buf)?;
                unsafe {
                    libc::symlink(buf.as_ptr(), path.as_ptr()).check_os_err(
                        "symlink",
                        Some(&buf),
                        Some(path),
                    )?;
                }
            }
        }
        path.set_attr(&attr)?;
        Ok(())
    }
    /// mv: rename self to `path`, merging into an existing destination
    /// directory or replacing an existing non-directory.
    pub fn move_to(&self, path: &Utf8CStr) -> LoggedResult<()> {
        if path.exists() {
            let attr = path.get_attr()?;
            if attr.is_dir() {
                let mut src = Directory::open(self)?;
                let dest = Directory::open(path)?;
                return src.move_into(&dest);
            } else {
                path.remove()?;
            }
        }
        self.rename_to(path)?;
        Ok(())
    }
    // ln self path
    /// Hard-link self to `path`; directories are recreated and their
    /// contents hard-linked recursively.
    pub fn link_to(&self, path: &Utf8CStr) -> LoggedResult<()> {
        let attr = self.get_attr()?;
        if attr.is_dir() {
            path.mkdir(0o777)?;
            path.set_attr(&attr)?;
            let mut src = Directory::open(self)?;
            let dest = Directory::open(path)?;
            Ok(src.link_into(&dest)?)
        } else {
            unsafe {
                libc::link(self.as_ptr(), path.as_ptr()).check_os_err(
                    "link",
                    Some(self),
                    Some(path),
                )?;
            }
            Ok(())
        }
    }
}
// Symlink-following counterparts of the Utf8CStr operations above:
// every syscall here dereferences symlinks (access/fchmodat-follow/stat/
// chown/getxattr/setxattr).
impl FsPathFollow {
    /// True if the path exists, following symlinks (access(2) F_OK).
    pub fn exists(&self) -> bool {
        nix::unistd::access(self.as_utf8_cstr(), AccessFlags::F_OK).is_ok()
    }
    /// chmod the final target of the path (symlinks are followed).
    pub fn chmod(&self, mode: mode_t) -> OsResult<'_, ()> {
        nix::sys::stat::fchmodat(
            AT_FDCWD,
            self.as_utf8_cstr(),
            Mode::from_bits_truncate(mode),
            FchmodatFlags::FollowSymlink,
        )
        .check_os_err("chmod", Some(self), None)
    }
    /// stat the path target (plus its SELinux context when enabled).
    pub fn get_attr(&self) -> OsResult<'_, FileAttr> {
        #[allow(unused_mut)]
        let mut attr = FileAttr {
            // Fix: this calls stat(2), not lstat — the error context must
            // name the syscall actually performed.
            st: nix::sys::stat::stat(self.as_utf8_cstr()).into_os_result(
                "stat",
                Some(self),
                None,
            )?,
            #[cfg(feature = "selinux")]
            con: cstr::buf::new(),
        };
        #[cfg(feature = "selinux")]
        self.get_secontext(&mut attr.con)?;
        Ok(attr)
    }
    /// Apply mode, ownership (and SELinux context) from `attr`,
    /// following symlinks.
    pub fn set_attr<'a>(&'a self, attr: &'a FileAttr) -> OsResult<'a, ()> {
        self.chmod((attr.st.st_mode & 0o777).as_())?;
        nix::unistd::chown(
            self.as_utf8_cstr(),
            Some(Uid::from(attr.st.st_uid)),
            Some(Gid::from(attr.st.st_gid)),
        )
        .check_os_err("chown", Some(self), None)?;
        #[cfg(feature = "selinux")]
        if !attr.con.is_empty() {
            self.set_secontext(&attr.con)?;
        }
        Ok(())
    }
    /// Read the SELinux context xattr of the path target into `con`;
    /// a missing xattr (ENODATA) leaves `con` empty and is not an error.
    pub fn get_secontext(&self, con: &mut dyn Utf8CStrBuf) -> OsResult<'_, ()> {
        con.clear();
        let result = unsafe {
            libc::getxattr(
                self.as_ptr(),
                XATTR_NAME_SELINUX.as_ptr(),
                con.as_mut_ptr().cast(),
                con.capacity(),
            )
            .check_err()
        };
        match result {
            Ok(_) => {
                con.rebuild().ok();
                Ok(())
            }
            Err(Errno::ENODATA) => Ok(()),
            Err(e) => Err(OsError::new(e, "getxattr", Some(self), None)),
        }
    }
    /// Write the SELinux context xattr on the path target.
    pub fn set_secontext<'a>(&'a self, con: &'a Utf8CStr) -> OsResult<'a, ()> {
        unsafe {
            libc::setxattr(
                self.as_ptr(),
                XATTR_NAME_SELINUX.as_ptr(),
                con.as_ptr().cast(),
                // +1: the null terminator is part of the stored value.
                con.len() + 1,
                0,
            )
            .check_os_err("setxattr", Some(self), Some(con))
        }
    }
}
/// Path-building helpers on string buffers. `join_path*` are by-value
/// chaining variants of the in-place `append_path*` methods.
pub trait FsPathBuilder {
    fn join_path<T: AsRef<str>>(mut self, path: T) -> Self
    where
        Self: Sized,
    {
        self.append_path(path);
        self
    }
    fn join_path_fmt<T: Display>(mut self, name: T) -> Self
    where
        Self: Sized,
    {
        self.append_path_fmt(name);
        self
    }
    /// Append `path` as a component; an absolute `path` replaces the buffer.
    fn append_path<T: AsRef<str>>(&mut self, path: T) -> &mut Self;
    /// Append "/" followed by the Display output of `name`.
    fn append_path_fmt<T: Display>(&mut self, name: T) -> &mut Self;
}
/// Shared worker for both FsPathBuilder impls: append `path` to `buf`,
/// inserting a separator only when one is needed.
fn append_path_impl(buf: &mut dyn Utf8CStrBuf, path: &str) {
    // An absolute path replaces whatever is in the buffer.
    if path.starts_with('/') {
        buf.clear();
    }
    // Add a separator unless the buffer is empty or already ends with one.
    let needs_separator = !buf.is_empty() && !buf.ends_with('/');
    if needs_separator {
        buf.push_str("/");
    }
    buf.push_str(path);
}
// Two impls are needed: one for sized buffer types and one for the trait
// object, since the blanket impl cannot cover `dyn Utf8CStrBuf`.
impl<S: Utf8CStrBuf + Sized> FsPathBuilder for S {
    fn append_path<T: AsRef<str>>(&mut self, path: T) -> &mut Self {
        append_path_impl(self, path.as_ref());
        self
    }
    fn append_path_fmt<T: Display>(&mut self, name: T) -> &mut Self {
        self.write_fmt(format_args!("/{name}")).ok();
        self
    }
}
impl FsPathBuilder for dyn Utf8CStrBuf + '_ {
    fn append_path<T: AsRef<str>>(&mut self, path: T) -> &mut Self {
        append_path_impl(self, path.as_ref());
        self
    }
    fn append_path_fmt<T: Display>(&mut self, name: T) -> &mut Self {
        self.write_fmt(format_args!("/{name}")).ok();
        self
    }
}
/// fstat an open fd (plus its SELinux context when enabled).
pub fn fd_get_attr(fd: RawFd) -> OsResult<'static, FileAttr> {
    let mut attr = FileAttr::new();
    unsafe {
        libc::fstat(fd, &mut attr.st).check_os_err("fstat", None, None)?;
        #[cfg(feature = "selinux")]
        fd_get_secontext(fd, &mut attr.con)?;
    }
    Ok(attr)
}
/// Apply mode, ownership (and SELinux context) from `attr` to an open fd.
pub fn fd_set_attr(fd: RawFd, attr: &FileAttr) -> OsResult<'_, ()> {
    unsafe {
        libc::fchmod(fd, (attr.st.st_mode & 0o777).as_()).check_os_err("fchmod", None, None)?;
        libc::fchown(fd, attr.st.st_uid, attr.st.st_gid).check_os_err("fchown", None, None)?;
        #[cfg(feature = "selinux")]
        if !attr.con.is_empty() {
            fd_set_secontext(fd, &attr.con)?;
        }
    }
    Ok(())
}
/// Read the SELinux context xattr of an open fd into `con`;
/// a missing xattr (ENODATA) leaves `con` empty and is not an error.
pub fn fd_get_secontext(fd: RawFd, con: &mut dyn Utf8CStrBuf) -> OsResult<'static, ()> {
    con.clear();
    let result = unsafe {
        libc::fgetxattr(
            fd,
            XATTR_NAME_SELINUX.as_ptr(),
            con.as_mut_ptr().cast(),
            con.capacity(),
        )
        .check_err()
    };
    match result {
        Ok(_) => {
            con.rebuild().ok();
            Ok(())
        }
        Err(Errno::ENODATA) => Ok(()),
        Err(e) => Err(OsError::new(e, "fgetxattr", None, None)),
    }
}
/// Write the SELinux context xattr on an open fd.
pub fn fd_set_secontext(fd: RawFd, con: &Utf8CStr) -> OsResult<'_, ()> {
    unsafe {
        libc::fsetxattr(
            fd,
            XATTR_NAME_SELINUX.as_ptr(),
            con.as_ptr().cast(),
            // +1: the null terminator is part of the stored value.
            con.len() + 1,
            0,
        )
        .check_os_err("fsetxattr", Some(con), None)
    }
}
/// Copy mode/owner/context from path `a` onto path `b`.
/// Errors are re-tagged with the path they actually concern.
pub fn clone_attr<'a>(a: &'a Utf8CStr, b: &'a Utf8CStr) -> OsResult<'a, ()> {
    let attr = a.get_attr().map_err(|e| e.set_args(Some(a), None))?;
    b.set_attr(&attr).map_err(|e| e.set_args(Some(b), None))
}
/// Copy mode/owner/context from fd `a` onto fd `b`.
pub fn fclone_attr(a: RawFd, b: RawFd) -> OsResult<'static, ()> {
    let attr = fd_get_attr(a)?;
    fd_set_attr(b, &attr).map_err(|e| e.set_args(None, None))
}
/// An owned mmap(2) region; the 'static slice is valid until Drop unmaps it.
pub struct MappedFile(&'static mut [u8]);
impl MappedFile {
    /// Map the file at `path` read-only (private, copy-on-write).
    pub fn open(path: &Utf8CStr) -> OsResult<'_, MappedFile> {
        Ok(MappedFile(map_file(path, false)?))
    }
    /// Map the file at `path` read-write (shared; writes hit the file).
    pub fn open_rw(path: &Utf8CStr) -> OsResult<'_, MappedFile> {
        Ok(MappedFile(map_file(path, true)?))
    }
    /// Like `open`, resolving `path` relative to `dir`.
    pub fn openat<'a, T: AsFd>(dir: &T, path: &'a Utf8CStr) -> OsResult<'a, MappedFile> {
        Ok(MappedFile(map_file_at(dir.as_fd(), path, false)?))
    }
    /// Like `open_rw`, resolving `path` relative to `dir`.
    pub fn openat_rw<'a, T: AsFd>(dir: &T, path: &'a Utf8CStr) -> OsResult<'a, MappedFile> {
        Ok(MappedFile(map_file_at(dir.as_fd(), path, true)?))
    }
    /// Map `sz` bytes of an already-open fd.
    pub fn create(fd: BorrowedFd, sz: usize, rw: bool) -> OsResult<MappedFile> {
        Ok(MappedFile(map_fd(fd, sz, rw)?))
    }
}
impl AsRef<[u8]> for MappedFile {
    fn as_ref(&self) -> &[u8] {
        self.0
    }
}
impl AsMut<[u8]> for MappedFile {
    fn as_mut(&mut self) -> &mut [u8] {
        self.0
    }
}
impl Drop for MappedFile {
    fn drop(&mut self) {
        // Unmap the region; the 'static slice must not be used after this.
        unsafe {
            libc::munmap(self.0.as_mut_ptr().cast(), self.0.len());
        }
    }
}
unsafe extern "C" {
    // Don't use the declaration from the libc crate as request should be u32 not i32
    fn ioctl(fd: RawFd, request: u32, ...) -> i32;
}
// We mark the returned slice static because it is valid until explicitly unmapped
/// Map the file at `path` (relative to the cwd). See `map_file_at`.
pub(crate) fn map_file(path: &Utf8CStr, rw: bool) -> OsResult<'_, &'static mut [u8]> {
    map_file_at(AT_FDCWD, path, rw)
}
/// Open and mmap the file at `path` relative to `dirfd`. For block devices
/// the mapping length comes from the BLKGETSIZE64 ioctl instead of st_size.
pub(crate) fn map_file_at<'a>(
    dirfd: BorrowedFd,
    path: &'a Utf8CStr,
    rw: bool,
) -> OsResult<'a, &'static mut [u8]> {
    // Must match the kernel ABI values; also asserted on the C++ side.
    #[cfg(target_pointer_width = "64")]
    const BLKGETSIZE64: u32 = 0x80081272;
    #[cfg(target_pointer_width = "32")]
    const BLKGETSIZE64: u32 = 0x80041272;
    let flag = if rw { OFlag::O_RDWR } else { OFlag::O_RDONLY };
    let fd = nix::fcntl::openat(dirfd, path, flag | OFlag::O_CLOEXEC, Mode::empty())
        .into_os_result("openat", Some(path), None)?;
    let attr = fd_get_attr(fd.as_raw_fd())?;
    let sz = if attr.is_block_device() {
        // st_size is meaningless for block devices; query the device size.
        let mut sz = 0_u64;
        unsafe {
            ioctl(fd.as_raw_fd(), BLKGETSIZE64, &mut sz).check_os_err("ioctl", Some(path), None)?;
        }
        sz
    } else {
        attr.st.st_size as u64
    };
    map_fd(fd.as_fd(), sz as usize, rw).map_err(|e| e.set_args(Some(path), None))
}
/// mmap `sz` bytes of `fd`. Always maps PROT_READ | PROT_WRITE; for `rw`
/// mappings MAP_SHARED writes back to the file, otherwise MAP_PRIVATE makes
/// writes copy-on-write and invisible to the file.
pub(crate) fn map_fd(fd: BorrowedFd, sz: usize, rw: bool) -> OsResult<'static, &'static mut [u8]> {
    let flag = if rw {
        libc::MAP_SHARED
    } else {
        libc::MAP_PRIVATE
    };
    unsafe {
        let ptr = libc::mmap(
            ptr::null_mut(),
            sz,
            libc::PROT_READ | libc::PROT_WRITE,
            flag,
            fd.as_raw_fd(),
            0,
        );
        if ptr == libc::MAP_FAILED {
            return Err(OsError::last_os_error("mmap", None, None));
        }
        Ok(slice::from_raw_parts_mut(ptr.cast(), sz))
    }
}
/// One parsed line of /proc/<pid>/mountinfo (see proc(5) for the format).
#[allow(dead_code)]
pub struct MountInfo {
    pub id: u32,
    pub parent: u32,
    // st_dev-style device number built from the major:minor field.
    pub device: u64,
    pub root: String,
    pub target: String,
    pub vfs_option: String,
    // Peer-group ids from the optional "shared:"/"master:" tags (0 if absent).
    pub shared: u32,
    pub master: u32,
    pub propagation_from: u32,
    pub unbindable: bool,
    pub fs_type: String,
    pub source: String,
    pub fs_option: String,
}
/// Parse a single /proc/<pid>/mountinfo line into a MountInfo.
/// Returns None on any malformed or truncated field.
#[allow(clippy::useless_conversion)]
fn parse_mount_info_line(line: &str) -> Option<MountInfo> {
    let mut fields = line.split_whitespace();
    // Fixed-position leading fields: id, parent, major:minor, root, target, opts.
    let id = fields.next()?.parse().ok()?;
    let parent = fields.next()?.parse().ok()?;
    let (major, minor) = fields.next()?.split_once(':')?;
    let device = makedev(major.parse().ok()?, minor.parse().ok()?).into();
    let root = fields.next()?.to_string();
    let target = fields.next()?.to_string();
    let vfs_option = fields.next()?.to_string();
    // Zero or more optional tags, terminated by a literal "-" separator.
    let (mut shared, mut master, mut propagation_from) = (0, 0, 0);
    let mut unbindable = false;
    let mut tag = fields.next()?;
    while tag != "-" {
        if let Some(peer) = tag.strip_prefix("master:") {
            master = peer.parse().ok()?;
        } else if let Some(peer) = tag.strip_prefix("shared:") {
            shared = peer.parse().ok()?;
        } else if let Some(peer) = tag.strip_prefix("propagate_from:") {
            propagation_from = peer.parse().ok()?;
        } else if tag == "unbindable" {
            unbindable = true;
        }
        tag = fields.next()?;
    }
    // Trailing fields after the separator: fs type, source, fs options.
    let fs_type = fields.next()?.to_string();
    let source = fields.next()?.to_string();
    let fs_option = fields.next()?.to_string();
    Some(MountInfo {
        id,
        parent,
        device,
        root,
        target,
        vfs_option,
        shared,
        master,
        propagation_from,
        unbindable,
        fs_type,
        source,
        fs_option,
    })
}
/// Read and parse /proc/<pid>/mountinfo. `pid` may also be "self".
/// Unreadable files or unparsable lines are silently skipped.
pub fn parse_mount_info(pid: &str) -> Vec<MountInfo> {
    let mut res = vec![];
    let mut path = format!("/proc/{pid}/mountinfo");
    if let Ok(file) = Utf8CStr::from_string(&mut path).open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
        BufReader::new(file).for_each_line(|line| {
            // A malformed line returns None and stops iteration early.
            parse_mount_info_line(line)
                .map(|info| res.push(info))
                .is_some()
        });
    }
    res
}

View file

@ -0,0 +1,10 @@
#pragma once
#include "../xwrap.hpp"
#include "../misc.hpp"
#include "../base-rs.hpp"
#include "../files.hpp"
#include "../logging.hpp"
using rust::xpipe2;
using kv_pairs = std::vector<std::pair<std::string, std::string>>;

View file

@ -0,0 +1 @@
../../../external/cxx-rs/include/cxx.h

84
native/src/base/lib.rs Normal file
View file

@ -0,0 +1,84 @@
#![feature(vec_into_raw_parts)]
#![allow(clippy::missing_safety_doc)]
pub use {const_format, libc, nix};
pub use cstr::{
FsPathFollow, StrErr, Utf8CStr, Utf8CStrBuf, Utf8CStrBufArr, Utf8CStrBufRef, Utf8CString,
};
use cxx_extern::*;
pub use derive;
pub use dir::*;
pub use ffi::{Utf8CStrRef, fork_dont_care, set_nice_name};
pub use files::*;
pub use logging::*;
pub use misc::*;
pub use result::*;
pub mod argh;
pub mod cstr;
mod cxx_extern;
mod dir;
mod files;
mod logging;
mod misc;
mod mount;
mod result;
mod xwrap;
// C++ interop boundary generated by cxx; the item signatures here must match
// the C++ declarations exactly, so keep them byte-stable.
#[cxx::bridge]
mod ffi {
    // Mirrors the Rust LogLevel; exposed to C++ as "LogLevel".
    #[derive(Copy, Clone)]
    #[repr(i32)]
    #[cxx_name = "LogLevel"]
    pub(crate) enum LogLevelCxx {
        Error,
        Warn,
        Info,
        Debug,
    }
    unsafe extern "C++" {
        include!("misc.hpp");
        #[cxx_name = "Utf8CStr"]
        type Utf8CStrRef<'a> = &'a crate::cstr::Utf8CStr;
        fn mut_u8_patch(buf: &mut [u8], from: &[u8], to: &[u8]) -> Vec<usize>;
        fn fork_dont_care() -> i32;
        fn set_nice_name(name: Utf8CStrRef);
        // Opaque C++ callables bridged for the parse/readline callbacks below.
        type FnBoolStrStr;
        fn call(self: &FnBoolStrStr, key: &str, value: &str) -> bool;
        type FnBoolStr;
        fn call(self: &FnBoolStr, key: Utf8CStrRef) -> bool;
    }
    extern "Rust" {
        #[cxx_name = "log_with_rs"]
        fn log_from_cxx(level: LogLevelCxx, msg: Utf8CStrRef);
        fn cmdline_logging();
        fn parse_prop_file_rs(name: Utf8CStrRef, f: &FnBoolStrStr);
        #[cxx_name = "file_readline"]
        fn file_readline_for_cxx(fd: i32, f: &FnBoolStr);
    }
    #[namespace = "rust"]
    extern "Rust" {
        fn xpipe2(fds: &mut [i32; 2], flags: i32) -> i32;
        // Exposed under the rust:: namespace on the C++ side.
        #[cxx_name = "map_file"]
        fn map_file_for_cxx(path: Utf8CStrRef, rw: bool) -> &'static mut [u8];
        #[cxx_name = "map_file_at"]
        fn map_file_at_for_cxx(fd: i32, path: Utf8CStrRef, rw: bool) -> &'static mut [u8];
        #[cxx_name = "map_fd"]
        fn map_fd_for_cxx(fd: i32, sz: usize, rw: bool) -> &'static mut [u8];
    }
}
// In Rust, we do not want to deal with raw pointers, so we change the
// signature of all *mut c_void to usize for new_daemon_thread.
pub type ThreadEntry = extern "C" fn(usize) -> usize;
unsafe extern "C" {
    // Implemented outside this crate (C/C++ side); spawns a detached thread
    // running `entry(arg)`.
    pub fn new_daemon_thread(entry: ThreadEntry, arg: usize);
}

View file

@ -0,0 +1,10 @@
#pragma once
#include <cerrno>
#include <cstdarg>
// printf-style loggers, one per level (Debug/Info/Warn/Error); format
// strings are compiler-checked via __printflike.
void LOGD(const char *fmt, ...) __printflike(1, 2);
void LOGI(const char *fmt, ...) __printflike(1, 2);
void LOGW(const char *fmt, ...) __printflike(1, 2);
void LOGE(const char *fmt, ...) __printflike(1, 2);
// perror-style: log an error with the current errno and its description.
#define PLOGE(fmt, args...) LOGE(fmt " failed with %d: %s\n", ##args, errno, std::strerror(errno))

149
native/src/base/logging.rs Normal file
View file

@ -0,0 +1,149 @@
use crate::ffi::LogLevelCxx;
use crate::{Utf8CStr, cstr};
use bitflags::bitflags;
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::FromPrimitive;
use std::fmt;
use std::io::{Write, stderr, stdout};
use std::process::exit;
bitflags! {
    // Logger state bits: DISABLE_* suppress a level; EXIT_ON_ERROR makes
    // any error-level log terminate the process.
    #[derive(Copy, Clone)]
    struct LogFlag : u32 {
        const DISABLE_ERROR = 1 << 0;
        const DISABLE_WARN = 1 << 1;
        const DISABLE_INFO = 1 << 2;
        const DISABLE_DEBUG = 1 << 3;
        const EXIT_ON_ERROR = 1 << 4;
    }
}
/// Log severity; discriminants mirror the cxx-bridged LogLevelCxx so values
/// can cross the FFI boundary unchanged.
#[derive(Copy, Clone, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum LogLevel {
    Error = LogLevelCxx::Error.repr,
    Warn = LogLevelCxx::Warn.repr,
    Info = LogLevelCxx::Info.repr,
    Debug = LogLevelCxx::Debug.repr,
}
// We don't need to care about thread safety, because all
// logger changes will only happen on the main thread.
pub static mut LOGGER: Logger = Logger {
    // Default sink discards everything until a real writer is installed.
    write: |_, _| {},
    flags: LogFlag::empty(),
};
type LogWriter = fn(level: LogLevel, msg: &Utf8CStr);
pub(crate) type Formatter<'a> = &'a mut dyn fmt::Write;
/// The process-wide logger: an output function plus enable/exit flags.
#[derive(Copy, Clone)]
pub struct Logger {
    pub write: LogWriter,
    flags: LogFlag,
}
/// Mutate the global logger via copy-modify-write.
/// Only safe from the main thread (see the comment on LOGGER).
pub fn update_logger(f: impl FnOnce(&mut Logger)) {
    let mut logger = unsafe { LOGGER };
    f(&mut logger);
    unsafe {
        LOGGER = logger;
    }
}
/// Toggle process exit on error-level logs.
pub fn exit_on_error(b: bool) {
    update_logger(|logger| logger.flags.set(LogFlag::EXIT_ON_ERROR, b));
}
impl LogLevel {
    /// The LogFlag bit that, when set, suppresses this level.
    fn as_disable_flag(&self) -> LogFlag {
        match self {
            LogLevel::Error => LogFlag::DISABLE_ERROR,
            LogLevel::Warn => LogFlag::DISABLE_WARN,
            LogLevel::Info => LogFlag::DISABLE_INFO,
            LogLevel::Debug => LogFlag::DISABLE_DEBUG,
        }
    }
}
/// Enable or disable logging for a single level.
pub fn set_log_level_state(level: LogLevel, enabled: bool) {
    // The flag is a *disable* bit (log_with_writer drops messages whose
    // disable flag is set), so it must hold the inverse of `enabled`.
    // Passing `enabled` directly inverted the semantics: enabling a level
    // would suppress it.
    update_logger(|logger| logger.flags.set(level.as_disable_flag(), !enabled));
}
/// Core dispatch: skip disabled levels, hand the writer to `f`, and honor
/// EXIT_ON_ERROR after an error-level message is emitted.
fn log_with_writer<F: FnOnce(LogWriter)>(level: LogLevel, f: F) {
    let logger = unsafe { LOGGER };
    if logger.flags.contains(level.as_disable_flag()) {
        return;
    }
    f(logger.write);
    if matches!(level, LogLevel::Error) && logger.flags.contains(LogFlag::EXIT_ON_ERROR) {
        exit(-1);
    }
}
/// FFI entry from C++ (bridged as log_with_rs); unknown levels are ignored.
pub fn log_from_cxx(level: LogLevelCxx, msg: &Utf8CStr) {
    if let Some(level) = LogLevel::from_i32(level.repr) {
        log_with_writer(level, |write| write(level, msg));
    }
}
/// Format a message into a stack buffer via `f`, then emit it.
/// Backs the error!/warn!/info!/debug! macros.
pub fn log_with_formatter<F: FnOnce(Formatter) -> fmt::Result>(level: LogLevel, f: F) {
    log_with_writer(level, |write| {
        let mut buf = cstr::buf::default();
        f(&mut buf).ok();
        write(level, &buf);
    });
}
/// Install a writer that routes Info to stdout and all other levels to stderr.
pub fn cmdline_logging() {
    fn cmdline_write(level: LogLevel, msg: &Utf8CStr) {
        if matches!(level, LogLevel::Info) {
            stdout().write_all(msg.as_bytes()).ok();
        } else {
            stderr().write_all(msg.as_bytes()).ok();
        }
    }
    update_logger(|logger| logger.write = cmdline_write);
}
// Log a formatted line at an explicit level.
#[macro_export]
macro_rules! log_with_args {
    ($level:expr, $($args:tt)+) => {
        $crate::log_with_formatter($level, |w| writeln!(w, $($args)+))
    }
}
// Per-level convenience macros; each appends a trailing newline.
#[macro_export]
macro_rules! error {
    ($($args:tt)+) => {
        $crate::log_with_formatter($crate::LogLevel::Error, |w| writeln!(w, $($args)+))
    }
}
#[macro_export]
macro_rules! warn {
    ($($args:tt)+) => {
        $crate::log_with_formatter($crate::LogLevel::Warn, |w| writeln!(w, $($args)+))
    }
}
#[macro_export]
macro_rules! info {
    ($($args:tt)+) => {
        $crate::log_with_formatter($crate::LogLevel::Info, |w| writeln!(w, $($args)+))
    }
}
#[cfg(debug_assertions)]
#[macro_export]
macro_rules! debug {
    ($($args:tt)+) => {
        $crate::log_with_formatter($crate::LogLevel::Debug, |w| writeln!(w, $($args)+))
    }
}
// In release builds, debug! compiles to nothing (arguments not evaluated).
#[cfg(not(debug_assertions))]
#[macro_export]
macro_rules! debug {
    ($($args:tt)+) => {};
}

250
native/src/base/misc.hpp Normal file
View file

@ -0,0 +1,250 @@
#pragma once
#include <pthread.h>
#include <string>
#include <functional>
#include <string_view>
#include <bitset>
#include <rust/cxx.h>
#include "xwrap.hpp"
// Delete both copy and move operations (object is pinned in place).
#define DISALLOW_COPY_AND_MOVE(clazz) \
clazz(const clazz&) = delete; \
clazz(clazz &&) = delete;
// Move-only semantics implemented via swap with a default-constructed
// instance; the class must provide swap() and a default constructor.
#define ALLOW_MOVE_ONLY(clazz) \
clazz(const clazz&) = delete; \
clazz(clazz &&o) : clazz() { swap(o); } \
clazz& operator=(clazz &&o) { swap(o); return *this; }
struct Utf8CStr;
// Scoped pthread mutex lock: acquires the mutex on construction and
// releases it on destruction, unless unlock() was called beforehand.
class mutex_guard {
    DISALLOW_COPY_AND_MOVE(mutex_guard)
public:
    explicit mutex_guard(pthread_mutex_t &m): mtx(&m) {
        pthread_mutex_lock(mtx);
    }
    // Release the lock early; the destructor then becomes a no-op.
    void unlock() {
        pthread_mutex_unlock(mtx);
        mtx = nullptr;
    }
    ~mutex_guard() {
        if (mtx != nullptr)
            pthread_mutex_unlock(mtx);
    }
private:
    pthread_mutex_t *mtx;
};
// Minimal scope-exit guard: runs the stored callable when destroyed.
template <class Func>
class run_finally {
    DISALLOW_COPY_AND_MOVE(run_finally)
public:
    explicit run_finally(Func &&action) : action(std::move(action)) {}
    ~run_finally() { action(); }
private:
    Func action;
};
// Allocate a default-constructed T into a raw out-pointer.
template<class T>
static void default_new(T *&p) { p = new T(); }
// Overload: allocate a default-constructed T into a unique_ptr.
template<class T>
static void default_new(std::unique_ptr<T> &p) { p.reset(new T()); }
// Transparent comparator: lets containers keyed by std::string be queried
// with string_view / const char* without constructing a temporary string.
struct StringCmp {
using is_transparent = void;
bool operator()(std::string_view a, std::string_view b) const { return a < b; }
};
struct heap_data;
// Rust slice types from the cxx bridge.
using ByteSlice = rust::Slice<const uint8_t>;
using MutByteSlice = rust::Slice<uint8_t>;
// Interchangeable as `&[u8]` in Rust.
// Non-owning, read-only view over a byte buffer (pointer + size).
struct byte_view {
byte_view() : _buf(nullptr), _sz(0) {}
byte_view(const void *buf, size_t sz) : _buf((uint8_t *) buf), _sz(sz) {}
// byte_view, or any of its subclasses, can be copied as byte_view
byte_view(const byte_view &o) : _buf(o._buf), _sz(o._sz) {}
// Transparent conversion to Rust slice
byte_view(const ByteSlice o) : byte_view(o.data(), o.size()) {}
operator ByteSlice() const { return {_buf, _sz}; }
// String as bytes, including null terminator
byte_view(const char *s) : byte_view(s, strlen(s) + 1) {}
const uint8_t *data() const { return _buf; }
size_t size() const { return _sz; }
// Implemented elsewhere (Rust side via cxx).
bool contains(byte_view pattern) const;
bool operator==(byte_view rhs) const;
protected:
uint8_t *_buf;
size_t _sz;
};
// Interchangeable as `&mut [u8]` in Rust.
// Mutable, still non-owning view over a byte buffer.
struct byte_data : public byte_view {
byte_data() = default;
byte_data(void *buf, size_t sz) : byte_view(buf, sz) {}
// byte_data, or any of its subclasses, can be copied as byte_data
byte_data(const byte_data &o) : byte_data(o._buf, o._sz) {}
// Transparent conversion to Rust slice
byte_data(const MutByteSlice o) : byte_data(o.data(), o.size()) {}
operator MutByteSlice() const { return {_buf, _sz}; }
// Keep the const overload from byte_view visible alongside this one.
using byte_view::data;
uint8_t *data() const { return _buf; }
void swap(byte_data &o);
// Replace occurrences of `from` with `to`; returns match offsets
// (implemented elsewhere, bridged to Rust).
rust::Vec<size_t> patch(byte_view from, byte_view to) const;
};
// Owning, move-only byte buffer: zero-initialized via calloc, freed on
// destruction. Relies on free(nullptr) being a no-op for the default state.
struct heap_data : public byte_data {
ALLOW_MOVE_ONLY(heap_data)
heap_data() = default;
explicit heap_data(size_t sz) : byte_data(calloc(sz, 1), sz) {}
~heap_data() { free(_buf); }
};
// RAII wrapper around a raw file descriptor: closes it on destruction.
struct owned_fd {
    ALLOW_MOVE_ONLY(owned_fd)

    owned_fd() : fd(-1) {}
    owned_fd(int fd) : fd(fd) {}
    // Only close valid descriptors: calling close(-1) on a default
    // constructed or released owned_fd is a spurious EBADF syscall
    // and can clobber errno during unwinding.
    ~owned_fd() { if (fd >= 0) close(fd); fd = -1; }

    operator int() { return fd; }
    // Give up ownership: return the descriptor and reset to -1.
    int release() { int f = fd; fd = -1; return f; }
    void swap(owned_fd &owned) { std::swap(fd, owned.fd); }

private:
    int fd;
};
// Replace all occurrences of `from` with `to` inside buf; returns offsets
// of the patched locations (implemented on the Rust side).
rust::Vec<size_t> mut_u8_patch(MutByteSlice buf, ByteSlice from, ByteSlice to);
// String-to-number parsers (implemented elsewhere).
uint32_t parse_uint32_hex(std::string_view s);
int parse_int(std::string_view s);
// Spawn a detached thread running entry(arg).
using thread_entry = void *(*)(void *);
extern "C" int new_daemon_thread(thread_entry entry, void *arg = nullptr);
// Strip trailing whitespace and NUL bytes from the string in place,
// returning it as an rvalue so call sites can keep chaining.
static inline std::string rtrim(std::string &&s) {
    while (!s.empty()) {
        unsigned char last = static_cast<unsigned char>(s.back());
        if (!std::isspace(last) && last != '\0')
            break;
        s.pop_back();
    }
    return std::move(s);
}
// Process helpers (implemented elsewhere).
int fork_dont_care();
int fork_no_orphan();
void init_argv0(int argc, char **argv);
void set_nice_name(Utf8CStr name);
// Switch into the mount namespace of the given pid.
int switch_mnt_ns(int pid);
// String helpers (implemented elsewhere).
std::string &replace_all(std::string &str, std::string_view from, std::string_view to);
std::vector<std::string> split(std::string_view s, std::string_view delims);
// Similar to vsnprintf, but the return value is the written number of bytes
__printflike(3, 0) int vssprintf(char *dest, size_t size, const char *fmt, va_list ap);
// Similar to snprintf, but the return value is the written number of bytes
__printflike(3, 4) int ssprintf(char *dest, size_t size, const char *fmt, ...);
// This is not actually the strscpy from the Linux kernel.
// Silently truncates, and returns the number of bytes written.
extern "C" size_t strscpy(char *dest, const char *src, size_t size);
// Ban usage of unsafe cstring functions
#define vsnprintf __use_vssprintf_instead__
#define snprintf __use_ssprintf_instead__
#define strlcpy __use_strscpy_instead__
// Options for the exec_command* helpers below.
struct exec_t {
// NOTE(review): semantics of err/fd are defined by exec_command's
// implementation (not visible here) — presumably err redirects stderr
// and fd carries a pipe/output descriptor; -2 looks like "unset". Confirm.
bool err = false;
int fd = -2;
// Hook invoked in the child before exec, if set.
void (*pre_exec)() = nullptr;
// Fork strategy; defaults to the logging xfork wrapper.
int (*fork)() = xfork;
// NULL-terminated argument vector.
const char **argv = nullptr;
};
// Run exec.argv asynchronously with the given options (implemented elsewhere).
int exec_command(exec_t &exec);
// Convenience overload: build the NULL-terminated argv from the arguments.
template <class ...Args>
int exec_command(exec_t &exec, Args &&...args) {
const char *argv[] = {args..., nullptr};
exec.argv = argv;
return exec_command(exec);
}
// Run exec.argv and wait for completion (implemented elsewhere).
int exec_command_sync(exec_t &exec);
// Convenience overload: build argv from the arguments, then run and wait.
template <class ...Args>
int exec_command_sync(exec_t &exec, Args &&...args) {
const char *argv[] = {args..., nullptr};
exec.argv = argv;
return exec_command_sync(exec);
}
// Run the given command with default options and wait for completion.
template <class ...Args>
int exec_command_sync(Args &&...args) {
exec_t exec;
return exec_command_sync(exec, args...);
}
// Fire-and-forget: run the command in a fully detached child.
template <class ...Args>
void exec_command_async(Args &&...args) {
const char *argv[] = {args..., nullptr};
exec_t exec {
.fork = fork_dont_care,
.argv = argv,
};
exec_command(exec);
}
// Unary + on any enum yields its underlying integral value,
// e.g. +SomeEnum::Value instead of a verbose static_cast.
template <typename E>
constexpr auto operator+(E value) noexcept ->
    std::enable_if_t<std::is_enum_v<E>, std::underlying_type_t<E>> {
    return static_cast<std::underlying_type_t<E>>(value);
}
// C++ mirror of the Rust Utf8CStr borrowed string type. The two-word repr
// is opaque here; data()/length() are implemented on the Rust side.
struct Utf8CStr {
const char *data() const;
// NOTE(review): constructors pass strlen(s) + 1 (including the NUL) as
// len; whether length() reports the NUL is decided by the Rust impl —
// confirm before relying on size()/sv() boundaries.
size_t length() const;
Utf8CStr(const char *s, size_t len);
Utf8CStr() : Utf8CStr("", 1) {};
Utf8CStr(const Utf8CStr &o) = default;
Utf8CStr(const char *s) : Utf8CStr(s, strlen(s) + 1) {};
// NOTE(review): takes std::string by value and borrows s.data(); if the
// Rust side stores the pointer rather than copying, this dangles once
// the parameter dies — verify the Rust constructor copies.
Utf8CStr(std::string s) : Utf8CStr(s.data(), s.length() + 1) {};
const char *c_str() const { return this->data(); }
size_t size() const { return this->length(); }
bool empty() const { return this->length() == 0 ; }
std::string_view sv() const { return {data(), length()}; }
operator std::string_view() const { return sv(); }
bool operator==(std::string_view rhs) const { return sv() == rhs; }
private:
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
// Opaque storage matching the Rust layout (pointer + length sized).
std::array<std::uintptr_t, 2> repr;
#pragma clang diagnostic pop
};
// Bindings for std::function to be callable from Rust.
// cxx cannot call operator() directly, so each wrapper exposes a named
// call() method that Rust invokes through the bridge.
using CxxFnBoolStrStr = std::function<bool(rust::Str, rust::Str)>;
struct FnBoolStrStr : public CxxFnBoolStrStr {
using CxxFnBoolStrStr::function;
bool call(rust::Str a, rust::Str b) const {
return operator()(a, b);
}
};
using CxxFnBoolStr = std::function<bool(Utf8CStr)>;
struct FnBoolStr : public CxxFnBoolStr {
using CxxFnBoolStr::function;
bool call(Utf8CStr s) const {
return operator()(s);
}
};

296
native/src/base/misc.rs Normal file
View file

@ -0,0 +1,296 @@
use super::argh::{EarlyExit, MissingRequirements};
use crate::{Utf8CStr, Utf8CString, cstr, ffi};
use libc::c_char;
use std::fmt::Arguments;
use std::io::Write;
use std::mem::ManuallyDrop;
use std::process::exit;
use std::sync::Arc;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::{fmt, slice, str};
// Mutable reference to the calling thread's errno value (bionic __errno).
pub fn errno() -> &'static mut i32 {
unsafe { &mut *libc::__errno() }
}
/// Build a shared slice from a raw pointer and length.
/// When `len` is 0, `buf` may be null or dangling — an empty slice is
/// returned without touching the pointer.
///
/// SAFETY: for non-zero `len`, the caller must guarantee `buf` points to
/// `len` valid, initialized elements that outlive lifetime `'a`.
#[inline]
pub unsafe fn slice_from_ptr<'a, T>(buf: *const T, len: usize) -> &'a [T] {
    if len == 0 {
        return &[];
    }
    unsafe { slice::from_raw_parts(buf, len) }
}
/// Mutable counterpart of `slice_from_ptr`.
/// When `len` is 0, `buf` may be null or dangling — an empty slice is
/// returned without touching the pointer.
///
/// SAFETY: for non-zero `len`, the caller must guarantee `buf` points to
/// `len` valid elements with no other aliasing references for `'a`.
#[inline]
pub unsafe fn slice_from_ptr_mut<'a, T>(buf: *mut T, len: usize) -> &'a mut [T] {
    if len == 0 {
        return &mut [];
    }
    unsafe { slice::from_raw_parts_mut(buf, len) }
}
// Substring search over anything byte-slice-like, backed by libc memmem.
pub trait BytesExt {
// Offset of the first occurrence of needle, if any.
fn find(&self, needle: &[u8]) -> Option<usize>;
fn contains(&self, needle: &[u8]) -> bool {
self.find(needle).is_some()
}
}
impl<T: AsRef<[u8]> + ?Sized> BytesExt for T {
fn find(&self, needle: &[u8]) -> Option<usize> {
// Monomorphization-friendly inner fn: the real work is non-generic.
fn inner(haystack: &[u8], needle: &[u8]) -> Option<usize> {
unsafe {
// SAFETY: both pointers/lengths come from valid slices.
let ptr: *const u8 = libc::memmem(
haystack.as_ptr().cast(),
haystack.len(),
needle.as_ptr().cast(),
needle.len(),
)
.cast();
if ptr.is_null() {
None
} else {
// memmem returns a pointer inside haystack, so the offset fits.
Some(ptr.offset_from(haystack.as_ptr()) as usize)
}
}
}
inner(self.as_ref(), needle)
}
}
// In-place byte patching over anything mutable-byte-slice-like.
pub trait MutBytesExt {
// Replace occurrences of `from` with `to`; returns patch offsets.
fn patch(&mut self, from: &[u8], to: &[u8]) -> Vec<usize>;
}
impl<T: AsMut<[u8]> + ?Sized> MutBytesExt for T {
fn patch(&mut self, from: &[u8], to: &[u8]) -> Vec<usize> {
// Delegates to the C++ implementation through the cxx bridge.
ffi::mut_u8_patch(self.as_mut(), from, to)
}
}
// Handling for argh's EarlyExit: print help (and the error, if any),
// then terminate the process with the appropriate exit code.
pub trait EarlyExitExt<T> {
fn on_early_exit<F: FnOnce()>(self, print_help_msg: F) -> T;
}
impl<T> EarlyExitExt<T> for Result<T, EarlyExit> {
fn on_early_exit<F: FnOnce()>(self, print_help_msg: F) -> T {
match self {
Ok(t) => t,
Err(EarlyExit { output, is_help }) => {
if is_help {
// --help requested: print usage and exit successfully.
print_help_msg();
exit(0)
} else {
// Parse error: show the error, then usage, then fail.
eprintln!("{output}");
print_help_msg();
exit(1)
}
}
}
}
}
// Helper for consuming positional CLI arguments in order, producing
// argh-style EarlyExit errors for missing or extra arguments.
pub struct PositionalArgParser<'a>(pub slice::Iter<'a, &'a str>);
impl PositionalArgParser<'_> {
// Next argument, or an EarlyExit naming the missing field.
pub fn required(&mut self, field_name: &'static str) -> Result<Utf8CString, EarlyExit> {
if let Some(next) = self.0.next() {
Ok((*next).into())
} else {
let mut missing = MissingRequirements::default();
missing.missing_positional_arg(field_name);
// err_on_any always errors here since we just recorded one.
missing.err_on_any()?;
unreachable!()
}
}
// Next argument if present.
pub fn optional(&mut self) -> Option<Utf8CString> {
self.0.next().map(|s| (*s).into())
}
// Required argument that must also be the last one.
pub fn last_required(&mut self, field_name: &'static str) -> Result<Utf8CString, EarlyExit> {
let r = self.required(field_name)?;
self.ensure_end()?;
Ok(r)
}
// Optional argument that, if present, must be the last one.
pub fn last_optional(&mut self) -> Result<Option<Utf8CString>, EarlyExit> {
let r = self.optional();
if r.is_none() {
return Ok(r);
}
self.ensure_end()?;
Ok(r)
}
// Error out if any arguments remain unconsumed.
fn ensure_end(&mut self) -> Result<(), EarlyExit> {
if self.0.len() == 0 {
Ok(())
} else {
Err(EarlyExit::from(format!(
"Unrecognized argument: {}\n",
self.0.next().unwrap()
)))
}
}
}
/// Adapts any `std::io::Write` sink into a `std::fmt::Write` target,
/// mapping every I/O failure onto `fmt::Error`.
pub struct FmtAdaptor<'a, T>(pub &'a mut T)
where
    T: Write;

impl<T: Write> fmt::Write for FmtAdaptor<'_, T> {
    fn write_str(&mut self, text: &str) -> fmt::Result {
        match self.0.write_all(text.as_bytes()) {
            Ok(()) => Ok(()),
            Err(_) => Err(fmt::Error),
        }
    }

    // Forward format args directly so the sink can stream them.
    fn write_fmt(&mut self, args: Arguments<'_>) -> fmt::Result {
        match self.0.write_fmt(args) {
            Ok(()) => Ok(()),
            Err(_) => Err(fmt::Error),
        }
    }
}
// An Arc<T> slot that can be swapped atomically. The stored pointer always
// originates from Arc::into_raw and owns one strong reference.
pub struct AtomicArc<T> {
ptr: AtomicPtr<T>,
}
impl<T> AtomicArc<T> {
pub fn new(arc: Arc<T>) -> AtomicArc<T> {
let raw = Arc::into_raw(arc);
Self {
ptr: AtomicPtr::new(raw as *mut _),
}
}
// Clone out the current value. ManuallyDrop prevents releasing the
// strong count owned by the slot itself; only the clone is returned.
// NOTE(review): between the load and the clone another thread could
// swap+drop the Arc — confirm external synchronization covers this.
pub fn load(&self) -> Arc<T> {
let raw = self.ptr.load(Ordering::Acquire);
// SAFETY: the raw pointer is always created from Arc::into_raw
let arc = ManuallyDrop::new(unsafe { Arc::from_raw(raw) });
ManuallyDrop::into_inner(arc.clone())
}
// Swap in a new raw pointer, returning ownership of the previous Arc.
fn swap_ptr(&self, raw: *const T) -> Arc<T> {
let prev = self.ptr.swap(raw as *mut _, Ordering::AcqRel);
// SAFETY: the raw pointer is always created from Arc::into_raw
unsafe { Arc::from_raw(prev) }
}
// Replace the stored Arc, returning the old one.
pub fn swap(&self, arc: Arc<T>) -> Arc<T> {
let raw = Arc::into_raw(arc);
self.swap_ptr(raw)
}
pub fn store(&self, arc: Arc<T>) {
// Drop the previous value
let _ = self.swap(arc);
}
}
impl<T> Drop for AtomicArc<T> {
fn drop(&mut self) {
// Drop the internal value
// NOTE(review): this leaves a null pointer behind, which is fine only
// because the slot is never used again after drop.
let _ = self.swap_ptr(std::ptr::null());
}
}
impl<T: Default> Default for AtomicArc<T> {
fn default() -> Self {
Self::new(Default::default())
}
}
/// Splits an incoming byte stream into fixed-size chunks, buffering any
/// partial chunk across calls to `add_data`.
pub struct Chunker {
    // Backing buffer, always at least `chunk_size` bytes long
    chunk: Box<[u8]>,
    chunk_size: usize,
    // Number of buffered bytes currently stored at the front of `chunk`
    pos: usize,
}

impl Chunker {
    pub fn new(chunk_size: usize) -> Self {
        Chunker {
            // Zero-initialize the buffer. The previous implementation used
            // Box::new_uninit_slice(..).assume_init(), but materializing
            // uninitialized integers is undefined behavior per MaybeUninit's
            // documentation; zeroing is cheap and removes the unsafe block.
            chunk: vec![0u8; chunk_size].into_boxed_slice(),
            chunk_size,
            pos: 0,
        }
    }

    /// Change the chunk size, discarding any buffered partial chunk.
    /// The backing buffer is only reallocated when it needs to grow.
    pub fn set_chunk_size(&mut self, chunk_size: usize) {
        self.chunk_size = chunk_size;
        self.pos = 0;
        if self.chunk.len() < chunk_size {
            self.chunk = vec![0u8; chunk_size].into_boxed_slice();
        }
    }

    // Returns (remaining buf, Option<Chunk>)
    pub fn add_data<'a, 'b: 'a>(&'a mut self, mut buf: &'b [u8]) -> (&'b [u8], Option<&'a [u8]>) {
        let mut chunk = None;
        if self.pos > 0 {
            // Try to fill the partially buffered chunk first
            let len = std::cmp::min(self.chunk_size - self.pos, buf.len());
            self.chunk[self.pos..self.pos + len].copy_from_slice(&buf[..len]);
            self.pos += len;
            // If the chunk is filled, consume it
            if self.pos == self.chunk_size {
                chunk = Some(&self.chunk[..self.chunk_size]);
                self.pos = 0;
            }
            buf = &buf[len..];
        } else if buf.len() >= self.chunk_size {
            // Buffer is empty and buf holds a full chunk: borrow it directly
            chunk = Some(&buf[..self.chunk_size]);
            buf = &buf[self.chunk_size..];
        } else {
            // Less than one chunk and nothing buffered: stash it all
            // (self.pos is 0 on this path)
            self.chunk[self.pos..self.pos + buf.len()].copy_from_slice(buf);
            self.pos += buf.len();
            return (&[], None);
        }
        (buf, chunk)
    }

    /// Drain and return whatever partial data is currently buffered.
    pub fn get_available(&mut self) -> &[u8] {
        let chunk = &self.chunk[..self.pos];
        self.pos = 0;
        chunk
    }
}
// Process argv captured as 'static string slices for argument parsing.
pub struct CmdArgs(pub Vec<&'static str>);
impl CmdArgs {
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn new(argc: i32, argv: *const *const c_char) -> CmdArgs {
CmdArgs(
// SAFETY: libc guarantees argc and argv are properly setup and are static
unsafe { slice::from_raw_parts(argv, argc as usize) }
.iter()
.map(|s| unsafe { Utf8CStr::from_ptr(*s) })
// Non-UTF-8 arguments are replaced with a placeholder.
.map(|r| r.unwrap_or(cstr!("<invalid>")))
.map(Utf8CStr::as_str)
.collect(),
)
}
pub fn as_slice(&self) -> &[&'static str] {
self.0.as_slice()
}
pub fn iter(&self) -> slice::Iter<'_, &'static str> {
self.0.iter()
}
// Re-expose each argument as a Utf8CStr, relying on the original
// argv strings being NUL terminated right after s.len() bytes.
pub fn cstr_iter(&self) -> impl Iterator<Item = &'static Utf8CStr> {
// SAFETY: libc guarantees null terminated strings
self.0
.iter()
.map(|s| unsafe { Utf8CStr::from_raw_parts(s.as_ptr().cast(), s.len() + 1) })
}
}

84
native/src/base/mount.rs Normal file
View file

@ -0,0 +1,84 @@
use crate::{LibcReturn, OsResult, Utf8CStr};
use nix::mount::{MntFlags, MsFlags, mount, umount2};
// Mount-related helpers on path strings, thin wrappers over nix::mount
// that attach operation names/paths to errors via check_os_err.
impl Utf8CStr {
// Bind mount self onto `path`; `rec` adds MS_REC for recursive binds.
pub fn bind_mount_to<'a>(&'a self, path: &'a Utf8CStr, rec: bool) -> OsResult<'a, ()> {
let flag = if rec {
MsFlags::MS_REC
} else {
MsFlags::empty()
};
mount(
Some(self),
path,
None::<&Utf8CStr>,
flag | MsFlags::MS_BIND,
None::<&Utf8CStr>,
)
.check_os_err("bind_mount", Some(self), Some(path))
}
// Remount self as a bind mount point with the extra flags.
pub fn remount_mount_point_flags(&self, flags: MsFlags) -> OsResult<'_, ()> {
mount(
None::<&Utf8CStr>,
self,
None::<&Utf8CStr>,
MsFlags::MS_BIND | MsFlags::MS_REMOUNT | flags,
None::<&Utf8CStr>,
)
.check_os_err("remount", Some(self), None)
}
// Remount self (non-bind) with the extra flags.
pub fn remount_mount_flags(&self, flags: MsFlags) -> OsResult<'_, ()> {
mount(
None::<&Utf8CStr>,
self,
None::<&Utf8CStr>,
MsFlags::MS_REMOUNT | flags,
None::<&Utf8CStr>,
)
.check_os_err("remount", Some(self), None)
}
// Remount self passing filesystem-specific option data.
pub fn remount_with_data(&self, data: &Utf8CStr) -> OsResult<'_, ()> {
mount(
None::<&Utf8CStr>,
self,
None::<&Utf8CStr>,
MsFlags::MS_REMOUNT,
Some(data),
)
.check_os_err("remount", Some(self), None)
}
// Move the mount at self to `path` (MS_MOVE).
pub fn move_mount_to<'a>(&'a self, path: &'a Utf8CStr) -> OsResult<'a, ()> {
mount(
Some(self),
path,
None::<&Utf8CStr>,
MsFlags::MS_MOVE,
None::<&Utf8CStr>,
)
.check_os_err("move_mount", Some(self), Some(path))
}
// Lazily detach the mount at self (MNT_DETACH).
pub fn unmount(&self) -> OsResult<'_, ()> {
umount2(self, MntFlags::MNT_DETACH).check_os_err("unmount", Some(self), None)
}
// Mark the mount at self private; `rec` applies it recursively.
pub fn set_mount_private(&self, rec: bool) -> OsResult<'_, ()> {
let flag = if rec {
MsFlags::MS_REC
} else {
MsFlags::empty()
};
mount(
None::<&Utf8CStr>,
self,
None::<&Utf8CStr>,
flag | MsFlags::MS_PRIVATE,
None::<&Utf8CStr>,
)
.check_os_err("set_mount_private", Some(self), None)
}
}

340
native/src/base/result.rs Normal file
View file

@ -0,0 +1,340 @@
use crate::logging::Formatter;
use crate::{LogLevel, log_with_args, log_with_formatter};
use nix::errno::Errno;
use std::fmt;
use std::fmt::Display;
use std::panic::Location;
use std::ptr::NonNull;
// Error handling throughout the Rust codebase in Magisk:
//
// All errors should be logged and consumed as soon as possible and converted into LoggedError.
// For `Result` with errors that implement the `Display` trait, use the `?` operator to
// log and convert to LoggedResult.
//
// To log an error with more information, use `ResultExt::log_with_msg()`.
// Marker error type: the underlying error has already been logged, so it
// carries no payload of its own.
#[derive(Default)]
pub struct LoggedError {}
pub type LoggedResult<T> = Result<T, LoggedError>;
// Construct an Err(LoggedError), optionally logging a message first.
#[macro_export]
macro_rules! log_err {
() => {{
Err($crate::LoggedError::default())
}};
($($args:tt)+) => {{
$crate::error!($($args)+);
Err($crate::LoggedError::default())
}};
}
// Convert any Result or Option into a LoggedResult while deliberately
// logging nothing — for errors the caller considers expected.
pub trait SilentLogExt<T> {
    fn silent(self) -> LoggedResult<T>;
}

impl<T, E> SilentLogExt<T> for Result<T, E> {
    // Discard the error value without logging it.
    fn silent(self) -> LoggedResult<T> {
        match self {
            Ok(value) => Ok(value),
            Err(_) => Err(LoggedError::default()),
        }
    }
}

impl<T> SilentLogExt<T> for Option<T> {
    // Treat None as an already-handled error.
    fn silent(self) -> LoggedResult<T> {
        match self {
            Some(value) => Ok(value),
            None => Err(LoggedError::default()),
        }
    }
}
// Public API for logging results.
pub trait ResultExt<T> {
// Log the error (if any) and convert to LoggedResult.
fn log(self) -> LoggedResult<T>;
// Same, prefixing the log entry with a message built by `f`.
fn log_with_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T>;
// Log the error (if any) and discard the result entirely.
fn log_ok(self);
}
// Public API for converting Option to LoggedResult.
pub trait OptionExt<T> {
// None becomes an (unlogged) LoggedError.
fn ok_or_log(self) -> LoggedResult<T>;
// None is logged with a message built by `f`, then converted.
fn ok_or_log_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T>;
}
impl<T> OptionExt<T> for Option<T> {
#[inline(always)]
fn ok_or_log(self) -> LoggedResult<T> {
self.ok_or_else(LoggedError::default)
}
// Release builds: no caller location in the log message.
#[cfg(not(debug_assertions))]
fn ok_or_log_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T> {
self.ok_or_else(|| {
do_log_msg(LogLevel::Error, None, f);
LoggedError::default()
})
}
// Debug builds: capture the caller's file:line for the log message.
#[track_caller]
#[cfg(debug_assertions)]
fn ok_or_log_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T> {
let caller = Some(Location::caller());
self.ok_or_else(|| {
do_log_msg(LogLevel::Error, caller, f);
LoggedError::default()
})
}
}
// Internal: anything that can log itself and collapse into a LoggedError.
trait Loggable {
// Log self at `level`, optionally prefixed with the caller location.
fn do_log(self, level: LogLevel, caller: Option<&'static Location>) -> LoggedError;
// Log self with an additional message produced by `f`.
fn do_log_msg<F: FnOnce(Formatter) -> fmt::Result>(
self,
level: LogLevel,
caller: Option<&'static Location>,
f: F,
) -> LoggedError;
}
// ResultExt for any error that knows how to log itself. Each method has a
// debug variant that records the caller's file:line via #[track_caller].
impl<T, E: Loggable> ResultExt<T> for Result<T, E> {
#[cfg(not(debug_assertions))]
fn log(self) -> LoggedResult<T> {
self.map_err(|e| e.do_log(LogLevel::Error, None))
}
#[track_caller]
#[cfg(debug_assertions)]
fn log(self) -> LoggedResult<T> {
let caller = Some(Location::caller());
self.map_err(|e| e.do_log(LogLevel::Error, caller))
}
#[cfg(not(debug_assertions))]
fn log_with_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T> {
self.map_err(|e| e.do_log_msg(LogLevel::Error, None, f))
}
#[track_caller]
#[cfg(debug_assertions)]
fn log_with_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T> {
let caller = Some(Location::caller());
self.map_err(|e| e.do_log_msg(LogLevel::Error, caller, f))
}
#[cfg(not(debug_assertions))]
fn log_ok(self) {
self.map_err(|e| e.do_log(LogLevel::Error, None)).ok();
}
#[track_caller]
#[cfg(debug_assertions)]
fn log_ok(self) {
let caller = Some(Location::caller());
self.map_err(|e| e.do_log(LogLevel::Error, caller)).ok();
}
}
// For an already-logged result, log() and log_ok() are no-ops; only
// log_with_msg() emits an additional message.
impl<T> ResultExt<T> for LoggedResult<T> {
fn log(self) -> LoggedResult<T> {
self
}
#[cfg(not(debug_assertions))]
fn log_with_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T> {
self.inspect_err(|_| do_log_msg(LogLevel::Error, None, f))
}
#[track_caller]
#[cfg(debug_assertions)]
fn log_with_msg<F: FnOnce(Formatter) -> fmt::Result>(self, f: F) -> LoggedResult<T> {
let caller = Some(Location::caller());
self.inspect_err(|_| do_log_msg(LogLevel::Error, caller, f))
}
fn log_ok(self) {}
}
// Allow converting Loggable errors to LoggedError to support `?` operator.
// The conversion itself performs the logging.
impl<T: Loggable> From<T> for LoggedError {
#[cfg(not(debug_assertions))]
fn from(e: T) -> Self {
e.do_log(LogLevel::Error, None)
}
// Debug builds also record the `?` call site.
#[track_caller]
#[cfg(debug_assertions)]
fn from(e: T) -> Self {
let caller = Some(Location::caller());
e.do_log(LogLevel::Error, caller)
}
}
// Actual logging implementation.
// Make all printable objects Loggable: anything Display can be logged.
impl<T: Display> Loggable for T {
fn do_log(self, level: LogLevel, caller: Option<&'static Location>) -> LoggedError {
if let Some(caller) = caller {
// Prefix with the recorded caller location (debug builds).
log_with_args!(level, "[{}:{}] {:#}", caller.file(), caller.line(), self);
} else {
log_with_args!(level, "{:#}", self);
}
LoggedError::default()
}
fn do_log_msg<F: FnOnce(Formatter) -> fmt::Result>(
self,
level: LogLevel,
caller: Option<&'static Location>,
f: F,
) -> LoggedError {
// Layout: "[file:line] <msg from f>: <error>"
log_with_formatter(level, |w| {
if let Some(caller) = caller {
write!(w, "[{}:{}] ", caller.file(), caller.line())?;
}
f(w)?;
writeln!(w, ": {self:#}")
});
LoggedError::default()
}
}
// Log a standalone message (no error value) with optional caller prefix.
fn do_log_msg<F: FnOnce(Formatter) -> fmt::Result>(
level: LogLevel,
caller: Option<&'static Location>,
f: F,
) {
log_with_formatter(level, |w| {
if let Some(caller) = caller {
write!(w, "[{}:{}] ", caller.file(), caller.line())?;
}
f(w)?;
w.write_char('\n')
});
}
// Check libc return value and map to Result.
// Implementors define what "failure" looks like; the provided methods
// then decorate failures with a syscall name and up to two arguments.
pub trait LibcReturn
where
Self: Sized,
{
type Value;
// Raw conversion to nix::Result, reading errno on failure.
fn check_err(self) -> nix::Result<Self::Value>;
// Convert, keeping the success value and tagging errors with context.
fn into_os_result<'a>(
self,
name: &'static str,
arg1: Option<&'a str>,
arg2: Option<&'a str>,
) -> OsResult<'a, Self::Value> {
self.check_err()
.map_err(|e| OsError::new(e, name, arg1, arg2))
}
// Convert, discarding the success value.
fn check_os_err<'a>(
self,
name: &'static str,
arg1: Option<&'a str>,
arg2: Option<&'a str>,
) -> OsResult<'a, ()> {
self.check_err()
.map(|_| ())
.map_err(|e| OsError::new(e, name, arg1, arg2))
}
}
// Implement LibcReturn for signed integer returns: negative means failure
// and errno holds the error (the common libc convention).
macro_rules! impl_libc_return {
($($t:ty)*) => ($(
impl LibcReturn for $t {
type Value = Self;
#[inline(always)]
fn check_err(self) -> nix::Result<Self::Value> {
if self < 0 {
Err(Errno::last())
} else {
Ok(self)
}
}
}
)*)
}
impl_libc_return! { i8 i16 i32 i64 isize }
// Pointer returns: null means failure, non-null is wrapped in NonNull.
impl<T> LibcReturn for *mut T {
type Value = NonNull<T>;
#[inline(always)]
fn check_err(self) -> nix::Result<Self::Value> {
NonNull::new(self).ok_or_else(Errno::last)
}
}
// nix results are already checked; pass them through unchanged.
impl<T> LibcReturn for nix::Result<T> {
type Value = T;
#[inline(always)]
fn check_err(self) -> Self {
self
}
}
// An errno plus the syscall name and up to two string arguments for
// context, e.g. `mount '/src' '/dst': EINVAL`.
#[derive(Debug)]
pub struct OsError<'a> {
pub errno: Errno,
name: &'static str,
arg1: Option<&'a str>,
arg2: Option<&'a str>,
}
impl OsError<'_> {
pub fn new<'a>(
errno: Errno,
name: &'static str,
arg1: Option<&'a str>,
arg2: Option<&'a str>,
) -> OsError<'a> {
OsError {
errno,
name,
arg1,
arg2,
}
}
// Build from the current thread's errno.
pub fn last_os_error<'a>(
name: &'static str,
arg1: Option<&'a str>,
arg2: Option<&'a str>,
) -> OsError<'a> {
Self::new(Errno::last(), name, arg1, arg2)
}
// Rebind the error to different argument strings (possibly new lifetime).
pub fn set_args<'a>(self, arg1: Option<&'a str>, arg2: Option<&'a str>) -> OsError<'a> {
Self::new(self.errno, self.name, arg1, arg2)
}
}
impl Display for OsError<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// With no syscall name, just print the errno.
if self.name.is_empty() {
write!(f, "{}", self.errno)
} else {
// Include however many arguments are present.
match (self.arg1, self.arg2) {
(Some(arg1), Some(arg2)) => {
write!(f, "{} '{arg1}' '{arg2}': {}", self.name, self.errno)
}
(Some(arg1), None) => {
write!(f, "{} '{arg1}': {}", self.name, self.errno)
}
_ => {
write!(f, "{}: {}", self.name, self.errno)
}
}
}
}
}
impl std::error::Error for OsError<'_> {}
pub type OsResult<'a, T> = Result<T, OsError<'a>>;

43
native/src/base/xwrap.hpp Normal file
View file

@ -0,0 +1,43 @@
#pragma once
#include <unistd.h>
#include <dirent.h>
#include <stdio.h>
#include <poll.h>
#include <fcntl.h>
// "x"-prefixed wrappers around libc functions. Implemented on the Rust
// side (xwrap.rs): on failure they log an error and return the usual
// failure sentinel (-1 or null) instead of requiring callers to check errno.
extern "C" {
FILE *xfopen(const char *pathname, const char *mode);
FILE *xfdopen(int fd, const char *mode);
int xopen(const char *pathname, int flags, mode_t mode = 0);
int xopenat(int dirfd, const char *pathname, int flags, mode_t mode = 0);
// Writes the full buffer (loops internally on the Rust side).
ssize_t xwrite(int fd, const void *buf, size_t count);
ssize_t xread(int fd, void *buf, size_t count);
// Like xread, but fails unless exactly `count` bytes are read.
ssize_t xxread(int fd, void *buf, size_t count);
int xsetns(int fd, int nstype);
int xunshare(int flags);
DIR *xopendir(const char *name);
DIR *xfdopendir(int fd);
dirent *xreaddir(DIR *dirp);
pid_t xsetsid();
int xstat(const char *pathname, struct stat *buf);
int xfstat(int fd, struct stat *buf);
int xdup2(int oldfd, int newfd);
ssize_t xreadlink(const char * __restrict__ pathname, char * __restrict__ buf, size_t bufsiz);
ssize_t xreadlinkat(
int dirfd, const char * __restrict__ pathname, char * __restrict__ buf, size_t bufsiz);
int xsymlink(const char *target, const char *linkpath);
int xmount(const char *source, const char *target,
const char *filesystemtype, unsigned long mountflags,
const void *data);
int xumount2(const char *target, int flags);
int xrename(const char *oldpath, const char *newpath);
int xmkdir(const char *pathname, mode_t mode);
// Recursive mkdir (creates missing parents).
int xmkdirs(const char *pathname, mode_t mode);
ssize_t xsendfile(int out_fd, int in_fd, off_t *offset, size_t count);
pid_t xfork();
ssize_t xrealpath(const char * __restrict__ path, char * __restrict__ buf, size_t bufsiz);
int xmknod(const char * pathname, mode_t mode, dev_t dev);
} // extern "C"

333
native/src/base/xwrap.rs Normal file
View file

@ -0,0 +1,333 @@
// Functions in this file are only for exporting to C++, DO NOT USE IN RUST
use crate::cxx_extern::readlinkat;
use crate::{Directory, LibcReturn, ResultExt, Utf8CStr, cstr, slice_from_ptr, slice_from_ptr_mut};
use libc::{c_char, c_uint, c_ulong, c_void, dev_t, mode_t, off_t};
use std::ffi::CStr;
use std::fs::File;
use std::io::{Read, Write};
use std::mem::ManuallyDrop;
use std::os::fd::FromRawFd;
use std::os::unix::io::RawFd;
use std::ptr;
use std::ptr::NonNull;
// Borrow a C string pointer as &str; None for null or non-UTF-8 input.
fn ptr_to_str<'a>(ptr: *const c_char) -> Option<&'a str> {
    if ptr.is_null() {
        return None;
    }
    // SAFETY: callers pass valid NUL-terminated strings (or null, handled above).
    let cstr = unsafe { CStr::from_ptr(ptr) };
    cstr.to_str().ok()
}
// C ABI wrapper: resolve `path` into `buf`; logs and returns -1 on failure,
// otherwise the length written.
#[unsafe(no_mangle)]
unsafe extern "C" fn xrealpath(path: *const c_char, buf: *mut u8, bufsz: usize) -> isize {
unsafe {
match Utf8CStr::from_ptr(path) {
Ok(path) => {
let mut buf = cstr::buf::wrap_ptr(buf, bufsz);
path.realpath(&mut buf)
.log()
.map_or(-1, |_| buf.len() as isize)
}
Err(_) => -1,
}
}
}
// C ABI wrapper: read the symlink target of `path` into `buf`.
#[unsafe(no_mangle)]
unsafe extern "C" fn xreadlink(path: *const c_char, buf: *mut u8, bufsz: usize) -> isize {
unsafe {
match Utf8CStr::from_ptr(path) {
Ok(path) => {
let mut buf = cstr::buf::wrap_ptr(buf, bufsz);
path.read_link(&mut buf)
.log()
.map_or(-1, |_| buf.len() as isize)
}
Err(_) => -1,
}
}
}
// C ABI wrapper: readlinkat with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xreadlinkat(
dirfd: RawFd,
path: *const c_char,
buf: *mut u8,
bufsz: usize,
) -> isize {
unsafe {
readlinkat(dirfd, path, buf, bufsz)
.into_os_result("readlinkat", ptr_to_str(path), None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: fopen with error logging; null on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xfopen(path: *const c_char, mode: *const c_char) -> *mut libc::FILE {
unsafe {
libc::fopen(path, mode)
.into_os_result("fopen", ptr_to_str(path), None)
.log()
.map_or(ptr::null_mut(), NonNull::as_ptr)
}
}
// C ABI wrapper: fdopen with error logging; null on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xfdopen(fd: RawFd, mode: *const c_char) -> *mut libc::FILE {
unsafe {
libc::fdopen(fd, mode)
.into_os_result("fdopen", None, None)
.log()
.map_or(ptr::null_mut(), NonNull::as_ptr)
}
}
// C ABI wrapper: open with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xopen(path: *const c_char, flags: i32, mode: mode_t) -> RawFd {
unsafe {
libc::open(path, flags, mode as c_uint)
.into_os_result("open", ptr_to_str(path), None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: openat with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xopenat(dirfd: RawFd, path: *const c_char, flags: i32, mode: mode_t) -> RawFd {
unsafe {
libc::openat(dirfd, path, flags, mode as c_uint)
.into_os_result("openat", ptr_to_str(path), None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: write the whole buffer via File::write_all (retries short
// writes); returns the full length, or -1 after logging on failure.
// ManuallyDrop keeps the borrowed fd from being closed.
#[unsafe(no_mangle)]
unsafe extern "C" fn xwrite(fd: RawFd, buf: *const u8, bufsz: usize) -> isize {
let mut file = unsafe { ManuallyDrop::new(File::from_raw_fd(fd)) };
let data = unsafe { slice_from_ptr(buf, bufsz) };
file.write_all(data)
.log()
.map_or(-1, |_| data.len() as isize)
}
// C ABI wrapper: single read(2) call with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xread(fd: RawFd, buf: *mut c_void, bufsz: usize) -> isize {
unsafe {
libc::read(fd, buf, bufsz)
.into_os_result("read", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: read exactly bufsz bytes (read_exact); -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xxread(fd: RawFd, buf: *mut u8, bufsz: usize) -> isize {
let mut file = unsafe { ManuallyDrop::new(File::from_raw_fd(fd)) };
let data = unsafe { slice_from_ptr_mut(buf, bufsz) };
file.read_exact(data)
.log()
.map_or(-1, |_| data.len() as isize)
}
// Rust-internal wrapper: pipe2 with error logging; -1 on failure.
pub(crate) fn xpipe2(fds: &mut [i32; 2], flags: i32) -> i32 {
unsafe {
libc::pipe2(fds.as_mut_ptr(), flags)
.into_os_result("pipe2", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: setns with error logging; -1 on failure.
#[unsafe(no_mangle)]
extern "C" fn xsetns(fd: RawFd, nstype: i32) -> i32 {
unsafe {
libc::setns(fd, nstype)
.into_os_result("setns", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: unshare with error logging; -1 on failure.
#[unsafe(no_mangle)]
extern "C" fn xunshare(flags: i32) -> i32 {
unsafe {
libc::unshare(flags)
.into_os_result("unshare", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: opendir with error logging; null on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xopendir(path: *const c_char) -> *mut libc::DIR {
unsafe {
libc::opendir(path)
.into_os_result("opendir", ptr_to_str(path), None)
.log()
.map_or(ptr::null_mut(), NonNull::as_ptr)
}
}
// C ABI wrapper: fdopendir with error logging; null on failure.
#[unsafe(no_mangle)]
extern "C" fn xfdopendir(fd: RawFd) -> *mut libc::DIR {
unsafe {
libc::fdopendir(fd)
.into_os_result("fdopendir", None, None)
.log()
.map_or(ptr::null_mut(), NonNull::as_ptr)
}
}
// C ABI wrapper: next directory entry via the Directory abstraction;
// null at end-of-directory or on (logged) failure. ManuallyDrop keeps the
// caller-owned DIR handle open.
#[unsafe(no_mangle)]
unsafe extern "C" fn xreaddir(mut dir: ManuallyDrop<Directory>) -> *mut libc::dirent {
dir.read()
.log()
.ok()
.flatten()
.map_or(ptr::null_mut(), |entry| entry.as_ptr())
}
// C ABI wrapper: setsid with error logging; -1 on failure.
#[unsafe(no_mangle)]
extern "C" fn xsetsid() -> i32 {
unsafe {
libc::setsid()
.into_os_result("setsid", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: stat with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xstat(path: *const c_char, buf: *mut libc::stat) -> i32 {
unsafe {
libc::stat(path, buf)
.into_os_result("stat", ptr_to_str(path), None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: fstat with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xfstat(fd: RawFd, buf: *mut libc::stat) -> i32 {
unsafe {
libc::fstat(fd, buf)
.into_os_result("fstat", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: dup2 with error logging; -1 on failure.
#[unsafe(no_mangle)]
extern "C" fn xdup2(oldfd: RawFd, newfd: RawFd) -> RawFd {
unsafe {
libc::dup2(oldfd, newfd)
.into_os_result("dup2", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: symlink with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xsymlink(target: *const c_char, linkpath: *const c_char) -> i32 {
unsafe {
libc::symlink(target, linkpath)
.into_os_result("symlink", ptr_to_str(target), ptr_to_str(linkpath))
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: mount with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xmount(
src: *const c_char,
target: *const c_char,
fstype: *const c_char,
flags: c_ulong,
data: *const c_void,
) -> i32 {
unsafe {
libc::mount(src, target, fstype, flags, data)
.into_os_result("mount", ptr_to_str(src), ptr_to_str(target))
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: umount2 with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xumount2(target: *const c_char, flags: i32) -> i32 {
unsafe {
libc::umount2(target, flags)
.into_os_result("umount2", ptr_to_str(target), None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: rename with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xrename(oldname: *const c_char, newname: *const c_char) -> i32 {
unsafe {
libc::rename(oldname, newname)
.into_os_result("rename", ptr_to_str(oldname), ptr_to_str(newname))
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: mkdir via Utf8CStr::mkdir; 0 on success, -1 on failure
// (including non-UTF-8 paths).
#[unsafe(no_mangle)]
unsafe extern "C" fn xmkdir(path: *const c_char, mode: mode_t) -> i32 {
unsafe {
match Utf8CStr::from_ptr(path) {
Ok(path) => path.mkdir(mode).log().map_or(-1, |_| 0),
Err(_) => -1,
}
}
}
// C ABI wrapper: recursive mkdir via Utf8CStr::mkdirs; 0 or -1.
#[unsafe(no_mangle)]
unsafe extern "C" fn xmkdirs(path: *const c_char, mode: mode_t) -> i32 {
unsafe {
match Utf8CStr::from_ptr(path) {
Ok(path) => path.mkdirs(mode).log().map_or(-1, |_| 0),
Err(_) => -1,
}
}
}
// C ABI wrapper: sendfile with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xsendfile(
out_fd: RawFd,
in_fd: RawFd,
offset: *mut off_t,
count: usize,
) -> isize {
unsafe {
libc::sendfile(out_fd, in_fd, offset, count)
.into_os_result("sendfile", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: fork with error logging; -1 on failure.
#[unsafe(no_mangle)]
extern "C" fn xfork() -> i32 {
unsafe {
libc::fork()
.into_os_result("fork", None, None)
.log()
.unwrap_or(-1)
}
}
// C ABI wrapper: mknod with error logging; -1 on failure.
#[unsafe(no_mangle)]
unsafe extern "C" fn xmknod(pathname: *const c_char, mode: mode_t, dev: dev_t) -> i32 {
unsafe {
libc::mknod(pathname, mode, dev)
.into_os_result("mknod", ptr_to_str(pathname), None)
.log()
.unwrap_or(-1)
}
}

2
native/src/boot/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
proto/update_metadata.rs
proto/mod.rs

View file

@ -0,0 +1,36 @@
[package]
name = "magiskboot"
version.workspace = true
edition.workspace = true
[lib]
crate-type = ["staticlib"]
path = "lib.rs"
[build-dependencies]
cxx-gen = { workspace = true }
pb-rs = { workspace = true }
[dependencies]
base = { workspace = true }
cxx = { workspace = true }
byteorder = { workspace = true }
size = { workspace = true }
quick-protobuf = { workspace = true }
sha1 = { workspace = true }
sha2 = { workspace = true }
digest = { workspace = true }
p256 = { workspace = true }
p384 = { workspace = true }
p521 = { workspace = true }
rsa = { workspace = true, features = ["sha2"] }
x509-cert = { workspace = true }
der = { workspace = true, features = ["derive", "pem"] }
fdt = { workspace = true }
bytemuck = { workspace = true, features = ["derive", "min_const_generics"] }
num-traits = { workspace = true }
flate2 = { workspace = true, features = ["zlib-rs"] }
bzip2 = { workspace = true }
lz4 = { workspace = true }
lzma-rust2 = { workspace = true, features = ["xz", "std", "encoder", "optimization"] }
zopfli = { workspace = true, features = ["gzip"] }

1051
native/src/boot/bootimg.cpp Normal file

File diff suppressed because it is too large Load diff

697
native/src/boot/bootimg.hpp Normal file
View file

@ -0,0 +1,697 @@
#pragma once

#include <bitset>
#include <cstdint>
#include <cstdlib>      // calloc/free used by dyn_img_hdr
#include <cstring>      // memcpy used by header constructors
#include <memory>       // std::unique_ptr / std::make_unique in boot_img
#include <span>         // std::span in vendor_ramdisk_tbl()
#include <type_traits>  // std::is_integral_v in align_to()
#include <utility>

#include <rust/cxx.h>
/******************
* Special Headers
*****************/
// MediaTek pre-header: a 512-byte block prepended to the kernel/ramdisk blob
// on MTK devices.
struct mtk_hdr {
    uint32_t magic;    /* MTK magic */
    uint32_t size;     /* Size of the content */
    char name[32];     /* The type of the header */
    char padding[472]; /* Padding to 512 bytes */
} __attribute__((packed));

// DHTB pre-header: 512-byte block carrying a checksum over the payload.
struct dhtb_hdr {
    char magic[8];        /* DHTB magic */
    uint8_t checksum[40]; /* Payload SHA256, whole image + SEANDROIDENFORCE + 0xFFFFFFFF */
    uint32_t size;        /* Payload size, whole image + SEANDROIDENFORCE + 0xFFFFFFFF */
    char padding[460];    /* Padding to 512 bytes */
} __attribute__((packed));

// Tegra "SIGNED-BY-SIGNBLOB" blob header (MSM-RADIO-UPDATE container).
struct blob_hdr {
    char secure_magic[20]; /* "-SIGNED-BY-SIGNBLOB-" */
    uint32_t datalen;      /* 0x00000000 */
    uint32_t signature;    /* 0x00000000 */
    char magic[16];        /* "MSM-RADIO-UPDATE" */
    uint32_t hdr_version;  /* 0x00010000 */
    uint32_t hdr_size;     /* Size of header */
    uint32_t part_offset;  /* Same as size */
    uint32_t num_parts;    /* Number of partitions */
    uint32_t unknown[7];   /* All 0x00000000 */
    char name[4];          /* Name of partition */
    uint32_t offset;       /* offset in blob where this partition starts */
    uint32_t size;         /* Size of data */
    uint32_t version;      /* 0x00000001 */
} __attribute__((packed));

// Leading portion of an ARM zImage kernel; only the fields we inspect.
struct zimage_hdr {
    uint32_t code[9];
    uint32_t magic;  /* zImage magic */
    uint32_t start;  /* absolute load/run zImage address */
    uint32_t end;    /* zImage end address */
    uint32_t endian; /* endianness flag */
    // There could be more fields, but we don't care
} __attribute__((packed));
/**************
* AVB Headers
**************/
#define AVB_FOOTER_MAGIC_LEN 4
#define AVB_MAGIC_LEN 4
#define AVB_RELEASE_STRING_SIZE 48
// https://android.googlesource.com/platform/external/avb/+/refs/heads/android11-release/libavb/avb_footer.h
// AVB 2.0 footer placed at the end of a partition; locates the vbmeta blob
// inside the image.
struct AvbFooter {
    uint8_t magic[AVB_FOOTER_MAGIC_LEN];
    uint32_t version_major;
    uint32_t version_minor;
    uint64_t original_image_size;
    uint64_t vbmeta_offset;
    uint64_t vbmeta_size;
    uint8_t reserved[28];
} __attribute__((packed));

// https://android.googlesource.com/platform/external/avb/+/refs/heads/android11-release/libavb/avb_vbmeta_image.h
// Header of an AVB 2.0 vbmeta image; all offsets are relative to the
// authentication/auxiliary data blocks that follow the header.
struct AvbVBMetaImageHeader {
    uint8_t magic[AVB_MAGIC_LEN];
    uint32_t required_libavb_version_major;
    uint32_t required_libavb_version_minor;
    uint64_t authentication_data_block_size;
    uint64_t auxiliary_data_block_size;
    uint32_t algorithm_type;
    uint64_t hash_offset;
    uint64_t hash_size;
    uint64_t signature_offset;
    uint64_t signature_size;
    uint64_t public_key_offset;
    uint64_t public_key_size;
    uint64_t public_key_metadata_offset;
    uint64_t public_key_metadata_size;
    uint64_t descriptors_offset;
    uint64_t descriptors_size;
    uint64_t rollback_index;
    uint32_t flags;
    uint32_t rollback_index_location;
    uint8_t release_string[AVB_RELEASE_STRING_SIZE];
    uint8_t reserved[80];
} __attribute__((packed));
/*********************
* Boot Image Headers
*********************/
// https://android.googlesource.com/platform/system/tools/mkbootimg/+/refs/heads/android12-release/include/bootimg/bootimg.h
#define BOOT_MAGIC_SIZE 8
#define BOOT_NAME_SIZE 16
#define BOOT_ID_SIZE 32
#define BOOT_ARGS_SIZE 512
#define BOOT_EXTRA_ARGS_SIZE 1024
#define VENDOR_BOOT_ARGS_SIZE 2048
#define VENDOR_RAMDISK_NAME_SIZE 32
#define VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE 16
#define VENDOR_RAMDISK_TYPE_NONE 0
#define VENDOR_RAMDISK_TYPE_PLATFORM 1
#define VENDOR_RAMDISK_TYPE_RECOVERY 2
#define VENDOR_RAMDISK_TYPE_DLKM 3
/*
* When the boot image header has a version of 0 - 2, the structure of the boot
* image is as follows:
*
* +-----------------+
* | boot header | 1 page
* +-----------------+
* | kernel | m pages
* +-----------------+
* | ramdisk | n pages
* +-----------------+
* | second stage | o pages
* +-----------------+
* | extra blob | x pages (non standard)
* +-----------------+
* | recovery dtbo | p pages
* +-----------------+
* | dtb | q pages
* +-----------------+
*
* m = (kernel_size + page_size - 1) / page_size
* n = (ramdisk_size + page_size - 1) / page_size
* o = (second_size + page_size - 1) / page_size
* p = (recovery_dtbo_size + page_size - 1) / page_size
* q = (dtb_size + page_size - 1) / page_size
* x = (extra_size + page_size - 1) / page_size
*/
// Fields shared by all v0-v2 AOSP headers and the Samsung PXA header.
struct boot_img_hdr_v0_common {
    char magic[BOOT_MAGIC_SIZE];

    uint32_t kernel_size;  /* size in bytes */
    uint32_t kernel_addr;  /* physical load addr */

    uint32_t ramdisk_size; /* size in bytes */
    uint32_t ramdisk_addr; /* physical load addr */

    uint32_t second_size;  /* size in bytes */
    uint32_t second_addr;  /* physical load addr */
} __attribute__((packed));

struct boot_img_hdr_v0 : public boot_img_hdr_v0_common {
    uint32_t tags_addr; /* physical addr for kernel tags */

    // In AOSP headers, this field is used for page size.
    // For Samsung PXA headers, the use of this field is unknown;
    // however, its value is something unrealistic to be treated as page size.
    // We use this fact to determine whether this is an AOSP or PXA header.
    union {
        uint32_t unknown;
        uint32_t page_size; /* flash page size we assume */
    };

    // In header v1, this field is used for header version
    // However, on some devices like Samsung, this field is used to store DTB
    // We treat this field differently based on its value
    union {
        uint32_t header_version; /* the version of the header */
        uint32_t extra_size;     /* extra blob size in bytes */
    };

    // Operating system version and security patch level.
    // For version "A.B.C" and patch level "Y-M-D":
    // (7 bits for each of A, B, C; 7 bits for (Y-2000), 4 bits for M)
    // os_version = A[31:25] B[24:18] C[17:11] (Y-2000)[10:4] M[3:0]
    uint32_t os_version;

    char name[BOOT_NAME_SIZE]; /* asciiz product name */
    char cmdline[BOOT_ARGS_SIZE];
    char id[BOOT_ID_SIZE]; /* timestamp / checksum / sha1 / etc */

    // Supplemental command line data; kept here to maintain
    // binary compatibility with older versions of mkbootimg.
    char extra_cmdline[BOOT_EXTRA_ARGS_SIZE];
} __attribute__((packed));

struct boot_img_hdr_v1 : public boot_img_hdr_v0 {
    uint32_t recovery_dtbo_size;   /* size in bytes for recovery DTBO/ACPIO image */
    uint64_t recovery_dtbo_offset; /* offset to recovery dtbo/acpio in boot image */
    uint32_t header_size;
} __attribute__((packed));

struct boot_img_hdr_v2 : public boot_img_hdr_v1 {
    uint32_t dtb_size; /* size in bytes for DTB image */
    uint64_t dtb_addr; /* physical load address for DTB image */
} __attribute__((packed));

// Special Samsung header
struct boot_img_hdr_pxa : public boot_img_hdr_v0_common {
    uint32_t extra_size; /* extra blob size in bytes */
    uint32_t unknown;
    uint32_t tags_addr;  /* physical addr for kernel tags */
    uint32_t page_size;  /* flash page size we assume */

    char name[24]; /* asciiz product name */
    char cmdline[BOOT_ARGS_SIZE];
    char id[BOOT_ID_SIZE]; /* timestamp / checksum / sha1 / etc */

    char extra_cmdline[BOOT_EXTRA_ARGS_SIZE];
} __attribute__((packed));
/*
* When the boot image header has a version of 3 - 4, the structure of the boot
* image is as follows:
*
* +---------------------+
* | boot header | 4096 bytes
* +---------------------+
* | kernel | m pages
* +---------------------+
* | ramdisk | n pages
* +---------------------+
* | boot signature | g pages
* +---------------------+
*
* m = (kernel_size + 4096 - 1) / 4096
* n = (ramdisk_size + 4096 - 1) / 4096
* g = (signature_size + 4096 - 1) / 4096
*
* Page size is fixed at 4096 bytes.
*
* The structure of the vendor boot image is as follows:
*
* +------------------------+
* | vendor boot header | o pages
* +------------------------+
* | vendor ramdisk section | p pages
* +------------------------+
* | dtb | q pages
* +------------------------+
* | vendor ramdisk table | r pages
* +------------------------+
* | bootconfig | s pages
* +------------------------+
*
* o = (2128 + page_size - 1) / page_size
* p = (vendor_ramdisk_size + page_size - 1) / page_size
* q = (dtb_size + page_size - 1) / page_size
* r = (vendor_ramdisk_table_size + page_size - 1) / page_size
* s = (vendor_bootconfig_size + page_size - 1) / page_size
*
* Note that in version 4 of the vendor boot image, multiple vendor ramdisks can
* be included in the vendor boot image. The bootloader can select a subset of
* ramdisks to load at runtime. To help the bootloader select the ramdisks, each
* ramdisk is tagged with a type tag and a set of hardware identifiers
* describing the board, soc or platform that this ramdisk is intended for.
*
* The vendor ramdisk section is consist of multiple ramdisk images concatenated
* one after another, and vendor_ramdisk_size is the size of the section, which
* is the total size of all the ramdisks included in the vendor boot image.
*
* The vendor ramdisk table holds the size, offset, type, name and hardware
* identifiers of each ramdisk. The type field denotes the type of its content.
* The vendor ramdisk names are unique. The hardware identifiers are specified
* in the board_id field in each table entry. The board_id field is consist of a
* vector of unsigned integer words, and the encoding scheme is defined by the
* hardware vendor.
*
* For the different type of ramdisks, there are:
* - VENDOR_RAMDISK_TYPE_NONE indicates the value is unspecified.
* - VENDOR_RAMDISK_TYPE_PLATFORM ramdisks contain platform specific bits, so
* the bootloader should always load these into memory.
* - VENDOR_RAMDISK_TYPE_RECOVERY ramdisks contain recovery resources, so
* the bootloader should load these when booting into recovery.
* - VENDOR_RAMDISK_TYPE_DLKM ramdisks contain dynamic loadable kernel
* modules.
*
* Version 4 of the vendor boot image also adds a bootconfig section to the end
* of the image. This section contains Boot Configuration parameters known at
* build time. The bootloader is responsible for placing this section directly
* after the generic ramdisk, followed by the bootconfig trailer, before
* entering the kernel.
*/
// v3/v4 boot header. Per the layout comment above, the page size is fixed
// at 4096 bytes and most legacy v0-v2 fields no longer exist.
struct boot_img_hdr_v3 {
    uint8_t magic[BOOT_MAGIC_SIZE];
    uint32_t kernel_size;  /* size in bytes */
    uint32_t ramdisk_size; /* size in bytes */
    uint32_t os_version;
    uint32_t header_size;
    uint32_t reserved[4];
    uint32_t header_version;
    char cmdline[BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE];
} __attribute__((packed));

// Vendor boot image header (v3).
struct boot_img_hdr_vnd_v3 {
    // Must be VENDOR_BOOT_MAGIC.
    uint8_t magic[BOOT_MAGIC_SIZE];
    // Version of the vendor boot image header.
    uint32_t header_version;
    uint32_t page_size;    /* flash page size we assume */
    uint32_t kernel_addr;  /* physical load addr */
    uint32_t ramdisk_addr; /* physical load addr */
    uint32_t ramdisk_size; /* size in bytes */
    char cmdline[VENDOR_BOOT_ARGS_SIZE];
    uint32_t tags_addr; /* physical addr for kernel tags (if required) */
    char name[BOOT_NAME_SIZE]; /* asciiz product name */
    uint32_t header_size;
    uint32_t dtb_size; /* size in bytes for DTB image */
    uint64_t dtb_addr; /* physical load address for DTB image */
} __attribute__((packed));

struct boot_img_hdr_v4 : public boot_img_hdr_v3 {
    uint32_t signature_size; /* size in bytes */
} __attribute__((packed));

struct boot_img_hdr_vnd_v4 : public boot_img_hdr_vnd_v3 {
    uint32_t vendor_ramdisk_table_size;       /* size in bytes for the vendor ramdisk table */
    uint32_t vendor_ramdisk_table_entry_num;  /* number of entries in the vendor ramdisk table */
    uint32_t vendor_ramdisk_table_entry_size; /* size in bytes for a vendor ramdisk table entry */
    uint32_t bootconfig_size;                 /* size in bytes for the bootconfig section */
} __attribute__((packed));

// One entry of the v4 vendor ramdisk table, describing a single ramdisk
// inside the concatenated vendor ramdisk section.
struct vendor_ramdisk_table_entry_v4 {
    uint32_t ramdisk_size;   /* size in bytes for the ramdisk image */
    uint32_t ramdisk_offset; /* offset to the ramdisk image in vendor ramdisk section */
    uint32_t ramdisk_type;   /* type of the ramdisk */
    char ramdisk_name[VENDOR_RAMDISK_NAME_SIZE]; /* asciiz ramdisk name */

    // Hardware identifiers describing the board, soc or platform which this
    // ramdisk is intended to be loaded on.
    uint32_t board_id[VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE];
} __attribute__((packed));
/*******************************
* Polymorphic Universal Header
*******************************/
// Round v up to the next multiple of a.
template <typename T>
static T align_to(T v, int a) {
    static_assert(std::is_integral_v<T>);
    T blocks = (v + a - 1) / a;
    return blocks * a;
}

// Number of padding bytes required to bring v up to a multiple of a.
template <typename T>
static T align_padding(T v, int a) {
    return align_to(v, a) - v;
}
// Accessor-generation macros for dyn_img_hdr:
// - decl_val: read-only accessor whose default implementation returns 0
// - decl_var: mutable accessor whose default returns a shared junk slot
//   (j32/j64), so writes to fields a variant lacks are harmlessly dropped
// - decl_str: string accessor whose default returns nullptr
#define decl_val(name, len) \
virtual uint##len##_t name() const { return 0; }

#define decl_var(name, len) \
virtual uint##len##_t &name() { return j##len(); } \
decl_val(name, len)

#define decl_str(name) \
virtual char *name() { return nullptr; } \
virtual const char *name() const { return nullptr; }

// Polymorphic interface over every supported boot image header variant
// (AOSP v0-v4, vendor v3/v4, Samsung PXA). Subclasses override only the
// accessors their concrete header actually carries; everything else falls
// back to the defaults declared by the macros above.
struct dyn_img_hdr {

    virtual bool is_vendor() const = 0;

    // Standard entries
    decl_var(kernel_size, 32)
    decl_var(ramdisk_size, 32)
    decl_var(second_size, 32)
    decl_val(page_size, 32)
    decl_val(header_version, 32)
    decl_var(extra_size, 32)
    decl_var(os_version, 32)
    decl_str(name)
    decl_str(cmdline)
    decl_str(id)
    decl_str(extra_cmdline)

    // v1/v2 specific
    decl_var(recovery_dtbo_size, 32)
    decl_var(recovery_dtbo_offset, 64)
    decl_var(header_size, 32)
    decl_var(dtb_size, 32)

    // v4 specific
    decl_val(signature_size, 32)

    // v4 vendor specific
    decl_val(vendor_ramdisk_table_size, 32)
    decl_val(vendor_ramdisk_table_entry_num, 32)
    decl_val(vendor_ramdisk_table_entry_size, 32)
    decl_var(bootconfig_size, 32)

    virtual ~dyn_img_hdr() {
        // raw is allocated with calloc by the concrete class constructors
        free(raw);
    }

    // Size of the concrete header struct backing this instance.
    virtual size_t hdr_size() const = 0;
    // Bytes the header occupies within the image (i.e. padded).
    virtual size_t hdr_space() const { return page_size(); }
    virtual dyn_img_hdr *clone() const = 0;

    const void *raw_hdr() const { return raw; }
    void print() const;
    void dump_hdr_file() const;
    void load_hdr_file();

protected:
    // All views alias the same heap allocation; which member is valid
    // depends on the concrete subclass.
    union {
        boot_img_hdr_v2 *v2_hdr;     /* AOSP v2 header */
        boot_img_hdr_v4 *v4_hdr;     /* AOSP v4 header */
        boot_img_hdr_vnd_v4 *v4_vnd; /* AOSP vendor v4 header */
        boot_img_hdr_pxa *hdr_pxa;   /* Samsung PXA header */
        void *raw;                   /* Raw pointer */
    };

    // Writable dummy slots returned by the decl_var defaults; zeroed on
    // every access so a stale write can never be read back.
    static uint32_t &j32() { _j32 = 0; return _j32; }
    static uint64_t &j64() { _j64 = 0; return _j64; }

private:
    // Junk for references
    inline static uint32_t _j32 = 0;
    inline static uint64_t _j64 = 0;
};
#undef decl_var
#undef decl_val
#undef decl_str

// __impl_cls: constructor / hdr_size / clone boilerplate for one concrete
// header class. The constructor zero-fills a full-sized buffer and then
// copies sz bytes in, so a shorter source still yields a fully initialized
// header.
#define __impl_cls(name, hdr) \
protected: name() = default; \
public: \
explicit \
name(const void *p, ssize_t sz = -1) { \
if (sz < 0) sz = sizeof(hdr); \
raw = calloc(sizeof(hdr), 1); \
memcpy(raw, p, sz); \
} \
size_t hdr_size() const override { \
return sizeof(hdr); \
} \
dyn_img_hdr *clone() const override { \
auto p = new name(raw); \
return p; \
};

// __impl_val / __impl_var: forward an accessor to the same-named field of
// the given raw-union view (v2_hdr / hdr_pxa / v4_hdr / v4_vnd).
#define __impl_val(name, hdr_name) \
decltype(std::declval<const dyn_img_hdr>().name()) name() const override { return hdr_name->name; }

#define __impl_var(name, hdr_name) \
decltype(std::declval<dyn_img_hdr>().name()) name() override { return hdr_name->name; } \
__impl_val(name, hdr_name)

#define impl_cls(ver) __impl_cls(dyn_img_##ver, boot_img_hdr_##ver)
#define impl_val(name) __impl_val(name, v2_hdr)
#define impl_var(name) __impl_var(name, v2_hdr)

// Base for regular (non-vendor) boot headers.
struct dyn_img_hdr_boot : public dyn_img_hdr {
    bool is_vendor() const final { return false; }
};

// Fields common to AOSP v0-v2 and Samsung PXA headers.
struct dyn_img_common : public dyn_img_hdr_boot {
    impl_var(kernel_size)
    impl_var(ramdisk_size)
    impl_var(second_size)
};

struct dyn_img_v0 : public dyn_img_common {
    impl_cls(v0)
    impl_val(page_size)
    impl_var(extra_size)
    impl_var(os_version)
    impl_var(name)
    impl_var(cmdline)
    impl_var(id)
    impl_var(extra_cmdline)
};

struct dyn_img_v1 : public dyn_img_v0 {
    impl_cls(v1)
    impl_val(header_version)
    impl_var(recovery_dtbo_size)
    impl_var(recovery_dtbo_offset)
    impl_var(header_size)
    // From v1 on, the field shared with extra_size holds header_version,
    // so extra_size is hidden (writes go to the junk slot, reads return 0).
    uint32_t &extra_size() override { return j32(); }
    uint32_t extra_size() const override { return 0; }
};

struct dyn_img_v2 : public dyn_img_v1 {
    impl_cls(v2)
    impl_var(dtb_size)
};

#undef impl_val
#undef impl_var
// Samsung PXA headers are accessed through the hdr_pxa view.
#define impl_val(name) __impl_val(name, hdr_pxa)
#define impl_var(name) __impl_var(name, hdr_pxa)

struct dyn_img_pxa : public dyn_img_common {
    impl_cls(pxa)
    impl_var(extra_size)
    impl_val(page_size)
    impl_var(name)
    impl_var(cmdline)
    impl_var(id)
    impl_var(extra_cmdline)
};

#undef impl_val
#undef impl_var
// v3/v4 boot headers are accessed through the v4_hdr view.
#define impl_val(name) __impl_val(name, v4_hdr)
#define impl_var(name) __impl_var(name, v4_hdr)

struct dyn_img_v3 : public dyn_img_hdr_boot {
    impl_cls(v3)
    impl_var(kernel_size)
    impl_var(ramdisk_size)
    impl_var(os_version)
    impl_var(header_size)
    impl_val(header_version)
    impl_var(cmdline)

    // Make API compatible
    uint32_t page_size() const override { return 4096; }
    char *extra_cmdline() override { return &v4_hdr->cmdline[BOOT_ARGS_SIZE]; }
    const char *extra_cmdline() const override { return &v4_hdr->cmdline[BOOT_ARGS_SIZE]; }
};

struct dyn_img_v4 : public dyn_img_v3 {
    impl_cls(v4)
    impl_val(signature_size)
};

// Base for vendor_boot headers.
struct dyn_img_hdr_vendor : public dyn_img_hdr {
    bool is_vendor() const final { return true; }
};

#undef impl_val
#undef impl_var
// Vendor v3/v4 headers are accessed through the v4_vnd view.
#define impl_val(name) __impl_val(name, v4_vnd)
#define impl_var(name) __impl_var(name, v4_vnd)

struct dyn_img_vnd_v3 : public dyn_img_hdr_vendor {
    impl_cls(vnd_v3)
    impl_val(header_version)
    impl_val(page_size)
    impl_var(ramdisk_size)
    impl_var(cmdline)
    impl_var(name)
    impl_var(header_size)
    impl_var(dtb_size)

    // Vendor headers are larger than one page; pad to page boundary.
    size_t hdr_space() const override { return align_to(hdr_size(), page_size()); }

    // Make API compatible
    char *extra_cmdline() override { return &v4_vnd->cmdline[BOOT_ARGS_SIZE]; }
    const char *extra_cmdline() const override { return &v4_vnd->cmdline[BOOT_ARGS_SIZE]; }
};

struct dyn_img_vnd_v4 : public dyn_img_vnd_v3 {
    impl_cls(vnd_v4)
    impl_val(vendor_ramdisk_table_size)
    impl_val(vendor_ramdisk_table_entry_num)
    impl_val(vendor_ramdisk_table_entry_size)
    impl_var(bootconfig_size)
};

#undef __impl_cls
#undef __impl_val
#undef __impl_var
#undef impl_cls
#undef impl_val
#undef impl_var
/******************
* Full Boot Image
******************/
// Bit indices into boot_img::flags. Each flag records a vendor-specific
// wrapper/signature/format quirk detected while parsing the image so the
// repack step can reproduce it (exact detection logic lives in bootimg.cpp).
enum {
    MTK_KERNEL,
    MTK_RAMDISK,
    CHROMEOS_FLAG,
    DHTB_FLAG,
    SEANDROID_FLAG,
    LG_BUMP_FLAG,
    SHA256_FLAG,
    BLOB_FLAG,
    NOOKHD_FLAG,
    ACCLAIM_FLAG,
    AMONET_FLAG,
    AVB1_SIGNED_FLAG,
    AVB_FLAG,
    ZIMAGE_KERNEL,
    BOOT_FLAGS_MAX // number of flags; sizes the bitset
};
// Parsed representation of a boot image file. Owns the read-only mmap of
// the whole file; all byte pointers below alias that mapping.
struct boot_img {
    // Memory map of the whole image
    const mmap_data map;

    // Android image header
    dyn_img_hdr *hdr = nullptr;

    // Flags to indicate the state of current boot image
    std::bitset<BOOT_FLAGS_MAX> flags;

    // The format of kernel, ramdisk and extra
    FileFormat k_fmt;
    FileFormat r_fmt;
    FileFormat e_fmt;

    /*************************************************************
     * Following pointers points within the read-only mmap region
     *************************************************************/

    // Layout of the memory mapped region
    // +---------+
    // | head    | Vendor specific. Should not exist for standard AOSP boot images.
    // +---------+
    // | payload | The actual entire AOSP boot image, including the boot image header.
    // +---------+
    // | tail    | Data after payload. Usually contains signature/AVB information.
    // +---------+
    byte_view payload;
    byte_view tail;

    // MTK headers
    const mtk_hdr *k_hdr = nullptr;
    const mtk_hdr *r_hdr = nullptr;

    // The pointers/values after parse_image
    // +---------------+
    // | z_info.hdr    | z_info.hdr_sz
    // +---------------+
    // | kernel        | hdr->kernel_size()
    // +---------------+
    // | z_info.tail   |
    // +---------------+
    struct {
        const zimage_hdr *hdr = nullptr;
        uint32_t hdr_sz = 0;
        byte_view tail{};
    } z_info;

    // AVB structs
    const AvbFooter *avb_footer = nullptr;
    const AvbVBMetaImageHeader *vbmeta = nullptr;

    // Pointers to blocks defined in header
    const uint8_t *kernel = nullptr;
    const uint8_t *ramdisk = nullptr;
    const uint8_t *second = nullptr;
    const uint8_t *extra = nullptr;
    const uint8_t *recovery_dtbo = nullptr;
    const uint8_t *dtb = nullptr;
    const uint8_t *signature = nullptr;
    const uint8_t *vendor_ramdisk_table = nullptr;
    const uint8_t *bootconfig = nullptr;

    // dtb embedded in kernel
    byte_view kernel_dtb;

    explicit boot_img(const char *);
    ~boot_img();

    bool parse_image(const uint8_t *addr, FileFormat type);
    void parse_zimage();
    const uint8_t *parse_hdr(const uint8_t *addr, FileFormat type);
    std::span<const vendor_ramdisk_table_entry_v4> vendor_ramdisk_tbl() const;

    // Rust FFI
    static std::unique_ptr<boot_img> create(Utf8CStr name) { return std::make_unique<boot_img>(name.c_str()); }
    rust::Slice<const uint8_t> get_payload() const { return payload; }
    rust::Slice<const uint8_t> get_tail() const { return tail; }
    bool is_signed() const { return flags[AVB1_SIGNED_FLAG]; }
    // Offset of the tail section from the start of the file
    uint64_t tail_off() const { return tail.data() - map.data(); }

    // Implemented in Rust
    bool verify() const noexcept;
};

28
native/src/boot/build.rs Normal file
View file

@ -0,0 +1,28 @@
use pb_rs::ConfigBuilder;
use pb_rs::types::FileDescriptor;
use crate::codegen::gen_cxx_binding;
#[path = "../include/codegen.rs"]
mod codegen;
/// Build script: generates the C++ side of the cxx FFI bridge and compiles
/// proto/update_metadata.proto into Rust sources under proto/.
fn main() {
    // Re-run codegen only when the payload metadata proto changes.
    println!("cargo:rerun-if-changed=proto/update_metadata.proto");

    gen_cxx_binding("boot-rs");

    // Configure pb-rs: read the proto from the crate root, emit into proto/.
    let cb = ConfigBuilder::new(
        &["proto/update_metadata.proto"],
        None,
        Some(&"proto"),
        &["."],
    )
    .unwrap();
    FileDescriptor::run(
        &cb.single_module(true)
            .dont_use_cow(true)
            .generate_getters(true)
            .build(),
    )
    .unwrap();
}

446
native/src/boot/cli.rs Normal file
View file

@ -0,0 +1,446 @@
use crate::compress::{compress_cmd, decompress_cmd};
use crate::cpio::{cpio_commands, print_cpio_usage};
use crate::dtb::{DtbAction, dtb_commands, print_dtb_usage};
use crate::ffi::{BootImage, FileFormat, cleanup, repack, split_image_dtb, unpack};
use crate::patch::hexpatch;
use crate::payload::extract_boot_from_payload;
use crate::sign::{sha1_hash, sign_boot_image};
use argh::{CommandInfo, EarlyExit, FromArgs, SubCommand};
use base::libc::umask;
use base::nix::fcntl::OFlag;
use base::{
CmdArgs, EarlyExitExt, LoggedResult, MappedFile, PositionalArgParser, ResultExt, Utf8CStr,
Utf8CString, WriteExt, argh, cmdline_logging, cstr, log_err,
};
use std::ffi::c_char;
use std::io::{Seek, SeekFrom, Write};
use std::str::FromStr;
// Top-level CLI parser: exactly one subcommand is required.
// NOTE: `//` comments only — `///` doc comments would become argh help text
// and change the program's output.
#[derive(FromArgs)]
struct Cli {
    #[argh(subcommand)]
    action: Action,
}

// One variant per supported magiskboot action.
#[derive(FromArgs)]
#[argh(subcommand)]
enum Action {
    Unpack(Unpack),
    Repack(Repack),
    Verify(Verify),
    Sign(Sign),
    Extract(Extract),
    HexPatch(HexPatch),
    Cpio(Cpio),
    Dtb(Dtb),
    Split(Split),
    Sha1(Sha1),
    Cleanup(Cleanup),
    Compress(Compress),
    Decompress(Decompress),
}
// Per-subcommand argument structs. See print_usage() for the user-facing
// documentation of each flag; `//` comments are used deliberately so argh
// help output is unchanged.

// `unpack [-n] [-h] <img>`
#[derive(FromArgs)]
#[argh(subcommand, name = "unpack")]
struct Unpack {
    // -n: skip decompression of components
    #[argh(switch, short = 'n', long = none)]
    no_decompress: bool,
    // -h: dump the header to the file 'header'
    #[argh(switch, short = 'h', long = none)]
    dump_header: bool,
    #[argh(positional)]
    img: Utf8CString,
}

// `repack [-n] <origimg> [out]`
#[derive(FromArgs)]
#[argh(subcommand, name = "repack")]
struct Repack {
    // -n: skip recompression of components
    #[argh(switch, short = 'n', long = none)]
    no_compress: bool,
    #[argh(positional)]
    img: Utf8CString,
    #[argh(positional)]
    out: Option<Utf8CString>,
}

// `verify <img> [x509.pem]`
#[derive(FromArgs)]
#[argh(subcommand, name = "verify")]
struct Verify {
    #[argh(positional)]
    img: Utf8CString,
    #[argh(positional)]
    cert: Option<Utf8CString>,
}

// `sign <img> [name] [x509.pem pk8]`
#[derive(FromArgs)]
#[argh(subcommand, name = "sign")]
struct Sign {
    #[argh(positional)]
    img: Utf8CString,
    #[argh(positional)]
    name: Option<Utf8CString>,
    #[argh(positional)]
    cert: Option<Utf8CString>,
    #[argh(positional)]
    key: Option<Utf8CString>,
}

// `extract <payload.bin> [partition] [outfile]`
#[derive(FromArgs)]
#[argh(subcommand, name = "extract")]
struct Extract {
    #[argh(positional)]
    payload: Utf8CString,
    #[argh(positional)]
    partition: Option<Utf8CString>,
    #[argh(positional)]
    outfile: Option<Utf8CString>,
}

// `hexpatch <file> <src> <dest>`
#[derive(FromArgs)]
#[argh(subcommand, name = "hexpatch")]
struct HexPatch {
    #[argh(positional)]
    file: Utf8CString,
    #[argh(positional)]
    src: Utf8CString,
    #[argh(positional)]
    dest: Utf8CString,
}

// `cpio <incpio> [commands...]`
#[derive(FromArgs)]
#[argh(subcommand, name = "cpio")]
struct Cpio {
    #[argh(positional)]
    file: Utf8CString,
    #[argh(positional)]
    cmds: Vec<String>,
}

// `dtb <file> <action> [args...]`
#[derive(FromArgs)]
#[argh(subcommand, name = "dtb")]
struct Dtb {
    #[argh(positional)]
    file: Utf8CString,
    #[argh(subcommand)]
    action: DtbAction,
}

// `split [-n] <file>`
#[derive(FromArgs)]
#[argh(subcommand, name = "split")]
struct Split {
    // -n: skip decompression of the kernel
    #[argh(switch, short = 'n', long = none)]
    no_decompress: bool,
    #[argh(positional)]
    file: Utf8CString,
}

// `sha1 <file>`
#[derive(FromArgs)]
#[argh(subcommand, name = "sha1")]
struct Sha1 {
    #[argh(positional)]
    file: Utf8CString,
}

// `cleanup` (no arguments)
#[derive(FromArgs)]
#[argh(subcommand, name = "cleanup")]
struct Cleanup {}

// `compress[=format] <file> [out]`: no derive here — the format is embedded
// in the subcommand name itself, so FromArgs is implemented manually below.
struct Compress {
    format: FileFormat,
    file: Utf8CString,
    out: Option<Utf8CString>,
}
impl FromArgs for Compress {
    /// Manual parser: the invoked subcommand may be either "compress" or
    /// "compress=<format>"; the format defaults to gzip when absent.
    fn from_args(command_name: &[&str], args: &[&str]) -> Result<Self, EarlyExit> {
        // The subcommand token is the last element of the command path.
        let cmd = command_name.last().copied().unwrap_or_default();
        let fmt = cmd.strip_prefix("compress=").unwrap_or("gzip");
        let fmt = match FileFormat::from_str(fmt) {
            Ok(parsed) => parsed,
            Err(_) => {
                return Err(EarlyExit::from(format!(
                    "Unsupported or unknown compression format: {fmt}\n"
                )));
            }
        };
        // Positionals: a required input file and an optional output file.
        let mut positionals = PositionalArgParser(args.iter());
        let file = positionals.required("infile")?;
        let out = positionals.last_optional()?;
        Ok(Compress { format: fmt, file, out })
    }
}
// Register `compress` as a subcommand name so the derived Action enum can
// route to it; the description is left empty because usage is printed by
// print_usage() instead of argh's generated help.
impl SubCommand for Compress {
    const COMMAND: &'static CommandInfo = &CommandInfo {
        name: "compress",
        description: "",
    };
}
// `decompress <file> [out]`: format is auto-detected, so unlike Compress
// this can use the regular derived parser.
#[derive(FromArgs)]
#[argh(subcommand, name = "decompress")]
struct Decompress {
    #[argh(positional)]
    file: Utf8CString,
    #[argh(positional)]
    out: Option<Utf8CString>,
}
/// Print the full magiskboot usage text to stderr.
/// `cmd` (argv[0]) fills `{0}`; the supported compression format list
/// fills both `{1}` placeholders.
fn print_usage(cmd: &str) {
    eprintln!(
        r#"MagiskBoot - Boot Image Modification Tool
Usage: {0} <action> [args...]
Supported actions:
unpack [-n] [-h] <bootimg>
Unpack <bootimg> to its individual components, each component to
a file with its corresponding file name in the current directory.
Supported components: kernel, kernel_dtb, ramdisk.cpio, second,
dtb, extra, and recovery_dtbo.
By default, each component will be decompressed on-the-fly.
If '-n' is provided, all decompression operations will be skipped;
each component will remain untouched, dumped in its original format.
If '-h' is provided, the boot image header information will be
dumped to the file 'header', which can be used to modify header
configurations during repacking.
Return values:
0:valid 1:error 2:chromeos
repack [-n] <origbootimg> [outbootimg]
Repack boot image components using files from the current directory
to [outbootimg], or 'new-boot.img' if not specified. Current directory
should only contain required files for [outbootimg], or incorrect
[outbootimg] may be produced.
<origbootimg> is the original boot image used to unpack the components.
By default, each component will be automatically compressed using its
corresponding format detected in <origbootimg>. If a component file
in the current directory is already compressed, then no addition
compression will be performed for that specific component.
If '-n' is provided, all compression operations will be skipped.
If env variable PATCHVBMETAFLAG is set to true, all disable flags in
the boot image's vbmeta header will be set.
verify <bootimg> [x509.pem]
Check whether the boot image is signed with AVB 1.0 signature.
Optionally provide a certificate to verify whether the image is
signed by the public key certificate.
Return value:
0:valid 1:error
sign <bootimg> [name] [x509.pem pk8]
Sign <bootimg> with AVB 1.0 signature.
Optionally provide the name of the image (default: '/boot').
Optionally provide the certificate/private key pair for signing.
If the certificate/private key pair is not provided, the AOSP
verity key bundled in the executable will be used.
extract <payload.bin> [partition] [outfile]
Extract [partition] from <payload.bin> to [outfile].
If [outfile] is not specified, then output to '[partition].img'.
If [partition] is not specified, then attempt to extract either
'init_boot' or 'boot'. Which partition was chosen can be determined
by whichever 'init_boot.img' or 'boot.img' exists.
<payload.bin> can be '-' to be STDIN.
hexpatch <file> <hexpattern1> <hexpattern2>
Search <hexpattern1> in <file>, and replace it with <hexpattern2>
cpio <incpio> [commands...]
Do cpio commands to <incpio> (modifications are done in-place).
Each command is a single argument; add quotes for each command.
See "cpio --help" for supported commands.
dtb <file> <action> [args...]
Do dtb related actions to <file>.
See "dtb --help" for supported actions.
split [-n] <file>
Split image.*-dtb into kernel + kernel_dtb.
If '-n' is provided, decompression operations will be skipped;
the kernel will remain untouched, split in its original format.
sha1 <file>
Print the SHA1 checksum for <file>
cleanup
Cleanup the current working directory
compress[=format] <infile> [outfile]
Compress <infile> with [format] to [outfile].
<infile>/[outfile] can be '-' to be STDIN/STDOUT.
If [format] is not specified, then gzip will be used.
If [outfile] is not specified, then <infile> will be replaced
with another file suffixed with a matching file extension.
Supported formats:
{1}
decompress <infile> [outfile]
Detect format and decompress <infile> to [outfile].
<infile>/[outfile] can be '-' to be STDIN/STDOUT.
If [outfile] is not specified, then <infile> will be replaced
with another file removing its archive format file extension.
Supported formats:
{1}
"#,
        cmd,
        FileFormat::formats()
    );
}
/// Verify the AVB 1.0 signature of the boot image at `image`.
/// Without a certificate, the signature check done at parse time is used;
/// with one, the image is re-verified against that public key.
fn verify_cmd(image: &Utf8CStr, cert: Option<&Utf8CStr>) -> bool {
    let image = BootImage::new(image);
    if cert.is_some() {
        // Provide a custom certificate and re-verify
        image.verify(cert).is_ok()
    } else {
        // Boot image parsing already checks if the image is signed
        image.is_signed()
    }
}
/// Sign the boot image at `image` in place with an AVB 1.0 signature.
///
/// `name` defaults to "/boot"; when `cert`/`key` are absent,
/// `sign_boot_image` falls back to the bundled AOSP verity key.
/// The signature blob overwrites the image tail and any leftover bytes
/// after it are zeroed so stale tail data cannot survive.
fn sign_cmd(
    image: &Utf8CStr,
    name: Option<&Utf8CStr>,
    cert: Option<&Utf8CStr>,
    key: Option<&Utf8CStr>,
) -> LoggedResult<()> {
    let img = BootImage::new(image);
    let name = name.unwrap_or(cstr!("/boot"));
    let sig = sign_boot_image(img.payload(), name, cert, key)?;
    let tail_off = img.tail_off();
    // Release the parsed image (and its mapping) before reopening to write.
    drop(img);
    let mut fd = image.open(OFlag::O_WRONLY | OFlag::O_CLOEXEC)?;
    fd.seek(SeekFrom::Start(tail_off))?;
    fd.write_all(&sig)?;
    let current = fd.stream_position()?;
    let eof = fd.seek(SeekFrom::End(0))?;
    if eof > current {
        // Zero out rest of the file
        fd.seek(SeekFrom::Start(current))?;
        fd.write_zeros((eof - current) as usize)?;
    }
    Ok(())
}
/// CLI driver: parse the argument vector and dispatch to the matching
/// subcommand handler. Returns the process exit code (0 on success);
/// errors are logged and surfaced through LoggedResult.
fn boot_main(cmds: CmdArgs) -> LoggedResult<i32> {
    let mut cmds = cmds.0;
    if cmds.len() < 2 {
        print_usage(cmds.first().unwrap_or(&"magiskboot"));
        return log_err!();
    }

    // Accept "--action" as an alias for "action".
    if cmds[1].starts_with("--") {
        cmds[1] = &cmds[1][2..];
    }

    let cli = if cmds[1].starts_with("compress=") {
        // Skip the main parser, directly parse the subcommand
        Compress::from_args(&cmds[..2], &cmds[2..]).map(|compress| Cli {
            action: Action::Compress(compress),
        })
    } else {
        Cli::from_args(&[cmds[0]], &cmds[1..])
    }
    .on_early_exit(|| match cmds[1] {
        "dtb" => print_dtb_usage(),
        "cpio" => print_cpio_usage(),
        _ => print_usage(cmds[0]),
    });

    match cli.action {
        Action::Unpack(Unpack {
            no_decompress,
            dump_header,
            img,
        }) => {
            // unpack's return value is the exit code (e.g. 2 = chromeos).
            return Ok(unpack(&img, no_decompress, dump_header));
        }
        Action::Repack(Repack {
            no_compress,
            img,
            out,
        }) => {
            repack(
                &img,
                out.as_deref().unwrap_or(cstr!("new-boot.img")),
                no_compress,
            );
        }
        Action::Verify(Verify { img, cert }) => {
            if !verify_cmd(&img, cert.as_deref()) {
                return log_err!();
            }
        }
        Action::Sign(Sign {
            img,
            name,
            cert,
            key,
        }) => {
            sign_cmd(&img, name.as_deref(), cert.as_deref(), key.as_deref())?;
        }
        Action::Extract(Extract {
            payload,
            partition,
            outfile,
        }) => {
            extract_boot_from_payload(
                &payload,
                partition.as_ref().map(AsRef::as_ref),
                outfile.as_ref().map(AsRef::as_ref),
            )
            .log_with_msg(|w| w.write_str("Failed to extract from payload"))?;
        }
        Action::HexPatch(HexPatch { file, src, dest }) => {
            if !hexpatch(&file, &src, &dest) {
                log_err!("Failed to patch")?;
            }
        }
        Action::Cpio(Cpio { file, cmds }) => {
            cpio_commands(&file, &cmds).log_with_msg(|w| w.write_str("Failed to process cpio"))?;
        }
        Action::Dtb(Dtb { file, action }) => {
            return dtb_commands(&file, &action)
                .map(|b| if b { 0 } else { 1 })
                .log_with_msg(|w| w.write_str("Failed to process dtb"));
        }
        Action::Split(Split {
            no_decompress,
            file,
        }) => {
            return Ok(split_image_dtb(&file, no_decompress));
        }
        Action::Sha1(Sha1 { file }) => {
            // Hash the whole file via mmap and print as lowercase hex.
            let file = MappedFile::open(&file)?;
            let mut sha1 = [0u8; 20];
            sha1_hash(file.as_ref(), &mut sha1);
            for byte in &sha1 {
                print!("{byte:02x}");
            }
            println!();
        }
        Action::Cleanup(_) => {
            eprintln!("Cleaning up...");
            cleanup();
        }
        Action::Decompress(Decompress { file, out }) => {
            decompress_cmd(&file, out.as_deref())?;
        }
        Action::Compress(Compress { format, file, out }) => {
            compress_cmd(format, &file, out.as_deref())?;
        }
    }
    Ok(0)
}
/// C ABI entry point for the magiskboot executable: initialize command-line
/// logging, clear the umask so created files keep their exact modes, then
/// run the CLI (exit code 1 on any logged failure).
#[unsafe(no_mangle)]
pub extern "C" fn main(argc: i32, argv: *const *const c_char, _envp: *const *const c_char) -> i32 {
    cmdline_logging();
    unsafe { umask(0) };
    let cmds = CmdArgs::new(argc, argv);
    boot_main(cmds).unwrap_or(1)
}

395
native/src/boot/compress.rs Normal file
View file

@ -0,0 +1,395 @@
use crate::ffi::{FileFormat, check_fmt};
use base::nix::fcntl::OFlag;
use base::{
Chunker, FileOrStd, LoggedResult, ReadExt, ResultExt, Utf8CStr, Utf8CString, WriteExt, log_err,
};
use bzip2::Compression as BzCompression;
use bzip2::read::BzDecoder;
use bzip2::write::BzEncoder;
use flate2::Compression as GzCompression;
use flate2::read::MultiGzDecoder;
use flate2::write::GzEncoder;
use lz4::block::CompressionMode;
use lz4::liblz4::BlockChecksum;
use lz4::{
BlockMode, BlockSize, ContentChecksum, Decoder as LZ4FrameDecoder, Encoder as LZ4FrameEncoder,
EncoderBuilder as LZ4FrameEncoderBuilder,
};
use lzma_rust2::{CheckType, LzmaOptions, LzmaReader, LzmaWriter, XzOptions, XzReader, XzWriter};
use std::cmp::min;
use std::fmt::Write as FmtWrite;
use std::fs::File;
use std::io::{BufWriter, Cursor, Read, Write};
use std::mem::ManuallyDrop;
use std::num::NonZeroU64;
use std::ops::DerefMut;
use std::os::fd::{FromRawFd, RawFd};
use zopfli::{BlockType, GzipEncoder as ZopFliEncoder, Options as ZopfliOptions};
/// A writer that must be explicitly finalized to flush trailing data
/// (footers, checksums) and recover the inner writer `W`.
pub trait WriteFinish<W: Write>: Write {
    fn finish(self: Box<Self>) -> std::io::Result<W>;
}
// Boilerplate for existing types: forward the boxed trait-object finish()
// to each encoder's inherent finish() method.
macro_rules! finish_impl {
    ($($t:ty),*) => {$(
        impl<W: Write> WriteFinish<W> for $t {
            fn finish(self: Box<Self>) -> std::io::Result<W> {
                Self::finish(*self)
            }
        }
    )*}
}
finish_impl!(GzEncoder<W>, BzEncoder<W>, XzWriter<W>, LzmaWriter<W>);
// Zopfli is wrapped in a BufWriter; unwrap the buffer before finishing.
impl<W: Write> WriteFinish<W> for BufWriter<ZopFliEncoder<W>> {
    fn finish(self: Box<Self>) -> std::io::Result<W> {
        let inner = self.into_inner()?;
        ZopFliEncoder::finish(inner)
    }
}
// The LZ4 frame encoder returns the writer and its final status separately.
impl<W: Write> WriteFinish<W> for LZ4FrameEncoder<W> {
    fn finish(self: Box<Self>) -> std::io::Result<W> {
        let (w, r) = Self::finish(*self);
        r?;
        Ok(w)
    }
}
// LZ4BlockArchive format
//
// len:  | 4     | 4                     | n                     | ... | 4                       |
// data: | magic | compressed block size | compressed block data | ... | total uncompressed size |
//
// The trailing total-uncompressed-size word is only emitted for the LG variant.

// LZ4BlockEncoder
// Uncompressed chunk size per block (8 MiB).
const LZ4_BLOCK_SIZE: usize = 0x800000;
// Maximum LZ4HC compression level.
const LZ4HC_CLEVEL_MAX: i32 = 12;
// Legacy frame magic number.
const LZ4_MAGIC: u32 = 0x184c2102;

/// Streaming encoder for the LZ4 legacy block format (and its LG variant).
struct LZ4BlockEncoder<W: Write> {
    write: W,           // underlying output
    chunker: Chunker,   // accumulates input into LZ4_BLOCK_SIZE chunks
    out_buf: Box<[u8]>, // scratch space for one compressed block
    total: u32,         // running total of uncompressed bytes seen
    is_lg: bool,        // whether to append the LG total-size trailer
}
impl<W: Write> LZ4BlockEncoder<W> {
    fn new(write: W, is_lg: bool) -> Self {
        // Worst-case compressed size for a full block.
        let out_sz = lz4::block::compress_bound(LZ4_BLOCK_SIZE).unwrap_or(LZ4_BLOCK_SIZE);
        LZ4BlockEncoder {
            write,
            chunker: Chunker::new(LZ4_BLOCK_SIZE),
            // SAFETY: all bytes will be initialized before it is used
            out_buf: unsafe { Box::new_uninit_slice(out_sz).assume_init() },
            total: 0,
            is_lg,
        }
    }
    // Compress one chunk with LZ4HC and emit `<u32 compressed size><data>`.
    fn encode_block(write: &mut W, out_buf: &mut [u8], chunk: &[u8]) -> std::io::Result<()> {
        let compressed_size = lz4::block::compress_to_buffer(
            chunk,
            Some(CompressionMode::HIGHCOMPRESSION(LZ4HC_CLEVEL_MAX)),
            false,
            out_buf,
        )?;
        let block_size = compressed_size as u32;
        write.write_pod(&block_size)?;
        write.write_all(&out_buf[..compressed_size])
    }
}
impl<W: Write> Write for LZ4BlockEncoder<W> {
    // `write` always consumes the whole buffer via `write_all`.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.write_all(buf)?;
        Ok(buf.len())
    }
    // Blocks are only emitted when a full chunk is ready or on finish();
    // this format has no meaningful partial flush.
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
    fn write_all(&mut self, mut buf: &[u8]) -> std::io::Result<()> {
        if self.total == 0 {
            // Write header
            // NOTE(review): a zero-length first write leaves total == 0, so a
            // later write would emit the magic a second time — confirm callers
            // (e.g. io::copy) never issue empty writes first.
            self.write.write_pod(&LZ4_MAGIC)?;
        }
        // Track the uncompressed size for the LG trailer (see finish()).
        self.total += buf.len() as u32;
        while !buf.is_empty() {
            // Feed the chunker; compress each full chunk as it becomes ready.
            let (b, chunk) = self.chunker.add_data(buf);
            buf = b;
            if let Some(chunk) = chunk {
                Self::encode_block(&mut self.write, &mut self.out_buf, chunk)?;
            }
        }
        Ok(())
    }
}
impl<W: Write> WriteFinish<W> for LZ4BlockEncoder<W> {
    fn finish(mut self: Box<Self>) -> std::io::Result<W> {
        // Flush the final partial chunk, if any.
        let chunk = self.chunker.get_available();
        if !chunk.is_empty() {
            Self::encode_block(&mut self.write, &mut self.out_buf, chunk)?;
        }
        // The LG variant appends the total uncompressed size as a trailer.
        if self.is_lg {
            self.write.write_pod(&self.total)?;
        }
        Ok(self.write)
    }
}
// LZ4BlockDecoder
/// Streaming decoder for the LZ4 legacy block format.
struct LZ4BlockDecoder<R: Read> {
    read: R,
    in_buf: Box<[u8]>,  // holds one compressed block
    out_buf: Box<[u8]>, // holds one decompressed block
    out_len: usize,     // number of valid bytes in out_buf
    out_pos: usize,     // read cursor into out_buf
}
impl<R: Read> LZ4BlockDecoder<R> {
    fn new(read: R) -> Self {
        // Largest possible compressed block for LZ4_BLOCK_SIZE of input.
        let compressed_sz = lz4::block::compress_bound(LZ4_BLOCK_SIZE).unwrap_or(LZ4_BLOCK_SIZE);
        Self {
            read,
            // SAFETY: bytes are filled by read/decompress before being consumed
            in_buf: unsafe { Box::new_uninit_slice(compressed_sz).assume_init() },
            out_buf: unsafe { Box::new_uninit_slice(LZ4_BLOCK_SIZE).assume_init() },
            out_len: 0,
            out_pos: 0,
        }
    }
}
impl<R: Read> Read for LZ4BlockDecoder<R> {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        // Refill out_buf with the next decompressed block when drained.
        if self.out_pos == self.out_len {
            let mut block_size: u32 = 0;
            if let Err(e) = self.read.read_pod(&mut block_size) {
                // Clean EOF between blocks terminates the stream normally.
                return if e.kind() == std::io::ErrorKind::UnexpectedEof {
                    Ok(0)
                } else {
                    Err(e)
                };
            }
            // Skip over a (possibly repeated) magic word between archives.
            if block_size == LZ4_MAGIC {
                self.read.read_pod(&mut block_size)?;
            }
            let block_size = block_size as usize;
            if block_size > self.in_buf.len() {
                // This may be the LG format trailer, EOF
                return Ok(0);
            }
            // Read the entire compressed block
            let compressed_block = &mut self.in_buf[..block_size];
            // NOTE(review): an Err from this read() is silently ignored and the
            // (possibly stale) buffer is decompressed anyway — confirm intended.
            if let Ok(len) = self.read.read(compressed_block) {
                if len == 0 {
                    // We hit EOF, that's fine
                    return Ok(0);
                } else if len != block_size {
                    // Short read: fetch the remainder of the block.
                    let remain = &mut compressed_block[len..];
                    self.read.read_exact(remain)?;
                }
            }
            self.out_len = lz4::block::decompress_to_buffer(
                compressed_block,
                Some(LZ4_BLOCK_SIZE as i32),
                &mut self.out_buf,
            )?;
            self.out_pos = 0;
        }
        // Serve as much buffered output as fits in `buf`.
        let copy_len = min(buf.len(), self.out_len - self.out_pos);
        buf[..copy_len].copy_from_slice(&self.out_buf[self.out_pos..self.out_pos + copy_len]);
        self.out_pos += copy_len;
        Ok(copy_len)
    }
}
// Top-level APIs
/// Construct a streaming encoder for `format` that writes into `w`.
/// Panics via `unreachable!` if `format` is not a compression format.
pub fn get_encoder<'a, W: Write + 'a>(format: FileFormat, w: W) -> Box<dyn WriteFinish<W> + 'a> {
    match format {
        FileFormat::XZ => {
            // Preset 9 with CRC32 check — presumably for broader decoder
            // compatibility than the CRC64 default; confirm.
            let mut opt = XzOptions::with_preset(9);
            opt.set_check_sum_type(CheckType::Crc32);
            Box::new(XzWriter::new(w, opt).unwrap())
        }
        FileFormat::LZMA => {
            Box::new(LzmaWriter::new_use_header(w, &LzmaOptions::with_preset(9), None).unwrap())
        }
        FileFormat::BZIP2 => Box::new(BzEncoder::new(w, BzCompression::best())),
        FileFormat::LZ4 => {
            // Standard LZ4 frame format with both content and block checksums.
            let encoder = LZ4FrameEncoderBuilder::new()
                .block_size(BlockSize::Max4MB)
                .block_mode(BlockMode::Independent)
                .checksum(ContentChecksum::ChecksumEnabled)
                .block_checksum(BlockChecksum::BlockChecksumEnabled)
                .level(9)
                .auto_flush(true)
                .build(w)
                .unwrap();
            Box::new(encoder)
        }
        FileFormat::LZ4_LEGACY => Box::new(LZ4BlockEncoder::new(w, false)),
        FileFormat::LZ4_LG => Box::new(LZ4BlockEncoder::new(w, true)),
        FileFormat::ZOPFLI => {
            // These options are already better than gzip -9
            let opt = ZopfliOptions {
                iteration_count: NonZeroU64::new(1).unwrap(),
                maximum_block_splits: 1,
                ..Default::default()
            };
            Box::new(ZopFliEncoder::new_buffered(opt, BlockType::Dynamic, w).unwrap())
        }
        FileFormat::GZIP => Box::new(GzEncoder::new(w, GzCompression::best())),
        _ => unreachable!(),
    }
}
/// Construct a streaming decoder for `format` that reads from `r`.
/// Panics via `unreachable!` if `format` is not a compression format.
pub fn get_decoder<'a, R: Read + 'a>(format: FileFormat, r: R) -> Box<dyn Read + 'a> {
    match format {
        FileFormat::XZ => Box::new(XzReader::new(r, true)),
        FileFormat::LZMA => Box::new(LzmaReader::new_mem_limit(r, u32::MAX, None).unwrap()),
        FileFormat::BZIP2 => Box::new(BzDecoder::new(r)),
        FileFormat::LZ4 => Box::new(LZ4FrameDecoder::new(r).unwrap()),
        // Both legacy variants share the same block decoder.
        FileFormat::LZ4_LG | FileFormat::LZ4_LEGACY => Box::new(LZ4BlockDecoder::new(r)),
        // Zopfli output is plain gzip, so one decoder covers both.
        FileFormat::ZOPFLI | FileFormat::GZIP => Box::new(MultiGzDecoder::new(r)),
        _ => unreachable!(),
    }
}
// C++ FFI
/// Compress `in_bytes` with `format` and write the result to `out_fd`.
/// The fd is owned by the C++ caller; ManuallyDrop prevents closing it here.
pub fn compress_bytes(format: FileFormat, in_bytes: &[u8], out_fd: RawFd) {
    let mut out_file = unsafe { ManuallyDrop::new(File::from_raw_fd(out_fd)) };
    let mut encoder = get_encoder(format, out_file.deref_mut());
    // Errors are logged and swallowed; this FFI entry has no return value.
    let _: LoggedResult<()> = try {
        encoder.write_all(in_bytes)?;
        encoder.finish()?;
    };
}
/// Decompress `in_bytes` with `format` and write the result to `out_fd`.
/// The fd is owned by the C++ caller; ManuallyDrop prevents closing it here.
pub fn decompress_bytes(format: FileFormat, in_bytes: &[u8], out_fd: RawFd) {
    let mut out_file = unsafe { ManuallyDrop::new(File::from_raw_fd(out_fd)) };
    let mut decoder = get_decoder(format, in_bytes);
    std::io::copy(decoder.as_mut(), out_file.deref_mut()).log_ok();
}
// Command-line entry points
/// `magiskboot decompress` implementation.
///
/// Reads `infile` ("-" for stdin), auto-detects the compression format from
/// the first bytes, and decompresses to `outfile`. When no outfile is given
/// and input is a real file, the extension is stripped to form the output
/// name and the input file is removed afterwards.
pub(crate) fn decompress_cmd(infile: &Utf8CStr, outfile: Option<&Utf8CStr>) -> LoggedResult<()> {
    let in_std = infile == "-";
    let mut rm_in = false;
    let mut buf = [0u8; 4096];
    let input = if in_std {
        FileOrStd::StdIn
    } else {
        FileOrStd::File(infile.open(OFlag::O_RDONLY)?)
    };
    // First read some bytes for format detection
    // NOTE(review): a single read() may return fewer bytes than available;
    // presumably enough for magic detection — confirm.
    let len = input.as_file().read(&mut buf)?;
    let buf = &buf[..len];
    let format = check_fmt(buf);
    eprintln!("Detected format: {format}");
    if !format.is_compressed() {
        return log_err!("Input file is not a supported type!");
    }
    // If user did not provide outfile, infile has to be either
    // <path>.[ext], or "-". Outfile will be either <path> or "-".
    // If the input does not have proper format, abort.
    let output = if let Some(outfile) = outfile {
        if outfile == "-" {
            FileOrStd::StdOut
        } else {
            FileOrStd::File(outfile.create(OFlag::O_WRONLY | OFlag::O_TRUNC, 0o644)?)
        }
    } else if in_std {
        FileOrStd::StdOut
    } else {
        // Strip out extension and remove input
        let outfile = if let Some((outfile, ext)) = infile.rsplit_once('.')
            && ext == format.ext()
        {
            Utf8CString::from(outfile)
        } else {
            return log_err!("Input file is not a supported type!");
        };
        rm_in = true;
        eprintln!("Decompressing to [{outfile}]");
        FileOrStd::File(outfile.create(OFlag::O_WRONLY | OFlag::O_TRUNC, 0o644)?)
    };
    // Replay the sniffed bytes ahead of the rest of the input stream.
    let mut decoder = get_decoder(format, Cursor::new(buf).chain(input.as_file()));
    std::io::copy(decoder.as_mut(), &mut output.as_file())?;
    if rm_in {
        infile.remove()?;
    }
    Ok(())
}
/// `magiskboot compress` implementation.
///
/// Compresses `infile` ("-" for stdin) with `method` into `outfile`. When no
/// outfile is given and input is a real file, the format's extension is
/// appended to form the output name and the input file is removed afterwards.
pub(crate) fn compress_cmd(
    method: FileFormat,
    infile: &Utf8CStr,
    outfile: Option<&Utf8CStr>,
) -> LoggedResult<()> {
    let in_std = infile == "-";
    let mut rm_in = false;
    let input = if in_std {
        FileOrStd::StdIn
    } else {
        FileOrStd::File(infile.open(OFlag::O_RDONLY)?)
    };
    let output = if let Some(outfile) = outfile {
        if outfile == "-" {
            FileOrStd::StdOut
        } else {
            FileOrStd::File(outfile.create(OFlag::O_WRONLY | OFlag::O_TRUNC, 0o644)?)
        }
    } else if in_std {
        FileOrStd::StdOut
    } else {
        // Derive output name as <infile>.<ext>, then remove the input later.
        let mut outfile = Utf8CString::default();
        outfile.write_str(infile).ok();
        outfile.write_char('.').ok();
        outfile.write_str(method.ext()).ok();
        eprintln!("Compressing to [{outfile}]");
        rm_in = true;
        let outfile = outfile.create(OFlag::O_WRONLY | OFlag::O_TRUNC, 0o644)?;
        FileOrStd::File(outfile)
    };
    let mut encoder = get_encoder(method, output.as_file());
    std::io::copy(&mut input.as_file(), encoder.as_mut())?;
    // finish() flushes format footers/checksums.
    encoder.finish()?;
    if rm_in {
        infile.remove()?;
    }
    Ok(())
}

845
native/src/boot/cpio.rs Normal file
View file

@ -0,0 +1,845 @@
#![allow(clippy::useless_conversion)]
use argh::FromArgs;
use base::argh;
use bytemuck::{Pod, Zeroable, from_bytes};
use num_traits::cast::AsPrimitive;
use size::{Base, Size, Style};
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap};
use std::fmt::{Display, Formatter};
use std::fs::File;
use std::io::{Cursor, Read, Write};
use std::mem::size_of;
use std::process::exit;
use std::str;
use crate::check_env;
use crate::compress::{get_decoder, get_encoder};
use crate::ffi::FileFormat;
use crate::patch::{patch_encryption, patch_verity};
use base::libc::{
S_IFBLK, S_IFCHR, S_IFDIR, S_IFLNK, S_IFMT, S_IFREG, S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP,
S_IWOTH, S_IWUSR, S_IXGRP, S_IXOTH, S_IXUSR, dev_t, gid_t, major, makedev, minor, mknod,
mode_t, uid_t,
};
use base::nix::fcntl::OFlag;
use base::{
BytesExt, EarlyExitExt, LoggedResult, MappedFile, OptionExt, ResultExt, Utf8CStr, Utf8CStrBuf,
WriteExt, cstr, log_err,
};
// Argument definitions for `magiskboot cpio` subcommands.
// NOTE: plain `//` comments are used deliberately — `///` doc comments would
// become part of argh's generated help text and change runtime output.
#[derive(FromArgs)]
struct CpioCommand {
    #[argh(subcommand)]
    action: CpioAction,
}
// One variant per supported cpio subcommand.
#[derive(FromArgs)]
#[argh(subcommand)]
enum CpioAction {
    Test(Test),
    Restore(Restore),
    Patch(Patch),
    Exists(Exists),
    Backup(Backup),
    Remove(Remove),
    Move(Move),
    Extract(Extract),
    MakeDir(MakeDir),
    Link(Link),
    Add(Add),
    List(List),
}
// `test`: report the ramdisk's patch status via the exit code.
#[derive(FromArgs)]
#[argh(subcommand, name = "test")]
struct Test {}
// `restore`: restore the stock ramdisk from the embedded backup.
#[derive(FromArgs)]
#[argh(subcommand, name = "restore")]
struct Restore {}
// `patch`: apply ramdisk patches (env: KEEPVERITY, KEEPFORCEENCRYPT).
#[derive(FromArgs)]
#[argh(subcommand, name = "patch")]
struct Patch {}
// `exists ENTRY`: exit 0 if the entry exists, 1 otherwise.
#[derive(FromArgs)]
#[argh(subcommand, name = "exists")]
struct Exists {
    #[argh(positional, arg_name = "entry")]
    path: String,
}
// `backup ORIG [-n]`: create backup entries by diffing against ORIG.
#[derive(FromArgs)]
#[argh(subcommand, name = "backup")]
struct Backup {
    #[argh(positional, arg_name = "orig")]
    origin: String,
    #[argh(switch, short = 'n')]
    skip_compress: bool,
}
// `rm [-r] ENTRY`: remove an entry, optionally recursively.
#[derive(FromArgs)]
#[argh(subcommand, name = "rm")]
struct Remove {
    #[argh(positional, arg_name = "entry")]
    path: String,
    #[argh(switch, short = 'r')]
    recursive: bool,
}
// `mv SOURCE DEST`: rename an entry.
#[derive(FromArgs)]
#[argh(subcommand, name = "mv")]
struct Move {
    #[argh(positional, arg_name = "source")]
    from: String,
    #[argh(positional, arg_name = "dest")]
    to: String,
}
// `extract [ENTRY OUT]`: extract one entry, or everything when no args.
#[derive(FromArgs)]
#[argh(subcommand, name = "extract")]
struct Extract {
    #[argh(positional, greedy)]
    paths: Vec<String>,
}
// `mkdir MODE ENTRY`: add a directory entry with octal MODE.
#[derive(FromArgs)]
#[argh(subcommand, name = "mkdir")]
struct MakeDir {
    #[argh(positional, from_str_fn(parse_mode))]
    mode: mode_t,
    #[argh(positional, arg_name = "entry")]
    dir: String,
}
// `ln TARGET ENTRY`: add a symlink entry.
#[derive(FromArgs)]
#[argh(subcommand, name = "ln")]
struct Link {
    #[argh(positional, arg_name = "entry")]
    src: String,
    #[argh(positional, arg_name = "target")]
    dst: String,
}
// `add MODE ENTRY INFILE`: insert/replace an entry with a host file.
#[derive(FromArgs)]
#[argh(subcommand, name = "add")]
struct Add {
    #[argh(positional, from_str_fn(parse_mode))]
    mode: mode_t,
    #[argh(positional, arg_name = "entry")]
    path: String,
    #[argh(positional, arg_name = "infile")]
    file: String,
}
// `ls [-r] [PATH]`: list entries under PATH (default "/").
#[derive(FromArgs)]
#[argh(subcommand, name = "ls")]
struct List {
    #[argh(positional, default = r#"String::from("/")"#)]
    path: String,
    #[argh(switch, short = 'r')]
    recursive: bool,
}
/// Print the `magiskboot cpio` usage text to stderr.
/// The body is a single runtime string; it must not be altered.
pub(crate) fn print_cpio_usage() {
    eprintln!(
        r#"Usage: magiskboot cpio <incpio> [commands...]
Do cpio commands to <incpio> (modifications are done in-place).
Each command is a single argument; add quotes for each command.
Supported commands:
  exists ENTRY
    Return 0 if ENTRY exists, else return 1
  ls [-r] [PATH]
    List PATH ("/" by default); specify [-r] to list recursively
  rm [-r] ENTRY
    Remove ENTRY, specify [-r] to remove recursively
  mkdir MODE ENTRY
    Create directory ENTRY with permissions MODE
  ln TARGET ENTRY
    Create a symlink to TARGET with the name ENTRY
  mv SOURCE DEST
    Move SOURCE to DEST
  add MODE ENTRY INFILE
    Add INFILE as ENTRY with permissions MODE; replaces ENTRY if exists
  extract [ENTRY OUT]
    Extract ENTRY to OUT, or extract all entries to current directory
  test
    Test the cpio's status. Return values:
    0:stock 1:Magisk 2:unsupported
  patch
    Apply ramdisk patches
    Configure with env variables: KEEPVERITY KEEPFORCEENCRYPT
  backup ORIG [-n]
    Create ramdisk backups from ORIG, specify [-n] to skip compression
  restore
    Restore ramdisk from ramdisk backup stored within incpio
"#
    )
}
/// On-disk newc ("070701") cpio header: 6-byte magic followed by thirteen
/// 8-character ASCII hex fields (parsed with `x8u`).
#[derive(Copy, Clone, Pod, Zeroable)]
#[repr(C, packed)]
struct CpioHeader {
    magic: [u8; 6],
    ino: [u8; 8],
    mode: [u8; 8],
    uid: [u8; 8],
    gid: [u8; 8],
    nlink: [u8; 8],
    mtime: [u8; 8],
    filesize: [u8; 8],
    devmajor: [u8; 8],
    devminor: [u8; 8],
    rdevmajor: [u8; 8],
    rdevminor: [u8; 8],
    namesize: [u8; 8],
    check: [u8; 8],
}
/// In-memory cpio archive: path -> entry, kept sorted by the BTreeMap so
/// dumps are deterministic.
struct Cpio {
    entries: BTreeMap<String, Box<CpioEntry>>,
}
/// One archive member. `data` holds file contents (or the symlink target).
struct CpioEntry {
    mode: mode_t,
    uid: uid_t,
    gid: gid_t,
    rdevmajor: dev_t,
    rdevminor: dev_t,
    data: Vec<u8>,
}
impl Cpio {
    /// Empty archive.
    fn new() -> Self {
        Self {
            entries: BTreeMap::new(),
        }
    }
    /// Parse a newc-format cpio archive from memory. Concatenated archives
    /// are supported: after "TRAILER!!!" we scan forward for the next magic.
    fn load_from_data(data: &[u8]) -> LoggedResult<Self> {
        let mut cpio = Cpio::new();
        let mut pos = 0_usize;
        while pos < data.len() {
            let hdr_sz = size_of::<CpioHeader>();
            // NOTE(review): slicing panics rather than erroring on a truncated
            // archive — confirm inputs are always size-validated upstream.
            let hdr = from_bytes::<CpioHeader>(&data[pos..(pos + hdr_sz)]);
            if &hdr.magic != b"070701" {
                return log_err!("invalid cpio magic");
            }
            pos += hdr_sz;
            // namesize includes the trailing NUL byte.
            let name_sz = x8u(&hdr.namesize)? as usize;
            let name = Utf8CStr::from_bytes(&data[pos..(pos + name_sz)])?.to_string();
            pos += name_sz;
            pos = align_4(pos);
            if name == "." || name == ".." {
                continue;
            }
            if name == "TRAILER!!!" {
                match data[pos..].find(b"070701") {
                    Some(x) => pos += x,
                    None => break,
                }
                continue;
            }
            let file_sz = x8u(&hdr.filesize)? as usize;
            let entry = Box::new(CpioEntry {
                mode: x8u(&hdr.mode)?.as_(),
                uid: x8u(&hdr.uid)?.as_(),
                gid: x8u(&hdr.gid)?.as_(),
                rdevmajor: x8u(&hdr.rdevmajor)?.as_(),
                rdevminor: x8u(&hdr.rdevminor)?.as_(),
                data: data[pos..(pos + file_sz)].to_vec(),
            });
            pos += file_sz;
            cpio.entries.insert(name, entry);
            pos = align_4(pos);
        }
        Ok(cpio)
    }
    /// Memory-map `path` and parse it as a cpio archive.
    fn load_from_file(path: &Utf8CStr) -> LoggedResult<Self> {
        eprintln!("Loading cpio: [{path}]");
        let file = MappedFile::open(path)?;
        Self::load_from_data(file.as_ref())
    }
    /// Serialize the archive back into newc format at `path`.
    /// Inodes are synthesized starting from 300000; a trailer entry ends
    /// the archive. All records are 4-byte aligned with zero padding.
    fn dump(&self, path: &str) -> LoggedResult<()> {
        eprintln!("Dumping cpio: [{path}]");
        let mut file = File::create(path)?;
        let mut pos = 0usize;
        let mut inode = 300000i64;
        for (name, entry) in &self.entries {
            // NOTE(review): uses write() return value for position tracking;
            // relies on File::write not short-writing — confirm acceptable.
            pos += file.write(
                format!(
                    "070701{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}",
                    inode,
                    entry.mode,
                    entry.uid,
                    entry.gid,
                    1,
                    0,
                    entry.data.len(),
                    0,
                    0,
                    entry.rdevmajor,
                    entry.rdevminor,
                    name.len() + 1,
                    0
                ).as_bytes(),
            )?;
            pos += file.write(name.as_bytes())?;
            pos += file.write(&[0])?;
            file.write_zeros(align_4(pos) - pos)?;
            pos = align_4(pos);
            pos += file.write(&entry.data)?;
            file.write_zeros(align_4(pos) - pos)?;
            pos = align_4(pos);
            inode += 1;
        }
        // Terminating trailer record ("TRAILER!!!" is 10 chars + NUL = 11).
        pos += file.write(
            format!("070701{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}{:08x}",
                    inode, 0o755, 0, 0, 1, 0, 0, 0, 0, 0, 0, 11, 0
            ).as_bytes()
        )?;
        pos += file.write("TRAILER!!!\0".as_bytes())?;
        file.write_zeros(align_4(pos) - pos)?;
        Ok(())
    }
fn rm(&mut self, path: &str, recursive: bool) {
let path = norm_path(path);
if self.entries.remove(&path).is_some() {
eprintln!("Removed entry [{path}]");
}
if recursive {
let path = path + "/";
self.entries.retain(|k, _| {
if k.starts_with(&path) {
eprintln!("Removed entry [{k}]");
false
} else {
true
}
})
}
}
    /// Write the entry at `path` out to the filesystem location `out`,
    /// creating parent directories as needed. Supports directories, regular
    /// files, symlinks, and block/char device nodes.
    fn extract_entry(&self, path: &str, out: &mut String) -> LoggedResult<()> {
        let entry = self
            .entries
            .get(path)
            .ok_or_log_msg(|w| w.write_str("No such file"))?;
        eprintln!("Extracting entry [{path}] to [{out}]");
        let out = Utf8CStr::from_string(out);
        let mut buf = cstr::buf::default();
        // Make sure its parent directories exist
        if let Some(dir) = out.parent_dir() {
            buf.push_str(dir);
            buf.mkdirs(0o755)?;
        }
        // Permission bits only; the file type is dispatched below.
        let mode: mode_t = (entry.mode & 0o777).into();
        match entry.mode & S_IFMT {
            S_IFDIR => out.mkdir(mode)?,
            S_IFREG => {
                let mut file = out.create(
                    OFlag::O_CREAT | OFlag::O_TRUNC | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                    mode,
                )?;
                file.write_all(&entry.data)?;
            }
            S_IFLNK => {
                // Symlink target is stored as the entry's data.
                buf.clear();
                buf.push_str(str::from_utf8(entry.data.as_slice())?);
                out.create_symlink_to(&buf)?;
            }
            S_IFBLK | S_IFCHR => {
                // NOTE(review): mknod's return value is ignored — confirm
                // failures (e.g. lacking privileges) are acceptable here.
                let dev = makedev(entry.rdevmajor.try_into()?, entry.rdevminor.try_into()?);
                unsafe { mknod(out.as_ptr().cast(), entry.mode, dev) };
            }
            _ => {
                return log_err!("unknown entry type");
            }
        }
        Ok(())
    }
    /// Extract a single entry to `out` when both arguments are given;
    /// otherwise extract every entry to its own relative path.
    fn extract(&self, path: Option<&mut String>, out: Option<&mut String>) -> LoggedResult<()> {
        let path = path.map(|s| norm_path(s.as_str()));
        if let (Some(path), Some(out)) = (&path, out) {
            return self.extract_entry(path, out);
        } else {
            for path in self.entries.keys() {
                if path == "." || path == ".." {
                    continue;
                }
                // Destination mirrors the entry's own path.
                self.extract_entry(path, &mut path.clone())?;
            }
        }
        Ok(())
    }
    /// Whether an entry exists at the normalized `path`.
    fn exists(&self, path: &str) -> bool {
        self.entries.contains_key(&norm_path(path))
    }
    /// Insert (or replace) the entry at `path` with the contents of host
    /// file `file`, using permission bits `mode`. The file type bits are
    /// derived from the host file's type.
    fn add(&mut self, mode: mode_t, path: &str, file: &mut String) -> LoggedResult<()> {
        if path.ends_with('/') {
            return log_err!("path cannot end with / for add");
        }
        let file = Utf8CStr::from_string(file);
        let attr = file.get_attr()?;
        let mut content = Vec::<u8>::new();
        let rdevmajor: dev_t;
        let rdevminor: dev_t;
        // Treat symlinks as regular files as symlinks are created by the 'ln TARGET ENTRY' command
        let mode = if attr.is_file() || attr.is_symlink() {
            rdevmajor = 0;
            rdevminor = 0;
            file.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC)?
                .read_to_end(&mut content)?;
            mode | S_IFREG
        } else {
            rdevmajor = major(attr.st.st_rdev.as_()).as_();
            rdevminor = minor(attr.st.st_rdev.as_()).as_();
            if attr.is_block_device() {
                mode | S_IFBLK
            } else if attr.is_char_device() {
                mode | S_IFCHR
            } else {
                return log_err!("unsupported file type");
            }
        };
        self.entries.insert(
            norm_path(path),
            Box::new(CpioEntry {
                mode,
                uid: 0,
                gid: 0,
                rdevmajor,
                rdevminor,
                data: content,
            }),
        );
        eprintln!("Add file [{path}] ({mode:04o})");
        Ok(())
    }
    /// Insert (or replace) a directory entry at `dir` with permission
    /// bits `mode`, owned by root.
    fn mkdir(&mut self, mode: mode_t, dir: &str) {
        self.entries.insert(
            norm_path(dir),
            Box::new(CpioEntry {
                mode: mode | S_IFDIR,
                uid: 0,
                gid: 0,
                rdevmajor: 0,
                rdevminor: 0,
                data: vec![],
            }),
        );
        eprintln!("Create directory [{dir}] ({mode:04o})");
    }
    /// Insert (or replace) a symlink entry at `dst` pointing to `src`.
    /// The link target is stored as the entry's data.
    fn ln(&mut self, src: &str, dst: &str) {
        self.entries.insert(
            norm_path(dst),
            Box::new(CpioEntry {
                mode: S_IFLNK,
                uid: 0,
                gid: 0,
                rdevmajor: 0,
                rdevminor: 0,
                data: norm_path(src).as_bytes().to_vec(),
            }),
        );
        eprintln!("Create symlink [{dst}] -> [{src}]");
    }
    /// Rename the entry `from` to `to`; errors if `from` does not exist.
    fn mv(&mut self, from: &str, to: &str) -> LoggedResult<()> {
        let entry = self
            .entries
            .remove(&norm_path(from))
            .ok_or_log_msg(|w| w.write_fmt(format_args!("No such entry {from}")))?;
        self.entries.insert(norm_path(to), entry);
        eprintln!("Move [{from}] -> [{to}]");
        Ok(())
    }
    /// List entries under `path` in `ls -l`-like format; without
    /// `recursive`, only direct children of `path` are shown.
    fn ls(&self, path: &str, recursive: bool) {
        let path = norm_path(path);
        // Prefix both the filter and entry names with "/" so prefix matching
        // works uniformly (root becomes the empty prefix).
        let path = if path.is_empty() {
            path
        } else {
            "/".to_string() + path.as_str()
        };
        for (name, entry) in &self.entries {
            let p = "/".to_string() + name.as_str();
            if !p.starts_with(&path) {
                continue;
            }
            let p = p.strip_prefix(&path).unwrap();
            // Reject partial component matches (e.g. "/foo" vs "/foobar").
            if !p.is_empty() && !p.starts_with('/') {
                continue;
            }
            // Non-recursive: skip anything deeper than one level below path.
            if !recursive && !p.is_empty() && p.matches('/').count() > 1 {
                continue;
            }
            println!("{entry}\t{name}");
        }
    }
}
// Bit flag: ramdisk already contains Magisk patch markers.
const MAGISK_PATCHED: i32 = 1 << 0;
// Bit flag: ramdisk contains files from unsupported root solutions.
const UNSUPPORTED_CPIO: i32 = 1 << 1;
impl Cpio {
    /// Apply ramdisk patches: strip dm-verity and/or forced-encryption
    /// flags from fstab entries, controlled by the KEEPVERITY and
    /// KEEPFORCEENCRYPT environment variables.
    fn patch(&mut self) {
        let keep_verity = check_env("KEEPVERITY");
        let keep_force_encrypt = check_env("KEEPFORCEENCRYPT");
        eprintln!(
            "Patch with flag KEEPVERITY=[{keep_verity}] KEEPFORCEENCRYPT=[{keep_force_encrypt}]"
        );
        self.entries.retain(|name, entry| {
            // A candidate fstab: regular file named fstab* that is not part
            // of twrp/recovery/backup data (only relevant if either patch
            // is enabled).
            let fstab = (!keep_verity || !keep_force_encrypt)
                && entry.mode & S_IFMT == S_IFREG
                && !name.starts_with(".backup")
                && !name.starts_with("twrp")
                && !name.starts_with("recovery")
                && name.starts_with("fstab");
            if !keep_verity {
                if fstab {
                    eprintln!("Found fstab file [{name}]");
                    // Patch in place; shrink the buffer if content got shorter.
                    let len = patch_verity(entry.data.as_mut_slice());
                    if len != entry.data.len() {
                        entry.data.resize(len, 0);
                    }
                } else if name == "verity_key" {
                    // Drop the verity key file entirely.
                    return false;
                }
            }
            if !keep_force_encrypt && fstab {
                let len = patch_encryption(entry.data.as_mut_slice());
                if len != entry.data.len() {
                    entry.data.resize(len, 0);
                }
            }
            true
        });
    }
fn test(&self) -> i32 {
for file in [
"sbin/launch_daemonsu.sh",
"sbin/su",
"init.xposed.rc",
"boot/sbin/launch_daemonsu.sh",
] {
if self.exists(file) {
return UNSUPPORTED_CPIO;
}
}
for file in [
".backup/.magisk",
"init.magisk.rc",
"overlay/init.magisk.rc",
] {
if self.exists(file) {
return MAGISK_PATCHED;
}
}
0
}
    /// Undo Magisk patching using the embedded `.backup/` entries:
    /// restore backed-up files (decompressing `.xz` ones), and remove
    /// entries recorded in `.backup/.rmlist`. An empty backup means the
    /// whole ramdisk was added by Magisk, so everything is cleared.
    fn restore(&mut self) -> LoggedResult<()> {
        let mut backups = HashMap::<String, Box<CpioEntry>>::new();
        let mut rm_list = String::new();
        // Pull every .backup/* entry out of the archive.
        self.entries
            .extract_if(.., |name, _| name.starts_with(".backup/"))
            .for_each(|(name, mut entry)| {
                if name == ".backup/.rmlist" {
                    if let Ok(data) = str::from_utf8(&entry.data) {
                        rm_list.push_str(data);
                    }
                } else if name != ".backup/.magisk" {
                    // Strip the ".backup/" prefix (8 chars) and the ".xz"
                    // suffix when the payload decompresses successfully.
                    let new_name = if name.ends_with(".xz") && entry.decompress() {
                        &name[8..name.len() - 3]
                    } else {
                        &name[8..]
                    };
                    eprintln!("Restore [{name}] -> [{new_name}]");
                    backups.insert(new_name.to_string(), entry);
                }
            });
        self.rm(".backup", false);
        if rm_list.is_empty() && backups.is_empty() {
            // No backup data at all: the stock ramdisk was empty.
            self.entries.clear();
            return Ok(());
        }
        // rmlist is a NUL-separated list of paths Magisk added.
        for rm in rm_list.split('\0') {
            if !rm.is_empty() {
                self.rm(rm, false);
            }
        }
        self.entries.extend(backups);
        Ok(())
    }
    /// Diff this (patched) archive against the stock archive at `origin`
    /// and record the differences under `.backup/`:
    /// - files present only in stock, or changed, are backed up
    ///   (xz-compressed unless `skip_compress`);
    /// - files present only here are listed in `.backup/.rmlist`.
    /// Relies on both entry maps being BTreeMaps (sorted) for the
    /// two-pointer merge below.
    fn backup(&mut self, origin: &mut String, skip_compress: bool) -> LoggedResult<()> {
        let mut backups = HashMap::<String, Box<CpioEntry>>::new();
        let mut rm_list = String::new();
        backups.insert(
            ".backup".to_string(),
            Box::new(CpioEntry {
                mode: S_IFDIR,
                uid: 0,
                gid: 0,
                rdevmajor: 0,
                rdevminor: 0,
                data: vec![],
            }),
        );
        let origin = Utf8CStr::from_string(origin);
        let mut o = Cpio::load_from_file(origin)?;
        // Never carry nested backups around.
        o.rm(".backup", true);
        self.rm(".backup", true);
        // lhs = stock entries (consumed), rhs = current entries (borrowed).
        let mut lhs = o.entries.into_iter().peekable();
        let mut rhs = self.entries.iter().peekable();
        loop {
            enum Action<'a> {
                Backup(String, Box<CpioEntry>),
                Record(&'a String),
                Noop,
            }
            // Sorted-merge: compare the heads of both streams.
            let action = match (lhs.peek(), rhs.peek()) {
                (Some((l, _)), Some((r, re))) => match l.as_str().cmp(r.as_str()) {
                    // Only in stock: must be backed up to restore later.
                    Ordering::Less => {
                        let (l, le) = lhs.next().unwrap();
                        Action::Backup(l, le)
                    }
                    // Only in current: record for removal on restore.
                    Ordering::Greater => Action::Record(rhs.next().unwrap().0),
                    // In both: back up only if the contents differ.
                    Ordering::Equal => {
                        let (l, le) = lhs.next().unwrap();
                        let action = if re.data != le.data {
                            Action::Backup(l, le)
                        } else {
                            Action::Noop
                        };
                        rhs.next();
                        action
                    }
                },
                (Some(_), None) => {
                    let (l, le) = lhs.next().unwrap();
                    Action::Backup(l, le)
                }
                (None, Some(_)) => Action::Record(rhs.next().unwrap().0),
                (None, None) => {
                    break;
                }
            };
            match action {
                Action::Backup(name, mut entry) => {
                    let backup = if !skip_compress && entry.compress() {
                        format!(".backup/{name}.xz")
                    } else {
                        format!(".backup/{name}")
                    };
                    eprintln!("Backup [{name}] -> [{backup}]");
                    backups.insert(backup, entry);
                }
                Action::Record(name) => {
                    eprintln!("Record new entry: [{name}] -> [.backup/.rmlist]");
                    rm_list.push_str(&format!("{name}\0"));
                }
                Action::Noop => {}
            }
        }
        if !rm_list.is_empty() {
            backups.insert(
                ".backup/.rmlist".to_string(),
                Box::new(CpioEntry {
                    mode: S_IFREG,
                    uid: 0,
                    gid: 0,
                    rdevmajor: 0,
                    rdevminor: 0,
                    data: rm_list.as_bytes().to_vec(),
                }),
            );
        }
        self.entries.extend(backups);
        Ok(())
    }
}
impl CpioEntry {
    /// Compress `self.data` with XZ in place. Returns false (data
    /// untouched) for non-regular files or on encoder failure.
    pub(crate) fn compress(&mut self) -> bool {
        if self.mode & S_IFMT != S_IFREG {
            return false;
        }
        let mut encoder = get_encoder(FileFormat::XZ, Vec::new());
        let Ok(data): std::io::Result<Vec<u8>> = (try {
            encoder.write_all(&self.data)?;
            encoder.finish()?
        }) else {
            eprintln!("xz compression failed");
            return false;
        };
        self.data = data;
        true
    }
pub(crate) fn decompress(&mut self) -> bool {
if self.mode & S_IFMT != S_IFREG {
return false;
}
let Ok(data): std::io::Result<Vec<u8>> = (try {
let mut decoder = get_decoder(FileFormat::XZ, Cursor::new(&self.data));
let mut data = Vec::new();
std::io::copy(decoder.as_mut(), &mut data)?;
data
}) else {
eprintln!("xz compression failed");
return false;
};
self.data = data;
true
}
}
impl Display for CpioEntry {
    /// `ls -l`-style rendering: type char, rwx bits for user/group/other,
    /// uid, gid, human-readable size, and rdev major:minor.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}{}{}{}{}{}{}{}{}{}\t{}\t{}\t{}\t{}:{}",
            match self.mode & S_IFMT {
                S_IFDIR => "d",
                S_IFREG => "-",
                S_IFLNK => "l",
                S_IFBLK => "b",
                S_IFCHR => "c",
                _ => "?",
            },
            if self.mode & S_IRUSR != 0 { "r" } else { "-" },
            if self.mode & S_IWUSR != 0 { "w" } else { "-" },
            if self.mode & S_IXUSR != 0 { "x" } else { "-" },
            if self.mode & S_IRGRP != 0 { "r" } else { "-" },
            if self.mode & S_IWGRP != 0 { "w" } else { "-" },
            if self.mode & S_IXGRP != 0 { "x" } else { "-" },
            if self.mode & S_IROTH != 0 { "r" } else { "-" },
            if self.mode & S_IWOTH != 0 { "w" } else { "-" },
            if self.mode & S_IXOTH != 0 { "x" } else { "-" },
            self.uid,
            self.gid,
            Size::from_bytes(self.data.len())
                .format()
                .with_style(Style::Abbreviated)
                .with_base(Base::Base10),
            self.rdevmajor,
            self.rdevminor,
        )
    }
}
/// Execute a sequence of cpio subcommands against `file` in-place.
/// Each element of `cmds` is parsed as one argh command line; lines
/// starting with '#' are skipped. The archive is written back at the end
/// unless a command exits/returns early (test, exists, ls).
pub(crate) fn cpio_commands(file: &Utf8CStr, cmds: &Vec<String>) -> LoggedResult<()> {
    // A missing file starts from an empty archive.
    let mut cpio = if file.exists() {
        Cpio::load_from_file(file)?
    } else {
        Cpio::new()
    };
    for cmd in cmds {
        if cmd.starts_with('#') {
            continue;
        }
        let mut cmd = CpioCommand::from_args(
            &["magiskboot", "cpio", file],
            cmd.split(' ')
                .filter(|x| !x.is_empty())
                .collect::<Vec<_>>()
                .as_slice(),
        )
        .on_early_exit(print_cpio_usage);
        match &mut cmd.action {
            // `test` terminates the process: its result IS the exit code.
            CpioAction::Test(_) => exit(cpio.test()),
            CpioAction::Restore(_) => cpio.restore()?,
            CpioAction::Patch(_) => cpio.patch(),
            // `exists` returns early; note the archive is NOT dumped.
            CpioAction::Exists(Exists { path }) => {
                return if cpio.exists(path) {
                    Ok(())
                } else {
                    log_err!()
                };
            }
            CpioAction::Backup(Backup {
                origin,
                skip_compress,
            }) => cpio.backup(origin, *skip_compress)?,
            CpioAction::Remove(Remove { path, recursive }) => cpio.rm(path, *recursive),
            CpioAction::Move(Move { from, to }) => cpio.mv(from, to)?,
            CpioAction::MakeDir(MakeDir { mode, dir }) => cpio.mkdir(*mode, dir),
            CpioAction::Link(Link { src, dst }) => cpio.ln(src, dst),
            CpioAction::Add(Add { mode, path, file }) => cpio.add(*mode, path, file)?,
            CpioAction::Extract(Extract { paths }) => {
                // Either zero args (extract all) or exactly ENTRY OUT.
                if !paths.is_empty() && paths.len() != 2 {
                    log_err!("invalid arguments")?;
                }
                let mut it = paths.iter_mut();
                cpio.extract(it.next(), it.next())?;
            }
            // `ls` is read-only; return without rewriting the archive.
            CpioAction::List(List { path, recursive }) => {
                cpio.ls(path.as_str(), *recursive);
                return Ok(());
            }
        };
    }
    cpio.dump(file)?;
    Ok(())
}
/// Parse an 8-character ASCII hex field from a newc cpio header.
/// Errors with "bad cpio header" on non-UTF-8 or non-hex input.
fn x8u(x: &[u8; 8]) -> LoggedResult<u32> {
    let field = str::from_utf8(x).log_with_msg(|w| w.write_str("bad cpio header"))?;
    // 8 hex digits fit exactly into a u32, so the fold cannot overflow.
    field.chars().try_fold(0u32, |acc, c| {
        let digit = c
            .to_digit(16)
            .ok_or_log_msg(|w| w.write_str("bad cpio header"))?;
        Ok(acc * 16 + digit)
    })
}
#[inline(always)]
fn align_4(x: usize) -> usize {
    // Round up to the next multiple of 4 (newc records are 4-byte aligned).
    (x + 3) / 4 * 4
}
#[inline(always)]
fn norm_path(path: &str) -> String {
    // Drop empty components (leading/trailing/repeated '/') and rejoin,
    // so "/a//b/" and "a/b" normalize to the same key.
    path.split('/')
        .filter(|seg| !seg.is_empty())
        .collect::<Vec<_>>()
        .join("/")
}
/// argh `from_str_fn` helper: parse an octal mode string into mode bits,
/// stringifying any parse error for argh to display.
fn parse_mode(s: &str) -> Result<mode_t, String> {
    match mode_t::from_str_radix(s, 8) {
        Ok(mode) => Ok(mode),
        Err(e) => Err(e.to_string()),
    }
}

273
native/src/boot/dtb.rs Normal file
View file

@ -0,0 +1,273 @@
use argh::FromArgs;
use base::{LoggedResult, MappedFile, Utf8CStr, argh};
use fdt::node::{FdtNode, NodeProperty};
use fdt::{Fdt, FdtError};
use std::cell::UnsafeCell;
use crate::check_env;
use crate::patch::patch_verity;
// Argument definitions for `magiskboot dtb` subcommands.
// Plain `//` comments only: `///` would change argh's generated help text.
#[derive(FromArgs)]
#[argh(subcommand)]
pub(crate) enum DtbAction {
    Print(Print),
    Patch(Patch),
    Test(Test),
}
// `print [-f]`: dump dtb contents; -f restricts output to fstab nodes.
#[derive(FromArgs)]
#[argh(subcommand, name = "print")]
pub(crate) struct Print {
    // NOTE(review): `long = none` is unusual argh syntax — confirm this
    // compiles and behaves as "no long option" as intended.
    #[argh(switch, short = 'f', long = none)]
    fstab: bool,
}
// `patch`: remove verity/avb from fstabs, in-place.
#[derive(FromArgs)]
#[argh(subcommand, name = "patch")]
pub(crate) struct Patch {}
// `test`: check fstab status via the exit code.
#[derive(FromArgs)]
#[argh(subcommand, name = "test")]
pub(crate) struct Test {}
/// Print the `magiskboot dtb` usage text to stderr.
/// The body is a single runtime string; it must not be altered.
pub(crate) fn print_dtb_usage() {
    eprintln!(
        r#"Usage: magiskboot dtb <file> <action> [args...]
Do dtb related actions to <file>.
Supported actions:
  print [-f]
    Print all contents of dtb for debugging
    Specify [-f] to only print fstab nodes
  patch
    Search for fstab and remove verity/avb
    Modifications are done directly to the file in-place
    Configure with env variables: KEEPVERITY
  test
    Test the fstab's status
    Return values:
    0:valid    1:error"#
    );
}
// Properties longer than this are summarized instead of hex-dumped.
const MAX_PRINT_LEN: usize = 32;

/// Pretty-print a device tree node and its subtree as an ASCII-art tree
/// (in the style of `tree(1)`).
///
/// Fix: several of the tree-glyph `print!` literals had been mis-encoded
/// into empty/whitespace strings (the multi-byte box-drawing characters
/// were lost), producing misaligned output. The standard glyphs
/// ("│   ", "│  ", "    ") are restored below.
fn print_node(node: &FdtNode) {
    // Draw the indentation prefix for a node line. `depth_set[i]` records
    // whether the ancestor at depth i still has siblings below it.
    fn pretty_node(depth_set: &[bool]) {
        let mut depth_set = depth_set.iter().peekable();
        while let Some(depth) = depth_set.next() {
            let last = depth_set.peek().is_none();
            if *depth {
                if last {
                    print!("├── ");
                } else {
                    print!("│   ");
                }
            } else if last {
                print!("└── ");
            } else {
                print!("    ");
            }
        }
    }
    // Draw the indentation prefix for a property line (shorter connectors).
    fn pretty_prop(depth_set: &[bool]) {
        let mut depth_set = depth_set.iter().peekable();
        while let Some(depth) = depth_set.next() {
            let last = depth_set.peek().is_none();
            if *depth {
                if last {
                    print!("│  ");
                } else {
                    print!("│   ");
                }
            } else if last {
                print!("└─ ");
            } else {
                print!("    ");
            }
        }
    }
    fn do_print_node(node: &FdtNode, depth_set: &mut Vec<bool>) {
        pretty_node(depth_set);
        let depth = depth_set.len();
        depth_set.push(true);
        println!("{}", node.name);
        let mut properties = node.properties().peekable();
        let mut children = node.children().peekable();
        while let Some(NodeProperty { name, value }) = properties.next() {
            let size = value.len();
            // Heuristic: treat as a printable NUL-terminated string when all
            // bytes are NUL or printable ASCII and it doesn't start with NUL.
            let is_str = !(size > 1 && value[0] == 0)
                && matches!(value.last(), Some(0u8) | None)
                && value.iter().all(|c| *c == 0 || (*c >= 32 && *c < 127));
            // This is the last line at this depth: stop drawing the bar.
            if depth_set[depth] && properties.peek().is_none() && children.peek().is_none() {
                depth_set[depth] = false;
            }
            pretty_prop(depth_set);
            if is_str {
                println!(
                    "[{}]: [\"{}\"]",
                    name,
                    if value.is_empty() {
                        ""
                    } else {
                        unsafe { Utf8CStr::from_bytes_unchecked(value) }
                    }
                );
            } else if size > MAX_PRINT_LEN {
                println!("[{name}]: <bytes>({size})");
            } else {
                println!("[{name}]: {value:02x?}");
            }
        }
        while let Some(child) = children.next() {
            if depth_set[depth] && children.peek().is_none() {
                depth_set[depth] = false;
            }
            do_print_node(&child, depth_set);
        }
        depth_set.pop();
    }
    do_print_node(node, &mut vec![]);
}
/// Map `file` (read-only or read-write per `rw`) and invoke `f` on every
/// flattened device tree blob found inside, identified by the FDT magic
/// 0xd00dfeed. `f` receives the 0-based dtb index and the parsed Fdt.
fn for_each_fdt<F: FnMut(usize, Fdt) -> LoggedResult<()>>(
    file: &Utf8CStr,
    rw: bool,
    mut f: F,
) -> LoggedResult<()> {
    eprintln!("Loading dtbs from [{file}]");
    let file = if rw {
        MappedFile::open_rw(file)?
    } else {
        MappedFile::open(file)?
    };
    let mut buf = Some(file.as_ref());
    let mut dtb_num = 0usize;
    while let Some(slice) = buf {
        // Scan forward for the big-endian FDT magic.
        let slice = if let Some(pos) = slice.windows(4).position(|w| w == b"\xd0\x0d\xfe\xed") {
            &slice[pos..]
        } else {
            break;
        };
        // 40 bytes is the size of a full FDT header; anything shorter is junk.
        if slice.len() < 40 {
            break;
        }
        let fdt = match Fdt::new(slice) {
            Err(FdtError::BufferTooSmall) => {
                eprintln!("dtb.{dtb_num:04} is truncated");
                break;
            }
            Ok(fdt) => fdt,
            e => e?,
        };
        let size = fdt.total_size();
        f(dtb_num, fdt)?;
        dtb_num += 1;
        // Continue scanning after this blob.
        buf = Some(&slice[size..]);
    }
    Ok(())
}
/// Locate the "fstab" node anywhere in the given device tree, if present.
fn find_fstab<'b, 'a: 'b>(fdt: &'b Fdt<'a>) -> Option<FdtNode<'b, 'a>> {
    for node in fdt.all_nodes() {
        if node.name == "fstab" {
            return Some(node);
        }
    }
    None
}
/// Pretty-print every dtb in `file`. With `fstab` set, print only each
/// dtb's fstab node (when one exists); otherwise print the whole tree.
fn dtb_print(file: &Utf8CStr, fstab: bool) -> LoggedResult<()> {
    for_each_fdt(file, false, |n, fdt| {
        if fstab {
            let Some(node) = find_fstab(&fdt) else {
                return Ok(());
            };
            eprintln!("Found fstab in dtb.{n:04}");
            print_node(&node);
        } else if let Some(mut root) = fdt.find_node("/") {
            eprintln!("Printing dtb.{n:04}");
            // The root node has an empty name; display it as "/".
            if root.name.is_empty() {
                root.name = "/";
            }
            print_node(&root);
        }
        Ok(())
    })
}
/// Returns false iff any dtb in `file` has an fstab "system" entry mounted
/// at "/system_root" (i.e. a system-as-root layout); true otherwise.
fn dtb_test(file: &Utf8CStr) -> LoggedResult<bool> {
    let mut ok = true;
    for_each_fdt(file, false, |_, fdt| {
        if let Some(fstab) = find_fstab(&fdt) {
            for node in fstab.children() {
                if node.name != "system" {
                    continue;
                }
                match node.property("mnt_point") {
                    Some(mnt) if mnt.value == b"/system_root\0" => {
                        ok = false;
                        break;
                    }
                    _ => {}
                }
            }
        }
        Ok(())
    })?;
    Ok(ok)
}
/// Patch every dtb embedded in `file` in place:
/// 1. In "chosen" nodes, rewrite "skip_initramfs" in bootargs to
///    "want_initramfs" (same length, so the blob layout is preserved).
/// 2. Unless KEEPVERITY is set, strip dm-verity flags from fstab entries.
///
/// Returns whether any modification was made.
fn dtb_patch(file: &Utf8CStr) -> LoggedResult<bool> {
    let keep_verity = check_env("KEEPVERITY");
    let mut patched = false;
    for_each_fdt(file, true, |n, fdt| {
        for node in fdt.all_nodes() {
            if node.name != "chosen" {
                continue;
            }
            if let Some(boot_args) = node.property("bootargs") {
                boot_args.value.windows(14).for_each(|w| {
                    if w == b"skip_initramfs" {
                        // SAFETY: the file is mapped read-write (rw = true), so
                        // casting away the shared reference to mutate the mapped
                        // bytes in place is acceptable here.
                        let w = unsafe {
                            &mut *std::mem::transmute::<&[u8], &UnsafeCell<[u8]>>(w).get()
                        };
                        // Replace the 4-byte prefix "skip" with "want".
                        // Fixed: was `w[..=4]` (a 5-byte range), which makes
                        // copy_from_slice panic on the length mismatch.
                        w[..4].copy_from_slice(b"want");
                        eprintln!("Patch [skip_initramfs] -> [want_initramfs] in dtb.{n:04}");
                        patched = true;
                    }
                });
            }
        }
        if keep_verity {
            return Ok(());
        }
        if let Some(fstab) = find_fstab(&fdt) {
            for child in fstab.children() {
                if let Some(flags) = child.property("fsmgr_flags") {
                    // SAFETY: same as above — mutate the rw-mapped property value.
                    let flags = unsafe {
                        &mut *std::mem::transmute::<&[u8], &UnsafeCell<[u8]>>(flags.value).get()
                    };
                    // patch_verity returns the new logical length; a shrink
                    // means at least one verity flag was removed.
                    if patch_verity(flags) != flags.len() {
                        patched = true;
                    }
                }
            }
        }
        Ok(())
    })?;
    Ok(patched)
}
/// Entry point for the `magiskboot dtb` command family.
/// Returns the success flag reported back to the CLI layer.
pub(crate) fn dtb_commands(file: &Utf8CStr, action: &DtbAction) -> LoggedResult<bool> {
    match action {
        DtbAction::Print(Print { fstab }) => {
            dtb_print(file, *fstab)?;
            // Printing has no failure state of its own once it returns.
            Ok(true)
        }
        DtbAction::Test(_) => dtb_test(file),
        DtbAction::Patch(_) => dtb_patch(file),
    }
}

104
native/src/boot/format.rs Normal file
View file

@ -0,0 +1,104 @@
use crate::ffi::FileFormat;
use base::{Utf8CStr, cstr, libc};
use std::fmt::{Display, Formatter};
use std::str::FromStr;
impl FromStr for FileFormat {
    type Err = ();

    /// Parse a user-supplied compression format name (as accepted on the
    /// command line). Names of formats that cannot be produced are rejected.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let fmt = match s {
            "gzip" => Self::GZIP,
            "zopfli" => Self::ZOPFLI,
            "xz" => Self::XZ,
            "lzma" => Self::LZMA,
            "bzip2" => Self::BZIP2,
            "lz4" => Self::LZ4,
            "lz4_legacy" => Self::LZ4_LEGACY,
            "lz4_lg" => Self::LZ4_LG,
            _ => return Err(()),
        };
        Ok(fmt)
    }
}
impl Display for FileFormat {
    /// Display the format by its canonical name (e.g. "gzip", "raw").
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let name: &str = self.as_cstr();
        f.write_str(name)
    }
}
impl FileFormat {
    /// Canonical NUL-terminated name of this format. Any format without a
    /// dedicated name (unknown/boot container formats) is reported as "raw".
    fn as_cstr(&self) -> &'static Utf8CStr {
        match *self {
            Self::BZIP2 => cstr!("bzip2"),
            Self::DTB => cstr!("dtb"),
            Self::GZIP => cstr!("gzip"),
            Self::LZ4 => cstr!("lz4"),
            Self::LZ4_LEGACY => cstr!("lz4_legacy"),
            Self::LZ4_LG => cstr!("lz4_lg"),
            Self::LZMA => cstr!("lzma"),
            Self::LZOP => cstr!("lzop"),
            Self::XZ => cstr!("xz"),
            Self::ZIMAGE => cstr!("zimage"),
            Self::ZOPFLI => cstr!("zopfli"),
            _ => cstr!("raw"),
        }
    }
}
impl FileFormat {
    /// Conventional file extension for this format, or "" when the format
    /// has no compression extension.
    pub fn ext(&self) -> &'static str {
        match *self {
            Self::LZ4 | Self::LZ4_LEGACY | Self::LZ4_LG => "lz4",
            Self::GZIP | Self::ZOPFLI => "gz",
            Self::BZIP2 => "bz2",
            Self::LZMA => "lzma",
            Self::LZOP => "lzo",
            Self::XZ => "xz",
            _ => "",
        }
    }

    /// Whether this is a compression format magiskboot can both decode and
    /// encode. LZOP is deliberately excluded (recognized but unsupported).
    pub fn is_compressed(&self) -> bool {
        match *self {
            Self::GZIP
            | Self::ZOPFLI
            | Self::XZ
            | Self::LZMA
            | Self::BZIP2
            | Self::LZ4
            | Self::LZ4_LEGACY
            | Self::LZ4_LG => true,
            _ => false,
        }
    }

    /// Space-separated list of all supported compression format names,
    /// for CLI help output.
    pub fn formats() -> String {
        let all = [
            Self::GZIP,
            Self::ZOPFLI,
            Self::XZ,
            Self::LZMA,
            Self::BZIP2,
            Self::LZ4,
            Self::LZ4_LEGACY,
            Self::LZ4_LG,
        ];
        all.iter()
            .map(|f| f.to_string())
            .collect::<Vec<_>>()
            .join(" ")
    }
}
// C++ FFI
/// C++-facing shim: canonical format name as a NUL-terminated C string.
/// The returned pointer is 'static and stays valid for the caller.
pub fn fmt2name(fmt: FileFormat) -> *const libc::c_char {
    let name = fmt.as_cstr();
    name.as_ptr()
}
pub fn fmt_compressed(fmt: FileFormat) -> bool {
fmt.is_compressed()
}
/// Like [`fmt_compressed`], but also counts LZOP (recognized as compression
/// even though magiskboot cannot process it).
pub fn fmt_compressed_any(fmt: FileFormat) -> bool {
    matches!(fmt, FileFormat::LZOP) || fmt.is_compressed()
}

108
native/src/boot/lib.rs Normal file
View file

@ -0,0 +1,108 @@
#![feature(format_args_nl)]
#![feature(btree_extract_if)]
#![feature(iter_intersperse)]
#![feature(try_blocks)]
pub use base;
use compress::{compress_bytes, decompress_bytes};
use format::{fmt_compressed, fmt_compressed_any, fmt2name};
use sign::{SHA, get_sha, sha256_hash, sign_payload_for_cxx};
use std::env;
mod cli;
mod compress;
mod cpio;
mod dtb;
mod format;
mod patch;
mod payload;
// Suppress warnings in generated code
#[allow(warnings)]
mod proto;
mod sign;
#[cxx::bridge]
pub mod ffi {
    /// All file/data formats magiskboot recognizes (shared with C++).
    enum FileFormat {
        UNKNOWN,
        /* Boot formats */
        CHROMEOS,
        AOSP,
        AOSP_VENDOR,
        DHTB,
        BLOB,
        /* Compression formats */
        GZIP,
        ZOPFLI,
        XZ,
        LZMA,
        BZIP2,
        LZ4,
        LZ4_LEGACY,
        LZ4_LG,
        /* Unsupported compression */
        LZOP,
        /* Misc */
        MTK,
        DTB,
        ZIMAGE,
    }

    // C++ implementations callable from Rust.
    unsafe extern "C++" {
        include!("magiskboot.hpp");

        #[cxx_name = "Utf8CStr"]
        type Utf8CStrRef<'a> = base::Utf8CStrRef<'a>;

        fn cleanup();
        fn unpack(image: Utf8CStrRef, skip_decomp: bool, hdr: bool) -> i32;
        fn repack(src_img: Utf8CStrRef, out_img: Utf8CStrRef, skip_comp: bool);
        fn split_image_dtb(filename: Utf8CStrRef, skip_decomp: bool) -> i32;
        fn check_fmt(buf: &[u8]) -> FileFormat;
    }

    // Rust implementations exposed to C++.
    extern "Rust" {
        // Opaque hashing context (SHA-1 or SHA-256, chosen by get_sha).
        type SHA;
        fn get_sha(use_sha1: bool) -> Box<SHA>;
        fn update(self: &mut SHA, data: &[u8]);
        fn finalize_into(self: &mut SHA, out: &mut [u8]);
        fn output_size(self: &SHA) -> usize;
        fn sha256_hash(data: &[u8], out: &mut [u8]);

        fn compress_bytes(format: FileFormat, in_bytes: &[u8], out_fd: i32);
        fn decompress_bytes(format: FileFormat, in_bytes: &[u8], out_fd: i32);
        fn fmt2name(fmt: FileFormat) -> *const c_char;
        fn fmt_compressed(fmt: FileFormat) -> bool;
        fn fmt_compressed_any(fmt: FileFormat) -> bool;
        #[cxx_name = "sign_payload"]
        fn sign_payload_for_cxx(payload: &[u8]) -> Vec<u8>;
    }

    // BootImage FFI
    unsafe extern "C++" {
        include!("bootimg.hpp");

        #[cxx_name = "boot_img"]
        type BootImage;

        #[cxx_name = "get_payload"]
        fn payload(self: &BootImage) -> &[u8];
        #[cxx_name = "get_tail"]
        fn tail(self: &BootImage) -> &[u8];
        fn is_signed(self: &BootImage) -> bool;
        fn tail_off(self: &BootImage) -> u64;
        #[Self = BootImage]
        #[cxx_name = "create"]
        fn new(img: Utf8CStrRef) -> UniquePtr<BootImage>;
    }
    // Rust-side method attached to the C++ BootImage type.
    extern "Rust" {
        #[cxx_name = "verify"]
        fn verify_for_cxx(self: &BootImage) -> bool;
    }
}
#[inline(always)]
/// An environment variable "counts" only when it is set to exactly "true".
pub(crate) fn check_env(env: &str) -> bool {
    matches!(env::var(env), Ok(v) if v == "true")
}

View file

@ -0,0 +1,62 @@
#pragma once

#include <base.hpp>

// Names of the intermediate files produced by unpack / consumed by repack.
#define HEADER_FILE "header"
#define KERNEL_FILE "kernel"
#define RAMDISK_FILE "ramdisk.cpio"
#define VND_RAMDISK_DIR "vendor_ramdisk"
#define SECOND_FILE "second"
#define EXTRA_FILE "extra"
#define KER_DTB_FILE "kernel_dtb"
#define RECV_DTBO_FILE "recovery_dtbo"
#define DTB_FILE "dtb"
#define BOOTCONFIG_FILE "bootconfig"
#define NEW_BOOT "new-boot.img"

// Buffer matching helpers; `s` must be a string literal (sizeof(s) - 1
// drops the implicit NUL terminator).
#define BUFFER_MATCH(buf, s) (memcmp(buf, s, sizeof(s) - 1) == 0)
#define BUFFER_CONTAIN(buf, sz, s) (memmem(buf, sz, s, sizeof(s) - 1) != nullptr)
// Like BUFFER_MATCH, but first verifies `len` bytes are available;
// expects `buf` and `len` to be in scope at the expansion site.
#define CHECKED_MATCH(s) (len >= (sizeof(s) - 1) && BUFFER_MATCH(buf, s))

// Magic numbers identifying the various boot image / compression formats.
#define BOOT_MAGIC "ANDROID!"
#define VENDOR_BOOT_MAGIC "VNDRBOOT"
#define CHROMEOS_MAGIC "CHROMEOS"
#define GZIP1_MAGIC "\x1f\x8b"
#define GZIP2_MAGIC "\x1f\x9e"
#define LZOP_MAGIC "\x89""LZO"
#define XZ_MAGIC "\xfd""7zXZ"
#define BZIP_MAGIC "BZh"
#define LZ4_LEG_MAGIC "\x02\x21\x4c\x18"
#define LZ41_MAGIC "\x03\x21\x4c\x18"
#define LZ42_MAGIC "\x04\x22\x4d\x18"
#define MTK_MAGIC "\x88\x16\x88\x58"
#define DTB_MAGIC "\xd0\x0d\xfe\xed"
#define LG_BUMP_MAGIC "\x41\xa9\xe4\x67\x74\x4d\x1d\x1b\xa4\x29\xf2\xec\xea\x65\x52\x79"
#define DHTB_MAGIC "\x44\x48\x54\x42\x01\x00\x00\x00"
#define SEANDROID_MAGIC "SEANDROIDENFORCE"
#define TEGRABLOB_MAGIC "-SIGNED-BY-SIGNBLOB-"
// Nook HD / Acclaim devices prepend a vendor pre-header before the image.
#define NOOKHD_RL_MAGIC "Red Loader"
#define NOOKHD_GL_MAGIC "Green Loader"
#define NOOKHD_GR_MAGIC "Green Recovery"
#define NOOKHD_EB_MAGIC "eMMC boot.img+secondloader"
#define NOOKHD_ER_MAGIC "eMMC recovery.img+secondloader"
#define NOOKHD_PRE_HEADER_SZ 1048576
#define ACCLAIM_MAGIC "BauwksBoot"
#define ACCLAIM_PRE_HEADER_SZ 262144
#define AMONET_MICROLOADER_MAGIC "microloader"
#define AMONET_MICROLOADER_SZ 1024
#define AVB_FOOTER_MAGIC "AVBf"
#define AVB_MAGIC "AVB0"
#define ZIMAGE_MAGIC "\x18\x28\x6f\x01"

// Defined by the Rust cxx bridge (see lib.rs).
enum class FileFormat : uint8_t;

// Top-level boot image operations implemented in C++.
int unpack(Utf8CStr image, bool skip_decomp = false, bool hdr = false);
void repack(Utf8CStr src_img, Utf8CStr out_img, bool skip_comp = false);
int split_image_dtb(Utf8CStr filename, bool skip_decomp = false);
void cleanup();
FileFormat check_fmt(const void *buf, size_t len);
// Convenience overload: identify the format of a Rust byte slice by
// forwarding to the raw pointer + length API above.
static inline FileFormat check_fmt(rust::Slice<const uint8_t> bytes) {
    const uint8_t *data = bytes.data();
    return check_fmt(data, bytes.size());
}

118
native/src/boot/patch.rs Normal file
View file

@ -0,0 +1,118 @@
use base::{LoggedResult, MappedFile, MutBytesExt, Utf8CStr};
// SAFETY: assert(buf.len() >= 1) && assert(len <= buf.len())
//
// Try to match one of the given literal patterns at the start of `$buf`,
// optionally preceded by a single ',' separator. On a match, additionally
// consume a trailing "=value" (up to the next ' ', '\n' or NUL) and expand
// to Some(total_matched_len); otherwise expand to None.
// Note: patterns are tried in the order given, so longer patterns sharing
// a prefix must be listed before their shorter counterparts.
macro_rules! match_patterns {
    ($buf:ident, $($str:literal), *) => {{
        // Skip a leading ',' so both ",flag" and "flag" forms match.
        let mut len = if *$buf.get_unchecked(0) == b',' { 1 } else { 0 };
        let b = $buf.get_unchecked(len..);
        let found = if b.is_empty() {
            false
        }
        $(
            else if b.starts_with($str) {
                len += $str.len();
                true
            }
        )*
        else {
            false
        };
        if found {
            let b = $buf.get_unchecked(len..);
            // Also consume an "=value" suffix, terminated by space/newline/NUL.
            if !b.is_empty() && b[0] == b'=' {
                for c in b.iter() {
                    if b" \n\0".contains(c) {
                        break;
                    }
                    len += 1;
                }
            }
            Some(len)
        } else {
            None
        }
    }};
}
/// Compact `buf` in place by removing every span matched by
/// `pattern_matcher`, returning the new logical length. Bytes past the new
/// length are zeroed so no stale flag text remains.
///
/// `pattern_matcher` receives the remaining tail of the buffer and returns
/// `Some(n)` to drop the next `n` bytes, or `None` to keep one byte. It must
/// uphold the SAFETY contract documented on `match_patterns!`.
fn remove_pattern(buf: &mut [u8], pattern_matcher: unsafe fn(&[u8]) -> Option<usize>) -> usize {
    // Classic two-cursor compaction: `read` scans, `write` receives kept bytes.
    let mut write = 0_usize;
    let mut read = 0_usize;
    let mut sz = buf.len();
    // SAFETY: assert(write <= read) && assert(read <= buf.len())
    unsafe {
        while read < buf.len() {
            if let Some(len) = pattern_matcher(buf.get_unchecked(read..)) {
                let skipped = buf.get_unchecked(read..(read + len));
                // SAFETY: all matching patterns are ASCII bytes
                let skipped = std::str::from_utf8_unchecked(skipped);
                eprintln!("Remove pattern [{skipped}]");
                sz -= len;
                read += len;
            } else {
                *buf.get_unchecked_mut(write) = *buf.get_unchecked(read);
                write += 1;
                read += 1;
            }
        }
    }
    // Zero the now-unused tail.
    if let Some(buf) = buf.get_mut(write..) {
        buf.fill(0);
    }
    sz
}
/// Strip dm-verity related flags (verify, avb, fsverity, ...) from the fstab
/// flag text in `buf`, in place. Returns the new logical length; a result
/// smaller than `buf.len()` means at least one flag was removed.
pub fn patch_verity(buf: &mut [u8]) -> usize {
    unsafe fn match_verity_pattern(buf: &[u8]) -> Option<usize> {
        unsafe {
            // Order matters: patterns sharing a prefix are listed longest
            // first ("verifyatboot" before "verify", "avb_keys" before "avb").
            match_patterns!(
                buf,
                b"verifyatboot",
                b"verify",
                b"avb_keys",
                b"avb",
                b"support_scfs",
                b"fsverity"
            )
        }
    }
    remove_pattern(buf, match_verity_pattern)
}
/// Strip forced-encryption flags from the fstab flag text in `buf`, in
/// place. Returns the new logical length (see [`patch_verity`]).
pub fn patch_encryption(buf: &mut [u8]) -> usize {
    unsafe fn match_encryption_pattern(buf: &[u8]) -> Option<usize> {
        unsafe { match_patterns!(buf, b"forceencrypt", b"forcefdeorfbe", b"fileencryption") }
    }
    remove_pattern(buf, match_encryption_pattern)
}
/// Decode an ASCII hex string into raw bytes, two digits per byte.
/// A trailing unpaired digit is ignored; input is assumed to be valid hex
/// (no validation is performed, matching the original behavior).
fn hex2byte(hex: &[u8]) -> Vec<u8> {
    // Map one ASCII hex digit (either case) to its 4-bit value.
    let nibble = |c: u8| -> u8 {
        let u = c.to_ascii_uppercase() - b'0';
        if u > 9 { u - 7 } else { u }
    };
    let mut out = Vec::with_capacity(hex.len() / 2);
    for pair in hex.chunks_exact(2) {
        out.push((nibble(pair[0]) << 4) | nibble(pair[1]));
    }
    out
}
/// Overwrite every occurrence of the hex pattern `from` in `file` with the
/// hex pattern `to`, in place. Returns true when at least one occurrence was
/// patched; all errors are logged and reported as false.
pub fn hexpatch(file: &Utf8CStr, from: &Utf8CStr, to: &Utf8CStr) -> bool {
    let result: LoggedResult<bool> = try {
        let mut mapped = MappedFile::open_rw(file)?;
        let needle = hex2byte(from.as_bytes());
        let replacement = hex2byte(to.as_bytes());
        let offsets = mapped.patch(needle.as_slice(), replacement.as_slice());
        for off in &offsets {
            eprintln!("Patch @ {off:#010X} [{from}] -> [{to}]");
        }
        !offsets.is_empty()
    };
    result.unwrap_or(false)
}

179
native/src/boot/payload.rs Normal file
View file

@ -0,0 +1,179 @@
use crate::compress::get_decoder;
use crate::ffi::check_fmt;
use crate::proto::update_metadata::DeltaArchiveManifest;
use crate::proto::update_metadata::mod_InstallOperation::Type;
use base::{LoggedError, LoggedResult, ReadSeekExt, ResultExt, WriteExt, error};
use byteorder::{BigEndian, ReadBytesExt};
use quick_protobuf::{BytesReader, MessageRead};
use std::fs::File;
use std::io::{BufReader, Cursor, Read, Seek, SeekFrom, Write};
use std::os::fd::FromRawFd;
// Log "Invalid payload: <msg>" and produce a LoggedError for the caller to
// return. The first arm takes a plain literal; the second takes
// format_args!-style arguments, e.g. bad_payload!("bad version: {}", v).
macro_rules! bad_payload {
    ($msg:literal) => {{
        error!(concat!("Invalid payload: ", $msg));
        LoggedError::default()
    }};
    ($($args:tt)*) => {{
        error!("Invalid payload: {}", format_args!($($args)*));
        LoggedError::default()
    }};
}
// Magic bytes at the start of every payload.bin.
const PAYLOAD_MAGIC: &str = "CrAU";

/// Extract a boot image from an A/B OTA `payload.bin`.
///
/// * `in_path`: path to the payload file, or "-" to read from stdin (fd 0).
/// * `partition_name`: partition to extract; when `None`, "init_boot" is
///   preferred with "boot" as fallback.
/// * `out_path`: output file; when `None`, "<partition>.img" is used.
///
/// Only full payloads (minor version 0) are supported. The input is read
/// strictly forward, so non-seekable inputs (pipes) work.
pub fn extract_boot_from_payload(
    in_path: &str,
    partition_name: Option<&str>,
    out_path: Option<&str>,
) -> LoggedResult<()> {
    let mut reader = BufReader::new(if in_path == "-" {
        // SAFETY: fd 0 (stdin) is valid for the lifetime of the process.
        unsafe { File::from_raw_fd(0) }
    } else {
        File::open(in_path).log_with_msg(|w| write!(w, "Cannot open '{in_path}'"))?
    });

    // Payload header: magic, format version (u64), manifest size (u64),
    // manifest signature size (u32) — all big-endian.
    let buf = &mut [0u8; 4];
    reader.read_exact(buf)?;

    if buf != PAYLOAD_MAGIC.as_bytes() {
        return Err(bad_payload!("invalid magic"));
    }

    let version = reader.read_u64::<BigEndian>()?;
    if version != 2 {
        return Err(bad_payload!("unsupported version: {}", version));
    }

    let manifest_len = reader.read_u64::<BigEndian>()? as usize;
    if manifest_len == 0 {
        return Err(bad_payload!("manifest length is zero"));
    }

    let manifest_sig_len = reader.read_u32::<BigEndian>()?;
    if manifest_sig_len == 0 {
        return Err(bad_payload!("manifest signature length is zero"));
    }

    // Parse the DeltaArchiveManifest protobuf. `buf` is reused later as the
    // scratch buffer for each operation's data blob.
    let mut buf = vec![0; manifest_len];
    let manifest = {
        let manifest = &mut buf[..manifest_len];
        reader.read_exact(manifest)?;
        let mut br = BytesReader::from_bytes(manifest);
        DeltaArchiveManifest::from_reader(&mut br, manifest)?
    };
    // Non-zero minor version means a delta payload, which requires the old
    // partition contents and cannot be extracted standalone.
    if manifest.get_minor_version() != 0 {
        return Err(bad_payload!(
            "delta payloads are not supported, please use a full payload file"
        ));
    }

    let block_size = manifest.get_block_size() as u64;

    // Select the partition: explicit name, or init_boot falling back to boot.
    let partition = match partition_name {
        None => {
            let boot = manifest
                .partitions
                .iter()
                .find(|p| p.partition_name == "init_boot");
            let boot = match boot {
                Some(boot) => Some(boot),
                None => manifest
                    .partitions
                    .iter()
                    .find(|p| p.partition_name == "boot"),
            };
            boot.ok_or_else(|| bad_payload!("boot partition not found"))?
        }
        Some(name) => manifest
            .partitions
            .iter()
            .find(|p| p.partition_name.as_str() == name)
            .ok_or_else(|| bad_payload!("partition '{}' not found", name))?,
    };

    // Default output name: "<partition>.img".
    let out_str: String;
    let out_path = match out_path {
        None => {
            out_str = format!("{}.img", partition.partition_name);
            out_str.as_str()
        }
        Some(s) => s,
    };

    let mut out_file =
        File::create(out_path).log_with_msg(|w| write!(w, "Cannot write to '{out_path}'"))?;

    // Skip the manifest signature
    reader.skip(manifest_sig_len as usize)?;

    // Sort the install operations with data_offset so we will only ever need to seek forward
    // This makes it possible to support non-seekable input file descriptors
    let mut operations = partition.operations.clone();
    operations.sort_by_key(|e| e.data_offset.unwrap_or(0));
    let mut curr_data_offset: u64 = 0;

    for operation in operations.iter() {
        let data_len = operation
            .data_length
            .ok_or_else(|| bad_payload!("data length not found"))? as usize;

        let data_offset = operation
            .data_offset
            .ok_or_else(|| bad_payload!("data offset not found"))?;

        let data_type = operation.type_pb;

        // Reuse the scratch buffer for this operation's blob.
        buf.resize(data_len, 0u8);
        let data = &mut buf[..data_len];

        // Skip to the next offset and read data
        let skip = data_offset - curr_data_offset;
        reader.skip(skip as usize)?;
        reader.read_exact(data)?;
        curr_data_offset = data_offset + data_len as u64;

        // Output position = first destination extent's start block, in bytes.
        let out_offset = operation
            .dst_extents
            .first()
            .ok_or_else(|| bad_payload!("dst extents not found"))?
            .start_block
            .ok_or_else(|| bad_payload!("start block not found"))?
            * block_size;

        match data_type {
            Type::REPLACE => {
                out_file.seek(SeekFrom::Start(out_offset))?;
                out_file.write_all(data)?;
            }
            Type::ZERO => {
                // Zero-fill every destination extent of this operation.
                // NOTE(review): write_zeros is given a block count, not a byte
                // count — confirm WriteExt::write_zeros semantics; sparse file
                // holes created by seek may be why this is harmless in practice.
                for ext in operation.dst_extents.iter() {
                    let out_seek = ext
                        .start_block
                        .ok_or_else(|| bad_payload!("start block not found"))?
                        * block_size;
                    let num_blocks = ext
                        .num_blocks
                        .ok_or_else(|| bad_payload!("num blocks not found"))?;
                    out_file.seek(SeekFrom::Start(out_seek))?;
                    out_file.write_zeros(num_blocks as usize)?;
                }
            }
            Type::REPLACE_BZ | Type::REPLACE_XZ => {
                // Decompress the blob (bzip2 / xz) straight into the output.
                out_file.seek(SeekFrom::Start(out_offset))?;
                let fmt = check_fmt(data);
                let mut decoder = get_decoder(fmt, Cursor::new(data));
                let Ok(_): std::io::Result<()> = (try {
                    std::io::copy(decoder.as_mut(), &mut out_file)?;
                }) else {
                    return Err(bad_payload!("decompression failed"));
                };
            }
            _ => return Err(bad_payload!("unsupported operation type")),
        };
    }

    Ok(())
}

View file

@ -0,0 +1,369 @@
//
// Copyright (C) 2010 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Update file format: An update file contains all the operations needed
// to update a system to a specific version. It can be a full payload which
// can update from any version, or a delta payload which can only update
// from a specific version.
// The update format is represented by this struct pseudocode:
// struct delta_update_file {
// char magic[4] = "CrAU";
// uint64 file_format_version; // payload major version
// uint64 manifest_size; // Size of protobuf DeltaArchiveManifest
//
// // Only present if format_version >= 2:
// uint32 metadata_signature_size;
//
// // The DeltaArchiveManifest protobuf serialized, not compressed.
// char manifest[manifest_size];
//
// // The signature of the metadata (from the beginning of the payload up to
// // this location, not including the signature itself). This is a serialized
// // Signatures message.
// char metadata_signature_message[metadata_signature_size];
//
// // Data blobs for files, no specific format. The specific offset
// // and length of each data blob is recorded in the DeltaArchiveManifest.
// struct {
// char data[];
// } blobs[];
//
// // The signature of the entire payload, everything up to this location,
// // except that metadata_signature_message is skipped to simplify signing
// // process. These two are not signed:
// uint64 payload_signatures_message_size;
// // This is a serialized Signatures message.
// char payload_signatures_message[payload_signatures_message_size];
//
// };
// The DeltaArchiveManifest protobuf is an ordered list of InstallOperation
// objects. These objects are stored in a linear array in the
// DeltaArchiveManifest. Each operation is applied in order by the client.
// The DeltaArchiveManifest also contains the initial and final
// checksums for the device.
// The client will perform each InstallOperation in order, beginning even
// before the entire delta file is downloaded (but after at least the
// protobuf is downloaded). The types of operations are explained:
// - REPLACE: Replace the dst_extents on the drive with the attached data,
// zero padding out to block size.
// - REPLACE_BZ: bzip2-uncompress the attached data and write it into
// dst_extents on the drive, zero padding to block size.
// - MOVE: Copy the data in src_extents to dst_extents. Extents may overlap,
// so it may be desirable to read all src_extents data into memory before
// writing it out. (deprecated)
// - SOURCE_COPY: Copy the data in src_extents in the old partition to
// dst_extents in the new partition. There's no overlapping of data because
// the extents are in different partitions.
// - BSDIFF: Read src_length bytes from src_extents into memory, perform
// bspatch with attached data, write new data to dst_extents, zero padding
// to block size. (deprecated)
// - SOURCE_BSDIFF: Read the data in src_extents in the old partition, perform
// bspatch with the attached data and write the new data to dst_extents in the
// new partition.
// - ZERO: Write zeros to the destination dst_extents.
// - DISCARD: Discard the destination dst_extents blocks on the physical medium.
// the data read from those blocks is undefined.
// - REPLACE_XZ: Replace the dst_extents with the contents of the attached
// xz file after decompression. The xz file should only use crc32 or no crc at
// all to be compatible with xz-embedded.
// - PUFFDIFF: Read the data in src_extents in the old partition, perform
// puffpatch with the attached data and write the new data to dst_extents in
// the new partition.
//
// The operations allowed in the payload (supported by the client) depend on the
// major and minor version. See InstallOperation.Type below for details.
syntax = "proto2";
package chromeos_update_engine;
// Data is packed into blocks on disk, always starting from the beginning
// of the block. If a file's data is too large for one block, it overflows
// into another block, which may or may not be the following block on the
// physical partition. An ordered list of extents is another
// representation of an ordered list of blocks. For example, a file stored
// in blocks 9, 10, 11, 2, 18, 12 (in that order) would be stored in
// extents { {9, 3}, {2, 1}, {18, 1}, {12, 1} } (in that order).
// In general, files are stored sequentially on disk, so it's more efficient
// to use extents to encode the block lists (this is effectively
// run-length encoding).
// A sentinel value (kuint64max) as the start block denotes a sparse-hole
// in a file whose block-length is specified by num_blocks.
// A contiguous run of blocks: [start_block, start_block + num_blocks).
message Extent {
  optional uint64 start_block = 1;
  optional uint64 num_blocks = 2;
}
// Signatures: Updates may be signed by the OS vendor. The client verifies
// an update's signature by hashing the entire download. The section of the
// download that contains the signature is at the end of the file, so when
// signing a file, only the part up to the signature part is signed.
// Then, the client looks inside the download's Signatures message for a
// Signature message that it knows how to handle. Generally, a client will
// only know how to handle one type of signature, but an update may contain
// many signatures to support many different types of client. Then client
// selects a Signature message and uses that, along with a known public key,
// to verify the download. The public key is expected to be part of the
// client.
message Signatures {
  message Signature {
    optional uint32 version = 1 [deprecated = true];
    // The raw signature blob, padded to the key's maximum size (see below).
    optional bytes data = 2;

    // The DER encoded signature size of EC keys is nondeterministic for
    // different input of sha256 hash. However, we need the size of the
    // serialized signatures protobuf string to be fixed before signing;
    // because this size is part of the content to be signed. Therefore, we
    // always pad the signature data to the maximum possible signature size of
    // a given key. And the payload verifier will truncate the signature to
    // its correct size based on the value of |unpadded_signature_size|.
    optional fixed32 unpadded_signature_size = 3;
  }
  // One entry per signing key type; a client picks the one it can verify.
  repeated Signature signatures = 1;
}
// Size and content hash of a partition image.
message PartitionInfo {
  // Size of the partition data, in bytes.
  optional uint64 size = 1;
  // Hash of the partition data (presumably SHA-256 — verify against
  // update_engine's payload generator).
  optional bytes hash = 2;
}
message InstallOperation {
  enum Type {
    REPLACE = 0;     // Replace destination extents w/ attached data.
    REPLACE_BZ = 1;  // Replace destination extents w/ attached bzipped data.
    MOVE = 2 [deprecated = true];    // Move source extents to target extents.
    BSDIFF = 3 [deprecated = true];  // The data is a bsdiff binary diff.

    // On minor version 2 or newer, these operations are supported:
    SOURCE_COPY = 4;    // Copy from source to target partition
    SOURCE_BSDIFF = 5;  // Like BSDIFF, but read from source partition

    // On minor version 3 or newer and on major version 2 or newer, these
    // operations are supported:
    REPLACE_XZ = 8;  // Replace destination extents w/ attached xz data.

    // On minor version 4 or newer, these operations are supported:
    ZERO = 6;     // Write zeros in the destination.
    DISCARD = 7;  // Discard the destination blocks, reading as undefined.
    BROTLI_BSDIFF = 10;  // Like SOURCE_BSDIFF, but compressed with brotli.

    // On minor version 5 or newer, these operations are supported:
    PUFFDIFF = 9;  // The data is in puffdiff format.

    // On minor version 8 or newer, these operations are supported:
    ZUCCHINI = 11;

    // On minor version 9 or newer, these operations are supported:
    LZ4DIFF_BSDIFF = 12;
    LZ4DIFF_PUFFDIFF = 13;
  }
  // The operation to apply; see the Type comments above for semantics.
  required Type type = 1;

  // Only minor version 6 or newer support 64 bits |data_offset| and
  // |data_length|, older client will read them as uint32.
  // The offset into the delta file (after the protobuf)
  // where the data (if any) is stored
  optional uint64 data_offset = 2;
  // The length of the data in the delta file
  optional uint64 data_length = 3;

  // Ordered list of extents that are read from (if any) and written to.
  repeated Extent src_extents = 4;
  // Byte length of src, equal to the number of blocks in src_extents *
  // block_size. It is used for BSDIFF and SOURCE_BSDIFF, because we need to
  // pass that external program the number of bytes to read from the blocks we
  // pass it. This is not used in any other operation.
  optional uint64 src_length = 5;

  repeated Extent dst_extents = 6;
  // Byte length of dst, equal to the number of blocks in dst_extents *
  // block_size. Used for BSDIFF and SOURCE_BSDIFF, but not in any other
  // operation.
  optional uint64 dst_length = 7;

  // Optional SHA 256 hash of the blob associated with this operation.
  // This is used as a primary validation for http-based downloads and
  // as a defense-in-depth validation for https-based downloads. If
  // the operation doesn't refer to any blob, this field will have
  // zero bytes.
  optional bytes data_sha256_hash = 8;

  // Indicates the SHA 256 hash of the source data referenced in src_extents at
  // the time of applying the operation. If present, the update_engine daemon
  // MUST read and verify the source data before applying the operation.
  optional bytes src_sha256_hash = 9;
}
// Hints to VAB snapshot to skip writing some blocks if these blocks are
// identical to the ones on the source image. The src & dst extents for each
// CowMergeOperation should be contiguous, and they're a subset of an OTA
// InstallOperation.
// During merge time, we need to follow the pre-computed sequence to avoid
// read after write, similar to the inplace update schema.
message CowMergeOperation {
  enum Type {
    COW_COPY = 0;     // identical blocks
    COW_XOR = 1;      // used when src/dst blocks are highly similar
    COW_REPLACE = 2;  // Raw replace operation
  }
  optional Type type = 1;

  // Source / destination block ranges this merge operation covers.
  optional Extent src_extent = 2;
  optional Extent dst_extent = 3;

  // For COW_XOR, source location might be unaligned, so this field is in range
  // [0, block_size), representing how much should the src_extent shift toward
  // larger block number. If this field is non-zero, then src_extent will
  // include 1 extra block in the end, as the merge op actually references the
  // first |src_offset| bytes of that extra block. For example, if |dst_extent|
  // is [10, 15], |src_offset| is 500, then src_extent might look like [25, 31].
  // Note that |src_extent| contains 1 extra block than the |dst_extent|.
  optional uint32 src_offset = 4;
}
// Describes the update to apply to a single partition.
message PartitionUpdate {
  // A platform-specific name to identify the partition set being updated. For
  // example, in Chrome OS this could be "ROOT" or "KERNEL".
  required string partition_name = 1;

  // Whether this partition carries a filesystem with post-install program that
  // must be run to finalize the update process. See also |postinstall_path| and
  // |filesystem_type|.
  optional bool run_postinstall = 2;

  // The path of the executable program to run during the post-install step,
  // relative to the root of this filesystem. If not set, the default "postinst"
  // will be used. This setting is only used when |run_postinstall| is set and
  // true.
  optional string postinstall_path = 3;

  // The filesystem type as passed to the mount(2) syscall when mounting the new
  // filesystem to run the post-install program. If not set, a fixed list of
  // filesystems will be attempted. This setting is only used if
  // |run_postinstall| is set and true.
  optional string filesystem_type = 4;

  // If present, a list of signatures of the new_partition_info.hash signed with
  // different keys. If the update_engine daemon requires vendor-signed images
  // and has its public key installed, one of the signatures should be valid
  // for /postinstall to run.
  repeated Signatures.Signature new_partition_signature = 5;

  optional PartitionInfo old_partition_info = 6;
  optional PartitionInfo new_partition_info = 7;

  // The list of operations to be performed to apply this PartitionUpdate. The
  // associated operation blobs (in operations[i].data_offset, data_length)
  // should be stored contiguously and in the same order.
  repeated InstallOperation operations = 8;

  // Whether a failure in the postinstall step for this partition should be
  // ignored.
  optional bool postinstall_optional = 9;

  // On minor version 6 or newer, these fields are supported:

  // The extent for data covered by verity hash tree.
  optional Extent hash_tree_data_extent = 10;

  // The extent to store verity hash tree.
  optional Extent hash_tree_extent = 11;

  // The hash algorithm used in verity hash tree.
  optional string hash_tree_algorithm = 12;

  // The salt used for verity hash tree.
  optional bytes hash_tree_salt = 13;

  // The extent for data covered by FEC.
  optional Extent fec_data_extent = 14;

  // The extent to store FEC.
  optional Extent fec_extent = 15;

  // The number of FEC roots.
  optional uint32 fec_roots = 16 [default = 2];

  // Per-partition version used for downgrade detection, added
  // as an effort to support partial updates. For most partitions,
  // this is the build timestamp.
  optional string version = 17;

  // A sorted list of CowMergeOperation. When writing cow, we can choose to
  // skip writing the raw bytes for these extents. During snapshot merge, the
  // bytes will be read from the source partitions instead.
  repeated CowMergeOperation merge_operations = 18;

  // Estimated size for COW image. This is used by libsnapshot
  // as a hint. If set to 0, libsnapshot should use alternative
  // methods for estimating size.
  optional uint64 estimate_cow_size = 19;
}
// A named group of dynamic partitions sharing one size budget.
message DynamicPartitionGroup {
  // Name of the group.
  required string name = 1;

  // Maximum size of the group. The sum of sizes of all partitions in the group
  // must not exceed the maximum size of the group.
  optional uint64 size = 2;

  // A list of partitions that belong to the group.
  repeated string partition_names = 3;
}
// Feature toggles for Virtual AB Compression (VABC).
message VABCFeatureSet {
  optional bool threaded = 1;
  optional bool batch_writes = 2;
}
// Metadata related to all dynamic partitions.
message DynamicPartitionMetadata {
  // All updatable groups present in |partitions| of this DeltaArchiveManifest.
  // - If an updatable group is on the device but not in the manifest, it is
  //   not updated. Hence, the group will not be resized, and partitions cannot
  //   be added to or removed from the group.
  // - If an updatable group is in the manifest but not on the device, the group
  //   is added to the device.
  repeated DynamicPartitionGroup groups = 1;

  // Whether dynamic partitions have snapshots during the update. If this is
  // set to true, the update_engine daemon creates snapshots for all dynamic
  // partitions if possible. If this is unset, the update_engine daemon MUST
  // NOT create snapshots for dynamic partitions.
  optional bool snapshot_enabled = 2;

  // If this is set to false, update_engine should not use VABC regardless. If
  // this is set to true, update_engine may choose to use VABC if device
  // supports it, but not guaranteed.
  // VABC stands for Virtual AB Compression
  optional bool vabc_enabled = 3;

  // The compression algorithm used by VABC. Available ones are "gz", "brotli".
  // See system/core/fs_mgr/libsnapshot/cow_writer.cpp for available options,
  // as this parameter is ultimately forwarded to libsnapshot's CowWriter
  optional string vabc_compression_param = 4;

  // COW version used by VABC. This represents the major version in the COW
  // header
  optional uint32 cow_version = 5;

  // A collection of knobs to tune Virtual AB Compression
  optional VABCFeatureSet vabc_feature_set = 6;
}
// Definition has been duplicated from
// $ANDROID_BUILD_TOP/build/tools/releasetools/ota_metadata.proto. Keep in sync.
// Describes one APEX package shipped in the payload.
message ApexInfo {
  optional string package_name = 1;
  optional int64 version = 2;
  optional bool is_compressed = 3;
  // Size after decompression; used to reserve space on the device.
  optional int64 decompressed_size = 4;
}
// Definition has been duplicated from
// $ANDROID_BUILD_TOP/build/tools/releasetools/ota_metadata.proto. Keep in sync.
// Container for the list of APEX packages in the payload.
message ApexMetadata {
  repeated ApexInfo apex_info = 1;
}
// Top-level manifest describing the contents of an update payload.
message DeltaArchiveManifest {
  // Only present in major version = 1. List of install operations for the
  // kernel and rootfs partitions. For major version = 2 see the |partitions|
  // field.
  reserved 1, 2;
  // (At time of writing) usually 4096
  optional uint32 block_size = 3 [default = 4096];
  // If signatures are present, the offset into the blobs, generally
  // tacked onto the end of the file, and the length. We use an offset
  // rather than a bool to allow for more flexibility in future file formats.
  // If either is absent, it means signatures aren't supported in this
  // file.
  optional uint64 signatures_offset = 4;
  optional uint64 signatures_size = 5;
  // Fields deprecated in major version 2.
  reserved 6,7,8,9,10,11;
  // The minor version, also referred as "delta version", of the payload.
  // Minor version 0 is full payload, everything else is delta payload.
  optional uint32 minor_version = 12 [default = 0];
  // Only present in major version >= 2. List of partitions that will be
  // updated, in the order they will be updated. This field replaces the
  // |install_operations|, |kernel_install_operations| and the
  // |{old,new}_{kernel,rootfs}_info| fields used in major version = 1. This
  // array can have more than two partitions if needed, and they are identified
  // by the partition name.
  repeated PartitionUpdate partitions = 13;
  // The maximum timestamp of the OS allowed to apply this payload.
  // Can be used to prevent downgrading the OS.
  optional int64 max_timestamp = 14;
  // Metadata related to all dynamic partitions.
  optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
  // If the payload only updates a subset of partitions on the device.
  optional bool partial_update = 16;
  // Information on compressed APEX to figure out how much space is required for
  // their decompression
  repeated ApexInfo apex_info = 17;
  // Security patch level of the device, usually in the format of
  // yyyy-mm-dd
  optional string security_patch_level = 18;
}

349
native/src/boot/sign.rs Normal file
View file

@ -0,0 +1,349 @@
use der::referenced::OwnedToRef;
use der::{Decode, DecodePem, Encode, Sequence, SliceReader};
use digest::DynDigest;
use p256::ecdsa::{
Signature as P256Signature, SigningKey as P256SigningKey, VerifyingKey as P256VerifyingKey,
};
use p256::pkcs8::DecodePrivateKey;
use p384::ecdsa::{
Signature as P384Signature, SigningKey as P384SigningKey, VerifyingKey as P384VerifyingKey,
};
use p521::ecdsa::{
Signature as P521Signature, SigningKey as P521SigningKey, VerifyingKey as P521VerifyingKey,
};
use rsa::pkcs1v15::{
Signature as RsaSignature, SigningKey as RsaSigningKey, VerifyingKey as RsaVerifyingKey,
};
use rsa::pkcs8::SubjectPublicKeyInfoRef;
use rsa::signature::SignatureEncoding;
use rsa::signature::hazmat::{PrehashSigner, PrehashVerifier};
use rsa::{RsaPrivateKey, RsaPublicKey};
use sha1::Sha1;
use sha2::{Sha256, Sha384, Sha512};
use x509_cert::Certificate;
use x509_cert::der::Any;
use x509_cert::der::asn1::{OctetString, PrintableString};
use x509_cert::spki::AlgorithmIdentifier;
use base::{LoggedResult, MappedFile, ResultExt, SilentLogExt, Utf8CStr, cstr, log_err};
use crate::ffi::BootImage;
/// Digest wrapper for boot image checksums; only SHA-1 (legacy images) and
/// SHA-256 are supported.
#[allow(clippy::upper_case_acronyms)]
pub enum SHA {
    SHA1(Sha1),
    SHA256(Sha256),
}

impl SHA {
    /// Feed more data into the running hash.
    pub fn update(&mut self, data: &[u8]) {
        match self {
            SHA::SHA1(h) => h.update(data),
            SHA::SHA256(h) => h.update(data),
        }
    }

    /// Digest length in bytes (20 for SHA-1, 32 for SHA-256).
    pub fn output_size(&self) -> usize {
        match self {
            SHA::SHA1(h) => h.output_size(),
            SHA::SHA256(h) => h.output_size(),
        }
    }

    /// Write the final digest into `out` and reset the hasher.
    /// Errors (e.g. output size mismatch) are deliberately ignored.
    pub fn finalize_into(&mut self, out: &mut [u8]) {
        match self {
            SHA::SHA1(h) => h.finalize_into_reset(out),
            SHA::SHA256(h) => h.finalize_into_reset(out),
        }
        .ok();
    }
}
/// Allocate a fresh hasher: SHA-1 when `use_sha1` is set, SHA-256 otherwise.
pub fn get_sha(use_sha1: bool) -> Box<SHA> {
    let hasher = match use_sha1 {
        true => SHA::SHA1(Sha1::default()),
        false => SHA::SHA256(Sha256::default()),
    };
    Box::new(hasher)
}
pub fn sha1_hash(data: &[u8], out: &mut [u8]) {
let mut h = Sha1::default();
h.update(data);
DynDigest::finalize_into(h, out).ok();
}
pub fn sha256_hash(data: &[u8], out: &mut [u8]) {
let mut h = Sha256::default();
h.update(data);
DynDigest::finalize_into(h, out).ok();
}
/// Supported private key + digest pairings for signing.
/// NOTE(review): the "SHA521" variant name looks like a typo — the P-521
/// branch in `Signer::from_private_key` pairs it with a SHA-512 digest.
/// Kept as-is since the name is internal and mirrored in `VerifyingKey`.
#[allow(clippy::large_enum_variant)]
enum SigningKey {
    SHA256withRSA(RsaSigningKey<Sha256>),
    SHA256withECDSA(P256SigningKey),
    SHA384withECDSA(P384SigningKey),
    SHA521withECDSA(P521SigningKey),
}
/// Supported public key + digest pairings for verification.
/// NOTE(review): as with `SigningKey`, "SHA521" is paired with a SHA-512
/// digest in `Verifier::from_public_key`; the name is kept for symmetry.
#[allow(clippy::large_enum_variant)]
enum VerifyingKey {
    SHA256withRSA(RsaVerifyingKey<Sha256>),
    SHA256withECDSA(P256VerifyingKey),
    SHA384withECDSA(P384VerifyingKey),
    SHA521withECDSA(P521VerifyingKey),
}
/// Streaming signature verifier: data is hashed incrementally via `update`,
/// then the prehash is verified against a detached signature.
struct Verifier {
    digest: Box<dyn DynDigest>,
    key: VerifyingKey,
}
impl Verifier {
    /// Construct a Verifier from an X.509 SubjectPublicKeyInfo.
    ///
    /// Key types are probed in order: RSA (with SHA-256), then P-256, P-384,
    /// and P-521 ECDSA; each is paired with the digest its scheme expects.
    /// Returns an error if the key parses as none of the supported types.
    fn from_public_key(key: SubjectPublicKeyInfoRef) -> LoggedResult<Verifier> {
        let digest: Box<dyn DynDigest>;
        let key = if let Ok(rsa) = RsaPublicKey::try_from(key.clone()) {
            digest = Box::<Sha256>::default();
            VerifyingKey::SHA256withRSA(RsaVerifyingKey::<Sha256>::new(rsa))
        } else if let Ok(ec) = P256VerifyingKey::try_from(key.clone()) {
            digest = Box::<Sha256>::default();
            VerifyingKey::SHA256withECDSA(ec)
        } else if let Ok(ec) = P384VerifyingKey::try_from(key.clone()) {
            digest = Box::<Sha384>::default();
            VerifyingKey::SHA384withECDSA(ec)
        } else if let Ok(ec) = P521VerifyingKey::try_from(key.clone()) {
            digest = Box::<Sha512>::default();
            VerifyingKey::SHA521withECDSA(ec)
        } else {
            // Fixed error message: this function parses a *public* key
            // (the previous message said "private key").
            return log_err!("Unsupported public key");
        };
        Ok(Verifier { digest, key })
    }

    /// Feed data into the running digest.
    fn update(&mut self, data: &[u8]) {
        self.digest.update(data)
    }

    /// Finalize the digest and verify `signature` against the prehash,
    /// consuming the Verifier.
    fn verify(mut self, signature: &[u8]) -> LoggedResult<()> {
        let hash = self.digest.finalize_reset();
        match &self.key {
            VerifyingKey::SHA256withRSA(key) => {
                let sig = RsaSignature::try_from(signature)?;
                key.verify_prehash(hash.as_ref(), &sig).log()
            }
            VerifyingKey::SHA256withECDSA(key) => {
                let sig = P256Signature::from_slice(signature)?;
                key.verify_prehash(hash.as_ref(), &sig).log()
            }
            VerifyingKey::SHA384withECDSA(key) => {
                let sig = P384Signature::from_slice(signature)?;
                key.verify_prehash(hash.as_ref(), &sig).log()
            }
            VerifyingKey::SHA521withECDSA(key) => {
                let sig = P521Signature::from_slice(signature)?;
                key.verify_prehash(hash.as_ref(), &sig).log()
            }
        }
    }
}
/// Streaming signer: data is hashed incrementally via `update`, then the
/// prehash is signed with the configured private key.
struct Signer {
    digest: Box<dyn DynDigest>,
    key: SigningKey,
}
impl Signer {
    /// Construct a Signer from a PKCS#8 DER encoded private key.
    ///
    /// Key types are probed in the same order as `Verifier::from_public_key`:
    /// RSA (with SHA-256), then P-256, P-384, and P-521 ECDSA. Returns an
    /// error if the key parses as none of the supported types.
    fn from_private_key(key: &[u8]) -> LoggedResult<Signer> {
        let digest: Box<dyn DynDigest>;
        // Flattened into an if-let chain for consistency with
        // Verifier::from_public_key (replaces a triple-nested match ladder).
        let key = if let Ok(rsa) = RsaPrivateKey::from_pkcs8_der(key) {
            digest = Box::<Sha256>::default();
            SigningKey::SHA256withRSA(RsaSigningKey::<Sha256>::new(rsa))
        } else if let Ok(ec) = P256SigningKey::from_pkcs8_der(key) {
            digest = Box::<Sha256>::default();
            SigningKey::SHA256withECDSA(ec)
        } else if let Ok(ec) = P384SigningKey::from_pkcs8_der(key) {
            digest = Box::<Sha384>::default();
            SigningKey::SHA384withECDSA(ec)
        } else if let Ok(ec) = P521SigningKey::from_pkcs8_der(key) {
            digest = Box::<Sha512>::default();
            SigningKey::SHA521withECDSA(ec)
        } else {
            return log_err!("Unsupported private key");
        };
        Ok(Signer { digest, key })
    }

    /// Feed data into the running digest.
    fn update(&mut self, data: &[u8]) {
        self.digest.update(data)
    }

    /// Finalize the digest and sign the prehash, consuming the Signer.
    /// Returns the encoded signature bytes.
    fn sign(mut self) -> LoggedResult<Vec<u8>> {
        let hash = self.digest.finalize_reset();
        let v = match &self.key {
            SigningKey::SHA256withRSA(key) => {
                let sig: RsaSignature = key.sign_prehash(hash.as_ref())?;
                sig.to_vec()
            }
            SigningKey::SHA256withECDSA(key) => {
                let sig: P256Signature = key.sign_prehash(hash.as_ref())?;
                sig.to_vec()
            }
            SigningKey::SHA384withECDSA(key) => {
                let sig: P384Signature = key.sign_prehash(hash.as_ref())?;
                sig.to_vec()
            }
            SigningKey::SHA521withECDSA(key) => {
                let sig: P521Signature = key.sign_prehash(hash.as_ref())?;
                sig.to_vec()
            }
        };
        Ok(v)
    }
}
/*
* BootSignature ::= SEQUENCE {
* formatVersion ::= INTEGER,
* certificate ::= Certificate,
* algorithmIdentifier ::= SEQUENCE {
* algorithm OBJECT IDENTIFIER,
* parameters ANY DEFINED BY algorithm OPTIONAL
* },
* authenticatedAttributes ::= SEQUENCE {
* target CHARACTER STRING,
* length INTEGER
* },
* signature ::= OCTET STRING
* }
*/
/// The authenticatedAttributes field of BootSignature: the target partition
/// name and the byte length of the signed payload.
#[derive(Sequence)]
struct AuthenticatedAttributes {
    target: PrintableString,
    length: u64,
}
/// The ASN.1 BootSignature structure appended after the boot image payload
/// (schema documented in the comment block above).
#[derive(Sequence)]
struct BootSignature {
    format_version: i32,
    certificate: Certificate,
    algorithm_identifier: AlgorithmIdentifier<Any>,
    authenticated_attributes: AuthenticatedAttributes,
    signature: OctetString,
}
impl BootSignature {
    /// Verify `payload` against this signature block.
    ///
    /// The signed message is payload || DER(authenticated_attributes), using
    /// the public key from the embedded certificate. The payload size must
    /// match the length recorded in the authenticated attributes.
    fn verify(self, payload: &[u8]) -> LoggedResult<()> {
        if self.authenticated_attributes.length as usize != payload.len() {
            return log_err!("Invalid image size");
        }
        let mut verifier = Verifier::from_public_key(
            self.certificate
                .tbs_certificate()
                .subject_public_key_info()
                .owned_to_ref(),
        )?;
        verifier.update(payload);
        let attr = self.authenticated_attributes.to_der()?;
        verifier.update(attr.as_slice());
        verifier.verify(self.signature.as_bytes())?;
        Ok(())
    }
}
impl BootImage {
    /// Verify the boot image's tail signature against its payload.
    ///
    /// When `cert` is provided, the certificate embedded in the signature is
    /// replaced with the PEM certificate at that path before verification.
    pub fn verify(&self, cert: Option<&Utf8CStr>) -> LoggedResult<()> {
        let tail = self.tail();
        if tail.starts_with(b"AVB0") {
            // "AVB0" magic: the tail is AVB data, not a BootSignature —
            // this verifier cannot handle it, so bail out (no message).
            return log_err!();
        }
        // Don't use BootSignature::from_der because tail might have trailing zeros
        let mut reader = SliceReader::new(tail)?;
        let mut sig = BootSignature::decode(&mut reader).silent()?;
        if let Some(s) = cert {
            let pem = MappedFile::open(s)?;
            sig.certificate = Certificate::from_pem(pem)?;
        };
        sig.verify(self.payload()).log()
    }

    /// C++ FFI wrapper returning a plain bool (default embedded certificate).
    pub fn verify_for_cxx(&self) -> bool {
        self.verify(None).is_ok()
    }
}
/// A byte source that is either a file mapped at runtime or a static slice
/// compiled into the binary (the bundled verity key material).
enum Bytes {
    Mapped(MappedFile),
    Slice(&'static [u8]),
}

impl AsRef<[u8]> for Bytes {
    fn as_ref(&self) -> &[u8] {
        match self {
            Bytes::Mapped(m) => m.as_ref(),
            Bytes::Slice(s) => s,
        }
    }
}
const VERITY_PEM: &[u8] = include_bytes!("../../../tools/keys/verity.x509.pem");
const VERITY_PK8: &[u8] = include_bytes!("../../../tools/keys/verity.pk8");
/// Sign a boot image payload, producing the DER-encoded BootSignature blob.
///
/// * `payload` — the boot image bytes to sign
/// * `name` — the partition target recorded in the authenticated attributes
/// * `cert` / `key` — optional paths to a PEM certificate and a PKCS#8 DER
///   private key; the bundled verity keys are used when absent.
pub fn sign_boot_image(
    payload: &[u8],
    name: &Utf8CStr,
    cert: Option<&Utf8CStr>,
    key: Option<&Utf8CStr>,
) -> LoggedResult<Vec<u8>> {
    let cert = match cert {
        Some(s) => Bytes::Mapped(MappedFile::open(s)?),
        None => Bytes::Slice(VERITY_PEM),
    };
    let key = match key {
        Some(s) => Bytes::Mapped(MappedFile::open(s)?),
        None => Bytes::Slice(VERITY_PK8),
    };
    // Parse cert and private key
    let cert = Certificate::from_pem(cert)?;
    let mut signer = Signer::from_private_key(key.as_ref())?;
    // Sign image: the signed message is payload || DER(authenticated_attributes)
    let attr = AuthenticatedAttributes {
        target: PrintableString::new(name.as_bytes())?,
        length: payload.len() as u64,
    };
    signer.update(payload);
    signer.update(attr.to_der()?.as_slice());
    let sig = signer.sign()?;
    // Create BootSignature DER, reusing the cert's own signature algorithm id
    let alg_id = cert.signature_algorithm().clone();
    let sig = BootSignature {
        format_version: 1,
        certificate: cert,
        algorithm_identifier: alg_id,
        authenticated_attributes: attr,
        signature: OctetString::new(sig)?,
    };
    sig.to_der().log()
}
/// C++ FFI wrapper: sign `payload` for the "/boot" target with the bundled
/// default keys, returning an empty vector on failure.
pub fn sign_payload_for_cxx(payload: &[u8]) -> Vec<u8> {
    match sign_boot_image(payload, cstr!("/boot"), None, None) {
        Ok(sig) => sig,
        Err(_) => Vec::new(),
    }
}

View file

@ -0,0 +1,32 @@
[package]
name = "magisk"
version.workspace = true
edition.workspace = true
[lib]
crate-type = ["staticlib"]
path = "lib.rs"
[features]
default = ["check-signature", "check-client", "su-check-db"]
# Disable these features for easier debugging during development
check-signature = []
check-client = []
su-check-db = []
[build-dependencies]
cxx-gen = { workspace = true }
pb-rs = { workspace = true }
[dependencies]
base = { workspace = true, features = ["selinux"] }
cxx = { workspace = true }
num-traits = { workspace = true }
num-derive = { workspace = true }
quick-protobuf = { workspace = true }
bytemuck = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
bit-set = { workspace = true }
nix = { workspace = true, features = ["fs", "mount", "poll", "signal", "term", "user", "zerocopy"] }
bitflags = { workspace = true }

View file

@ -0,0 +1,10 @@
#include <core.hpp>
// Single-applet stub entry point: initialize logging/argv0, then dispatch
// straight into the applet main selected at build time via APPLET_STUB_MAIN.
int main(int argc, char *argv[]) {
    if (argc < 1)
        return 1;
    cmdline_logging();
    init_argv0(argc, argv);
    // Do not mask any permission bits of files we create
    umask(0);
    return APPLET_STUB_MAIN(argc, argv);
}

View file

@ -0,0 +1,66 @@
#include <libgen.h>
#include <sys/stat.h>
#include <core.hpp>
using namespace std;
// Maps an applet name to its entry function.
struct Applet {
    string_view name;
    int (*fn)(int, char *[]);
};

// Applets reachable by name (via symlink name or "magisk <applet> ...").
constexpr Applet applets[] = {
    { "su", su_client_main },
    { "resetprop", resetprop_main },
};

// Applets only reachable through an empty argv[0] (internal invocations).
constexpr Applet private_applets[] = {
    { "zygisk", zygisk_main },
};
// Multiplexed entry point: dispatch to an applet based on argv[0] (busybox
// style), or on argv[1] when invoked as "magisk <applet> ...".
int main(int argc, char *argv[]) {
    if (argc < 1)
        return 1;
    cmdline_logging();
    init_argv0(argc, argv);
    // NOTE(review): POSIX basename() may modify its argument; assumed safe
    // here since argv[0] is writable — confirm against bionic's semantics.
    Utf8CStr argv0 = basename(argv[0]);
    umask(0);
    if (argv[0][0] == '\0') {
        // When argv[0] is an empty string, we're calling private applets
        if (argc < 2)
            return 1;
        // Shift argv so the private applet sees its own name as argv[0]
        --argc;
        ++argv;
        for (const auto &app : private_applets) {
            if (argv[0] == app.name) {
                return app.fn(argc, argv);
            }
        }
        fprintf(stderr, "%s: applet not found\n", argv[0]);
        return 1;
    }
    if (argv0 == "magisk" || argv0 == "magisk32" || argv0 == "magisk64") {
        if (argc > 1 && argv[1][0] != '-') {
            // Calling applet with "magisk [applet] args..."
            --argc;
            ++argv;
            argv0 = argv[0];
        } else {
            // Plain "magisk [flags]" invocation
            return magisk_main(argc, argv);
        }
    }
    for (const auto &app : applets) {
        if (argv0 == app.name) {
            return app.fn(argc, argv);
        }
    }
    fprintf(stderr, "%s: applet not found\n", argv0.c_str());
    return 1;
}

View file

@ -0,0 +1,254 @@
use crate::consts::{APP_PACKAGE_NAME, BBPATH, DATABIN, MODULEROOT, SECURE_DIR};
use crate::daemon::MagiskD;
use crate::ffi::{
DbEntryKey, RequestCode, check_key_combo, exec_common_scripts, exec_module_scripts,
get_magisk_tmp, initialize_denylist,
};
use crate::logging::setup_logfile;
use crate::module::disable_modules;
use crate::mount::{clean_mounts, setup_preinit_dir};
use crate::resetprop::get_prop;
use crate::selinux::restorecon;
use base::const_format::concatcp;
use base::{BufReadExt, FsPathBuilder, ResultExt, cstr, error, info};
use bitflags::bitflags;
use nix::fcntl::OFlag;
use std::io::BufReader;
use std::os::unix::net::UnixStream;
use std::process::{Command, Stdio};
use std::sync::atomic::Ordering;
bitflags! {
    /// Records which boot stages have already run (and whether safe mode was
    /// triggered); guarded by MagiskD::boot_stage_lock so that the stage
    /// handlers are serialized and run at most once each.
    #[derive(Default)]
    pub struct BootState : u32 {
        const PostFsDataDone = 1 << 0;
        const LateStartDone = 1 << 1;
        const BootComplete = 1 << 2;
        const SafeMode = 1 << 3;
    }
}
impl MagiskD {
    /// Prepare /data/adb and MAGISKTMP with everything Magisk needs at runtime:
    /// migrate binaries from legacy locations into DATABIN, create the secure
    /// directory layout, install busybox applets into MAGISKTMP, and copy
    /// magisk32/magiskpolicy from DATABIN when present.
    /// Returns false when the environment is incomplete (busybox missing).
    fn setup_magisk_env(&self) -> bool {
        info!("* Initializing Magisk environment");
        let mut buf = cstr::buf::default();
        let app_bin_dir = buf
            .append_path(self.app_data_dir())
            .append_path("0")
            .append_path(APP_PACKAGE_NAME)
            .append_path("install");
        // Alternative binaries paths
        let alt_bin_dirs = &[
            cstr!("/cache/data_adb/magisk"),
            cstr!("/data/magisk"),
            app_bin_dir,
        ];
        // Migrate binaries found in any alternative location into DATABIN
        for dir in alt_bin_dirs {
            if dir.exists() {
                cstr!(DATABIN).remove_all().ok();
                dir.copy_to(cstr!(DATABIN)).ok();
                dir.remove_all().ok();
            }
        }
        cstr!("/cache/data_adb").remove_all().ok();
        // Directories in /data/adb
        cstr!(SECURE_DIR).follow_link().chmod(0o700).log_ok();
        cstr!(DATABIN).mkdir(0o755).log_ok();
        cstr!(MODULEROOT).mkdir(0o755).log_ok();
        cstr!(concatcp!(SECURE_DIR, "/post-fs-data.d"))
            .mkdir(0o755)
            .log_ok();
        cstr!(concatcp!(SECURE_DIR, "/service.d"))
            .mkdir(0o755)
            .log_ok();
        restorecon();
        let busybox = cstr!(concatcp!(DATABIN, "/busybox"));
        if !busybox.exists() {
            // Busybox is mandatory for the boot scripts below
            return false;
        }
        // Copy busybox into MAGISKTMP and install its applet symlinks there
        let tmp_bb = buf.append_path(get_magisk_tmp()).append_path(BBPATH);
        tmp_bb.mkdirs(0o755).ok();
        tmp_bb.append_path("busybox");
        busybox.copy_to(tmp_bb).ok();
        tmp_bb.follow_link().chmod(0o755).log_ok();
        // Install busybox applets
        Command::new(&tmp_bb)
            .arg("--install")
            .arg("-s")
            .arg(tmp_bb.parent_dir().unwrap())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .log_ok();
        // magisk32 and magiskpolicy are not installed into ramdisk and has to be copied
        // from data to magisk tmp
        let magisk32 = cstr!(concatcp!(DATABIN, "/magisk32"));
        if magisk32.exists() {
            let tmp = buf.append_path(get_magisk_tmp()).append_path("magisk32");
            magisk32.copy_to(tmp).log_ok();
        }
        let magiskpolicy = cstr!(concatcp!(DATABIN, "/magiskpolicy"));
        if magiskpolicy.exists() {
            let tmp = buf
                .append_path(get_magisk_tmp())
                .append_path("magiskpolicy");
            magiskpolicy.copy_to(tmp).log_ok();
        }
        true
    }

    /// The post-fs-data boot stage. Returns true when the rest of this boot
    /// should run in "safe mode" (modules and zygisk disabled), false for a
    /// normal boot.
    fn post_fs_data(&self) -> bool {
        setup_logfile();
        info!("** post-fs-data mode running");
        self.preserve_stub_apk();
        // Check secure dir
        let secure_dir = cstr!(SECURE_DIR);
        if !secure_dir.exists() {
            if self.sdk_int < 24 {
                secure_dir.mkdir(0o700).log_ok();
            } else {
                // On SDK >= 24 a missing SECURE_DIR means data is not in a
                // usable state yet — abort into safe-mode-like early return
                error!("* {} is not present, abort", SECURE_DIR);
                return true;
            }
        }
        self.prune_su_access();
        if !self.setup_magisk_env() {
            error!("* Magisk environment incomplete, abort");
            return true;
        }
        // Check safe mode: the bootloop counter is incremented here and only
        // reset in boot_complete, so reaching 2 means previous boots never
        // completed.
        let boot_cnt = self.get_db_setting(DbEntryKey::BootloopCount);
        self.set_db_setting(DbEntryKey::BootloopCount, boot_cnt + 1)
            .log()
            .ok();
        let safe_mode = boot_cnt >= 2
            || get_prop(cstr!("persist.sys.safemode")) == "1"
            || get_prop(cstr!("ro.sys.safemode")) == "1"
            || check_key_combo();
        if safe_mode {
            info!("* Safe mode triggered");
            // Disable all modules and zygisk so next boot will be clean
            disable_modules();
            self.set_db_setting(DbEntryKey::ZygiskConfig, 0).log_ok();
            return true;
        }
        exec_common_scripts(cstr!("post-fs-data"));
        self.zygisk_enabled.store(
            self.get_db_setting(DbEntryKey::ZygiskConfig) != 0,
            Ordering::Release,
        );
        initialize_denylist();
        self.handle_modules();
        clean_mounts();
        false
    }

    /// The late_start service stage: run service.d and module service scripts.
    fn late_start(&self) {
        setup_logfile();
        info!("** late_start service mode running");
        exec_common_scripts(cstr!("service"));
        if let Some(module_list) = self.module_list.get() {
            exec_module_scripts(cstr!("service"), module_list);
        }
    }

    /// The boot-complete stage: reset the bootloop counter and finish setup
    /// that requires a fully booted system.
    fn boot_complete(&self) {
        setup_logfile();
        info!("** boot-complete triggered");
        // Reset the bootloop counter once we have boot-complete
        self.set_db_setting(DbEntryKey::BootloopCount, 0).log_ok();
        // At this point it's safe to create the folder
        let secure_dir = cstr!(SECURE_DIR);
        if !secure_dir.exists() {
            secure_dir.mkdir(0o700).log_ok();
        }
        setup_preinit_dir();
        self.ensure_manager();
        if self.zygisk_enabled.load(Ordering::Relaxed) {
            self.zygisk.lock().unwrap().reset(true);
        }
    }

    /// Entry point for boot-stage requests (POST_FS_DATA / LATE_START /
    /// BOOT_COMPLETE). Stages are serialized via boot_stage_lock and each runs
    /// at most once; LATE_START and BOOT_COMPLETE require POST_FS_DATA to have
    /// completed, and LATE_START is skipped in safe mode.
    pub fn boot_stage_handler(&self, client: UnixStream, code: RequestCode) {
        // Make sure boot stage execution is always serialized
        let mut state = self.boot_stage_lock.lock().unwrap();
        match code {
            RequestCode::POST_FS_DATA => {
                // `client` stays open until this arm returns, keeping the
                // requester blocked while post-fs-data runs
                if check_data() && !state.contains(BootState::PostFsDataDone) {
                    if self.post_fs_data() {
                        state.insert(BootState::SafeMode);
                    }
                    state.insert(BootState::PostFsDataDone);
                }
            }
            RequestCode::LATE_START => {
                // Close the client connection before doing any work
                drop(client);
                if state.contains(BootState::PostFsDataDone) && !state.contains(BootState::SafeMode)
                {
                    self.late_start();
                    state.insert(BootState::LateStartDone);
                }
            }
            RequestCode::BOOT_COMPLETE => {
                drop(client);
                if state.contains(BootState::PostFsDataDone) {
                    state.insert(BootState::BootComplete);
                    self.boot_complete()
                }
            }
            _ => {}
        }
    }
}
/// Check whether /data is mounted and usable: a non-tmpfs /data mount must
/// exist, and if the device reports an encrypted state, vold must be running.
fn check_data() -> bool {
    let Ok(file) = cstr!("/proc/mounts").open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) else {
        return false;
    };
    // Look for a real (non-tmpfs) /data mount entry
    let mut data_mounted = false;
    BufReader::new(file).for_each_line(|line| {
        if line.contains(" /data ") && !line.contains("tmpfs") {
            data_mounted = true;
            return false;
        }
        true
    });
    if !data_mounted {
        return false;
    }
    let crypto_state = get_prop(cstr!("ro.crypto.state"));
    if crypto_state.is_empty() {
        // ro.crypto.state is not set, assume it's unencrypted
        return true;
    }
    if crypto_state != "encrypted" {
        // Unencrypted, we can directly access data
        true
    } else {
        // Encrypted, check whether vold is started
        !get_prop(cstr!("init.svc.vold")).is_empty()
    }
}

28
native/src/core/build.rs Normal file
View file

@ -0,0 +1,28 @@
use pb_rs::ConfigBuilder;
use pb_rs::types::FileDescriptor;
use crate::codegen::gen_cxx_binding;
#[path = "../include/codegen.rs"]
mod codegen;
/// Build script: generate the C++ <-> Rust FFI bindings for this crate and
/// compile the persistent-properties protobuf schema into Rust code.
fn main() {
    // Re-run when the protobuf schema changes
    println!("cargo:rerun-if-changed=resetprop/proto/persistent_properties.proto");
    gen_cxx_binding("core-rs");
    // Generate Rust code from the .proto into resetprop/proto
    let cb = ConfigBuilder::new(
        &["resetprop/proto/persistent_properties.proto"],
        None,
        Some(&"resetprop/proto"),
        &["."],
    )
    .unwrap();
    FileDescriptor::run(
        &cb.single_module(true)
            .dont_use_cow(true)
            .generate_getters(true)
            .build(),
    )
    .unwrap();
}

489
native/src/core/daemon.rs Normal file
View file

@ -0,0 +1,489 @@
use crate::bootstages::BootState;
use crate::consts::{
MAGISK_FILE_CON, MAGISK_FULL_VER, MAGISK_PROC_CON, MAGISK_VER_CODE, MAGISK_VERSION,
MAIN_CONFIG, MAIN_SOCKET, ROOTMNT, ROOTOVL,
};
use crate::db::Sqlite3;
use crate::ffi::{
ModuleInfo, RequestCode, RespondCode, denylist_handler, get_magisk_tmp, scan_deny_apps,
};
use crate::logging::{android_logging, magisk_logging, setup_logfile, start_log_daemon};
use crate::module::remove_modules;
use crate::package::ManagerInfo;
use crate::resetprop::{get_prop, set_prop};
use crate::selinux::restore_tmpcon;
use crate::socket::{IpcRead, IpcWrite};
use crate::su::SuInfo;
use crate::thread::ThreadPool;
use crate::zygisk::ZygiskState;
use base::const_format::concatcp;
use base::{
AtomicArc, BufReadExt, FileAttr, FsPathBuilder, LoggedResult, ReadExt, ResultExt, Utf8CStr,
Utf8CStrBuf, WriteExt, cstr, fork_dont_care, info, libc, log_err, set_nice_name,
};
use nix::fcntl::OFlag;
use nix::mount::MsFlags;
use nix::sys::signal::SigSet;
use nix::unistd::{dup2_stderr, dup2_stdin, dup2_stdout, getpid, getuid, setsid};
use num_traits::AsPrimitive;
use std::fmt::Write as _;
use std::io::{BufReader, Write};
use std::os::fd::{AsFd, AsRawFd, IntoRawFd, RawFd};
use std::os::unix::net::{UCred, UnixListener, UnixStream};
use std::process::{Command, exit};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Mutex, OnceLock};
use std::time::Duration;
// Global magiskd singleton
pub static MAGISKD: OnceLock<MagiskD> = OnceLock::new();

// Well-known Android UID constants
pub const AID_ROOT: i32 = 0;
pub const AID_SHELL: i32 = 2000;
pub const AID_APP_START: i32 = 10000;
pub const AID_APP_END: i32 = 19999;
// Stride between Android users in the uid space
pub const AID_USER_OFFSET: i32 = 100000;

/// Strip the Android user number from a uid, leaving the per-user app id.
pub const fn to_app_id(uid: i32) -> i32 {
    uid % AID_USER_OFFSET
}

/// Extract the Android user number from a uid.
pub const fn to_user_id(uid: i32) -> i32 {
    uid / AID_USER_OFFSET
}
/// All global state of the Magisk daemon (stored in the MAGISKD singleton).
#[derive(Default)]
pub struct MagiskD {
    // Lazily opened database connection
    pub sql_connection: Mutex<Option<Sqlite3>>,
    pub manager_info: Mutex<ManagerInfo>,
    // Serializes boot stage handlers and records which stages have run
    pub boot_stage_lock: Mutex<BootState>,
    // Set at most once (OnceLock) with the list of active modules
    pub module_list: OnceLock<Vec<ModuleInfo>>,
    pub zygisk_enabled: AtomicBool,
    pub zygisk: Mutex<ZygiskState>,
    pub cached_su_info: AtomicArc<SuInfo>,
    pub sdk_int: i32,
    pub is_emulator: bool,
    is_recovery: bool,
    // dev/inode identity of our own executable; used to authenticate clients
    exe_attr: FileAttr,
}
impl MagiskD {
    /// Access the global daemon singleton.
    /// Must only be called after daemon_entry has populated MAGISKD — the
    /// inner Option is unwrapped unchecked.
    pub fn get() -> &'static MagiskD {
        unsafe { MAGISKD.get().unwrap_unchecked() }
    }

    pub fn sdk_int(&self) -> i32 {
        self.sdk_int
    }

    /// Base directory of per-user app data: /data/user_de on SDK >= 24,
    /// /data/user on older releases.
    pub fn app_data_dir(&self) -> &'static Utf8CStr {
        if self.sdk_int >= 24 {
            cstr!("/data/user_de")
        } else {
            cstr!("/data/user")
        }
    }

    /// Requests handled inline on the listener thread (must stay quick).
    fn handle_request_sync(&self, mut client: UnixStream, code: RequestCode) {
        match code {
            RequestCode::CHECK_VERSION => {
                // Version string suffix: :D for debug builds, :R for release
                #[cfg(debug_assertions)]
                let s = concatcp!(MAGISK_VERSION, ":MAGISK:D");
                #[cfg(not(debug_assertions))]
                let s = concatcp!(MAGISK_VERSION, ":MAGISK:R");
                client.write_encodable(s).log_ok();
            }
            RequestCode::CHECK_VERSION_CODE => {
                client.write_pod(&MAGISK_VER_CODE).log_ok();
            }
            RequestCode::START_DAEMON => {
                setup_logfile();
            }
            RequestCode::STOP_DAEMON => {
                // Unmount all overlays
                denylist_handler(-1);
                // Restore native bridge property
                self.zygisk.lock().unwrap().restore_prop();
                client.write_pod(&0).log_ok();
                // Terminate the daemon!
                exit(0);
            }
            _ => {}
        }
    }

    /// Requests dispatched to the thread pool (may block or take long).
    fn handle_request_async(&self, mut client: UnixStream, code: RequestCode, cred: UCred) {
        match code {
            RequestCode::DENYLIST => {
                denylist_handler(client.into_raw_fd());
            }
            RequestCode::SUPERUSER => {
                self.su_daemon_handler(client, cred);
            }
            RequestCode::ZYGOTE_RESTART => {
                info!("** zygote restarted");
                self.prune_su_access();
                scan_deny_apps();
                if self.zygisk_enabled.load(Ordering::Relaxed) {
                    self.zygisk.lock().unwrap().reset(false);
                }
            }
            RequestCode::SQLITE_CMD => {
                self.db_exec_for_cli(client).ok();
            }
            RequestCode::REMOVE_MODULES => {
                let do_reboot: bool = client.read_decodable().log().unwrap_or_default();
                remove_modules();
                // Acknowledge before the (optional) reboot
                client.write_pod(&0).log_ok();
                if do_reboot {
                    self.reboot();
                }
            }
            RequestCode::ZYGISK => {
                self.zygisk_handler(client);
            }
            _ => {}
        }
    }

    /// Reboot the device; boot into recovery when running in recovery mode.
    fn reboot(&self) {
        if self.is_recovery {
            Command::new("/system/bin/reboot").arg("recovery").status()
        } else {
            Command::new("/system/bin/reboot").status()
        }
        .ok();
    }

    /// Authenticate a connecting process: its executable must be the very same
    /// file (same device + inode) as our own.
    #[cfg(feature = "check-client")]
    fn is_client(&self, pid: i32) -> bool {
        let mut buf = cstr::buf::new::<32>();
        write!(buf, "/proc/{pid}/exe").ok();
        if let Ok(attr) = buf.follow_link().get_attr() {
            attr.st.st_dev == self.exe_attr.st.st_dev && attr.st.st_ino == self.exe_attr.st.st_ino
        } else {
            false
        }
    }

    /// Development escape hatch: accept any client when the "check-client"
    /// feature is disabled.
    #[cfg(not(feature = "check-client"))]
    fn is_client(&self, pid: i32) -> bool {
        true
    }

    /// Validate a new connection (peer credentials, SELinux context, request
    /// code range, per-request permission), then dispatch it either inline,
    /// to the thread pool, or to the boot stage handler.
    fn handle_requests(&'static self, mut client: UnixStream) {
        let Ok(cred) = client.peer_cred() else {
            // Client died
            return;
        };
        // There are no abstractions for SO_PEERSEC yet, call the raw C API.
        let mut context = cstr::buf::new::<256>();
        unsafe {
            let mut len: libc::socklen_t = context.capacity().as_();
            libc::getsockopt(
                client.as_raw_fd(),
                libc::SOL_SOCKET,
                libc::SO_PEERSEC,
                context.as_mut_ptr().cast(),
                &mut len,
            );
        }
        // Re-sync the buffer length after the raw write above
        context.rebuild().ok();
        let is_root = cred.uid == 0;
        let is_shell = cred.uid == 2000;
        let is_zygote = &context == "u:r:zygote:s0";
        if !is_root && !is_zygote && !self.is_client(cred.pid.unwrap_or(-1)) {
            // Unsupported client state
            client.write_pod(&RespondCode::ACCESS_DENIED.repr).log_ok();
            return;
        }
        let mut code = -1;
        client.read_pod(&mut code).ok();
        // Reject out-of-range codes and the internal barrier markers
        if !(0..RequestCode::END.repr).contains(&code)
            || code == RequestCode::_SYNC_BARRIER_.repr
            || code == RequestCode::_STAGE_BARRIER_.repr
        {
            // Unknown request code
            return;
        }
        let code = RequestCode { repr: code };
        // Permission checks
        match code {
            RequestCode::POST_FS_DATA
            | RequestCode::LATE_START
            | RequestCode::BOOT_COMPLETE
            | RequestCode::ZYGOTE_RESTART
            | RequestCode::SQLITE_CMD
            | RequestCode::DENYLIST
            | RequestCode::STOP_DAEMON => {
                if !is_root {
                    client.write_pod(&RespondCode::ROOT_REQUIRED.repr).log_ok();
                    return;
                }
            }
            RequestCode::REMOVE_MODULES => {
                if !is_root && !is_shell {
                    // Only allow root and ADB shell to remove modules
                    client.write_pod(&RespondCode::ACCESS_DENIED.repr).log_ok();
                    return;
                }
            }
            RequestCode::ZYGISK => {
                if !is_zygote {
                    // Invalid client context
                    client.write_pod(&RespondCode::ACCESS_DENIED.repr).log_ok();
                    return;
                }
            }
            _ => {}
        }
        if client.write_pod(&RespondCode::OK.repr).is_err() {
            return;
        }
        // The barrier markers partition the request codes by dispatch mode:
        // below _SYNC_BARRIER_ -> inline; below _STAGE_BARRIER_ -> thread
        // pool; everything above -> boot stage handler (also on the pool).
        if code.repr < RequestCode::_SYNC_BARRIER_.repr {
            self.handle_request_sync(client, code)
        } else if code.repr < RequestCode::_STAGE_BARRIER_.repr {
            ThreadPool::exec_task(move || {
                self.handle_request_async(client, code, cred);
            })
        } else {
            ThreadPool::exec_task(move || {
                self.boot_stage_handler(client, code);
            })
        }
    }
}
/// Move process `pid` into `cgroup` by appending its pid to the group's
/// cgroup.procs file. No-op when the cgroup does not exist.
fn switch_cgroup(cgroup: &str, pid: i32) {
    let mut buf = cstr::buf::new::<64>()
        .join_path(cgroup)
        .join_path("cgroup.procs");
    if !buf.exists() {
        return;
    }
    if let Ok(mut file) = buf.open(OFlag::O_WRONLY | OFlag::O_APPEND | OFlag::O_CLOEXEC) {
        // The buffer is reused to format the pid, but only after the file
        // has been opened from it — the order here matters.
        buf.clear();
        write!(buf, "{pid}").ok();
        file.write_all(buf.as_bytes()).log_ok();
    }
}
/// The magiskd main routine: detach from the controlling environment, set up
/// logging and SELinux context, collect device state, construct the MagiskD
/// singleton, then serve requests on the main unix socket forever.
fn daemon_entry() {
    set_nice_name(cstr!("magiskd"));
    android_logging();
    // Block all signals
    SigSet::all().thread_set_mask().log_ok();
    // Swap out the original stdio
    if let Ok(null) = cstr!("/dev/null").open(OFlag::O_WRONLY).log() {
        dup2_stdout(null.as_fd()).log_ok();
        dup2_stderr(null.as_fd()).log_ok();
    }
    if let Ok(zero) = cstr!("/dev/zero").open(OFlag::O_RDONLY).log() {
        dup2_stdin(zero).log_ok();
    }
    setsid().log_ok();
    // Make sure the current context is magisk
    if let Ok(mut current) =
        cstr!("/proc/self/attr/current").open(OFlag::O_WRONLY | OFlag::O_CLOEXEC)
    {
        let con = cstr!(MAGISK_PROC_CON);
        current.write_all(con.as_bytes_with_nul()).log_ok();
    }
    start_log_daemon();
    magisk_logging();
    info!("Magisk {MAGISK_FULL_VER} daemon started");
    // Emulator detection via qemu props / "vsoc" device names
    let is_emulator = get_prop(cstr!("ro.kernel.qemu")) == "1"
        || get_prop(cstr!("ro.boot.qemu")) == "1"
        || get_prop(cstr!("ro.product.device")).contains("vsoc");
    // Load config status
    let magisk_tmp = get_magisk_tmp();
    let mut tmp_path = cstr::buf::new::<64>()
        .join_path(magisk_tmp)
        .join_path(MAIN_CONFIG);
    let mut is_recovery = false;
    if let Ok(main_config) = tmp_path.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
        BufReader::new(main_config).for_each_prop(|key, val| {
            if key == "RECOVERYMODE" {
                is_recovery = val == "true";
                return false;
            }
            true
        });
    }
    // tmp_path is reused below; shrink it back to the MAGISKTMP prefix
    tmp_path.truncate(magisk_tmp.len());
    let mut sdk_int = -1;
    if let Ok(build_prop) = cstr!("/system/build.prop").open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
        BufReader::new(build_prop).for_each_prop(|key, val| {
            if key == "ro.build.version.sdk" {
                sdk_int = val.parse::<i32>().unwrap_or(-1);
                return false;
            }
            true
        });
    }
    if sdk_int < 0 {
        // In case some devices do not store this info in build.prop, fallback to getprop
        sdk_int = get_prop(cstr!("ro.build.version.sdk"))
            .parse::<i32>()
            .unwrap_or(-1);
    }
    info!("* Device API level: {sdk_int}");
    restore_tmpcon().log_ok();
    // Escape from cgroup
    let pid = getpid().as_raw();
    switch_cgroup("/acct", pid);
    switch_cgroup("/dev/cg2_bpf", pid);
    switch_cgroup("/sys/fs/cgroup", pid);
    if get_prop(cstr!("ro.config.per_app_memcg")) != "false" {
        switch_cgroup("/dev/memcg/apps", pid);
    }
    // Samsung workaround #7887
    if cstr!("/system_ext/app/mediatek-res/mediatek-res.apk").exists() {
        set_prop(cstr!("ro.vendor.mtk_model"), cstr!("0"));
    }
    // Cleanup pre-init mounts listed in the ROOTMNT file
    tmp_path.append_path(ROOTMNT);
    if let Ok(mount_list) = tmp_path.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
        BufReader::new(mount_list).for_each_line(|line| {
            line.truncate(line.trim_end().len());
            let item = Utf8CStr::from_string(line);
            item.unmount().log_ok();
            true
        })
    }
    tmp_path.truncate(magisk_tmp.len());
    // Remount rootfs as read-only if requested
    if std::env::var_os("REMOUNT_ROOT").is_some() {
        cstr!("/").remount_mount_flags(MsFlags::MS_RDONLY).log_ok();
        unsafe { std::env::remove_var("REMOUNT_ROOT") };
    }
    // Remove all pre-init overlay files to free-up memory
    tmp_path.append_path(ROOTOVL);
    tmp_path.remove_all().ok();
    tmp_path.truncate(magisk_tmp.len());
    // Record our own executable's identity; is_client() matches peers against it
    let exe_attr = cstr!("/proc/self/exe")
        .follow_link()
        .get_attr()
        .log()
        .unwrap_or_default();
    let daemon = MagiskD {
        sdk_int,
        is_emulator,
        is_recovery,
        exe_attr,
        ..Default::default()
    };
    MAGISKD.set(daemon).ok();
    // (Re)bind the main socket, world-accessible so clients can connect
    let sock_path = cstr::buf::new::<64>()
        .join_path(get_magisk_tmp())
        .join_path(MAIN_SOCKET);
    sock_path.remove().ok();
    let Ok(sock) = UnixListener::bind(&sock_path).log() else {
        exit(1);
    };
    sock_path.follow_link().chmod(0o666).log_ok();
    sock_path.set_secontext(cstr!(MAGISK_FILE_CON)).log_ok();
    // Loop forever to listen for requests
    let daemon = MagiskD::get();
    for client in sock.incoming() {
        if let Ok(client) = client.log() {
            daemon.handle_requests(client);
        } else {
            exit(1);
        }
    }
}
/// Connect to the magiskd control socket and perform the initial handshake.
///
/// Sends `code` and waits for the daemon's response. When `create` is true,
/// the caller is root, and we are running from magisk tmpfs, a daemon is
/// forked on demand and the connection retried until it comes up.
pub fn connect_daemon(code: RequestCode, create: bool) -> LoggedResult<UnixStream> {
    let sock_path = cstr::buf::new::<64>()
        .join_path(get_magisk_tmp())
        .join_path(MAIN_SOCKET);
    // Handshake: write the request code, then read the daemon's response code.
    fn send_request(code: RequestCode, mut socket: UnixStream) -> LoggedResult<UnixStream> {
        socket.write_pod(&code.repr).log_ok();
        let mut res = -1;
        socket.read_pod(&mut res).log_ok();
        let res = RespondCode { repr: res };
        match res {
            RespondCode::OK => Ok(socket),
            RespondCode::ROOT_REQUIRED => {
                log_err!("Root is required for this operation")
            }
            RespondCode::ACCESS_DENIED => {
                // Message typo fixed (was "Accessed denied")
                log_err!("Access denied")
            }
            _ => {
                log_err!("Daemon error")
            }
        }
    }
    match UnixStream::connect(&sock_path) {
        Ok(socket) => send_request(code, socket),
        Err(e) => {
            if !create || !getuid().is_root() {
                return log_err!("Cannot connect to daemon: {e}");
            }
            // Only spawn a daemon when running out of the magisk tmpfs
            let mut buf = cstr::buf::new::<64>();
            if cstr!("/proc/self/exe").read_link(&mut buf).is_err()
                || !buf.starts_with(get_magisk_tmp().as_str())
            {
                return log_err!("Start daemon on magisk tmpfs");
            }
            // Fork a process and run the daemon
            if fork_dont_care() == 0 {
                daemon_entry();
                exit(0);
            }
            // In the client, we keep retry and connect to the socket
            loop {
                if let Ok(socket) = UnixStream::connect(&sock_path) {
                    return send_request(code, socket);
                } else {
                    std::thread::sleep(Duration::from_millis(100));
                }
            }
        }
    }
}
/// C++ FFI wrapper around connect_daemon: returns the raw socket fd on
/// success, -1 on any failure.
pub fn connect_daemon_for_cxx(code: RequestCode, create: bool) -> RawFd {
    match connect_daemon(code, create) {
        Ok(socket) => socket.into_raw_fd(),
        Err(_) => -1,
    }
}

351
native/src/core/db.rs Normal file
View file

@ -0,0 +1,351 @@
#![allow(improper_ctypes, improper_ctypes_definitions)]
use crate::daemon::{MAGISKD, MagiskD};
use crate::ffi::{
DbEntryKey, DbStatement, DbValues, MntNsMode, open_and_init_db, sqlite3, sqlite3_errstr,
};
use crate::socket::{IpcRead, IpcWrite};
use DbArg::{Integer, Text};
use base::{LoggedResult, ResultExt, Utf8CStr};
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
use std::ffi::c_void;
use std::io::{BufReader, BufWriter};
use std::os::unix::net::UnixStream;
use std::pin::Pin;
use std::ptr;
use std::ptr::NonNull;
use thiserror::Error;
/// Map a raw sqlite3 error code to its static human readable message.
fn sqlite_err_str(code: i32) -> &'static Utf8CStr {
    // SAFETY: sqlite3 always returns UTF-8 strings
    unsafe { Utf8CStr::from_ptr_unchecked(sqlite3_errstr(code)) }
}

/// Thin wrapper over a raw sqlite3 result code; Display goes through
/// `sqlite_err_str` above.
#[repr(transparent)]
#[derive(Error, Debug)]
#[error("sqlite3: {}", sqlite_err_str(self.0))]
pub struct SqliteError(i32);

pub type SqliteResult<T> = Result<T, SqliteError>;

/// Convert a raw sqlite3 return code into a `SqliteResult`.
pub trait SqliteReturn {
    fn sql_result(self) -> SqliteResult<()>;
}

impl SqliteReturn for i32 {
    fn sql_result(self) -> SqliteResult<()> {
        // sqlite3 uses 0 (SQLITE_OK) for success; anything else is an error
        if self != 0 {
            Err(SqliteError(self))
        } else {
            Ok(())
        }
    }
}
/// Sink for rows produced by a SQL query; called once per result row.
pub trait SqlTable {
    fn on_row(&mut self, columns: &[String], values: &DbValues);
}

// Blanket impl: any FnMut with the matching signature can act as a row sink.
impl<T> SqlTable for T
where
    T: FnMut(&[String], &DbValues),
{
    fn on_row(&mut self, columns: &[String], values: &DbValues) {
        // call_mut requires the nightly fn_traits feature (enabled in lib.rs)
        self.call_mut((columns, values))
    }
}
/// In-memory view of the daemon's persisted `settings` table.
#[derive(Default)]
pub struct DbSettings {
    pub root_access: RootAccess,
    pub multiuser_mode: MultiuserMode,
    pub mnt_ns: MntNsMode,
    // Consecutive boot attempt counter used for bootloop protection
    pub boot_count: i32,
    pub denylist: bool,
    pub zygisk: bool,
}

/// Who is allowed to request root.
#[repr(i32)]
#[derive(Default, FromPrimitive)]
pub enum RootAccess {
    Disabled,
    AppsOnly,
    AdbOnly,
    #[default]
    AppsAndAdb,
}

/// How su sessions behave on multi-user devices.
#[repr(i32)]
#[derive(Default, FromPrimitive)]
pub enum MultiuserMode {
    #[default]
    OwnerOnly,
    OwnerManaged,
    User,
}

// MntNsMode is generated by the cxx bridge, so Default cannot be derived.
impl Default for MntNsMode {
    fn default() -> Self {
        MntNsMode::Requester
    }
}

impl DbEntryKey {
    // Map each settings key to the string stored in the database.
    fn to_str(self) -> &'static str {
        match self {
            DbEntryKey::RootAccess => "root_access",
            DbEntryKey::SuMultiuserMode => "multiuser_mode",
            DbEntryKey::SuMntNs => "mnt_ns",
            DbEntryKey::DenylistConfig => "denylist",
            DbEntryKey::ZygiskConfig => "zygisk",
            DbEntryKey::BootloopCount => "bootloop",
            DbEntryKey::SuManager => "requester",
            _ => "",
        }
    }
}
impl SqlTable for DbSettings {
    /// Fold a single (key, value) row of the settings table into this struct.
    fn on_row(&mut self, columns: &[String], values: &DbValues) {
        let (mut name, mut raw) = ("", 0);
        for (idx, col) in columns.iter().enumerate() {
            match col.as_str() {
                "key" => name = values.get_text(idx as i32),
                "value" => raw = values.get_int(idx as i32),
                _ => {}
            }
        }
        match name {
            "root_access" => self.root_access = RootAccess::from_i32(raw).unwrap_or_default(),
            "multiuser_mode" => {
                self.multiuser_mode = MultiuserMode::from_i32(raw).unwrap_or_default()
            }
            "mnt_ns" => self.mnt_ns = MntNsMode { repr: raw },
            "denylist" => self.denylist = raw != 0,
            "zygisk" => self.zygisk = raw != 0,
            "bootloop" => self.boot_count = raw,
            _ => {}
        }
    }
}
// Owning handle to an open sqlite3 connection.
#[repr(transparent)]
pub struct Sqlite3(NonNull<sqlite3>);
// SAFETY: all access is serialized through the Mutex in MagiskD::with_db
unsafe impl Send for Sqlite3 {}

type SqlBindCallback = Option<unsafe extern "C" fn(*mut c_void, i32, Pin<&mut DbStatement>) -> i32>;
type SqlExecCallback = Option<unsafe extern "C" fn(*mut c_void, &[String], &DbValues)>;

unsafe extern "C" {
    // Implemented in C++: prepares `sql`, binds parameters via
    // `bind_callback`, and streams result rows to `exec_callback`.
    fn sql_exec_impl(
        db: *mut sqlite3,
        sql: &str,
        bind_callback: SqlBindCallback,
        bind_cookie: *mut c_void,
        exec_callback: SqlExecCallback,
        exec_cookie: *mut c_void,
    ) -> i32;
}

/// A single SQL bind parameter.
pub enum DbArg<'a> {
    Text(&'a str),
    Integer(i64),
}

// Cursor over bind parameters, consumed one per bind callback invocation.
struct DbArgs<'a> {
    args: &'a [DbArg<'a>],
    curr: usize,
}

// C callback: bind the next pending argument onto the prepared statement.
// `v` must point to a live DbArgs (set up in db_exec_impl).
unsafe extern "C" fn bind_arguments(v: *mut c_void, idx: i32, stmt: Pin<&mut DbStatement>) -> i32 {
    unsafe {
        let args = &mut *(v as *mut DbArgs<'_>);
        if args.curr < args.args.len() {
            let arg = &args.args[args.curr];
            args.curr += 1;
            match *arg {
                Text(v) => stmt.bind_text(idx, v),
                Integer(v) => stmt.bind_int64(idx, v),
            }
        } else {
            // Nothing left to bind; report success (SQLITE_OK)
            0
        }
    }
}

// C callback: forward one result row to the SqlTable pointed to by `v`.
unsafe extern "C" fn read_db_row<T: SqlTable>(
    v: *mut c_void,
    columns: &[String],
    values: &DbValues,
) {
    unsafe {
        let table = &mut *(v as *mut T);
        table.on_row(columns, values);
    }
}
impl MagiskD {
    /// Run `f` with the lazily-opened sqlite connection.
    /// Returns -1 when the database cannot be opened.
    fn with_db<F: FnOnce(*mut sqlite3) -> i32>(&self, f: F) -> i32 {
        let mut db = self.sql_connection.lock().unwrap();
        if db.is_none() {
            let raw_db = open_and_init_db();
            *db = NonNull::new(raw_db).map(Sqlite3);
        }
        match *db {
            Some(ref mut db) => f(db.0.as_ptr()),
            _ => -1,
        }
    }

    // Shared implementation: execute SQL with optional bind arguments and an
    // optional per-row callback. Returns the raw sqlite3 result code.
    fn db_exec_impl(
        &self,
        sql: &str,
        args: &[DbArg],
        exec_callback: SqlExecCallback,
        exec_cookie: *mut c_void,
    ) -> i32 {
        let mut bind_callback: SqlBindCallback = None;
        let mut bind_cookie: *mut c_void = ptr::null_mut();
        // db_args must stay alive across the sql_exec_impl call below,
        // because bind_cookie points into it.
        let mut db_args = DbArgs { args, curr: 0 };
        if !args.is_empty() {
            bind_callback = Some(bind_arguments);
            bind_cookie = (&mut db_args) as *mut DbArgs as *mut c_void;
        }
        self.with_db(|db| unsafe {
            sql_exec_impl(
                db,
                sql,
                bind_callback,
                bind_cookie,
                exec_callback,
                exec_cookie,
            )
        })
    }

    /// Execute `sql`, streaming each result row into `out`.
    pub fn db_exec_with_rows<T: SqlTable>(&self, sql: &str, args: &[DbArg], out: &mut T) -> i32 {
        self.db_exec_impl(
            sql,
            args,
            Some(read_db_row::<T>),
            out as *mut T as *mut c_void,
        )
    }

    /// Execute `sql`, discarding any result rows.
    pub fn db_exec(&self, sql: &str, args: &[DbArg]) -> i32 {
        self.db_exec_impl(sql, args, None, ptr::null_mut())
    }

    /// Upsert a single integer setting.
    pub fn set_db_setting(&self, key: DbEntryKey, value: i32) -> SqliteResult<()> {
        self.db_exec(
            "INSERT OR REPLACE INTO settings (key,value) VALUES(?,?)",
            &[Text(key.to_str()), Integer(value as i64)],
        )
        .sql_result()
    }

    /// Read a single integer setting, falling back to its built-in default
    /// when absent from the database or on query failure.
    pub fn get_db_setting(&self, key: DbEntryKey) -> i32 {
        // Get default values
        let mut val = match key {
            DbEntryKey::RootAccess => RootAccess::default() as i32,
            DbEntryKey::SuMultiuserMode => MultiuserMode::default() as i32,
            DbEntryKey::SuMntNs => MntNsMode::default().repr,
            DbEntryKey::DenylistConfig => 0,
            // Zygisk defaults to enabled when running on an emulator
            DbEntryKey::ZygiskConfig => self.is_emulator as i32,
            DbEntryKey::BootloopCount => 0,
            _ => -1,
        };
        let mut func = |_: &[String], values: &DbValues| {
            val = values.get_int(0);
        };
        self.db_exec_with_rows(
            "SELECT value FROM settings WHERE key=?",
            &[Text(key.to_str())],
            &mut func,
        )
        .sql_result()
        .log()
        .ok();
        val
    }

    /// Load the full settings table into a `DbSettings`.
    pub fn get_db_settings(&self) -> SqliteResult<DbSettings> {
        let mut cfg = DbSettings {
            zygisk: self.is_emulator,
            ..Default::default()
        };
        self.db_exec_with_rows("SELECT * FROM settings", &[], &mut cfg)
            .sql_result()?;
        Ok(cfg)
    }

    /// Read a string entry; returns an empty string when absent or on error.
    pub fn get_db_string(&self, key: DbEntryKey) -> String {
        let mut val = "".to_string();
        let mut func = |_: &[String], values: &DbValues| {
            val.push_str(values.get_text(0));
        };
        self.db_exec_with_rows(
            "SELECT value FROM strings WHERE key=?",
            &[Text(key.to_str())],
            &mut func,
        )
        .sql_result()
        .log()
        .ok();
        val
    }

    /// Delete a string entry.
    pub fn rm_db_string(&self, key: DbEntryKey) -> SqliteResult<()> {
        self.db_exec("DELETE FROM strings WHERE key=?", &[Text(key.to_str())])
            .sql_result()
    }

    /// Serve a raw SQL request from the CLI over a unix socket: read one SQL
    /// string, write each row back as "col=val|col=val|...", then terminate
    /// the stream with an empty string.
    pub fn db_exec_for_cli(&self, mut file: UnixStream) -> LoggedResult<()> {
        let mut reader = BufReader::new(&mut file);
        let sql: String = reader.read_decodable()?;
        let mut writer = BufWriter::new(&mut file);
        let mut output_fn = |columns: &[String], values: &DbValues| {
            let mut out = "".to_string();
            for (i, column) in columns.iter().enumerate() {
                if i != 0 {
                    out.push('|');
                }
                out.push_str(column);
                out.push('=');
                out.push_str(values.get_text(i as i32));
            }
            writer.write_encodable(&out).log_ok();
        };
        self.db_exec_with_rows(&sql, &[], &mut output_fn);
        // Empty string marks end-of-stream for the client
        writer.write_encodable("").log()
    }
}
impl MagiskD {
    /// cxx bridge wrapper: report success as a bool instead of a Result.
    pub fn set_db_setting_for_cxx(&self, key: DbEntryKey, value: i32) -> bool {
        self.set_db_setting(key, value).log().is_ok()
    }
}

// Entry point for C++ code to run SQL on the daemon's shared connection.
// Must only be called after the daemon singleton has been initialized.
#[unsafe(export_name = "sql_exec_rs")]
unsafe extern "C" fn sql_exec_for_cxx(
    sql: &str,
    bind_callback: SqlBindCallback,
    bind_cookie: *mut c_void,
    exec_callback: SqlExecCallback,
    exec_cookie: *mut c_void,
) -> i32 {
    unsafe {
        // SAFETY: callers guarantee MAGISKD is set (the daemon is running)
        MAGISKD.get().unwrap_unchecked().with_db(|db| {
            sql_exec_impl(
                db,
                sql,
                bind_callback,
                bind_cookie,
                exec_callback,
                exec_cookie,
            )
        })
    }
}

View file

@ -0,0 +1,151 @@
#include <sys/wait.h>
#include <sys/mount.h>
#include <core.hpp>
#include "deny.hpp"
using namespace std;
// Print CLI usage to stderr and exit with a non-zero code.
// The raw string literal is user-facing output; reproduced verbatim.
[[noreturn]] static void usage() {
    fprintf(stderr,
R"EOF(DenyList Config CLI
Usage: magisk --denylist [action [arguments...] ]
Actions:
status Return the enforcement status
enable Enable denylist enforcement
disable Disable denylist enforcement
add PKG [PROC] Add a new target to the denylist
rm PKG [PROC] Remove target(s) from the denylist
ls Print the current denylist
exec CMDs... Execute commands in isolated mount
namespace and do all unmounts
)EOF");
    exit(1);
}
// Daemon-side dispatcher for denylist socket requests.
// A negative fd is a sentinel meaning "revert mounts in this process".
void denylist_handler(int client) {
    if (client < 0) {
        revert_unmount();
        return;
    }
    int req = read_int(client);
    int res = DenyResponse::ERROR;
    switch (req) {
    case DenyRequest::ENFORCE:
        res = enable_deny();
        break;
    case DenyRequest::DISABLE:
        res = disable_deny();
        break;
    case DenyRequest::ADD:
        res = add_list(client);
        break;
    case DenyRequest::REMOVE:
        res = rm_list(client);
        break;
    case DenyRequest::LIST:
        // ls_list writes its own response and closes the client itself
        ls_list(client);
        return;
    case DenyRequest::STATUS:
        res = denylist_enforced ? DenyResponse::ENFORCED : DenyResponse::NOT_ENFORCED;
        break;
    default:
        // Unknown request code
        break;
    }
    write_int(client, res);
    close(client);
}
// Client-side handler of `magisk --denylist ...`.
// Parses the action, forwards it to the daemon over a socket, and prints a
// human readable result. Returns the process exit code (0 = success).
int denylist_cli(rust::Vec<rust::String> &args) {
    if (args.empty())
        usage();

    // Convert rust strings into c strings
    size_t argc = args.size();
    std::vector<const char *> argv;
    ranges::transform(args, std::back_inserter(argv), [](rust::String &arg) { return arg.c_str(); });
    // End with nullptr
    argv.push_back(nullptr);

    int req;
    if (argv[0] == "enable"sv)
        req = DenyRequest::ENFORCE;
    else if (argv[0] == "disable"sv)
        req = DenyRequest::DISABLE;
    else if (argv[0] == "add"sv)
        req = DenyRequest::ADD;
    else if (argv[0] == "rm"sv)
        req = DenyRequest::REMOVE;
    else if (argv[0] == "ls"sv)
        req = DenyRequest::LIST;
    else if (argv[0] == "status"sv)
        req = DenyRequest::STATUS;
    else if (argv[0] == "exec"sv && argc > 1) {
        // Run the command in a private mount namespace with all Magisk
        // mounts reverted; execvp never returns on success.
        xunshare(CLONE_NEWNS);
        xmount(nullptr, "/", nullptr, MS_PRIVATE | MS_REC, nullptr);
        revert_unmount();
        execvp(argv[1], (char **) argv.data() + 1);
        exit(1);
    } else {
        usage();
    }

    // add/rm require at least a package name argument; bail out early so we
    // never pass the trailing nullptr into write_string below.
    if ((req == DenyRequest::ADD || req == DenyRequest::REMOVE) && argc < 2)
        usage();

    // Send request
    int fd = connect_daemon(RequestCode::DENYLIST);
    write_int(fd, req);
    if (req == DenyRequest::ADD || req == DenyRequest::REMOVE) {
        write_string(fd, argv[1]);
        write_string(fd, argv[2] ? argv[2] : "");
    }

    // Get response
    int res = read_int(fd);
    if (res < 0 || res >= DenyResponse::END)
        res = DenyResponse::ERROR;
    switch (res) {
    case DenyResponse::NOT_ENFORCED:
        fprintf(stderr, "Denylist is not enforced\n");
        goto return_code;
    case DenyResponse::ENFORCED:
        fprintf(stderr, "Denylist is enforced\n");
        goto return_code;
    case DenyResponse::ITEM_EXIST:
        fprintf(stderr, "Target already exists in denylist\n");
        goto return_code;
    case DenyResponse::ITEM_NOT_EXIST:
        fprintf(stderr, "Target does not exist in denylist\n");
        goto return_code;
    case DenyResponse::NO_NS:
        fprintf(stderr, "The kernel does not support mount namespace\n");
        goto return_code;
    case DenyResponse::INVALID_PKG:
        fprintf(stderr, "Invalid package / process name\n");
        goto return_code;
    case DenyResponse::ERROR:
        fprintf(stderr, "deny: Daemon error\n");
        return -1;
    case DenyResponse::OK:
        break;
    default:
        __builtin_unreachable();
    }

    // A successful LIST is followed by a stream of strings, terminated by
    // an empty string
    if (req == DenyRequest::LIST) {
        string out;
        for (;;) {
            read_string(fd, out);
            if (out.empty())
                break;
            printf("%s\n", out.data());
        }
    }

return_code:
    // For `status`, success means "enforced"; otherwise success means OK
    return req == DenyRequest::STATUS ? res != DenyResponse::ENFORCED : res != DenyResponse::OK;
}

View file

@ -0,0 +1,44 @@
#pragma once

#include <string_view>

// Magic package name representing isolated service processes
#define ISOLATED_MAGIC "isolated"

// Request codes sent by the denylist CLI to the daemon
namespace DenyRequest {
enum : int {
    ENFORCE,
    DISABLE,
    ADD,
    REMOVE,
    LIST,
    STATUS,
    END
};
}

// Response codes the daemon sends back; END doubles as the valid-range bound
namespace DenyResponse {
enum : int {
    OK,
    ENFORCED,
    NOT_ENFORCED,
    ITEM_EXIST,
    ITEM_NOT_EXIST,
    INVALID_PKG,
    NO_NS,
    ERROR,
    END
};
}

// CLI entries
int enable_deny();
int disable_deny();
int add_list(int client);
int rm_list(int client);
void ls_list(int client);

// True if the SELinux context of `pid` starts with `context`
bool proc_context_match(int pid, std::string_view context);

// logcat monitor thread entry
void *logcat(void *arg);
extern bool logcat_exit;

View file

@ -0,0 +1,279 @@
#include <unistd.h>
#include <android/log.h>
#include <sys/syscall.h>
#include <string>
#include <map>
#include <core.hpp>
#include "deny.hpp"
using namespace std;
// Wire format of a log record as returned by liblog (mirrors AOSP headers).
struct logger_entry {
    uint16_t len; /* length of the payload */
    uint16_t hdr_size; /* sizeof(struct logger_entry) */
    int32_t pid; /* generating process's pid */
    uint32_t tid; /* generating process's tid */
    uint32_t sec; /* seconds since Epoch */
    uint32_t nsec; /* nanoseconds */
    uint32_t lid; /* log id of the payload, bottom 4 bits currently */
    uint32_t uid; /* generating process's uid */
};

#define LOGGER_ENTRY_MAX_LEN (5 * 1024)

// A full log message: header plus payload in one buffer.
struct log_msg {
    union [[gnu::aligned(4)]] {
        unsigned char buf[LOGGER_ENTRY_MAX_LEN + 1];
        struct logger_entry entry;
    };
};

// Parsed representation filled in by android_log_processLogBuffer.
struct AndroidLogEntry {
    time_t tv_sec;
    long tv_nsec;
    android_LogPriority priority;
    int32_t uid;
    int32_t pid;
    int32_t tid;
    const char *tag;
    size_t tagLen;
    size_t messageLen;
    const char *message;
};

// Binary layouts of entries in the "events" log buffer (little endian).
struct [[gnu::packed]] android_event_header_t {
    int32_t tag; // Little Endian Order
};

struct [[gnu::packed]] android_event_int_t {
    int8_t type; // EVENT_TYPE_INT
    int32_t data; // Little Endian Order
};

struct [[gnu::packed]] android_event_string_t {
    int8_t type; // EVENT_TYPE_STRING;
    int32_t length; // Little Endian Order
    char data[];
};

struct [[gnu::packed]] android_event_list_t {
    int8_t type; // EVENT_TYPE_LIST
    int8_t element_count;
} ;

// 30014 am_proc_start (User|1|5),(PID|1|5),(UID|1|5),(Process Name|3),(Type|3),(Component|3)
struct [[gnu::packed]] android_event_am_proc_start {
    android_event_header_t tag;
    android_event_list_t list;
    android_event_int_t user;
    android_event_int_t pid;
    android_event_int_t uid;
    android_event_string_t process_name;
    // android_event_string_t type;
    // android_event_string_t component;
};

// 3040 boot_progress_ams_ready (time|2|3)

// liblog entry points, resolved weakly so missing symbols fail soft at runtime
extern "C" {
[[gnu::weak]] struct logger_list *android_logger_list_alloc(int mode, unsigned int tail, pid_t pid);
[[gnu::weak]] void android_logger_list_free(struct logger_list *list);
[[gnu::weak]] int android_logger_list_read(struct logger_list *list, struct log_msg *log_msg);
[[gnu::weak]] struct logger *android_logger_open(struct logger_list *list, log_id_t id);
[[gnu::weak]] int android_log_processLogBuffer(struct logger_entry *buf, AndroidLogEntry *entry);
}

// zygote pid -> mnt ns
static map<int, struct stat> zygote_map;
bool logcat_exit;
// Stat the mount namespace of `pid` (/proc/<pid>/ns/mnt) into *st.
// Returns 0 on success, -1 on failure (same contract as stat(2)).
static int read_ns(const int pid, struct stat *st) {
    char path[32];
    // Bounded formatting: the longest path ("/proc/" + 10-digit pid +
    // "/ns/mnt") fits in 32 bytes, but snprintf guards regardless.
    snprintf(path, sizeof(path), "/proc/%d/ns/mnt", pid);
    return stat(path, st);
}
// Return the parent PID of `pid` by parsing /proc/<pid>/stat,
// or -1 if the process is gone or the file cannot be parsed.
// (Previously, `ppid` was returned uninitialized if fscanf matched nothing.)
static int parse_ppid(int pid) {
    char path[32];
    int ppid = -1;
    snprintf(path, sizeof(path), "/proc/%d/stat", pid);
    auto stat = open_file(path, "re");
    if (!stat) return -1;
    // Format: PID (COMM) STATE PPID ...
    // COMM may contain spaces/parentheses, so skip past the LAST ')'
    // instead of relying on "%s" (which stops at the first space).
    char buf[1024];
    if (fgets(buf, sizeof(buf), stat.get()) == nullptr)
        return -1;
    char *end = strrchr(buf, ')');
    if (end == nullptr || sscanf(end + 1, " %*c %d", &ppid) != 1)
        return -1;
    return ppid;
}
// Rescan /proc for root-owned zygote processes (SELinux context
// u:r:zygote:s0, parent = init) and record their mount namespace identity
// in zygote_map.
static void check_zygote() {
    zygote_map.clear();
    int proc = open("/proc", O_RDONLY | O_CLOEXEC);
    // NOTE(review): if xopen_dir fails here, `proc` is presumably leaked
    // unless xopen_dir closes the fd on failure -- verify its semantics.
    auto proc_dir = xopen_dir(proc);
    if (!proc_dir) return;
    struct stat st{};
    for (dirent *entry; (entry = readdir(proc_dir.get()));) {
        int pid = parse_int(entry->d_name);
        if (pid <= 0) continue;
        // Only root-owned processes can be zygote
        if (fstatat(proc, entry->d_name, &st, 0)) continue;
        if (st.st_uid != 0) continue;
        if (proc_context_match(pid, "u:r:zygote:s0") && parse_ppid(pid) == 1) {
            if (read_ns(pid, &st) == 0) {
                LOGI("logcat: zygote PID=[%d]\n", pid);
                zygote_map[pid] = st;
            }
        }
    }
}
// Handle a "main" log buffer message: detect app zygote startup via the
// AppZygote/AppZygoteInit tag pair and revert Magisk mounts in denied
// app zygote processes.
static void process_main_buffer(struct log_msg *msg) {
    AndroidLogEntry entry{};
    if (android_log_processLogBuffer(&msg->entry, &entry) < 0) return;
    // Drop the trailing NUL accounted in tagLen
    entry.tagLen--;
    auto tag = string_view(entry.tag, entry.tagLen);
    // "AppZygote" (from system_server, uid 1000) arms the state machine;
    // the next "AppZygoteInit" from an app_zygote context is the target.
    static bool ready = false;
    if (tag == "AppZygote") {
        if (entry.uid != 1000) return;
        if (entry.message[0] == 'S') {
            ready = true;
        } else {
            ready = false;
        }
        return;
    }
    if (!ready || tag != "AppZygoteInit") return;
    if (!proc_context_match(msg->entry.pid, "u:r:app_zygote:s0")) return;
    ready = false;
    // Reuse the buffer: first as path, then as the cmdline contents
    char cmdline[1024];
    sprintf(cmdline, "/proc/%d/cmdline", msg->entry.pid);
    if (auto f = open_file(cmdline, "re")) {
        // NOTE(review): fgets result unchecked; on failure `cmdline` still
        // holds the path string and the match below would use it -- confirm
        // this is acceptable.
        fgets(cmdline, sizeof(cmdline), f.get());
    } else {
        return;
    }
    if (is_deny_target(entry.uid, cmdline)) {
        int pid = msg->entry.pid;
        // Stop the zygote while a forked helper unmounts, then resume it
        kill(pid, SIGSTOP);
        if (fork_dont_care() == 0) {
            LOGI("logcat: revert [%s] PID=[%d] UID=[%d]\n", cmdline, pid, entry.uid);
            revert_unmount(pid);
            kill(pid, SIGCONT);
            _exit(0);
        }
    } else {
        LOGD("logcat: skip [%s] PID=[%d] UID=[%d]\n", cmdline, msg->entry.pid, entry.uid);
    }
}
// Handle an "events" log buffer message. Tag 30014 (am_proc_start) signals
// a new app process: if it is a deny target, fork a helper that waits for
// the process to leave the zygote mount namespace, then reverts mounts in
// it. Tag 3040 (boot_progress_ams_ready) signals a soft reboot.
static void process_events_buffer(struct log_msg *msg) {
    // Only system_server (uid 1000) emits the events we care about
    if (msg->entry.uid != 1000) return;
    auto event_data = &msg->buf[msg->entry.hdr_size];
    auto event_header = reinterpret_cast<const android_event_header_t *>(event_data);
    if (event_header->tag == 30014) {
        auto am_proc_start = reinterpret_cast<const android_event_am_proc_start *>(event_data);
        auto proc = string_view(am_proc_start->process_name.data,
                am_proc_start->process_name.length);
        if (is_deny_target(am_proc_start->uid.data, proc)) {
            int pid = am_proc_start->pid.data;
            if (fork_dont_care() == 0) {
                int ppid = parse_ppid(pid);
                auto it = zygote_map.find(ppid);
                if (it == zygote_map.end()) {
                    LOGW("logcat: skip [%.*s] PID=[%d] UID=[%d] PPID=[%d]; parent not zygote\n",
                         (int) proc.length(), proc.data(),
                         pid, am_proc_start->uid.data, ppid);
                    _exit(0);
                }

                char path[16];
                ssprintf(path, sizeof(path), "/proc/%d", pid);
                struct stat st{};
                // Prefer entering the target's namespace via pidfd; fall
                // back to polling the target from outside.
                int fd = syscall(__NR_pidfd_open, pid, 0);
                if (fd > 0 && setns(fd, CLONE_NEWNS) == 0) {
                    pid = getpid();
                } else {
                    close(fd);
                    fd = -1;
                }
                // Busy-wait until the app process switches away from the
                // zygote's mount namespace (i.e. is no longer pre-fork).
                while (read_ns(pid, &st) == 0 && it->second.st_ino == st.st_ino) {
                    if (stat(path, &st) == 0 && st.st_uid == 0) {
                        usleep(10 * 1000);
                    } else {
                        // path+6 skips "/proc/" to print the pid digits
                        LOGW("logcat: skip [%.*s] PID=[%s] UID=[%d]; namespace not isolated\n",
                             (int) proc.length(), proc.data(),
                             path + 6, am_proc_start->uid.data);
                        _exit(0);
                    }
                    if (fd > 0) setns(fd, CLONE_NEWNS);
                }
                close(fd);

                LOGI("logcat: revert [%.*s] PID=[%d] UID=[%d]\n",
                     (int) proc.length(), proc.data(), pid, am_proc_start->uid.data);
                revert_unmount(pid);
                _exit(0);
            }
        } else {
            LOGD("logcat: skip [%.*s] PID=[%d] UID=[%d]\n",
                 (int) proc.length(), proc.data(),
                 am_proc_start->pid.data, am_proc_start->uid.data);
        }
        return;
    }

    if (event_header->tag == 3040) {
        LOGD("logcat: soft reboot\n");
        check_zygote();
    }
}
// Main logcat polling loop: (re)open the MAIN and EVENTS buffers and
// dispatch each message until denylist enforcement is turned off, retrying
// with a 1s backoff whenever the reader fails (e.g. logd restarted).
[[noreturn]] void run() {
    while (true) {
        const unique_ptr<logger_list, decltype(&android_logger_list_free)> logger_list{
                android_logger_list_alloc(0, 1, 0), &android_logger_list_free};

        for (log_id id: {LOG_ID_MAIN, LOG_ID_EVENTS}) {
            auto *logger = android_logger_open(logger_list.get(), id);
            if (logger == nullptr) continue;
        }

        struct log_msg msg{};
        while (true) {
            if (!denylist_enforced) {
                break;
            }
            if (android_logger_list_read(logger_list.get(), &msg) <= 0) {
                break;
            }

            switch (msg.entry.lid) {
                case LOG_ID_EVENTS:
                    process_events_buffer(&msg);
                    break;
                case LOG_ID_MAIN:
                    process_main_buffer(&msg);
                    // Explicit break: was an accidental fallthrough into
                    // default (harmless, since default only breaks, but
                    // fragile against future edits)
                    break;
                default:
                    break;
            }
        }

        if (!denylist_enforced) {
            break;
        }
        sleep(1);
    }

    LOGD("logcat: terminate\n");
    pthread_exit(nullptr);
}
// Thread entry point: snapshot the current zygotes, then monitor logcat.
// run() is [[noreturn]], so this function never actually returns.
void *logcat(void *) {
    check_zygote();
    run();
}

View file

@ -0,0 +1,440 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/inotify.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <set>
#include <map>
#include <consts.hpp>
#include <sqlite.hpp>
#include <core.hpp>
#include "deny.hpp"
using namespace std;
// For the following data structures:
// If package name == ISOLATED_MAGIC, or app ID == -1, it means isolated service
// Package name -> list of process names
static unique_ptr<map<string, set<string, StringCmp>, StringCmp>> pkg_to_procs_;
#define pkg_to_procs (*pkg_to_procs_)
// app ID -> list of pkg names (string_view points to a pkg_to_procs key)
static unique_ptr<map<int, set<string_view>>> app_id_to_pkgs_;
#define app_id_to_pkgs (*app_id_to_pkgs_)
// Locks the data structures above
static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
atomic<bool> denylist_enforced = false;
// Resolve the app ID of `pkg` by stat-ing its data directory in each user
// profile. Returns 0 when the package is not installed for any user.
static int get_app_id(const vector<int> &users, const string &pkg) {
    struct stat st{};
    char buf[PATH_MAX];
    for (const auto &user_id: users) {
        ssprintf(buf, sizeof(buf), "%s/%d/%s", APP_DATA_DIR, user_id, pkg.data());
        if (stat(buf, &st) == 0) {
            return to_app_id(st.st_uid);
        }
    }
    return 0;
}
// Collect all Android user IDs by listing the per-user app data directory.
// NOTE(review): parse_int is applied to every directory entry; presumably
// xreaddir skips "." and ".." and the directory contains only numeric user
// IDs -- verify, otherwise bogus IDs get appended.
static void collect_users(vector<int> &users) {
    auto data_dir = xopen_dir(APP_DATA_DIR);
    if (!data_dir)
        return;
    dirent *entry;
    while ((entry = xreaddir(data_dir.get()))) {
        users.emplace_back(parse_int(entry->d_name));
    }
}
static int get_app_id(const string &pkg) {
if (pkg == ISOLATED_MAGIC)
return -1;
vector<int> users;
collect_users(users);
return get_app_id(users, pkg);
}
// Maintain the app_id -> package names reverse index.
// Drops the app_id bucket entirely once its last package is removed.
static void update_app_id(int app_id, const string &pkg, bool remove) {
    if (app_id <= 0)
        return;
    if (!remove) {
        app_id_to_pkgs[app_id].emplace(pkg);
        return;
    }
    auto it = app_id_to_pkgs.find(app_id);
    if (it == app_id_to_pkgs.end())
        return;
    it->second.erase(pkg);
    if (it->second.empty())
        app_id_to_pkgs.erase(it);
}
// Leave /proc fd opened as we're going to read from it repeatedly
static DIR *procfp;

// Invoke `fn(pid)` for every numeric entry in /proc; stop when fn returns
// false. Requires procfp to be opened (see enable_deny).
template<class F>
static void crawl_procfs(const F &fn) {
    rewinddir(procfp);
    dirent *dp;
    int pid;
    while ((dp = readdir(procfp))) {
        pid = parse_int(dp->d_name);
        if (pid > 0 && !fn(pid))
            break;
    }
}
// string_view comparison helpers, used as template arguments below
static bool str_eql(string_view a, string_view b) { return a == b; }
static bool str_starts_with(string_view a, string_view b) { return a.starts_with(b); }
// Check whether the cmdline of `pid` matches `name` under `str_op`.
// Returns false when the process is gone or its cmdline cannot be read.
// NOTE(review): buffer size 4019 looks like a typo for 4096; kept as-is.
template<bool str_op(string_view, string_view) = str_eql>
static bool proc_name_match(int pid, string_view name) {
    char buf[4019];
    snprintf(buf, sizeof(buf), "/proc/%d/cmdline", pid);
    if (auto fp = open_file(buf, "re")) {
        // Only compare when a line was actually read; previously a failed
        // fgets left the path string in buf and compared that instead.
        if (fgets(buf, sizeof(buf), fp.get()) && str_op(buf, name)) {
            return true;
        }
    }
    return false;
}
// True if the SELinux context of `pid` starts with `context`
// (prefix match so MLS categories after the context are ignored).
bool proc_context_match(int pid, string_view context) {
    char buf[PATH_MAX];
    char con[1024] = {0};
    sprintf(buf, "/proc/%d", pid);
    if (lgetfilecon(buf, byte_data{ con, sizeof(con) })) {
        return string_view(con).starts_with(context);
    }
    return false;
}
// SIGKILL every process whose `matcher` accepts (pid, name).
// With multi=false, stop after the first match.
template<bool matcher(int, string_view) = &proc_name_match>
static void kill_process(const char *name, bool multi = false) {
    crawl_procfs([=](int pid) -> bool {
        if (matcher(pid, name)) {
            kill(pid, SIGKILL);
            LOGD("denylist: kill PID=[%d] (%s)\n", pid, name);
            return multi;
        }
        return true;
    });
}
// Sanity-check a denylist entry. Regular package names must be
// alphanumeric/underscore words containing at least one '.'; process names
// additionally allow ':'. For isolated services (magic package name) only
// the process name up to an optional ':' is checked.
static bool validate(const char *pkg, const char *proc) {
    if (str_eql(pkg, ISOLATED_MAGIC)) {
        for (char c; (c = *proc); ++proc) {
            if (c == ':')
                return true;  // suffix after ':' is free-form
            if (!isalnum(c) && c != '_' && c != '.')
                return false;
        }
        return true;
    }
    bool has_dot = false;
    for (char c; (c = *pkg); ++pkg) {
        if (c == '.') {
            has_dot = true;
        } else if (!isalnum(c) && c != '_') {
            return false;
        }
    }
    if (!has_dot)
        return false;
    for (char c; (c = *proc); ++proc) {
        if (!isalnum(c) && c != '_' && c != ':' && c != '.')
            return false;
    }
    return true;
}
// Insert (pkg, proc) into the in-memory map. Returns false if the entry
// already exists. When enforcement is active, immediately kill matching
// running processes so they restart without Magisk mounts.
static bool add_hide_set(const char *pkg, const char *proc) {
    auto p = pkg_to_procs[pkg].emplace(proc);
    if (!p.second)
        return false;
    LOGI("denylist add: [%s/%s]\n", pkg, proc);
    if (!denylist_enforced)
        return true;
    if (str_eql(pkg, ISOLATED_MAGIC)) {
        // Kill all matching isolated processes
        kill_process<&proc_name_match<str_starts_with>>(proc, true);
    } else {
        kill_process(proc);
    }
    return true;
}
// Rebuild the app_id index and prune packages that are no longer installed
// (both from memory and from the database).
// NOTE(review): the package name is interpolated into SQL directly;
// presumably safe because validate() forbids quote characters -- confirm.
void scan_deny_apps() {
    if (!app_id_to_pkgs_)
        return;
    app_id_to_pkgs.clear();

    char sql[4096];
    vector<int> users;
    collect_users(users);
    for (auto it = pkg_to_procs.begin(); it != pkg_to_procs.end();) {
        // Isolated entries are keyed by the magic name, never uninstalled
        if (it->first == ISOLATED_MAGIC) {
            it++;
            continue;
        }
        int app_id = get_app_id(users, it->first);
        if (app_id == 0) {
            // Package gone: drop from memory and persistent storage
            LOGI("denylist rm: [%s]\n", it->first.data());
            ssprintf(sql, sizeof(sql), "DELETE FROM denylist WHERE package_name='%s'",
                     it->first.data());
            db_exec(sql);
            it = pkg_to_procs.erase(it);
        } else {
            update_app_id(app_id, it->first, false);
            it++;
        }
    }
}
// Drop all cached denylist state; ensure_data() lazily rebuilds it.
static void clear_data() {
    pkg_to_procs_.reset();
    app_id_to_pkgs_.reset();
}
// Lazily load the denylist table from the database into the in-memory maps.
// Must be called with data_lock held. Returns false on database failure.
static bool ensure_data() {
    if (pkg_to_procs_)
        return true;

    LOGI("denylist: initializing internal data structures\n");

    default_new(pkg_to_procs_);
    bool res = db_exec("SELECT * FROM denylist", {}, [](StringSlice columns, const DbValues &values) {
        // Initialize so a row with an unexpected schema cannot feed
        // uninitialized pointers into add_hide_set (previous behavior)
        const char *package_name = nullptr;
        const char *process = nullptr;
        for (int i = 0; i < columns.size(); ++i) {
            const auto &name = columns[i];
            if (name == "package_name") {
                package_name = values.get_text(i);
            } else if (name == "process") {
                process = values.get_text(i);
            }
        }
        if (package_name && process)
            add_hide_set(package_name, process);
    });
    if (!res)
        goto error;

    default_new(app_id_to_pkgs_);
    scan_deny_apps();
    return true;

error:
    clear_data();
    return false;
}
// Add a (pkg, proc) target to the denylist, both in memory and in the
// database. An empty proc defaults to the package name itself.
static int add_list(const char *pkg, const char *proc) {
    if (proc[0] == '\0')
        proc = pkg;

    // validate() also guarantees the names are safe to splice into SQL
    if (!validate(pkg, proc))
        return DenyResponse::INVALID_PKG;

    {
        mutex_guard lock(data_lock);
        if (!ensure_data())
            return DenyResponse::ERROR;
        int app_id = get_app_id(pkg);
        if (app_id == 0)
            return DenyResponse::INVALID_PKG;
        if (!add_hide_set(pkg, proc))
            return DenyResponse::ITEM_EXIST;
        auto it = pkg_to_procs.find(pkg);
        update_app_id(app_id, it->first, false);
    }

    // Add to database
    char sql[4096];
    ssprintf(sql, sizeof(sql),
            "INSERT INTO denylist (package_name, process) VALUES('%s', '%s')", pkg, proc);
    return db_exec(sql) ? DenyResponse::OK : DenyResponse::ERROR;
}
int add_list(int client) {
string pkg = read_string(client);
string proc = read_string(client);
return add_list(pkg.data(), proc.data());
}
// Remove targets from the denylist. An empty proc removes the whole
// package; otherwise only the single (pkg, proc) pair is removed.
static int rm_list(const char *pkg, const char *proc) {
    {
        mutex_guard lock(data_lock);
        if (!ensure_data())
            return DenyResponse::ERROR;

        bool remove = false;

        auto it = pkg_to_procs.find(pkg);
        if (it != pkg_to_procs.end()) {
            if (proc[0] == '\0') {
                update_app_id(get_app_id(pkg), it->first, true);
                pkg_to_procs.erase(it);
                remove = true;
                LOGI("denylist rm: [%s]\n", pkg);
            } else if (it->second.erase(proc) != 0) {
                remove = true;
                LOGI("denylist rm: [%s/%s]\n", pkg, proc);
                // Last process of the package removed: drop the package too
                if (it->second.empty()) {
                    update_app_id(get_app_id(pkg), it->first, true);
                    pkg_to_procs.erase(it);
                }
            }
        }

        if (!remove)
            return DenyResponse::ITEM_NOT_EXIST;
    }

    char sql[4096];
    if (proc[0] == '\0')
        ssprintf(sql, sizeof(sql), "DELETE FROM denylist WHERE package_name='%s'", pkg);
    else
        ssprintf(sql, sizeof(sql),
                "DELETE FROM denylist WHERE package_name='%s' AND process='%s'", pkg, proc);
    return db_exec(sql) ? DenyResponse::OK : DenyResponse::ERROR;
}
int rm_list(int client) {
string pkg = read_string(client);
string proc = read_string(client);
return rm_list(pkg.data(), proc.data());
}
// Stream the current denylist to the client as length-prefixed "pkg|proc"
// strings, terminated by a zero length. Writes the response code itself
// and closes the client (see denylist_handler).
void ls_list(int client) {
    {
        mutex_guard lock(data_lock);
        if (!ensure_data()) {
            write_int(client, static_cast<int>(DenyResponse::ERROR));
            return;
        }
        // Prune uninstalled packages before reporting
        scan_deny_apps();
        write_int(client,static_cast<int>(DenyResponse::OK));
        for (const auto &[pkg, procs] : pkg_to_procs) {
            for (const auto &proc : procs) {
                // Length prefix covers "pkg" + "|" + "proc"
                write_int(client, pkg.size() + proc.size() + 1);
                xwrite(client, pkg.data(), pkg.size());
                xwrite(client, "|", 1);
                xwrite(client, proc.data(), proc.size());
            }
        }
    }
    // Zero length terminates the stream
    write_int(client, 0);
    close(client);
}
// Turn on denylist enforcement: load data, start the logcat monitor thread
// (when zygisk is disabled), kill pre-spawned app zygote pools, and persist
// the setting. Returns a DenyResponse code.
int enable_deny() {
    if (denylist_enforced) {
        return DenyResponse::OK;
    } else {
        mutex_guard lock(data_lock);

        if (access("/proc/self/ns/mnt", F_OK) != 0) {
            LOGW("The kernel does not support mount namespace\n");
            return DenyResponse::NO_NS;
        }

        // Open /proc once and keep it for crawl_procfs
        if (procfp == nullptr && (procfp = opendir("/proc")) == nullptr)
            return DenyResponse::ERROR;

        LOGI("* Enable DenyList\n");

        if (!ensure_data())
            return DenyResponse::ERROR;

        denylist_enforced = true;

        // Without zygisk we rely on the logcat monitor to catch app spawns
        if (!MagiskD::Get().zygisk_enabled()) {
            if (new_daemon_thread(&logcat)) {
                denylist_enforced = false;
                return DenyResponse::ERROR;
            }
        }

        // On Android Q+, also kill blastula pool and all app zygotes
        if (SDK_INT >= 29) {
            kill_process("usap32", true);
            kill_process("usap64", true);
            kill_process<&proc_context_match>("u:r:app_zygote:s0", true);
        }
    }

    MagiskD::Get().set_db_setting(DbEntryKey::DenylistConfig, true);
    return DenyResponse::OK;
}
// Turn off denylist enforcement and persist the setting.
// The atomic exchange makes the log line fire only on an actual transition.
int disable_deny() {
    if (denylist_enforced.exchange(false)) {
        LOGI("* Disable DenyList\n");
    }
    MagiskD::Get().set_db_setting(DbEntryKey::DenylistConfig, false);
    return DenyResponse::OK;
}
// Called during daemon startup: enable enforcement if the persisted
// setting says it should be on.
void initialize_denylist() {
    if (denylist_enforced)
        return;
    if (MagiskD::Get().get_db_setting(DbEntryKey::DenylistConfig))
        enable_deny();
}
// Decide whether (uid, process) is on the denylist. Isolated processes
// (app id >= 90000) match by process-name prefix against the ISOLATED_MAGIC
// entries; regular apps match by app id and exact process name.
bool is_deny_target(int uid, string_view process) {
    mutex_guard lock(data_lock);
    if (!ensure_data())
        return false;

    int app_id = to_app_id(uid);
    if (app_id >= 90000) {
        if (auto it = pkg_to_procs.find(ISOLATED_MAGIC); it != pkg_to_procs.end()) {
            for (const auto &s : it->second) {
                if (process.starts_with(s))
                    return true;
            }
        }
        return false;
    } else {
        auto it = app_id_to_pkgs.find(app_id);
        if (it == app_id_to_pkgs.end())
            return false;
        for (const auto &pkg : it->second) {
            // Invariant: every pkg in app_id_to_pkgs is a key of
            // pkg_to_procs (maintained by update_app_id), so find() cannot
            // return end() here.
            if (pkg_to_procs.find(pkg)->second.count(process))
                return true;
        }
    }
    return false;
}
// Fill in the zygisk state flags relevant to the denylist for a process.
void update_deny_flags(int uid, rust::Str process, uint32_t &flags) {
    if (is_deny_target(uid, { process.begin(), process.end() })) {
        flags |= +ZygiskStateFlags::ProcessOnDenyList;
    }
    if (denylist_enforced) {
        flags |= +ZygiskStateFlags::DenyListEnforced;
    }
}

View file

@ -0,0 +1,91 @@
#pragma once
#include <sys/socket.h>
#include <string>
#include <atomic>
#include <functional>
#include <base.hpp>
#include "../core-rs.hpp"
#define AID_ROOT 0
#define AID_SHELL 2000
#define AID_USER_OFFSET 100000
#define to_app_id(uid) (uid % AID_USER_OFFSET)
#define to_user_id(uid) (uid / AID_USER_OFFSET)
#define SDK_INT (MagiskD::Get().sdk_int())
#define APP_DATA_DIR (SDK_INT >= 24 ? "/data/user_de" : "/data/user")
// Convenience overload: connect without spawning the daemon.
inline int connect_daemon(RequestCode req) {
    return connect_daemon(req, false);
}
// Multi-call entrypoints
int su_client_main(int argc, char *argv[]);
int zygisk_main(int argc, char *argv[]);
struct ModuleInfo;
// Utils
const char *get_magisk_tmp();
void unlock_blocks();
bool check_key_combo();
// Read one trivially-copyable value from fd; returns T(-1) on short read.
// NOTE(review): the -1 sentinel is indistinguishable from a legitimately
// read value of -1 -- callers must tolerate this ambiguity.
template<typename T> requires(std::is_trivially_copyable_v<T>)
T read_any(int fd) {
    T val;
    if (xxread(fd, &val, sizeof(val)) != sizeof(val))
        return -1;
    return val;
}
// Write one trivially-copyable value to fd; silently no-op on invalid fd.
template<typename T> requires(std::is_trivially_copyable_v<T>)
void write_any(int fd, T val) {
    if (fd < 0) return;
    xwrite(fd, &val, sizeof(val));
}

// int specializations used throughout the socket protocol
inline int read_int(int fd) { return read_any<int>(fd); }
inline void write_int(int fd, int val) { write_any(fd, val); }
std::string read_string(int fd);
bool read_string(int fd, std::string &str);
void write_string(int fd, std::string_view str);
// Write a length-prefixed vector (int count, then raw elements).
// NOTE(review): vec.size() is narrowed from size_t to int.
template<typename T> requires(std::is_trivially_copyable_v<T>)
void write_vector(int fd, const std::vector<T> &vec) {
    write_int(fd, vec.size());
    xwrite(fd, vec.data(), vec.size() * sizeof(T));
}
// Read a length-prefixed vector written by write_vector.
// Returns false on a protocol error or short read. The explicit negative
// check prevents read_int's -1 failure sentinel (or a bogus peer length)
// from reaching vec.resize(), which would throw std::length_error.
template<typename T> requires(std::is_trivially_copyable_v<T>)
bool read_vector(int fd, std::vector<T> &vec) {
    int size = read_int(fd);
    if (size < 0)
        return false;
    vec.resize(size);
    return xread(fd, vec.data(), size * sizeof(T)) == size * sizeof(T);
}
// Scripting
void install_apk(Utf8CStr apk);
void uninstall_pkg(Utf8CStr pkg);
void exec_common_scripts(Utf8CStr stage);
void exec_module_scripts(Utf8CStr stage, const rust::Vec<ModuleInfo> &module_list);
void exec_script(Utf8CStr script);
void clear_pkg(const char *pkg, int user_id);
[[noreturn]] void install_module(Utf8CStr file);
// Denylist
extern std::atomic<bool> denylist_enforced;
int denylist_cli(rust::Vec<rust::String> &args);
void denylist_handler(int client);
void initialize_denylist();
void scan_deny_apps();
bool is_deny_target(int uid, std::string_view process);
void revert_unmount(int pid = -1) noexcept;
void update_deny_flags(int uid, rust::Str process, uint32_t &flags);
// MagiskSU
void exec_root_shell(int client, int pid, SuRequest &req, MntNsMode mode);
// Rust bindings
// Thin shims exposing C++ helpers to Rust through the cxx bridge
inline Utf8CStr get_magisk_tmp_rs() { return get_magisk_tmp(); }
inline rust::String resolve_preinit_dir_rs(Utf8CStr base_dir) {
    return resolve_preinit_dir(base_dir.c_str());
}

View file

@ -0,0 +1,76 @@
#pragma once
#include <functional>
#include <rust/cxx.h>
#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */
#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */
#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */
#define SQLITE_OK 0 /* Successful result */
#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */
#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */
struct sqlite3;
struct sqlite3_stmt;
extern const char *(*sqlite3_errstr)(int);
// Transparent wrappers of sqlite3_stmt
// Transparent wrappers of sqlite3_stmt
// (never constructed or destroyed on the C++ side; see deleted dtors)
struct DbValues {
    const char *get_text(int index) const;
    rust::Str get_str(int index) const { return get_text(index); }
    int get_int(int index) const;
    ~DbValues() = delete;
};
struct DbStatement {
    int bind_text(int index, rust::Str val);
    int bind_int64(int index, int64_t val);
    ~DbStatement() = delete;
};

using StringSlice = rust::Slice<rust::String>;
using sql_bind_callback = int(*)(void*, int, DbStatement&);
using sql_exec_callback = void(*)(void*, StringSlice, const DbValues&);

// Open (and initialize the schema of) the Magisk database
sqlite3 *open_and_init_db();

/************
 * C++ APIs *
 ************/

using db_exec_callback = std::function<void(StringSlice, const DbValues&)>;

// A single SQL bind argument: either an integer or a text value
struct DbArg {
    enum {
        INT,
        TEXT,
    } type;
    union {
        int64_t int_val;
        rust::Str str_val;
    };
    DbArg(int64_t v) : type(INT), int_val(v) {}
    DbArg(const char *v) : type(TEXT), str_val(v) {}
};

// Ordered list of bind arguments, consumed one per bind callback call
struct DbArgs {
    DbArgs() : curr(0) {}
    DbArgs(std::initializer_list<DbArg> list) : args(list), curr(0) {}
    int operator()(int index, DbStatement &stmt);
    bool empty() const { return args.empty(); }
private:
    std::vector<DbArg> args;
    size_t curr;
};

bool db_exec(const char *sql, DbArgs args = {}, db_exec_callback exec_fn = {});

// Convenience overload: any callable row sink works as the exec callback
template<typename T>
concept DbData = requires(T t, StringSlice s, DbValues &v) { t(s, v); };

template<DbData T>
bool db_exec(const char *sql, DbArgs args, T &data) {
    return db_exec(sql, std::move(args), (db_exec_callback) std::ref(data));
}

238
native/src/core/lib.rs Normal file
View file

@ -0,0 +1,238 @@
#![feature(try_blocks)]
#![feature(let_chains)]
#![feature(fn_traits)]
#![feature(unix_socket_ancillary_data)]
#![feature(unix_socket_peek)]
#![feature(default_field_values)]
#![feature(peer_credentials_unix_socket)]
#![allow(clippy::missing_safety_doc)]
use crate::ffi::SuRequest;
use crate::socket::Encodable;
use base::derive::Decodable;
use daemon::{MagiskD, connect_daemon_for_cxx};
use logging::{android_logging, zygisk_close_logd, zygisk_get_logd, zygisk_logging};
use magisk::magisk_main;
use mount::revert_unmount;
use resetprop::{get_prop, resetprop_main};
use selinux::{lgetfilecon, setfilecon};
use socket::{recv_fd, recv_fds, send_fd};
use std::fs::File;
use std::mem::ManuallyDrop;
use std::ops::DerefMut;
use std::os::fd::FromRawFd;
use su::{get_pty_num, pump_tty};
use zygisk::zygisk_should_load_module;
mod bootstages;
#[path = "../include/consts.rs"]
mod consts;
mod daemon;
mod db;
mod logging;
mod magisk;
mod module;
mod mount;
mod package;
mod resetprop;
mod selinux;
mod socket;
mod su;
mod thread;
mod zygisk;
#[allow(clippy::needless_lifetimes)]
#[cxx::bridge]
pub mod ffi {
    // Daemon request codes shared with the C++ side. The two barrier values
    // partition the codes into: pre-root commands, root-required commands,
    // and boot-stage notifications.
    #[repr(i32)]
    enum RequestCode {
        START_DAEMON,
        CHECK_VERSION,
        CHECK_VERSION_CODE,
        STOP_DAEMON,

        _SYNC_BARRIER_,

        SUPERUSER,
        ZYGOTE_RESTART,
        DENYLIST,
        SQLITE_CMD,
        REMOVE_MODULES,
        ZYGISK,

        _STAGE_BARRIER_,

        POST_FS_DATA,
        LATE_START,
        BOOT_COMPLETE,

        END,
    }

    // Daemon response codes.
    #[repr(i32)]
    enum RespondCode {
        ERROR = -1,
        OK = 0,
        ROOT_REQUIRED,
        ACCESS_DENIED,
        END,
    }

    // Keys for the settings table in the Magisk database.
    enum DbEntryKey {
        RootAccess,
        SuMultiuserMode,
        SuMntNs,
        DenylistConfig,
        ZygiskConfig,
        BootloopCount,
        SuManager,
    }

    // Mount namespace isolation policy for root sessions.
    #[repr(i32)]
    enum MntNsMode {
        Global,
        Requester,
        Isolate,
    }

    // Superuser grant policy.
    #[repr(i32)]
    enum SuPolicy {
        Query,
        Deny,
        Allow,
        Restrict,
    }

    // Module metadata: name plus 32/64-bit zygisk library fds/flags
    // (exact z32/z64 semantics defined by the module loader).
    struct ModuleInfo {
        name: String,
        z32: i32,
        z64: i32,
    }

    // Requests a zygisk client can make to the daemon.
    #[repr(i32)]
    enum ZygiskRequest {
        GetInfo,
        ConnectCompanion,
        GetModDir,
    }

    // Bit flags describing a process state as seen by zygisk.
    #[repr(u32)]
    enum ZygiskStateFlags {
        ProcessGrantedRoot = 0x00000001,
        ProcessOnDenyList = 0x00000002,
        DenyListEnforced = 0x40000000,
        ProcessIsMagiskApp = 0x80000000,
    }

    // A superuser request as decoded from the client socket.
    #[derive(Decodable)]
    struct SuRequest {
        target_uid: i32,
        target_pid: i32,
        login: bool,
        keep_env: bool,
        drop_cap: bool,
        shell: String,
        command: String,
        context: String,
        gids: Vec<u32>,
    }

    // Functions implemented in C++ and called from Rust.
    unsafe extern "C++" {
        #[cxx_name = "Utf8CStr"]
        type Utf8CStrRef<'a> = base::Utf8CStrRef<'a>;

        include!("include/core.hpp");

        // Bound to the inline `_rs` wrappers in core.hpp.
        #[cxx_name = "get_magisk_tmp_rs"]
        fn get_magisk_tmp() -> Utf8CStrRef<'static>;
        #[cxx_name = "resolve_preinit_dir_rs"]
        fn resolve_preinit_dir(base_dir: Utf8CStrRef) -> String;
        fn check_key_combo() -> bool;
        fn unlock_blocks();
        fn update_deny_flags(uid: i32, process: &str, flags: &mut u32);
        fn initialize_denylist();
        fn switch_mnt_ns(pid: i32) -> i32;
        fn exec_root_shell(client: i32, pid: i32, req: &mut SuRequest, mode: MntNsMode);

        // Scripting
        fn exec_script(script: Utf8CStrRef);
        fn exec_common_scripts(stage: Utf8CStrRef);
        fn exec_module_scripts(state: Utf8CStrRef, modules: &Vec<ModuleInfo>);
        fn install_apk(apk: Utf8CStrRef);
        fn uninstall_pkg(apk: Utf8CStrRef);
        fn install_module(zip: Utf8CStrRef);

        // Denylist
        fn denylist_cli(args: &mut Vec<String>) -> i32;
        fn denylist_handler(client: i32);
        fn scan_deny_apps();

        include!("include/sqlite.hpp");

        // Opaque sqlite wrapper types; see include/sqlite.hpp.
        type sqlite3;
        type DbValues;
        type DbStatement;
        fn sqlite3_errstr(code: i32) -> *const c_char;
        fn open_and_init_db() -> *mut sqlite3;
        fn get_int(self: &DbValues, index: i32) -> i32;
        #[cxx_name = "get_str"]
        fn get_text(self: &DbValues, index: i32) -> &str;
        fn bind_text(self: Pin<&mut DbStatement>, index: i32, val: &str) -> i32;
        fn bind_int64(self: Pin<&mut DbStatement>, index: i32, val: i64) -> i32;
    }

    // Functions implemented in Rust and exposed to C++.
    extern "Rust" {
        fn android_logging();
        fn zygisk_logging();
        fn zygisk_close_logd();
        fn zygisk_get_logd() -> i32;
        fn revert_unmount(pid: i32);
        fn zygisk_should_load_module(flags: u32) -> bool;
        fn send_fd(socket: i32, fd: i32) -> bool;
        fn recv_fd(socket: i32) -> i32;
        fn recv_fds(socket: i32) -> Vec<i32>;
        fn write_to_fd(self: &SuRequest, fd: i32);
        fn pump_tty(ptmx: i32, pump_stdin: bool);
        fn get_pty_num(fd: i32) -> i32;
        fn lgetfilecon(path: Utf8CStrRef, con: &mut [u8]) -> bool;
        fn setfilecon(path: Utf8CStrRef, con: Utf8CStrRef) -> bool;
        fn get_prop(name: Utf8CStrRef) -> String;
        // unsafe: takes raw argc/argv from the C++ entry point.
        unsafe fn resetprop_main(argc: i32, argv: *mut *mut c_char) -> i32;
        #[cxx_name = "connect_daemon"]
        fn connect_daemon_for_cxx(code: RequestCode, create: bool) -> i32;
        unsafe fn magisk_main(argc: i32, argv: *mut *mut c_char) -> i32;
    }

    // Default constructors
    extern "Rust" {
        // Exposed to C++ as SuRequest::New().
        #[Self = SuRequest]
        #[cxx_name = "New"]
        fn default() -> SuRequest;
    }

    // FFI for MagiskD
    extern "Rust" {
        // The daemon singleton; C++ obtains it via MagiskD::Get().
        type MagiskD;
        fn sdk_int(&self) -> i32;
        fn zygisk_enabled(&self) -> bool;
        fn get_db_setting(&self, key: DbEntryKey) -> i32;
        #[cxx_name = "set_db_setting"]
        fn set_db_setting_for_cxx(&self, key: DbEntryKey, value: i32) -> bool;
        #[Self = MagiskD]
        #[cxx_name = "Get"]
        fn get() -> &'static MagiskD;
    }
}
impl SuRequest {
    /// Serialize this request into an already-open file descriptor.
    ///
    /// The fd is borrowed, not owned: wrapping the `File` in `ManuallyDrop`
    /// prevents the descriptor from being closed when the wrapper is dropped.
    /// Encoding errors are deliberately ignored (best-effort write).
    fn write_to_fd(&self, fd: i32) {
        // SAFETY: caller guarantees `fd` is a valid, open descriptor.
        let mut borrowed = ManuallyDrop::new(unsafe { File::from_raw_fd(fd) });
        let _ = self.encode(borrowed.deref_mut());
    }
}

339
native/src/core/logging.rs Normal file
View file

@ -0,0 +1,339 @@
use crate::consts::{LOG_PIPE, LOGFILE};
use crate::ffi::get_magisk_tmp;
use crate::logging::LogFile::{Actual, Buffer};
use base::const_format::concatcp;
use base::{
FsPathBuilder, LogLevel, LoggedResult, ReadExt, ResultExt, Utf8CStr, Utf8CStrBuf, WriteExt,
cstr, libc, new_daemon_thread, raw_cstr, update_logger,
};
use bytemuck::{Pod, Zeroable, bytes_of, write_zeroes};
use libc::{PIPE_BUF, c_char, localtime_r, sigtimedwait, time_t, timespec, tm};
use nix::fcntl::OFlag;
use nix::sys::signal::{SigSet, SigmaskHow, Signal};
use nix::unistd::{Gid, Uid, chown, getpid, gettid};
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::FromPrimitive;
use std::cmp::min;
use std::fmt::Write as _;
use std::fs::File;
use std::io::{IoSlice, Read, Write};
use std::mem::ManuallyDrop;
use std::os::fd::{FromRawFd, IntoRawFd, RawFd};
use std::ptr::null_mut;
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{SystemTime, UNIX_EPOCH};
use std::{fs, io};
// Android log priorities, mirroring android/log.h.
#[allow(dead_code, non_camel_case_types)]
#[derive(FromPrimitive, ToPrimitive)]
#[repr(i32)]
enum ALogPriority {
    ANDROID_LOG_UNKNOWN = 0,
    ANDROID_LOG_DEFAULT,
    ANDROID_LOG_VERBOSE,
    ANDROID_LOG_DEBUG,
    ANDROID_LOG_INFO,
    ANDROID_LOG_WARN,
    ANDROID_LOG_ERROR,
    ANDROID_LOG_FATAL,
    ANDROID_LOG_SILENT,
}

// Raw C functions: Android's liblog writer and libc's strftime.
unsafe extern "C" {
    fn __android_log_write(prio: i32, tag: *const c_char, msg: *const c_char);
    fn strftime(buf: *mut c_char, len: usize, fmt: *const c_char, tm: *const tm) -> usize;
}

// Map our internal log levels onto Android log priorities.
fn level_to_prio(level: LogLevel) -> i32 {
    match level {
        LogLevel::Error => ALogPriority::ANDROID_LOG_ERROR as i32,
        LogLevel::Warn => ALogPriority::ANDROID_LOG_WARN as i32,
        LogLevel::Info => ALogPriority::ANDROID_LOG_INFO as i32,
        LogLevel::Debug => ALogPriority::ANDROID_LOG_DEBUG as i32,
    }
}

// Write one message to Android logcat with the fixed "Magisk" tag.
fn android_log_write(level: LogLevel, msg: &Utf8CStr) {
    unsafe {
        __android_log_write(level_to_prio(level), raw_cstr!("Magisk"), msg.as_ptr());
    }
}

/// Install a logger backend that writes to logcat only.
pub fn android_logging() {
    update_logger(|logger| logger.write = android_log_write);
}

/// Install the daemon logger: logcat plus the magiskd log pipe.
pub fn magisk_logging() {
    fn magisk_log_write(level: LogLevel, msg: &Utf8CStr) {
        android_log_write(level, msg);
        magisk_log_to_pipe(level_to_prio(level), msg);
    }
    update_logger(|logger| logger.write = magisk_log_write);
}

/// Install the zygisk-client logger: logcat plus the log FIFO to magiskd.
pub fn zygisk_logging() {
    fn zygisk_log_write(level: LogLevel, msg: &Utf8CStr) {
        android_log_write(level, msg);
        zygisk_log_to_pipe(level_to_prio(level), msg);
    }
    update_logger(|logger| logger.write = zygisk_log_write);
}
// Fixed-size header preceding every message sent over the log pipe.
#[derive(Copy, Clone, Pod, Zeroable)]
#[repr(C)]
struct LogMeta {
    prio: i32, // ALogPriority value; negative values are control messages
    len: i32,  // payload byte length that follows this header
    pid: i32,
    tid: i32,
}

// Header + payload must fit in PIPE_BUF so each write is atomic.
const MAX_MSG_LEN: usize = PIPE_BUF - size_of::<LogMeta>();

// Send one log record (header + truncated message) over the pipe in a single
// vectored write. Returns the underlying write result; failures are also
// reported to logcat since the pipe logger itself is unusable at that point.
fn write_log_to_pipe(mut logd: &File, prio: i32, msg: &Utf8CStr) -> io::Result<usize> {
    // Truncate message if needed
    let len = min(MAX_MSG_LEN, msg.len());
    let msg = &msg.as_bytes()[..len];

    let meta = LogMeta {
        prio,
        len: len as i32,
        pid: getpid().as_raw(),
        tid: gettid().as_raw(),
    };

    let io1 = IoSlice::new(bytes_of(&meta));
    let io2 = IoSlice::new(msg);
    let result = logd.write_vectored(&[io1, io2]);
    if let Err(ref e) = result {
        let mut buf = cstr::buf::new::<256>();
        write!(buf, "Cannot write_log_to_pipe: {e}").ok();
        android_log_write(LogLevel::Error, &buf);
    }
    result
}
// Shared handle to the daemon-side write end of the log pipe.
// None means the pipe is not (or no longer) available.
static MAGISK_LOGD_FD: Mutex<Option<Arc<File>>> = Mutex::new(None);

// Run `f` against the log pipe if it is open. The Arc is cloned out of the
// mutex first so `f` executes without holding the lock.
fn with_logd_fd<R, F: FnOnce(&File) -> io::Result<R>>(f: F) {
    let fd = MAGISK_LOGD_FD.lock().unwrap().clone();
    if let Some(logd) = fd
        && f(&logd).is_err()
    {
        // If any error occurs, shut down the logd pipe
        *MAGISK_LOGD_FD.lock().unwrap() = None;
    }
}

// Daemon-side pipe logger used by magisk_logging().
fn magisk_log_to_pipe(prio: i32, msg: &Utf8CStr) {
    with_logd_fd(|logd| write_log_to_pipe(logd, prio, msg));
}
// SAFETY: zygisk client code runs single threaded, so no need to prevent data race
static ZYGISK_LOGD: AtomicI32 = AtomicI32::new(-1);

/// Close the zygisk-side log FIFO and mark it as unavailable (-1).
/// Closing -1 when already unset is harmless (EBADF is ignored).
pub fn zygisk_close_logd() {
    unsafe {
        libc::close(ZYGISK_LOGD.swap(-1, Ordering::Relaxed));
    }
}

/// Return the fd of the log FIFO to magiskd, (re)opening it on demand.
/// Returns -1 if the FIFO cannot be opened.
pub fn zygisk_get_logd() -> i32 {
    // If we don't have the log pipe set, open the log pipe FIFO. This could actually happen
    // multiple times in the zygote daemon (parent process) because we had to close this
    // file descriptor to prevent crashing.
    //
    // For some reason, zygote sanitizes and checks FDs *before* forking. This results in the fact
    // that *every* time before zygote forks, it has to close all logging related FDs in order
    // to pass FD checks, just to have it re-initialized immediately after any
    // logging happens ¯\_(ツ)_/¯.
    //
    // To be consistent with this behavior, we also have to close the log pipe to magiskd
    // to make zygote NOT crash if necessary. We accomplish this by hooking __android_log_close
    // and closing it at the same time as the rest of logging FDs.
    let mut raw_fd = ZYGISK_LOGD.load(Ordering::Relaxed);
    if raw_fd < 0 {
        // Fall back to logcat-only logging while the pipe is down.
        android_logging();
        let path = cstr::buf::default()
            .join_path(get_magisk_tmp())
            .join_path(LOG_PIPE);
        // Open as RW as sometimes it may block
        if let Ok(fd) = path.open(OFlag::O_RDWR | OFlag::O_CLOEXEC) {
            // Only re-enable zygisk logging if success
            zygisk_logging();
            raw_fd = fd.into_raw_fd();
            unsafe {
                // Publish the new fd; close whatever was stored before
                // (usually -1, which close() rejects harmlessly).
                libc::close(ZYGISK_LOGD.swap(raw_fd, Ordering::Relaxed));
            }
        } else {
            return -1;
        }
    }
    raw_fd
}

// Zygisk-side pipe logger. SIGPIPE is masked around the write so a dead
// daemon end cannot kill the zygote process; any pending SIGPIPE is drained
// before the original mask is restored.
fn zygisk_log_to_pipe(prio: i32, msg: &Utf8CStr) {
    let fd = zygisk_get_logd();
    if fd < 0 {
        // Cannot talk to pipe, abort
        return;
    }

    // Block SIGPIPE
    let mut mask = SigSet::empty();
    mask.add(Signal::SIGPIPE);
    let orig_mask = mask.thread_swap_mask(SigmaskHow::SIG_SETMASK);

    // Borrow the fd without taking ownership (ManuallyDrop avoids close).
    let logd = ManuallyDrop::new(unsafe { File::from_raw_fd(fd) });
    let result = write_log_to_pipe(&logd, prio, msg);

    // Consume SIGPIPE if exists, then restore mask
    if let Ok(orig_mask) = orig_mask {
        unsafe {
            // Unfortunately nix does not have an abstraction over sigtimedwait.
            // Fallback to use raw libc function calls.
            let ts: timespec = std::mem::zeroed();
            sigtimedwait(mask.as_ref(), null_mut(), &ts);
        }
        orig_mask.thread_set_mask().ok();
    }

    // If any error occurs, shut down the logd pipe
    if result.is_err() {
        zygisk_close_logd();
    }
}
// The following is implementation for the logging daemon

// Destination for formatted log lines: buffered in memory until the first
// "flush" control message (prio < 0) switches it to the real log file.
enum LogFile {
    Buffer(Vec<u8>),
    Actual(File),
}

impl LogFile {
    // Unified Write view over either variant.
    fn as_write(&mut self) -> &mut dyn Write {
        match self {
            Buffer(e) => e,
            Actual(e) => e,
        }
    }
}

// Log daemon main loop: read (LogMeta, payload) records from the FIFO,
// format them as "MM-DD HH:MM:SS.mmm   pid   tid P : msg", and append to the
// buffer/logfile. Returns Err only on pipe/file I/O failure.
fn logfile_write_loop(mut pipe: File) -> io::Result<()> {
    let mut logfile: LogFile = Buffer(Vec::new());
    let mut meta = LogMeta::zeroed();
    let mut msg_buf = [0u8; MAX_MSG_LEN];
    let mut aux = cstr::buf::new::<64>();

    loop {
        // Read request
        write_zeroes(&mut meta);
        pipe.read_pod(&mut meta)?;

        // prio < 0 is the control message sent by setup_logfile(): rotate the
        // previous log to .bak and flush the in-memory buffer to disk.
        if meta.prio < 0 {
            if let Buffer(ref mut buf) = logfile {
                fs::rename(LOGFILE, concatcp!(LOGFILE, ".bak")).ok();
                let mut out = File::create(LOGFILE)?;
                out.write_all(buf.as_slice())?;
                logfile = Actual(out);
            }
            continue;
        }

        // Reject malformed lengths instead of trusting the sender.
        if meta.len < 0 || meta.len > MAX_MSG_LEN as i32 {
            continue;
        }

        // Read the rest of the message
        let msg = &mut msg_buf[..(meta.len as usize)];
        pipe.read_exact(msg)?;

        // Start building the log string
        aux.clear();
        let prio = ALogPriority::from_i32(meta.prio).unwrap_or(ALogPriority::ANDROID_LOG_UNKNOWN);
        let prio = match prio {
            ALogPriority::ANDROID_LOG_VERBOSE => 'V',
            ALogPriority::ANDROID_LOG_DEBUG => 'D',
            ALogPriority::ANDROID_LOG_INFO => 'I',
            ALogPriority::ANDROID_LOG_WARN => 'W',
            ALogPriority::ANDROID_LOG_ERROR => 'E',
            // Unsupported values, skip
            _ => continue,
        };

        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        // Note: the obvious better implementation is to use the rust chrono crate, however
        // the crate cannot fetch the proper local timezone without pulling in a bunch of
        // timezone handling code. To reduce binary size, fallback to use localtime_r in libc.
        unsafe {
            let secs = now.as_secs() as time_t;
            let mut tm: tm = std::mem::zeroed();
            if localtime_r(&secs, &mut tm).is_null() {
                continue;
            }
            strftime(aux.as_mut_ptr(), aux.capacity(), raw_cstr!("%m-%d %T"), &tm);
        }
        // rebuild() re-syncs the buffer length after the raw strftime write.
        if aux.rebuild().is_ok() {
            write!(
                aux,
                ".{:03} {:5} {:5} {} : ",
                now.subsec_millis(),
                meta.pid,
                meta.tid,
                prio
            )
            .ok();
        } else {
            continue;
        }

        let io1 = IoSlice::new(aux.as_bytes());
        let io2 = IoSlice::new(msg);
        // We don't need to care the written len because we are writing less than PIPE_BUF
        // It's guaranteed to always write the whole thing atomically
        let _ = logfile.as_write().write_vectored(&[io1, io2])?;
    }
}
/// Tell the log daemon to start persisting logs to disk by sending the
/// prio = -1 control record (see logfile_write_loop).
pub fn setup_logfile() {
    with_logd_fd(|mut logd| {
        let meta = LogMeta {
            prio: -1,
            len: 0,
            pid: 0,
            tid: 0,
        };
        (&mut logd).write_pod(&meta)
    });
}

/// Create the log FIFO (root-owned, mode 0666), open both ends, publish the
/// write end in MAGISK_LOGD_FD, and spawn the writer thread that owns the
/// read end.
pub fn start_log_daemon() {
    let path = cstr::buf::default()
        .join_path(get_magisk_tmp())
        .join_path(LOG_PIPE);

    // Raw thread entry: takes ownership of the read-end fd passed as usize.
    extern "C" fn logfile_writer_thread(arg: usize) -> usize {
        let file = unsafe { File::from_raw_fd(arg as RawFd) };
        logfile_write_loop(file).ok();
        // If any error occurs, shut down the logd pipe
        *MAGISK_LOGD_FD.lock().unwrap() = None;
        0
    }

    let _: LoggedResult<()> = try {
        // mkfifo may fail if the FIFO already exists; that is fine.
        path.mkfifo(0o666).log_ok();
        chown(path.as_utf8_cstr(), Some(Uid::from(0)), Some(Gid::from(0)))?;
        // Keeping an O_RDWR handle on our side prevents EOF/SIGPIPE churn as
        // clients come and go.
        let read = path.open(OFlag::O_RDWR | OFlag::O_CLOEXEC)?;
        let write = path.open(OFlag::O_WRONLY | OFlag::O_CLOEXEC)?;
        *MAGISK_LOGD_FD.lock().unwrap() = Some(Arc::new(write));
        unsafe {
            new_daemon_thread(logfile_writer_thread, read.into_raw_fd() as usize);
        }
    };
}

298
native/src/core/magisk.rs Normal file
View file

@ -0,0 +1,298 @@
use crate::consts::{APPLET_NAMES, MAGISK_VER_CODE, MAGISK_VERSION, POST_FS_DATA_WAIT_TIME};
use crate::daemon::connect_daemon;
use crate::ffi::{RequestCode, denylist_cli, get_magisk_tmp, install_module, unlock_blocks};
use crate::mount::find_preinit_device;
use crate::selinux::restorecon;
use crate::socket::{Decodable, Encodable};
use argh::FromArgs;
use base::{CmdArgs, EarlyExitExt, LoggedResult, Utf8CString, argh, clone_attr};
use nix::poll::{PollFd, PollFlags, PollTimeout};
use std::ffi::c_char;
use std::os::fd::AsFd;
use std::process::exit;
// Print the CLI help text. Called both for `magisk` with no arguments and as
// the early-exit handler for argument parsing errors (see magisk_main).
fn print_usage() {
    eprintln!(
        r#"Magisk - Multi-purpose Utility

Usage: magisk [applet [arguments]...]
   or: magisk [options]...

Options:
   -c                        print current binary version
   -v                        print running daemon version
   -V                        print running daemon version code
   --list                    list all available applets
   --remove-modules [-n]     remove all modules, reboot if -n is not provided
   --install-module ZIP      install a module zip file

Advanced Options (Internal APIs):
   --daemon                  manually start magisk daemon
   --stop                    remove all magisk changes and stop daemon
   --[init trigger]          callback on init triggers. Valid triggers:
                             post-fs-data, service, boot-complete, zygote-restart
   --unlock-blocks           set BLKROSET flag to OFF for all block devices
   --restorecon              restore selinux context on Magisk files
   --clone-attr SRC DEST     clone permission, owner, and selinux context
   --clone SRC DEST          clone SRC to DEST
   --sqlite SQL              exec SQL commands to Magisk database
   --path                    print Magisk tmpfs mount path
   --denylist ARGS           denylist config CLI
   --preinit-device          resolve a device to store preinit files

Available applets:
    {}
"#,
        APPLET_NAMES.join(", ")
    );
}
// Top-level CLI: every action is modeled as an argh "subcommand" whose name
// is the option string itself (e.g. "--daemon"); see magisk_main for how the
// argument list is massaged to make that work.
#[derive(FromArgs)]
struct Cli {
    #[argh(subcommand)]
    action: MagiskAction,
}

// One variant per supported option; each wraps a unit (or small) struct that
// carries that option's own arguments.
#[derive(FromArgs)]
#[argh(subcommand)]
enum MagiskAction {
    LocalVersion(LocalVersion),
    Version(Version),
    VersionCode(VersionCode),
    List(ListApplets),
    RemoveModules(RemoveModules),
    InstallModule(InstallModule),
    Daemon(StartDaemon),
    Stop(StopDaemon),
    PostFsData(PostFsData),
    Service(ServiceCmd),
    BootComplete(BootComplete),
    ZygoteRestart(ZygoteRestart),
    UnlockBlocks(UnlockBlocks),
    RestoreCon(RestoreCon),
    CloneAttr(CloneAttr),
    CloneFile(CloneFile),
    Sqlite(Sqlite),
    Path(PathCmd),
    DenyList(DenyList),
    PreInitDevice(PreInitDevice),
}

// -c: print the version of this binary.
#[derive(FromArgs)]
#[argh(subcommand, name = "-c")]
struct LocalVersion {}

// -v: print the running daemon's version string.
#[derive(FromArgs)]
#[argh(subcommand, name = "-v")]
struct Version {}

// -V: print the running daemon's version code.
#[derive(FromArgs)]
#[argh(subcommand, name = "-V")]
struct VersionCode {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--list")]
struct ListApplets {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--remove-modules")]
struct RemoveModules {
    // -n: skip the reboot after removal.
    #[argh(switch, short = 'n')]
    no_reboot: bool,
}

#[derive(FromArgs)]
#[argh(subcommand, name = "--install-module")]
struct InstallModule {
    #[argh(positional)]
    zip: Utf8CString,
}

#[derive(FromArgs)]
#[argh(subcommand, name = "--daemon")]
struct StartDaemon {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--stop")]
struct StopDaemon {}

// Boot-stage trigger callbacks, invoked by init scripts.
#[derive(FromArgs)]
#[argh(subcommand, name = "--post-fs-data")]
struct PostFsData {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--service")]
struct ServiceCmd {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--boot-complete")]
struct BootComplete {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--zygote-restart")]
struct ZygoteRestart {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--unlock-blocks")]
struct UnlockBlocks {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--restorecon")]
struct RestoreCon {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--clone-attr")]
struct CloneAttr {
    #[argh(positional)]
    from: Utf8CString,
    #[argh(positional)]
    to: Utf8CString,
}

#[derive(FromArgs)]
#[argh(subcommand, name = "--clone")]
struct CloneFile {
    #[argh(positional)]
    from: Utf8CString,
    #[argh(positional)]
    to: Utf8CString,
}

#[derive(FromArgs)]
#[argh(subcommand, name = "--sqlite")]
struct Sqlite {
    #[argh(positional)]
    sql: String,
}

#[derive(FromArgs)]
#[argh(subcommand, name = "--path")]
struct PathCmd {}

#[derive(FromArgs)]
#[argh(subcommand, name = "--denylist")]
struct DenyList {
    // greedy: everything after --denylist is forwarded verbatim.
    #[argh(positional, greedy)]
    args: Vec<String>,
}

#[derive(FromArgs)]
#[argh(subcommand, name = "--preinit-device")]
struct PreInitDevice {}
impl MagiskAction {
    /// Execute the parsed CLI action and return the process exit code.
    /// Most arms talk to the daemon over the socket obtained from
    /// connect_daemon(); `?` surfaces connection/protocol failures as
    /// LoggedResult errors (mapped to exit code 1 by magisk_main).
    fn exec(self) -> LoggedResult<i32> {
        use MagiskAction::*;
        match self {
            // -c: version of this binary; suffix D/R distinguishes
            // debug/release builds.
            LocalVersion(_) => {
                #[cfg(debug_assertions)]
                println!("{MAGISK_VERSION}:MAGISK:D ({MAGISK_VER_CODE})");
                #[cfg(not(debug_assertions))]
                println!("{MAGISK_VERSION}:MAGISK:R ({MAGISK_VER_CODE})");
            }
            // -v / -V: ask the running daemon for its version info.
            Version(_) => {
                let mut fd = connect_daemon(RequestCode::CHECK_VERSION, false)?;
                let ver = String::decode(&mut fd)?;
                println!("{ver}");
            }
            VersionCode(_) => {
                let mut fd = connect_daemon(RequestCode::CHECK_VERSION_CODE, false)?;
                let ver = i32::decode(&mut fd)?;
                println!("{ver}");
            }
            List(_) => {
                for name in APPLET_NAMES {
                    println!("{name}");
                }
            }
            // Daemon removes modules; the daemon's reply is our exit code.
            RemoveModules(self::RemoveModules { no_reboot }) => {
                let mut fd = connect_daemon(RequestCode::REMOVE_MODULES, false)?;
                let do_reboot = !no_reboot;
                do_reboot.encode(&mut fd)?;
                return Ok(i32::decode(&mut fd)?);
            }
            // install_module never returns (C++ [[noreturn]]).
            InstallModule(self::InstallModule { zip }) => {
                install_module(&zip);
            }
            // create = true: spawn the daemon if not already running.
            Daemon(_) => {
                let _ = connect_daemon(RequestCode::START_DAEMON, true)?;
            }
            Stop(_) => {
                let mut fd = connect_daemon(RequestCode::STOP_DAEMON, false)?;
                return Ok(i32::decode(&mut fd)?);
            }
            // Wait (bounded) for the daemon to finish post-fs-data handling.
            PostFsData(_) => {
                let fd = connect_daemon(RequestCode::POST_FS_DATA, true)?;
                let mut pfd = [PollFd::new(fd.as_fd(), PollFlags::POLLIN)];
                nix::poll::poll(
                    &mut pfd,
                    PollTimeout::try_from(POST_FS_DATA_WAIT_TIME * 1000)?,
                )?;
            }
            Service(_) => {
                let _ = connect_daemon(RequestCode::LATE_START, true)?;
            }
            BootComplete(_) => {
                let _ = connect_daemon(RequestCode::BOOT_COMPLETE, false)?;
            }
            ZygoteRestart(_) => {
                let _ = connect_daemon(RequestCode::ZYGOTE_RESTART, false)?;
            }
            UnlockBlocks(_) => {
                unlock_blocks();
            }
            RestoreCon(_) => {
                restorecon();
            }
            CloneAttr(self::CloneAttr { from, to }) => {
                clone_attr(&from, &to)?;
            }
            CloneFile(self::CloneFile { from, to }) => {
                from.copy_to(&to)?;
            }
            // Stream result rows from the daemon until an empty line.
            Sqlite(self::Sqlite { sql }) => {
                let mut fd = connect_daemon(RequestCode::SQLITE_CMD, false)?;
                sql.encode(&mut fd)?;
                loop {
                    let line = String::decode(&mut fd)?;
                    if line.is_empty() {
                        return Ok(0);
                    }
                    println!("{line}");
                }
            }
            // Empty tmp path => exit 1 (not mounted / unavailable).
            Path(_) => {
                let tmp = get_magisk_tmp();
                if tmp.is_empty() {
                    return Ok(1);
                } else {
                    println!("{tmp}");
                }
            }
            // Forward raw args to the C++ denylist CLI.
            DenyList(self::DenyList { mut args }) => {
                return Ok(denylist_cli(&mut args));
            }
            PreInitDevice(_) => {
                let name = find_preinit_device();
                if name.is_empty() {
                    return Ok(1);
                } else {
                    println!("{name}");
                }
            }
        };
        Ok(0)
    }
}
/// Entry point for the `magisk` binary, called from C++ through the cxx
/// bridge with the raw argc/argv. Returns the process exit code.
pub fn magisk_main(argc: i32, argv: *mut *mut c_char) -> i32 {
    if argc < 2 {
        print_usage();
        exit(1);
    }
    let mut cmds = CmdArgs::new(argc, argv.cast()).0;
    // We need to manually inject "--" so that all actions can be treated as subcommands
    cmds.insert(1, "--");
    // &cmds[..1] is the program name; parse errors/--help go through
    // print_usage via on_early_exit.
    let cli = Cli::from_args(&cmds[..1], &cmds[1..]).on_early_exit(print_usage);
    // Any LoggedResult error collapses to exit code 1.
    cli.action.exec().unwrap_or(1)
}

936
native/src/core/module.rs Normal file
View file

@ -0,0 +1,936 @@
use crate::consts::{MODULEMNT, MODULEROOT, MODULEUPGRADE, WORKERDIR};
use crate::daemon::MagiskD;
use crate::ffi::{ModuleInfo, exec_module_scripts, exec_script, get_magisk_tmp};
use crate::mount::setup_module_mount;
use crate::resetprop::load_prop_file;
use base::{
DirEntry, Directory, FsPathBuilder, LoggedResult, OsResult, ResultExt, SilentLogExt, Utf8CStr,
Utf8CStrBuf, Utf8CString, WalkResult, clone_attr, cstr, debug, error, info, libc, raw_cstr,
warn,
};
use nix::fcntl::OFlag;
use nix::mount::MsFlags;
use nix::unistd::UnlinkatFlags;
use std::collections::BTreeMap;
use std::os::fd::IntoRawFd;
use std::path::{Component, Path};
use std::ptr;
use std::sync::atomic::Ordering;
// Partitions into whose PATH entries Magisk binaries may be injected.
// Note the trailing '/' — these are used as string prefixes.
const MAGISK_BIN_INJECT_PARTITIONS: [&Utf8CStr; 4] = [
    cstr!("/system/"),
    cstr!("/vendor/"),
    cstr!("/product/"),
    cstr!("/system_ext/"),
];

// Non-/system read-only partitions (no trailing '/', used as exact roots).
const SECONDARY_READ_ONLY_PARTITIONS: [&Utf8CStr; 3] =
    [cstr!("/vendor"), cstr!("/product"), cstr!("/system_ext")];

// Children of an FsNode::Directory, keyed by entry name (sorted by BTreeMap).
type FsNodeMap = BTreeMap<String, FsNode>;

// Uniform debug-log line for mount operations: "reason  : dest <- src".
macro_rules! module_log {
    ($($args:tt)+) => {
        debug!("{:8}: {} <- {}", $($args)+)
    }
}
// Bind mount src onto dest (recursively if `rec`), then remount read-only.
// `reason` is only used for the debug log line.
#[allow(unused_variables)]
fn bind_mount(reason: &str, src: &Utf8CStr, dest: &Utf8CStr, rec: bool) {
    module_log!(reason, dest, src);
    // Ignore any kind of error here. If a single bind mount fails due to selinux permissions or
    // kernel limitations, don't let it break module mount entirely.
    src.bind_mount_to(dest, rec).log_ok();
    dest.remount_mount_point_flags(MsFlags::MS_RDONLY).log_ok();
}

// Create a zero-permission placeholder (dir or empty file) at dest, then
// bind mount src over it. Used to materialize mount points inside tmpfs.
fn mount_dummy<'a>(
    reason: &str,
    src: &Utf8CStr,
    dest: &'a Utf8CStr,
    is_dir: bool,
) -> OsResult<'a, ()> {
    if is_dir {
        dest.mkdir(0o000)?;
    } else {
        dest.create(OFlag::O_CREAT | OFlag::O_RDONLY | OFlag::O_CLOEXEC, 0o000)?;
    }
    bind_mount(reason, src, dest, false);
    Ok(())
}
// File path that act like a stack, popping out the last element
// automatically when out of scope. Using Rust's lifetime mechanism,
// we can ensure the buffer will never be incorrectly copied or modified.
// After calling append or reborrow, the mutable reference's lifetime is
// "transferred" to the returned object, and the compiler will guarantee
// that the original mutable reference can only be reused if and only if
// the newly created instance is destroyed.
struct PathTracker<'a> {
    // The shared path buffer being tracked.
    path: &'a mut dyn Utf8CStrBuf,
    // Buffer length to restore on drop (the state before our append).
    len: usize,
}

impl PathTracker<'_> {
    // Start tracking at the buffer's current length.
    fn from<'a>(path: &'a mut dyn Utf8CStrBuf) -> PathTracker<'a> {
        let len = path.len();
        PathTracker { path, len }
    }

    // Push a path component; the returned tracker pops it when dropped.
    fn append(&mut self, name: &str) -> PathTracker<'_> {
        let len = self.path.len();
        self.path.append_path(name);
        PathTracker {
            path: self.path,
            len,
        }
    }

    // Create a child tracker at the current position (no component pushed).
    fn reborrow(&mut self) -> PathTracker<'_> {
        Self::from(self.path)
    }
}

impl Drop for PathTracker<'_> {
    // Revert back to the original state after finish using the buffer
    fn drop(&mut self) {
        self.path.truncate(self.len);
    }
}
// The comments for this struct assume real = "/system/bin"
// Keeps three parallel path buffers in lockstep: the real filesystem path,
// the module's source path, and the module path under the magisk tmp mount.
struct ModulePaths<'a> {
    real: PathTracker<'a>,
    module: PathTracker<'a>,
    module_mnt: PathTracker<'a>,
}

impl ModulePaths<'_> {
    // Initialize the three buffers with their fixed prefixes.
    fn new<'a>(
        real: &'a mut dyn Utf8CStrBuf,
        module: &'a mut dyn Utf8CStrBuf,
        module_mnt: &'a mut dyn Utf8CStrBuf,
    ) -> ModulePaths<'a> {
        real.append_path("/");
        module.append_path(MODULEROOT);
        module_mnt
            .append_path(get_magisk_tmp())
            .append_path(MODULEMNT);
        ModulePaths {
            real: PathTracker::from(real),
            module: PathTracker::from(module),
            module_mnt: PathTracker::from(module_mnt),
        }
    }

    // Descend into a specific module: only the module-side paths change.
    fn set_module(&mut self, module: &str) -> ModulePaths<'_> {
        ModulePaths {
            real: self.real.reborrow(),
            module: self.module.append(module),
            module_mnt: self.module_mnt.append(module),
        }
    }

    // Push the same component onto all three paths.
    fn append(&mut self, name: &str) -> ModulePaths<'_> {
        ModulePaths {
            real: self.real.append(name),
            module: self.module.append(name),
            module_mnt: self.module_mnt.append(name),
        }
    }

    // Returns "/system/bin"
    fn real(&self) -> &Utf8CStr {
        self.real.path
    }

    // Returns "/data/adb/modules/{module}/system/bin"
    fn module(&self) -> &Utf8CStr {
        self.module.path
    }

    // Returns "$MAGISK_TMP/.magisk/modules/{module}/system/bin"
    fn module_mnt(&self) -> &Utf8CStr {
        self.module_mnt.path
    }
}
// The comments for this struct assume real = "/system/bin"
// Pair of lockstep paths used while committing: the real target path and the
// staging location inside the tmpfs worker directory.
struct MountPaths<'a> {
    real: PathTracker<'a>,
    worker: PathTracker<'a>,
}

impl MountPaths<'_> {
    fn new<'a>(real: &'a mut dyn Utf8CStrBuf, worker: &'a mut dyn Utf8CStrBuf) -> MountPaths<'a> {
        real.append_path("/");
        worker.append_path(get_magisk_tmp()).append_path(WORKERDIR);
        MountPaths {
            real: PathTracker::from(real),
            worker: PathTracker::from(worker),
        }
    }

    // Push the same component onto both paths.
    fn append(&mut self, name: &str) -> MountPaths<'_> {
        MountPaths {
            real: self.real.append(name),
            worker: self.worker.append(name),
        }
    }

    // Child view at the current position (nothing pushed).
    fn reborrow(&mut self) -> MountPaths<'_> {
        MountPaths {
            real: self.real.reborrow(),
            worker: self.worker.reborrow(),
        }
    }

    // Returns "/system/bin"
    fn real(&self) -> &Utf8CStr {
        self.real.path
    }

    // Returns "$MAGISK_TMP/.magisk/worker/system/bin"
    fn worker(&self) -> &Utf8CStr {
        self.worker.path
    }
}
// In-memory tree of everything modules want to overlay onto the real
// filesystem, merged across all modules before being committed as mounts.
enum FsNode {
    Directory { children: FsNodeMap },
    // Regular file; src is the path to bind mount from (under module_mnt).
    File { src: Utf8CString },
    Symlink { target: Utf8CString },
    // Applet symlink pointing at the injected magisk/magiskpolicy binaries.
    MagiskLink,
    // Char-device whiteout marker: hide the corresponding real entry.
    Whiteout,
}
impl FsNode {
    // Fresh empty directory node.
    fn new_dir() -> FsNode {
        FsNode::Directory {
            children: BTreeMap::new(),
        }
    }

    // Walk one module's directory tree (paths.module()) and merge its entries
    // into this node. First module to claim a name wins (or_insert_with).
    // No-op when called on a non-directory node.
    fn collect(&mut self, mut paths: ModulePaths) -> LoggedResult<()> {
        let FsNode::Directory { children } = self else {
            return Ok(());
        };
        let mut dir = Directory::open(paths.module())?;

        while let Some(entry) = dir.read()? {
            let entry_paths = paths.append(entry.name());
            let path = entry_paths.module();
            if entry.is_dir() {
                let node = children
                    .entry(entry.name().to_string())
                    .or_insert_with(FsNode::new_dir);
                node.collect(entry_paths)?;
            } else if entry.is_symlink() {
                // Read the link and store its target
                let mut link = cstr::buf::default();
                path.read_link(&mut link)?;
                children
                    .entry(entry.name().to_string())
                    .or_insert_with(|| FsNode::Symlink {
                        target: link.to_owned(),
                    });
            } else {
                // A char device with the whiteout attribute marks a removal.
                if entry.is_char_device() {
                    let attr = path.get_attr()?;
                    if attr.is_whiteout() {
                        children
                            .entry(entry.name().to_string())
                            .or_insert_with(|| FsNode::Whiteout);
                        continue;
                    }
                }
                // Make the module file's metadata match the real file it
                // will shadow, when one exists.
                if entry_paths.real().exists() {
                    clone_attr(entry_paths.real(), path)?;
                }
                children
                    .entry(entry.name().to_string())
                    .or_insert_with(|| FsNode::File {
                        // Make sure to mount from module_mnt, not module
                        src: entry_paths.module_mnt().to_owned(),
                    });
            }
        }

        Ok(())
    }

    // The parent node has to be tmpfs if:
    // - Target does not exist
    // - Source or target is a symlink (since we cannot bind mount symlink)
    // - Source is whiteout (used for removal)
    fn parent_should_be_tmpfs(&self, target_path: &Utf8CStr) -> bool {
        match self {
            FsNode::Directory { .. } | FsNode::File { .. } => {
                if let Ok(attr) = target_path.get_attr() {
                    attr.is_symlink()
                } else {
                    true
                }
            }
            _ => true,
        }
    }

    // Mutable access to this node's children, if it is a directory.
    fn children(&mut self) -> Option<&mut FsNodeMap> {
        match self {
            FsNode::Directory { children } => Some(children),
            _ => None,
        }
    }

    // Apply this (sub)tree to the real filesystem. Plain files are bind
    // mounted in place; a directory falls back to a tmpfs mirror
    // (commit_tmpfs) as soon as any child cannot be expressed as a plain
    // bind mount. The filesystem root itself is never replaced by tmpfs —
    // unsupported children there are dropped with a warning.
    fn commit(&mut self, mut path: MountPaths, is_root_dir: bool) -> LoggedResult<()> {
        match self {
            FsNode::Directory { children } => {
                let mut is_tmpfs = false;

                // First determine whether tmpfs is required
                children.retain(|name, node| {
                    if name == ".replace" {
                        return if is_root_dir {
                            warn!("Unable to replace '{}', ignore request", path.real());
                            false
                        } else {
                            is_tmpfs = true;
                            true
                        };
                    }

                    let path = path.append(name);
                    if node.parent_should_be_tmpfs(path.real()) {
                        if is_root_dir {
                            // Ignore the unsupported child node
                            warn!("Unable to add '{}', skipped", path.real());
                            return false;
                        }
                        is_tmpfs = true;
                    }
                    true
                });

                if is_tmpfs {
                    self.commit_tmpfs(path.reborrow())?;
                    // Transitioning from non-tmpfs to tmpfs, we need to actually mount the
                    // worker dir to dest after all children are committed.
                    bind_mount("move", path.worker(), path.real(), true);
                } else {
                    for (name, node) in children {
                        let path = path.append(name);
                        node.commit(path, false)?;
                    }
                }
            }
            FsNode::File { src } => {
                bind_mount("mount", src, path.real(), false);
            }
            _ => {
                // Symlink/MagiskLink/Whiteout require a tmpfs parent; reaching
                // here means commit() was called on them directly.
                error!("Unable to handle '{}': parent should be tmpfs", path.real());
            }
        }
        Ok(())
    }

    // Build this (sub)tree inside the tmpfs worker directory: mirror real
    // entries that are not overridden, then materialize module entries.
    // The caller (commit) later moves the finished worker dir onto the
    // real path in a single recursive bind mount.
    fn commit_tmpfs(&mut self, mut path: MountPaths) -> LoggedResult<()> {
        match self {
            FsNode::Directory { children } => {
                path.worker().mkdirs(0o000)?;
                // Copy attributes from the real dir, or from the worker's
                // parent when the real dir does not exist.
                if path.real().exists() {
                    clone_attr(path.real(), path.worker())?;
                } else if let Some(p) = path.worker().parent_dir() {
                    let parent = Utf8CString::from(p);
                    clone_attr(&parent, path.worker())?;
                }

                // Check whether a file named '.replace' exists
                if let Some(FsNode::File { src }) = children.remove(".replace")
                    && let Some(replace_dir) = src.parent_dir()
                {
                    for (name, node) in children {
                        let path = path.append(name);
                        match node {
                            FsNode::Directory { .. } => {
                                // For replace, we don't need to traverse any deeper for mirroring.
                                // We can simply just bind mount the module dir to worker dir.
                                let src = Utf8CString::from(replace_dir).join_path(name);
                                mount_dummy("mount", &src, path.worker(), true)?;
                            }
                            _ => node.commit_tmpfs(path)?,
                        }
                    }
                    // If performing replace, we skip mirroring
                    return Ok(());
                }

                // Traverse the real directory and mount mirror files
                if let Ok(mut dir) = Directory::open(path.real()) {
                    while let Ok(Some(entry)) = dir.read() {
                        if children.contains_key(entry.name().as_str()) {
                            // Should not be mirrored, next
                            continue;
                        }
                        let path = path.append(entry.name());
                        if entry.is_dir() {
                            // At the first glance, it looks like we can directly mount the
                            // real dir to worker dir as mirror. However, this should NOT be done,
                            // because init will track these mounts with dev.mnt, causing issues.
                            // We unfortunately have to traverse recursively for mirroring.
                            FsNode::new_dir().commit_tmpfs(path)?;
                        } else if entry.is_symlink() {
                            let mut link = cstr::buf::default();
                            entry.read_link(&mut link).log_ok();
                            FsNode::Symlink {
                                target: link.to_owned(),
                            }
                            .commit_tmpfs(path)?;
                        } else {
                            // Mount the mirror file
                            mount_dummy("mirror", path.real(), path.worker(), false)?;
                        }
                    }
                }

                // Finally, commit children
                for (name, node) in children {
                    let path = path.append(name);
                    node.commit_tmpfs(path)?;
                }
            }
            FsNode::File { src } => {
                mount_dummy("mount", src, path.worker(), false)?;
            }
            FsNode::Symlink { target } => {
                module_log!("mklink", path.worker(), target);
                path.worker().create_symlink_to(target)?;
                if path.real().exists() {
                    clone_attr(path.real(), path.worker())?;
                }
            }
            FsNode::MagiskLink => {
                // supolicy maps to magiskpolicy; every other applet name
                // maps to the main magisk binary.
                if let Some(name) = path.real().file_name()
                    && name == "supolicy"
                {
                    module_log!("mklink", path.worker(), "./magiskpolicy");
                    path.worker().create_symlink_to(cstr!("./magiskpolicy"))?;
                } else {
                    module_log!("mklink", path.worker(), "./magisk");
                    path.worker().create_symlink_to(cstr!("./magisk"))?;
                }
            }
            FsNode::Whiteout => {
                // Deletion == simply not recreating the entry in the tmpfs.
                module_log!("delete", path.real(), "null");
            }
        }
        Ok(())
    }
}
/// Return the current PATH environment variable as a `String`.
/// Yields an empty string when PATH is unset or not valid UTF-8.
fn get_path_env() -> String {
    match std::env::var_os("PATH") {
        Some(raw) => raw.into_string().unwrap_or_default(),
        None => String::new(),
    }
}
/// Inject Magisk binaries and applet symlinks into the virtual system tree.
///
/// Scans PATH for a suitable directory on a supported read-only partition,
/// preferring one that already contains an existing `su` binary (so it gets
/// overridden), otherwise the candidate with the fewest files (to minimize
/// mirroring work). Falls back to /system/bin if nothing matches.
fn inject_magisk_bins(system: &mut FsNode, is_emulator: bool) {
    // Insert magisk, magiskpolicy, and the applet symlinks into `children`
    fn inject(children: &mut FsNodeMap) {
        let mut path = cstr::buf::default().join_path(get_magisk_tmp());
        // Inject binaries
        let len = path.len();
        path.append_path("magisk");
        children.insert(
            "magisk".to_string(),
            FsNode::File {
                src: path.to_owned(),
            },
        );
        // Rewind the buffer to $MAGISKTMP before appending the next name
        path.truncate(len);
        path.append_path("magiskpolicy");
        children.insert(
            "magiskpolicy".to_string(),
            FsNode::File {
                src: path.to_owned(),
            },
        );
        // Inject applet symlinks
        children.insert("su".to_string(), FsNode::MagiskLink);
        children.insert("resetprop".to_string(), FsNode::MagiskLink);
        children.insert("supolicy".to_string(), FsNode::MagiskLink);
    }
    // Strip /system prefix to insert correct node
    fn strip_system_prefix(orig_item: &str) -> String {
        match orig_item.strip_prefix("/system/") {
            Some(rest) => format!("/{rest}"),
            None => orig_item.to_string(),
        }
    }
    let path_env = get_path_env();
    // (stripped_path, file_count) pairs; lower count = better candidate
    let mut candidates = vec![];
    for orig_item in path_env.split(':') {
        // Filter non-suitable paths
        if !MAGISK_BIN_INJECT_PARTITIONS
            .iter()
            .any(|p| orig_item.starts_with(p.as_str()))
        {
            continue;
        }
        // Flatten apex path is not suitable too
        if orig_item.starts_with("/system/apex/") {
            continue;
        }
        // We want to keep /system/xbin/su on emulators (for debugging)
        if is_emulator && orig_item.starts_with("/system/xbin") {
            continue;
        }
        // Override existing su first
        let su_path = Utf8CString::from(format!("{orig_item}/su"));
        if su_path.exists() {
            let item = strip_system_prefix(orig_item);
            // Count 0 guarantees this candidate sorts first; stop scanning
            candidates.push((item, 0));
            break;
        }
        let path = Utf8CString::from(orig_item);
        // 0x0001 is the world-execute permission bit; the dir must be
        // searchable by everyone to be a useful PATH entry
        if let Ok(attr) = path.get_attr()
            && (attr.st.st_mode & 0x0001) != 0
            && let Ok(mut dir) = Directory::open(&path)
        {
            let mut count = 0;
            if dir
                .pre_order_walk(|e| {
                    if e.is_file() {
                        count += 1;
                    }
                    Ok(WalkResult::Continue)
                })
                .is_err()
            {
                // Skip, we cannot ensure the result is correct
                continue;
            }
            let item = strip_system_prefix(orig_item);
            candidates.push((item, count));
        }
    }
    // Sort by amount of files
    candidates.sort_by_key(|&(_, count)| count);
    'path_loop: for candidate in candidates {
        let components = Path::new(&candidate.0)
            .components()
            .filter(|c| matches!(c, Component::Normal(_)))
            .filter_map(|c| c.as_os_str().to_str());
        let mut curr = match system {
            FsNode::Directory { children } => children,
            _ => continue,
        };
        // Walk (creating as needed) the directory chain inside the tree;
        // bail to the next candidate if a non-directory node is in the way
        for dir in components {
            let node = curr.entry(dir.to_owned()).or_insert_with(FsNode::new_dir);
            match node {
                FsNode::Directory { children } => curr = children,
                _ => continue 'path_loop,
            }
        }
        // Found a suitable path, done
        inject(curr);
        return;
    }
    // If still not found, directly inject into /system/bin
    let node = system
        .children()
        .map(|c| c.entry("bin".to_string()).or_insert_with(FsNode::new_dir));
    if let Some(FsNode::Directory { children }) = node {
        inject(children)
    }
}
/// Inject the zygisk loader library (under the file name `name`) into the
/// virtual /system/lib and/or /system/lib64 directories, matching the ABIs
/// the device supports (detected via presence of linker/linker64).
fn inject_zygisk_bins(name: &str, system: &mut FsNode) {
    // On a 64-bit build, 32-bit support is indicated by /system/bin/linker
    #[cfg(target_pointer_width = "64")]
    let has_32_bit = cstr!("/system/bin/linker").exists();
    #[cfg(target_pointer_width = "32")]
    let has_32_bit = true;
    if has_32_bit {
        let lib = system
            .children()
            .map(|c| c.entry("lib".to_string()).or_insert_with(FsNode::new_dir));
        if let Some(FsNode::Directory { children }) = lib {
            let mut bin_path = cstr::buf::default().join_path(get_magisk_tmp());
            #[cfg(target_pointer_width = "64")]
            bin_path.append_path("magisk32");
            #[cfg(target_pointer_width = "32")]
            bin_path.append_path("magisk");
            // There are some devices that announce ABI as 64 bit only, but ship with linker
            // because they make use of a special 32 bit to 64 bit translator (such as tango).
            // In this case, magisk32 does not exist, so inserting it will cause bind mount
            // failure and affect module mount. Native bridge injection does not support these
            // kind of translators anyway, so simply check if magisk32 exists here.
            if bin_path.exists() {
                children.insert(
                    name.to_string(),
                    FsNode::File {
                        src: bin_path.to_owned(),
                    },
                );
            }
        }
    }
    // 64-bit support is indicated by /system/bin/linker64
    #[cfg(target_pointer_width = "64")]
    if cstr!("/system/bin/linker64").exists() {
        let lib64 = system
            .children()
            .map(|c| c.entry("lib64".to_string()).or_insert_with(FsNode::new_dir));
        if let Some(FsNode::Directory { children }) = lib64 {
            let bin_path = cstr::buf::default()
                .join_path(get_magisk_tmp())
                .join_path("magisk");
            children.insert(
                name.to_string(),
                FsNode::File {
                    src: bin_path.to_owned(),
                },
            );
        }
    }
}
/// Move staged module upgrades from MODULEUPGRADE into MODULEROOT.
///
/// An existing module directory with the same name is removed first; if the
/// old module was disabled, the new one is re-flagged with `disable` so the
/// user's choice survives the upgrade. The staging dir is removed at the end.
fn upgrade_modules() -> LoggedResult<()> {
    // silent(): a missing staging dir is the common case, not an error
    let mut upgrade = Directory::open(cstr!(MODULEUPGRADE)).silent()?;
    let root = Directory::open(cstr!(MODULEROOT))?;
    while let Some(e) = upgrade.read()? {
        if !e.is_dir() {
            continue;
        }
        let module_name = e.name();
        let mut disable = false;
        // Cleanup old module if exists
        if root.contains_path(module_name) {
            let module = root.open_as_dir_at(module_name)?;
            // If the old module is disabled, we need to also disable the new one
            disable = module.contains_path(cstr!("disable"));
            module.remove_all()?;
            root.unlink_at(module_name, UnlinkatFlags::RemoveDir)?;
        }
        info!("Upgrade / New module: {module_name}");
        e.rename_to(&root, module_name)?;
        if disable {
            // Re-create the disable flag inside the freshly moved module dir
            let path = cstr::buf::default()
                .join_path(module_name)
                .join_path("disable");
            let _ = root.open_as_file_at(
                &path,
                OFlag::O_RDONLY | OFlag::O_CREAT | OFlag::O_CLOEXEC,
                0,
            )?;
        }
    }
    upgrade.remove_all()?;
    cstr!(MODULEUPGRADE).remove()?;
    Ok(())
}
/// Invoke `func` for every module directory under MODULEROOT.
/// Non-directory entries and the special ".core" directory are skipped.
fn for_each_module(mut func: impl FnMut(&DirEntry) -> LoggedResult<()>) -> LoggedResult<()> {
    let mut root = Directory::open(cstr!(MODULEROOT))?;
    loop {
        match root.read()? {
            None => return Ok(()),
            Some(ref entry) => {
                if entry.is_dir() && entry.name() != ".core" {
                    func(entry)?;
                }
            }
        }
    }
}
/// Create a "disable" flag file inside every module directory,
/// effectively disabling all installed modules.
pub fn disable_modules() {
    let result = for_each_module(|e| {
        e.open_as_dir()?.open_as_file_at(
            cstr!("disable"),
            OFlag::O_RDONLY | OFlag::O_CREAT | OFlag::O_CLOEXEC,
            0,
        )?;
        Ok(())
    });
    result.log_ok();
}
/// Execute a module's bundled uninstall.sh script.
fn run_uninstall_script(module_name: &Utf8CStr) {
    let buf = cstr::buf::default();
    let script_path = buf
        .join_path(MODULEROOT)
        .join_path(module_name)
        .join_path("uninstall.sh");
    exec_script(&script_path);
}
/// Run each module's uninstall script (when present), then wipe the
/// entire module root directory.
pub fn remove_modules() {
    for_each_module(|e| {
        if e.open_as_dir()?.contains_path(cstr!("uninstall.sh")) {
            run_uninstall_script(e.name());
        }
        Ok(())
    })
    .log_ok();
    cstr!(MODULEROOT).remove_all().log_ok();
}
/// Scan MODULEROOT and build the list of active modules.
///
/// Handles pending removals (`remove` flag + uninstall.sh), clears one-shot
/// `update` flags, and skips disabled modules. When zygisk is enabled and
/// `open_zygisk` is true, also opens fds to the per-ABI zygisk libraries and
/// copies them into anonymous memfds (presumably so the backing module files
/// are no longer pinned — see convert_to_memfd below).
fn collect_modules(zygisk_enabled: bool, open_zygisk: bool) -> Vec<ModuleInfo> {
    let mut modules = Vec::new();
    #[allow(unused_mut)] // It's possible that z32 and z64 are unused
    for_each_module(|e| {
        let name = e.name();
        let dir = e.open_as_dir()?;
        // 'remove' flag: run uninstall.sh (if any) then delete the module
        if dir.contains_path(cstr!("remove")) {
            info!("{name}: remove");
            if dir.contains_path(cstr!("uninstall.sh")) {
                run_uninstall_script(name);
            }
            dir.remove_all()?;
            e.unlink()?;
            return Ok(());
        }
        // The 'update' flag is single-use; always clear it
        dir.unlink_at(cstr!("update"), UnlinkatFlags::NoRemoveDir)
            .ok();
        if dir.contains_path(cstr!("disable")) {
            return Ok(());
        }
        // -1 means "no fd opened" for the zygisk library fds
        let mut z32 = -1;
        let mut z64 = -1;
        let is_zygisk = dir.contains_path(cstr!("zygisk"));
        if zygisk_enabled {
            // Riru and its modules are not compatible with zygisk
            if name == "riru-core" || dir.contains_path(cstr!("riru")) {
                return Ok(());
            }
            // Open a module file read-only, returning -1 on failure
            fn open_fd_safe(dir: &Directory, name: &Utf8CStr) -> i32 {
                dir.open_as_file_at(name, OFlag::O_RDONLY | OFlag::O_CLOEXEC, 0)
                    .log()
                    .map(IntoRawFd::into_raw_fd)
                    .unwrap_or(-1)
            }
            if open_zygisk && is_zygisk {
                // Open the libraries matching this build's target arch
                #[cfg(target_arch = "arm")]
                {
                    z32 = open_fd_safe(&dir, cstr!("zygisk/armeabi-v7a.so"));
                }
                #[cfg(target_arch = "aarch64")]
                {
                    z32 = open_fd_safe(&dir, cstr!("zygisk/armeabi-v7a.so"));
                    z64 = open_fd_safe(&dir, cstr!("zygisk/arm64-v8a.so"));
                }
                #[cfg(target_arch = "x86")]
                {
                    z32 = open_fd_safe(&dir, cstr!("zygisk/x86.so"));
                }
                #[cfg(target_arch = "x86_64")]
                {
                    z32 = open_fd_safe(&dir, cstr!("zygisk/x86.so"));
                    z64 = open_fd_safe(&dir, cstr!("zygisk/x86_64.so"));
                }
                #[cfg(target_arch = "riscv64")]
                {
                    z64 = open_fd_safe(&dir, cstr!("zygisk/riscv64.so"));
                }
                dir.unlink_at(cstr!("zygisk/unloaded"), UnlinkatFlags::NoRemoveDir)
                    .ok();
            }
        } else {
            // Ignore zygisk modules when zygisk is not enabled
            if is_zygisk {
                info!("{name}: ignore");
                return Ok(());
            }
        }
        modules.push(ModuleInfo {
            name: name.to_string(),
            z32,
            z64,
        });
        Ok(())
    })
    .log_ok();
    if zygisk_enabled && open_zygisk {
        let mut use_memfd = true;
        // Copy a file fd's contents into an anonymous memfd (named
        // "jit-cache" to be inconspicuous). Returns the original fd when
        // memfd creation/copy fails, and stops attempting memfd entirely
        // after the first failure.
        let mut convert_to_memfd = |fd: i32| -> i32 {
            if fd < 0 {
                return fd;
            }
            if use_memfd {
                let memfd = unsafe {
                    libc::syscall(
                        libc::SYS_memfd_create,
                        raw_cstr!("jit-cache"),
                        libc::MFD_CLOEXEC,
                    ) as i32
                };
                if memfd >= 0 {
                    unsafe {
                        // On copy failure keep the original fd; otherwise
                        // the original can be closed and replaced
                        if libc::sendfile(memfd, fd, ptr::null_mut(), i32::MAX as usize) < 0 {
                            libc::close(memfd);
                        } else {
                            libc::close(fd);
                            return memfd;
                        }
                    }
                }
                // Some error occurred, don't try again
                use_memfd = false;
            }
            fd
        };
        modules.iter_mut().for_each(|m| {
            m.z32 = convert_to_memfd(m.z32);
            m.z64 = convert_to_memfd(m.z64);
        });
    }
    modules
}
impl MagiskD {
    /// Boot-time module handling: set up the module mount, apply staged
    /// upgrades, run post-fs-data scripts, and mount all module files.
    pub fn handle_modules(&self) {
        setup_module_mount();
        upgrade_modules().ok();
        let zygisk = self.zygisk_enabled.load(Ordering::Acquire);
        // First pass: do not open zygisk lib fds yet
        let modules = collect_modules(zygisk, false);
        exec_module_scripts(cstr!("post-fs-data"), &modules);
        // Recollect modules (module scripts could remove itself)
        let modules = collect_modules(zygisk, true);
        self.apply_modules(&modules);
        self.module_list.set(modules).ok();
    }
    /// Merge all module /system trees into a single virtual filesystem
    /// tree, inject Magisk/zygisk binaries, split out secondary read-only
    /// partitions, and commit the resulting mount operations.
    fn apply_modules(&self, module_list: &[ModuleInfo]) {
        let mut system = FsNode::new_dir();
        // Create buffers for paths
        let mut buf1 = cstr::buf::dynamic(256);
        let mut buf2 = cstr::buf::dynamic(256);
        let mut buf3 = cstr::buf::dynamic(256);
        let mut paths = ModulePaths::new(&mut buf1, &mut buf2, &mut buf3);
        // Step 1: Create virtual filesystem tree
        //
        // In this step, there is zero logic applied during tree construction; we simply collect and
        // record the union of all module filesystem trees under each of their /system directory.
        for info in module_list {
            let mut paths = paths.set_module(&info.name);
            // Read props
            let prop = paths.append("system.prop");
            if prop.module().exists() {
                load_prop_file(prop.module());
            }
            drop(prop);
            // Check whether skip mounting
            let skip = paths.append("skip_mount");
            if skip.module().exists() {
                continue;
            }
            drop(skip);
            // Double check whether the system folder exists
            let sys = paths.append("system");
            if sys.module().exists() {
                info!("{}: loading module files", &info.name);
                system.collect(sys).log_ok();
            }
        }
        // Step 2: Inject custom files
        //
        // Magisk provides some built-in functionality that requires augmenting the filesystem.
        // We expose several cmdline tools (e.g. su) into PATH, and the zygisk shared library
        // has to also be added into the default LD_LIBRARY_PATH for code injection.
        // We directly inject file nodes into the virtual filesystem tree we built in the previous
        // step, treating Magisk just like a special "module".
        if get_magisk_tmp() != "/sbin" || get_path_env().split(":").all(|s| s != "/sbin") {
            inject_magisk_bins(&mut system, self.is_emulator);
        }
        // Handle zygisk
        if self.zygisk_enabled.load(Ordering::Acquire) {
            let mut zygisk = self.zygisk.lock().unwrap();
            zygisk.set_prop();
            inject_zygisk_bins(&zygisk.lib_name, &mut system);
        }
        // Step 3: Extract all supported read-only partition roots
        //
        // For simplicity and backwards compatibility on older Android versions, when constructing
        // Magisk modules, we always assume that there is only a single read-only partition mounted
        // at /system. However, on modern Android there are actually multiple read-only partitions
        // mounted at their respective paths. We need to extract these subtrees out of the main
        // tree and treat them as individual trees.
        let mut roots = BTreeMap::new(); /* mapOf(partition_name -> FsNode) */
        if let FsNode::Directory { children } = &mut system {
            for dir in SECONDARY_READ_ONLY_PARTITIONS {
                // Only treat these nodes as root iff it is actually a directory in rootdir
                if let Ok(attr) = dir.get_attr()
                    && attr.is_dir()
                {
                    let name = dir.trim_start_matches('/');
                    if let Some(root) = children.remove(name) {
                        roots.insert(name, root);
                    }
                }
            }
        }
        roots.insert("system", system);
        drop(paths);
        let mut paths = MountPaths::new(&mut buf1, &mut buf2);
        for (dir, mut root) in roots {
            // Step 4: Convert virtual filesystem tree into concrete operations
            //
            // Compare the virtual filesystem tree we constructed against the real filesystem
            // structure on-device to generate a series of "operations".
            // The "core" of the logic is to decide which directories need to be rebuilt in the
            // tmpfs worker directory, and real sub-nodes need to be mirrored inside it.
            let paths = paths.append(dir);
            root.commit(paths, true).log_ok();
        }
    }
}

251
native/src/core/mount.rs Normal file
View file

@ -0,0 +1,251 @@
use crate::consts::{MODULEMNT, MODULEROOT, PREINITDEV, PREINITMIRR, WORKERDIR};
use crate::ffi::{get_magisk_tmp, resolve_preinit_dir, switch_mnt_ns};
use crate::resetprop::get_prop;
use base::{
FsPathBuilder, LibcReturn, LoggedResult, MountInfo, ResultExt, Utf8CStr, Utf8CStrBuf, cstr,
debug, info, libc, parse_mount_info, warn,
};
use libc::{c_uint, dev_t};
use nix::mount::MsFlags;
use nix::sys::stat::{Mode, SFlag, mknod};
use num_traits::AsPrimitive;
use std::cmp::Ordering::{Greater, Less};
use std::path::{Path, PathBuf};
/// Locate the preinit partition and expose it via a symlink at
/// $MAGISKTMP/PREINITMIRR.
///
/// Reads the partition's device number from the $MAGISKTMP/PREINITDEV block
/// device node (created by find_preinit_device), then searches mountinfo
/// for an existing rw mount of that device rather than mounting it again.
pub fn setup_preinit_dir() {
    let magisk_tmp = get_magisk_tmp();
    // Mount preinit directory
    let dev_path = cstr::buf::new::<64>()
        .join_path(magisk_tmp)
        .join_path(PREINITDEV);
    // Only proceed when the node exists and is actually a block device
    if let Ok(attr) = dev_path.get_attr()
        && attr.st.st_mode & libc::S_IFMT as c_uint == libc::S_IFBLK.as_()
    {
        // DO NOT mount the block device directly, as we do not know the flags and configs
        // to properly mount the partition; mounting block devices directly as rw could cause
        // crashes if the filesystem driver is crap (e.g. some broken F2FS drivers).
        // What we do instead is to scan through the current mountinfo and find a pre-existing
        // mount point mounting our desired partition, and then bind mount the target folder.
        let preinit_dev = attr.st.st_rdev;
        let mnt_path = cstr::buf::default()
            .join_path(magisk_tmp)
            .join_path(PREINITMIRR);
        for info in parse_mount_info("self") {
            if info.root == "/" && info.device == preinit_dev {
                if !info.fs_option.split(',').any(|s| s == "rw") {
                    // Only care about rw mounts
                    continue;
                }
                let mut target = info.target;
                let target = Utf8CStr::from_string(&mut target);
                let mut preinit_dir = resolve_preinit_dir(target);
                let preinit_dir = Utf8CStr::from_string(&mut preinit_dir);
                // Create the preinit dir and point PREINITMIRR at it
                let r: LoggedResult<()> = try {
                    preinit_dir.mkdir(0o700)?;
                    mnt_path.mkdirs(0o755)?;
                    mnt_path.remove().ok();
                    mnt_path.create_symlink_to(preinit_dir)?;
                };
                if r.is_ok() {
                    info!("* Found preinit dir: {}", preinit_dir);
                    return;
                }
            }
        }
    }
    warn!("mount: preinit dir not found");
}
/// Bind mount MODULEROOT onto $MAGISKTMP/MODULEMNT and remount the mount
/// point read-only. The bind remount clears nosuid on the module root so
/// module binaries can run.
pub fn setup_module_mount() {
    let mount_point = cstr::buf::default()
        .join_path(get_magisk_tmp())
        .join_path(MODULEMNT);
    let result: LoggedResult<()> = try {
        mount_point.mkdir(0o755)?;
        cstr!(MODULEROOT).bind_mount_to(&mount_point, false)?;
        mount_point.remount_mount_point_flags(MsFlags::MS_RDONLY)?;
    };
    let _ = result;
}
/// Detach Magisk's module bind mount and the tmpfs worker directory.
pub fn clean_mounts() {
    let tmp = get_magisk_tmp();
    let mut path = cstr::buf::default();
    // Unmount $MAGISKTMP/MODULEMNT
    let module_mnt = path.append_path(tmp).append_path(MODULEMNT);
    module_mnt.unmount().log_ok();
    // Unmount $MAGISKTMP/WORKERDIR (made private first so the unmount
    // does not propagate)
    path.clear();
    let worker_dir = path.append_path(tmp).append_path(WORKERDIR);
    let result: LoggedResult<()> = try {
        worker_dir.set_mount_private(true)?;
        worker_dir.unmount()?;
    };
    let _ = result;
}
// when partitions have the same fs type, the order is:
// - data: it has sufficient space and can be safely written
// - cache: size is limited, but still can be safely written
// - metadata: size is limited, and it might cause unexpected behavior if written
// - persist: it's the last resort, as it's dangerous to write to it
// Ord is derived from declaration order, so Data < Cache < Metadata < Persist
// implements exactly the preference described in the comment above.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum PartId {
    Data,     // /data: sufficient space, safe to write
    Cache,    // /cache: limited size, still safe to write
    Metadata, // /metadata: limited, writing may cause unexpected behavior
    Persist,  // /persist: last resort, dangerous to write
}
// Device encryption state, derived from ro.crypto.* system properties
// in find_preinit_device below.
enum EncryptType {
    None,     // ro.crypto.state != "encrypted"
    Block,    // ro.crypto.type == "block" (full-disk encryption)
    File,     // file-based encryption without metadata encryption
    Metadata, // ro.crypto.metadata.enabled == "true"
}
/// Pick the best partition to host the preinit directory and return its
/// block device name (the final path component of the mount source), or an
/// empty string when no partition qualifies.
///
/// When running as root with MAGISKTMP set, also creates the PREINITMIRR
/// symlink and (when MAKEDEV is set) the PREINITDEV block device node.
pub fn find_preinit_device() -> String {
    // Determine encryption configuration from system properties
    let encrypt_type = if get_prop(cstr!("ro.crypto.state")) != "encrypted" {
        EncryptType::None
    } else if get_prop(cstr!("ro.crypto.type")) == "block" {
        EncryptType::Block
    } else if get_prop(cstr!("ro.crypto.metadata.enabled")) == "true" {
        EncryptType::Metadata
    } else {
        EncryptType::File
    };
    // Candidates: rw ext4/f2fs mounts of real block devices
    // (by-name/block source paths, excluding dm-* mappings)
    let mut matched_info = parse_mount_info("self")
        .into_iter()
        .filter_map(|info| {
            if info.root != "/" || !info.source.starts_with('/') || info.source.contains("/dm-") {
                return None;
            }
            match info.fs_type.as_str() {
                "ext4" | "f2fs" => (),
                _ => return None,
            }
            if !info.fs_option.split(',').any(|s| s == "rw") {
                return None;
            }
            if let Some(path) = Path::new(&info.source).parent() {
                if !path.ends_with("by-name") && !path.ends_with("block") {
                    return None;
                }
            } else {
                return None;
            }
            // take data iff it's not encrypted or file-based encrypted without metadata
            // other partitions are always taken
            match info.target.as_str() {
                "/persist" | "/mnt/vendor/persist" => Some((PartId::Persist, info)),
                "/metadata" => Some((PartId::Metadata, info)),
                "/cache" => Some((PartId::Cache, info)),
                "/data" => Some((PartId::Data, info))
                    .take_if(|_| matches!(encrypt_type, EncryptType::None | EncryptType::File)),
                _ => None,
            }
        })
        .collect::<Vec<_>>();
    if matched_info.is_empty() {
        return String::new();
    }
    // Partial sort: after this call, index 0 holds the preferred partition
    let (_, preinit_info, _) = matched_info.select_nth_unstable_by(
        0,
        |(ap, MountInfo { fs_type: at, .. }), (bp, MountInfo { fs_type: bt, .. })| match (
            ap,
            bp,
            at.as_str() == "ext4",
            bt.as_str() == "ext4",
        ) {
            // metadata is not affected by f2fs kernel bug
            (PartId::Metadata, _, _, true) | (_, PartId::Metadata, true, _) => ap.cmp(bp),
            // otherwise, take ext4 f2fs because f2fs has a kernel bug that causes kernel panic
            (_, _, true, false) => Less,
            (_, _, false, true) => Greater,
            // if both has the same fs type, compare the mount point
            _ => ap.cmp(bp),
        },
    );
    let info = &preinit_info.1;
    let mut target = info.target.clone();
    let mut preinit_dir = resolve_preinit_dir(Utf8CStr::from_string(&mut target));
    // Only root (with a non-empty MAGISKTMP) can set up the mirror/device
    if unsafe { libc::getuid() } == 0
        && let Ok(tmp) = std::env::var("MAGISKTMP")
        && !tmp.is_empty()
    {
        let mut buf = cstr::buf::default();
        let mirror_dir = buf.append_path(&tmp).append_path(PREINITMIRR);
        let preinit_dir = Utf8CStr::from_string(&mut preinit_dir);
        let _: LoggedResult<()> = try {
            preinit_dir.mkdirs(0o700)?;
            mirror_dir.mkdirs(0o755)?;
            mirror_dir.unmount().ok();
            mirror_dir.remove().ok();
            mirror_dir.create_symlink_to(preinit_dir)?;
        };
        if std::env::var_os("MAKEDEV").is_some() {
            buf.clear();
            // Record the partition's device number as a block device node,
            // later consumed by setup_preinit_dir
            let dev_path = buf.append_path(&tmp).append_path(PREINITDEV);
            mknod(
                dev_path.as_utf8_cstr(),
                SFlag::S_IFBLK,
                Mode::from_bits_truncate(0o600),
                info.device as dev_t,
            )
            .check_os_err("mknod", Some(dev_path), None)
            .log_ok();
        }
    }
    // Return only the device name component of the source path
    Path::new(&info.source)
        .file_name()
        .unwrap()
        .to_str()
        .unwrap()
        .to_string()
}
/// Unmount all Magisk tmpfs and module-file mounts, optionally switching
/// into the mount namespace of `pid` first (for denylist handling).
pub fn revert_unmount(pid: i32) {
    if pid > 0 {
        if switch_mnt_ns(pid) != 0 {
            return;
        }
        debug!("denylist: handling PID=[{}]", pid);
    }
    // Collect every Magisk tmpfs mount and module-file bind mount
    let mut targets: Vec<String> = parse_mount_info("self")
        .into_iter()
        .filter(|info| info.source == "magisk" || info.root.starts_with("/adb/modules"))
        .map(|info| info.target)
        .collect();
    if targets.is_empty() {
        return;
    }
    // Sort so parents precede children, then drop any target nested under
    // a previously kept one (unmounting the parent covers it)
    targets.sort();
    let mut kept: Option<PathBuf> = None;
    targets.retain(|target| {
        if let Some(prefix) = &kept
            && Path::new(target).starts_with(prefix)
        {
            return false;
        }
        kept = Some(PathBuf::from(target.clone()));
        true
    });
    for mut target in targets {
        let target = Utf8CStr::from_string(&mut target);
        if target.unmount().is_ok() {
            debug!("denylist: Unmounted ({})", target);
        }
    }
}

513
native/src/core/package.rs Normal file
View file

@ -0,0 +1,513 @@
use crate::consts::{APP_PACKAGE_NAME, MAGISK_VER_CODE};
use crate::daemon::{AID_APP_END, AID_APP_START, AID_USER_OFFSET, MagiskD, to_app_id};
use crate::ffi::{DbEntryKey, get_magisk_tmp, install_apk, uninstall_pkg};
use base::WalkResult::{Abort, Continue, Skip};
use base::{
BufReadExt, Directory, FsPathBuilder, LoggedResult, ReadExt, ResultExt, Utf8CStrBuf,
Utf8CString, cstr, error, fd_get_attr, warn,
};
use bit_set::BitSet;
use nix::fcntl::OFlag;
use std::collections::BTreeMap;
use std::fs::File;
use std::io;
use std::io::{Cursor, Read, Seek, SeekFrom};
use std::os::fd::AsRawFd;
use std::time::Duration;
const EOCD_MAGIC: u32 = 0x06054B50;
const APK_SIGNING_BLOCK_MAGIC: [u8; 16] = *b"APK Sig Block 42";
const SIGNATURE_SCHEME_V2_MAGIC: u32 = 0x7109871A;
const PACKAGES_XML: &str = "/data/system/packages.xml";
// Build an io::Error (InvalidData) whose message is the given literal
// prefixed with "cert: "; used for all APK signature parsing failures below.
macro_rules! bad_apk {
    ($msg:literal) => {
        io::Error::new(io::ErrorKind::InvalidData, concat!("cert: ", $msg))
    };
}
/*
* A v2/v3 signed APK has the format as following
*
* +---------------+
* | zip content |
* +---------------+
* | signing block |
* +---------------+
* | central dir |
* +---------------+
* | EOCD |
* +---------------+
*
* Scan from end of file to find EOCD, and figure our way back to the
* offset of the signing block. Next, directly extract the certificate
* from the v2 signature block.
*
* All structures above are mostly just for documentation purpose.
*
* This method extracts the first certificate of the first signer
* within the APK v2 signature block.
*/
/// Extract the first certificate of the first signer from the APK's v2
/// signature block (see the format diagram above).
///
/// When `version >= 0`, the zip comment is additionally parsed for a
/// `versionCode` property and the function fails if it is below `version`.
/// Returns an empty Vec on any parsing failure (the error is logged).
fn read_certificate(apk: &mut File, version: i32) -> Vec<u8> {
    let res: io::Result<Vec<u8>> = try {
        let mut u32_val = 0u32;
        let mut u64_val = 0u64;
        // Find EOCD
        // The EOCD record ends with a u16 comment length followed by that
        // many comment bytes, so scanning backwards, a length field whose
        // value equals its distance from EOF marks a candidate EOCD.
        for i in 0u16.. {
            let mut comment_sz = 0u16;
            apk.seek(SeekFrom::End(-(size_of_val(&comment_sz) as i64) - i as i64))?;
            apk.read_pod(&mut comment_sz)?;
            if comment_sz == i {
                // Seek back over the fixed-size EOCD fields to its magic
                apk.seek(SeekFrom::Current(-22))?;
                let mut magic = 0u32;
                apk.read_pod(&mut magic)?;
                if magic == EOCD_MAGIC {
                    break;
                }
            }
            // Comment length is a u16; past 0xffff there is no valid EOCD
            if i == 0xffff {
                Err(bad_apk!("invalid APK format"))?;
            }
        }
        // We are now at EOCD + sizeof(magic)
        // Seek and read central_dir_off to find the start of the central directory
        let mut central_dir_off = 0u32;
        apk.seek(SeekFrom::Current(12))?;
        apk.read_pod(&mut central_dir_off)?;
        // Code for parse APK comment to get version code
        if version >= 0 {
            let mut comment_sz = 0u16;
            apk.read_pod(&mut comment_sz)?;
            let mut comment = vec![0u8; comment_sz as usize];
            apk.read_exact(&mut comment)?;
            let mut comment = Cursor::new(&comment);
            let mut apk_ver = 0;
            comment.for_each_prop(|k, v| {
                if k == "versionCode" {
                    apk_ver = v.parse::<i32>().unwrap_or(0);
                    false
                } else {
                    true
                }
            });
            if version > apk_ver {
                Err(bad_apk!("APK version too low"))?;
            }
        }
        // Next, find the start of the APK signing block
        apk.seek(SeekFrom::Start((central_dir_off - 24) as u64))?;
        apk.read_pod(&mut u64_val)?; // u64_value = block_sz_
        let mut magic = [0u8; 16];
        apk.read_exact(&mut magic)?;
        if magic != APK_SIGNING_BLOCK_MAGIC {
            Err(bad_apk!("invalid signing block magic"))?;
        }
        // The block size is recorded both at the start and the end of the
        // signing block; verify the two agree
        let mut signing_blk_sz = 0u64;
        apk.seek(SeekFrom::Current(
            -(u64_val as i64) - (size_of_val(&signing_blk_sz) as i64),
        ))?;
        apk.read_pod(&mut signing_blk_sz)?;
        if signing_blk_sz != u64_val {
            Err(bad_apk!("invalid signing block size"))?;
        }
        // Finally, we are now at the beginning of the id-value pair sequence
        loop {
            apk.read_pod(&mut u64_val)?; // id-value pair length
            if u64_val == signing_blk_sz {
                // Reached the trailing size field without finding a v2 block
                Err(bad_apk!("cannot find certificate"))?;
            }
            let mut id = 0u32;
            apk.read_pod(&mut id)?;
            if id == SIGNATURE_SCHEME_V2_MAGIC {
                // Skip [signer sequence length] + [1st signer length] + [signed data length]
                apk.seek(SeekFrom::Current((size_of_val(&u32_val) * 3) as i64))?;
                apk.read_pod(&mut u32_val)?; // digest sequence length
                apk.seek(SeekFrom::Current(u32_val as i64))?; // skip all digests
                apk.seek(SeekFrom::Current(size_of_val(&u32_val) as i64))?; // cert sequence length
                apk.read_pod(&mut u32_val)?; // 1st cert length
                let mut cert = vec![0; u32_val as usize];
                apk.read_exact(cert.as_mut())?;
                break cert;
            } else {
                // Skip this id-value pair
                apk.seek(SeekFrom::Current(
                    u64_val as i64 - (size_of_val(&id) as i64),
                ))?;
            }
        }
    };
    res.log().unwrap_or(vec![])
}
/// Search /data/app for the installed APK directory of package `pkg`.
///
/// Matches directories named "<pkg>-<suffix>", descending only into the
/// "~~<random>" session wrapper dirs used on modern Android. Returns the
/// path to `base.apk` inside the matched directory, or an empty string
/// when the package is not found.
fn find_apk_path(pkg: &str) -> LoggedResult<Utf8CString> {
    let mut buf = cstr::buf::default();
    Directory::open(cstr!("/data/app"))?.pre_order_walk(|e| {
        if !e.is_dir() {
            return Ok(Skip);
        }
        let name_bytes = e.name().as_bytes();
        // Use get() for the byte after the prefix: a directory named exactly
        // `pkg` would make a direct index `name_bytes[pkg.len()]` panic
        // with an out-of-bounds access.
        if name_bytes.starts_with(pkg.as_bytes()) && name_bytes.get(pkg.len()) == Some(&b'-') {
            // Found the APK path, we can abort now
            e.resolve_path(&mut buf)?;
            return Ok(Abort);
        }
        // Descend into "~~<random>" wrapper directories only
        if name_bytes.starts_with(b"~~") {
            return Ok(Continue);
        }
        Ok(Skip)
    })?;
    if !buf.is_empty() {
        buf.push_str("/base.apk");
    }
    Ok(buf.to_owned())
}
// Result of probing one manager APK candidate.
enum Status {
    Installed,    // APK found and passed certificate verification
    NotInstalled, // APK not present / unreadable
    CertMismatch, // APK present but certificate verification failed
}
/// Cached state for locating and verifying the Magisk manager app.
pub struct ManagerInfo {
    // Open fd of the stub APK preserved from $MAGISKTMP at boot, if any
    stub_apk_fd: Option<File>,
    // Certificate of the trusted Magisk app (read from the stub APK)
    trusted_cert: Vec<u8>,
    // App id of the repackaged (hidden) manager; -1 when unknown
    repackaged_app_id: i32,
    // Package name of the repackaged manager (empty when not repackaged)
    repackaged_pkg: String,
    // Certificate of the repackaged manager APK
    repackaged_cert: Vec<u8>,
    // Per-user tracked APK file, used to detect changes cheaply via ctime
    tracked_files: BTreeMap<i32, TrackedFile>,
}
impl Default for ManagerInfo {
fn default() -> Self {
ManagerInfo {
stub_apk_fd: None,
trusted_cert: Vec::new(),
repackaged_app_id: -1,
repackaged_pkg: String::new(),
repackaged_cert: Vec::new(),
tracked_files: BTreeMap::new(),
}
}
}
// A file path paired with the ctime observed when tracking began.
#[derive(Default)]
struct TrackedFile {
    // Path being tracked; an empty path means "track nothing"
    path: Utf8CString,
    // st_ctime (seconds + nanoseconds) captured when tracking started
    timestamp: Duration,
}
impl TrackedFile {
    /// Snapshot the ctime of `path`; on stat failure, fall back to an
    /// empty (never-matching) tracker.
    fn new(path: Utf8CString) -> TrackedFile {
        let Ok(attr) = path.get_attr() else {
            return TrackedFile::default();
        };
        TrackedFile {
            path,
            timestamp: Duration::new(attr.st.st_ctime as u64, attr.st.st_ctime_nsec as u32),
        }
    }
    /// True when the tracked file still exists with an unchanged ctime.
    fn is_same(&self) -> bool {
        if self.path.is_empty() {
            return false;
        }
        match self.path.get_attr() {
            Ok(attr) => {
                Duration::new(attr.st.st_ctime as u64, attr.st.st_ctime_nsec as u32)
                    == self.timestamp
            }
            Err(_) => false,
        }
    }
}
impl ManagerInfo {
    /// Verify the user's repackaged "dyn" APK located at
    /// <app_data>/<user>/<pkg>/dyn/current.apk against the trusted cert.
    /// On success, records the owning app id and tracks the APK file.
    fn check_dyn(&mut self, daemon: &MagiskD, user: i32, pkg: &str) -> Status {
        let apk = cstr::buf::default()
            .join_path(daemon.app_data_dir())
            .join_path_fmt(user)
            .join_path(pkg)
            .join_path("dyn")
            .join_path("current.apk");
        let uid: i32;
        let cert = match apk.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
            Ok(mut fd) => {
                // The file's owner uid identifies the app owning the dyn APK
                uid = fd_get_attr(fd.as_raw_fd())
                    .map(|attr| attr.st.st_uid as i32)
                    .unwrap_or(-1);
                read_certificate(&mut fd, MAGISK_VER_CODE)
            }
            Err(_) => {
                warn!("pkg: no dyn APK, ignore");
                return Status::NotInstalled;
            }
        };
        if cert.is_empty() || cert != self.trusted_cert {
            error!("pkg: dyn APK signature mismatch: {}", apk);
            // Mismatch is only enforced on signed release builds; other
            // builds log the error and fall through to Installed
            #[cfg(all(feature = "check-signature", not(debug_assertions)))]
            {
                return Status::CertMismatch;
            }
        }
        self.repackaged_app_id = to_app_id(uid);
        self.tracked_files
            .insert(user, TrackedFile::new(apk.to_owned()));
        Status::Installed
    }
    /// Verify a repackaged (hidden) manager installed as package `pkg`.
    /// Its certificate must match the one recorded for the current
    /// repackaged package; a mismatch triggers an uninstall.
    fn check_stub(&mut self, user: i32, pkg: &str) -> Status {
        let Ok(apk) = find_apk_path(pkg) else {
            return Status::NotInstalled;
        };
        let cert = match apk.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
            Ok(mut fd) => read_certificate(&mut fd, -1),
            Err(_) => return Status::NotInstalled,
        };
        if cert.is_empty() || (pkg == self.repackaged_pkg && cert != self.repackaged_cert) {
            error!("pkg: repackaged APK signature invalid: {}", apk);
            // NOTE(review): `apk` here is the APK file path, while check_orig
            // passes a package name to uninstall_pkg — confirm which form the
            // ffi uninstall_pkg actually expects.
            uninstall_pkg(&apk);
            return Status::CertMismatch;
        }
        self.repackaged_pkg.clear();
        self.repackaged_pkg.push_str(pkg);
        self.repackaged_cert = cert;
        self.tracked_files.insert(user, TrackedFile::new(apk));
        Status::Installed
    }
    /// Verify the original Magisk app (APP_PACKAGE_NAME) installation
    /// against the trusted certificate.
    fn check_orig(&mut self, user: i32) -> Status {
        let Ok(apk) = find_apk_path(APP_PACKAGE_NAME) else {
            return Status::NotInstalled;
        };
        let cert = match apk.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
            Ok(mut fd) => read_certificate(&mut fd, MAGISK_VER_CODE),
            Err(_) => return Status::NotInstalled,
        };
        if cert.is_empty() || cert != self.trusted_cert {
            error!("pkg: APK signature mismatch: {}", apk);
            // Mismatch is only enforced on signed release builds
            #[cfg(all(feature = "check-signature", not(debug_assertions)))]
            {
                uninstall_pkg(cstr!(APP_PACKAGE_NAME));
                return Status::CertMismatch;
            }
        }
        self.tracked_files.insert(user, TrackedFile::new(apk));
        Status::Installed
    }
    /// Copy the preserved stub APK to /data/stub.apk and hand it over for
    /// installation. No-op when no stub fd was preserved at boot.
    fn install_stub(&mut self) {
        if let Some(ref mut stub_fd) = self.stub_apk_fd {
            // Copy the stub APK
            let tmp_apk = cstr!("/data/stub.apk");
            let result: LoggedResult<()> = try {
                {
                    let mut tmp_apk_file = tmp_apk.create(
                        OFlag::O_WRONLY | OFlag::O_CREAT | OFlag::O_TRUNC | OFlag::O_CLOEXEC,
                        0o600,
                    )?;
                    io::copy(stub_fd, &mut tmp_apk_file)?;
                }
                // Seek the fd back to start
                stub_fd.seek(SeekFrom::Start(0))?;
            };
            if result.is_ok() {
                install_apk(tmp_apk);
            }
        }
    }
    /// Resolve the manager app for `user`, returning (uid, package_name),
    /// or (-1, "") when no verified manager exists. `install` requests
    /// installing the stub when nothing valid is found (never on emulators).
    ///
    /// Fast path: when the tracked file for this user is unchanged (same
    /// ctime), the previous verification result is reused without
    /// re-reading any APK.
    fn get_manager(&mut self, daemon: &MagiskD, user: i32, mut install: bool) -> (i32, &str) {
        let db_pkg = daemon.get_db_string(DbEntryKey::SuManager);
        // If database changed, always re-check files
        if db_pkg != self.repackaged_pkg {
            self.tracked_files.remove(&user);
        }
        if let Some(file) = self.tracked_files.get(&user)
            && file.is_same()
        {
            // no APK
            if &file.path == PACKAGES_XML {
                if install && !daemon.is_emulator {
                    self.install_stub();
                }
                return (-1, "");
            }
            // dyn APK is still the same
            if file.path.starts_with(daemon.app_data_dir().as_str()) {
                return (
                    user * AID_USER_OFFSET + self.repackaged_app_id,
                    &self.repackaged_pkg,
                );
            }
            // stub APK is still the same
            if !self.repackaged_pkg.is_empty() {
                return if matches!(
                    self.check_dyn(daemon, user, self.repackaged_pkg.clone().as_str()),
                    Status::Installed
                ) {
                    (
                        user * AID_USER_OFFSET + self.repackaged_app_id,
                        &self.repackaged_pkg,
                    )
                } else {
                    (-1, "")
                };
            }
            // orig APK is still the same
            let uid = daemon.get_package_uid(user, APP_PACKAGE_NAME);
            return if uid < 0 {
                (-1, "")
            } else {
                (uid, APP_PACKAGE_NAME)
            };
        }
        // Slow path: re-verify everything from scratch
        if !db_pkg.is_empty() {
            match self.check_stub(user, &db_pkg) {
                Status::Installed => {
                    return if matches!(self.check_dyn(daemon, user, &db_pkg), Status::Installed) {
                        (
                            user * AID_USER_OFFSET + self.repackaged_app_id,
                            &self.repackaged_pkg,
                        )
                    } else {
                        (-1, "")
                    };
                }
                Status::NotInstalled => {
                    daemon.rm_db_string(DbEntryKey::SuManager).ok();
                }
                Status::CertMismatch => {
                    install = true;
                    daemon.rm_db_string(DbEntryKey::SuManager).ok();
                }
            }
        }
        self.repackaged_pkg.clear();
        self.repackaged_cert.clear();
        match self.check_orig(user) {
            Status::Installed => {
                let uid = daemon.get_package_uid(user, APP_PACKAGE_NAME);
                return if uid < 0 {
                    (-1, "")
                } else {
                    (uid, APP_PACKAGE_NAME)
                };
            }
            Status::CertMismatch => install = true,
            Status::NotInstalled => {}
        }
        // If we cannot find any manager, track packages.xml for new package installs
        self.tracked_files
            .insert(user, TrackedFile::new(PACKAGES_XML.into()));
        if install && !daemon.is_emulator {
            self.install_stub();
        }
        (-1, "")
    }
}
impl MagiskD {
    /// Look up a package's uid from the ownership of its per-user data
    /// directory; -1 when the directory cannot be stat-ed.
    fn get_package_uid(&self, user: i32, pkg: &str) -> i32 {
        let path = cstr::buf::default()
            .join_path(self.app_data_dir())
            .join_path_fmt(user)
            .join_path(pkg);
        path.get_attr()
            .map(|attr| attr.st.st_uid as i32)
            .unwrap_or(-1)
    }
    /// Read the trusted certificate from $MAGISKTMP/stub.apk, keep the
    /// open fd for later stub installs, and remove the on-disk copy.
    pub fn preserve_stub_apk(&self) {
        let mut info = self.manager_info.lock().unwrap();
        let apk = cstr::buf::default()
            .join_path(get_magisk_tmp())
            .join_path("stub.apk");
        if let Ok(mut fd) = apk.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC) {
            info.trusted_cert = read_certificate(&mut fd, MAGISK_VER_CODE);
            // Seek the fd back to start
            fd.seek(SeekFrom::Start(0)).log_ok();
            info.stub_apk_fd = Some(fd);
        }
        apk.remove().log_ok();
    }
    /// Get the manager uid for `user` without triggering installation.
    pub fn get_manager_uid(&self, user: i32) -> i32 {
        let mut info = self.manager_info.lock().unwrap();
        let (uid, _) = info.get_manager(self, user, false);
        uid
    }
    /// Get the manager (uid, package name) for `user`, optionally
    /// installing the stub when no manager is found.
    pub fn get_manager(&self, user: i32, install: bool) -> (i32, String) {
        let mut info = self.manager_info.lock().unwrap();
        let (uid, pkg) = info.get_manager(self, user, install);
        (uid, pkg.to_string())
    }
    /// Ensure a manager exists for user 0, installing the stub if needed.
    pub fn ensure_manager(&self) {
        let mut info = self.manager_info.lock().unwrap();
        let _ = info.get_manager(self, 0, true);
    }
    // app_id = app_no + AID_APP_START
    // app_no range: [0, 9999]
    /// Collect the set of allocated app_no values by scanning the
    /// ownership of every per-user app data directory.
    pub fn get_app_no_list(&self) -> BitSet {
        let mut list = BitSet::new();
        let _: LoggedResult<()> = try {
            let mut app_data_dir = Directory::open(self.app_data_dir())?;
            // For each user
            loop {
                let entry = match app_data_dir.read()? {
                    None => break,
                    Some(e) => e,
                };
                let mut user_dir = match entry.open_as_dir() {
                    Err(_) => continue,
                    Ok(dir) => dir,
                };
                // For each package
                loop {
                    match user_dir.read()? {
                        None => break,
                        Some(e) => {
                            let mut entry_path = cstr::buf::default();
                            e.resolve_path(&mut entry_path)?;
                            let attr = entry_path.get_attr()?;
                            let app_id = to_app_id(attr.st.st_uid as i32);
                            if (AID_APP_START..=AID_APP_END).contains(&app_id) {
                                let app_no = app_id - AID_APP_START;
                                list.insert(app_no as usize);
                            }
                        }
                    }
                }
            }
        };
        list
    }
}

2
native/src/core/resetprop/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
proto/mod.rs
proto/persistent_properties.rs

View file

@ -0,0 +1,326 @@
use super::persist::{
persist_delete_prop, persist_get_all_props, persist_get_prop, persist_set_prop,
};
use super::{PropInfo, PropReader, SYS_PROP};
use argh::{EarlyExit, FromArgs, MissingRequirements};
use base::libc::PROP_VALUE_MAX;
use base::{
BufReadExt, CmdArgs, EarlyExitExt, LogLevel, LoggedResult, ResultExt, Utf8CStr, Utf8CStrBuf,
Utf8CString, argh, cstr, debug, log_err, set_log_level_state,
};
use nix::fcntl::OFlag;
use std::collections::BTreeMap;
use std::ffi::c_char;
use std::io::BufReader;
#[derive(FromArgs, Default)]
/// Parsed command-line options for the resetprop applet.
/// Flags with `long = none` are short-only on purpose (matches the usage text).
struct ResetProp {
    // -v: print verbose output to stderr
    #[argh(switch, short = 'v')]
    verbose: bool,
    // -w: wait mode (block until a property appears/changes)
    #[argh(switch, short = 'w', long = none)]
    wait_mode: bool,
    // -p: also read/write persistent properties from storage
    #[argh(switch, short = 'p', long = none)]
    persist: bool,
    // -P: only read persistent properties from storage
    #[argh(switch, short = 'P', long = none)]
    persist_only: bool,
    // -Z: operate on the property's selinux context instead of its value
    #[argh(switch, short = 'Z', long = none)]
    context: bool,
    // -n: set properties bypassing property_service
    #[argh(switch, short = 'n', long = none)]
    skip_svc: bool,
    // -f FILE: load and set properties from FILE
    #[argh(option, short = 'f')]
    file: Option<Utf8CString>,
    // -d NAME: delete property NAME
    #[argh(option, short = 'd', long = "delete")]
    delete_key: Option<Utf8CString>,
    // Positional NAME [VALUE] arguments
    #[argh(positional, greedy = true)]
    args: Vec<Utf8CString>,
}
/// Print the resetprop usage text to stderr.
/// `cmd` is argv[0] so the message reflects how the applet was invoked.
fn print_usage(cmd: &str) {
    eprintln!(
        r#"resetprop - System Property Manipulation Tool
Usage: {cmd} [flags] [arguments...]
Read mode arguments:
   (no arguments)    print all properties
   NAME              get property of NAME
Write mode arguments:
   NAME VALUE        set property NAME as VALUE
   -f,--file   FILE  load and set properties from FILE
   -d,--delete NAME  delete property
Wait mode arguments (toggled with -w):
   NAME              wait until property NAME changes
   NAME OLD_VALUE    if value of property NAME is not OLD_VALUE, get value
                     or else wait until property NAME changes
General flags:
   -h,--help         show this message
   -v,--verbose      print verbose output to stderr
   -w                switch to wait mode
Read mode flags:
   -p                also read persistent properties from storage
   -P                only read persistent properties from storage
   -Z                get property context instead of value
Write mode flags:
   -n                set properties bypassing property_service
   -p                always write persistent prop changes to storage
"#
    );
}
impl ResetProp {
    /// Read a single property value.
    ///
    /// With `-Z`, returns the property's selinux context instead.
    /// Otherwise reads the system property area (skipped with `-P`), then
    /// falls back to persistent storage for `persist.*` keys when `-p`/`-P`
    /// is set. Returns `None` if the property exists nowhere searched.
    fn get(&self, key: &Utf8CStr) -> Option<String> {
        if self.context {
            return Some(SYS_PROP.get_context(key).to_string());
        }
        let mut val = if !self.persist_only {
            SYS_PROP.find(key).map(|info| {
                let mut v = String::new();
                info.read(&mut PropReader::Value(&mut v));
                debug!("resetprop: get prop [{key}]=[{v}]");
                v
            })
        } else {
            None
        };
        // Persistent storage is only relevant for "persist." keys
        if val.is_none() && (self.persist || self.persist_only) && key.starts_with("persist.") {
            val = persist_get_prop(key).ok();
        }
        if val.is_none() {
            debug!("resetprop: prop [{key}] does not exist");
        }
        val
    }
    /// Print all properties as `[name]: [value]` (or `[name]: [context]`
    /// with -Z), sorted by name via the BTreeMap ordering.
    fn print_all(&self) {
        let mut map: BTreeMap<String, String> = BTreeMap::new();
        if !self.persist_only {
            SYS_PROP.for_each(&mut PropReader::List(&mut map));
        }
        if self.persist || self.persist_only {
            persist_get_all_props(&mut PropReader::List(&mut map)).log_ok();
        }
        for (mut k, v) in map.into_iter() {
            if self.context {
                println!(
                    "[{k}]: [{}]",
                    SYS_PROP.get_context(Utf8CStr::from_string(&mut k))
                );
            } else {
                println!("[{k}]: [{v}]");
            }
        }
    }
    /// Create or update a property.
    ///
    /// Bypasses property_service when `-n` is given, and always for `ro.*`
    /// props (property_service rejects changing read-only props). When
    /// bypassing, `persist.*` values are also written to storage if `-p` is set.
    fn set(&self, key: &Utf8CStr, val: &Utf8CStr) {
        let mut skip_svc = self.skip_svc;
        let mut info = SYS_PROP.find_mut(key);
        // Delete existing read-only properties if they are or will be long properties,
        // which cannot directly go through __system_property_update
        if key.starts_with("ro.") {
            skip_svc = true;
            if let Some(pi) = &info
                && (pi.is_long() || val.len() >= PROP_VALUE_MAX as usize)
            {
                // Skip pruning nodes as we will add it back ASAP
                SYS_PROP.delete(key, false);
                info = None;
            }
        }
        // Only consumed by debug!, which may be compiled out; hence the allow
        #[allow(unused_variables)]
        let msg = if skip_svc {
            "direct modification"
        } else {
            "property_service"
        };
        if let Some(pi) = info {
            // Property already exists: update in place or go through the service
            if skip_svc {
                pi.update(val);
            } else {
                SYS_PROP.set(key, val);
            }
            debug!("resetprop: update prop [{key}]=[{val}] by {msg}");
        } else {
            // Property does not exist yet: add directly or via the service
            if skip_svc {
                SYS_PROP.add(key, val);
            } else {
                SYS_PROP.set(key, val);
            }
            debug!("resetprop: create prop [{key}]=[{val}] by {msg}");
        }
        // When bypassing property_service, persistent props won't be stored in storage.
        // Explicitly handle this situation.
        if skip_svc && self.persist && key.starts_with("persist.") {
            persist_set_prop(key, val).log_ok();
        }
    }
    /// Delete a property from the property area, and from persistent storage
    /// when `-p` is set. Returns true if anything was actually deleted.
    fn delete(&self, key: &Utf8CStr) -> bool {
        debug!("resetprop: delete prop [{key}]");
        let mut ret = false;
        ret |= SYS_PROP.delete(key, true);
        if self.persist && key.starts_with("persist.") {
            ret |= persist_delete_prop(key).is_ok()
        }
        ret
    }
    /// Wait mode (`-w`): block until args[0] exists, then — if args[1] was
    /// given — until its value differs from args[1], printing nothing; the
    /// final value is only logged at debug level.
    fn wait(&self) {
        let key = &self.args[0];
        let val = self.args.get(1).map(|s| &**s);
        // Find PropInfo; wait on the global area serial until the key appears
        let info: &PropInfo;
        loop {
            let i = SYS_PROP.find(key);
            if let Some(i) = i {
                info = i;
                break;
            } else {
                debug!("resetprop: waiting for prop [{key}] to exist");
                let mut serial = SYS_PROP.area_serial();
                SYS_PROP.wait(None, serial, &mut serial);
            }
        }
        if let Some(val) = val {
            let mut curr_val = String::new();
            let mut serial = 0;
            // Re-read value+serial each round; wait on the prop's own serial
            loop {
                let mut r = PropReader::ValueSerial(&mut curr_val, &mut serial);
                SYS_PROP.read(info, &mut r);
                if *val != *curr_val {
                    debug!("resetprop: get prop [{key}]=[{curr_val}]");
                    break;
                }
                debug!("resetprop: waiting for prop [{key}]!=[{val}]");
                SYS_PROP.wait(Some(info), serial, &mut serial);
            }
        }
    }
    /// Parse `file` as a prop file (key=value lines) and set every entry.
    fn load_file(&self, file: &Utf8CStr) -> LoggedResult<()> {
        let fd = file.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC)?;
        // Reuse the same buffers across lines to avoid per-line allocation
        let mut key = cstr::buf::dynamic(128);
        let mut val = cstr::buf::dynamic(128);
        BufReader::new(fd).for_each_prop(|k, v| {
            key.clear();
            val.clear();
            key.push_str(k);
            val.push_str(v);
            self.set(&key, &val);
            true
        });
        Ok(())
    }
    /// Dispatch to the requested operation mode. The CLI layer has already
    /// validated that the modes are mutually exclusive and argument counts
    /// are sane (0..=2 positionals).
    fn run(self) -> LoggedResult<()> {
        if self.wait_mode {
            self.wait();
        } else if let Some(file) = &self.file {
            self.load_file(file)?;
        } else if let Some(key) = &self.delete_key {
            if !self.delete(key) {
                return log_err!();
            }
        } else {
            match self.args.len() {
                0 => self.print_all(),
                1 => {
                    if let Some(val) = self.get(&self.args[0]) {
                        println!("{val}");
                    } else {
                        return log_err!();
                    }
                }
                2 => self.set(&self.args[0], &self.args[1]),
                // len > 2 is rejected during CLI validation
                _ => unreachable!(),
            }
        }
        Ok(())
    }
}
/// Entry point of the `resetprop` applet.
///
/// Parses flags with argh, validates that at most one special operation mode
/// (-w / -f / -d) is requested and that positional arity is valid, then runs
/// the command. Returns the process exit code (0 on success, 1 on failure).
pub fn resetprop_main(argc: i32, argv: *mut *mut c_char) -> i32 {
    // Debug logging stays off until -v is seen
    set_log_level_state(LogLevel::Debug, false);
    let cmds = CmdArgs::new(argc, argv.cast());
    let cmds = cmds.as_slice();
    let cli = ResetProp::from_args(&[cmds[0]], &cmds[1..])
        .and_then(|cli| {
            // Count requested operation modes to reject ambiguous invocations
            let mut special_mode = 0;
            if cli.wait_mode {
                // Wait mode requires at least the property NAME
                if cli.args.is_empty() {
                    let mut missing = MissingRequirements::default();
                    missing.missing_positional_arg("NAME");
                    missing.err_on_any()?;
                }
                special_mode += 1;
            }
            if cli.file.is_some() {
                special_mode += 1;
            }
            if cli.delete_key.is_some() {
                special_mode += 1;
            }
            if special_mode > 1 {
                return Err(EarlyExit::from(
                    "Multiple operation mode detected!\n".to_string(),
                ));
            }
            // At most NAME VALUE is accepted positionally
            if cli.args.len() > 2 {
                return Err(EarlyExit::from(format!(
                    "Unrecognized argument: {}\n",
                    cli.args[2]
                )));
            }
            Ok(cli)
        })
        .on_early_exit(|| print_usage(cmds[0]));
    if cli.verbose {
        set_log_level_state(LogLevel::Debug, true);
    }
    if cli.run().is_ok() { 0 } else { 1 }
}
// Magisk's own helper functions
pub fn set_prop(key: &Utf8CStr, val: &Utf8CStr) {
let prop = ResetProp {
// All Magisk's internal usage should skip property_service
skip_svc: true,
..Default::default()
};
prop.set(key, val);
}
pub fn load_prop_file(file: &Utf8CStr) {
let prop = ResetProp {
// All Magisk's internal usage should skip property_service
skip_svc: true,
..Default::default()
};
prop.load_file(file).ok();
}
/// Get a property value for Magisk internal use.
/// For `persist.*` keys, persistent storage is consulted as a fallback.
/// Returns an empty string when the property does not exist.
pub fn get_prop(key: &Utf8CStr) -> String {
    let is_persist_key = key.starts_with("persist.");
    let prop = ResetProp {
        persist: is_persist_key,
        ..Default::default()
    };
    match prop.get(key) {
        Some(v) => v,
        None => String::new(),
    }
}

View file

@ -0,0 +1,181 @@
use base::libc::c_char;
use base::{Utf8CStr, libc};
pub use cli::{get_prop, load_prop_file, resetprop_main, set_prop};
use libc::timespec;
use std::collections::BTreeMap;
use std::ffi::CStr;
use std::ptr;
use std::sync::LazyLock;
mod cli;
mod persist;
mod proto;
// Lazily-resolved function table for the system property implementation.
// Filled in on the C++ side by get_sys_prop() (see system_properties.cpp).
static SYS_PROP: LazyLock<SysProp> = LazyLock::new(|| unsafe { get_sys_prop() });
// Opaque handle to a C++ `prop_info`; only ever used behind references.
#[repr(C)]
struct PropInfo {
    _private: cxx::private::Opaque,
}
type CharPtr = *const c_char;
type ReadCallback = unsafe extern "C" fn(&mut PropReader, CharPtr, CharPtr, u32);
type ForEachCallback = unsafe extern "C" fn(&PropInfo, &mut PropReader);
/// Destination for property reads: a single value, a value together with
/// its serial number, or a name -> value map when listing all properties.
enum PropReader<'a> {
    Value(&'a mut String),
    ValueSerial(&'a mut String, &'a mut u32),
    List(&'a mut BTreeMap<String, String>),
}
impl PropReader<'_> {
    /// Store a key/value pair coming from the C read callback.
    ///
    /// SAFETY-relevant: `key` and `val` must be valid NUL-terminated strings
    /// (guaranteed by the property implementation). Invalid UTF-8 is replaced
    /// lossily rather than rejected.
    fn put_cstr(&mut self, key: CharPtr, val: CharPtr, serial: u32) {
        let key = unsafe { CStr::from_ptr(key) };
        let val = unsafe { CStr::from_ptr(val) };
        match self {
            PropReader::Value(v) => {
                **v = String::from_utf8_lossy(val.to_bytes()).into_owned();
            }
            PropReader::ValueSerial(v, s) => {
                **v = String::from_utf8_lossy(val.to_bytes()).into_owned();
                **s = serial;
            }
            PropReader::List(map) => {
                map.insert(
                    String::from_utf8_lossy(key.to_bytes()).into_owned(),
                    String::from_utf8_lossy(val.to_bytes()).into_owned(),
                );
            }
        }
    }
    /// Store an owned key/value pair (used by the persistent prop reader).
    /// `key` is ignored unless collecting into a list.
    fn put_str(&mut self, key: String, val: String, serial: u32) {
        match self {
            PropReader::Value(v) => {
                **v = val;
            }
            PropReader::ValueSerial(v, s) => {
                **v = val;
                **s = serial;
            }
            PropReader::List(map) => {
                map.insert(key, val);
            }
        }
    }
}
// FFI declarations resolved from Magisk's vendored copy of the bionic
// system property implementation (see system_properties.cpp for the bridge).
unsafe extern "C" {
    // SAFETY: the improper_ctypes warning is about PropReader. We only pass PropReader
    // to C functions as raw pointers, and all actual usage happens on the Rust side.
    #[allow(improper_ctypes)]
    fn get_sys_prop() -> SysProp;
    // Whether the prop_info stores a "long" (oversized) value
    fn prop_info_is_long(info: &PropInfo) -> bool;
    #[link_name = "__system_property_find2"]
    fn sys_prop_find(key: CharPtr) -> Option<&'static mut PropInfo>;
    #[link_name = "__system_property_update2"]
    fn sys_prop_update(info: &mut PropInfo, val: CharPtr, val_len: u32) -> i32;
    #[link_name = "__system_property_add2"]
    fn sys_prop_add(key: CharPtr, key_len: u32, val: CharPtr, val_len: u32) -> i32;
    #[link_name = "__system_property_delete"]
    fn sys_prop_delete(key: CharPtr, prune: bool) -> i32;
    #[link_name = "__system_property_get_context"]
    fn sys_prop_get_context(key: CharPtr) -> CharPtr;
    #[link_name = "__system_property_area_serial2"]
    fn sys_prop_area_serial() -> u32;
}
// Function table of platform (or vendored) property API entry points.
// Layout must stay in sync with `struct SysProp` in system_properties.cpp.
#[repr(C)]
struct SysProp {
    set: unsafe extern "C" fn(CharPtr, CharPtr) -> i32,
    find: unsafe extern "C" fn(CharPtr) -> Option<&'static PropInfo>,
    read_callback: unsafe extern "C" fn(&PropInfo, ReadCallback, &mut PropReader) -> i32,
    foreach: unsafe extern "C" fn(ForEachCallback, &mut PropReader) -> i32,
    wait: unsafe extern "C" fn(Option<&PropInfo>, u32, &mut u32, *const timespec) -> i32,
}
// Safe abstractions over raw C APIs
impl PropInfo {
    /// Read this property's key/value (and serial) into `reader`.
    fn read(&self, reader: &mut PropReader) {
        SYS_PROP.read(self, reader);
    }
    /// Overwrite this property's value in place (bypasses property_service).
    fn update(&mut self, val: &Utf8CStr) {
        SYS_PROP.update(self, val);
    }
    /// Whether the stored value exceeds PROP_VALUE_MAX ("long" property).
    fn is_long(&self) -> bool {
        unsafe { prop_info_is_long(self) }
    }
}
impl SysProp {
    /// Read a property through the C read_callback, funneling the result
    /// into the given `PropReader`.
    fn read(&self, info: &PropInfo, reader: &mut PropReader) {
        // Trampoline: the C side calls this with key/value/serial
        unsafe extern "C" fn read_fn(r: &mut PropReader, key: CharPtr, val: CharPtr, serial: u32) {
            r.put_cstr(key, val, serial);
        }
        unsafe {
            (self.read_callback)(info, read_fn, reader);
        }
    }
    /// Find a property via the platform/vendored find entry (shared ref).
    fn find(&self, key: &Utf8CStr) -> Option<&'static PropInfo> {
        unsafe { (self.find)(key.as_ptr()) }
    }
    /// Find a property via our vendored implementation (mutable ref,
    /// required for in-place updates).
    fn find_mut(&self, key: &Utf8CStr) -> Option<&'static mut PropInfo> {
        unsafe { sys_prop_find(key.as_ptr()) }
    }
    /// Set a property through property_service semantics.
    fn set(&self, key: &Utf8CStr, val: &Utf8CStr) {
        unsafe {
            (self.set)(key.as_ptr(), val.as_ptr());
        }
    }
    /// Add a brand-new property directly to the property area.
    fn add(&self, key: &Utf8CStr, val: &Utf8CStr) {
        unsafe {
            sys_prop_add(
                key.as_ptr(),
                key.len() as u32,
                val.as_ptr(),
                val.len() as u32,
            );
        }
    }
    /// Overwrite an existing property's value directly.
    fn update(&self, info: &mut PropInfo, val: &Utf8CStr) {
        unsafe {
            sys_prop_update(info, val.as_ptr(), val.len() as u32);
        }
    }
    /// Delete a property. `prune` controls whether empty trie nodes are
    /// removed as well. Returns true on success.
    fn delete(&self, key: &Utf8CStr, prune: bool) -> bool {
        unsafe { sys_prop_delete(key.as_ptr(), prune) == 0 }
    }
    /// Iterate over every property, reading each one into `reader`.
    fn for_each(&self, reader: &mut PropReader) {
        unsafe extern "C" fn for_each_fn(info: &PropInfo, vals: &mut PropReader) {
            SYS_PROP.read(info, vals);
        }
        unsafe {
            (self.foreach)(for_each_fn, reader);
        }
    }
    /// Block until `info` (or the global area when None) changes past
    /// `old_serial`; the new serial is written to `new_serial`. No timeout.
    fn wait(&self, info: Option<&PropInfo>, old_serial: u32, new_serial: &mut u32) {
        unsafe {
            (self.wait)(info, old_serial, new_serial, ptr::null());
        }
    }
    /// Look up the selinux context label associated with a property key.
    fn get_context(&self, key: &Utf8CStr) -> &'static Utf8CStr {
        unsafe { Utf8CStr::from_ptr_unchecked(sys_prop_get_context(key.as_ptr())) }
    }
    /// Current serial number of the whole property area.
    fn area_serial(&self) -> u32 {
        unsafe { sys_prop_area_serial() }
    }
}

View file

@ -0,0 +1,182 @@
use nix::fcntl::OFlag;
use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer};
use std::fs::File;
use std::io::{BufWriter, Read, Write};
use std::os::fd::FromRawFd;
use crate::resetprop::PropReader;
use crate::resetprop::proto::persistent_properties::PersistentProperties;
use crate::resetprop::proto::persistent_properties::mod_PersistentProperties::PersistentPropertyRecord;
use base::const_format::concatcp;
use base::libc::mkstemp;
use base::{
Directory, FsPathBuilder, LibcReturn, LoggedResult, MappedFile, SilentLogExt, Utf8CStr,
Utf8CStrBuf, WalkResult, clone_attr, cstr, debug, log_err,
};
// Persistent properties live either as one protobuf file (modern Android)
// or as one file per property under this directory (legacy).
const PERSIST_PROP_DIR: &str = "/data/property";
const PERSIST_PROP: &str = concatcp!(PERSIST_PROP_DIR, "/persistent_properties");
/// Lookup helpers over the (sorted) protobuf property list.
trait PropExt {
    // Binary search by name; Ok(index) if present, Err(insertion point) if not
    fn find_index(&self, name: &Utf8CStr) -> Result<usize, usize>;
    // Consume self and extract the record with the given name, if any
    fn find(self, name: &Utf8CStr) -> Option<PersistentPropertyRecord>;
}
impl PropExt for PersistentProperties {
    fn find_index(&self, name: &Utf8CStr) -> Result<usize, usize> {
        // Requires `properties` to be sorted by name (see proto_read_props)
        self.properties
            .binary_search_by(|p| p.name.as_deref().cmp(&Some(name.as_str())))
    }
    fn find(self, name: &Utf8CStr) -> Option<PersistentPropertyRecord> {
        let idx = self.find_index(name).ok()?;
        self.properties.into_iter().nth(idx)
    }
}
/// Whether this device stores persistent properties in the single protobuf
/// file rather than one file per property.
fn check_proto() -> bool {
    let proto_file = cstr!(PERSIST_PROP);
    proto_file.exists()
}
/// Read a legacy per-file persistent property: the value is the entire
/// contents of `/data/property/<name>`.
fn file_get_prop(name: &Utf8CStr) -> LoggedResult<String> {
    let prop_path = cstr::buf::default()
        .join_path(PERSIST_PROP_DIR)
        .join_path(name);
    // Missing file is expected; stay silent instead of logging an error
    let mut file = prop_path.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC).silent()?;
    debug!("resetprop: read prop from [{}]", prop_path);
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    Ok(contents)
}
/// Write or delete a legacy per-file persistent property.
///
/// `Some(value)` writes atomically (temp file + rename); `None` unlinks
/// the property file.
fn file_set_prop(name: &Utf8CStr, value: Option<&Utf8CStr>) -> LoggedResult<()> {
    let path = cstr::buf::default()
        .join_path(PERSIST_PROP_DIR)
        .join_path(name);
    if let Some(value) = value {
        // mkstemp rewrites the XXXXXX suffix in-place with a unique name
        let mut tmp = cstr::buf::default()
            .join_path(PERSIST_PROP_DIR)
            .join_path("prop.XXXXXX");
        {
            // Scope the File so it is flushed/closed before the rename
            let mut f = unsafe {
                mkstemp(tmp.as_mut_ptr())
                    .into_os_result("mkstemp", None, None)
                    .map(|fd| File::from_raw_fd(fd))?
            };
            f.write_all(value.as_bytes())?;
        }
        debug!("resetprop: write prop to [{}]", tmp);
        // Atomic replace of the destination
        tmp.rename_to(&path)?
    } else {
        // Missing file is fine when deleting; stay silent
        path.remove().silent()?;
        debug!("resetprop: unlink [{}]", path);
    }
    Ok(())
}
/// Decode the protobuf persistent_properties file into memory.
/// The returned list is sorted by name so PropExt::find_index can use
/// binary search.
fn proto_read_props() -> LoggedResult<PersistentProperties> {
    debug!("resetprop: decode with protobuf [{}]", PERSIST_PROP);
    // Map the file instead of reading it into a buffer
    let m = MappedFile::open(cstr!(PERSIST_PROP))?;
    let m = m.as_ref();
    let mut r = BytesReader::from_bytes(m);
    let mut props = PersistentProperties::from_reader(&mut r, m)?;
    // Keep the list sorted for binary search
    props
        .properties
        .sort_unstable_by(|a, b| a.name.cmp(&b.name));
    Ok(props)
}
/// Encode and atomically replace the protobuf persistent_properties file.
/// Ownership/mode/selinux attrs are cloned from the original file onto the
/// temp file before the rename so init still accepts it.
fn proto_write_props(props: &PersistentProperties) -> LoggedResult<()> {
    let mut tmp = cstr::buf::default().join_path(concatcp!(PERSIST_PROP, ".XXXXXX"));
    {
        // Scope the File so it is flushed/closed before clone_attr + rename
        let f = unsafe {
            mkstemp(tmp.as_mut_ptr())
                .into_os_result("mkstemp", None, None)
                .map(|fd| File::from_raw_fd(fd))?
        };
        debug!("resetprop: encode with protobuf [{}]", tmp);
        props.write_message(&mut Writer::new(BufWriter::new(f)))?;
    }
    clone_attr(cstr!(PERSIST_PROP), &tmp)?;
    tmp.rename_to(cstr!(PERSIST_PROP))?;
    Ok(())
}
/// Read one persistent property from storage (protobuf or legacy file).
/// Errors (including "not found") are reported through LoggedResult.
pub(super) fn persist_get_prop(key: &Utf8CStr) -> LoggedResult<String> {
    if check_proto() {
        let props = proto_read_props()?;
        // Not finding the key is an expected condition; silent() avoids noise
        let prop = props.find(key).silent()?;
        if let PersistentPropertyRecord {
            name: Some(_),
            value: Some(v),
        } = prop
        {
            return Ok(v);
        }
    } else {
        let value = file_get_prop(key)?;
        debug!("resetprop: get persist prop [{}]=[{}]", key, value);
        return Ok(value);
    }
    // Proto record existed but was missing name/value
    log_err!()
}
/// Feed every persistent property from storage into `reader`
/// (protobuf records or one file per property in the legacy layout).
pub(super) fn persist_get_all_props(reader: &mut PropReader) -> LoggedResult<()> {
    if check_proto() {
        let props = proto_read_props()?;
        props.properties.into_iter().for_each(|prop| {
            // Skip malformed records missing either field
            if let PersistentPropertyRecord {
                name: Some(n),
                value: Some(v),
            } = prop
            {
                reader.put_str(n, v, 0);
            }
        });
    } else {
        let mut dir = Directory::open(cstr!(PERSIST_PROP_DIR))?;
        dir.pre_order_walk(|e| {
            // File name is the property name; contents are the value
            if e.is_file()
                && let Ok(value) = file_get_prop(e.name())
            {
                reader.put_str(e.name().to_string(), value, 0);
            }
            // Do not traverse recursively
            Ok(WalkResult::Skip)
        })?;
    }
    Ok(())
}
/// Delete one persistent property from storage.
/// Errors (including "not found") surface through LoggedResult.
pub(super) fn persist_delete_prop(key: &Utf8CStr) -> LoggedResult<()> {
    if !check_proto() {
        // Legacy layout: removing the per-prop file is the whole operation
        return file_set_prop(key, None);
    }
    let mut props = proto_read_props()?;
    // Missing key is an expected condition; silent() avoids log noise
    let idx = props.find_index(key).silent()?;
    props.properties.remove(idx);
    proto_write_props(&props)
}
/// Write one persistent property to storage, creating it if needed.
pub(super) fn persist_set_prop(key: &Utf8CStr, val: &Utf8CStr) -> LoggedResult<()> {
    if !check_proto() {
        // Legacy layout: one file per property
        return file_set_prop(key, Some(val));
    }
    let mut props = proto_read_props()?;
    match props.find_index(key) {
        // Existing record: replace its value
        Ok(idx) => props.properties[idx].value = Some(val.to_string()),
        // Not present: insert at the sorted position reported by binary search
        Err(idx) => {
            let record = PersistentPropertyRecord {
                name: Some(key.to_string()),
                value: Some(val.to_string()),
            };
            props.properties.insert(idx, record);
        }
    }
    proto_write_props(&props)
}

View file

@ -0,0 +1,24 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
message PersistentProperties {
message PersistentPropertyRecord {
optional string name = 1;
optional string value = 2;
}
repeated PersistentPropertyRecord properties = 1;
}

View file

@ -0,0 +1,55 @@
#include <dlfcn.h>
#include <base.hpp>
#include <core.hpp>
#include <api/system_properties.h>
#include <system_properties/prop_info.h>
using namespace std;
// This has to keep in sync with SysProp in mod.rs
// Function table handed to Rust; each member mirrors one
// __system_property_* entry point.
struct SysProp {
    int (*set)(const char*, const char*);
    const prop_info *(*find)(const char*);
    void (*read_callback)(const prop_info*, void (*)(void*, const char*, const char*, uint32_t), void*);
    int (*foreach)(void (*)(const prop_info*, void*), void*);
    bool (*wait)(const prop_info*, uint32_t, uint32_t*, const timespec*);
};
// Exposed to Rust: whether the prop_info stores a "long" (oversized) value.
extern "C" bool prop_info_is_long(const prop_info &info) {
    return info.is_long();
}
// Build the SysProp function table consumed by the Rust side and initialize
// the property area. In stub builds we link our internal implementation;
// otherwise the platform's libc symbols are resolved at runtime via dlsym,
// falling back to the internal implementation for APIs older devices lack.
extern "C" SysProp get_sys_prop() {
    SysProp prop{};
#ifdef APPLET_STUB_MAIN
    // Use internal implementation
    prop.set = __system_property_set;
    prop.find = __system_property_find;
    prop.read_callback = __system_property_read_callback;
    prop.foreach = __system_property_foreach;
    prop.wait = __system_property_wait;
#else
#define DLOAD(name) (*(void **) &prop.name = dlsym(RTLD_DEFAULT, "__system_property_" #name))
    // Dynamic load platform implementation
    DLOAD(set);
    DLOAD(find);
    DLOAD(read_callback);
    DLOAD(foreach);
    DLOAD(wait);
#undef DLOAD
    if (prop.wait == nullptr) {
        // This platform API only exist on API 26+
        prop.wait = __system_property_wait;
    }
    if (prop.read_callback == nullptr) {
        // This platform API only exist on API 26+
        prop.read_callback = __system_property_read_callback;
    }
#endif
    // Non-zero return indicates the property area failed to map
    if (__system_properties_init()) {
        LOGE("resetprop: __system_properties_init error\n");
    }
    return prop;
}

View file

@ -0,0 +1,232 @@
#include <string>
#include <vector>
#include <sys/wait.h>
#include <consts.hpp>
#include <base.hpp>
#include <core.hpp>
using namespace std;
// Expands to the argv prefix used to run scripts under busybox ash
#define BBEXEC_CMD bbpath(), "sh"
// Path to the busybox binary: prefer the copy inside the Magisk tmp dir,
// falling back to the one in /data/adb/magisk. The static string is
// recomputed on every call so the fallback stays current.
static const char *bbpath() {
    static string path;
    path = get_magisk_tmp();
    path += "/" BBPATH "/busybox";
    if (access(path.data(), X_OK) != 0) {
        path = DATABIN "/busybox";
    }
    return path.data();
}
// Prepare the environment shared by all boot/module scripts:
// busybox standalone mode, Magisk tmp dir appended to PATH, and a
// ZYGISK_ENABLED marker when zygisk is active.
static void set_script_env() {
    setenv("ASH_STANDALONE", "1", 1);
    // getenv may return nullptr when PATH is unset; passing a null pointer
    // to %s is undefined behavior, so substitute an empty string.
    const char *old_path = getenv("PATH");
    char new_path[4096];
    ssprintf(new_path, sizeof(new_path), "%s:%s", old_path ? old_path : "", get_magisk_tmp());
    setenv("PATH", new_path, 1);
    if (MagiskD::Get().zygisk_enabled())
        setenv("ZYGISK_ENABLED", "1", 1);
}
// Run a single script synchronously under busybox sh with the standard
// script environment. fork_no_orphan ensures the child dies with the daemon.
void exec_script(Utf8CStr script) {
    exec_t exec {
        .pre_exec = set_script_env,
        .fork = fork_no_orphan
    };
    exec_command_sync(exec, BBEXEC_CMD, script.c_str());
}
// Absolute CLOCK_MONOTONIC deadline for the post-fs-data blocking phase
static timespec pfs_timeout;
// post-fs-data (pfs) scripts are allowed to block boot, but only until
// pfs_timeout. PFS_SETUP forks: the parent just waits for the whole phase,
// while the child spawns a "timer" process that exits at the deadline,
// which PFS_WAIT uses to detect timeout.
#define PFS_SETUP() \
if (pfs) { \
    if (int pid = xfork()) { \
        if (pid < 0) \
            return; \
        /* In parent process, simply wait for child to finish */ \
        waitpid(pid, nullptr, 0); \
        return; \
    } \
    timer_pid = xfork(); \
    if (timer_pid == 0) { \
        /* In timer process, count down */ \
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &pfs_timeout, nullptr); \
        exit(0); \
    } \
}
// After each script: reap one child; if the reaped child is the timer, the
// deadline has passed, so stop blocking on subsequent scripts.
#define PFS_WAIT() \
if (pfs) { \
    /* If we ran out of time, don't block */ \
    if (timer_pid < 0) \
        continue; \
    if (int pid = waitpid(-1, nullptr, 0); pid == timer_pid) { \
        LOGW("* post-fs-data scripts blocking phase timeout\n"); \
        timer_pid = -1; \
    } \
}
// End of the phase: kill a still-running timer and exit the forked child
#define PFS_DONE() \
if (pfs) { \
    if (timer_pid > 0) \
        kill(timer_pid, SIGKILL); \
    exit(0); \
}
// Run every executable regular file in /data/adb/<stage>.d.
// For the post-fs-data stage, scripts run in blocking mode bounded by
// POST_FS_DATA_SCRIPT_MAX_TIME (see the PFS_* macros above).
void exec_common_scripts(Utf8CStr stage) {
    LOGI("* Running %s.d scripts\n", stage.c_str());
    char path[4096];
    // `name` points just past the directory prefix; entry names are
    // copied there to form full paths without re-printing the prefix
    char *name = path + sprintf(path, SECURE_DIR "/%s.d", stage.c_str());
    auto dir = xopen_dir(path);
    if (!dir) return;
    bool pfs = stage == "post-fs-data"sv;
    int timer_pid = -1;
    if (pfs) {
        // Setup timer
        clock_gettime(CLOCK_MONOTONIC, &pfs_timeout);
        pfs_timeout.tv_sec += POST_FS_DATA_SCRIPT_MAX_TIME;
    }
    PFS_SETUP()
    *(name++) = '/';
    int dfd = dirfd(dir.get());
    for (dirent *entry; (entry = xreaddir(dir.get()));) {
        if (entry->d_type == DT_REG) {
            // Only run files with the executable bit set
            if (faccessat(dfd, entry->d_name, X_OK, 0) != 0)
                continue;
            LOGI("%s.d: exec [%s]\n", stage.c_str(), entry->d_name);
            strcpy(name, entry->d_name);
            exec_t exec {
                .pre_exec = set_script_env,
                // pfs scripts must be waitable; others are fire-and-forget
                .fork = pfs ? xfork : fork_dont_care
            };
            exec_command(exec, BBEXEC_CMD, path);
            PFS_WAIT()
        }
    }
    PFS_DONE()
}
// Lexicographic comparison of timespec values:
// seconds dominate; nanoseconds break ties.
static bool operator>(const timespec &a, const timespec &b) {
    if (a.tv_sec == b.tv_sec)
        return a.tv_nsec > b.tv_nsec;
    return a.tv_sec > b.tv_sec;
}
// Run <module>/<stage>.sh for every enabled module. post-fs-data scripts
// may block boot until pfs_timeout; if the deadline already passed before
// we got here, they are demoted to fire-and-forget (service-style).
void exec_module_scripts(Utf8CStr stage, const rust::Vec<ModuleInfo> &module_list) {
    LOGI("* Running module %s scripts\n", stage.c_str());
    if (module_list.empty())
        return;
    bool pfs = stage == "post-fs-data";
    if (pfs) {
        timespec now{};
        clock_gettime(CLOCK_MONOTONIC, &now);
        // If we had already timed out, treat it as service mode
        if (now > pfs_timeout)
            pfs = false;
    }
    int timer_pid = -1;
    PFS_SETUP()
    char path[4096];
    for (auto &m : module_list) {
        // m.name is a rust string slice, hence the %.*s length-bounded format
        sprintf(path, MODULEROOT "/%.*s/%s.sh", (int) m.name.size(), m.name.data(), stage.c_str());
        if (access(path, F_OK) == -1)
            continue;
        LOGI("%.*s: exec [%s.sh]\n", (int) m.name.size(), m.name.data(), stage.c_str());
        exec_t exec {
            .pre_exec = set_script_env,
            // pfs scripts must be waitable; others are fire-and-forget
            .fork = pfs ? xfork : fork_dont_care
        };
        exec_command(exec, BBEXEC_CMD, path);
        PFS_WAIT()
    }
    PFS_DONE()
}
// Shell template: install the APK via pm, grant REQUEST_INSTALL_PACKAGES,
// then remove the staged file.
constexpr char install_script[] = R"EOF(
APK=%s
log -t Magisk "pm_install: $APK"
log -t Magisk "pm_install: $(pm install -g -r $APK 2>&1)"
appops set %s REQUEST_INSTALL_PACKAGES allow
rm -f $APK
)EOF";
// Asynchronously install the given APK through the system shell.
// The file is relabeled first so package manager can read it.
void install_apk(Utf8CStr apk) {
    setfilecon(apk.c_str(), MAGISK_FILE_CON);
    char cmds[sizeof(install_script) + 4096];
    ssprintf(cmds, sizeof(cmds), install_script, apk.c_str(), JAVA_PACKAGE_NAME);
    exec_command_async("/system/bin/sh", "-c", cmds);
}
// Shell template: uninstall a package via pm, logging the result
constexpr char uninstall_script[] = R"EOF(
PKG=%s
log -t Magisk "pm_uninstall: $PKG"
log -t Magisk "pm_uninstall: $(pm uninstall $PKG 2>&1)"
)EOF";
// Asynchronously uninstall the given package through the system shell.
void uninstall_pkg(Utf8CStr pkg) {
    char cmds[sizeof(uninstall_script) + 256];
    ssprintf(cmds, sizeof(cmds), uninstall_script, pkg.c_str());
    exec_command_async("/system/bin/sh", "-c", cmds);
}
// Shell template: clear a package's data for one user via pm
constexpr char clear_script[] = R"EOF(
PKG=%s
USER=%d
log -t Magisk "pm_clear: $PKG (user=$USER)"
log -t Magisk "pm_clear: $(pm clear --user $USER $PKG 2>&1)"
)EOF";
// Asynchronously clear the package's data for the given user.
void clear_pkg(const char *pkg, int user_id) {
    char cmds[sizeof(clear_script) + 288];
    ssprintf(cmds, sizeof(cmds), clear_script, pkg, user_id);
    exec_command_async("/system/bin/sh", "-c", cmds);
}
// Print a printf-style message to `fp` followed by a blank line, then
// exit(1). Deliberately shadows ::abort within this translation unit so
// install_module's failure paths terminate without raising SIGABRT.
[[noreturn]] __printflike(2, 3)
static void abort(FILE *fp, const char *fmt, ...) {
    va_list valist;
    va_start(valist, fmt);
    vfprintf(fp, fmt, valist);
    fprintf(fp, "\n\n");
    va_end(valist);
    exit(1);
}
// Shell snippet that sources util_functions.sh and runs its install_module
constexpr char install_module_script[] = R"EOF(
. /data/adb/magisk/util_functions.sh
install_module
exit 0
)EOF";
// Install a module zip via the flashing scripts. Requires root and a
// complete Magisk installation; replaces the current process with the
// busybox shell running install_module_script (never returns).
void install_module(Utf8CStr file) {
    if (getuid() != 0)
        abort(stderr, "Run this command with root");
    if (access(DATABIN, F_OK) ||
        access(bbpath(), X_OK) ||
        access(DATABIN "/util_functions.sh", F_OK))
        abort(stderr, "Incomplete Magisk install");
    if (access(file.c_str(), F_OK))
        abort(stderr, "'%s' does not exist", file.c_str());
    // NOTE(review): realpath can return nullptr if the file disappears
    // between the access() check above and here; setenv would then receive
    // a null value — confirm whether this race is acceptable.
    char *zip = realpath(file.c_str(), nullptr);
    setenv("OUTFD", "1", 1);
    setenv("ZIPFILE", zip, 1);
    setenv("ASH_STANDALONE", "1", 1);
    setenv("MAGISKTMP", get_magisk_tmp(), 0);
    free(zip);
    // Silence stderr; the flashing script reports through OUTFD (stdout)
    int fd = xopen("/dev/null", O_RDONLY);
    xdup2(fd, STDERR_FILENO);
    close(fd);
    const char *argv[] = { BBEXEC_CMD, "-c", install_module_script, nullptr };
    execve(argv[0], (char **) argv, environ);
    abort(stdout, "Failed to execute BusyBox shell");
}

106
native/src/core/selinux.rs Normal file
View file

@ -0,0 +1,106 @@
use crate::consts::{DATABIN, LOG_PIPE, MAGISK_LOG_CON, MAGISKDB, MODULEROOT, SECURE_DIR};
use crate::ffi::get_magisk_tmp;
use base::{Directory, FsPathBuilder, LoggedResult, ResultExt, Utf8CStr, Utf8CStrBuf, cstr, libc};
use nix::fcntl::OFlag;
use std::io::Write;
// selinux contexts used when relabeling Magisk-managed paths
const UNLABEL_CON: &Utf8CStr = cstr!("u:object_r:unlabeled:s0");
const SYSTEM_CON: &Utf8CStr = cstr!("u:object_r:system_file:s0");
const ADB_CON: &Utf8CStr = cstr!("u:object_r:adb_data_file:s0");
const ROOT_CON: &Utf8CStr = cstr!("u:object_r:rootfs:s0");
/// Recursively relabel anything under `path` that is currently "unlabeled"
/// to system_file, leaving all other labels untouched.
///
/// `path` doubles as a reusable path buffer: it is truncated back to the
/// directory prefix on every loop iteration. `con` is a scratch buffer for
/// reading contexts.
fn restore_syscon_from_unlabeled(
    path: &mut dyn Utf8CStrBuf,
    con: &mut dyn Utf8CStrBuf,
) -> LoggedResult<()> {
    // Remember the prefix length so child entries can be appended/truncated
    let dir_path_len = path.len();
    if path.get_secontext(con).log().is_ok() && con.as_str() == UNLABEL_CON {
        path.set_secontext(SYSTEM_CON)?;
    }
    let mut dir = Directory::open(path)?;
    while let Some(ref e) = dir.read()? {
        path.truncate(dir_path_len);
        path.append_path(e.name());
        if e.is_dir() {
            restore_syscon_from_unlabeled(path, con)?;
        } else if (e.is_file() || e.is_symlink())
            && path.get_secontext(con).log().is_ok()
            && con.as_str() == UNLABEL_CON
        {
            path.set_secontext(SYSTEM_CON)?;
        }
    }
    Ok(())
}
/// Recursively force everything under `path` to the system_file context and
/// root:root ownership (chown errors are intentionally ignored).
/// `path` is reused as a buffer, truncated back to the prefix per entry.
fn restore_syscon(path: &mut dyn Utf8CStrBuf) -> LoggedResult<()> {
    let dir_path_len = path.len();
    path.set_secontext(SYSTEM_CON)?;
    // lchown so symlinks themselves are chowned, not their targets
    unsafe { libc::lchown(path.as_ptr(), 0, 0) };
    let mut dir = Directory::open(path)?;
    while let Some(ref e) = dir.read()? {
        path.truncate(dir_path_len);
        path.append_path(e.name());
        if e.is_dir() {
            restore_syscon(path)?;
        } else if e.is_file() || e.is_symlink() {
            path.set_secontext(SYSTEM_CON)?;
            unsafe { libc::lchown(path.as_ptr(), 0, 0) };
        }
    }
    Ok(())
}
/// Restore selinux contexts on Magisk's data directories:
/// SECURE_DIR gets adb_data_file (only if the policy knows that type),
/// modules get system_file where unlabeled, DATABIN is forced to
/// system_file, and the database is made inaccessible to others.
pub(crate) fn restorecon() {
    // Writing a context to /sys/fs/selinux/context validates it against the
    // loaded policy; only apply ADB_CON if the kernel accepts it
    if let Ok(mut file) = cstr!("/sys/fs/selinux/context")
        .open(OFlag::O_WRONLY | OFlag::O_CLOEXEC)
        .log()
        && file.write_all(ADB_CON.as_bytes_with_nul()).is_ok()
    {
        cstr!(SECURE_DIR).set_secontext(ADB_CON).log_ok();
    }
    let mut path = cstr::buf::default();
    let mut con = cstr::buf::new::<1024>();
    path.push_str(MODULEROOT);
    path.set_secontext(SYSTEM_CON).log_ok();
    restore_syscon_from_unlabeled(&mut path, &mut con).log_ok();
    path.clear();
    path.push_str(DATABIN);
    restore_syscon(&mut path).log_ok();
    // Lock down the database file permissions entirely
    unsafe { libc::chmod(cstr!(MAGISKDB).as_ptr(), 0o000) };
}
/// Restore contexts inside the Magisk tmp directory: the directory itself
/// (rootfs context when mounted at /sbin, otherwise just mode 0711), every
/// non-symlink entry to system_file, and the log pipe to its dedicated type.
pub(crate) fn restore_tmpcon() -> LoggedResult<()> {
    let tmp = get_magisk_tmp();
    if tmp == "/sbin" {
        tmp.set_secontext(ROOT_CON)?;
    } else {
        unsafe { libc::chmod(tmp.as_ptr(), 0o711) };
    }
    let mut path = cstr::buf::default();
    let mut dir = Directory::open(tmp)?;
    while let Some(ref e) = dir.read()? {
        // Leave symlinks alone; labeling them is unnecessary here
        if !e.is_symlink() {
            e.resolve_path(&mut path)?;
            path.set_secontext(SYSTEM_CON).log_ok();
        }
    }
    path.clear();
    path.append_path(tmp).append_path(LOG_PIPE);
    path.set_secontext(cstr!(MAGISK_LOG_CON))?;
    Ok(())
}
/// Read the selinux context of `path` (without following a final symlink)
/// into the caller-provided byte buffer. Returns true on success.
pub(crate) fn lgetfilecon(path: &Utf8CStr, con: &mut [u8]) -> bool {
    let mut out = cstr::buf::wrap(con);
    path.get_secontext(&mut out).is_ok()
}
/// Set the selinux context of `path`, following a final symlink.
/// Returns true on success.
pub(crate) fn setfilecon(path: &Utf8CStr, con: &Utf8CStr) -> bool {
    let target = path.follow_link();
    target.set_secontext(con).is_ok()
}

240
native/src/core/socket.rs Normal file
View file

@ -0,0 +1,240 @@
use base::{ReadExt, ResultExt, WriteExt, libc, warn};
use bytemuck::{Zeroable, bytes_of, bytes_of_mut};
use std::io;
use std::io::{ErrorKind, IoSlice, IoSliceMut, Read, Write};
use std::mem::ManuallyDrop;
use std::os::fd::{FromRawFd, IntoRawFd, OwnedFd, RawFd};
use std::os::unix::net::{AncillaryData, SocketAncillary, UnixStream};
/// A value that can be serialized onto a stream using Magisk's IPC wire
/// format (little-endian PODs; length-prefixed sequences).
pub trait Encodable {
    fn encode(&self, w: &mut impl Write) -> io::Result<()>;
}
/// A value that can be deserialized from the same wire format.
pub trait Decodable: Sized + Encodable {
    fn decode(r: &mut impl Read) -> io::Result<Self>;
}
// Generate Encodable/Decodable for plain-old-data types by reading/writing
// their raw bytes via read_pod/write_pod.
macro_rules! impl_pod_encodable {
    ($($t:ty)*) => ($(
        impl Encodable for $t {
            #[inline(always)]
            fn encode(&self, w: &mut impl Write) -> io::Result<()> {
                w.write_pod(self)
            }
        }
        impl Decodable for $t {
            #[inline(always)]
            fn decode(r: &mut impl Read) -> io::Result<Self> {
                let mut val = Self::zeroed();
                r.read_pod(&mut val)?;
                Ok(val)
            }
        }
    )*)
}
impl_pod_encodable! { u8 u32 i32 usize }
impl Encodable for bool {
    /// Encoded as a single byte: 1 for true, 0 for false.
    #[inline(always)]
    fn encode(&self, w: &mut impl Write) -> io::Result<()> {
        let byte: u8 = if *self { 1 } else { 0 };
        byte.encode(w)
    }
}
impl Decodable for bool {
    /// Any non-zero byte decodes to true.
    #[inline(always)]
    fn decode(r: &mut impl Read) -> io::Result<Self> {
        let byte = u8::decode(r)?;
        Ok(byte != 0)
    }
}
// impl<E: Encodable, T: AsRef<E>> Encodable for T
// (written as a macro because a blanket impl would conflict)
macro_rules! impl_encodable_as_ref {
    ($( ($t:ty, $e:ty, $($g:tt)*) )*) => ($(
        impl<$($g)*> Encodable for $t {
            #[inline(always)]
            fn encode(&self, w: &mut impl Write) -> io::Result<()> {
                AsRef::<$e>::as_ref(self).encode(w)
            }
        }
    )*)
}
impl_encodable_as_ref! {
    (String, str,)
    (Vec<T>, [T], T: Encodable)
}
// Slices encode as an i32 length prefix followed by each element
impl<T: Encodable> Encodable for [T] {
    fn encode(&self, w: &mut impl Write) -> io::Result<()> {
        (self.len() as i32).encode(w)?;
        self.iter().try_for_each(|e| e.encode(w))
    }
}
impl<T: Decodable> Decodable for Vec<T> {
    /// Decode an i32 length prefix followed by that many elements.
    /// NOTE(review): `len` comes off the wire and is used for
    /// with_capacity — confirm peers are trusted (local root IPC).
    fn decode(r: &mut impl Read) -> io::Result<Self> {
        let len = i32::decode(r)?;
        let mut val = Vec::with_capacity(len as usize);
        for _ in 0..len {
            val.push(T::decode(r)?);
        }
        Ok(val)
    }
}
impl Encodable for str {
    /// Encoded as an i32 byte-length prefix followed by the UTF-8 bytes.
    fn encode(&self, w: &mut impl Write) -> io::Result<()> {
        (self.len() as i32).encode(w)?;
        w.write_all(self.as_bytes())
    }
}
impl Decodable for String {
    /// Decode the length prefix, then read exactly that many bytes as UTF-8.
    fn decode(r: &mut impl Read) -> io::Result<String> {
        let len = i32::decode(r)?;
        let mut val = String::with_capacity(len as usize);
        r.take(len as u64).read_to_string(&mut val)?;
        Ok(val)
    }
}
/// Convenience extension: `reader.read_decodable::<T>()` instead of
/// `T::decode(reader)`.
pub trait IpcRead {
    fn read_decodable<E: Decodable>(&mut self) -> io::Result<E>;
}
impl<T: Read> IpcRead for T {
    #[inline(always)]
    fn read_decodable<E: Decodable>(&mut self) -> io::Result<E> {
        E::decode(self)
    }
}
/// Convenience extension: `writer.write_encodable(&val)` instead of
/// `val.encode(writer)`.
pub trait IpcWrite {
    fn write_encodable<E: Encodable + ?Sized>(&mut self, val: &E) -> io::Result<()>;
}
impl<T: Write> IpcWrite for T {
    #[inline(always)]
    fn write_encodable<E: Encodable + ?Sized>(&mut self, val: &E) -> io::Result<()> {
        val.encode(self)
    }
}
/// File-descriptor passing over a Unix socket. The wire format is an i32
/// fd count in the normal data stream, with the fds themselves attached
/// as SCM_RIGHTS ancillary data on the same message.
pub trait UnixSocketExt {
    fn send_fds(&mut self, fd: &[RawFd]) -> io::Result<()>;
    fn recv_fd(&mut self) -> io::Result<Option<OwnedFd>>;
    fn recv_fds(&mut self) -> io::Result<Vec<OwnedFd>>;
}
impl UnixSocketExt for UnixStream {
    /// Send fds as SCM_RIGHTS ancillary data, preceded by an i32 count in
    /// the data stream. An empty slice sends just a 0 count (no ancillary).
    fn send_fds(&mut self, fds: &[RawFd]) -> io::Result<()> {
        match fds.len() {
            0 => self.write_pod(&0)?,
            len => {
                // 4k buffer is reasonable enough
                let mut buf = [0u8; 4096];
                let mut ancillary = SocketAncillary::new(&mut buf);
                if !ancillary.add_fds(fds) {
                    return Err(ErrorKind::OutOfMemory.into());
                }
                // The count travels in the normal data stream alongside
                // the ancillary payload
                let fd_count = len as i32;
                let iov = IoSlice::new(bytes_of(&fd_count));
                self.send_vectored_with_ancillary(&[iov], &mut ancillary)?;
            }
        };
        Ok(())
    }
    /// Receive exactly one fd (or None when the sender sent a 0 count).
    /// Extra fds in the same message are closed and a warning is logged.
    fn recv_fd(&mut self) -> io::Result<Option<OwnedFd>> {
        let mut fd_count = 0;
        // Peek first so a 0-count message can be consumed without setting
        // up ancillary buffers
        self.peek(bytes_of_mut(&mut fd_count))?;
        if fd_count < 1 {
            // Actually consume the data
            self.read_pod(&mut fd_count)?;
            return Ok(None);
        }
        if fd_count > 1 {
            warn!(
                "Received unexpected number of fds: expected=1 actual={}",
                fd_count
            );
        }
        // 4k buffer is reasonable enough
        let mut buf = [0u8; 4096];
        let mut ancillary = SocketAncillary::new(&mut buf);
        let iov = IoSliceMut::new(bytes_of_mut(&mut fd_count));
        self.recv_vectored_with_ancillary(&mut [iov], &mut ancillary)?;
        for msg in ancillary.messages().flatten() {
            if let AncillaryData::ScmRights(mut scm_rights) = msg {
                // We only want the first one
                let fd = if let Some(fd) = scm_rights.next() {
                    unsafe { OwnedFd::from_raw_fd(fd) }
                } else {
                    return Ok(None);
                };
                // Close all others
                for fd in scm_rights {
                    unsafe { libc::close(fd) };
                }
                return Ok(Some(fd));
            }
        }
        Ok(None)
    }
    /// Receive all fds attached to the next message. Logs a warning if the
    /// transmitted count disagrees with the number of fds received.
    fn recv_fds(&mut self) -> io::Result<Vec<OwnedFd>> {
        let mut fd_count = 0;
        // 4k buffer is reasonable enough
        let mut buf = [0u8; 4096];
        let mut ancillary = SocketAncillary::new(&mut buf);
        let iov = IoSliceMut::new(bytes_of_mut(&mut fd_count));
        self.recv_vectored_with_ancillary(&mut [iov], &mut ancillary)?;
        let mut fds: Vec<OwnedFd> = Vec::new();
        for msg in ancillary.messages().flatten() {
            if let AncillaryData::ScmRights(scm_rights) = msg {
                fds = scm_rights
                    .map(|fd| unsafe { OwnedFd::from_raw_fd(fd) })
                    .collect();
            }
        }
        if fd_count as usize != fds.len() {
            warn!(
                "Received unexpected number of fds: expected={} actual={}",
                fd_count,
                fds.len()
            );
        }
        Ok(fds)
    }
}
pub fn send_fd(socket: RawFd, fd: RawFd) -> bool {
let mut socket = ManuallyDrop::new(unsafe { UnixStream::from_raw_fd(socket) });
if fd < 0 {
socket.send_fds(&[]).log().is_ok()
} else {
socket.send_fds(&[fd]).log().is_ok()
}
}
/// FFI-friendly wrapper: receive a single fd from the raw `socket`.
/// Returns -1 when nothing was received or on error.
pub fn recv_fd(socket: RawFd) -> RawFd {
    // ManuallyDrop: borrow the caller's socket without closing it on drop.
    let mut socket = ManuallyDrop::new(unsafe { UnixStream::from_raw_fd(socket) });
    match socket.recv_fd().log() {
        Ok(Some(fd)) => fd.into_raw_fd(),
        _ => -1,
    }
}
/// FFI-friendly wrapper: receive all fds from the raw `socket` and hand
/// ownership of each to the caller. Returns an empty Vec on error.
pub fn recv_fds(socket: RawFd) -> Vec<RawFd> {
    // ManuallyDrop: borrow the caller's socket without closing it on drop.
    let mut socket = ManuallyDrop::new(unsafe { UnixStream::from_raw_fd(socket) });
    let fds = socket.recv_fds().log().unwrap_or_default();
    // Convert element-wise instead of transmuting the Vec: Vec's internal
    // layout is unspecified, so `transmute::<Vec<OwnedFd>, Vec<RawFd>>` is
    // undefined behavior even though the element types share a layout.
    // `into_raw_fd` also correctly relinquishes ownership so the fds are
    // not closed here.
    fds.into_iter().map(IntoRawFd::into_raw_fd).collect()
}

347
native/src/core/sqlite.cpp Normal file
View file

@ -0,0 +1,347 @@
#include <dlfcn.h>
#include <consts.hpp>
#include <base.hpp>
#include <sqlite.hpp>
using namespace std;
// Database schema version; must match the migration chain in
// open_and_init_db below.
#define DB_VERSION 12
#define DB_VERSION_STR "12"
// SQLite APIs
// These are resolved at runtime with dlsym from the platform's
// libsqlite.so (see load_sqlite) so we never link SQLite directly.
static int (*sqlite3_open_v2)(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs);
static int (*sqlite3_close)(sqlite3 *db);
const char *(*sqlite3_errstr)(int);
static int (*sqlite3_prepare_v2)(sqlite3 *db, const char *zSql, int nByte, sqlite3_stmt **ppStmt, const char **pzTail);
static int (*sqlite3_bind_parameter_count)(sqlite3_stmt*);
static int (*sqlite3_bind_int64)(sqlite3_stmt*, int, int64_t);
static int (*sqlite3_bind_text)(sqlite3_stmt*,int,const char*,int,void(*)(void*));
static int (*sqlite3_column_count)(sqlite3_stmt *pStmt);
static const char *(*sqlite3_column_name)(sqlite3_stmt*, int N);
static const char *(*sqlite3_column_text)(sqlite3_stmt*, int iCol);
static int (*sqlite3_column_int)(sqlite3_stmt*, int iCol);
static int (*sqlite3_step)(sqlite3_stmt*);
static int (*sqlite3_finalize)(sqlite3_stmt *pStmt);
// Internal Android linker APIs
// Used to inject APEX library paths so libsqlite.so is found on
// Android 10+ where it lives inside an APEX module.
static void (*android_get_LD_LIBRARY_PATH)(char *buffer, size_t buffer_size);
static void (*android_update_LD_LIBRARY_PATH)(const char *ld_library_path);
// Fail the enclosing (bool-returning) function when a dl* call fails.
#define DLERR(ptr) if (!(ptr)) { \
    LOGE("db: %s\n", dlerror()); \
    return false; \
}
// Resolve symbol `arg` from `handle` into the same-named function pointer.
#define DLOAD(handle, arg) {\
    auto f = dlsym(handle, #arg); \
    DLERR(f) \
    *(void **) &(arg) = f; \
}
#ifdef __LP64__
constexpr char apex_path[] = "/apex/com.android.runtime/lib64:/apex/com.android.art/lib64:/apex/com.android.i18n/lib64:";
#else
constexpr char apex_path[] = "/apex/com.android.runtime/lib:/apex/com.android.art/lib:/apex/com.android.i18n/lib:";
#endif
// Lazily dlopen libsqlite.so and resolve all required symbols.
// The result is cached in dl_init: 1 = loaded, -1 = failed permanently.
// NOTE(review): dl_init is a plain static int — confirm this function is
// only reached from one thread or under an external lock.
static bool load_sqlite() {
    static int dl_init = 0;
    if (dl_init)
        return dl_init > 0;
    dl_init = -1;
    auto sqlite = dlopen("libsqlite.so", RTLD_LAZY);
    if (!sqlite) {
        // Should only happen on Android 10+
        auto dl = dlopen("libdl_android.so", RTLD_LAZY);
        DLERR(dl);
        DLOAD(dl, android_get_LD_LIBRARY_PATH);
        DLOAD(dl, android_update_LD_LIBRARY_PATH);
        // Inject APEX into LD_LIBRARY_PATH
        char ld_path[4096];
        // apex_path's terminating NUL ends up at ld_path + len and is
        // overwritten by the existing path fetched right after it.
        memcpy(ld_path, apex_path, sizeof(apex_path));
        constexpr int len = sizeof(apex_path) - 1;
        android_get_LD_LIBRARY_PATH(ld_path + len, sizeof(ld_path) - len);
        android_update_LD_LIBRARY_PATH(ld_path);
        sqlite = dlopen("libsqlite.so", RTLD_LAZY);
        // Revert LD_LIBRARY_PATH just in case
        android_update_LD_LIBRARY_PATH(ld_path + len);
    }
    DLERR(sqlite);
    DLOAD(sqlite, sqlite3_open_v2);
    DLOAD(sqlite, sqlite3_close);
    DLOAD(sqlite, sqlite3_errstr);
    DLOAD(sqlite, sqlite3_prepare_v2);
    DLOAD(sqlite, sqlite3_bind_parameter_count);
    DLOAD(sqlite, sqlite3_bind_int64);
    DLOAD(sqlite, sqlite3_bind_text);
    DLOAD(sqlite, sqlite3_step);
    DLOAD(sqlite, sqlite3_column_count);
    DLOAD(sqlite, sqlite3_column_name);
    DLOAD(sqlite, sqlite3_column_text);
    DLOAD(sqlite, sqlite3_column_int);
    DLOAD(sqlite, sqlite3_finalize);
    dl_init = 1;
    return true;
}
using StringVec = rust::Vec<rust::String>;
// The opaque callback pointer types shared with Rust actually carry
// these signatures; they are cast back before invocation.
using sql_bind_callback_real = int(*)(void*, int, sqlite3_stmt*);
using sql_exec_callback_real = void(*)(void*, StringSlice, sqlite3_stmt*);
// Propagate any non-OK SQLite return code to the caller.
#define sql_chk(fn, ...) if (int rc = fn(__VA_ARGS__); rc != SQLITE_OK) return rc
// Exports to Rust
// Execute every statement in zSql (which may contain multiple
// ';'-separated statements). bind_cb is invoked once per bindable
// parameter of each statement; exec_cb once per result row.
extern "C" int sql_exec_impl(
        sqlite3 *db, rust::Str zSql,
        sql_bind_callback bind_cb = nullptr, void *bind_cookie = nullptr,
        sql_exec_callback exec_cb = nullptr, void *exec_cookie = nullptr) {
    const char *sql = zSql.begin();
    // unique_ptr guarantees the current statement is finalized on every
    // path out of this function.
    unique_ptr<sqlite3_stmt, decltype(sqlite3_finalize)> stmt(nullptr, sqlite3_finalize);
    while (sql != zSql.end()) {
        // Step 1: prepare statement
        {
            sqlite3_stmt *st = nullptr;
            sql_chk(sqlite3_prepare_v2, db, sql, zSql.end() - sql, &st, &sql);
            // A null statement means the segment held no executable SQL
            if (st == nullptr) continue;
            stmt.reset(st);
        }
        // Step 2: bind arguments
        if (bind_cb) {
            if (int count = sqlite3_bind_parameter_count(stmt.get())) {
                auto real_cb = reinterpret_cast<sql_bind_callback_real>(bind_cb);
                // SQLite parameter indices are 1-based
                for (int i = 1; i <= count; ++i) {
                    sql_chk(real_cb, bind_cookie, i, stmt.get());
                }
            }
        }
        // Step 3: execute
        bool first = true;
        StringVec columns;
        for (;;) {
            int rc = sqlite3_step(stmt.get());
            if (rc == SQLITE_DONE) break;
            if (rc != SQLITE_ROW) return rc;
            if (exec_cb == nullptr) continue;
            // Collect column names lazily, once, on the first row
            if (first) {
                int count = sqlite3_column_count(stmt.get());
                for (int i = 0; i < count; ++i) {
                    columns.emplace_back(sqlite3_column_name(stmt.get(), i));
                }
                first = false;
            }
            auto real_cb = reinterpret_cast<sql_exec_callback_real>(exec_cb);
            real_cb(exec_cookie, StringSlice(columns), stmt.get());
        }
    }
    return SQLITE_OK;
}
// DbValues/DbStatement are opaque wrappers over sqlite3_stmt shared with
// Rust: `this` IS the statement pointer, hence the casts below.
int DbValues::get_int(int index) const {
    return sqlite3_column_int((sqlite3_stmt*) this, index);
}
const char *DbValues::get_text(int index) const {
    return sqlite3_column_text((sqlite3_stmt*) this, index);
}
int DbStatement::bind_int64(int index, int64_t val) {
    return sqlite3_bind_int64(reinterpret_cast<sqlite3_stmt*>(this), index, val);
}
int DbStatement::bind_text(int index, rust::Str val) {
    // nullptr destructor == SQLITE_STATIC: the bound string must stay
    // alive until the statement is done with it.
    return sqlite3_bind_text(reinterpret_cast<sqlite3_stmt*>(this), index, val.data(), val.size(), nullptr);
}
// Like sql_chk, but log the SQLite error and return `ret` instead of
// the raw code.
#define sql_chk_log_ret(ret, fn, ...) if (int rc = fn(__VA_ARGS__); rc != SQLITE_OK) { \
    LOGE("sqlite3(line:%d): %s\n", __LINE__, sqlite3_errstr(rc)); \
    return ret; \
}
#define sql_chk_log(fn, ...) sql_chk_log_ret(nullptr, fn, __VA_ARGS__)
// Open the Magisk database, creating it if missing, and migrate its
// schema up to DB_VERSION. Returns an owned connection, or nullptr on
// any error (caller is responsible for sqlite3_close).
sqlite3 *open_and_init_db() {
    if (!load_sqlite()) {
        LOGE("sqlite3: Cannot load libsqlite.so\n");
        return nullptr;
    }
    unique_ptr<sqlite3, decltype(sqlite3_close)> db(nullptr, sqlite3_close);
    {
        sqlite3 *sql;
        // We open the connection with SQLITE_OPEN_NOMUTEX because we are guarding it ourselves
        sql_chk_log(sqlite3_open_v2, MAGISKDB, &sql,
                    SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX, nullptr);
        db.reset(sql);
    }
    int ver = 0;
    bool upgrade = false;
    auto ver_cb = [](void *ver, auto, const DbValues &values) {
        *static_cast<int *>(ver) = values.get_int(0);
    };
    sql_chk_log(sql_exec_impl, db.get(), "PRAGMA user_version", nullptr, nullptr, ver_cb, &ver);
    if (ver > DB_VERSION) {
        // Don't support downgrading database, delete and retry
        // NOTE(review): recurses after unlink; if unlink keeps failing
        // this could loop — confirm unlink failure is not possible here.
        LOGE("sqlite3: Downgrading database is not supported\n");
        unlink(MAGISKDB);
        return open_and_init_db();
    }
    auto create_policy = [&] {
        return sql_exec_impl(db.get(),
            "CREATE TABLE IF NOT EXISTS policies "
            "(uid INT, policy INT, until INT, logging INT, "
            "notification INT, PRIMARY KEY(uid))");
    };
    auto create_settings = [&] {
        return sql_exec_impl(db.get(),
            "CREATE TABLE IF NOT EXISTS settings "
            "(key TEXT, value INT, PRIMARY KEY(key))");
    };
    auto create_strings = [&] {
        return sql_exec_impl(db.get(),
            "CREATE TABLE IF NOT EXISTS strings "
            "(key TEXT, value TEXT, PRIMARY KEY(key))");
    };
    auto create_denylist = [&] {
        return sql_exec_impl(db.get(),
            "CREATE TABLE IF NOT EXISTS denylist "
            "(package_name TEXT, process TEXT, PRIMARY KEY(package_name, process))");
    };
    // Database changelog:
    //
    // 0 - 6: DB stored in app private data. There are no longer any code in the project to
    //        migrate these data, so no need to take any of these versions into consideration.
    // 7 : create table `hidelist` (process TEXT, PRIMARY KEY(process))
    // 8 : add new column (package_name TEXT) to table `hidelist`
    // 9 : rebuild table `hidelist` to change primary key (PRIMARY KEY(package_name, process))
    // 10: remove table `logs`
    // 11: remove table `hidelist` and create table `denylist` (same data structure)
    // 12: rebuild table `policies` to drop column `package_name`
    if (/* 0, 1, 2, 3, 4, 5, 6 */ ver <= 6) {
        // Fresh (or ancient) database: create the latest schema directly
        sql_chk_log(create_policy);
        sql_chk_log(create_settings);
        sql_chk_log(create_strings);
        sql_chk_log(create_denylist);
        // Directly jump to latest
        ver = DB_VERSION;
        upgrade = true;
    }
    if (ver == 7) {
        sql_chk_log(sql_exec_impl, db.get(),
            "BEGIN TRANSACTION;"
            "ALTER TABLE hidelist RENAME TO hidelist_tmp;"
            "CREATE TABLE IF NOT EXISTS hidelist "
            "(package_name TEXT, process TEXT, PRIMARY KEY(package_name, process));"
            "INSERT INTO hidelist SELECT process as package_name, process FROM hidelist_tmp;"
            "DROP TABLE hidelist_tmp;"
            "COMMIT;");
        // Directly jump to version 9
        ver = 9;
        upgrade = true;
    }
    if (ver == 8) {
        sql_chk_log(sql_exec_impl, db.get(),
            "BEGIN TRANSACTION;"
            "ALTER TABLE hidelist RENAME TO hidelist_tmp;"
            "CREATE TABLE IF NOT EXISTS hidelist "
            "(package_name TEXT, process TEXT, PRIMARY KEY(package_name, process));"
            "INSERT INTO hidelist SELECT * FROM hidelist_tmp;"
            "DROP TABLE hidelist_tmp;"
            "COMMIT;");
        ver = 9;
        upgrade = true;
    }
    if (ver == 9) {
        sql_chk_log(sql_exec_impl, db.get(), "DROP TABLE IF EXISTS logs", nullptr, nullptr);
        ver = 10;
        upgrade = true;
    }
    if (ver == 10) {
        sql_chk_log(sql_exec_impl, db.get(),
            "DROP TABLE IF EXISTS hidelist;"
            "DELETE FROM settings WHERE key='magiskhide';");
        sql_chk_log(create_denylist);
        ver = 11;
        upgrade = true;
    }
    if (ver == 11) {
        sql_chk_log(sql_exec_impl, db.get(),
            "BEGIN TRANSACTION;"
            "ALTER TABLE policies RENAME TO policies_tmp;"
            "CREATE TABLE IF NOT EXISTS policies "
            "(uid INT, policy INT, until INT, logging INT, "
            "notification INT, PRIMARY KEY(uid));"
            "INSERT INTO policies "
            "SELECT uid, policy, until, logging, notification FROM policies_tmp;"
            "DROP TABLE policies_tmp;"
            "COMMIT;");
        ver = 12;
        upgrade = true;
    }
    if (upgrade) {
        // Set version
        sql_chk_log(sql_exec_impl, db.get(), "PRAGMA user_version=" DB_VERSION_STR);
    }
    return db.release();
}
// Exported from Rust
extern "C" int sql_exec_rs(
        rust::Str zSql,
        sql_bind_callback bind_cb, void *bind_cookie,
        sql_exec_callback exec_cb, void *exec_cookie);
// C++ entry point for running SQL with optional bound arguments and an
// optional per-row callback. Returns false after logging on any SQLite
// error (see sql_chk_log_ret).
bool db_exec(const char *sql, DbArgs args, db_exec_callback exec_fn) {
    using db_bind_callback = std::function<int(int, DbStatement&)>;
    db_bind_callback bind_fn = {};
    sql_bind_callback bind_cb = nullptr;
    if (!args.empty()) {
        // DbArgs is itself callable (see operator() below); wrap it so
        // the plain-C callback can reach it through the cookie pointer.
        bind_fn = std::ref(args);
        bind_cb = [](void *v, int index, DbStatement &stmt) -> int {
            auto fn = static_cast<db_bind_callback*>(v);
            return fn->operator()(index, stmt);
        };
    }
    sql_exec_callback exec_cb = nullptr;
    if (exec_fn) {
        exec_cb = [](void *v, StringSlice columns, const DbValues &values) {
            auto fn = static_cast<db_exec_callback*>(v);
            fn->operator()(columns, values);
        };
    }
    sql_chk_log_ret(false, sql_exec_rs, sql, bind_cb, &bind_fn, exec_cb, &exec_fn);
    return true;
}
// Bind the next queued argument to statement parameter `index`.
// When the argument list is exhausted, returns SQLITE_OK without
// binding (the parameter stays at SQLite's default).
int DbArgs::operator()(int index, DbStatement &stmt) {
    if (curr < args.size()) {
        const auto &arg = args[curr++];
        switch (arg.type) {
            case DbArg::INT:
                return stmt.bind_int64(index, arg.int_val);
            case DbArg::TEXT:
                return stmt.bind_text(index, arg.str_val);
        }
    }
    return SQLITE_OK;
}

View file

@ -0,0 +1,313 @@
use super::SuInfo;
use super::db::RootSettings;
use crate::consts::{INTERNAL_DIR, MAGISK_FILE_CON};
use crate::daemon::to_user_id;
use crate::ffi::{SuPolicy, SuRequest, get_magisk_tmp};
use crate::socket::IpcRead;
use ExtraVal::{Bool, Int, IntList, Str};
use base::{
BytesExt, FileAttr, LibcReturn, LoggedResult, ResultExt, Utf8CStrBuf, cstr, fork_dont_care,
};
use nix::fcntl::OFlag;
use nix::poll::{PollFd, PollFlags, PollTimeout};
use num_traits::AsPrimitive;
use std::fmt::Write;
use std::fs::File;
use std::os::fd::AsFd;
use std::os::unix::net::UCred;
use std::process::{Command, exit};
/// One key/value pair to forward to the Magisk app, either as an
/// activity intent extra or a content-provider call binding.
struct Extra<'a> {
    key: &'static str,
    value: ExtraVal<'a>,
}
/// Supported value types for an [`Extra`].
enum ExtraVal<'a> {
    Int(i32),
    Bool(bool),
    Str(&'a str),
    IntList(&'a [u32]),
}
impl Extra<'_> {
    /// Append this extra as `am` intent arguments
    /// (`--ei`/`--ez`/`--es` KEY VALUE). Int lists are rendered as a
    /// comma-joined string.
    fn add_intent(&self, cmd: &mut Command) {
        let (flag, rendered) = match self.value {
            ExtraVal::Int(i) => ("--ei", i.to_string()),
            ExtraVal::Bool(b) => ("--ez", b.to_string()),
            ExtraVal::Str(s) => ("--es", s.to_string()),
            ExtraVal::IntList(list) => {
                // Join as "a,b,c" — drop the trailing comma.
                let mut joined = String::new();
                for i in list {
                    write!(&mut joined, "{i},").unwrap();
                }
                joined.pop();
                ("--es", joined)
            }
        };
        cmd.args([flag, self.key, &rendered]);
    }
    /// Append this extra as a `content` tool `--extra key:type:value`
    /// binding, escaping '\' and ':' inside string values.
    fn add_bind(&self, cmd: &mut Command) {
        let encoded = match self.value {
            ExtraVal::Int(i) => format!("{}:i:{}", self.key, i),
            ExtraVal::Bool(b) => format!("{}:b:{}", self.key, b),
            ExtraVal::Str(s) => {
                // Escape backslashes first, then colons.
                let escaped = s.replace('\\', "\\\\").replace(':', "\\:");
                format!("{}:s:{}", self.key, escaped)
            }
            ExtraVal::IntList(list) => {
                let mut tmp = format!("{}:s:", self.key);
                if !list.is_empty() {
                    for i in list {
                        write!(&mut tmp, "{i},").unwrap();
                    }
                    tmp.pop();
                }
                tmp
            }
        };
        cmd.args(["--extra", &encoded]);
    }
    /// Like [`add_bind`], but strings are passed through unescaped for
    /// the older `content` tool syntax.
    fn add_bind_legacy(&self, cmd: &mut Command) {
        if let ExtraVal::Str(s) = self.value {
            let encoded = format!("{}:s:{}", self.key, s);
            cmd.args(["--extra", &encoded]);
        } else {
            self.add_bind(cmd);
        }
    }
}
/// Everything needed to communicate one su request to the Magisk app:
/// the requesting client's credentials, the parsed request, the cached
/// per-uid info, and the (mutable) root settings to fill in.
pub(super) struct SuAppContext<'a> {
    pub(super) cred: UCred,
    pub(super) request: &'a SuRequest,
    pub(super) info: &'a SuInfo,
    pub(super) settings: &'a mut RootSettings,
    // Android SDK level; selects provider binding syntax in exec_cmd
    pub(super) sdk_int: i32,
}
impl SuAppContext<'_> {
    /// Deliver `action` with `extras` to the Magisk app. When
    /// `use_provider` is set, first try the app's content provider;
    /// fall back to launching an activity via `am start` otherwise.
    fn exec_cmd(&self, action: &'static str, extras: &[Extra], use_provider: bool) {
        let user = to_user_id(self.info.eval_uid);
        let user = user.to_string();
        if use_provider {
            let provider = format!("content://{}.provider", self.info.mgr_pkg);
            let mut cmd = Command::new("/system/bin/app_process");
            cmd.args([
                "/system/bin",
                "com.android.commands.content.Content",
                "call",
                "--uri",
                &provider,
                "--user",
                &user,
                "--method",
                action,
            ]);
            // SDK >= 30 accepts the escaped --extra binding syntax
            if self.sdk_int >= 30 {
                extras.iter().for_each(|e| e.add_bind(&mut cmd))
            } else {
                extras.iter().for_each(|e| e.add_bind_legacy(&mut cmd))
            }
            cmd.env("CLASSPATH", "/system/framework/content.jar");
            // Success is detected by the absence of "Error" in output
            if let Ok(output) = cmd.output()
                && !output.stderr.contains(b"Error")
                && !output.stdout.contains(b"Error")
            {
                // The provider call succeed
                return;
            }
        }
        let mut cmd = Command::new("/system/bin/app_process");
        cmd.args([
            "/system/bin",
            "com.android.commands.am.Am",
            "start",
            "-p",
            &self.info.mgr_pkg,
            "--user",
            &user,
            "-a",
            "android.intent.action.VIEW",
            "-f",
            // FLAG_ACTIVITY_NEW_TASK|FLAG_ACTIVITY_MULTIPLE_TASK|
            // FLAG_ACTIVITY_EXCLUDE_FROM_RECENTS|FLAG_INCLUDE_STOPPED_PACKAGES
            "0x18800020",
            "--es",
            "action",
            action,
        ]);
        extras.iter().for_each(|e| e.add_intent(&mut cmd));
        cmd.env("CLASSPATH", "/system/framework/am.jar");
        // Sometimes `am start` will fail, keep trying until it works
        loop {
            if let Ok(output) = cmd.output()
                && !output.stdout.is_empty()
            {
                break;
            }
        }
    }
    /// Ask the app for user consent: create a FIFO the app writes the
    /// decision into, launch the "request" action, then read the chosen
    /// policy back. Any failure or timeout resolves to Deny.
    fn app_request(&mut self) {
        let mut fifo = cstr::buf::new::<64>();
        fifo.write_fmt(format_args!(
            "{}/{}/su_request_{}",
            get_magisk_tmp(),
            INTERNAL_DIR,
            self.cred.pid.unwrap_or(-1)
        ))
        .ok();
        let fd: LoggedResult<File> = try {
            // FIFO is owned by the manager app so only it can respond
            let mut attr = FileAttr::new();
            attr.st.st_mode = 0o600;
            attr.st.st_uid = self.info.mgr_uid.as_();
            attr.st.st_gid = self.info.mgr_uid.as_();
            attr.con.push_str(MAGISK_FILE_CON);
            fifo.mkfifo(0o600)?;
            fifo.set_attr(&attr)?;
            let extras = [
                Extra {
                    key: "fifo",
                    value: Str(&fifo),
                },
                Extra {
                    key: "uid",
                    value: Int(self.info.eval_uid),
                },
                Extra {
                    key: "pid",
                    value: Int(self.cred.pid.unwrap_or(-1)),
                },
            ];
            self.exec_cmd("request", &extras, false);
            // Open with O_RDWR to prevent FIFO open block
            let fd = fifo.open(OFlag::O_RDWR | OFlag::O_CLOEXEC)?;
            let mut pfd = [PollFd::new(fd.as_fd(), PollFlags::POLLIN)];
            // Wait for data input for at most 70 seconds
            nix::poll::poll(&mut pfd, PollTimeout::try_from(70 * 1000).unwrap())
                .check_os_err("poll", None, None)?;
            fd
        };
        fifo.remove().log_ok();
        if let Ok(mut fd) = fd {
            // The app writes the policy as a big-endian i32
            self.settings.policy = SuPolicy {
                repr: fd
                    .read_decodable::<i32>()
                    .log()
                    .map(i32::from_be)
                    .unwrap_or(SuPolicy::Deny.repr),
            };
        } else {
            self.settings.policy = SuPolicy::Deny;
        };
    }
    /// Fire the "notify" action so the app can show a toast for this
    /// su invocation.
    fn app_notify(&self) {
        let extras = [
            Extra {
                key: "from.uid",
                value: Int(self.cred.uid.as_()),
            },
            Extra {
                key: "pid",
                value: Int(self.cred.pid.unwrap_or(-1).as_()),
            },
            Extra {
                key: "policy",
                value: Int(self.settings.policy.repr),
            },
        ];
        self.exec_cmd("notify", &extras, true);
    }
    /// Fire the "log" action with the full request details. The `notify`
    /// extra tells the app whether to also show a notification.
    fn app_log(&self) {
        let command = if self.request.command.is_empty() {
            &self.request.shell
        } else {
            &self.request.command
        };
        let extras = [
            Extra {
                key: "from.uid",
                value: Int(self.cred.uid.as_()),
            },
            Extra {
                key: "to.uid",
                value: Int(self.request.target_uid),
            },
            Extra {
                key: "pid",
                value: Int(self.cred.pid.unwrap_or(-1).as_()),
            },
            Extra {
                key: "policy",
                value: Int(self.settings.policy.repr),
            },
            Extra {
                key: "target",
                value: Int(self.request.target_pid),
            },
            Extra {
                key: "context",
                value: Str(&self.request.context),
            },
            Extra {
                key: "gids",
                value: IntList(&self.request.gids),
            },
            Extra {
                key: "command",
                value: Str(command),
            },
            Extra {
                key: "notify",
                value: Bool(self.settings.notify),
            },
        ];
        self.exec_cmd("log", &extras, true);
    }
    /// Resolve the policy with the app (prompting if undetermined), then
    /// log/notify from a forked child so the caller is not blocked.
    pub(super) fn connect_app(&mut self) {
        // If policy is undetermined, show dialog for user consent
        if self.settings.policy == SuPolicy::Query {
            self.app_request();
        }
        if !self.settings.log && !self.settings.notify {
            return;
        }
        // Parent returns immediately; the child does the slow app calls
        if fork_dont_care() != 0 {
            return;
        }
        // Notify su usage to application
        if self.settings.log {
            self.app_log();
        } else if self.settings.notify {
            self.app_notify();
        }
        exit(0);
    }
}

View file

@ -0,0 +1,292 @@
use super::connect::SuAppContext;
use super::db::RootSettings;
use crate::daemon::{AID_ROOT, AID_SHELL, MagiskD, to_app_id, to_user_id};
use crate::db::{DbSettings, MultiuserMode, RootAccess};
use crate::ffi::{SuPolicy, SuRequest, exec_root_shell};
use crate::socket::IpcRead;
use base::{LoggedResult, ResultExt, WriteExt, debug, error, exit_on_error, libc, warn};
use std::os::fd::IntoRawFd;
use std::os::unix::net::{UCred, UnixStream};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
#[allow(unused_imports)]
use std::os::fd::AsRawFd;
const DEFAULT_SHELL: &str = "/system/bin/sh";
impl Default for SuRequest {
    /// Defaults mirror a plain `su` invocation: target root, run the
    /// system shell interactively, no command, no extra context/gids.
    fn default() -> Self {
        SuRequest {
            target_uid: AID_ROOT,
            target_pid: -1,
            login: false,
            keep_env: false,
            drop_cap: false,
            shell: DEFAULT_SHELL.to_string(),
            command: "".to_string(),
            context: "".to_string(),
            gids: vec![],
        }
    }
}
/// Cached result of evaluating root access for one requesting uid.
pub struct SuInfo {
    // The uid this entry was built for
    pub(super) uid: i32,
    // uid after applying multiuser-mode mapping (see build_su_info)
    pub(super) eval_uid: i32,
    // Manager app package and uid ("" / -1 when not resolved)
    pub(super) mgr_pkg: String,
    pub(super) mgr_uid: i32,
    cfg: DbSettings,
    access: Mutex<AccessInfo>,
}
/// Root settings paired with their load time, for cache freshness.
struct AccessInfo {
    settings: RootSettings,
    timestamp: Instant,
}
impl Default for SuInfo {
    /// Sentinel values: -1 uids mean "not resolved".
    fn default() -> Self {
        SuInfo {
            uid: -1,
            eval_uid: -1,
            cfg: Default::default(),
            mgr_pkg: Default::default(),
            mgr_uid: -1,
            access: Default::default(),
        }
    }
}
impl Default for AccessInfo {
    /// Default settings stamped with the current time.
    fn default() -> Self {
        AccessInfo {
            settings: Default::default(),
            timestamp: Instant::now(),
        }
    }
}
impl SuInfo {
fn allow(uid: i32) -> SuInfo {
let access = RootSettings {
policy: SuPolicy::Allow,
log: false,
notify: false,
};
SuInfo {
uid,
access: Mutex::new(AccessInfo::new(access)),
..Default::default()
}
}
fn deny(uid: i32) -> SuInfo {
let access = RootSettings {
policy: SuPolicy::Deny,
log: false,
notify: false,
};
SuInfo {
uid,
access: Mutex::new(AccessInfo::new(access)),
..Default::default()
}
}
}
impl AccessInfo {
    /// Wrap freshly-loaded root settings, stamping them with "now".
    fn new(settings: RootSettings) -> AccessInfo {
        AccessInfo {
            timestamp: Instant::now(),
            settings,
        }
    }
    /// Cached settings remain valid for a short window (3 seconds).
    fn is_fresh(&self) -> bool {
        const CACHE_WINDOW: Duration = Duration::from_secs(3);
        self.timestamp.elapsed() < CACHE_WINDOW
    }
    /// Restart the freshness window.
    fn refresh(&mut self) {
        self.timestamp = Instant::now();
    }
}
impl MagiskD {
    /// Handle a single incoming su request on `client`: read the request,
    /// resolve/refresh the policy (prompting the app if needed), then
    /// fork a child to exec the root shell and relay its exit code.
    pub fn su_daemon_handler(&self, mut client: UnixStream, cred: UCred) {
        debug!(
            "su: request from uid=[{}], pid=[{}], client=[{}]",
            cred.uid,
            cred.pid.unwrap_or(-1),
            client.as_raw_fd()
        );
        let mut req = match client.read_decodable::<SuRequest>().log() {
            Ok(req) => req,
            Err(_) => {
                warn!("su: remote process probably died, abort");
                client.write_pod(&SuPolicy::Deny.repr).ok();
                return;
            }
        };
        let info = self.get_su_info(cred.uid as i32);
        {
            // Hold the access lock while interacting with the app so
            // concurrent requests for the same uid serialize here.
            let mut access = info.access.lock().unwrap();
            // Talk to su manager
            let mut app = SuAppContext {
                cred,
                request: &req,
                info: &info,
                settings: &mut access.settings,
                sdk_int: self.sdk_int(),
            };
            app.connect_app();
            // Before unlocking, refresh the timestamp
            access.refresh();
            if access.settings.policy == SuPolicy::Restrict {
                req.drop_cap = true;
            }
            if access.settings.policy == SuPolicy::Deny {
                warn!("su: request rejected ({})", info.uid);
                client.write_pod(&SuPolicy::Deny.repr).ok();
                return;
            }
        }
        // At this point, the root access is granted.
        // Fork a child root process and monitor its exit value.
        let child = unsafe { libc::fork() };
        if child == 0 {
            debug!("su: fork handler");
            // Abort upon any error occurred
            exit_on_error(true);
            // ack
            client.write_pod(&0).ok();
            exec_root_shell(
                client.into_raw_fd(),
                cred.pid.unwrap_or(-1),
                &mut req,
                info.cfg.mnt_ns,
            );
            return;
        }
        if child < 0 {
            error!("su: fork failed, abort");
            return;
        }
        // Wait result
        debug!("su: waiting child pid=[{}]", child);
        let mut status = 0;
        let code = unsafe {
            if libc::waitpid(child, &mut status, 0) > 0 {
                libc::WEXITSTATUS(status)
            } else {
                -1
            }
        };
        debug!("su: return code=[{}]", code);
        client.write_pod(&code).ok();
    }
    /// Return the (possibly cached) SuInfo for `uid`. Root bypasses the
    /// cache; stale entries are rebuilt from the database.
    fn get_su_info(&self, uid: i32) -> Arc<SuInfo> {
        if uid == AID_ROOT {
            return Arc::new(SuInfo::allow(AID_ROOT));
        }
        let cached = self.cached_su_info.load();
        if cached.uid == uid && cached.access.lock().unwrap().is_fresh() {
            return cached;
        }
        let info = self.build_su_info(uid);
        self.cached_su_info.store(info.clone());
        info
    }
    /// Evaluate root access for `uid` against the database settings.
    /// Any database error resolves to deny.
    #[cfg(feature = "su-check-db")]
    fn build_su_info(&self, uid: i32) -> Arc<SuInfo> {
        let result: LoggedResult<Arc<SuInfo>> = try {
            let cfg = self.get_db_settings()?;
            // Check multiuser settings
            let eval_uid = match cfg.multiuser_mode {
                MultiuserMode::OwnerOnly => {
                    if to_user_id(uid) != 0 {
                        return Arc::new(SuInfo::deny(uid));
                    }
                    uid
                }
                // Secondary users inherit the owner's per-app policy
                MultiuserMode::OwnerManaged => to_app_id(uid),
                _ => uid,
            };
            let mut access = RootSettings::default();
            self.get_root_settings(eval_uid, &mut access)?;
            // We need to talk to the manager, get the app info
            let (mgr_uid, mgr_pkg) =
                if access.policy == SuPolicy::Query || access.log || access.notify {
                    self.get_manager(to_user_id(eval_uid), true)
                } else {
                    (-1, String::new())
                };
            // If it's the manager, allow it silently
            if to_app_id(uid) == to_app_id(mgr_uid) {
                return Arc::new(SuInfo::allow(uid));
            }
            // Check su access settings
            match cfg.root_access {
                RootAccess::Disabled => {
                    warn!("Root access is disabled!");
                    return Arc::new(SuInfo::deny(uid));
                }
                RootAccess::AdbOnly => {
                    if uid != AID_SHELL {
                        warn!("Root access limited to ADB only!");
                        return Arc::new(SuInfo::deny(uid));
                    }
                }
                RootAccess::AppsOnly => {
                    if uid == AID_SHELL {
                        warn!("Root access is disabled for ADB!");
                        return Arc::new(SuInfo::deny(uid));
                    }
                }
                _ => {}
            };
            // If still not determined, check if manager exists
            if access.policy == SuPolicy::Query && mgr_uid < 0 {
                return Arc::new(SuInfo::deny(uid));
            }
            // Finally, the SuInfo
            Arc::new(SuInfo {
                uid,
                eval_uid,
                mgr_pkg,
                mgr_uid,
                cfg,
                access: Mutex::new(AccessInfo::new(access)),
            })
        };
        result.unwrap_or(Arc::new(SuInfo::deny(uid)))
    }
    // Builds without database checking always allow (debug/emulator use)
    #[cfg(not(feature = "su-check-db"))]
    fn build_su_info(&self, uid: i32) -> Arc<SuInfo> {
        Arc::new(SuInfo::allow(uid))
    }
}

133
native/src/core/su/db.rs Normal file
View file

@ -0,0 +1,133 @@
use crate::daemon::{
AID_APP_END, AID_APP_START, AID_ROOT, AID_SHELL, MagiskD, to_app_id, to_user_id,
};
use crate::db::DbArg::Integer;
use crate::db::{MultiuserMode, RootAccess, SqlTable, SqliteResult, SqliteReturn};
use crate::ffi::{DbValues, SuPolicy};
use base::ResultExt;
impl Default for SuPolicy {
    /// Undetermined requests start as `Query` (ask the user).
    fn default() -> Self {
        SuPolicy::Query
    }
}
/// Per-uid root access settings, loaded from the `policies` table.
#[derive(Default)]
pub struct RootSettings {
    pub policy: SuPolicy,
    // maps to the `logging` column
    pub log: bool,
    // maps to the `notification` column
    pub notify: bool,
}
impl SqlTable for RootSettings {
    /// Populate fields from a single `policies` row; columns that are
    /// not recognized are ignored.
    fn on_row(&mut self, columns: &[String], values: &DbValues) {
        for idx in 0..columns.len() {
            let val = values.get_int(idx as i32);
            match columns[idx].as_str() {
                "policy" => self.policy.repr = val,
                "logging" => self.log = val != 0,
                "notification" => self.notify = val != 0,
                _ => {}
            }
        }
    }
}
/// Row collector for queries that return a single `uid` column.
struct UidList(Vec<i32>);
impl SqlTable for UidList {
    fn on_row(&mut self, _: &[String], values: &DbValues) {
        self.0.push(values.get_int(0));
    }
}
impl MagiskD {
    /// Load the root settings for `uid` from the `policies` table,
    /// honoring the `until` expiry column (0 = never expires).
    pub fn get_root_settings(&self, uid: i32, settings: &mut RootSettings) -> SqliteResult<()> {
        self.db_exec_with_rows(
            "SELECT policy, logging, notification FROM policies \
            WHERE uid=? AND (until=0 OR until>strftime('%s', 'now'))",
            &[Integer(uid as i64)],
            settings,
        )
        .sql_result()
    }
    /// Remove policy rows whose app is no longer installed (app_no not
    /// present in the package list). Best-effort: bails silently on a
    /// query error.
    pub fn prune_su_access(&self) {
        let mut list = UidList(Vec::new());
        if self
            .db_exec_with_rows("SELECT uid FROM policies", &[], &mut list)
            .sql_result()
            .log()
            .is_err()
        {
            return;
        }
        let app_list = self.get_app_no_list();
        let mut rm_uids = Vec::new();
        for uid in list.0 {
            let app_id = to_app_id(uid);
            // Only app uids can correspond to installed packages
            if (AID_APP_START..=AID_APP_END).contains(&app_id) {
                let app_no = app_id - AID_APP_START;
                if !app_list.contains(app_no as usize) {
                    // The app_id is no longer installed
                    rm_uids.push(uid);
                }
            }
        }
        for uid in rm_uids {
            self.db_exec("DELETE FROM policies WHERE uid=?", &[Integer(uid as i64)]);
        }
    }
    /// Whether `uid` currently holds an (unexpired) Allow policy, taking
    /// the global root-access and multiuser settings into account.
    /// Database errors resolve to false.
    pub fn uid_granted_root(&self, mut uid: i32) -> bool {
        if uid == AID_ROOT {
            return true;
        }
        let cfg = match self.get_db_settings().log() {
            Ok(cfg) => cfg,
            Err(_) => return false,
        };
        // Check user root access settings
        match cfg.root_access {
            RootAccess::Disabled => return false,
            RootAccess::AppsOnly => {
                if uid == AID_SHELL {
                    return false;
                }
            }
            RootAccess::AdbOnly => {
                if uid != AID_SHELL {
                    return false;
                }
            }
            _ => {}
        }
        // Check multiuser settings
        match cfg.multiuser_mode {
            MultiuserMode::OwnerOnly => {
                if to_user_id(uid) != 0 {
                    return false;
                }
            }
            // Look up the owner user's policy for this app
            MultiuserMode::OwnerManaged => uid = to_app_id(uid),
            _ => {}
        }
        let mut granted = false;
        let mut output_fn =
            |_: &[String], values: &DbValues| granted = values.get_int(0) == SuPolicy::Allow.repr;
        self.db_exec_with_rows(
            "SELECT policy FROM policies WHERE uid=? AND (until=0 OR until>strftime('%s', 'now'))",
            &[Integer(uid as i64)],
            &mut output_fn,
        );
        granted
    }
}

View file

@ -0,0 +1,7 @@
mod connect;
mod daemon;
mod db;
mod pts;
pub use daemon::SuInfo;
pub use pts::{get_pty_num, pump_tty};

177
native/src/core/su/pts.rs Normal file
View file

@ -0,0 +1,177 @@
use base::{FileOrStd, LibcReturn, LoggedResult, OsResult, ResultExt, libc, warn};
use libc::{STDIN_FILENO, TIOCGWINSZ, TIOCSWINSZ, c_int, winsize};
use nix::fcntl::{OFlag, SpliceFFlags};
use nix::poll::{PollFd, PollFlags, PollTimeout, poll};
use nix::sys::signal::{SigSet, Signal, raise};
use nix::sys::signalfd::{SfdFlags, SignalFd};
use nix::sys::termios::{SetArg, Termios, cfmakeraw, tcgetattr, tcsetattr};
use nix::unistd::pipe2;
use std::fs::File;
use std::io::{Read, Write};
use std::mem::MaybeUninit;
use std::os::fd::{AsFd, AsRawFd, FromRawFd, RawFd};
use std::sync::atomic::{AtomicBool, Ordering};
// Whether tty pumping should attempt zero-copy splice(2); permanently
// flipped to false after the first splice failure (see pump_via_splice).
static SHOULD_USE_SPLICE: AtomicBool = AtomicBool::new(true);
// ioctl request: query the pty slave number from a ptmx master fd.
const TIOCGPTN: u32 = 0x80045430;
unsafe extern "C" {
    // Don't use the declaration from the libc crate as request should be u32 not i32
    fn ioctl(fd: c_int, request: u32, ...) -> i32;
}
/// Query the pty slave number of `fd` (a ptmx master) via TIOCGPTN.
/// Returns -1 (and logs a warning) on failure.
pub fn get_pty_num(fd: i32) -> i32 {
    let mut pty_num = -1i32;
    if unsafe { ioctl(fd, TIOCGPTN, &mut pty_num) } != 0 {
        warn!("Failed to get pty number");
    }
    pty_num
}
/// Copy the window size from stdin's terminal to the ptmx so the child
/// tty tracks the user's terminal size. Best-effort: errors are ignored.
fn sync_winsize(ptmx: i32) {
    let mut ws: winsize = unsafe { std::mem::zeroed() };
    if unsafe { ioctl(STDIN_FILENO, TIOCGWINSZ as u32, &mut ws) } >= 0 {
        unsafe { ioctl(ptmx, TIOCSWINSZ as u32, &ws) };
    }
}
// Thin wrapper over splice(2) that attaches error context.
fn splice(fd_in: impl AsFd, fd_out: impl AsFd, len: usize) -> OsResult<'static, usize> {
    nix::fcntl::splice(fd_in, None, fd_out, None, len, SpliceFFlags::empty())
        .into_os_result("splice", None, None)
}
fn pump_via_copy(mut fd_in: &File, mut fd_out: &File) -> LoggedResult<()> {
let mut buf = MaybeUninit::<[u8; 4096]>::uninit();
let buf = unsafe { buf.assume_init_mut() };
let len = fd_in.read(buf)?;
fd_out.write_all(&buf[..len])?;
Ok(())
}
/// Move one chunk from `fd_in` to `fd_out`, preferring zero-copy
/// splice(2) through `pipe`. On the first splice failure, splice is
/// disabled process-wide and the copy falls back to userspace.
fn pump_via_splice(fd_in: &File, fd_out: &File, pipe: &(File, File)) -> LoggedResult<()> {
    if !SHOULD_USE_SPLICE.load(Ordering::Relaxed) {
        return pump_via_copy(fd_in, fd_out);
    }
    // The pipe capacity is by default 16 pages, let's just use 65536
    let Ok(len) = splice(fd_in, &pipe.1, 65536) else {
        // If splice failed, stop using splice and fallback to userspace copy
        SHOULD_USE_SPLICE.store(false, Ordering::Relaxed);
        return pump_via_copy(fd_in, fd_out);
    };
    if len == 0 {
        return Ok(());
    }
    if splice(&pipe.0, fd_out, len).is_err() {
        // If splice failed, stop using splice and fallback to userspace copy
        // (draining from the pipe, where the data already landed).
        SHOULD_USE_SPLICE.store(false, Ordering::Relaxed);
        return pump_via_copy(&pipe.0, fd_out);
    }
    Ok(())
}
/// Put stdin's terminal into raw mode, returning the previous settings
/// so they can be restored later (see restore_stdin).
fn set_stdin_raw() -> LoggedResult<Termios> {
    let mut term = tcgetattr(FileOrStd::StdIn.as_file())?;
    let old_term = term.clone();
    let old_output_flags = old_term.output_flags;
    cfmakeraw(&mut term);
    // Preserve output_flags, since we are not setting stdout raw
    term.output_flags = old_output_flags;
    // TCSAFLUSH may fail (e.g. with pending input); retry with TCSADRAIN
    tcsetattr(FileOrStd::StdIn.as_file(), SetArg::TCSAFLUSH, &term)
        .or_else(|_| tcsetattr(FileOrStd::StdIn.as_file(), SetArg::TCSADRAIN, &term))
        .check_os_err("tcsetattr", None, None)
        .log_with_msg(|w| w.write_str("Failed to set terminal attributes"))?;
    Ok(old_term)
}
/// Restore stdin's terminal settings saved by set_stdin_raw.
fn restore_stdin(term: Termios) -> LoggedResult<()> {
    tcsetattr(FileOrStd::StdIn.as_file(), SetArg::TCSAFLUSH, &term)
        .or_else(|_| tcsetattr(FileOrStd::StdIn.as_file(), SetArg::TCSADRAIN, &term))
        .check_os_err("tcsetattr", None, None)
        .log_with_msg(|w| w.write_str("Failed to restore terminal attributes"))
}
/// Event loop shuttling data between the ptmx and this process's stdio.
/// When `pump_stdin` is set, stdin is forwarded to the ptmx and SIGWINCH
/// (via a signalfd) triggers a window-size resync. Returns when any
/// polled fd reports error/hangup.
fn pump_tty_impl(ptmx: File, pump_stdin: bool) -> LoggedResult<()> {
    let mut signal_fd: Option<SignalFd> = None;
    let raw_ptmx = ptmx.as_raw_fd();
    let mut raw_sig = -1;
    let mut poll_fds = Vec::with_capacity(3);
    poll_fds.push(PollFd::new(ptmx.as_fd(), PollFlags::POLLIN));
    if pump_stdin {
        // If stdin is tty, we need to monitor SIGWINCH
        // (block it so it is delivered through the signalfd instead)
        let mut set = SigSet::empty();
        set.add(Signal::SIGWINCH);
        set.thread_block()
            .check_os_err("pthread_sigmask", None, None)?;
        let sig = SignalFd::with_flags(&set, SfdFlags::SFD_CLOEXEC)
            .into_os_result("signalfd", None, None)?;
        raw_sig = sig.as_raw_fd();
        signal_fd = Some(sig);
        poll_fds.push(PollFd::new(
            signal_fd.as_ref().unwrap().as_fd(),
            PollFlags::POLLIN,
        ));
        // We also need to pump stdin to ptmx
        poll_fds.push(PollFd::new(
            FileOrStd::StdIn.as_file().as_fd(),
            PollFlags::POLLIN,
        ));
    }
    // Any flag in this list indicates stop polling
    let stop_flags = PollFlags::POLLERR | PollFlags::POLLHUP | PollFlags::POLLNVAL;
    // Open a pipe to bypass userspace copy with splice
    let pipe_fd = pipe2(OFlag::O_CLOEXEC).into_os_result("pipe2", None, None)?;
    let pipe_fd = (File::from(pipe_fd.0), File::from(pipe_fd.1));
    'poll: loop {
        // Wait for event
        poll(&mut poll_fds, PollTimeout::NONE).check_os_err("poll", None, None)?;
        for pfd in &poll_fds {
            // all() == true means every requested event (POLLIN) is ready
            if pfd.all().unwrap_or(false) {
                let raw_fd = pfd.as_fd().as_raw_fd();
                if raw_fd == STDIN_FILENO {
                    pump_via_splice(FileOrStd::StdIn.as_file(), &ptmx, &pipe_fd)?;
                } else if raw_fd == raw_ptmx {
                    pump_via_splice(&ptmx, FileOrStd::StdIn.as_file(), &pipe_fd)?;
                } else if raw_fd == raw_sig {
                    // Terminal resized: propagate then consume the signal
                    sync_winsize(raw_ptmx);
                    signal_fd.as_ref().unwrap().read_signal()?;
                }
            } else if pfd
                .revents()
                .unwrap_or(PollFlags::POLLHUP)
                .intersects(stop_flags)
            {
                // If revents is None or contains any err_flags, stop polling
                break 'poll;
            }
        }
    }
    Ok(())
}
/// Take ownership of the pty master fd and pump it until the peer hangs up.
///
/// When `pump_stdin` is true, the window size is synced onto the pty and
/// stdin is switched to raw mode first; the saved attributes (returned by
/// `set_stdin_raw`) are restored once pumping finishes. Finally a SIGWINCH
/// is raised so the process re-reads the window size after restoration.
pub fn pump_tty(ptmx: RawFd, pump_stdin: bool) {
    let old_term = if pump_stdin {
        sync_winsize(ptmx);
        // Keep the previous terminal attributes so they can be restored;
        // None if switching to raw mode failed.
        set_stdin_raw().ok()
    } else {
        None
    };
    // SAFETY: assumes the caller transfers ownership of `ptmx`; the fd is
    // closed when this File drops — TODO confirm no other owner remains.
    let ptmx = unsafe { File::from_raw_fd(ptmx) };
    pump_tty_impl(ptmx, pump_stdin).ok();
    if let Some(term) = old_term {
        restore_stdin(term).ok();
    }
    raise(Signal::SIGWINCH).ok();
}

463
native/src/core/su/su.cpp Normal file
View file

@ -0,0 +1,463 @@
/*
* Copyright 2017 - 2025, John Wu (@topjohnwu)
* Copyright 2015, Pierre-Hugues Husson <phh@phh.me>
* Copyright 2010, Adam Shanks (@ChainsDD)
* Copyright 2008, Zinx Verituse (@zinxv)
*/
#include <unistd.h>
#include <getopt.h>
#include <fcntl.h>
#include <pwd.h>
#include <linux/securebits.h>
#include <sys/capability.h>
#include <sys/prctl.h>
#include <sched.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <algorithm>
#include <consts.hpp>
#include <base.hpp>
#include <flags.h>
#include <core.hpp>
using namespace std;
#define DEFAULT_SHELL "/system/bin/sh"
// Constants for atty
#define ATTY_IN (1 << 0)
#define ATTY_OUT (1 << 1)
#define ATTY_ERR (1 << 2)
// Signals that should terminate su; the array is zero-terminated for iteration.
int quit_signals[] = { SIGALRM, SIGABRT, SIGHUP, SIGPIPE, SIGQUIT, SIGTERM, SIGINT, 0 };
// Print the usage message and terminate with `status`.
// Explicitly requested help goes to stdout; usage errors go to stderr.
[[noreturn]] static void usage(int status) {
    FILE *stream = (status == EXIT_SUCCESS) ? stdout : stderr;
    fprintf(stream,
    "MagiskSU\n\n"
    "Usage: su [options] [-] [user [argument...]]\n\n"
    "Options:\n"
    "  -c, --command COMMAND         Pass COMMAND to the invoked shell\n"
    "  -i, --interactive             Force pseudo-terminal allocation when using -c\n"
    "  -g, --group GROUP             Specify the primary group\n"
    "  -G, --supp-group GROUP        Specify a supplementary group\n"
    "                                The first specified supplementary group is also used\n"
    "                                as a primary group if the option -g is not specified\n"
    "  -Z, --context CONTEXT         Change SELinux context\n"
    "  -t, --target PID              PID to take mount namespace from\n"
    "  -d, --drop-cap                Drop all Linux capabilities\n"
    "  -h, --help                    Display this help message and exit\n"
    "  -, -l, --login                Pretend the shell to be a login shell\n"
    "  -m, -p,\n"
    "  --preserve-environment        Preserve the entire environment\n"
    "  -s, --shell SHELL             Use SHELL instead of the default " DEFAULT_SHELL "\n"
    "  -v, --version                 Display version number and exit\n"
    "  -V                            Display version code and exit\n"
    "  -mm, -M,\n"
    "  --mount-master                Force run in the global mount namespace\n\n");
    exit(status);
}
// Handler for terminating signals: close our ends of the stdio streams so
// the tty pump loops observe EOF/HUP and exit, allowing the client to
// continue and read back the remote exit code.
static void sighandler(int sig) {
    // Close all standard I/O to cause the pumps to exit
    // so we can continue and retrieve the exit code.
    for (int fd : { STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO }) {
        close(fd);
    }
    // Reinstall the default disposition for every quit signal
    struct sigaction act{};
    act.sa_handler = SIG_DFL;
    for (int *s = quit_signals; *s; ++s) {
        sigaction(*s, &act, nullptr);
    }
}
// Install `handler` for every signal listed in quit_signals.
static void setup_sighandlers(void (*handler)(int)) {
    struct sigaction act{};
    act.sa_handler = handler;
    for (int *s = quit_signals; *s; ++s)
        sigaction(*s, &act, nullptr);
}
// Client-side entry point for `su`.
// Parses the command line into a SuRequest, sends it to the Magisk daemon,
// forwards our stdio fds (or requests a pty when interactive), and finally
// returns the exit code of the remote shell.
int su_client_main(int argc, char *argv[]) {
    option long_opts[] = {
        { "command",                required_argument,  nullptr, 'c' },
        { "help",                   no_argument,        nullptr, 'h' },
        { "login",                  no_argument,        nullptr, 'l' },
        { "preserve-environment",   no_argument,        nullptr, 'p' },
        { "shell",                  required_argument,  nullptr, 's' },
        { "version",                no_argument,        nullptr, 'v' },
        { "context",                required_argument,  nullptr, 'Z' },
        { "mount-master",           no_argument,        nullptr, 'M' },
        { "target",                 required_argument,  nullptr, 't' },
        { "group",                  required_argument,  nullptr, 'g' },
        { "supp-group",             required_argument,  nullptr, 'G' },
        { "interactive",            no_argument,        nullptr, 'i' },
        { "drop-cap",               no_argument,        nullptr, 'd' },
        { nullptr, 0, nullptr, 0 },
    };

    auto req = SuRequest::New();

    // Normalize legacy flags before getopt sees them
    for (int i = 0; i < argc; i++) {
        // Replace -cn and -z with -Z for backwards compatibility
        if (strcmp(argv[i], "-cn") == 0 || strcmp(argv[i], "-z") == 0)
            strcpy(argv[i], "-Z");
        // Replace -mm with -M for supporting getopt_long
        else if (strcmp(argv[i], "-mm") == 0)
            strcpy(argv[i], "-M");
    }

    bool interactive = false;
    int c;
    while ((c = getopt_long(argc, argv, "c:hlimpds:VvuZ:Mt:g:G:", long_opts, nullptr)) != -1) {
        switch (c) {
            case 'c': {
                // Everything from the -c argument onwards becomes the command
                string command;
                for (int i = optind - 1; i < argc; ++i) {
                    if (!command.empty())
                        command += ' ';
                    command += argv[i];
                }
                req.command = command;
                optind = argc;  // consume all remaining arguments
                break;
            }
            case 'h':
                usage(EXIT_SUCCESS);
            case 'i':
                interactive = true;
                break;
            case 'l':
                req.login = true;
                break;
            case 'm':
            case 'p':
                req.keep_env = true;
                break;
            case 'd':
                req.drop_cap = true;
                break;
            case 's':
                req.shell = optarg;
                break;
            case 'V':
                printf("%d\n", MAGISK_VER_CODE);
                exit(EXIT_SUCCESS);
            case 'v':
                printf("%s\n", MAGISK_VERSION ":MAGISKSU");
                exit(EXIT_SUCCESS);
            case 'Z':
                req.context = optarg;
                break;
            // -M is -t without an argument (target_pid = 0 => global ns)
            case 'M':
            case 't':
                if (req.target_pid != -1) {
                    fprintf(stderr, "Can't use -M and -t at the same time\n");
                    usage(EXIT_FAILURE);
                }
                if (optarg == nullptr) {
                    req.target_pid = 0;
                } else {
                    req.target_pid = parse_int(optarg);
                    if (*optarg == '-' || req.target_pid == -1) {
                        fprintf(stderr, "Invalid PID: %s\n", optarg);
                        usage(EXIT_FAILURE);
                    }
                }
                break;
            case 'g':
            case 'G': {
                // -g prepends (primary group); -G appends (supplementary)
                vector<gid_t> gids;
                if (int gid = parse_int(optarg); gid >= 0) {
                    gids.insert(c == 'g' ? gids.begin() : gids.end(), gid);
                } else {
                    fprintf(stderr, "Invalid GID: %s\n", optarg);
                    usage(EXIT_FAILURE);
                }
                ranges::copy(gids, std::back_inserter(req.gids));
                break;
            }
            default:
                /* Bionic getopt_long doesn't terminate its error output by newline */
                fprintf(stderr, "\n");
                usage(2);
        }
    }

    // A bare "-" positional argument also means login shell
    if (optind < argc && strcmp(argv[optind], "-") == 0) {
        req.login = true;
        optind++;
    }
    /* username or uid */
    if (optind < argc) {
        if (const passwd *pw = getpwnam(argv[optind]))
            req.target_uid = pw->pw_uid;
        else
            req.target_uid = parse_int(argv[optind]);
        optind++;
    }

    // Connect to client
    owned_fd fd = connect_daemon(RequestCode::SUPERUSER);

    // Send request
    req.write_to_fd(fd);

    // Wait for ack from daemon
    if (read_int(fd)) {
        // Fast fail
        fprintf(stderr, "%s\n", strerror(EACCES));
        return EACCES;
    }

    // Determine which one of our streams are attached to a TTY
    // (no -c command implies an interactive session)
    interactive |= req.command.empty();
    int atty = 0;
    if (isatty(STDIN_FILENO) && interactive) atty |= ATTY_IN;
    if (isatty(STDOUT_FILENO) && interactive) atty |= ATTY_OUT;
    if (isatty(STDERR_FILENO) && interactive) atty |= ATTY_ERR;

    // For each stream: send the real fd, or -1 to ask the daemon to route
    // that stream through a pty instead.
    // Send stdin
    send_fd(fd, (atty & ATTY_IN) ? -1 : STDIN_FILENO);
    // Send stdout
    send_fd(fd, (atty & ATTY_OUT) ? -1 : STDOUT_FILENO);
    // Send stderr
    send_fd(fd, (atty & ATTY_ERR) ? -1 : STDERR_FILENO);

    if (atty) {
        // We need a PTY. Get one.
        int ptmx = recv_fd(fd);
        setup_sighandlers(sighandler);
        // If stdin is not a tty, and if we pump to ptmx, our process may intercept the input to ptmx and
        // output to stdout, which cause the target process lost input.
        pump_tty(ptmx, atty & ATTY_IN);
    }

    // Get the exit code
    return read_int(fd);
}
// Drop Linux capabilities so the child cannot regain privileges after exec.
// CAP_SETUID is deliberately left in the bounding set as a marker that the
// process has been restricted (recognized later by proc_is_restricted).
static void drop_caps() {
    // Probe the highest capability supported by the running kernel, once.
    static auto last_valid_cap = []() {
        uint32_t cap = CAP_WAKE_ALARM;
        while (prctl(PR_CAPBSET_READ, cap) >= 0) {
            cap++;
        }
        return cap - 1;
    }();
    // Drop bounding set
    for (uint32_t cap = 0; cap <= last_valid_cap; cap++) {
        if (cap != CAP_SETUID) {
            prctl(PR_CAPBSET_DROP, cap);
        }
    }
    // Clean inheritable set
    __user_cap_header_struct header = {.version = _LINUX_CAPABILITY_VERSION_3};
    __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3] = {};
    if (capget(&header, &data[0]) == 0) {
        for (size_t i = 0; i < _LINUX_CAPABILITY_U32S_3; i++) {
            data[i].inheritable = 0;
        }
        capset(&header, &data[0]);
    }
    // All capabilities will be lost after exec
    prctl(PR_SET_SECUREBITS, SECBIT_NOROOT);
    // Except CAP_SETUID in bounding set, it is a marker for restricted process
}
// Check whether the capability bounding set of `pid` contains exactly
// CAP_SETUID and nothing else — the marker left behind by drop_caps().
static bool proc_is_restricted(pid_t pid) {
    char buf[32] = {};
    auto bnd = "CapBnd:"sv;
    uint32_t data[_LINUX_CAPABILITY_U32S_3] = {};
    ssprintf(buf, sizeof(buf), "/proc/%d/status", pid);
    owned_fd status_fd = xopen(buf, O_RDONLY | O_CLOEXEC);
    file_readline(status_fd, [&](Utf8CStr s) -> bool {
        string_view line = s;
        if (line.starts_with(bnd)) {
            // Skip the "CapBnd:" prefix and any whitespace after it
            auto p = line.begin();
            advance(p, bnd.size());
            while (isspace(*p)) advance(p, 1);
            line.remove_prefix(distance(line.begin(), p));
            // The value is a single hex number; parse it 8 hex digits (one
            // u32 word) at a time, most significant word first, into data[]
            // (data[0] = least significant word).
            for (int i = 0; i < _LINUX_CAPABILITY_U32S_3; i++) {
                auto cap = line.substr((_LINUX_CAPABILITY_U32S_3 - 1 - i) * 8, 8);
                data[i] = parse_uint32_hex(cap);
            }
            return false;  // found the line, stop reading
        }
        return true;  // keep scanning
    });
    // Compare against a bounding set that holds only CAP_SETUID
    bool equal = true;
    for (int i = 0; i < _LINUX_CAPABILITY_U32S_3; i++) {
        if (i == CAP_TO_INDEX(CAP_SETUID)) {
            if (data[i] != CAP_TO_MASK(CAP_SETUID)) equal = false;
        } else {
            if (data[i] != 0) equal = false;
        }
    }
    return equal;
}
// Switch to the requested uid/gid and supplementary groups.
// When no groups are given, the primary gid mirrors the target uid.
static void set_identity(int uid, const rust::Vec<gid_t> &groups) {
    gid_t gid;
    if (!groups.empty()) {
        if (setgroups(groups.size(), groups.data())) {
            PLOGE("setgroups");
        }
        // First group in the list acts as the primary gid
        gid = groups[0];
    } else {
        gid = uid;
    }
    // Change gid before uid: once root uid is dropped we may no longer be
    // permitted to change group identity.
    if (setresgid(gid, gid, gid)) {
        // FIX: log the gid we attempted to set (the original printed `uid`)
        PLOGE("setresgid (%u)", gid);
    }
    if (setresuid(uid, uid, uid)) {
        PLOGE("setresuid (%u)", uid);
    }
}
// Daemon-side counterpart of su_client_main, running in the forked root
// process. Receives the client's stdio fds (allocating a pty slave for any
// stream sent as -1), enters the requested mount namespace, rebuilds the
// environment from the requester, drops privileges as requested, then
// execs the shell. Only returns if exec fails.
//   client: socket connected to the requesting su client
//   pid:    pid of the requesting process
void exec_root_shell(int client, int pid, SuRequest &req, MntNsMode mode) {
    // Become session leader
    xsetsid();

    // The FDs for each of the streams
    int infd = recv_fd(client);
    int outfd = recv_fd(client);
    int errfd = recv_fd(client);
    int ptsfd = -1;

    // App need a PTY (a negative fd means that stream goes through the pty)
    if (infd < 0 || outfd < 0 || errfd < 0) {
        string pts;
        string ptmx;
        // Prefer Magisk's private devpts mount when it exists
        auto magiskpts = get_magisk_tmp() + "/"s SHELLPTS;
        if (access(magiskpts.data(), F_OK)) {
            pts = "/dev/pts";
            ptmx = "/dev/ptmx";
        } else {
            pts = magiskpts;
            ptmx = magiskpts + "/ptmx";
        }
        int ptmx_fd = xopen(ptmx.data(), O_RDWR);
        grantpt(ptmx_fd);
        unlockpt(ptmx_fd);
        int pty_num = get_pty_num(ptmx_fd);
        if (pty_num < 0) {
            // Kernel issue? Fallback to /dev/pts
            close(ptmx_fd);
            pts = "/dev/pts";
            ptmx_fd = xopen("/dev/ptmx", O_RDWR);
            grantpt(ptmx_fd);
            unlockpt(ptmx_fd);
            pty_num = get_pty_num(ptmx_fd);
        }
        // Hand the master side back to the client, which pumps it
        send_fd(client, ptmx_fd);
        close(ptmx_fd);
        string pts_slave = pts + "/" + to_string(pty_num);
        LOGD("su: pts_slave=[%s]\n", pts_slave.data());

        // Opening the TTY has to occur after the
        // fork() and setsid() so that it becomes
        // our controlling TTY and not the daemon's
        ptsfd = xopen(pts_slave.data(), O_RDWR);
    }

    // Swap out stdin, stdout, stderr
    xdup2(infd < 0 ? ptsfd : infd, STDIN_FILENO);
    xdup2(outfd < 0 ? ptsfd : outfd, STDOUT_FILENO);
    xdup2(errfd < 0 ? ptsfd : errfd, STDERR_FILENO);
    close(infd);
    close(outfd);
    close(errfd);
    close(ptsfd);
    close(client);

    // Handle namespaces
    if (req.target_pid == -1)
        req.target_pid = pid;       // default target: the requester itself
    else if (req.target_pid == 0)
        mode = MntNsMode::Global;   // -M / -t 0: force the global namespace
    else if (mode == MntNsMode::Global)
        mode = MntNsMode::Requester;  // explicit -t overrides the global default

    switch (mode) {
        case MntNsMode::Global:
            LOGD("su: use global namespace\n");
            break;
        case MntNsMode::Requester:
            LOGD("su: use namespace of pid=[%d]\n", req.target_pid);
            switch_mnt_ns(req.target_pid);
            break;
        case MntNsMode::Isolate:
            LOGD("su: use new isolated namespace\n");
            switch_mnt_ns(req.target_pid);
            xunshare(CLONE_NEWNS);
            // Make the fresh namespace private so mounts don't propagate back
            xmount(nullptr, "/", nullptr, MS_PRIVATE | MS_REC, nullptr);
            break;
    }

    // Build the shell argv: "-" as argv[0] signals a login shell
    const char *argv[4] = { nullptr };
    argv[0] = req.login ? "-" : req.shell.c_str();
    if (!req.command.empty()) {
        argv[1] = "-c";
        argv[2] = req.command.c_str();
    }

    // Setup environment
    umask(022);
    char path[32];
    ssprintf(path, sizeof(path), "/proc/%d/cwd", pid);
    char cwd[4096];
    // Inherit the requester's working directory when resolvable
    if (canonical_path(path, cwd, sizeof(cwd)) > 0)
        chdir(cwd);
    ssprintf(path, sizeof(path), "/proc/%d/environ", pid);
    auto env = full_read(path);
    clearenv();
    // /proc/<pid>/environ is a NUL-separated list of KEY=VALUE entries
    for (size_t pos = 0; pos < env.size(); ++pos) {
        putenv(env.data() + pos);
        pos = env.find_first_of('\0', pos);
        if (pos == std::string::npos)
            break;
    }
    if (!req.keep_env) {
        struct passwd *pw;
        pw = getpwuid(req.target_uid);
        if (pw) {
            setenv("HOME", pw->pw_dir, 1);
            setenv("USER", pw->pw_name, 1);
            setenv("LOGNAME", pw->pw_name, 1);
            setenv("SHELL", req.shell.c_str(), 1);
        }
    }

    // Config privileges
    if (!req.context.empty()) {
        // Request an SELinux domain transition on the upcoming exec
        auto f = xopen_file("/proc/self/attr/exec", "we");
        if (f) fprintf(f.get(), "%s", req.context.c_str());
    }
    if (req.target_uid != AID_ROOT || req.drop_cap || proc_is_restricted(pid))
        drop_caps();
    if (req.target_uid != AID_ROOT || req.gids.size() > 0)
        set_identity(req.target_uid, req.gids);

    // Unblock all signals
    sigset_t block_set;
    sigemptyset(&block_set);
    sigprocmask(SIG_SETMASK, &block_set, nullptr);

    execvp(req.shell.c_str(), (char **) argv);
    fprintf(stderr, "Cannot execute %s: %s\n", req.shell.c_str(), strerror(errno));
    PLOGE("exec");
}

99
native/src/core/thread.rs Normal file
View file

@ -0,0 +1,99 @@
use base::{ResultExt, new_daemon_thread};
use nix::sys::signal::SigSet;
use nix::unistd::{getpid, gettid};
use std::sync::{Condvar, LazyLock, Mutex, WaitTimeoutResult};
use std::time::Duration;
// Process-wide singleton pool, constructed lazily on first task submission.
static THREAD_POOL: LazyLock<ThreadPool> = LazyLock::new(ThreadPool::default);
// Non-core worker threads exit after being idle for this many seconds.
const THREAD_IDLE_MAX_SEC: u64 = 60;
// Number of workers that never time out.
const CORE_POOL_SIZE: i32 = 3;
/// A grow-on-demand thread pool with a single pending-task slot.
/// Submitters and workers hand tasks over through `PoolInfo::task`,
/// coordinated by the two condvars below.
#[derive(Default)]
pub struct ThreadPool {
    // Signaled when a task has been placed into `info.task`.
    task_is_some: Condvar,
    // Signaled when a worker has emptied the task slot.
    task_is_none: Condvar,
    info: Mutex<PoolInfo>,
}

/// Shared state guarded by `ThreadPool::info`.
#[derive(Default)]
struct PoolInfo {
    // Workers currently parked waiting for a task.
    idle_threads: i32,
    // All live workers, idle or busy.
    total_threads: i32,
    // Single-slot queue; Some(..) means a task is awaiting pickup.
    task: Option<Box<dyn FnOnce() + Send>>,
}
impl ThreadPool {
    /// Worker main loop. Core-pool workers wait for work indefinitely;
    /// extra workers terminate after THREAD_IDLE_MAX_SEC of idleness.
    fn pool_loop(&self, is_core_pool: bool) {
        let mask = SigSet::all();
        loop {
            // Always restore the sigmask to block all signals
            mask.thread_set_mask().log_ok();
            let task: Option<Box<dyn FnOnce() + Send>>;
            {
                let mut info = self.info.lock().unwrap();
                info.idle_threads += 1;
                if info.task.is_none() {
                    if is_core_pool {
                        // Core pool never closes, wait forever.
                        info = self.task_is_some.wait(info).unwrap();
                    } else {
                        let dur = Duration::from_secs(THREAD_IDLE_MAX_SEC);
                        let result: WaitTimeoutResult;
                        (info, result) = self.task_is_some.wait_timeout(info, dur).unwrap();
                        if result.timed_out() {
                            // Terminate thread after timeout
                            info.idle_threads -= 1;
                            info.total_threads -= 1;
                            return;
                        }
                    }
                }
                // NOTE: wait() may wake spuriously, so `task` can still be
                // None here; the loop then simply parks again.
                task = info.task.take();
                self.task_is_none.notify_one();
                info.idle_threads -= 1;
            }
            if let Some(task) = task {
                task();
            }
            if getpid() == gettid() {
                // This meant the current thread forked and became the main thread, exit
                std::process::exit(0);
            }
        }
    }

    /// Place `f` into the task slot, spawning a new worker when none is
    /// idle. Blocks while a previously submitted task is still unclaimed.
    fn exec_task_impl(&self, f: impl FnOnce() + Send + 'static) {
        // Raw thread entry point; arg != 0 marks a core-pool worker.
        extern "C" fn pool_loop_raw(arg: usize) -> usize {
            let is_core_pool = arg != 0;
            THREAD_POOL.pool_loop(is_core_pool);
            0
        }
        let mut info = self.info.lock().unwrap();
        while info.task.is_some() {
            // Wait until task is none
            info = self.task_is_none.wait(info).unwrap();
        }
        info.task = Some(Box::new(f));
        if info.idle_threads == 0 {
            // Nobody is parked to pick the task up: grow the pool.
            info.total_threads += 1;
            let is_core_thread = if info.total_threads <= CORE_POOL_SIZE {
                1_usize
            } else {
                0_usize
            };
            unsafe {
                new_daemon_thread(pool_loop_raw, is_core_thread);
            }
        } else {
            self.task_is_some.notify_one();
        }
    }

    /// Submit a task to the global thread pool.
    pub fn exec_task(f: impl FnOnce() + Send + 'static) {
        THREAD_POOL.exec_task_impl(f);
    }
}

109
native/src/core/utils.cpp Normal file
View file

@ -0,0 +1,109 @@
#include <csignal>
#include <libgen.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <linux/input.h>
#include <map>
#include <consts.hpp>
#include <base.hpp>
#include <core.hpp>
using namespace std;
// Read a length-prefixed string from `fd` into `str`.
// Returns true only if the full payload was read.
bool read_string(int fd, std::string &str) {
    str.clear();
    int len = read_int(fd);
    // Guard against a corrupt/truncated stream: a negative length would be
    // converted to a huge size_t by resize() and throw/abort.
    if (len < 0)
        return false;
    str.resize(len);
    return xxread(fd, str.data(), len) == len;
}
// Convenience overload: read a length-prefixed string and return it by value.
// On failure the partially filled / empty string is returned as-is.
string read_string(int fd) {
    string result;
    read_string(fd, result);
    return result;
}
// Write a length-prefixed string to `fd`. Silently a no-op for invalid fds.
void write_string(int fd, string_view str) {
    if (fd < 0) return;
    write_int(fd, str.size());
    xwrite(fd, str.data(), str.size());
}
// Locate the Magisk tmpfs directory ("/debug_ramdisk" or "/sbin").
// Returns "" when neither location contains the Magisk internal root.
// The probe runs once; the result is cached for all later calls.
const char *get_magisk_tmp() {
    static const char *cached = []() -> const char * {
        if (access("/debug_ramdisk/" INTLROOT, F_OK) == 0)
            return "/debug_ramdisk";
        if (access("/sbin/" INTLROOT, F_OK) == 0)
            return "/sbin";
        return "";
    }();
    return cached;
}
// Clear the read-only flag (BLKROSET with 0) on every block device node
// found directly under /dev/block.
void unlock_blocks() {
    int fd, dev, OFF = 0;

    auto dir = xopen_dir("/dev/block");
    if (!dir)
        return;
    dev = dirfd(dir.get());

    for (dirent *entry; (entry = readdir(dir.get()));) {
        if (entry->d_type == DT_BLK) {
            if ((fd = openat(dev, entry->d_name, O_RDONLY | O_CLOEXEC)) < 0)
                continue;
            // 0 = mark device read-write
            if (ioctl(fd, BLKROSET, &OFF) < 0)
                PLOGE("unlock %s", entry->d_name);
            close(fd);
        }
    }
}
// Extract bit `bit` from a kernel evdev bitmask byte array.
#define test_bit(bit, array) (array[bit / 8] & (1 << (bit % 8)))

// Detect the "safe mode" key combo: volume-down held continuously for ~3s.
// Scans input event devices (char major 13, minors 64-95) directly since
// /dev/input may not be populated this early.
bool check_key_combo() {
    uint8_t bitmask[(KEY_MAX + 1) / 8];
    vector<owned_fd> events;
    constexpr char name[] = "/dev/.ev";

    // First collect candidate events that accepts volume down
    for (int minor = 64; minor < 96; ++minor) {
        // Temporarily create the device node, open it, then remove the node
        if (xmknod(name, S_IFCHR | 0444, makedev(13, minor)))
            continue;
        int fd = open(name, O_RDONLY | O_CLOEXEC);
        unlink(name);
        if (fd < 0)
            continue;
        memset(bitmask, 0, sizeof(bitmask));
        // EVIOCGBIT(EV_KEY): which keys this device can report at all
        ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(bitmask)), bitmask);
        if (test_bit(KEY_VOLUMEDOWN, bitmask))
            events.emplace_back(fd);
        else
            close(fd);
    }
    if (events.empty())
        return false;

    // Check if volume down key is held continuously for more than 3 seconds
    for (int i = 0; i < 300; ++i) {
        bool pressed = false;
        for (int fd : events) {
            memset(bitmask, 0, sizeof(bitmask));
            // EVIOCGKEY reports the keys currently held down
            ioctl(fd, EVIOCGKEY(sizeof(bitmask)), bitmask);
            if (test_bit(KEY_VOLUMEDOWN, bitmask)) {
                pressed = true;
                break;
            }
        }
        if (!pressed)
            return false;
        // Check every 10ms
        usleep(10000);
    }
    LOGD("KEY_VOLUMEDOWN detected: enter safe mode\n");
    return true;
}

View file

@ -0,0 +1,396 @@
/* Copyright 2022-2023 John "topjohnwu" Wu
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
// This is the public API for Zygisk modules.
// DO NOT MODIFY ANY CODE IN THIS HEADER.
// WARNING: this file may contain changes that are not finalized.
// Always use the following published header for development:
// https://github.com/topjohnwu/zygisk-module-sample/blob/master/module/jni/zygisk.hpp
#pragma once
#include <jni.h>
#define ZYGISK_API_VERSION 5
/*
***************
* Introduction
***************
On Android, all app processes are forked from a special daemon called "Zygote".
For each new app process, zygote will fork a new process and perform "specialization".
This specialization operation enforces the Android security sandbox on the newly forked
process to make sure that 3rd party application code is only loaded after it is being
restricted within a sandbox.
On Android, there is also this special process called "system_server". This single
process hosts a significant portion of system services, which controls how the
Android operating system and apps interact with each other.
The Zygisk framework provides a way to allow developers to build modules and run custom
code before and after system_server and any app processes' specialization.
This enable developers to inject code and alter the behavior of system_server and app processes.
Please note that modules will only be loaded after zygote has forked the child process.
THIS MEANS ALL OF YOUR CODE RUNS IN THE APP/SYSTEM_SERVER PROCESS, NOT THE ZYGOTE DAEMON!
*********************
* Development Guide
*********************
Define a class and inherit zygisk::ModuleBase to implement the functionality of your module.
Use the macro REGISTER_ZYGISK_MODULE(className) to register that class to Zygisk.
Example code:
static jint (*orig_logger_entry_max)(JNIEnv *env);
static jint my_logger_entry_max(JNIEnv *env) { return orig_logger_entry_max(env); }
class ExampleModule : public zygisk::ModuleBase {
public:
void onLoad(zygisk::Api *api, JNIEnv *env) override {
this->api = api;
this->env = env;
}
void preAppSpecialize(zygisk::AppSpecializeArgs *args) override {
JNINativeMethod methods[] = {
{ "logger_entry_max_payload_native", "()I", (void*) my_logger_entry_max },
};
api->hookJniNativeMethods(env, "android/util/Log", methods, 1);
*(void **) &orig_logger_entry_max = methods[0].fnPtr;
}
private:
zygisk::Api *api;
JNIEnv *env;
};
REGISTER_ZYGISK_MODULE(ExampleModule)
-----------------------------------------------------------------------------------------
Since your module class's code runs with either Zygote's privilege in pre[XXX]Specialize,
or runs in the sandbox of the target process in post[XXX]Specialize, the code in your class
never runs in a true superuser environment.
If your module require access to superuser permissions, you can create and register
a root companion handler function. This function runs in a separate root companion
daemon process, and an Unix domain socket is provided to allow you to perform IPC between
your target process and the root companion process.
Example code:
static void example_handler(int socket) { ... }
REGISTER_ZYGISK_COMPANION(example_handler)
*/
namespace zygisk {
struct Api;
struct AppSpecializeArgs;
struct ServerSpecializeArgs;
class ModuleBase {
public:
// This method is called as soon as the module is loaded into the target process.
// A Zygisk API handle will be passed as an argument.
virtual void onLoad([[maybe_unused]] Api *api, [[maybe_unused]] JNIEnv *env) {}
// This method is called before the app process is specialized.
// At this point, the process just got forked from zygote, but no app specific specialization
// is applied. This means that the process does not have any sandbox restrictions and
// still runs with the same privilege of zygote.
//
// All the arguments that will be sent and used for app specialization is passed as a single
// AppSpecializeArgs object. You can read and overwrite these arguments to change how the app
// process will be specialized.
//
// If you need to run some operations as superuser, you can call Api::connectCompanion() to
// get a socket to do IPC calls with a root companion process.
// See Api::connectCompanion() for more info.
virtual void preAppSpecialize([[maybe_unused]] AppSpecializeArgs *args) {}
// This method is called after the app process is specialized.
// At this point, the process has all sandbox restrictions enabled for this application.
// This means that this method runs with the same privilege of the app's own code.
virtual void postAppSpecialize([[maybe_unused]] const AppSpecializeArgs *args) {}
// This method is called before the system server process is specialized.
// See preAppSpecialize(args) for more info.
virtual void preServerSpecialize([[maybe_unused]] ServerSpecializeArgs *args) {}
// This method is called after the system server process is specialized.
// At this point, the process runs with the privilege of system_server.
virtual void postServerSpecialize([[maybe_unused]] const ServerSpecializeArgs *args) {}
};
struct AppSpecializeArgs {
// Required arguments. These arguments are guaranteed to exist on all Android versions.
jint &uid;
jint &gid;
jintArray &gids;
jint &runtime_flags;
jobjectArray &rlimits;
jint &mount_external;
jstring &se_info;
jstring &nice_name;
jstring &instruction_set;
jstring &app_data_dir;
// Optional arguments. Please check whether the pointer is null before de-referencing
jintArray *const fds_to_ignore;
jboolean *const is_child_zygote;
jboolean *const is_top_app;
jobjectArray *const pkg_data_info_list;
jobjectArray *const whitelisted_data_info_list;
jboolean *const mount_data_dirs;
jboolean *const mount_storage_dirs;
jboolean *const mount_sysprop_overrides;
AppSpecializeArgs() = delete;
};
struct ServerSpecializeArgs {
jint &uid;
jint &gid;
jintArray &gids;
jint &runtime_flags;
jlong &permitted_capabilities;
jlong &effective_capabilities;
ServerSpecializeArgs() = delete;
};
namespace internal {
struct api_table;
template <class T> void entry_impl(api_table *, JNIEnv *);
}
// These values are used in Api::setOption(Option)
enum Option : int {
// Force Magisk's denylist unmount routines to run on this process.
//
// Setting this option only makes sense in preAppSpecialize.
// The actual unmounting happens during app process specialization.
//
// Set this option to force all Magisk and modules' files to be unmounted from the
// mount namespace of the process, regardless of the denylist enforcement status.
FORCE_DENYLIST_UNMOUNT = 0,
// When this option is set, your module's library will be dlclose-ed after post[XXX]Specialize.
// Be aware that after dlclose-ing your module, all of your code will be unmapped from memory.
// YOU MUST NOT ENABLE THIS OPTION AFTER HOOKING ANY FUNCTIONS IN THE PROCESS.
DLCLOSE_MODULE_LIBRARY = 1,
};
// Bit masks of the return value of Api::getFlags()
enum StateFlag : uint32_t {
// The user has granted root access to the current process
PROCESS_GRANTED_ROOT = (1u << 0),
// The current process was added on the denylist
PROCESS_ON_DENYLIST = (1u << 1),
};
// All API methods will stop working after post[XXX]Specialize as Zygisk will be unloaded
// from the specialized process afterwards.
struct Api {
// Connect to a root companion process and get a Unix domain socket for IPC.
//
// This API only works in the pre[XXX]Specialize methods due to SELinux restrictions.
//
// The pre[XXX]Specialize methods run with the same privilege of zygote.
// If you would like to do some operations with superuser permissions, register a handler
// function that would be called in the root process with REGISTER_ZYGISK_COMPANION(func).
// Another good use case for a companion process is that if you want to share some resources
// across multiple processes, hold the resources in the companion process and pass it over.
//
// The root companion process is ABI aware; that is, when calling this method from a 32-bit
// process, you will be connected to a 32-bit companion process, and vice versa for 64-bit.
//
// Returns a file descriptor to a socket that is connected to the socket passed to your
// module's companion request handler. Returns -1 if the connection attempt failed.
int connectCompanion();
// Get the file descriptor of the root folder of the current module.
//
// This API only works in the pre[XXX]Specialize methods.
// Accessing the directory returned is only possible in the pre[XXX]Specialize methods
// or in the root companion process (assuming that you sent the fd over the socket).
// Both restrictions are due to SELinux and UID.
//
// Returns -1 if errors occurred.
int getModuleDir();
// Set various options for your module.
// Please note that this method accepts one single option at a time.
// Check zygisk::Option for the full list of options available.
void setOption(Option opt);
// Get information about the current process.
// Returns bitwise-or'd zygisk::StateFlag values.
uint32_t getFlags();
// Exempt the provided file descriptor from being automatically closed.
//
// This API only make sense in preAppSpecialize; calling this method in any other situation
// is either a no-op (returns true) or an error (returns false).
//
// When false is returned, the provided file descriptor will eventually be closed by zygote.
bool exemptFd(int fd);
// Hook JNI native methods for a class
//
// Lookup all registered JNI native methods and replace it with your own methods.
// The original function pointer will be saved in each JNINativeMethod's fnPtr.
// If no matching class, method name, or signature is found, that specific JNINativeMethod.fnPtr
// will be set to nullptr.
void hookJniNativeMethods(JNIEnv *env, const char *className, JNINativeMethod *methods, int numMethods);
// Hook functions in the PLT (Procedure Linkage Table) of ELFs loaded in memory.
//
// Parsing /proc/[PID]/maps will give you the memory map of a process. As an example:
//
// <address> <perms> <offset> <dev> <inode> <pathname>
// 56b4346000-56b4347000 r-xp 00002000 fe:00 235 /system/bin/app_process64
// (More details: https://man7.org/linux/man-pages/man5/proc.5.html)
//
// The `dev` and `inode` pair uniquely identifies a file being mapped into memory.
// For matching ELFs loaded in memory, replace function `symbol` with `newFunc`.
// If `oldFunc` is not nullptr, the original function pointer will be saved to `oldFunc`.
void pltHookRegister(dev_t dev, ino_t inode, const char *symbol, void *newFunc, void **oldFunc);
// Commit all the hooks that was previously registered.
// Returns false if an error occurred.
bool pltHookCommit();
private:
internal::api_table *tbl;
template <class T> friend void internal::entry_impl(internal::api_table *, JNIEnv *);
};
// Register a class as a Zygisk module
#define REGISTER_ZYGISK_MODULE(clazz) \
void zygisk_module_entry(zygisk::internal::api_table *table, JNIEnv *env) { \
zygisk::internal::entry_impl<clazz>(table, env); \
}
// Register a root companion request handler function for your module
//
// The function runs in a superuser daemon process and handles a root companion request from
// your module running in a target process. The function has to accept an integer value,
// which is a Unix domain socket that is connected to the target process.
// See Api::connectCompanion() for more info.
//
// NOTE: the function can run concurrently on multiple threads.
// Be aware of race conditions if you have globally shared resources.
#define REGISTER_ZYGISK_COMPANION(func) \
void zygisk_companion_entry(int client) { func(client); }
/*********************************************************
* The following is internal ABI implementation detail.
* You do not have to understand what it is doing.
*********************************************************/
namespace internal {
// ABI-stable bridge between a module's ModuleBase object and Zygisk core.
// Plain function pointers (instead of virtual dispatch) keep the layout
// independent of the compiler used to build either side.
struct module_abi {
    long api_version;
    ModuleBase *impl;

    void (*preAppSpecialize)(ModuleBase *, AppSpecializeArgs *);
    void (*postAppSpecialize)(ModuleBase *, const AppSpecializeArgs *);
    void (*preServerSpecialize)(ModuleBase *, ServerSpecializeArgs *);
    void (*postServerSpecialize)(ModuleBase *, const ServerSpecializeArgs *);

    // Captureless lambdas decay to function pointers; each one forwards the
    // callback to the corresponding virtual method on `impl`.
    module_abi(ModuleBase *module) : api_version(ZYGISK_API_VERSION), impl(module) {
        preAppSpecialize = [](auto m, auto args) { m->preAppSpecialize(args); };
        postAppSpecialize = [](auto m, auto args) { m->postAppSpecialize(args); };
        preServerSpecialize = [](auto m, auto args) { m->preServerSpecialize(args); };
        postServerSpecialize = [](auto m, auto args) { m->postServerSpecialize(args); };
    }
};
// Function table filled in by Zygisk core. The inline Api wrappers below
// null-check every entry before calling it.
struct api_table {
    // Base
    void *impl;
    bool (*registerModule)(api_table *, module_abi *);

    void (*hookJniNativeMethods)(JNIEnv *, const char *, JNINativeMethod *, int);
    void (*pltHookRegister)(dev_t, ino_t, const char *, void *, void **);
    bool (*exemptFd)(int);
    bool (*pltHookCommit)();
    int (*connectCompanion)(void * /* impl */);
    void (*setOption)(void * /* impl */, Option);
    int (*getModuleDir)(void * /* impl */);
    uint32_t (*getFlags)(void * /* impl */);
};
// Expanded by REGISTER_ZYGISK_MODULE: constructs the module singleton,
// registers its ABI table with Zygisk core, then invokes onLoad.
template <class T>
void entry_impl(api_table *table, JNIEnv *env) {
    static Api api;
    api.tbl = table;
    static T module;
    ModuleBase *m = &module;
    static module_abi abi(m);
    // Registration failure aborts module loading; onLoad is never called.
    if (!table->registerModule(table, &abi)) return;
    m->onLoad(&api, env);
}
} // namespace internal
// Inline implementations: every call is forwarded through the api_table and
// null-checked first, so a module built against a newer API degrades
// gracefully when the running Zygisk does not provide a given entry.
inline int Api::connectCompanion() {
    return tbl->connectCompanion ? tbl->connectCompanion(tbl->impl) : -1;
}
inline int Api::getModuleDir() {
    return tbl->getModuleDir ? tbl->getModuleDir(tbl->impl) : -1;
}
inline void Api::setOption(Option opt) {
    if (tbl->setOption) tbl->setOption(tbl->impl, opt);
}
inline uint32_t Api::getFlags() {
    return tbl->getFlags ? tbl->getFlags(tbl->impl) : 0;
}
inline bool Api::exemptFd(int fd) {
    return tbl->exemptFd != nullptr && tbl->exemptFd(fd);
}
inline void Api::hookJniNativeMethods(JNIEnv *env, const char *className, JNINativeMethod *methods, int numMethods) {
    if (tbl->hookJniNativeMethods) tbl->hookJniNativeMethods(env, className, methods, numMethods);
}
inline void Api::pltHookRegister(dev_t dev, ino_t inode, const char *symbol, void *newFunc, void **oldFunc) {
    if (tbl->pltHookRegister) tbl->pltHookRegister(dev, inode, symbol, newFunc, oldFunc);
}
inline bool Api::pltHookCommit() {
    return tbl->pltHookCommit != nullptr && tbl->pltHookCommit();
}
} // namespace zygisk
extern "C" {
[[gnu::visibility("default"), maybe_unused]]
void zygisk_module_entry(zygisk::internal::api_table *, JNIEnv *);
[[gnu::visibility("default"), maybe_unused]]
void zygisk_companion_entry(int);
} // extern "C"

View file

@ -0,0 +1,258 @@
use crate::consts::MODULEROOT;
use crate::daemon::{MagiskD, to_user_id};
use crate::ffi::{ZygiskRequest, ZygiskStateFlags, get_magisk_tmp, update_deny_flags};
use crate::resetprop::{get_prop, set_prop};
use crate::socket::{IpcRead, UnixSocketExt};
use base::libc::STDOUT_FILENO;
use base::{
Directory, FsPathBuilder, LoggedResult, ResultExt, Utf8CStr, WriteExt, cstr, fork_dont_care,
libc, log_err, raw_cstr, warn,
};
use nix::fcntl::OFlag;
use std::fmt::Write;
use std::os::fd::{AsRawFd, RawFd};
use std::os::unix::net::UnixStream;
use std::ptr;
use std::sync::atomic::Ordering;
// System property backing Android's native bridge mechanism; Zygisk hijacks it
// so libzygisk.so gets loaded into Zygote (see ZygiskState::set_prop).
const NBPROP: &Utf8CStr = cstr!("ro.dalvik.vm.native.bridge");
// File name of the Zygisk loader library prepended to the property value
const ZYGISKLDR: &str = "libzygisk.so";
// Both bits set => the process is on the denylist AND enforcement is active
const UNMOUNT_MASK: u32 =
    ZygiskStateFlags::ProcessOnDenyList.repr | ZygiskStateFlags::DenyListEnforced.repr;
// Decide whether Zygisk modules should be loaded into a process.
// Modules are skipped when the process is fully denied (on the denylist while
// enforcement is active) or when the process is the Magisk app itself.
pub fn zygisk_should_load_module(flags: u32) -> bool {
    let fully_denied = flags & UNMOUNT_MASK == UNMOUNT_MASK;
    let is_magisk_app = flags & ZygiskStateFlags::ProcessIsMagiskApp.repr != 0;
    !fully_denied && !is_magisk_app
}
// Replace the current (forked) process with the zygiskd companion daemon,
// passing `remote` as its control socket: `magisk zygisk companion <fd>`.
// `is_64_bit` is unused on 32-bit builds, hence the allow attribute.
#[allow(unused_variables)]
fn exec_zygiskd(is_64_bit: bool, remote: UnixStream) {
    // This fd has to survive exec, so clear FD_CLOEXEC
    unsafe {
        libc::fcntl(remote.as_raw_fd(), libc::F_SETFD, 0);
    }
    // Start building the exec arguments
    #[cfg(target_pointer_width = "64")]
    let magisk = if is_64_bit { "magisk" } else { "magisk32" };
    #[cfg(target_pointer_width = "32")]
    let magisk = "magisk";
    let exe = cstr::buf::new::<64>()
        .join_path(get_magisk_tmp())
        .join_path(magisk);
    // Render the raw fd number as the final command-line argument
    let mut fd_str = cstr::buf::new::<16>();
    write!(fd_str, "{}", remote.as_raw_fd()).ok();
    unsafe {
        libc::execl(
            exe.as_ptr(),
            raw_cstr!(""),
            raw_cstr!("zygisk"),
            raw_cstr!("companion"),
            fd_str.as_ptr(),
            ptr::null() as *const libc::c_char,
        );
        // execl only returns on failure; bail out of the forked child
        libc::exit(-1);
    }
}
// Daemon-side bookkeeping for the Zygisk feature.
// Note: `start_count: u32 = 1` relies on default field values together with
// #[derive(Default)] (nightly Rust feature).
#[derive(Default)]
pub struct ZygiskState {
    // Value we wrote to ro.dalvik.vm.native.bridge; empty when not set by us
    pub lib_name: String,
    // Cached connections to the (32-bit, 64-bit) zygiskd daemons
    sockets: (Option<UnixStream>, Option<UnixStream>),
    // Zygote (re)start counter, used by reset() to detect crash loops
    start_count: u32 = 1,
}
impl ZygiskState {
    // Forward a companion request to the matching (32/64-bit) zygiskd daemon,
    // lazily forking the daemon on first use or after its socket went bad.
    // The client fd is handed over to zygiskd via SCM_RIGHTS.
    fn connect_zygiskd(&mut self, mut client: UnixStream, daemon: &MagiskD) -> LoggedResult<()> {
        let is_64_bit: bool = client.read_decodable()?;
        // Pick the cached socket slot for the requested ABI
        let socket = if is_64_bit {
            &mut self.sockets.1
        } else {
            &mut self.sockets.0
        };
        if let Some(fd) = socket {
            // Make sure the socket is still valid
            let mut pfd = libc::pollfd {
                fd: fd.as_raw_fd(),
                events: 0,
                revents: 0,
            };
            if unsafe { libc::poll(&mut pfd, 1, 0) } != 0 || pfd.revents != 0 {
                // Any revent means error
                *socket = None;
            }
        }
        let socket = if let Some(fd) = socket {
            fd
        } else {
            // Create a new socket pair and fork zygiskd process
            let (local, remote) = UnixStream::pair()?;
            if fork_dont_care() == 0 {
                exec_zygiskd(is_64_bit, remote);
            }
            *socket = Some(local);
            let local = socket.as_mut().unwrap();
            // Seed the new daemon with all module library fds
            if let Some(module_fds) = daemon.get_module_fds(is_64_bit) {
                local.send_fds(&module_fds)?;
            }
            // Wait for zygiskd's startup ack (0 == success)
            if local.read_decodable::<i32>()? != 0 {
                return log_err!();
            }
            local
        };
        socket.send_fds(&[client.as_raw_fd()])?;
        Ok(())
    }

    // Called when zygote (re)starts or when we deliberately restore.
    // After 3 consecutive restarts without a restore, assume we caused a crash
    // loop and roll back the native bridge property.
    pub fn reset(&mut self, mut restore: bool) {
        if restore {
            self.start_count = 1;
        } else {
            // Drop stale zygiskd connections; they belong to the dead zygote era
            self.sockets = (None, None);
            self.start_count += 1;
            if self.start_count > 3 {
                warn!("zygote crashed too many times, rolling-back");
                restore = true;
            }
        }
        if restore {
            self.restore_prop();
        } else {
            self.set_prop();
        }
    }

    // Point ro.dalvik.vm.native.bridge at our loader, chaining any original
    // native bridge after "libzygisk.so". Idempotent: does nothing if already set.
    pub fn set_prop(&mut self) {
        if !self.lib_name.is_empty() {
            return;
        }
        let orig = get_prop(NBPROP);
        self.lib_name = if orig.is_empty() || orig == "0" {
            ZYGISKLDR.to_string()
        } else {
            ZYGISKLDR.to_string() + &orig
        };
        set_prop(NBPROP, Utf8CStr::from_string(&mut self.lib_name));
        // Whether Huawei's Maple compiler is enabled.
        // If so, system server will be created by a special Zygote which ignores the native bridge
        // and make system server out of our control. Avoid it by disabling.
        if get_prop(cstr!("ro.maple.enable")) == "1" {
            set_prop(cstr!("ro.maple.enable"), cstr!("0"));
        }
    }

    // Undo set_prop: restore the original native bridge value (or "0" if there
    // was none) and forget our recorded value.
    pub fn restore_prop(&mut self) {
        let mut orig = "0".to_string();
        if self.lib_name.len() > ZYGISKLDR.len() {
            // Everything after the "libzygisk.so" prefix is the original bridge
            orig = self.lib_name[ZYGISKLDR.len()..].to_string();
        }
        set_prop(NBPROP, Utf8CStr::from_string(&mut orig));
        self.lib_name.clear();
    }
}
impl MagiskD {
    // Entry point for a zygisk request socket: dispatch on the request code.
    // Errors are logged and swallowed; the daemon must not die on bad clients.
    pub fn zygisk_handler(&self, mut client: UnixStream) {
        let _: LoggedResult<()> = try {
            let code = ZygiskRequest {
                repr: client.read_decodable()?,
            };
            match code {
                ZygiskRequest::GetInfo => self.get_process_info(client)?,
                ZygiskRequest::ConnectCompanion => self
                    .zygisk
                    .lock()
                    .unwrap()
                    .connect_zygiskd(client, self)
                    .log_with_msg(|w| w.write_str("zygiskd startup error"))?,
                ZygiskRequest::GetModDir => self.get_mod_dir(client)?,
                _ => {}
            }
        };
    }

    // Collect the module library fds for the requested ABI.
    // Returns None when the module list is not yet initialized.
    fn get_module_fds(&self, is_64_bit: bool) -> Option<Vec<RawFd>> {
        self.module_list.get().map(|module_list| {
            module_list
                .iter()
                .map(|m| if is_64_bit { m.z64 } else { m.z32 })
                // All fds passed over sockets have to be valid file descriptors.
                // To work around this issue, send over STDOUT_FILENO as an indicator of an
                // invalid fd as it will always be /dev/null in magiskd.
                .map(|fd| if fd < 0 { STDOUT_FILENO } else { fd })
                .collect()
        })
    }

    // Handle ZygiskRequest::GetInfo: compute state flags for the process being
    // specialized, send them, optionally send module fds, and (for
    // system_server only) record modules that failed to load.
    fn get_process_info(&self, mut client: UnixStream) -> LoggedResult<()> {
        let uid: i32 = client.read_decodable()?;
        let process: String = client.read_decodable()?;
        let is_64_bit: bool = client.read_decodable()?;
        let mut flags: u32 = 0;
        update_deny_flags(uid, &process, &mut flags);
        if self.get_manager_uid(to_user_id(uid)) == uid {
            flags |= ZygiskStateFlags::ProcessIsMagiskApp.repr
        }
        if self.uid_granted_root(uid) {
            flags |= ZygiskStateFlags::ProcessGrantedRoot.repr
        }
        // First send flags
        client.write_pod(&flags)?;
        // Next send modules
        if zygisk_should_load_module(flags)
            && let Some(module_fds) = self.get_module_fds(is_64_bit)
        {
            client.send_fds(&module_fds)?;
        }
        // If we're not in system_server, we are done
        if uid != 1000 || process != "system_server" {
            return Ok(());
        }
        // Read all failed modules
        let failed_ids: Vec<i32> = client.read_decodable()?;
        if let Some(module_list) = self.module_list.get() {
            for id in failed_ids {
                let path = cstr::buf::default()
                    .join_path(MODULEROOT)
                    .join_path(&module_list[id as usize].name)
                    .join_path("zygisk");
                // Create the unloaded marker file
                if let Ok(dir) = Directory::open(&path) {
                    dir.open_as_file_at(cstr!("unloaded"), OFlag::O_CREAT | OFlag::O_RDONLY, 0o644)
                        .log()
                        .ok();
                }
            }
        }
        Ok(())
    }

    // Handle ZygiskRequest::GetModDir: send back an fd opened on the
    // requested module's root directory.
    fn get_mod_dir(&self, mut client: UnixStream) -> LoggedResult<()> {
        let id: i32 = client.read_decodable()?;
        let module = &self.module_list.get().unwrap()[id as usize];
        let dir = cstr::buf::default()
            .join_path(MODULEROOT)
            .join_path(&module.name);
        let fd = dir.open(OFlag::O_RDONLY | OFlag::O_CLOEXEC)?;
        client.send_fds(&[fd.as_raw_fd()])?;
        Ok(())
    }
}
// FFI to C++
impl MagiskD {
    // Whether the zygisk feature is currently enabled (atomic read, exposed to C++).
    pub fn zygisk_enabled(&self) -> bool {
        self.zygisk_enabled.load(Ordering::Acquire)
    }
}

View file

@ -0,0 +1,95 @@
#include <sys/mount.h>
#include <android/dlext.h>
#include <dlfcn.h>
#include <base.hpp>
#include <core.hpp>
#include "zygisk.hpp"
using namespace std;
using comp_entry = void(*)(int);
extern "C" void exec_companion_entry(int, comp_entry);
// Main loop of the zygisk companion daemon. `socket` is the control socket
// connected to magiskd (see the Rust side's connect_zygiskd): first receive
// all module library fds, dlopen each and resolve zygisk_companion_entry,
// ack with 0, then serve companion requests until magiskd goes away.
static void zygiskd(int socket) {
    // Sanity: must run as root with a valid control socket
    if (getuid() != 0 || fcntl(socket, F_GETFD) < 0)
        exit(-1);

#if defined(__LP64__)
    set_nice_name("zygiskd64");
    LOGI("* Launching zygiskd64\n");
#else
    set_nice_name("zygiskd32");
    LOGI("* Launching zygiskd32\n");
#endif

    // Load modules
    vector<comp_entry> modules;
    {
        auto module_fds = recv_fds(socket);
        for (int fd : module_fds) {
            comp_entry entry = nullptr;
            struct stat s{};
            // Invalid modules are sent as a placeholder fd (not a regular file);
            // those keep a nullptr entry so indices stay aligned with module ids
            if (fstat(fd, &s) == 0 && S_ISREG(s.st_mode)) {
                android_dlextinfo info {
                    .flags = ANDROID_DLEXT_USE_LIBRARY_FD,
                    .library_fd = fd,
                };
                if (void *h = android_dlopen_ext("/jit-cache", RTLD_LAZY, &info)) {
                    *(void **) &entry = dlsym(h, "zygisk_companion_entry");
                } else {
                    LOGW("Failed to dlopen zygisk module: %s\n", dlerror());
                }
            }
            modules.push_back(entry);
            close(fd);
        }
    }

    // ack
    write_int(socket, 0);

    // Start accepting requests
    pollfd pfd = { socket, POLLIN, 0 };
    for (;;) {
        poll(&pfd, 1, -1);
        if (pfd.revents && !(pfd.revents & POLLIN)) {
            // Something bad happened in magiskd, terminate zygiskd
            exit(0);
        }
        // Each request is a client fd forwarded from magiskd
        int client = recv_fd(socket);
        if (client < 0) {
            // Something bad happened in magiskd, terminate zygiskd
            exit(0);
        }
        int module_id = read_int(client);
        if (module_id >= 0 && module_id < modules.size() && modules[module_id]) {
            // Dispatch to the module's companion entry; it owns `client` now
            exec_companion_entry(client, modules[module_id]);
        } else {
            close(client);
        }
    }
}
// Entrypoint where we need to re-exec ourselves.
// This should only ever be called internally ("magisk zygisk companion <fd>").
int zygisk_main(int argc, char *argv[]) {
    android_logging();
    bool is_companion = argc == 3 && "companion"sv == argv[1];
    if (is_companion)
        zygiskd(parse_int(argv[2]));
    return 0;
}
// Entrypoint of code injection
// Zygote loads this library as a "native bridge" (via ro.dalvik.vm.native.bridge)
// and resolves the NativeBridgeItf symbol. We abuse isCompatibleWith — the first
// callback invoked — to run our hooking code, then return false so the runtime
// treats the bridge as incompatible and leaves us alone.
extern "C" [[maybe_unused]] NativeBridgeCallbacks NativeBridgeItf {
    .version = 2,
    .padding = {},
    .isCompatibleWith = [](auto) {
        zygisk_logging();
        hook_entry();
        ZLOGD("load success\n");
        return false;
    },
};

View file

@ -0,0 +1,238 @@
#!/usr/bin/env python3
# Generates jni_hooks.hpp: std::array<JNINativeMethod, N> tables that hook
# Zygote's process specialization JNI methods across Android versions.

# JNI primitive element types that have dedicated typed array types
# (e.g. jintArray); arrays of anything else are jobjectArray (see JArray).
primitives = ['jint', 'jboolean', 'jlong']
class JType:
    """A Java type as seen from C++ (`cpp`, e.g. 'jint') and in JNI
    signature form (`jni`, e.g. 'I')."""

    def __init__(self, cpp, jni):
        self.cpp = cpp
        self.jni = jni
class JArray(JType):
    """The JNI array type whose element type is the given JType."""

    def __init__(self, type):
        # Primitive arrays have dedicated C++ typedefs (jintArray etc.);
        # arrays of any other type are plain jobjectArray.
        cpp_name = type.cpp + 'Array' if type.cpp in primitives else 'jobjectArray'
        super().__init__(cpp_name, '[' + type.jni)
class Argument:
    """A named JNI method argument.

    set_arg marks arguments that are forwarded by pointer into the
    AppSpecializeArgs struct in the generated hook body.
    """

    def __init__(self, name, type, set_arg = False):
        self.name = name
        self.type = type
        self.set_arg = set_arg

    def cpp(self):
        # C++ parameter declaration, e.g. "jint uid"
        return '{} {}'.format(self.type.cpp, self.name)
# Args we don't care about get auto-generated placeholder names (_0, _1, ...)
class Anon(Argument):
    cnt = 0  # class-wide counter guaranteeing unique names

    def __init__(self, type):
        super().__init__('_%d' % Anon.cnt, type)
        Anon.cnt += 1
class Return:
    """Return spec of a generated hook: the C++ expression to return
    (empty string for void) and its JType."""

    def __init__(self, value, type):
        self.value = value
        self.type = type
class Method:
    """A JNI method: name, Return spec, and a list of Argument objects."""

    def __init__(self, name, ret, args):
        self.name = name
        self.ret = ret
        self.args = args

    def cpp(self):
        # C++ parameter list, e.g. "jint uid, jint gid"
        return ', '.join(a.cpp() for a in self.args)

    def name_list(self):
        # Comma-separated argument names for forwarding calls
        return ', '.join(a.name for a in self.args)

    def jni(self):
        # JNI signature string, e.g. "(II)V"
        sig = ''.join(a.type.jni for a in self.args)
        return '({}){}'.format(sig, self.ret.type.jni)

    def body(self, name, i):
        # Hook body; overridden by subclasses, empty by default
        return ''
class JNIHook(Method):
    """A Method hooking a versioned Zygote native function; the generated
    name is "<base_name>_<ver>" (e.g. nativeForkAndSpecialize_r)."""

    def __init__(self, ver, ret, args):
        name = f'{self.base_name()}_{ver}'
        super().__init__(name, ret, args)

    def base_name(self):
        # Overridden by subclasses: the unversioned JNI method name
        return ''

    def orig_method(self, name, i):
        # C++ expression casting the backed-up fnPtr (saved by hook_jni_methods
        # into g_hook->{name}_methods) to this method's exact signature
        return f'reinterpret_cast<{self.ret.type.cpp}(*)(JNIEnv *env, jclass clazz, {self.cpp()})>(g_hook->{name}_methods[{i}].fnPtr)'
def ind(i):
    # Newline followed by `i` indentation units for generated C++ code
    return '\n' + ' ' * i
# Common types (JNI signature letters: I=int, Z=boolean, J=long, V=void)
jint = JType('jint', 'I')
jintArray = JArray(jint)
jstring = JType('jstring', 'Ljava/lang/String;')
jboolean = JType('jboolean', 'Z')
jlong = JType('jlong', 'J')
void = JType('void', 'V')
class ForkAndSpec(JNIHook):
    """Hook generator for nativeForkAndSpecialize (returns the forked pid)."""

    def __init__(self, ver, args):
        # The generated hook returns ctx.pid after the pre/post dance
        super().__init__(ver, Return('ctx.pid', jint), args)

    def base_name(self):
        return 'nativeForkAndSpecialize'

    def init_args(self):
        # C++ statement constructing the args struct handed to ZygiskContext
        return 'AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);'

    def body(self, name, i):
        # Emit: build args struct, wire up optional set_arg pointers, run the
        # pre-hook, invoke the original method, then run the post-hook.
        decl = ''
        decl += ind(3) + self.init_args()
        for a in self.args:
            if a.set_arg:
                decl += ind(3) + f'args.{a.name} = &{a.name};'
        decl += ind(3) + 'ZygiskContext ctx(env, &args);'
        decl += ind(3) + f'ctx.{self.base_name()}_pre();'
        decl += ind(3) + self.orig_method(name, i) + '('
        decl += ind(4) + f'env, clazz, {self.name_list()}'
        decl += ind(3) + ');'
        decl += ind(3) + f'ctx.{self.base_name()}_post();'
        return decl
class SpecApp(ForkAndSpec):
    """Hook generator for nativeSpecializeAppProcess: same body shape as
    fork-and-specialize, but the method returns void (no fork, no pid)."""

    def __init__(self, ver, args):
        super().__init__(ver, args)
        # Override the pid return set by ForkAndSpec with a void return
        self.ret = Return('', void)

    def base_name(self):
        return 'nativeSpecializeAppProcess'
class ForkServer(ForkAndSpec):
    """Hook generator for nativeForkSystemServer; uses the server-specific
    argument struct instead of AppSpecializeArgs."""

    def base_name(self):
        return 'nativeForkSystemServer'

    def init_args(self):
        return 'ServerSpecializeArgs_v1 args(uid, gid, gids, runtime_flags, permitted_capabilities, effective_capabilities);'
# Common args (present in every Android version's signature)
uid = Argument('uid', jint)
gid = Argument('gid', jint)
gids = Argument('gids', jintArray)
runtime_flags = Argument('runtime_flags', jint)
rlimits = Argument('rlimits', JArray(jintArray))
mount_external = Argument('mount_external', jint)
se_info = Argument('se_info', jstring)
nice_name = Argument('nice_name', jstring)
fds_to_close = Argument('fds_to_close', jintArray)
instruction_set = Argument('instruction_set', jstring)
app_data_dir = Argument('app_data_dir', jstring)
# o: added in Android 8 (Oreo)
fds_to_ignore = Argument('fds_to_ignore', jintArray, True)
# p: added in Android 9 (Pie)
is_child_zygote = Argument('is_child_zygote', jboolean, True)
# q_alt: alternate Android 10 (Q) signature
is_top_app = Argument('is_top_app', jboolean, True)
# r: added in Android 11 (R)
pkg_data_info_list = Argument('pkg_data_info_list', JArray(jstring), True)
whitelisted_data_info_list = Argument('whitelisted_data_info_list', JArray(jstring), True)
mount_data_dirs = Argument('mount_data_dirs', jboolean, True)
mount_storage_dirs = Argument('mount_storage_dirs', jboolean, True)
# u: added in Android 14 (U)
mount_sysprop_overrides = Argument('mount_sysprop_overrides', jboolean, True)
# server: nativeForkSystemServer only
permitted_capabilities = Argument('permitted_capabilities', jlong)
effective_capabilities = Argument('effective_capabilities', jlong)
# Method definitions: one instance per known signature variant, including
# Samsung-specific variants carrying extra anonymous jint arguments
fas_l = ForkAndSpec('l', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, nice_name, fds_to_close, instruction_set, app_data_dir])
fas_o = ForkAndSpec('o', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, nice_name, fds_to_close, fds_to_ignore, instruction_set, app_data_dir])
fas_p = ForkAndSpec('p', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info,
    nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir])
fas_q_alt = ForkAndSpec('q_alt', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info,
    nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir, is_top_app])
fas_r = ForkAndSpec('r', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info,
    nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir, is_top_app,
    pkg_data_info_list, whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs])
fas_u = ForkAndSpec('u', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info,
    nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir, is_top_app,
    pkg_data_info_list, whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs, mount_sysprop_overrides])
fas_samsung_m = ForkAndSpec('samsung_m', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, Anon(jint), Anon(jint), nice_name, fds_to_close, instruction_set, app_data_dir])
fas_samsung_n = ForkAndSpec('samsung_n', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, Anon(jint), Anon(jint), nice_name, fds_to_close, instruction_set, app_data_dir, Anon(jint)])
fas_samsung_o = ForkAndSpec('samsung_o', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, Anon(jint), Anon(jint), nice_name, fds_to_close, fds_to_ignore, instruction_set, app_data_dir])
fas_samsung_p = ForkAndSpec('samsung_p', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, Anon(jint), Anon(jint), nice_name, fds_to_close, fds_to_ignore, is_child_zygote,
    instruction_set, app_data_dir])
spec_q = SpecApp('q', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info,
    nice_name, is_child_zygote, instruction_set, app_data_dir])
spec_q_alt = SpecApp('q_alt', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info,
    nice_name, is_child_zygote, instruction_set, app_data_dir, is_top_app])
spec_r = SpecApp('r', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name,
    is_child_zygote, instruction_set, app_data_dir, is_top_app, pkg_data_info_list,
    whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs])
spec_u = SpecApp('u', [uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name,
    is_child_zygote, instruction_set, app_data_dir, is_top_app, pkg_data_info_list,
    whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs, mount_sysprop_overrides])
spec_samsung_q = SpecApp('samsung_q', [uid, gid, gids, runtime_flags, rlimits, mount_external,
    se_info, Anon(jint), Anon(jint), nice_name, is_child_zygote, instruction_set, app_data_dir])
server_l = ForkServer('l', [uid, gid, gids, runtime_flags, rlimits,
    permitted_capabilities, effective_capabilities])
server_samsung_q = ForkServer('samsung_q', [uid, gid, gids, runtime_flags, Anon(jint), Anon(jint), rlimits,
    permitted_capabilities, effective_capabilities])
def gen_jni_def(name, methods):
    # Emit a C++ `std::array<JNINativeMethod, N> {name}_methods` definition.
    # Each entry is {base_name, jni_signature, lambda} where the lambda is the
    # generated hook body calling into ZygiskContext.
    decl = ''
    decl += ind(0) + f'std::array<JNINativeMethod, {len(methods)}> {name}_methods = {{{{'
    for i, m in enumerate(methods):
        decl += ind(1) + '{'
        decl += ind(2) + f'"{m.base_name()}",'
        decl += ind(2) + f'"{m.jni()}",'
        # Captureless lambda decays to a plain function pointer via unary '+'
        decl += ind(2) + f'(void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, {m.cpp()}) static -> {m.ret.type.cpp} {{'
        decl += m.body(name, i)
        if m.ret.value:
            decl += ind(3) + f'return {m.ret.value};'
        decl += ind(2) + '}'
        decl += ind(1) + '},'
    decl += ind(0) + '}};'
    decl += ind(0)
    return decl
# Write the generated header; it is #include'd inside struct HookContext,
# so zygote_methods becomes a member of that struct.
with open('jni_hooks.hpp', 'w') as f:
    f.write('// Generated by gen_jni_hooks.py\n')
    f.write(gen_jni_def('zygote', [
        fas_l, fas_o, fas_p, fas_q_alt, fas_r, fas_u, fas_samsung_m, fas_samsung_n, fas_samsung_o,
        fas_samsung_p, spec_q, spec_q_alt, spec_r, spec_u, spec_samsung_q, server_l, server_samsung_q]))
    f.write('\n')

View file

@ -0,0 +1,557 @@
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <dlfcn.h>
#include <unwind.h>
#include <span>
#include <lsplt.hpp>
#include <base.hpp>
#include <consts.hpp>
#include "zygisk.hpp"
#include "module.hpp"
using namespace std;
// *********************
// Zygisk Bootstrapping
// *********************
//
// Zygisk's lifecycle is driven by several PLT function hooks in libandroid_runtime, libart, and
// libnative_bridge. As Zygote is starting up, these carefully selected functions will call into
// the respective lifecycle callbacks in Zygisk to drive the progress forward.
//
// The entire bootstrap process is shown in the graph below.
// Arrows represent control flow, and the blocks are sorted chronologically from top to bottom.
//
// libnative_bridge libandroid_runtime zygisk libart
//
// ┌───────┐
// │ start │
// └───┬─┬─┘
// │ │ ┌────────────────┐
// │ └────────────────────────────────────────►│LoadNativeBridge│
// │ └───────┬────────┘
// ┌────────────────┐ │ │
// │LoadNativeBridge│◄────────────┼───────────────────────────────────────────────────┘
// └───────┬────┬───┘ │
// │ │ │ ┌───────────────┐
// │ └─────────────────┼────────────────────►│NativeBridgeItf│
// │ │ └──────┬────────┘
// │ │ │
// │ │ ▼
// │ │ ┌────────┐
// │ │ │hook_plt│
// ▼ │ └────────┘
// ┌───────┐ │
// │dlclose│ │
// └───┬───┘ │
// │ │
// │ │ ┌───────────────────────┐
// └──────────────────────┼────────────────►│post_native_bridge_load│
// │ └───────────────────────┘
// ▼
// ┌──────────────────────┐
// │ strdup("ZygoteInit") │
// └───────────┬────┬─────┘
// │ │ ┌───────────────┐
// │ └───────────────►│hook_zygote_jni│
// │ └───────────────┘ ┌─────────┐
// │ │ │
// └────────────────────────────────────────────►│ JVM │
// │ │
// └──┬─┬────┘
// ┌───────────────────┐ │ │
// │nativeXXXSpecialize│◄─────────────────────────────────────┘ │
// └─────────────┬─────┘ │
// │ ┌─────────────┐ │
// └────────────────►│ZygiskContext│ │
// └─────────────┘ ▼
// ┌────────────────────┐
// │pthread_attr_destroy│
// └─────────┬──────────┘
// ┌────────────────┐ │
// │restore_plt_hook│◄───────────┘
// └────────────────┘
//
// Some notes regarding the important functions/symbols during bootstrap:
//
// * NativeBridgeItf: this symbol is the entry point for android::LoadNativeBridge
// * HookContext::hook_plt(): hook functions like |dlclose| and |strdup|
// * dlclose: the final step in android::LoadNativeBridge. In this function, we unwind the call
// stack to load the real native bridge if necessary, and fetch NativeBridgeRuntimeCallbacks.
// * strdup: called in AndroidRuntime::start before calling ZygoteInit#main(...)
// * HookContext::hook_zygote_jni(): replace the process specialization functions registered
// with register_jni_procs. This marks the final step of the code injection bootstrap process.
// * pthread_attr_destroy: called whenever the JVM tries to setup threads for itself. We use
// this method to cleanup and unload Zygisk from the process.
// Java binary name compared against the strdup() argument in new_strdup
constexpr const char *kZygoteInit = "com.android.internal.os.ZygoteInit";
// JNI (slash-separated) class name of the Zygote class
// NOTE(review): presumably passed to FindClass when replacing Zygote's native
// methods — the use site is outside this excerpt; confirm.
constexpr const char *kZygote = "com/android/internal/os/Zygote";
// Global contexts:
//
// HookContext lives as long as Zygisk is loaded in memory. It tracks the process's function
// hooking state and bootstraps code injection until we replace the process specialization methods.
//
// ZygiskContext lives during the process specialization process. It implements Zygisk
// features, such as loading modules and customizing process fork/specialization.
ZygiskContext *g_ctx;
struct HookContext;
static HookContext *g_hook;
using JNIMethods = std::span<JNINativeMethod>;
// Process-lifetime hook state: PLT hook bookkeeping, the generated Zygote JNI
// method table, and the handles needed to unload ourselves cleanly.
struct HookContext {
#include "jni_hooks.hpp"
    // std::array<JNINativeMethod> zygote_methods

    // (dev, inode, symbol, &old_func) for every successfully registered PLT
    // hook, so restore_plt_hook can undo them
    vector<tuple<dev_t, ino_t, const char *, void **>> plt_backup;
    // ART callbacks recovered in post_native_bridge_load; needed for JNI hooking
    const NativeBridgeRuntimeCallbacks *runtime_callbacks = nullptr;
    // dlopen handle of this library (captured from the hooked dlclose)
    void *self_handle = nullptr;
    // Set when this process should unload Zygisk (see ~ZygiskContext)
    bool should_unmap = false;

    void hook_plt();
    void hook_unloader();
    void restore_plt_hook();
    void hook_zygote_jni();
    void restore_zygote_hook(JNIEnv *env);
    void hook_jni_methods(JNIEnv *env, const char *clz, JNIMethods methods);
    void post_native_bridge_load(void *handle);

private:
    void register_hook(dev_t dev, ino_t inode, const char *symbol, void *new_func, void **old_func);
};
// -----------------------------------------------------------------
// Declare both the backup pointer (old_func, filled in by the PLT hook
// machinery) and the replacement implementation (new_func) in one go
#define DCL_HOOK_FUNC(ret, func, ...) \
    ret (*old_##func)(__VA_ARGS__); \
    ret new_##func(__VA_ARGS__)
// Hooked in libandroid_runtime: AndroidRuntime::start strdup's the
// "ZygoteInit" class name right before entering the JVM — that is our cue to
// replace Zygote's specialization JNI methods.
DCL_HOOK_FUNC(static char *, strdup, const char * str) {
    if (strcmp(kZygoteInit, str) == 0) {
        g_hook->hook_zygote_jni();
    }
    return old_strdup(str);
}
// Skip actual fork and return cached result if applicable
// (g_ctx->pid >= 0 means ZygiskContext already performed the fork itself)
DCL_HOOK_FUNC(int, fork) {
    return (g_ctx && g_ctx->pid >= 0) ? g_ctx->pid : old_fork();
}
// Unmount stuffs in the process's private mount namespace
// (runs right after Zygote unshares the child into its own mount ns)
DCL_HOOK_FUNC(static int, unshare, int flags) {
    int res = old_unshare(flags);
    if (g_ctx && (flags & CLONE_NEWNS) != 0 && res == 0) {
        if (g_ctx->flags & DO_REVERT_UNMOUNT) {
            revert_unmount();
        }
        // Restore errno back to 0
        errno = 0;
    }
    return res;
}
// This is the last moment before the secontext of the process changes
DCL_HOOK_FUNC(static int, selinux_android_setcontext,
        uid_t uid, bool isSystemServer, const char *seinfo, const char *pkgname) {
    // Pre-fetch logd before secontext transition (we may lose the permission
    // to open it afterwards)
    zygisk_get_logd();
    return old_selinux_android_setcontext(uid, isSystemServer, seinfo, pkgname);
}
// Close file descriptors to prevent crashing
DCL_HOOK_FUNC(static void, android_log_close) {
    if (g_ctx == nullptr || !(g_ctx->flags & SKIP_CLOSE_LOG_PIPE)) {
        // This happens during forks like nativeForkApp, nativeForkUsap,
        // nativeForkSystemServer, and nativeForkAndSpecialize.
        zygisk_close_logd();
    }
    old_android_log_close();
}
// It should be safe to assume all dlclose's in libnativebridge are for zygisk_loader
DCL_HOOK_FUNC(static int, dlclose, void *handle) {
    if (!g_hook->self_handle) {
        ZLOGV("dlclose zygisk_loader\n");
        // Remember the handle instead of closing it — this library must stay
        // mapped until the deliberate unload in new_pthread_attr_destroy
        g_hook->post_native_bridge_load(handle);
    }
    // Pretend success without ever actually unloading
    return 0;
}
// We cannot directly call `dlclose` to unload ourselves, otherwise when `dlclose` returns,
// it will return to our code which has been unmapped, causing segmentation fault.
// Instead, we hook `pthread_attr_destroy` which will be called when VM daemon threads start.
DCL_HOOK_FUNC(static int, pthread_attr_destroy, void *target) {
    int res = old_pthread_attr_destroy((pthread_attr_t *)target);

    // Only perform unloading on the main thread
    if (gettid() != getpid())
        return res;

    ZLOGV("pthread_attr_destroy\n");
    if (g_hook->should_unmap) {
        // restore_plt_hook clears should_unmap on failure, so re-check it
        g_hook->restore_plt_hook();
        if (g_hook->should_unmap) {
            ZLOGV("dlclosing self\n");
            void *self_handle = g_hook->self_handle;
            delete g_hook;
            // Because both `pthread_attr_destroy` and `dlclose` have the same function signature,
            // we can use `musttail` to let the compiler reuse our stack frame and thus
            // `dlclose` will directly return to the caller of `pthread_attr_destroy`.
            [[clang::musttail]] return dlclose(self_handle);
        }
    }

    // NOTE(review): reached when restore_plt_hook failed (should_unmap cleared)
    // or should_unmap was never set; g_hook is freed either way — confirm no
    // still-active PLT hook can dereference g_hook after this point.
    delete g_hook;
    return res;
}
#undef DCL_HOOK_FUNC
// -----------------------------------------------------------------
// Query the hard limit on open file descriptors for this process.
// Falls back to 32768 when getrlimit fails (the initializer values are
// returned untouched in that case).
static size_t get_fd_max() {
    rlimit limit{32768, 32768};
    if (getrlimit(RLIMIT_NOFILE, &limit) != 0) {
        return 32768;
    }
    return limit.rlim_max;
}
// Capture the specialization arguments and publish this context through the
// global g_ctx pointer (consulted by the PLT hooks above). pid = -1 marks
// "fork not performed yet"; allowed_fds is sized to the NOFILE hard limit.
ZygiskContext::ZygiskContext(JNIEnv *env, void *args) :
    env(env), args{args}, process(nullptr), pid(-1), flags(0), info_flags(0),
    allowed_fds(get_fd_max()), hook_info_lock(PTHREAD_MUTEX_INITIALIZER) { g_ctx = this; }
ZygiskContext::~ZygiskContext() {
    // This global pointer points to a variable on the stack.
    // Set this to nullptr to prevent leaking local variable.
    // This also disables most plt hooked functions.
    g_ctx = nullptr;

    // In the parent (zygote) process there is nothing to tear down
    if (!is_child())
        return;

    zygisk_close_logd();
    android_logging();

    // Strip out all API function pointers
    for (auto &m : modules) {
        m.clearApi();
    }

    // Cleanup: mark for unload, undo the Zygote JNI method replacement, and
    // install the pthread_attr_destroy hook that performs the actual unload
    g_hook->should_unmap = true;
    g_hook->restore_zygote_hook(env);
    g_hook->hook_unloader();
}
// -----------------------------------------------------------------
// Return the start address of the function for the current unwind frame.
inline void *unwind_get_region_start(_Unwind_Context *ctx) {
    auto fp = _Unwind_GetRegionStart(ctx);
#if defined(__arm__)
    // On arm32, we need to check if the pc is in thumb mode,
    // if so, we need to set the lowest bit of fp to 1
    auto pc = _Unwind_GetGR(ctx, 15); // r15 is pc
    if (pc & 1) {
        // Thumb mode
        fp |= 1;
    }
#endif
    return reinterpret_cast<void *>(fp);
}
// As we use NativeBridgeRuntimeCallbacks to reload native bridge and to hook jni functions,
// we need to find it by the native bridge's unwind context.
// For abis that use registers to pass arguments, i.e. arm32, arm64, x86_64, the registers are
// caller-saved, and they are not preserved in the unwind context. However, they will be saved
// into the callee-saved registers, so we will search the callee-saved registers for the second
// argument, which is the pointer to NativeBridgeRuntimeCallbacks.
// For x86, whose abi uses stack to pass arguments, we can directly get the pointer to
// NativeBridgeRuntimeCallbacks from the stack.
// Recover the NativeBridgeRuntimeCallbacks pointer from the unwind context of
// android::LoadNativeBridge. Heuristic: the pointer lives in libart.so's
// read-write mapping, so scan the callee-saved registers (or, on x86, the
// caller's stack slot) for a value inside that range.
static const NativeBridgeRuntimeCallbacks* find_runtime_callbacks(struct _Unwind_Context *ctx) {
    // Find the writable memory region of libart.so, where the NativeBridgeRuntimeCallbacks is located.
    auto [start, end] = []()-> tuple<uintptr_t, uintptr_t> {
        for (const auto &map : lsplt::MapInfo::Scan()) {
            if (map.path.ends_with("/libart.so") && map.perms == (PROT_WRITE | PROT_READ)) {
                ZLOGV("libart.so: start=%p, end=%p\n",
                      reinterpret_cast<void *>(map.start), reinterpret_cast<void *>(map.end));
                return {map.start, map.end};
            }
        }
        return {0, 0};
    }();
#if defined(__aarch64__)
    // r19-r28 are callee-saved registers
    for (int i = 19; i <= 28; ++i) {
        auto val = static_cast<uintptr_t>(_Unwind_GetGR(ctx, i));
        ZLOGV("r%d = %p\n", i, reinterpret_cast<void *>(val));
        if (val >= start && val < end)
            return reinterpret_cast<const NativeBridgeRuntimeCallbacks*>(val);
    }
#elif defined(__arm__)
    // r4-r10 are callee-saved registers
    for (int i = 4; i <= 10; ++i) {
        auto val = static_cast<uintptr_t>(_Unwind_GetGR(ctx, i));
        ZLOGV("r%d = %p\n", i, reinterpret_cast<void *>(val));
        if (val >= start && val < end)
            return reinterpret_cast<const NativeBridgeRuntimeCallbacks*>(val);
    }
#elif defined(__i386__)
    // get ebp, which points to the bottom of the stack frame
    auto ebp = static_cast<uintptr_t>(_Unwind_GetGR(ctx, 5));
    // 1 pointer size above ebp is the old ebp
    // 2 pointer sizes above ebp is the return address
    // 3 pointer sizes above ebp is the 2nd arg
    auto val = *reinterpret_cast<uintptr_t *>(ebp + 3 * sizeof(void *));
    ZLOGV("ebp + 3 * ptr_size = %p\n", reinterpret_cast<void *>(val));
    if (val >= start && val < end)
        return reinterpret_cast<const NativeBridgeRuntimeCallbacks*>(val);
#elif defined(__x86_64__)
    // r12-r15 and rbx are callee-saved registers, but the compiler is likely to use them reversely
    for (int i : {3, 15, 14, 13, 12}) {
        auto val = static_cast<uintptr_t>(_Unwind_GetGR(ctx, i));
        ZLOGV("r%d = %p\n", i, reinterpret_cast<void *>(val));
        if (val >= start && val < end)
            return reinterpret_cast<const NativeBridgeRuntimeCallbacks*>(val);
    }
#elif defined(__riscv)
    // x8-x9, x18-x27 callee-saved registers
    for (int i : {8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}) {
        auto val = static_cast<uintptr_t>(_Unwind_GetGR(ctx, i));
        ZLOGV("x%d = %p\n", i, reinterpret_cast<void *>(val));
        if (val >= start && val < end)
            return reinterpret_cast<const NativeBridgeRuntimeCallbacks*>(val);
    }
#else
#error "Unsupported architecture"
#endif
    return nullptr;
}
// Called from the hooked dlclose with our own dlopen handle. Walk the call
// stack to locate android::LoadNativeBridge inside libnativebridge.so and the
// NativeBridgeRuntimeCallbacks pointer, then reload the real native bridge
// (everything after the "libzygisk.so" prefix in the property) if one exists.
void HookContext::post_native_bridge_load(void *handle) {
    self_handle = handle;

    using method_sig = const bool (*)(const char *, const NativeBridgeRuntimeCallbacks *);
    struct trace_arg {
        method_sig load_native_bridge;
        const NativeBridgeRuntimeCallbacks *callbacks;
    };
    trace_arg arg{};

    // Unwind to find the address of android::LoadNativeBridge and NativeBridgeRuntimeCallbacks
    _Unwind_Backtrace(+[](_Unwind_Context *ctx, void *arg) -> _Unwind_Reason_Code {
        void *fp = unwind_get_region_start(ctx);
        Dl_info info{};
        dladdr(fp, &info);
        ZLOGV("backtrace: %p %s\n", fp, info.dli_fname ?: "???");
        if (info.dli_fname && std::string_view(info.dli_fname).ends_with("/libnativebridge.so")) {
            auto payload = reinterpret_cast<trace_arg *>(arg);
            payload->load_native_bridge = reinterpret_cast<method_sig>(fp);
            payload->callbacks = find_runtime_callbacks(ctx);
            ZLOGV("NativeBridgeRuntimeCallbacks: %p\n", payload->callbacks);
            // Stop unwinding once the libnativebridge frame is found
            return _URC_END_OF_STACK;
        }
        return _URC_NO_REASON;
    }, &arg);
    if (!arg.load_native_bridge || !arg.callbacks)
        return;

    // Reload the real native bridge if necessary
    auto nb = get_prop(NBPROP);
    auto len = sizeof(ZYGISKLDR) - 1;
    if (nb.size() > len) {
        // Skip the "libzygisk.so" prefix; the remainder is the original value
        arg.load_native_bridge(nb.c_str() + len, arg.callbacks);
    }
    runtime_callbacks = arg.callbacks;
}
// -----------------------------------------------------------------
// Queue a PLT hook registration and, on success, remember it in plt_backup
// so restore_plt_hook can undo it later. Failures are logged and skipped.
void HookContext::register_hook(
        dev_t dev, ino_t inode, const char *symbol, void *new_func, void **old_func) {
    if (lsplt::RegisterHook(dev, inode, symbol, new_func, old_func)) {
        plt_backup.emplace_back(dev, inode, symbol, old_func);
    } else {
        ZLOGE("Failed to register plt_hook \"%s\"\n", symbol);
    }
}
#define PLT_HOOK_REGISTER_SYM(DEV, INODE, SYM, NAME) \
register_hook(DEV, INODE, SYM, \
reinterpret_cast<void *>(new_##NAME), reinterpret_cast<void **>(&old_##NAME))
#define PLT_HOOK_REGISTER(DEV, INODE, NAME) \
PLT_HOOK_REGISTER_SYM(DEV, INODE, #NAME, NAME)
// Install the bootstrap PLT hooks: dlclose in libnativebridge, and
// fork/unshare/selinux_android_setcontext/strdup/__android_log_close in
// libandroid_runtime. Libraries are identified by (dev, inode) from /proc maps.
void HookContext::hook_plt() {
    ino_t android_runtime_inode = 0;
    dev_t android_runtime_dev = 0;
    ino_t native_bridge_inode = 0;
    dev_t native_bridge_dev = 0;

    for (auto &map : lsplt::MapInfo::Scan()) {
        if (map.path.ends_with("/libandroid_runtime.so")) {
            android_runtime_inode = map.inode;
            android_runtime_dev = map.dev;
        } else if (map.path.ends_with("/libnativebridge.so")) {
            native_bridge_inode = map.inode;
            native_bridge_dev = map.dev;
        }
    }

    PLT_HOOK_REGISTER(native_bridge_dev, native_bridge_inode, dlclose);
    PLT_HOOK_REGISTER(android_runtime_dev, android_runtime_inode, fork);
    PLT_HOOK_REGISTER(android_runtime_dev, android_runtime_inode, unshare);
    PLT_HOOK_REGISTER(android_runtime_dev, android_runtime_inode, selinux_android_setcontext);
    PLT_HOOK_REGISTER(android_runtime_dev, android_runtime_inode, strdup);
    PLT_HOOK_REGISTER_SYM(android_runtime_dev, android_runtime_inode, "__android_log_close", android_log_close);

    if (!lsplt::CommitHook())
        ZLOGE("plt_hook failed\n");

    // Remove unhooked methods
    std::erase_if(plt_backup, [](auto &t) { return *std::get<3>(t) == nullptr; });
}
// Hook pthread_attr_destroy in libart; its replacement performs the final
// self-unload of Zygisk (see new_pthread_attr_destroy above).
void HookContext::hook_unloader() {
    dev_t art_dev = 0;
    ino_t art_inode = 0;
    for (const auto &info : lsplt::MapInfo::Scan()) {
        if (info.path.ends_with("/libart.so")) {
            art_dev = info.dev;
            art_inode = info.inode;
            break;
        }
    }

    PLT_HOOK_REGISTER(art_dev, art_inode, pthread_attr_destroy);
    if (!lsplt::CommitHook())
        ZLOGE("plt_hook failed\n");
}
// Undo every PLT hook recorded in plt_backup by re-registering the original
// function pointers. If anything fails, clear should_unmap so we never unmap
// a library that still has live hooks pointing into it.
void HookContext::restore_plt_hook() {
    // Unhook plt_hook
    for (const auto &[dev, inode, sym, old_func] : plt_backup) {
        if (!lsplt::RegisterHook(dev, inode, sym, *old_func, nullptr)) {
            ZLOGE("Failed to register plt_hook [%s]\n", sym);
            should_unmap = false;
        }
    }
    if (!lsplt::CommitHook()) {
        ZLOGE("Failed to restore plt_hook\n");
        should_unmap = false;
    }
}
// -----------------------------------------------------------------
// Swap the native implementations of the given JNI methods on class `clz`.
// On return, each entry of `methods` holds either the ORIGINAL (pre-hook)
// function pointer (hook succeeded) or nullptr (class/method not found or
// registration failed). Entries passed in with fnPtr == nullptr are skipped,
// which is how restore_zygote_hook reuses this function as a no-op restore.
void HookContext::hook_jni_methods(JNIEnv *env, const char *clz, JNIMethods methods) {
    jclass clazz;
    // No runtime callback table, no env, or class lookup failure:
    // mark every entry as not hooked and bail.
    if (!runtime_callbacks || !env || !clz || !(clazz = env->FindClass(clz))) {
        for (auto &method : methods) {
            method.fnPtr = nullptr;
        }
        return;
    }
    // Backup existing methods
    auto total = runtime_callbacks->getNativeMethodCount(env, clazz);
    auto old_methods = std::make_unique_for_overwrite<JNINativeMethod[]>(total);
    runtime_callbacks->getNativeMethods(env, clazz, old_methods.get(), total);
    // WARNING: the signature field returned from getNativeMethods is in a non-standard format.
    // DO NOT TRY TO USE IT. This is the reason why we try to call RegisterNatives on every single
    // provided JNI methods directly to be 100% sure about whether a signature matches or not.
    // Replace methods
    for (auto &method : methods) {
        // It's useful to allow nullptr function pointer for restoring hook
        if (!method.fnPtr) continue;
        // It's normal that the method is not found
        if (env->RegisterNatives(clazz, &method, 1) == JNI_ERR || env->ExceptionCheck() == JNI_TRUE) {
            // Clear the pending exception and mark this entry as not hooked
            if (auto exception = env->ExceptionOccurred()) {
                env->DeleteLocalRef(exception);
            }
            env->ExceptionClear();
            method.fnPtr = nullptr;
        }
    }
    // Fetch the new set of native methods
    auto new_methods = std::make_unique_for_overwrite<JNINativeMethod[]>(total);
    runtime_callbacks->getNativeMethods(env, clazz, new_methods.get(), total);
    // Find the old function pointer and return to caller
    for (auto &method : methods) {
        if (!method.fnPtr) continue;
        // Locate our replacement in the new table; the entry at the same
        // index of the old table is the original implementation.
        for (auto i = 0; i < total; ++i) {
            auto &new_method = new_methods[i];
            if (new_method.fnPtr == method.fnPtr) {
                auto &old_method = old_methods[i];
                ZLOGV("replace %s#%s%s %p -> %p\n", clz, method.name, method.signature, old_method.fnPtr, method.fnPtr);
                method.fnPtr = old_method.fnPtr;
                break;
            }
        }
    }
}
// Locate the running JavaVM and install our hooks over Zygote's native JNI
// methods (zygote_methods) via hook_jni_methods.
void HookContext::hook_zygote_jni() {
    using method_sig = jint(*)(JavaVM **, jsize, jsize *);
    // First try the globally visible JNI_GetCreatedJavaVMs symbol
    auto get_created_vms = reinterpret_cast<method_sig>(
        dlsym(RTLD_DEFAULT, "JNI_GetCreatedJavaVMs"));
    if (!get_created_vms) {
        // Fallback: resolve it from libnativehelper.so. The library is found
        // in the current memory maps (already loaded), so dlopen only bumps
        // its refcount and the symbol remains valid after dlclose below.
        for (auto &map: lsplt::MapInfo::Scan()) {
            if (!map.path.ends_with("/libnativehelper.so")) continue;
            void *h = dlopen(map.path.data(), RTLD_LAZY);
            if (!h) {
                ZLOGW("Cannot dlopen libnativehelper.so: %s\n", dlerror());
                break;
            }
            get_created_vms = reinterpret_cast<method_sig>(dlsym(h, "JNI_GetCreatedJavaVMs"));
            dlclose(h);
            break;
        }
        if (!get_created_vms) {
            ZLOGW("JNI_GetCreatedJavaVMs not found\n");
            return;
        }
    }
    JavaVM *vm = nullptr;
    jsize num = 0;
    jint res = get_created_vms(&vm, 1, &num);
    if (res != JNI_OK || vm == nullptr) {
        ZLOGW("JavaVM not found\n");
        return;
    }
    JNIEnv *env = nullptr;
    res = vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6);
    if (res != JNI_OK || env == nullptr) {
        ZLOGW("JNIEnv not found\n");
        // NOTE(review): no early return here — hook_jni_methods tolerates a
        // null env by clearing every fnPtr (marking the hooks uninstalled).
        // Confirm whether an explicit `return` was intended instead.
    }
    hook_jni_methods(env, kZygote, zygote_methods);
}
// Restore Zygote's original JNI methods. After hook_zygote_jni, each entry
// in zygote_methods holds the ORIGINAL fnPtr, so re-registering the table
// swaps the originals back in (nullptr entries are skipped).
void HookContext::restore_zygote_hook(JNIEnv *env) {
    hook_jni_methods(env, kZygote, zygote_methods);
}
// -----------------------------------------------------------------
// Library entry point: allocate the global HookContext and install PLT hooks.
void hook_entry() {
    default_new(g_hook);
    g_hook->hook_plt();
}
// Zygisk API: hook JNI native methods on behalf of a module. Wraps
// HookContext::hook_jni_methods; original pointers are written back into
// `methods` in-place (nullptr where the method could not be hooked).
void hookJniNativeMethods(JNIEnv *env, const char *clz, JNINativeMethod *methods, int numMethods) {
    g_hook->hook_jni_methods(env, clz, { methods, (size_t) numMethods });
}

View file

@ -0,0 +1,279 @@
// Generated by gen_jni_hooks.py
//
// Hook table for Zygote's native JNI methods. Each entry pairs a method name
// and JNI signature with a capture-less lambda that: builds the matching
// AppSpecializeArgs_v5 / ServerSpecializeArgs_v1 struct, runs the Zygisk
// pre-hook, chains to the original implementation (hook_jni_methods stores
// the original fnPtr back into this very table, indexed [0]..[16]), then
// runs the post-hook. The overloads cover the different framework method
// signatures; entries taking unnamed `jint _N` parameters are presumably
// OEM-modified (vendor) variants — TODO confirm — and pass those extra
// values through untouched.
std::array<JNINativeMethod, 17> zygote_methods = {{
    // [0] fork+specialize: fds_to_close only
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;[ILjava/lang/String;Ljava/lang/String;)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jstring instruction_set, jstring app_data_dir) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[0].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, fds_to_close, instruction_set, app_data_dir
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [1] adds fds_to_ignore
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;[I[ILjava/lang/String;Ljava/lang/String;)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jstring instruction_set, jstring app_data_dir) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[1].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, fds_to_close, fds_to_ignore, instruction_set, app_data_dir
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [2] adds is_child_zygote
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;[I[IZLjava/lang/String;Ljava/lang/String;)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            args.is_child_zygote = &is_child_zygote;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[2].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [3] adds is_top_app
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;[I[IZLjava/lang/String;Ljava/lang/String;Z)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            args.is_child_zygote = &is_child_zygote;
            args.is_top_app = &is_top_app;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app)>(g_hook->zygote_methods[3].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir, is_top_app
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [4] adds pkg/whitelisted data-info lists + mount_data_dirs/mount_storage_dirs
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;[I[IZLjava/lang/String;Ljava/lang/String;Z[Ljava/lang/String;[Ljava/lang/String;ZZ)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            args.is_child_zygote = &is_child_zygote;
            args.is_top_app = &is_top_app;
            args.pkg_data_info_list = &pkg_data_info_list;
            args.whitelisted_data_info_list = &whitelisted_data_info_list;
            args.mount_data_dirs = &mount_data_dirs;
            args.mount_storage_dirs = &mount_storage_dirs;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs)>(g_hook->zygote_methods[4].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir, is_top_app, pkg_data_info_list, whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [5] adds mount_sysprop_overrides
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;[I[IZLjava/lang/String;Ljava/lang/String;Z[Ljava/lang/String;[Ljava/lang/String;ZZZ)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs, jboolean mount_sysprop_overrides) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            args.is_child_zygote = &is_child_zygote;
            args.is_top_app = &is_top_app;
            args.pkg_data_info_list = &pkg_data_info_list;
            args.whitelisted_data_info_list = &whitelisted_data_info_list;
            args.mount_data_dirs = &mount_data_dirs;
            args.mount_storage_dirs = &mount_storage_dirs;
            args.mount_sysprop_overrides = &mount_sysprop_overrides;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs, jboolean mount_sysprop_overrides)>(g_hook->zygote_methods[5].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir, is_top_app, pkg_data_info_list, whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs, mount_sysprop_overrides
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [6] variant with two extra unnamed jints after se_info
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;IILjava/lang/String;[ILjava/lang/String;Ljava/lang/String;)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _0, jint _1, jstring nice_name, jintArray fds_to_close, jstring instruction_set, jstring app_data_dir) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _0, jint _1, jstring nice_name, jintArray fds_to_close, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[6].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, _0, _1, nice_name, fds_to_close, instruction_set, app_data_dir
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [7] extra jints, plus a trailing unnamed jint
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;IILjava/lang/String;[ILjava/lang/String;Ljava/lang/String;I)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _2, jint _3, jstring nice_name, jintArray fds_to_close, jstring instruction_set, jstring app_data_dir, jint _4) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _2, jint _3, jstring nice_name, jintArray fds_to_close, jstring instruction_set, jstring app_data_dir, jint _4)>(g_hook->zygote_methods[7].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, _2, _3, nice_name, fds_to_close, instruction_set, app_data_dir, _4
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [8] extra jints + fds_to_ignore
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;IILjava/lang/String;[I[ILjava/lang/String;Ljava/lang/String;)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _5, jint _6, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jstring instruction_set, jstring app_data_dir) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _5, jint _6, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[8].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, _5, _6, nice_name, fds_to_close, fds_to_ignore, instruction_set, app_data_dir
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [9] extra jints + fds_to_ignore + is_child_zygote
    {
        "nativeForkAndSpecialize",
        "(II[II[[IILjava/lang/String;IILjava/lang/String;[I[IZLjava/lang/String;Ljava/lang/String;)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _7, jint _8, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir) static -> jint {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.fds_to_ignore = &fds_to_ignore;
            args.is_child_zygote = &is_child_zygote;
            ZygiskContext ctx(env, &args);
            ctx.nativeForkAndSpecialize_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _7, jint _8, jstring nice_name, jintArray fds_to_close, jintArray fds_to_ignore, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[9].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, _7, _8, nice_name, fds_to_close, fds_to_ignore, is_child_zygote, instruction_set, app_data_dir
            );
            ctx.nativeForkAndSpecialize_post();
            return ctx.pid;
        }
    },
    // [10] specialize (no fork): base variant
    {
        "nativeSpecializeAppProcess",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;ZLjava/lang/String;Ljava/lang/String;)V",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir) static -> void {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.is_child_zygote = &is_child_zygote;
            ZygiskContext ctx(env, &args);
            ctx.nativeSpecializeAppProcess_pre();
            reinterpret_cast<void(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[10].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, is_child_zygote, instruction_set, app_data_dir
            );
            ctx.nativeSpecializeAppProcess_post();
        }
    },
    // [11] adds is_top_app
    {
        "nativeSpecializeAppProcess",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;ZLjava/lang/String;Ljava/lang/String;Z)V",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app) static -> void {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.is_child_zygote = &is_child_zygote;
            args.is_top_app = &is_top_app;
            ZygiskContext ctx(env, &args);
            ctx.nativeSpecializeAppProcess_pre();
            reinterpret_cast<void(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app)>(g_hook->zygote_methods[11].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, is_child_zygote, instruction_set, app_data_dir, is_top_app
            );
            ctx.nativeSpecializeAppProcess_post();
        }
    },
    // [12] adds data-info lists + mount flags
    {
        "nativeSpecializeAppProcess",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;ZLjava/lang/String;Ljava/lang/String;Z[Ljava/lang/String;[Ljava/lang/String;ZZ)V",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs) static -> void {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.is_child_zygote = &is_child_zygote;
            args.is_top_app = &is_top_app;
            args.pkg_data_info_list = &pkg_data_info_list;
            args.whitelisted_data_info_list = &whitelisted_data_info_list;
            args.mount_data_dirs = &mount_data_dirs;
            args.mount_storage_dirs = &mount_storage_dirs;
            ZygiskContext ctx(env, &args);
            ctx.nativeSpecializeAppProcess_pre();
            reinterpret_cast<void(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs)>(g_hook->zygote_methods[12].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, is_child_zygote, instruction_set, app_data_dir, is_top_app, pkg_data_info_list, whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs
            );
            ctx.nativeSpecializeAppProcess_post();
        }
    },
    // [13] adds mount_sysprop_overrides
    {
        "nativeSpecializeAppProcess",
        "(II[II[[IILjava/lang/String;Ljava/lang/String;ZLjava/lang/String;Ljava/lang/String;Z[Ljava/lang/String;[Ljava/lang/String;ZZZ)V",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs, jboolean mount_sysprop_overrides) static -> void {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.is_child_zygote = &is_child_zygote;
            args.is_top_app = &is_top_app;
            args.pkg_data_info_list = &pkg_data_info_list;
            args.whitelisted_data_info_list = &whitelisted_data_info_list;
            args.mount_data_dirs = &mount_data_dirs;
            args.mount_storage_dirs = &mount_storage_dirs;
            args.mount_sysprop_overrides = &mount_sysprop_overrides;
            ZygiskContext ctx(env, &args);
            ctx.nativeSpecializeAppProcess_pre();
            reinterpret_cast<void(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir, jboolean is_top_app, jobjectArray pkg_data_info_list, jobjectArray whitelisted_data_info_list, jboolean mount_data_dirs, jboolean mount_storage_dirs, jboolean mount_sysprop_overrides)>(g_hook->zygote_methods[13].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, is_child_zygote, instruction_set, app_data_dir, is_top_app, pkg_data_info_list, whitelisted_data_info_list, mount_data_dirs, mount_storage_dirs, mount_sysprop_overrides
            );
            ctx.nativeSpecializeAppProcess_post();
        }
    },
    // [14] specialize variant with two extra unnamed jints
    {
        "nativeSpecializeAppProcess",
        "(II[II[[IILjava/lang/String;IILjava/lang/String;ZLjava/lang/String;Ljava/lang/String;)V",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _9, jint _10, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir) static -> void {
            AppSpecializeArgs_v5 args(uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, nice_name, instruction_set, app_data_dir);
            args.is_child_zygote = &is_child_zygote;
            ZygiskContext ctx(env, &args);
            ctx.nativeSpecializeAppProcess_pre();
            reinterpret_cast<void(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jint mount_external, jstring se_info, jint _9, jint _10, jstring nice_name, jboolean is_child_zygote, jstring instruction_set, jstring app_data_dir)>(g_hook->zygote_methods[14].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, mount_external, se_info, _9, _10, nice_name, is_child_zygote, instruction_set, app_data_dir
            );
            ctx.nativeSpecializeAppProcess_post();
        }
    },
    // [15] fork system server: base variant
    {
        "nativeForkSystemServer",
        "(II[II[[IJJ)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jlong permitted_capabilities, jlong effective_capabilities) static -> jint {
            ServerSpecializeArgs_v1 args(uid, gid, gids, runtime_flags, permitted_capabilities, effective_capabilities);
            ZygiskContext ctx(env, &args);
            ctx.nativeForkSystemServer_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jobjectArray rlimits, jlong permitted_capabilities, jlong effective_capabilities)>(g_hook->zygote_methods[15].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, rlimits, permitted_capabilities, effective_capabilities
            );
            ctx.nativeForkSystemServer_post();
            return ctx.pid;
        }
    },
    // [16] fork system server variant with two extra unnamed jints
    {
        "nativeForkSystemServer",
        "(II[IIII[[IJJ)I",
        (void *) +[] [[clang::no_stack_protector]] (JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jint _11, jint _12, jobjectArray rlimits, jlong permitted_capabilities, jlong effective_capabilities) static -> jint {
            ServerSpecializeArgs_v1 args(uid, gid, gids, runtime_flags, permitted_capabilities, effective_capabilities);
            ZygiskContext ctx(env, &args);
            ctx.nativeForkSystemServer_pre();
            reinterpret_cast<jint(*)(JNIEnv *env, jclass clazz, jint uid, jint gid, jintArray gids, jint runtime_flags, jint _11, jint _12, jobjectArray rlimits, jlong permitted_capabilities, jlong effective_capabilities)>(g_hook->zygote_methods[16].fnPtr)(
                env, clazz, uid, gid, gids, runtime_flags, _11, _12, rlimits, permitted_capabilities, effective_capabilities
            );
            ctx.nativeForkSystemServer_post();
            return ctx.pid;
        }
    },
}};

View file

@ -0,0 +1,28 @@
mod daemon;
use crate::thread::ThreadPool;
use base::{fd_get_attr, libc};
pub use daemon::{ZygiskState, zygisk_should_load_module};
use std::os::fd::RawFd;
// C-ABI entry point: run a module's companion handler for one connected
// client socket on the thread pool, then close the fd safely.
#[unsafe(no_mangle)]
extern "C" fn exec_companion_entry(client: RawFd, companion_handler: extern "C" fn(RawFd)) {
    ThreadPool::exec_task(move || {
        // Snapshot the file identity (dev, ino) behind the fd before handing
        // it to the module; bail out if the fd is already invalid.
        let Ok(s1) = fd_get_attr(client) else {
            return;
        };
        companion_handler(client);
        // Only close client if it is the same file so we don't
        // accidentally close a re-used file descriptor.
        // This check is required because the module companion
        // handler could've closed the file descriptor already.
        if let Ok(s2) = fd_get_attr(client)
            && s1.st.st_dev == s2.st.st_dev
            && s1.st.st_ino == s2.st.st_ino
        {
            unsafe { libc::close(client) };
        }
    });
}

View file

@ -0,0 +1,500 @@
#include <sys/mman.h>
#include <android/dlext.h>
#include <dlfcn.h>
#include <lsplt.hpp>
#include <base.hpp>
#include "zygisk.hpp"
#include "module.hpp"
using namespace std;
// Connect to the magisk daemon's zygisk handler and send the request code.
// Returns the connected socket fd, or a negative value if the connection
// could not be established.
static int zygisk_request(int req) {
    int conn = connect_daemon(RequestCode::ZYGISK);
    if (conn >= 0) {
        write_int(conn, req);
    }
    return conn;
}
// Per-module state for a loaded Zygisk module library.
// `handle` is the dlopen handle, `entry` the module's entry symbol.
// Only the registerModule callback is published up-front; the rest of the
// API table is filled in by RegisterModuleImpl once the module registers.
ZygiskModule::ZygiskModule(int id, void *handle, void *entry)
: id(id), handle(handle), entry{entry}, api{}, mod{nullptr} {
    // Make sure all pointers are null
    // (NOTE(review): api{} already value-initializes; the memset appears to
    // be belt-and-braces — confirm ApiTable stays trivially initializable)
    memset(&api, 0, sizeof(api));
    api.base.impl = this;
    api.base.registerModule = &ZygiskModule::RegisterModuleImpl;
}
// Module-side registration callback (published via api.base.registerModule).
// `module` points at the module's ABI struct; its first field is the API
// version the module was built against. Fills the ApiTable with the entry
// points appropriate for that version. Returns false on null arguments or
// an API version newer than what we support.
bool ZygiskModule::RegisterModuleImpl(ApiTable *api, long *module) {
    if (api == nullptr || module == nullptr)
        return false;
    long api_version = *module;
    // Unsupported version
    if (api_version > ZYGISK_API_VERSION)
        return false;
    // Set the actual module_abi*
    api->base.impl->mod = { module };
    // Fill in API accordingly with module API version
    if (api_version >= 1) {
        api->v1.hookJniNativeMethods = hookJniNativeMethods;
        // v1 PLT hooking is regex-path based and routed through the current
        // ZygiskContext; all calls are no-ops outside a specialize context
        api->v1.pltHookRegister = [](auto a, auto b, auto c, auto d) {
            if (g_ctx) g_ctx->plt_hook_register(a, b, c, d);
        };
        api->v1.pltHookExclude = [](auto a, auto b) {
            if (g_ctx) g_ctx->plt_hook_exclude(a, b);
        };
        api->v1.pltHookCommit = []() { return g_ctx && g_ctx->plt_hook_commit(); };
        api->v1.connectCompanion = [](ZygiskModule *m) { return m->connectCompanion(); };
        api->v1.setOption = [](ZygiskModule *m, auto opt) { m->setOption(opt); };
    }
    if (api_version >= 2) {
        api->v2.getModuleDir = [](ZygiskModule *m) { return m->getModuleDir(); };
        api->v2.getFlags = [](auto) { return ZygiskModule::getFlags(); };
    }
    if (api_version >= 4) {
        // v4 switches PLT hooking to direct (dev, inode) targeting via lsplt
        api->v4.pltHookCommit = lsplt::CommitHook;
        api->v4.pltHookRegister = [](dev_t dev, ino_t inode, const char *symbol, void *fn, void **backup) {
            if (dev == 0 || inode == 0 || symbol == nullptr || fn == nullptr)
                return;
            lsplt::RegisterHook(dev, inode, symbol, fn, backup);
        };
        api->v4.exemptFd = [](int fd) { return g_ctx && g_ctx->exempt_fd(fd); };
    }
    return true;
}
// A module is considered valid once it has registered itself (api_version
// written) with a supported API version (1..5) and all mandatory callback
// pointers filled in.
bool ZygiskModule::valid() const {
    if (mod.api_version == nullptr)
        return false;
    long ver = *mod.api_version;
    if (ver < 1 || ver > 5)
        return false;
    return mod.v1->impl && mod.v1->preAppSpecialize && mod.v1->postAppSpecialize &&
           mod.v1->preServerSpecialize && mod.v1->postServerSpecialize;
}
// Ask the daemon for a connection to this module's companion process.
// Protocol: send our bitness (true = 64-bit) followed by the module id.
// Returns the companion socket fd, or -1 on failure.
int ZygiskModule::connectCompanion() const {
    int fd = zygisk_request(+ZygiskRequest::ConnectCompanion);
    if (fd < 0)
        return -1;
#ifdef __LP64__
    constexpr bool is_64_bit = true;
#else
    constexpr bool is_64_bit = false;
#endif
    write_any<bool>(fd, is_64_bit);
    write_int(fd, id);
    return fd;
}
// Request a directory fd for this module's install directory from the
// daemon. The request socket is held in owned_fd so it is closed on every
// path; the fd received from the daemon is returned (or -1 on failure).
int ZygiskModule::getModuleDir() const {
    owned_fd fd = zygisk_request(+ZygiskRequest::GetModDir);
    if (fd < 0)
        return -1;
    write_int(fd, id);
    return recv_fd(fd);
}
// Apply a module-requested option. Does nothing unless called while inside
// a zygote specialization context (g_ctx set).
void ZygiskModule::setOption(zygisk::Option opt) {
    if (g_ctx == nullptr)
        return;
    if (opt == zygisk::FORCE_DENYLIST_UNMOUNT) {
        g_ctx->flags |= DO_REVERT_UNMOUNT;
    } else if (opt == zygisk::DLCLOSE_MODULE_LIBRARY) {
        // Remember to dlclose this module's library in tryUnload()
        unload = true;
    }
}
// Expose the current process info flags to modules, with Zygisk-internal
// bits masked out. Returns 0 outside of a specialization context.
uint32_t ZygiskModule::getFlags() {
    if (g_ctx == nullptr)
        return 0;
    return g_ctx->info_flags & ~PRIVATE_MASK;
}
// dlclose the module library if it opted in via DLCLOSE_MODULE_LIBRARY.
void ZygiskModule::tryUnload() const {
    if (unload) dlclose(handle);
}
// -----------------------------------------------------------------
// Dispatch an app-specialize callback with the args struct downgraded to the
// module's API version: v1/v2 modules receive an AppSpecializeArgs_v1 built
// from the v5 args, while v3+ modules receive the full v5 struct directly.
#define call_app(method) \
switch (*mod.api_version) { \
case 1: \
case 2: { \
AppSpecializeArgs_v1 a(args); \
mod.v1->method(mod.v1->impl, &a); \
break; \
} \
case 3: \
case 4: \
case 5: \
mod.v1->method(mod.v1->impl, args);\
break; \
}
// App-specialize callbacks go through call_app, which downgrades the args
// struct for v1/v2 modules.
void ZygiskModule::preAppSpecialize(AppSpecializeArgs_v5 *args) const {
    call_app(preAppSpecialize)
}
void ZygiskModule::postAppSpecialize(const AppSpecializeArgs_v5 *args) const {
    call_app(postAppSpecialize)
}
// Server-specialize callbacks are invoked through the v1 ABI directly.
void ZygiskModule::preServerSpecialize(ServerSpecializeArgs_v1 *args) const {
    mod.v1->preServerSpecialize(mod.v1->impl, args);
}
void ZygiskModule::postServerSpecialize(const ServerSpecializeArgs_v1 *args) const {
    mod.v1->postServerSpecialize(mod.v1->impl, args);
}
// -----------------------------------------------------------------
// Zygisk API v1: queue a PLT hook described by a library-path regex and a
// symbol name. The compiled regex is owned by register_info and released in
// plt_hook_commit. Silently ignores null arguments or an invalid regex.
void ZygiskContext::plt_hook_register(const char *regex, const char *symbol, void *fn, void **backup) {
    if (regex == nullptr || symbol == nullptr || fn == nullptr)
        return;
    regex_t re;
    if (regcomp(&re, regex, REG_NOSUB) != 0)
        return;
    mutex_guard lock(hook_info_lock);
    register_info.emplace_back(RegisterInfo{re, symbol, fn, backup});
}
// Zygisk API v1: exclude libraries matching `regex` from pending PLT hooks,
// optionally narrowed to a single symbol (null symbol => all symbols).
void ZygiskContext::plt_hook_exclude(const char *regex, const char *symbol) {
    if (!regex) return;
    regex_t re;
    if (regcomp(&re, regex, REG_NOSUB) != 0)
        return;
    mutex_guard lock(hook_info_lock);
    // `?:` is the GNU elvis extension: substitute "" when symbol is null
    ignore_info.emplace_back(IgnoreInfo{re, symbol ?: ""});
}
// Resolve all queued regex-based hook registrations against the currently
// mapped libraries and forward each non-excluded match to lsplt.
// Caller must hold hook_info_lock (see plt_hook_commit).
void ZygiskContext::plt_hook_process_regex() {
    if (register_info.empty())
        return;
    for (auto &map : lsplt::MapInfo::Scan()) {
        // Only the first (offset 0), private, readable mapping of each file
        if (map.offset != 0 || !map.is_private || !(map.perms & PROT_READ)) continue;
        for (auto &reg: register_info) {
            if (regexec(&reg.regex, map.path.data(), 0, nullptr, 0) != 0)
                continue;
            // Skip if an exclusion matches this path (and symbol, if given)
            bool ignored = false;
            for (auto &ign: ignore_info) {
                if (regexec(&ign.regex, map.path.data(), 0, nullptr, 0) != 0)
                    continue;
                if (ign.symbol.empty() || ign.symbol == reg.symbol) {
                    ignored = true;
                    break;
                }
            }
            if (!ignored) {
                lsplt::RegisterHook(map.dev, map.inode, reg.symbol, reg.callback, reg.backup);
            }
        }
    }
}
// Flush all pending PLT hook requests: translate regex-based requests,
// release every compiled regex, then commit through lsplt.
// Returns lsplt::CommitHook()'s success flag.
bool ZygiskContext::plt_hook_commit() {
    {
        // Scope the lock to the bookkeeping only; CommitHook runs unlocked.
        mutex_guard lock(hook_info_lock);
        plt_hook_process_regex();
        for (auto &req : register_info)
            regfree(&req.regex);
        for (auto &excl : ignore_info)
            regfree(&excl.regex);
        register_info.clear();
        ignore_info.clear();
    }
    return lsplt::CommitHook();
}
// -----------------------------------------------------------------
// Connect to magiskd and fetch module information for the process being
// specialized.  Protocol: send uid, process name, and a bool marking the
// 64-bit zygote; read back `info_flags`; when modules should be loaded,
// additionally receive one fd per module library into `fds`.
// Returns the connected socket fd (caller owns it) or -1 on failure.
int ZygiskContext::get_module_info(int uid, rust::Vec<int> &fds) {
    if (int fd = zygisk_request(+ZygiskRequest::GetInfo); fd >= 0) {
        write_int(fd, uid);
        write_string(fd, process);
#ifdef __LP64__
        write_any<bool>(fd, true);
#else
        write_any<bool>(fd, false);
#endif
        xxread(fd, &info_flags, sizeof(info_flags));
        if (zygisk_should_load_module(info_flags)) {
            fds = recv_fds(fd);
        }
        return fd;
    }
    return -1;
}
// Close every fd the specialized child must not inherit.  First merge any
// module-exempted fds into the framework's fds_to_ignore array (so the
// platform's own fd check passes), then close everything not marked in
// `allowed_fds`.
void ZygiskContext::sanitize_fds() {
    zygisk_close_logd();
    if (!is_child()) {
        return;
    }
    if (can_exempt_fd() && !exempted_fds.empty()) {
        // Allocate a jintArray sized old_len + exempted count, copy the
        // exempted fds into its tail, mark them allowed, and publish the
        // array back through args.app->fds_to_ignore.
        auto update_fd_array = [&](int old_len) -> jintArray {
            jintArray array = env->NewIntArray(static_cast<int>(old_len + exempted_fds.size()));
            if (array == nullptr)
                return nullptr;
            env->SetIntArrayRegion(
                    array, old_len, static_cast<int>(exempted_fds.size()), exempted_fds.data());
            for (int fd : exempted_fds) {
                if (fd >= 0 && fd < allowed_fds.size()) {
                    allowed_fds[fd] = true;
                }
            }
            *args.app->fds_to_ignore = array;
            return array;
        };
        if (jintArray fdsToIgnore = *args.app->fds_to_ignore) {
            // The framework already supplied a list: keep its entries
            // allowed and prepend them to the new merged array.
            int *arr = env->GetIntArrayElements(fdsToIgnore, nullptr);
            int len = env->GetArrayLength(fdsToIgnore);
            for (int i = 0; i < len; ++i) {
                int fd = arr[i];
                if (fd >= 0 && fd < allowed_fds.size()) {
                    allowed_fds[fd] = true;
                }
            }
            if (jintArray newFdList = update_fd_array(len)) {
                env->SetIntArrayRegion(newFdList, 0, len, arr);
            }
            // JNI_ABORT: we never modified arr, so skip the copy-back.
            env->ReleaseIntArrayElements(fdsToIgnore, arr, JNI_ABORT);
        } else {
            update_fd_array(0);
        }
    }
    // Close all forbidden fds to prevent crashing
    auto dir = xopen_dir("/proc/self/fd");
    int dfd = dirfd(dir.get());
    for (dirent *entry; (entry = xreaddir(dir.get()));) {
        int fd = parse_int(entry->d_name);
        // Keep the directory's own fd open while we iterate over it.
        if ((fd < 0 || fd >= allowed_fds.size() || !allowed_fds[fd]) && fd != dfd) {
            close(fd);
        }
    }
}
// Module API: request that `fd` survive fd sanitization.  After
// specialization (or when fd checks are skipped entirely) this is a
// no-op success; otherwise the fd is queued for sanitize_fds().
bool ZygiskContext::exempt_fd(int fd) {
    if (flags & (POST_SPECIALIZE | SKIP_CLOSE_LOG_PIPE))
        return true;
    if (!can_exempt_fd())
        return false;
    exempted_fds.push_back(fd);
    return true;
}
// Fd exemption only makes sense in the fork-and-specialize app path, and
// only when the platform actually passed an fds_to_ignore slot.
bool ZygiskContext::can_exempt_fd() const {
    if ((flags & APP_FORK_AND_SPECIALIZE) == 0)
        return false;
    return args.app->fds_to_ignore != nullptr;
}
static int sigmask(int how, int signum) {
sigset_t set;
sigemptyset(&set);
sigaddset(&set, signum);
return sigprocmask(how, &set, nullptr);
}
// Fork ourselves before any 3rd party module code is loaded, so modules
// never run inside the zygote parent.  In the child, snapshot all
// currently-open fds into `allowed_fds` so sanitize_fds() can later close
// anything that appeared afterwards.
void ZygiskContext::fork_pre() {
    // Do our own fork before loading any 3rd party code
    // First block SIGCHLD, unblock after original fork is done
    sigmask(SIG_BLOCK, SIGCHLD);
    pid = old_fork();
    if (!is_child())
        return;
    // Record all open fds
    auto dir = xopen_dir("/proc/self/fd");
    for (dirent *entry; (entry = xreaddir(dir.get()));) {
        int fd = parse_int(entry->d_name);
        // Fds beyond the bitmap cannot be tracked; close them right away.
        if (fd < 0 || fd >= allowed_fds.size()) {
            close(fd);
            continue;
        }
        allowed_fds[fd] = true;
    }
    // The dirfd will be closed once out of scope
    allowed_fds[dirfd(dir.get())] = false;
    // logd_fd should be handled separately
    if (int fd = zygisk_get_logd(); fd >= 0) {
        allowed_fds[fd] = false;
    }
}
// Counterpart of fork_pre(): restore SIGCHLD delivery.
void ZygiskContext::fork_post() {
    // Unblock SIGCHLD in case the original method didn't
    sigmask(SIG_UNBLOCK, SIGCHLD);
}
// Load each module library fd received from magiskd, run entry functions,
// and invoke the pre-specialize hooks.  An entry in `fds` is overwritten
// with -1 to signal a load failure back to the caller.
void ZygiskContext::run_modules_pre(rust::Vec<int> &fds) {
    for (int i = 0; i < fds.size(); ++i) {
        // owned_fd presumably closes the fd at end of iteration — the
        // library is already mapped by then.  TODO(review): confirm.
        owned_fd fd = fds[i];
        // Refuse anything that is not a regular file.
        struct stat s{};
        if (fstat(fd, &s) != 0 || !S_ISREG(s.st_mode)) {
            fds[i] = -1;
            continue;
        }
        android_dlextinfo info {
            .flags = ANDROID_DLEXT_USE_LIBRARY_FD,
            .library_fd = fd,
        };
        if (void *h = android_dlopen_ext("/jit-cache", RTLD_LAZY, &info)) {
            if (void *e = dlsym(h, "zygisk_module_entry")) {
                modules.emplace_back(i, h, e);
            }
        } else if (flags & SERVER_FORK_AND_SPECIALIZE) {
            // Failures are only logged/reported in the system_server path.
            ZLOGW("Failed to dlopen zygisk module: %s\n", dlerror());
            fds[i] = -1;
        }
    }
    // Run every module's entry function; drop modules that did not end up
    // in a valid registered state.
    for (auto it = modules.begin(); it != modules.end();) {
        it->onLoad(env);
        if (it->valid()) {
            ++it;
        } else {
            it = modules.erase(it);
        }
    }
    // Dispatch the appropriate pre-specialize callback.
    for (auto &m : modules) {
        if (flags & APP_SPECIALIZE) {
            m.preAppSpecialize(args.app);
        } else if (flags & SERVER_FORK_AND_SPECIALIZE) {
            m.preServerSpecialize(args.server);
        }
    }
}
// Run each module's post-specialize callback, then allow each module to
// unload itself (dlclose) if it requested so earlier.
void ZygiskContext::run_modules_post() {
    flags |= POST_SPECIALIZE;
    for (const auto &m : modules) {
        if (flags & APP_SPECIALIZE) {
            m.postAppSpecialize(args.app);
        } else if (flags & SERVER_FORK_AND_SPECIALIZE) {
            m.postServerSpecialize(args.server);
        }
        m.tryUnload();
    }
}
// App pre-specialize: query magiskd for this uid/process.  If the process
// is fully denylisted, mark it for mount namespace cleanup instead of
// loading any modules; otherwise run the module pre hooks.
void ZygiskContext::app_specialize_pre() {
    flags |= APP_SPECIALIZE;
    rust::Vec<int> module_fds;
    owned_fd fd = get_module_info(args.app->uid, module_fds);
    if ((info_flags & UNMOUNT_MASK) == UNMOUNT_MASK) {
        ZLOGI("[%s] is on the denylist\n", process);
        flags |= DO_REVERT_UNMOUNT;
    } else if (fd >= 0) {
        run_modules_pre(module_fds);
    }
}
// App post-specialize: run module post hooks, export ZYGISK_ENABLED for
// the Magisk app's own process, and release the JNI process-name string.
void ZygiskContext::app_specialize_post() {
    run_modules_post();
    if (info_flags & +ZygiskStateFlags::ProcessIsMagiskApp) {
        setenv("ZYGISK_ENABLED", "1", 1);
    }
    // Cleanups
    env->ReleaseStringUTFChars(args.app->nice_name, process);
}
// system_server pre-specialize: query magiskd as uid 1000.  With no
// modules, acknowledge immediately (write 0); otherwise run pre hooks and
// send back the ids of modules whose libraries failed to load.
void ZygiskContext::server_specialize_pre() {
    rust::Vec<int> module_fds;
    if (owned_fd fd = get_module_info(1000, module_fds); fd >= 0) {
        if (module_fds.empty()) {
            write_int(fd, 0);
        } else {
            run_modules_pre(module_fds);
            // Find all failed module ids and send it back to magiskd
            vector<int> failed_ids;
            for (int i = 0; i < module_fds.size(); ++i) {
                // run_modules_pre marks failed entries with -1.
                if (module_fds[i] < 0) {
                    failed_ids.push_back(i);
                }
            }
            write_vector(fd, failed_ids);
        }
    }
}
// system_server post-specialize: just run the module post hooks.
void ZygiskContext::server_specialize_post() {
    run_modules_post();
}
// -----------------------------------------------------------------
// -----------------------------------------------------------------
// Hook for Zygote's nativeSpecializeAppProcess (no fork involved).
void ZygiskContext::nativeSpecializeAppProcess_pre() {
    process = env->GetStringUTFChars(args.app->nice_name, nullptr);
    ZLOGV("pre specialize [%s]\n", process);
    // App specialize does not check FD
    flags |= SKIP_CLOSE_LOG_PIPE;
    app_specialize_pre();
}
// Post hook for nativeSpecializeAppProcess.
void ZygiskContext::nativeSpecializeAppProcess_post() {
    ZLOGV("post specialize [%s]\n", process);
    app_specialize_post();
}
// Hook for nativeForkSystemServer: fork early, run server pre hooks in
// the child, then sanitize inherited fds.
void ZygiskContext::nativeForkSystemServer_pre() {
    ZLOGV("pre forkSystemServer\n");
    flags |= SERVER_FORK_AND_SPECIALIZE;
    process = "system_server";
    fork_pre();
    if (is_child()) {
        server_specialize_pre();
    }
    sanitize_fds();
}
// Post hook for nativeForkSystemServer; parent only restores SIGCHLD.
void ZygiskContext::nativeForkSystemServer_post() {
    if (is_child()) {
        ZLOGV("post forkSystemServer\n");
        server_specialize_post();
    }
    fork_post();
}
// Hook for nativeForkAndSpecialize: fork early, run app pre hooks in the
// child, then sanitize inherited fds.
void ZygiskContext::nativeForkAndSpecialize_pre() {
    process = env->GetStringUTFChars(args.app->nice_name, nullptr);
    ZLOGV("pre forkAndSpecialize [%s]\n", process);
    flags |= APP_FORK_AND_SPECIALIZE;
    fork_pre();
    if (is_child()) {
        app_specialize_pre();
    }
    sanitize_fds();
}
// Post hook for nativeForkAndSpecialize; parent only restores SIGCHLD.
void ZygiskContext::nativeForkAndSpecialize_post() {
    if (is_child()) {
        ZLOGV("post forkAndSpecialize [%s]\n", process);
        app_specialize_post();
    }
    fork_post();
}

View file

@ -0,0 +1,286 @@
#pragma once
#include <regex.h>
#include <list>
#include "api.hpp"
// Forward declarations and version aliases for the Zygisk module ABI.
// API versions that did not change a struct's layout simply alias the
// earlier definition.
struct ZygiskContext;
struct ZygiskModule;
struct AppSpecializeArgs_v1;
using AppSpecializeArgs_v2 = AppSpecializeArgs_v1;
struct AppSpecializeArgs_v3;
using AppSpecializeArgs_v4 = AppSpecializeArgs_v3;
struct AppSpecializeArgs_v5;
struct module_abi_v1;
using module_abi_v2 = module_abi_v1;
using module_abi_v3 = module_abi_v1;
using module_abi_v4 = module_abi_v1;
using module_abi_v5 = module_abi_v1;
struct api_abi_v1;
struct api_abi_v2;
using api_abi_v3 = api_abi_v2;
struct api_abi_v4;
using api_abi_v5 = api_abi_v4;
union ApiTable;
// App specialize arguments for API v3/v4 modules.  Reference members alias
// the hooked JNI method's local arguments so modules can modify them
// in-place; pointer members are optional and stay null when the platform
// version does not pass the corresponding parameter.
struct AppSpecializeArgs_v3 {
    jint &uid;
    jint &gid;
    jintArray &gids;
    jint &runtime_flags;
    jobjectArray &rlimits;
    jint &mount_external;
    jstring &se_info;
    jstring &nice_name;
    jstring &instruction_set;
    jstring &app_data_dir;
    jintArray *fds_to_ignore = nullptr;
    jboolean *is_child_zygote = nullptr;
    jboolean *is_top_app = nullptr;
    jobjectArray *pkg_data_info_list = nullptr;
    jobjectArray *whitelisted_data_info_list = nullptr;
    jboolean *mount_data_dirs = nullptr;
    jboolean *mount_storage_dirs = nullptr;
    AppSpecializeArgs_v3(
            jint &uid, jint &gid, jintArray &gids, jint &runtime_flags,
            jobjectArray &rlimits, jint &mount_external, jstring &se_info, jstring &nice_name,
            jstring &instruction_set, jstring &app_data_dir) :
            uid(uid), gid(gid), gids(gids), runtime_flags(runtime_flags), rlimits(rlimits),
            mount_external(mount_external), se_info(se_info), nice_name(nice_name),
            instruction_set(instruction_set), app_data_dir(app_data_dir) {}
};
// API v5 extends v3 with a single optional field; everything else is
// inherited unchanged.
struct AppSpecializeArgs_v5 : public AppSpecializeArgs_v3 {
    jboolean *mount_sysprop_overrides = nullptr;
    AppSpecializeArgs_v5(
            jint &uid, jint &gid, jintArray &gids, jint &runtime_flags,
            jobjectArray &rlimits, jint &mount_external, jstring &se_info, jstring &nice_name,
            jstring &instruction_set, jstring &app_data_dir) : AppSpecializeArgs_v3(
            uid, gid, gids, runtime_flags, rlimits, mount_external,
            se_info, nice_name, instruction_set, app_data_dir) {}
};
// Down-converted argument view handed to legacy API v1/v2 modules.
// Constructed on the stack (see the call_app macro) from the full v5 args;
// it drops fields that did not exist in the v1 ABI (e.g. rlimits,
// fds_to_ignore).
struct AppSpecializeArgs_v1 {
    jint &uid;
    jint &gid;
    jintArray &gids;
    jint &runtime_flags;
    jint &mount_external;
    jstring &se_info;
    jstring &nice_name;
    jstring &instruction_set;
    jstring &app_data_dir;
    jboolean *const is_child_zygote;
    jboolean *const is_top_app;
    jobjectArray *const pkg_data_info_list;
    jobjectArray *const whitelisted_data_info_list;
    jboolean *const mount_data_dirs;
    jboolean *const mount_storage_dirs;
    AppSpecializeArgs_v1(const AppSpecializeArgs_v5 *a) :
            uid(a->uid), gid(a->gid), gids(a->gids), runtime_flags(a->runtime_flags),
            mount_external(a->mount_external), se_info(a->se_info), nice_name(a->nice_name),
            instruction_set(a->instruction_set), app_data_dir(a->app_data_dir),
            is_child_zygote(a->is_child_zygote), is_top_app(a->is_top_app),
            pkg_data_info_list(a->pkg_data_info_list),
            whitelisted_data_info_list(a->whitelisted_data_info_list),
            mount_data_dirs(a->mount_data_dirs), mount_storage_dirs(a->mount_storage_dirs) {}
};
// system_server specialize arguments; layout never changed, so all API
// versions share this v1 definition.
struct ServerSpecializeArgs_v1 {
    jint &uid;
    jint &gid;
    jintArray &gids;
    jint &runtime_flags;
    jlong &permitted_capabilities;
    jlong &effective_capabilities;
    ServerSpecializeArgs_v1(
            jint &uid, jint &gid, jintArray &gids, jint &runtime_flags,
            jlong &permitted_capabilities, jlong &effective_capabilities) :
            uid(uid), gid(gid), gids(gids), runtime_flags(runtime_flags),
            permitted_capabilities(permitted_capabilities),
            effective_capabilities(effective_capabilities) {}
};
// Module-side ABI table: function pointers registered by the module.
// Layout is stable across all API versions (v1 == v2 == ... == v5).
struct module_abi_v1 {
    long api_version;
    void *impl;
    void (*preAppSpecialize)(void *, void *);
    void (*postAppSpecialize)(void *, const void *);
    void (*preServerSpecialize)(void *, void *);
    void (*postServerSpecialize)(void *, const void *);
};
// Assert the flag values to be the same as the public API
static_assert(+ZygiskStateFlags::ProcessGrantedRoot == zygisk::StateFlag::PROCESS_GRANTED_ROOT);
static_assert(+ZygiskStateFlags::ProcessOnDenyList == zygisk::StateFlag::PROCESS_ON_DENYLIST);
enum : uint32_t {
    // Both bits set => the process must have Magisk mounts reverted.
    UNMOUNT_MASK = (+ZygiskStateFlags::ProcessOnDenyList | +ZygiskStateFlags::DenyListEnforced),
    // Bits that are internal to Magisk and hidden from modules (getFlags).
    PRIVATE_MASK = (+ZygiskStateFlags::DenyListEnforced | +ZygiskStateFlags::ProcessIsMagiskApp)
};
// Zygisk-side API tables handed to modules.  Numbered comments give each
// function pointer's slot index in the table.
struct api_abi_base {
    ZygiskModule *impl;
    bool (*registerModule)(ApiTable *, long *);
};
// API v1: regex/path based PLT hooking.
struct api_abi_v1 : public api_abi_base {
    /* 0 */ void (*hookJniNativeMethods)(JNIEnv *, const char *, JNINativeMethod *, int);
    /* 1 */ void (*pltHookRegister)(const char *, const char *, void *, void **);
    /* 2 */ void (*pltHookExclude)(const char *, const char *);
    /* 3 */ bool (*pltHookCommit)();
    /* 4 */ int (*connectCompanion)(ZygiskModule *);
    /* 5 */ void (*setOption)(ZygiskModule *, zygisk::Option);
};
// API v2/v3 adds module dir and flag queries.
struct api_abi_v2 : public api_abi_v1 {
    /* 6 */ int (*getModuleDir)(ZygiskModule *);
    /* 7 */ uint32_t (*getFlags)(ZygiskModule *);
};
// API v4/v5: PLT hooking keyed by dev/inode; pltHookExclude replaced by
// exemptFd in slot 2.
struct api_abi_v4 : public api_abi_base {
    /* 0 */ void (*hookJniNativeMethods)(JNIEnv *, const char *, JNINativeMethod *, int);
    /* 1 */ void (*pltHookRegister)(dev_t, ino_t, const char *, void *, void **);
    /* 2 */ bool (*exemptFd)(int);
    /* 3 */ bool (*pltHookCommit)();
    /* 4 */ int (*connectCompanion)(ZygiskModule *);
    /* 5 */ void (*setOption)(ZygiskModule *, zygisk::Option);
    /* 6 */ int (*getModuleDir)(ZygiskModule *);
    /* 7 */ uint32_t (*getFlags)(ZygiskModule *);
};
// One storage slot reinterpreted according to the negotiated API version.
union ApiTable {
    api_abi_base base;
    api_abi_v1 v1;
    api_abi_v2 v2;
    api_abi_v4 v4;
};
// Represents one loaded Zygisk module library and mediates all calls
// between Zygisk and the module through the version-negotiated ABI tables.
struct ZygiskModule {
    // Invoke the module's zygisk_module_entry(api, env) function.
    void onLoad(void *env) {
        entry.fn(&api, env);
    }
    void preAppSpecialize(AppSpecializeArgs_v5 *args) const;
    void postAppSpecialize(const AppSpecializeArgs_v5 *args) const;
    void preServerSpecialize(ServerSpecializeArgs_v1 *args) const;
    void postServerSpecialize(const ServerSpecializeArgs_v1 *args) const;
    bool valid() const;
    int connectCompanion() const;
    int getModuleDir() const;
    void setOption(zygisk::Option opt);
    static uint32_t getFlags();
    void tryUnload() const;
    void clearApi() { memset(&api, 0, sizeof(api)); }
    ZygiskModule(int id, void *handle, void *entry);
    static bool RegisterModuleImpl(ApiTable *api, long *module);
private:
    // Module id assigned by magiskd (index in the fd list).
    const int id;
    // Set via DLCLOSE_MODULE_LIBRARY; checked by tryUnload().
    bool unload = false;
    // dlopen handle of the module library.
    void * const handle;
    // The module's entry symbol, viewed as raw pointer or callable.
    union {
        void * const ptr;
        void (* const fn)(void *, void *);
    } entry;
    // The API table handed to the module in onLoad().
    ApiTable api;
    // The module-registered ABI table, viewed by version.
    union {
        long *api_version;
        module_abi_v1 *v1;
    } mod;
};
// Global context for the current specialization, and the original fork()
// implementation saved before hooking.
extern ZygiskContext *g_ctx;
extern int (*old_fork)(void);
// ZygiskContext::flags bits describing which hook path we are in and what
// post-processing is required.
enum : uint32_t {
    POST_SPECIALIZE = (1u << 0),
    APP_FORK_AND_SPECIALIZE = (1u << 1),
    APP_SPECIALIZE = (1u << 2),
    SERVER_FORK_AND_SPECIALIZE = (1u << 3),
    DO_REVERT_UNMOUNT = (1u << 4),
    SKIP_CLOSE_LOG_PIPE = (1u << 5),
};
// Declare matching name_pre()/name_post() hook methods.
#define DCL_PRE_POST(name) \
void name##_pre();         \
void name##_post();
// Per-specialization state shared by all Zygote hooks.  One instance lives
// for the duration of a single fork/specialize operation (see g_ctx).
struct ZygiskContext {
    JNIEnv *env;
    // The hooked method's argument struct, viewed by hook type.
    union {
        void *ptr;
        AppSpecializeArgs_v5 *app;
        ServerSpecializeArgs_v1 *server;
    } args;
    // UTF chars of nice_name (app) or the literal "system_server".
    const char *process;
    std::list<ZygiskModule> modules;
    // Result of old_fork(); is_child() tests pid <= 0.
    int pid;
    uint32_t flags;
    // State flags received from magiskd (ZygiskStateFlags bits).
    uint32_t info_flags;
    // Bitmap of fds allowed to survive sanitize_fds().
    std::vector<bool> allowed_fds;
    // Fds modules asked to keep open via exempt_fd().
    std::vector<int> exempted_fds;
    // Pending regex-based PLT hook request (legacy API compat).
    struct RegisterInfo {
        regex_t regex;
        std::string symbol;
        void *callback;
        void **backup;
    };
    // Pending regex-based PLT hook exclusion (legacy API compat).
    struct IgnoreInfo {
        regex_t regex;
        std::string symbol;
    };
    pthread_mutex_t hook_info_lock;
    std::vector<RegisterInfo> register_info;
    std::vector<IgnoreInfo> ignore_info;
    ZygiskContext(JNIEnv *env, void *args);
    ~ZygiskContext();
    void run_modules_pre(rust::Vec<int> &fds);
    void run_modules_post();
    DCL_PRE_POST(fork)
    DCL_PRE_POST(app_specialize)
    DCL_PRE_POST(server_specialize)
    DCL_PRE_POST(nativeForkAndSpecialize)
    DCL_PRE_POST(nativeSpecializeAppProcess)
    DCL_PRE_POST(nativeForkSystemServer)
    int get_module_info(int uid, rust::Vec<int> &fds);
    void sanitize_fds();
    bool exempt_fd(int fd);
    bool can_exempt_fd() const;
    bool is_child() const { return pid <= 0; }
    // Compatibility shim
    void plt_hook_register(const char *regex, const char *symbol, void *fn, void **backup);
    void plt_hook_exclude(const char *regex, const char *symbol);
    void plt_hook_process_regex();
    bool plt_hook_commit();
};
#undef DCL_PRE_POST

View file

@ -0,0 +1,42 @@
#pragma once
#include <jni.h>
#include <core.hpp>
// Name of the Zygisk loader library and the system property abused to
// inject it via the native bridge mechanism.
#define ZYGISKLDR "libzygisk.so"
#define NBPROP "ro.dalvik.vm.native.bridge"
// Log tags distinguish the 64-bit and 32-bit zygote processes.
#if defined(__LP64__)
#define ZLOGD(...) LOGD("zygisk64: " __VA_ARGS__)
#define ZLOGE(...) LOGE("zygisk64: " __VA_ARGS__)
#define ZLOGI(...) LOGI("zygisk64: " __VA_ARGS__)
#define ZLOGW(...) LOGW("zygisk64: " __VA_ARGS__)
#else
#define ZLOGD(...) LOGD("zygisk32: " __VA_ARGS__)
#define ZLOGE(...) LOGE("zygisk32: " __VA_ARGS__)
#define ZLOGI(...) LOGI("zygisk32: " __VA_ARGS__)
#define ZLOGW(...) LOGW("zygisk32: " __VA_ARGS__)
#endif
// Extreme verbose logging
//#define ZLOGV(...) ZLOGD(__VA_ARGS__)
// Disabled by default: expands to a discarded constant, so arguments are
// never evaluated.
#define ZLOGV(...) (void*)0
void hook_entry();
void hookJniNativeMethods(JNIEnv *env, const char *clz, JNINativeMethod *methods, int numMethods);
// The reference of the following structs
// https://cs.android.com/android/platform/superproject/main/+/main:art/libnativebridge/include/nativebridge/native_bridge.h
struct NativeBridgeRuntimeCallbacks {
    const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
    uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
    uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
                                 uint32_t method_count);
};
// Minimal prefix of ART's NativeBridgeCallbacks: only version and the
// isCompatibleWith slot are used here; padding skips the 5 intermediate
// function pointers.
struct NativeBridgeCallbacks {
    uint32_t version;
    void *padding[5];
    bool (*isCompatibleWith)(uint32_t);
};

View file

@ -0,0 +1,3 @@
/* Linker version script: export only the NativeBridgeItf symbol. */
{
NativeBridgeItf;
};

128
native/src/external/Android.mk vendored Normal file
View file

@ -0,0 +1,128 @@
LOCAL_PATH := $(call my-dir)
# libxz.a
# Embedded XZ decoder (decompression only: crc32 + lzma2 + stream).
include $(CLEAR_VARS)
LOCAL_MODULE:= libxz
LOCAL_C_INCLUDES := $(LOCAL_PATH)/xz-embedded
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_C_INCLUDES)
LOCAL_SRC_FILES := \
    xz-embedded/xz_crc32.c \
    xz-embedded/xz_dec_lzma2.c \
    xz-embedded/xz_dec_stream.c
include $(BUILD_STATIC_LIBRARY)
# liblz4.a
# LZ4 compression library, including the frame format and HC compressor.
include $(CLEAR_VARS)
LOCAL_MODULE := liblz4
LOCAL_C_INCLUDES := $(LOCAL_PATH)/lz4/lib
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_C_INCLUDES)
LOCAL_SRC_FILES := \
    lz4/lib/lz4.c \
    lz4/lib/lz4frame.c \
    lz4/lib/lz4hc.c \
    lz4/lib/xxhash.c
include $(BUILD_STATIC_LIBRARY)
SE_PATH := $(LOCAL_PATH)/selinux
# libsepol.a
# SELinux policy library, built together with its CIL compiler sources.
include $(CLEAR_VARS)
LIBSEPOL := $(SE_PATH)/libsepol/include $(SE_PATH)/libsepol/cil/include
LOCAL_MODULE := libsepol
LOCAL_C_INCLUDES := $(LIBSEPOL) $(LOCAL_PATH)/selinux/libsepol/src
LOCAL_EXPORT_C_INCLUDES := $(LIBSEPOL)
LOCAL_SRC_FILES := \
    selinux/libsepol/src/assertion.c \
    selinux/libsepol/src/avrule_block.c \
    selinux/libsepol/src/avtab.c \
    selinux/libsepol/src/boolean_record.c \
    selinux/libsepol/src/booleans.c \
    selinux/libsepol/src/conditional.c \
    selinux/libsepol/src/constraint.c \
    selinux/libsepol/src/context.c \
    selinux/libsepol/src/context_record.c \
    selinux/libsepol/src/debug.c \
    selinux/libsepol/src/ebitmap.c \
    selinux/libsepol/src/expand.c \
    selinux/libsepol/src/handle.c \
    selinux/libsepol/src/hashtab.c \
    selinux/libsepol/src/hierarchy.c \
    selinux/libsepol/src/ibendport_record.c \
    selinux/libsepol/src/ibendports.c \
    selinux/libsepol/src/ibpkey_record.c \
    selinux/libsepol/src/ibpkeys.c \
    selinux/libsepol/src/iface_record.c \
    selinux/libsepol/src/interfaces.c \
    selinux/libsepol/src/kernel_to_cil.c \
    selinux/libsepol/src/kernel_to_common.c \
    selinux/libsepol/src/kernel_to_conf.c \
    selinux/libsepol/src/link.c \
    selinux/libsepol/src/mls.c \
    selinux/libsepol/src/module.c \
    selinux/libsepol/src/module_to_cil.c \
    selinux/libsepol/src/node_record.c \
    selinux/libsepol/src/nodes.c \
    selinux/libsepol/src/optimize.c \
    selinux/libsepol/src/polcaps.c \
    selinux/libsepol/src/policydb.c \
    selinux/libsepol/src/policydb_convert.c \
    selinux/libsepol/src/policydb_public.c \
    selinux/libsepol/src/policydb_validate.c \
    selinux/libsepol/src/port_record.c \
    selinux/libsepol/src/ports.c \
    selinux/libsepol/src/services.c \
    selinux/libsepol/src/sidtab.c \
    selinux/libsepol/src/symtab.c \
    selinux/libsepol/src/user_record.c \
    selinux/libsepol/src/users.c \
    selinux/libsepol/src/util.c \
    selinux/libsepol/src/write.c \
    selinux/libsepol/cil/src/cil.c \
    selinux/libsepol/cil/src/cil_binary.c \
    selinux/libsepol/cil/src/cil_build_ast.c \
    selinux/libsepol/cil/src/cil_copy_ast.c \
    selinux/libsepol/cil/src/cil_deny.c \
    selinux/libsepol/cil/src/cil_find.c \
    selinux/libsepol/cil/src/cil_fqn.c \
    selinux/libsepol/cil/src/cil_lexer.c \
    selinux/libsepol/cil/src/cil_list.c \
    selinux/libsepol/cil/src/cil_log.c \
    selinux/libsepol/cil/src/cil_mem.c \
    selinux/libsepol/cil/src/cil_parser.c \
    selinux/libsepol/cil/src/cil_policy.c \
    selinux/libsepol/cil/src/cil_post.c \
    selinux/libsepol/cil/src/cil_reset_ast.c \
    selinux/libsepol/cil/src/cil_resolve_ast.c \
    selinux/libsepol/cil/src/cil_stack.c \
    selinux/libsepol/cil/src/cil_strpool.c \
    selinux/libsepol/cil/src/cil_symtab.c \
    selinux/libsepol/cil/src/cil_tree.c \
    selinux/libsepol/cil/src/cil_verify.c \
    selinux/libsepol/cil/src/cil_write_ast.c
LOCAL_CFLAGS := -Wno-unused-but-set-variable
# riscv64's libc provides reallocarray; tell libsepol not to define its own.
ifeq ($(TARGET_ARCH),riscv64)
LOCAL_CFLAGS += -DHAVE_REALLOCARRAY
endif
include $(BUILD_STATIC_LIBRARY)
# liblsplt.a
# PLT hooking library.  __android_log_print is redirected to magisk's own
# logger via the -D define below.
include $(CLEAR_VARS)
LOCAL_MODULE:= liblsplt
LOCAL_C_INCLUDES := $(LOCAL_PATH)/lsplt/lsplt/src/main/jni/include
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_C_INCLUDES)
LOCAL_CFLAGS := -Wall -Wextra -Werror -fvisibility=hidden -D__android_log_print=magisk_log_print
LOCAL_CPPFLAGS := -std=c++20
LOCAL_STATIC_LIBRARIES := libcxx
LOCAL_SRC_FILES := \
    lsplt/lsplt/src/main/jni/elf_util.cc \
    lsplt/lsplt/src/main/jni/lsplt.cc
include $(BUILD_STATIC_LIBRARY)
# Pull in the remaining vendored sub-makefiles; crt0 only when requested.
CWD := $(LOCAL_PATH)
include $(CWD)/system_properties/Android.mk
include $(CWD)/libcxx/Android.mk
ifdef B_CRT0
include $(CWD)/crt0/Android.mk
endif

10
native/src/external/lz4-sys/Cargo.toml vendored Normal file
View file

@ -0,0 +1,10 @@
# Vendored manifest for the lz4-sys crate (bindings to the liblz4 sources
# kept in this directory).
[package]
name = "lz4-sys"
license = "MIT"
version = "1.11.1+lz4-1.10.0"
authors = [ "Jens Heyens <jens.heyens@ewetel.net>", "Artem V. Navrotskiy <bozaro@buzzsoft.ru>", "Patrick Marks <pmarks@gmail.com>"]
description = "Rust LZ4 sys package."
repository = "https://github.com/10xGenomics/lz4-rs"
[dependencies]
libc = "0.2"

Some files were not shown because too many files have changed in this diff Show more