Updated main branch

This commit is contained in:
Fr4nz D13trich 2025-11-20 21:30:08 +01:00
parent 9a3987bc1e
commit 3f2388004d
350 changed files with 79360 additions and 0 deletions

548
kernel/.clang-format Normal file

@ -0,0 +1,548 @@
# SPDX-License-Identifier: GPL-2.0
#
# clang-format configuration file. Intended for clang-format >= 4.
#
# For more information, see:
#
# Documentation/process/clang-format.rst
# https://clang.llvm.org/docs/ClangFormat.html
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
---
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: true
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  #AfterExternBlock: false # Unknown to clang-format-5.0
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
  #SplitEmptyFunction: true # Unknown to clang-format-4.0
  #SplitEmptyRecord: true # Unknown to clang-format-4.0
  #SplitEmptyNamespace: true # Unknown to clang-format-4.0
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
#CompactNamespaces: false # Unknown to clang-format-4.0
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
#FixNamespaceComments: false # Unknown to clang-format-4.0
# Taken from:
# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
# | sort | uniq
ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- '__ata_qc_for_each'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- '__bio_for_each_bvec'
- 'bio_for_each_bvec'
- 'bio_for_each_bvec_all'
- 'bio_for_each_integrity_vec'
- '__bio_for_each_segment'
- 'bio_for_each_segment'
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- 'bitmap_for_each_clear_region'
- 'bitmap_for_each_set_region'
- 'blkg_for_each_descendant_post'
- 'blkg_for_each_descendant_pre'
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
- 'btree_for_each_safel'
- 'card_for_each_dev'
- 'cgroup_taskset_for_each'
- 'cgroup_taskset_for_each_leader'
- 'cpufreq_for_each_entry'
- 'cpufreq_for_each_entry_idx'
- 'cpufreq_for_each_valid_entry'
- 'cpufreq_for_each_valid_entry_idx'
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
- 'for_each_card_dapms'
- 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_card_widgets'
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
- 'for_each_hstate'
- 'for_each_if'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- 'for_each_mem_region'
- 'for_each_memblock_type'
- 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
- 'for_each_netdev_continue_reverse'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
- 'for_each_netdev_reverse'
- 'for_each_netdev_safe'
- 'for_each_net_rcu'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_private_obj_in_state'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
- 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
- 'for_each_online_cpu'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
- 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_cpu'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_codec_dais_rollback'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_cpu_dais_rollback'
- 'for_each_rtd_dais'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sgtable_dma_page'
- 'for_each_sgtable_dma_sg'
- 'for_each_sgtable_page'
- 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_unicast_dest_pgid'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
- 'fwnode_for_each_available_child_node'
- 'fwnode_for_each_child_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
- 'genradix_for_each'
- 'genradix_for_each_from'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
- 'hash_for_each_possible_rcu_notrace'
- 'hash_for_each_possible_safe'
- 'hash_for_each_rcu'
- 'hash_for_each_safe'
- 'hctx_for_each_ctx'
- 'hlist_bl_for_each_entry'
- 'hlist_bl_for_each_entry_rcu'
- 'hlist_bl_for_each_entry_safe'
- 'hlist_for_each'
- 'hlist_for_each_entry'
- 'hlist_for_each_entry_continue'
- 'hlist_for_each_entry_continue_rcu'
- 'hlist_for_each_entry_continue_rcu_bh'
- 'hlist_for_each_entry_from'
- 'hlist_for_each_entry_from_rcu'
- 'hlist_for_each_entry_rcu'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
- '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
- 'klp_for_each_func_safe'
- 'klp_for_each_func_static'
- 'klp_for_each_object'
- 'klp_for_each_object_safe'
- 'klp_for_each_object_static'
- 'kunit_suite_for_each_test_case'
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
- 'list_for_each_entry_reverse'
- 'list_for_each_entry_safe'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
- 'netdev_for_each_mc_addr'
- 'netdev_for_each_uc_addr'
- 'netdev_for_each_upper_dev_rcu'
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
- 'nla_for_each_nested'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
- 'nr_neigh_for_each_safe'
- 'nr_node_for_each'
- 'nr_node_for_each_safe'
- 'of_for_each_phandle'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
- 'pcm_for_each_format'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
- 'plist_for_each_entry_continue'
- 'plist_for_each_entry_safe'
- 'plist_for_each_safe'
- 'pnp_for_each_card'
- 'pnp_for_each_dev'
- 'protocol_for_each_card'
- 'protocol_for_each_dev'
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
- 'sctp_for_each_hentry'
- 'sctp_skb_for_each'
- 'shdma_for_each_chan'
- '__shost_for_each_device'
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
- 'sk_for_each_safe'
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
- 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
- 'v4l2_device_for_each_subdev'
- 'v4l2_m2m_for_each_dst_buf'
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'xbc_array_for_each_value'
- 'xbc_for_each_key_value'
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
IncludeCategories:
  - Regex: '.*'
    Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
#IndentPPDirectives: None # Unknown to clang-format-5.0
IndentWidth: 4
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
ObjCBlockIndentWidth: 4
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
#SortUsingDeclarations: false # Unknown to clang-format-4.0
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
SpaceBeforeParens: ControlStatements
#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp03
TabWidth: 4
UseTab: Never
...

4
kernel/.clangd Normal file

@ -0,0 +1,4 @@
Diagnostics:
  UnusedIncludes: Strict
  ClangTidy:
    Remove: bugprone-sizeof-expression

22
kernel/.gitignore vendored Normal file

@ -0,0 +1,22 @@
.cache/
.thinlto-cache/
compile_commands.json
*.ko
*.o
*.mod
*.lds
*.mod.o
.*.o*
.*.mod*
*.ko*
*.mod.c
*.symvers*
*.order
.*.ko.cmd
.tmp_versions/
libs/
obj/
CLAUDE.md
.ddk-version
.vscode/settings.json

11
kernel/.vscode/c_cpp_properties.json vendored Normal file

@ -0,0 +1,11 @@
{
    "configurations": [
        {
            "name": "Linux",
            "cStandard": "c11",
            "intelliSenseMode": "gcc-arm64",
            "compileCommands": "${workspaceFolder}/compile_commands.json"
        }
    ],
    "version": 4
}

92
kernel/.vscode/generate_compdb.py vendored Normal file

@ -0,0 +1,92 @@
#!/usr/bin/env python3
# Build compile_commands.json for clangd/IntelliSense by scraping the
# cmd_<target> and source_<target> variables that Kbuild records in .*.o.cmd files.
from __future__ import print_function, division

import argparse
import fnmatch
import functools
import json
import math
import multiprocessing
import os
import re
import sys

CMD_VAR_RE = re.compile(r'^\s*(?:saved)?cmd_(\S+)\s*:=\s*(.+)\s*$', re.MULTILINE)
SOURCE_VAR_RE = re.compile(r'^\s*source_(\S+)\s*:=\s*(.+)\s*$', re.MULTILINE)


def print_progress_bar(progress):
    progress_bar = '[' + '|' * int(50 * progress) + '-' * int(50 * (1.0 - progress)) + ']'
    print('\r', progress_bar, "{0:.1%}".format(progress), end='\r', file=sys.stderr)


def parse_cmd_file(out_dir, cmdfile_path):
    with open(cmdfile_path, 'r') as cmdfile:
        cmdfile_content = cmdfile.read()

    commands = { match.group(1): match.group(2) for match in CMD_VAR_RE.finditer(cmdfile_content) }
    sources = { match.group(1): match.group(2) for match in SOURCE_VAR_RE.finditer(cmdfile_content) }

    return [{
        'directory': out_dir,
        'command': commands[o_file_name],
        'file': source,
        'output': o_file_name
    } for o_file_name, source in sources.items()]


def gen_compile_commands(cmd_file_search_path, out_dir):
    print("Building *.o.cmd file list...", file=sys.stderr)

    out_dir = os.path.abspath(out_dir)

    if not cmd_file_search_path:
        cmd_file_search_path = [out_dir]

    cmd_files = []
    for search_path in cmd_file_search_path:
        if os.path.isdir(search_path):
            for cur_dir, subdir, files in os.walk(search_path):
                cmd_files.extend(os.path.join(cur_dir, cmdfile_name) for cmdfile_name in fnmatch.filter(files, '*.o.cmd'))
        else:
            # A single *.o.cmd file path was passed directly.
            cmd_files.append(search_path)

    if not cmd_files:
        print("No *.o.cmd files found in", ", ".join(cmd_file_search_path), file=sys.stderr)
        return

    print("Parsing *.o.cmd files...", file=sys.stderr)

    n_processed = 0
    print_progress_bar(0)

    compdb = []
    pool = multiprocessing.Pool()
    try:
        for compdb_chunk in pool.imap_unordered(functools.partial(parse_cmd_file, out_dir), cmd_files, chunksize=int(math.sqrt(len(cmd_files)))):
            compdb.extend(compdb_chunk)
            n_processed += 1
            print_progress_bar(n_processed / len(cmd_files))
    finally:
        pool.terminate()
        pool.join()

    print(file=sys.stderr)
    print("Writing compile_commands.json...", file=sys.stderr)
    with open('compile_commands.json', 'w') as compdb_file:
        json.dump(compdb, compdb_file, indent=1)


def main():
    cmd_parser = argparse.ArgumentParser()
    cmd_parser.add_argument('-O', '--out-dir', type=str, default=os.getcwd(), help="Build output directory")
    cmd_parser.add_argument('cmd_file_search_path', nargs='*', help="*.cmd file search path")

    gen_compile_commands(**vars(cmd_parser.parse_args()))


if __name__ == '__main__':
    main()

16
kernel/.vscode/tasks.json vendored Normal file

@ -0,0 +1,16 @@
{
    // See https://go.microsoft.com/fwlink/?LinkId=733558
    // for the documentation about the tasks.json format
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Generate compile_commands.json",
            "type": "process",
            "command": "python",
            "args": [
                "${workspaceRoot}/.vscode/generate_compdb.py"
            ],
            "problemMatcher": []
        }
    ]
}

42
kernel/Kconfig Normal file

@ -0,0 +1,42 @@
menu "KernelSU"
config KSU
tristate "KernelSU function support"
default y
help
Enable kernel-level root privileges on Android System.
To compile as a module, choose M here: the
module will be called kernelsu.
config KSU_DEBUG
bool "KernelSU debug mode"
depends on KSU
default n
help
Enable KernelSU debug mode.
config KSU_MANUAL_SU
bool "Use manual su"
depends on KSU
default y
help
Use manual su and authorize the corresponding command line and application via prctl
config KPM
bool "Enable SukiSU KPM"
depends on KSU && 64BIT
default n
help
Enabling this option will activate the KPM feature of SukiSU.
This option is suitable for scenarios where you need to force KPM to be enabled.
but it may affect system stability.
select KALLSYMS
select KALLSYMS_ALL
config KSU_MANUAL_HOOK
bool "Hook KernelSU manually"
depends on KSU != m
help
If enabled, Hook required KernelSU syscalls with manually-patched function.
endmenu

339
kernel/LICENSE Normal file

@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

172
kernel/Makefile Normal file

@ -0,0 +1,172 @@
kernelsu-objs := ksu.o
kernelsu-objs += allowlist.o
kernelsu-objs += app_profile.o
kernelsu-objs += dynamic_manager.o
kernelsu-objs += apk_sign.o
kernelsu-objs += sucompat.o
kernelsu-objs += syscall_hook_manager.o
kernelsu-objs += throne_tracker.o
kernelsu-objs += pkg_observer.o
kernelsu-objs += umount_manager.o
kernelsu-objs += setuid_hook.o
kernelsu-objs += kernel_umount.o
kernelsu-objs += supercalls.o
kernelsu-objs += feature.o
kernelsu-objs += ksud.o
kernelsu-objs += embed_ksud.o
kernelsu-objs += seccomp_cache.o
kernelsu-objs += file_wrapper.o
kernelsu-objs += throne_comm.o
kernelsu-objs += sulog.o
ifeq ($(CONFIG_KSU_MANUAL_SU), y)
ccflags-y += -DCONFIG_KSU_MANUAL_SU
kernelsu-objs += manual_su.o
endif
kernelsu-objs += selinux/selinux.o
kernelsu-objs += selinux/sepolicy.o
kernelsu-objs += selinux/rules.o
ccflags-y += -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
ccflags-y += -I$(objtree)/security/selinux -include $(srctree)/include/uapi/asm-generic/errno.h
obj-$(CONFIG_KSU) += kernelsu.o
obj-$(CONFIG_KPM) += kpm/
REPO_OWNER := SukiSU-Ultra
REPO_NAME := SukiSU-Ultra
REPO_BRANCH := main
KSU_VERSION_API := 4.0.0
GIT_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin git
CURL_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin curl
KDIR := $(KDIR)
MDIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
ifneq ($(KDIR),)
$(info -- KDIR: $(KDIR))
$(info -- MDIR: $(MDIR))
endif
KSU_GITHUB_VERSION := $(shell $(CURL_BIN) -s "https://api.github.com/repos/$(REPO_OWNER)/$(REPO_NAME)/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
KSU_GITHUB_VERSION_COMMIT := $(shell $(CURL_BIN) -sI "https://api.github.com/repos/$(REPO_OWNER)/$(REPO_NAME)/commits?sha=$(REPO_BRANCH)&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p')
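# Resolve the KernelSU source directory for both in-tree builds (where $(src)
# may already live under $(srctree)) and out-of-tree builds; fall back to this
# Makefile's own directory when no parent .git checkout is present.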
ifeq ($(findstring $(srctree),$(src)),$(srctree))
KSU_SRC := $(src)
else
KSU_SRC := $(srctree)/$(src)
endif
ifneq ($(shell test -e $(KSU_SRC)/../.git && echo "in-tree"),in-tree)
KSU_SRC := $(MDIR)
endif
LOCAL_GIT_EXISTS := $(shell test -e $(KSU_SRC)/../.git && echo 1 || echo 0)
define get_ksu_version_full
v$1-$(shell cd $(KSU_SRC); $(GIT_BIN) rev-parse --short=8 HEAD)@$(shell cd $(KSU_SRC); $(GIT_BIN) rev-parse --abbrev-ref HEAD)
endef
ifeq ($(KSU_GITHUB_VERSION_COMMIT),)
ifeq ($(LOCAL_GIT_EXISTS),1)
$(shell cd $(KSU_SRC); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow)
KSU_LOCAL_VERSION := $(shell cd $(KSU_SRC); $(GIT_BIN) rev-list --count $(REPO_BRANCH))
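# Version code = 40000 + <commit count on $(REPO_BRANCH)> - 2815;
# e.g. 3015 commits would give 40000 + 3015 - 2815 = 40200.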
KSU_VERSION := $(shell expr 40000 + $(KSU_LOCAL_VERSION) - 2815)
$(info -- $(REPO_NAME) version (local .git): $(KSU_VERSION))
else
KSU_VERSION := 13000
$(warning -- Could not fetch version online or via local .git! Using fallback version: $(KSU_VERSION))
endif
else
KSU_VERSION := $(shell expr 40000 + $(KSU_GITHUB_VERSION_COMMIT) - 2815)
$(info -- $(REPO_NAME) version (GitHub): $(KSU_VERSION))
endif
ifeq ($(KSU_GITHUB_VERSION),)
ifeq ($(LOCAL_GIT_EXISTS),1)
$(shell cd $(KSU_SRC); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow)
KSU_VERSION_FULL := $(call get_ksu_version_full,$(KSU_VERSION_API))
$(info -- $(REPO_NAME) version (local .git): $(KSU_VERSION_FULL))
$(info -- $(REPO_NAME) Formatted version (local .git): $(KSU_VERSION))
else
KSU_VERSION_FULL := v$(KSU_VERSION_API)-$(REPO_NAME)-unknown@unknown
$(warning -- $(REPO_NAME) version: $(KSU_VERSION_FULL))
endif
else
$(shell cd $(KSU_SRC); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow)
KSU_VERSION_FULL := $(call get_ksu_version_full,$(KSU_GITHUB_VERSION))
$(info -- $(REPO_NAME) version (GitHub): $(KSU_VERSION_FULL))
endif
ccflags-y += -DKSU_VERSION=$(KSU_VERSION)
ccflags-y += -DKSU_VERSION_FULL=\"$(KSU_VERSION_FULL)\"
# Custom Signs
ifdef KSU_EXPECTED_SIZE
ccflags-y += -DEXPECTED_SIZE=$(KSU_EXPECTED_SIZE)
$(info -- Custom KernelSU Manager signature size: $(KSU_EXPECTED_SIZE))
endif
ifdef KSU_EXPECTED_HASH
ccflags-y += -DEXPECTED_HASH=\"$(KSU_EXPECTED_HASH)\"
$(info -- Custom KernelSU Manager signature hash: $(KSU_EXPECTED_HASH))
endif
ifdef KSU_MANAGER_PACKAGE
ccflags-y += -DKSU_MANAGER_PACKAGE=\"$(KSU_MANAGER_PACKAGE)\"
$(info -- SukiSU Manager package name: $(KSU_MANAGER_PACKAGE))
endif
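# Hook mode selection: manual syscall hooks when CONFIG_KSU_MANUAL_HOOK=y,
# otherwise fall back to the kprobes/tracepoint based hooks.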
ifeq ($(CONFIG_KSU_MANUAL_HOOK), y)
ccflags-y += -DKSU_MANUAL_HOOK
$(info -- SukiSU: KSU_MANUAL_HOOK Temporarily discontinued)
else
ccflags-y += -DKSU_KPROBES_HOOK
ccflags-y += -DKSU_TP_HOOK
$(info -- SukiSU: KSU_TRACEPOINT_HOOK)
endif
KERNEL_VERSION := $(VERSION).$(PATCHLEVEL)
KERNEL_TYPE := Non-GKI
# Check for GKI 2.0 (5.10+ or 6.x+)
ifneq ($(shell test \( $(VERSION) -ge 5 -a $(PATCHLEVEL) -ge 10 \) -o $(VERSION) -ge 6; echo $$?),0)
# Check for GKI 1.0 (5.4)
ifeq ($(shell test $(VERSION)-$(PATCHLEVEL) = 5-4; echo $$?),0)
KERNEL_TYPE := GKI 1.0
endif
else
KERNEL_TYPE := GKI 2.0
endif
$(info -- KERNEL_VERSION: $(KERNEL_VERSION))
$(info -- KERNEL_TYPE: $(KERNEL_TYPE))
ifeq ($(CONFIG_KPM), y)
$(info -- KPM is enabled)
else
$(info -- KPM is disabled)
endif
# Check new vfs_getattr()
ifeq ($(shell grep -A1 "^int vfs_getattr" $(srctree)/fs/stat.c | grep -q "query_flags" ; echo $$?),0)
ccflags-y += -DKSU_HAS_NEW_VFS_GETATTR
endif
# Function proc_ops check
ifeq ($(shell grep -q "struct proc_ops " $(srctree)/include/linux/proc_fs.h; echo $$?),0)
ccflags-y += -DKSU_COMPAT_HAS_PROC_OPS
endif
ccflags-y += -Wno-strict-prototypes -Wno-int-conversion -Wno-gcc-compat -Wno-missing-prototypes
ccflags-y += -Wno-declaration-after-statement -Wno-unused-function -Wno-unused-variable
all:
	make -C $(KDIR) M=$(MDIR) modules

compdb:
	python3 $(MDIR)/.vscode/generate_compdb.py -O $(KDIR) $(MDIR)

clean:
	make -C $(KDIR) M=$(MDIR) clean
# Keep a new line here!! Because someone may append config
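For reference, a typical out-of-tree invocation of the targets above would look something like the following, with KDIR pointing at a configured kernel build tree (the path is illustrative, not part of the Makefile):

make KDIR=/path/to/kernel-build all
make KDIR=/path/to/kernel-build compdb
make KDIR=/path/to/kernel-build clean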

631
kernel/allowlist.c Normal file

@ -0,0 +1,631 @@
#include <linux/mutex.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#include <linux/compiler_types.h>
#endif
#include "klog.h" // IWYU pragma: keep
#include "ksud.h"
#include "selinux/selinux.h"
#include "allowlist.h"
#include "manager.h"
#include "syscall_hook_manager.h"
#define FILE_MAGIC 0x7f4b5355 // ' KSU', u32
#define FILE_FORMAT_VERSION 3 // u32
#define KSU_APP_PROFILE_PRESERVE_UID 9999 // NOBODY_UID
#define KSU_DEFAULT_SELINUX_DOMAIN "u:r:su:s0"
static DEFINE_MUTEX(allowlist_mutex);
// default profiles, these may be used frequently, so we cache it
static struct root_profile default_root_profile;
static struct non_root_profile default_non_root_profile;
void persistent_allow_list(void);
static int allow_list_arr[PAGE_SIZE / sizeof(int)] __read_mostly
__aligned(PAGE_SIZE);
static int allow_list_pointer __read_mostly = 0;
static void remove_uid_from_arr(uid_t uid)
{
int *temp_arr;
int i, j;
if (allow_list_pointer == 0)
return;
temp_arr = kzalloc(sizeof(allow_list_arr), GFP_KERNEL);
if (temp_arr == NULL) {
pr_err("%s: unable to allocate memory\n", __func__);
return;
}
for (i = j = 0; i < allow_list_pointer; i++) {
if (allow_list_arr[i] == uid)
continue;
temp_arr[j++] = allow_list_arr[i];
}
allow_list_pointer = j;
for (; j < ARRAY_SIZE(allow_list_arr); j++)
temp_arr[j] = -1;
memcpy(&allow_list_arr, temp_arr, PAGE_SIZE);
kfree(temp_arr);
}
static void init_default_profiles(void)
{
kernel_cap_t full_cap = CAP_FULL_SET;
default_root_profile.uid = 0;
default_root_profile.gid = 0;
default_root_profile.groups_count = 1;
default_root_profile.groups[0] = 0;
memcpy(&default_root_profile.capabilities.effective, &full_cap,
sizeof(default_root_profile.capabilities.effective));
default_root_profile.namespaces = 0;
strcpy(default_root_profile.selinux_domain, KSU_DEFAULT_SELINUX_DOMAIN);
// This means that we will umount modules by default!
default_non_root_profile.umount_modules = true;
}
struct perm_data {
struct list_head list;
struct app_profile profile;
};
static struct list_head allow_list;
static uint8_t allow_list_bitmap[PAGE_SIZE] __read_mostly __aligned(PAGE_SIZE);
#define BITMAP_UID_MAX ((sizeof(allow_list_bitmap) * BITS_PER_BYTE) - 1)
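/*
 * Lookup layout: UIDs 0..BITMAP_UID_MAX (32767 with 4 KiB pages) get one bit
 * each in allow_list_bitmap; UIDs above that range fall back to a linear scan
 * of the allow_list_arr overflow array (see __ksu_is_allow_uid below).
 */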
#define KERNEL_SU_ALLOWLIST "/data/adb/ksu/.allowlist"
void ksu_show_allow_list(void)
{
struct perm_data *p = NULL;
struct list_head *pos = NULL;
pr_info("ksu_show_allow_list\n");
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
pr_info("uid :%d, allow: %d\n", p->profile.current_uid,
p->profile.allow_su);
}
}
#ifdef CONFIG_KSU_DEBUG
static void ksu_grant_root_to_shell(void)
{
struct app_profile profile = {
.version = KSU_APP_PROFILE_VER,
.allow_su = true,
.current_uid = 2000,
};
strcpy(profile.key, "com.android.shell");
strcpy(profile.rp_config.profile.selinux_domain,
KSU_DEFAULT_SELINUX_DOMAIN);
ksu_set_app_profile(&profile, false);
}
#endif
bool ksu_get_app_profile(struct app_profile *profile)
{
struct perm_data *p = NULL;
struct list_head *pos = NULL;
bool found = false;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
bool uid_match = profile->current_uid == p->profile.current_uid;
if (uid_match) {
// found it, override it with ours
memcpy(profile, &p->profile, sizeof(*profile));
found = true;
goto exit;
}
}
exit:
return found;
}
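/*
 * UIDs below SHELL_UID (2000) are treated as core system UIDs and rejected
 * early, with the single exception of SYSTEM_UID (1000), which may still be
 * looked up in the allowlist.
 */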
static inline bool forbid_system_uid(uid_t uid)
{
#define SHELL_UID 2000
#define SYSTEM_UID 1000
return uid < SHELL_UID && uid != SYSTEM_UID;
}
static bool profile_valid(struct app_profile *profile)
{
if (!profile) {
return false;
}
if (profile->version < KSU_APP_PROFILE_VER) {
pr_info("Unsupported profile version: %d\n", profile->version);
return false;
}
if (profile->allow_su) {
if (profile->rp_config.profile.groups_count > KSU_MAX_GROUPS) {
return false;
}
if (strlen(profile->rp_config.profile.selinux_domain) == 0) {
return false;
}
}
return true;
}
bool ksu_set_app_profile(struct app_profile *profile, bool persist)
{
struct perm_data *p = NULL;
struct list_head *pos = NULL;
bool result = false;
if (!profile_valid(profile)) {
pr_err("Failed to set app profile: invalid profile!\n");
return false;
}
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
// both uid and package must match, otherwise it will break multiple package with different user id
if (profile->current_uid == p->profile.current_uid &&
!strcmp(profile->key, p->profile.key)) {
// found it, just override it all!
memcpy(&p->profile, profile, sizeof(*profile));
result = true;
goto out;
}
}
// not found, alloc a new node!
p = (struct perm_data *)kzalloc(sizeof(struct perm_data), GFP_KERNEL);
if (!p) {
pr_err("ksu_set_app_profile alloc failed\n");
return false;
}
memcpy(&p->profile, profile, sizeof(*profile));
if (profile->allow_su) {
pr_info("set root profile, key: %s, uid: %d, gid: %d, context: %s\n",
profile->key, profile->current_uid,
profile->rp_config.profile.gid,
profile->rp_config.profile.selinux_domain);
} else {
pr_info("set app profile, key: %s, uid: %d, umount modules: %d\n",
profile->key, profile->current_uid,
profile->nrp_config.profile.umount_modules);
}
list_add_tail(&p->list, &allow_list);
out:
if (profile->current_uid <= BITMAP_UID_MAX) {
if (profile->allow_su)
allow_list_bitmap[profile->current_uid / BITS_PER_BYTE] |=
1 << (profile->current_uid % BITS_PER_BYTE);
else
allow_list_bitmap[profile->current_uid / BITS_PER_BYTE] &=
~(1 << (profile->current_uid % BITS_PER_BYTE));
} else {
if (profile->allow_su) {
/*
* 1024 apps with uid higher than BITMAP_UID_MAX
* registered to request superuser?
*/
if (allow_list_pointer >= ARRAY_SIZE(allow_list_arr)) {
pr_err("too many apps registered\n");
WARN_ON(1);
return false;
}
allow_list_arr[allow_list_pointer++] = profile->current_uid;
} else {
remove_uid_from_arr(profile->current_uid);
}
}
result = true;
// check if the default profiles is changed, cache it to a single struct to accelerate access.
if (unlikely(!strcmp(profile->key, "$"))) {
// set default non root profile
memcpy(&default_non_root_profile, &profile->nrp_config.profile,
sizeof(default_non_root_profile));
}
if (unlikely(!strcmp(profile->key, "#"))) {
// set default root profile
memcpy(&default_root_profile, &profile->rp_config.profile,
sizeof(default_root_profile));
}
if (persist) {
persistent_allow_list();
// FIXME: use a new flag
ksu_mark_running_process();
}
return result;
}
bool __ksu_is_allow_uid(uid_t uid)
{
int i;
if (forbid_system_uid(uid)) {
// do not bother going through the list if it's system
return false;
}
if (likely(ksu_is_manager_uid_valid()) &&
unlikely(ksu_get_manager_uid() == uid)) {
// manager is always allowed!
return true;
}
if (likely(uid <= BITMAP_UID_MAX)) {
return !!(allow_list_bitmap[uid / BITS_PER_BYTE] &
(1 << (uid % BITS_PER_BYTE)));
} else {
for (i = 0; i < allow_list_pointer; i++) {
if (allow_list_arr[i] == uid)
return true;
}
}
return false;
}
bool __ksu_is_allow_uid_for_current(uid_t uid)
{
if (unlikely(uid == 0)) {
// already root, but only allow our domain.
return is_ksu_domain();
}
return __ksu_is_allow_uid(uid);
}
bool ksu_uid_should_umount(uid_t uid)
{
struct app_profile profile = { .current_uid = uid };
if (likely(ksu_is_manager_uid_valid()) &&
unlikely(ksu_get_manager_uid() == uid)) {
// we should not umount on manager!
return false;
}
bool found = ksu_get_app_profile(&profile);
if (!found) {
// no app profile found, it must be non root app
return default_non_root_profile.umount_modules;
}
if (profile.allow_su) {
// if found and it is granted to su, we shouldn't umount for it
return false;
} else {
// found an app profile
if (profile.nrp_config.use_default) {
return default_non_root_profile.umount_modules;
} else {
return profile.nrp_config.profile.umount_modules;
}
}
}
struct root_profile *ksu_get_root_profile(uid_t uid)
{
struct perm_data *p = NULL;
struct list_head *pos = NULL;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
if (uid == p->profile.current_uid && p->profile.allow_su) {
if (!p->profile.rp_config.use_default) {
return &p->profile.rp_config.profile;
}
}
}
// use default profile
return &default_root_profile;
}
bool ksu_get_allow_list(int *array, int *length, bool allow)
{
struct perm_data *p = NULL;
struct list_head *pos = NULL;
int i = 0;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
// pr_info("get_allow_list uid: %d allow: %d\n", p->uid, p->allow);
if (p->profile.allow_su == allow) {
array[i++] = p->profile.current_uid;
}
}
*length = i;
return true;
}
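/*
 * On-disk format of KERNEL_SU_ALLOWLIST: a u32 FILE_MAGIC, a u32
 * FILE_FORMAT_VERSION, then raw struct app_profile records back to back
 * until EOF (read back verbatim by ksu_load_allow_list()).
 */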
static void do_persistent_allow_list(struct callback_head *_cb)
{
u32 magic = FILE_MAGIC;
u32 version = FILE_FORMAT_VERSION;
struct perm_data *p = NULL;
struct list_head *pos = NULL;
loff_t off = 0;
mutex_lock(&allowlist_mutex);
struct file *fp =
filp_open(KERNEL_SU_ALLOWLIST, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (IS_ERR(fp)) {
pr_err("save_allow_list create file failed: %ld\n", PTR_ERR(fp));
goto unlock;
}
// store magic and version
if (kernel_write(fp, &magic, sizeof(magic), &off) != sizeof(magic)) {
pr_err("save_allow_list write magic failed.\n");
goto close_file;
}
if (kernel_write(fp, &version, sizeof(version), &off) != sizeof(version)) {
pr_err("save_allow_list write version failed.\n");
goto close_file;
}
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
pr_info("save allow list, name: %s uid :%d, allow: %d\n",
p->profile.key, p->profile.current_uid, p->profile.allow_su);
kernel_write(fp, &p->profile, sizeof(p->profile), &off);
}
close_file:
filp_close(fp, 0);
unlock:
mutex_unlock(&allowlist_mutex);
kfree(_cb);
}
void persistent_allow_list()
{
struct task_struct *tsk;
tsk = get_pid_task(find_vpid(1), PIDTYPE_PID);
if (!tsk) {
pr_err("save_allow_list find init task err\n");
return;
}
struct callback_head *cb =
kzalloc(sizeof(struct callback_head), GFP_KERNEL);
if (!cb) {
pr_err("save_allow_list alloc cb err\b");
goto put_task;
}
cb->func = do_persistent_allow_list;
task_work_add(tsk, cb, TWA_RESUME);
put_task:
put_task_struct(tsk);
}
void ksu_load_allow_list()
{
loff_t off = 0;
ssize_t ret = 0;
struct file *fp = NULL;
u32 magic;
u32 version;
#ifdef CONFIG_KSU_DEBUG
// always allow adb shell by default
ksu_grant_root_to_shell();
#endif
// load allowlist now!
fp = filp_open(KERNEL_SU_ALLOWLIST, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("load_allow_list open file failed: %ld\n", PTR_ERR(fp));
return;
}
// verify magic
if (kernel_read(fp, &magic, sizeof(magic), &off) != sizeof(magic) ||
magic != FILE_MAGIC) {
pr_err("allowlist file invalid: %d!\n", magic);
goto exit;
}
if (kernel_read(fp, &version, sizeof(version), &off) != sizeof(version)) {
pr_err("allowlist read version: %d failed\n", version);
goto exit;
}
pr_info("allowlist version: %d\n", version);
while (true) {
struct app_profile profile;
ret = kernel_read(fp, &profile, sizeof(profile), &off);
if (ret <= 0) {
pr_info("load_allow_list read err: %zd\n", ret);
break;
}
pr_info("load_allow_uid, name: %s, uid: %d, allow: %d\n", profile.key,
profile.current_uid, profile.allow_su);
ksu_set_app_profile(&profile, false);
}
exit:
ksu_show_allow_list();
filp_close(fp, 0);
}
void ksu_prune_allowlist(bool (*is_uid_valid)(uid_t, char *, void *),
void *data)
{
struct perm_data *np = NULL;
struct perm_data *n = NULL;
if (!ksu_boot_completed) {
pr_info("boot not completed, skip prune\n");
return;
}
bool modified = false;
// TODO: use RCU!
mutex_lock(&allowlist_mutex);
list_for_each_entry_safe (np, n, &allow_list, list) {
uid_t uid = np->profile.current_uid;
char *package = np->profile.key;
// we use this uid for special cases, don't prune it!
bool is_preserved_uid = uid == KSU_APP_PROFILE_PRESERVE_UID;
if (!is_preserved_uid && !is_uid_valid(uid, package, data)) {
modified = true;
pr_info("prune uid: %d, package: %s\n", uid, package);
list_del(&np->list);
if (likely(uid <= BITMAP_UID_MAX)) {
allow_list_bitmap[uid / BITS_PER_BYTE] &=
~(1 << (uid % BITS_PER_BYTE));
}
remove_uid_from_arr(uid);
smp_mb();
kfree(np);
}
}
mutex_unlock(&allowlist_mutex);
if (modified) {
persistent_allow_list();
}
}
void ksu_allowlist_init(void)
{
int i;
BUILD_BUG_ON(sizeof(allow_list_bitmap) != PAGE_SIZE);
BUILD_BUG_ON(sizeof(allow_list_arr) != PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(allow_list_arr); i++)
allow_list_arr[i] = -1;
INIT_LIST_HEAD(&allow_list);
init_default_profiles();
}
void ksu_allowlist_exit(void)
{
struct perm_data *np = NULL;
struct perm_data *n = NULL;
// free allowlist
mutex_lock(&allowlist_mutex);
list_for_each_entry_safe (np, n, &allow_list, list) {
list_del(&np->list);
kfree(np);
}
mutex_unlock(&allowlist_mutex);
}
#ifdef CONFIG_KSU_MANUAL_SU
bool ksu_temp_grant_root_once(uid_t uid)
{
struct app_profile profile = {
.version = KSU_APP_PROFILE_VER,
.allow_su = true,
.current_uid = uid,
};
const char *default_key = "com.temp.once";
struct perm_data *p = NULL;
struct list_head *pos = NULL;
bool found = false;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
if (p->profile.current_uid == uid) {
strcpy(profile.key, p->profile.key);
found = true;
break;
}
}
if (!found) {
strcpy(profile.key, default_key);
}
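// fill the temporary profile's root settings from the built-in defaults
// so the one-shot grant behaves like a normally allowed app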
profile.rp_config.profile.uid = default_root_profile.uid;
profile.rp_config.profile.gid = default_root_profile.gid;
profile.rp_config.profile.groups_count = default_root_profile.groups_count;
memcpy(profile.rp_config.profile.groups, default_root_profile.groups, sizeof(default_root_profile.groups));
memcpy(&profile.rp_config.profile.capabilities, &default_root_profile.capabilities, sizeof(default_root_profile.capabilities));
profile.rp_config.profile.namespaces = default_root_profile.namespaces;
strcpy(profile.rp_config.profile.selinux_domain, default_root_profile.selinux_domain);
bool ok = ksu_set_app_profile(&profile, false);
if (ok)
pr_info("pending_root: UID=%d granted and persisted\n", uid);
return ok;
}
void ksu_temp_revoke_root_once(uid_t uid)
{
struct app_profile profile = {
.version = KSU_APP_PROFILE_VER,
.allow_su = false,
.current_uid = uid,
};
const char *default_key = "com.temp.once";
struct perm_data *p = NULL;
struct list_head *pos = NULL;
bool found = false;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
if (p->profile.current_uid == uid) {
strcpy(profile.key, p->profile.key);
found = true;
break;
}
}
if (!found) {
strcpy(profile.key, default_key);
}
profile.nrp_config.profile.umount_modules = default_non_root_profile.umount_modules;
strcpy(profile.rp_config.profile.selinux_domain, KSU_DEFAULT_SELINUX_DOMAIN);
ksu_set_app_profile(&profile, false);
persistent_allow_list();
pr_info("pending_root: UID=%d removed and persist updated\n", uid);
}
#endif

49
kernel/allowlist.h Normal file
View file

@ -0,0 +1,49 @@
#ifndef __KSU_H_ALLOWLIST
#define __KSU_H_ALLOWLIST
#include <linux/types.h>
#include <linux/uidgid.h>
#include "app_profile.h"
#define PER_USER_RANGE 100000
#define FIRST_APPLICATION_UID 10000
#define LAST_APPLICATION_UID 19999
void ksu_allowlist_init(void);
void ksu_allowlist_exit(void);
void ksu_load_allow_list(void);
void ksu_show_allow_list(void);
// Check if the uid is in allow list
bool __ksu_is_allow_uid(uid_t uid);
#define ksu_is_allow_uid(uid) unlikely(__ksu_is_allow_uid(uid))
// Check if the uid is in allow list, or current is ksu domain root
bool __ksu_is_allow_uid_for_current(uid_t uid);
#define ksu_is_allow_uid_for_current(uid) unlikely(__ksu_is_allow_uid_for_current(uid))
bool ksu_get_allow_list(int *array, int *length, bool allow);
void ksu_prune_allowlist(bool (*is_uid_exist)(uid_t, char *, void *), void *data);
bool ksu_get_app_profile(struct app_profile *);
bool ksu_set_app_profile(struct app_profile *, bool persist);
bool ksu_uid_should_umount(uid_t uid);
struct root_profile *ksu_get_root_profile(uid_t uid);
static inline bool is_appuid(uid_t uid)
{
uid_t appid = uid % PER_USER_RANGE;
return appid >= FIRST_APPLICATION_UID && appid <= LAST_APPLICATION_UID;
}
#ifdef CONFIG_KSU_MANUAL_SU
bool ksu_temp_grant_root_once(uid_t uid);
void ksu_temp_revoke_root_once(uid_t uid);
#endif
#endif

421
kernel/apk_sign.c Normal file
View file

@ -0,0 +1,421 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#ifdef CONFIG_KSU_DEBUG
#include <linux/moduleparam.h>
#endif
#include <crypto/hash.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
#include <crypto/sha2.h>
#else
#include <crypto/sha.h>
#endif
#include "apk_sign.h"
#include "dynamic_manager.h"
#include "klog.h" // IWYU pragma: keep
#include "manager_sign.h"
struct sdesc {
struct shash_desc shash;
char ctx[];
};
static apk_sign_key_t apk_sign_keys[] = {
{EXPECTED_SIZE_SHIRKNEKO, EXPECTED_HASH_SHIRKNEKO}, // ShirkNeko/SukiSU
#ifdef EXPECTED_SIZE
{EXPECTED_SIZE, EXPECTED_HASH}, // Custom
#endif
};
static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
struct sdesc *sdesc;
int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
sdesc = kzalloc(size, GFP_KERNEL);
if (!sdesc)
return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg;
return sdesc;
}
static int calc_hash(struct crypto_shash *alg, const unsigned char *data,
unsigned int datalen, unsigned char *digest)
{
struct sdesc *sdesc;
int ret;
sdesc = init_sdesc(alg);
if (IS_ERR(sdesc)) {
pr_info("can't alloc sdesc\n");
return PTR_ERR(sdesc);
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
kfree(sdesc);
return ret;
}
static int ksu_sha256(const unsigned char *data, unsigned int datalen,
unsigned char *digest)
{
struct crypto_shash *alg;
char *hash_alg_name = "sha256";
int ret;
alg = crypto_alloc_shash(hash_alg_name, 0, 0);
if (IS_ERR(alg)) {
pr_info("can't alloc alg %s\n", hash_alg_name);
return PTR_ERR(alg);
}
ret = calc_hash(alg, data, datalen, digest);
crypto_free_shash(alg);
return ret;
}
static struct dynamic_sign_key dynamic_sign = DYNAMIC_SIGN_DEFAULT_CONFIG;
static bool check_dynamic_sign(struct file *fp, u32 size4, loff_t *pos, int *matched_index)
{
struct dynamic_sign_key current_dynamic_key = dynamic_sign;
if (ksu_get_dynamic_manager_config(&current_dynamic_key.size, &current_dynamic_key.hash)) {
pr_debug("Using dynamic manager config: size=0x%x, hash=%.16s...\n",
current_dynamic_key.size, current_dynamic_key.hash);
}
if (size4 != current_dynamic_key.size) {
return false;
}
#define CERT_MAX_LENGTH 1024
char cert[CERT_MAX_LENGTH];
if (size4 > CERT_MAX_LENGTH) {
pr_info("cert length overlimit\n");
return false;
}
kernel_read(fp, cert, size4, pos);
unsigned char digest[SHA256_DIGEST_SIZE];
if (ksu_sha256(cert, size4, digest) < 0) {
pr_info("sha256 error\n");
return false;
}
char hash_str[SHA256_DIGEST_SIZE * 2 + 1];
hash_str[SHA256_DIGEST_SIZE * 2] = '\0';
bin2hex(hash_str, digest, SHA256_DIGEST_SIZE);
pr_info("sha256: %s, expected: %s, index: dynamic\n", hash_str, current_dynamic_key.hash);
if (strcmp(current_dynamic_key.hash, hash_str) == 0) {
if (matched_index) {
*matched_index = DYNAMIC_SIGN_INDEX;
}
return true;
}
return false;
}
static bool check_block(struct file *fp, u32 *size4, loff_t *pos, u32 *offset, int *matched_index)
{
int i;
apk_sign_key_t sign_key;
bool signature_valid = false;
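// Walk the length-prefixed v2 signing structure (signer sequence ->
// signer -> signed data -> digests -> certificates), keeping *offset in
// sync so the caller can skip the remainder of the block afterwards.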
kernel_read(fp, size4, 0x4, pos); // signer-sequence length
kernel_read(fp, size4, 0x4, pos); // signer length
kernel_read(fp, size4, 0x4, pos); // signed data length
*offset += 0x4 * 3;
kernel_read(fp, size4, 0x4, pos); // digests-sequence length
*pos += *size4;
*offset += 0x4 + *size4;
kernel_read(fp, size4, 0x4, pos); // certificates length
kernel_read(fp, size4, 0x4, pos); // certificate length
*offset += 0x4 * 2;
if (ksu_is_dynamic_manager_enabled()) {
loff_t temp_pos = *pos;
if (check_dynamic_sign(fp, *size4, &temp_pos, matched_index)) {
*pos = temp_pos;
*offset += *size4;
return true;
}
}
for (i = 0; i < ARRAY_SIZE(apk_sign_keys); i++) {
sign_key = apk_sign_keys[i];
if (*size4 != sign_key.size)
continue;
*offset += *size4;
#define CERT_MAX_LENGTH 1024
char cert[CERT_MAX_LENGTH];
if (*size4 > CERT_MAX_LENGTH) {
pr_info("cert length overlimit\n");
return false;
}
kernel_read(fp, cert, *size4, pos);
unsigned char digest[SHA256_DIGEST_SIZE];
if (ksu_sha256(cert, *size4, digest) < 0) {
pr_info("sha256 error\n");
return false;
}
char hash_str[SHA256_DIGEST_SIZE * 2 + 1];
hash_str[SHA256_DIGEST_SIZE * 2] = '\0';
bin2hex(hash_str, digest, SHA256_DIGEST_SIZE);
pr_info("sha256: %s, expected: %s, index: %d\n", hash_str, sign_key.sha256, i);
if (strcmp(sign_key.sha256, hash_str) == 0) {
signature_valid = true;
if (matched_index) {
*matched_index = i;
}
break;
}
}
return signature_valid;
}
struct zip_entry_header {
uint32_t signature;
uint16_t version;
uint16_t flags;
uint16_t compression;
uint16_t mod_time;
uint16_t mod_date;
uint32_t crc32;
uint32_t compressed_size;
uint32_t uncompressed_size;
uint16_t file_name_length;
uint16_t extra_field_length;
} __attribute__((packed));
// This is a necessary but not sufficient condition, but it is enough for us
static bool has_v1_signature_file(struct file *fp)
{
struct zip_entry_header header;
const char MANIFEST[] = "META-INF/MANIFEST.MF";
loff_t pos = 0;
while (kernel_read(fp, &header,
sizeof(struct zip_entry_header), &pos) ==
sizeof(struct zip_entry_header)) {
if (header.signature != 0x04034b50) {
// not a ZIP local file header ('PK\x03\x04'), stop scanning
return false;
}
// Read the entry file name
if (header.file_name_length == sizeof(MANIFEST) - 1) {
char fileName[sizeof(MANIFEST)];
kernel_read(fp, fileName,
header.file_name_length, &pos);
fileName[header.file_name_length] = '\0';
// Check if the entry matches META-INF/MANIFEST.MF
if (strncmp(MANIFEST, fileName, sizeof(MANIFEST) - 1) ==
0) {
return true;
}
} else {
// Skip the entry file name
pos += header.file_name_length;
}
// Skip to the next entry
pos += header.extra_field_length + header.compressed_size;
}
return false;
}
static __always_inline bool check_v2_signature(char *path, bool check_multi_manager, int *signature_index)
{
unsigned char buffer[0x11] = { 0 };
u32 size4;
u64 size8, size_of_block;
loff_t pos;
bool v2_signing_valid = false;
int v2_signing_blocks = 0;
bool v3_signing_exist = false;
bool v3_1_signing_exist = false;
int matched_index = -1;
int i;
struct file *fp = filp_open(path, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("open %s error.\n", path);
return false;
}
// Multi-manager signature checks only apply when the dynamic manager is enabled; skip otherwise
if (check_multi_manager && !ksu_is_dynamic_manager_enabled()) {
filp_close(fp, 0);
return false;
}
// disable inotify for this file
fp->f_mode |= FMODE_NONOTIFY;
// https://en.wikipedia.org/wiki/Zip_(file_format)#End_of_central_directory_record_(EOCD)
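// Scan backwards from EOF for the End of Central Directory record: a
// candidate is accepted when its 16-bit comment-length field equals the
// number of bytes scanned so far, then confirmed by the magic below.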
for (i = 0;; ++i) {
unsigned short n;
pos = generic_file_llseek(fp, -i - 2, SEEK_END);
kernel_read(fp, &n, 2, &pos);
if (n == i) {
pos -= 22;
kernel_read(fp, &size4, 4, &pos);
if ((size4 ^ 0xcafebabeu) == 0xccfbf1eeu) {
break;
}
}
if (i == 0xffff) {
pr_info("error: cannot find eocd\n");
goto clean;
}
}
pos += 12;
// offset of the start of the central directory (EOCD offset 16)
kernel_read(fp, &size4, 0x4, &pos);
pos = size4 - 0x18;
kernel_read(fp, &size8, 0x8, &pos);
kernel_read(fp, buffer, 0x10, &pos);
if (strcmp((char *)buffer, "APK Sig Block 42")) {
goto clean;
}
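// Seek to the head of the signing block and verify that its leading
// size-of-block field matches the value read from the footer.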
pos = size4 - (size8 + 0x8);
kernel_read(fp, &size_of_block, 0x8, &pos);
if (size_of_block != size8) {
goto clean;
}
int loop_count = 0;
while (loop_count++ < 10) {
uint32_t id;
uint32_t offset;
kernel_read(fp, &size8, 0x8,
&pos); // sequence length
if (size8 == size_of_block) {
break;
}
kernel_read(fp, &id, 0x4, &pos); // id
offset = 4;
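// 0x7109871a is the APK Signature Scheme v2 block ID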
if (id == 0x7109871au) {
v2_signing_blocks++;
bool result = check_block(fp, &size4, &pos, &offset, &matched_index);
if (result) {
v2_signing_valid = true;
}
} else if (id == 0xf05368c0u) {
// http://aospxref.com/android-14.0.0_r2/xref/frameworks/base/core/java/android/util/apk/ApkSignatureSchemeV3Verifier.java#73
v3_signing_exist = true;
} else if (id == 0x1b93ad61u) {
// http://aospxref.com/android-14.0.0_r2/xref/frameworks/base/core/java/android/util/apk/ApkSignatureSchemeV3Verifier.java#74
v3_1_signing_exist = true;
} else {
#ifdef CONFIG_KSU_DEBUG
pr_info("Unknown id: 0x%08x\n", id);
#endif
}
pos += (size8 - offset);
}
if (v2_signing_blocks != 1) {
#ifdef CONFIG_KSU_DEBUG
pr_err("Unexpected v2 signature count: %d\n",
v2_signing_blocks);
#endif
v2_signing_valid = false;
}
if (v2_signing_valid) {
bool has_v1_signing = has_v1_signature_file(fp);
if (has_v1_signing) {
pr_err("Unexpected v1 signature scheme found!\n");
filp_close(fp, 0);
return false;
}
}
clean:
filp_close(fp, 0);
if (v3_signing_exist || v3_1_signing_exist) {
#ifdef CONFIG_KSU_DEBUG
pr_err("Unexpected v3 signature scheme found!\n");
#endif
return false;
}
if (v2_signing_valid) {
if (signature_index) {
*signature_index = matched_index;
}
if (check_multi_manager) {
// 0: ShirkNeko/SukiSU, DYNAMIC_SIGN_INDEX : Dynamic Sign
if (matched_index == 0 || matched_index == DYNAMIC_SIGN_INDEX) {
pr_info("Multi-manager APK detected (dynamic_manager enabled): signature_index=%d\n", matched_index);
return true;
}
return false;
} else {
// Common manager check: any valid signature will do
return true;
}
}
return false;
}
#ifdef CONFIG_KSU_DEBUG
int ksu_debug_manager_uid = -1;
#include "manager.h"
static int set_expected_size(const char *val, const struct kernel_param *kp)
{
int rv = param_set_uint(val, kp);
ksu_set_manager_uid(ksu_debug_manager_uid);
pr_info("ksu_manager_uid set to %d\n", ksu_debug_manager_uid);
return rv;
}
static struct kernel_param_ops expected_size_ops = {
.set = set_expected_size,
.get = param_get_uint,
};
module_param_cb(ksu_debug_manager_uid, &expected_size_ops,
&ksu_debug_manager_uid, S_IRUSR | S_IWUSR);
#endif
bool is_manager_apk(char *path)
{
return check_v2_signature(path, false, NULL);
}
bool is_dynamic_manager_apk(char *path, int *signature_index)
{
return check_v2_signature(path, true, signature_index);
}

11
kernel/apk_sign.h Normal file
View file

@ -0,0 +1,11 @@
#ifndef __KSU_H_APK_V2_SIGN
#define __KSU_H_APK_V2_SIGN
#include <linux/types.h>
#include "ksu.h"
bool is_manager_apk(char *path);
bool is_dynamic_manager_apk(char *path, int *signature_index);
#endif

303
kernel/app_profile.c Normal file
View file

@ -0,0 +1,303 @@
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uidgid.h>
#include <linux/version.h>
#include "objsec.h"
#include "allowlist.h"
#include "app_profile.h"
#include "klog.h" // IWYU pragma: keep
#include "selinux/selinux.h"
#include "syscall_hook_manager.h"
#include "sucompat.h"
#include "sulog.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
static struct group_info root_groups = { .usage = REFCOUNT_INIT(2), };
#else
static struct group_info root_groups = { .usage = ATOMIC_INIT(2) };
#endif
static void setup_groups(struct root_profile *profile, struct cred *cred)
{
if (profile->groups_count > KSU_MAX_GROUPS) {
pr_warn("Failed to setgroups, too large group: %d!\n",
profile->uid);
return;
}
if (profile->groups_count == 1 && profile->groups[0] == 0) {
// setgroup to root and return early.
if (cred->group_info)
put_group_info(cred->group_info);
cred->group_info = get_group_info(&root_groups);
return;
}
u32 ngroups = profile->groups_count;
struct group_info *group_info = groups_alloc(ngroups);
if (!group_info) {
pr_warn("Failed to setgroups, ENOMEM for: %d\n", profile->uid);
return;
}
int i;
for (i = 0; i < ngroups; i++) {
gid_t gid = profile->groups[i];
kgid_t kgid = make_kgid(current_user_ns(), gid);
if (!gid_valid(kgid)) {
pr_warn("Failed to setgroups, invalid gid: %d\n", gid);
put_group_info(group_info);
return;
}
group_info->gid[i] = kgid;
}
groups_sort(group_info);
set_groups(cred, group_info);
put_group_info(group_info);
}
void disable_seccomp(void)
{
assert_spin_locked(&current->sighand->siglock);
// disable seccomp
#if defined(CONFIG_GENERIC_ENTRY) && \
LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
clear_syscall_work(SECCOMP);
#else
clear_thread_flag(TIF_SECCOMP);
#endif
#ifdef CONFIG_SECCOMP
current->seccomp.mode = 0;
current->seccomp.filter = NULL;
atomic_set(&current->seccomp.filter_count, 0);
#else
#endif
}
void escape_with_root_profile(void)
{
struct cred *cred;
struct task_struct *p = current;
struct task_struct *t;
cred = prepare_creds();
if (!cred) {
pr_warn("prepare_creds failed!\n");
return;
}
if (cred->euid.val == 0) {
pr_warn("Already root, don't escape!\n");
#if __SULOG_GATE
ksu_sulog_report_su_grant(current_euid().val, NULL, "escape_to_root_failed");
#endif
abort_creds(cred);
return;
}
struct root_profile *profile = ksu_get_root_profile(cred->uid.val);
cred->uid.val = profile->uid;
cred->suid.val = profile->uid;
cred->euid.val = profile->uid;
cred->fsuid.val = profile->uid;
cred->gid.val = profile->gid;
cred->fsgid.val = profile->gid;
cred->sgid.val = profile->gid;
cred->egid.val = profile->gid;
cred->securebits = 0;
BUILD_BUG_ON(sizeof(profile->capabilities.effective) !=
sizeof(kernel_cap_t));
// setup capabilities
// we need CAP_DAC_READ_SEARCH because `/data/adb/ksud` is not accessible for non-root processes
// we add it here but don't add it to cap_inheritable, so it is dropped automatically after exec!
u64 cap_for_ksud =
profile->capabilities.effective | CAP_DAC_READ_SEARCH;
memcpy(&cred->cap_effective, &cap_for_ksud,
sizeof(cred->cap_effective));
memcpy(&cred->cap_permitted, &profile->capabilities.effective,
sizeof(cred->cap_permitted));
memcpy(&cred->cap_bset, &profile->capabilities.effective,
sizeof(cred->cap_bset));
setup_groups(profile, cred);
commit_creds(cred);
// Refer to kernel/seccomp.c: seccomp_set_mode_strict
// When disabling Seccomp, ensure that current->sighand->siglock is held during the operation.
spin_lock_irq(&current->sighand->siglock);
disable_seccomp();
spin_unlock_irq(&current->sighand->siglock);
setup_selinux(profile->selinux_domain);
#if __SULOG_GATE
ksu_sulog_report_su_grant(current_euid().val, NULL, "escape_to_root");
#endif
for_each_thread (p, t) {
ksu_set_task_tracepoint_flag(t);
}
}
#ifdef CONFIG_KSU_MANUAL_SU
#include "ksud.h"
#ifndef DEVPTS_SUPER_MAGIC
#define DEVPTS_SUPER_MAGIC 0x1cd1
#endif
static int __manual_su_handle_devpts(struct inode *inode)
{
if (!current->mm) {
return 0;
}
uid_t uid = current_uid().val;
if (uid % 100000 < 10000) {
// not untrusted_app, ignore it
return 0;
}
if (likely(!ksu_is_allow_uid_for_current(uid)))
return 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || defined(KSU_OPTIONAL_SELINUX_INODE)
struct inode_security_struct *sec = selinux_inode(inode);
#else
struct inode_security_struct *sec =
(struct inode_security_struct *)inode->i_security;
#endif
if (ksu_file_sid && sec)
sec->sid = ksu_file_sid;
return 0;
}
static void disable_seccomp_for_task(struct task_struct *tsk)
{
assert_spin_locked(&tsk->sighand->siglock);
#ifdef CONFIG_SECCOMP
if (tsk->seccomp.mode == SECCOMP_MODE_DISABLED && !tsk->seccomp.filter)
return;
#endif
clear_tsk_thread_flag(tsk, TIF_SECCOMP);
#ifdef CONFIG_SECCOMP
tsk->seccomp.mode = SECCOMP_MODE_DISABLED;
if (tsk->seccomp.filter) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
seccomp_filter_release(tsk);
#else
put_seccomp_filter(tsk);
tsk->seccomp.filter = NULL;
#endif
}
#endif
}
void escape_to_root_for_cmd_su(uid_t target_uid, pid_t target_pid)
{
struct cred *newcreds;
struct task_struct *target_task;
unsigned long flags;
struct task_struct *p = current;
struct task_struct *t;
pr_info("cmd_su: escape_to_root_for_cmd_su called for UID: %d, PID: %d\n", target_uid, target_pid);
// Find target task by PID
rcu_read_lock();
target_task = pid_task(find_vpid(target_pid), PIDTYPE_PID);
if (!target_task) {
rcu_read_unlock();
pr_err("cmd_su: target task not found for PID: %d\n", target_pid);
#if __SULOG_GATE
ksu_sulog_report_su_grant(target_uid, "cmd_su", "target_not_found");
#endif
return;
}
get_task_struct(target_task);
rcu_read_unlock();
if (task_uid(target_task).val == 0) {
pr_warn("cmd_su: target task is already root, PID: %d\n", target_pid);
put_task_struct(target_task);
return;
}
newcreds = prepare_kernel_cred(target_task);
if (newcreds == NULL) {
pr_err("cmd_su: failed to allocate new cred for PID: %d\n", target_pid);
#if __SULOG_GATE
ksu_sulog_report_su_grant(target_uid, "cmd_su", "cred_alloc_failed");
#endif
put_task_struct(target_task);
return;
}
struct root_profile *profile = ksu_get_root_profile(target_uid);
newcreds->uid.val = profile->uid;
newcreds->suid.val = profile->uid;
newcreds->euid.val = profile->uid;
newcreds->fsuid.val = profile->uid;
newcreds->gid.val = profile->gid;
newcreds->fsgid.val = profile->gid;
newcreds->sgid.val = profile->gid;
newcreds->egid.val = profile->gid;
newcreds->securebits = 0;
u64 cap_for_cmd_su = profile->capabilities.effective | CAP_DAC_READ_SEARCH | CAP_SETUID | CAP_SETGID;
memcpy(&newcreds->cap_effective, &cap_for_cmd_su, sizeof(newcreds->cap_effective));
memcpy(&newcreds->cap_permitted, &profile->capabilities.effective, sizeof(newcreds->cap_permitted));
memcpy(&newcreds->cap_bset, &profile->capabilities.effective, sizeof(newcreds->cap_bset));
setup_groups(profile, newcreds);
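// Install the new credentials on the target task under task_lock; the
// reference taken on the old creds is dropped once the switch is done.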
task_lock(target_task);
const struct cred *old_creds = get_task_cred(target_task);
rcu_assign_pointer(target_task->real_cred, newcreds);
rcu_assign_pointer(target_task->cred, get_cred(newcreds));
task_unlock(target_task);
if (target_task->sighand) {
spin_lock_irqsave(&target_task->sighand->siglock, flags);
disable_seccomp_for_task(target_task);
spin_unlock_irqrestore(&target_task->sighand->siglock, flags);
}
setup_selinux(profile->selinux_domain);
put_cred(old_creds);
wake_up_process(target_task);
if (target_task->signal->tty) {
struct inode *inode = target_task->signal->tty->driver_data;
if (inode && inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
__manual_su_handle_devpts(inode);
}
}
put_task_struct(target_task);
#if __SULOG_GATE
ksu_sulog_report_su_grant(target_uid, "cmd_su", "manual_escalation");
#endif
for_each_thread (p, t) {
ksu_set_task_tracepoint_flag(t);
}
pr_info("cmd_su: privilege escalation completed for UID: %d, PID: %d\n", target_uid, target_pid);
}
#endif

70
kernel/app_profile.h Normal file
View file

@ -0,0 +1,70 @@
#ifndef __KSU_H_APP_PROFILE
#define __KSU_H_APP_PROFILE
#include <linux/types.h>
// Forward declarations
struct cred;
#define KSU_APP_PROFILE_VER 2
#define KSU_MAX_PACKAGE_NAME 256
// NGROUPS_MAX for Linux is generally 65535, but we only support 32 groups.
#define KSU_MAX_GROUPS 32
#define KSU_SELINUX_DOMAIN 64
struct root_profile {
int32_t uid;
int32_t gid;
int32_t groups_count;
int32_t groups[KSU_MAX_GROUPS];
// kernel_cap_t is u32[2] for capabilities v3
struct {
u64 effective;
u64 permitted;
u64 inheritable;
} capabilities;
char selinux_domain[KSU_SELINUX_DOMAIN];
int32_t namespaces;
};
struct non_root_profile {
bool umount_modules;
};
struct app_profile {
// It may be utilized for backward compatibility, although we have never explicitly made any promises regarding this.
u32 version;
// this is usually the package of the app, but can be other value for special apps
char key[KSU_MAX_PACKAGE_NAME];
int32_t current_uid;
bool allow_su;
union {
struct {
bool use_default;
char template_name[KSU_MAX_PACKAGE_NAME];
struct root_profile profile;
} rp_config;
struct {
bool use_default;
struct non_root_profile profile;
} nrp_config;
};
};
// Escalate current process to root with the appropriate profile
void escape_with_root_profile(void);
void escape_to_root_for_cmd_su(uid_t target_uid, pid_t target_pid);
void disable_seccomp(void);
#endif

68
kernel/arch.h Normal file
View file

@ -0,0 +1,68 @@
#ifndef __KSU_H_ARCH
#define __KSU_H_ARCH
#include <linux/version.h>
#if defined(__aarch64__)
#define __PT_PARM1_REG regs[0]
#define __PT_PARM2_REG regs[1]
#define __PT_PARM3_REG regs[2]
#define __PT_SYSCALL_PARM4_REG regs[3]
#define __PT_CCALL_PARM4_REG regs[3]
#define __PT_PARM5_REG regs[4]
#define __PT_PARM6_REG regs[5]
#define __PT_RET_REG regs[30]
#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[0]
#define __PT_SP_REG sp
#define __PT_IP_REG pc
#define REBOOT_SYMBOL "__arm64_sys_reboot"
#define SYS_READ_SYMBOL "__arm64_sys_read"
#define SYS_EXECVE_SYMBOL "__arm64_sys_execve"
#elif defined(__x86_64__)
#define __PT_PARM1_REG di
#define __PT_PARM2_REG si
#define __PT_PARM3_REG dx
/* syscall uses r10 for PARM4 */
#define __PT_SYSCALL_PARM4_REG r10
#define __PT_CCALL_PARM4_REG cx
#define __PT_PARM5_REG r8
#define __PT_PARM6_REG r9
#define __PT_RET_REG sp
#define __PT_FP_REG bp
#define __PT_RC_REG ax
#define __PT_SP_REG sp
#define __PT_IP_REG ip
#define REBOOT_SYMBOL "__x64_sys_reboot"
#define SYS_READ_SYMBOL "__x64_sys_read"
#define SYS_EXECVE_SYMBOL "__x64_sys_execve"
#else
#error "Unsupported arch"
#endif
/* allow some architectures to override `struct pt_regs` */
#ifndef __PT_REGS_CAST
#define __PT_REGS_CAST(x) (x)
#endif
#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
#define PT_REGS_SYSCALL_PARM4(x) (__PT_REGS_CAST(x)->__PT_SYSCALL_PARM4_REG)
#define PT_REGS_CCALL_PARM4(x) (__PT_REGS_CAST(x)->__PT_CCALL_PARM4_REG)
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG)
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
#define PT_REAL_REGS(regs) ((struct pt_regs *)PT_REGS_PARM1(regs))
#endif

504
kernel/dynamic_manager.c Normal file
View file

@ -0,0 +1,504 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#ifdef CONFIG_KSU_DEBUG
#include <linux/moduleparam.h>
#endif
#include <crypto/hash.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
#include <crypto/sha2.h>
#else
#include <crypto/sha.h>
#endif
#include "dynamic_manager.h"
#include "klog.h" // IWYU pragma: keep
#include "manager.h"
#define MAX_MANAGERS 2
// Dynamic sign configuration
static struct dynamic_manager_config dynamic_manager = {
.size = 0x300,
.hash = "0000000000000000000000000000000000000000000000000000000000000000",
.is_set = 0
};
// Multi-manager state
static struct manager_info active_managers[MAX_MANAGERS];
static DEFINE_SPINLOCK(managers_lock);
static DEFINE_SPINLOCK(dynamic_manager_lock);
// Work queues for persistent storage
static struct work_struct save_dynamic_manager_work;
static struct work_struct load_dynamic_manager_work;
static struct work_struct clear_dynamic_manager_work;
bool ksu_is_dynamic_manager_enabled(void)
{
unsigned long flags;
bool enabled;
spin_lock_irqsave(&dynamic_manager_lock, flags);
enabled = dynamic_manager.is_set;
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
return enabled;
}
void ksu_add_manager(uid_t uid, int signature_index)
{
unsigned long flags;
int i;
if (!ksu_is_dynamic_manager_enabled()) {
pr_info("Dynamic sign not enabled, skipping multi-manager add\n");
return;
}
spin_lock_irqsave(&managers_lock, flags);
// Check if manager already exists and update
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active && active_managers[i].uid == uid) {
active_managers[i].signature_index = signature_index;
spin_unlock_irqrestore(&managers_lock, flags);
pr_info("Updated manager uid=%d, signature_index=%d\n", uid, signature_index);
return;
}
}
// Find free slot for new manager
for (i = 0; i < MAX_MANAGERS; i++) {
if (!active_managers[i].is_active) {
active_managers[i].uid = uid;
active_managers[i].signature_index = signature_index;
active_managers[i].is_active = true;
spin_unlock_irqrestore(&managers_lock, flags);
pr_info("Added manager uid=%d, signature_index=%d\n", uid, signature_index);
return;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
pr_warn("Failed to add manager, no free slots\n");
}
void ksu_remove_manager(uid_t uid)
{
unsigned long flags;
int i;
if (!ksu_is_dynamic_manager_enabled()) {
return;
}
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active && active_managers[i].uid == uid) {
active_managers[i].is_active = false;
pr_info("Removed manager uid=%d\n", uid);
break;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
}
bool ksu_is_any_manager(uid_t uid)
{
unsigned long flags;
bool is_manager = false;
int i;
if (!ksu_is_dynamic_manager_enabled()) {
return false;
}
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active && active_managers[i].uid == uid) {
is_manager = true;
break;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
return is_manager;
}
int ksu_get_manager_signature_index(uid_t uid)
{
unsigned long flags;
int signature_index = -1;
int i;
// Check traditional manager first
if (ksu_manager_uid != KSU_INVALID_UID && uid == ksu_manager_uid) {
return DYNAMIC_SIGN_INDEX;
}
if (!ksu_is_dynamic_manager_enabled()) {
return -1;
}
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active && active_managers[i].uid == uid) {
signature_index = active_managers[i].signature_index;
break;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
return signature_index;
}
static void clear_dynamic_manager(void)
{
unsigned long flags;
int i;
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active) {
pr_info("Clearing dynamic manager uid=%d (signature_index=%d) for rescan\n",
active_managers[i].uid, active_managers[i].signature_index);
active_managers[i].is_active = false;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
}
int ksu_get_active_managers(struct manager_list_info *info)
{
unsigned long flags;
int i, count = 0;
if (!info) {
return -EINVAL;
}
// Add traditional manager first
if (ksu_manager_uid != KSU_INVALID_UID && count < 2) {
info->managers[count].uid = ksu_manager_uid;
info->managers[count].signature_index = 0;
count++;
}
// Add dynamic managers
if (ksu_is_dynamic_manager_enabled()) {
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS && count < 2; i++) {
if (active_managers[i].is_active) {
info->managers[count].uid = active_managers[i].uid;
info->managers[count].signature_index = active_managers[i].signature_index;
count++;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
}
info->count = count;
return 0;
}
static void do_save_dynamic_manager(struct work_struct *work)
{
u32 magic = DYNAMIC_MANAGER_FILE_MAGIC;
u32 version = DYNAMIC_MANAGER_FILE_VERSION;
struct dynamic_manager_config config_to_save;
loff_t off = 0;
unsigned long flags;
struct file *fp;
spin_lock_irqsave(&dynamic_manager_lock, flags);
config_to_save = dynamic_manager;
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
if (!config_to_save.is_set) {
pr_info("Dynamic sign config not set, skipping save\n");
return;
}
fp = filp_open(KERNEL_SU_DYNAMIC_MANAGER, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (IS_ERR(fp)) {
pr_err("save_dynamic_manager create file failed: %ld\n", PTR_ERR(fp));
return;
}
if (kernel_write(fp, &magic, sizeof(magic), &off) != sizeof(magic)) {
pr_err("save_dynamic_manager write magic failed.\n");
goto exit;
}
if (kernel_write(fp, &version, sizeof(version), &off) != sizeof(version)) {
pr_err("save_dynamic_manager write version failed.\n");
goto exit;
}
if (kernel_write(fp, &config_to_save, sizeof(config_to_save), &off) != sizeof(config_to_save)) {
pr_err("save_dynamic_manager write config failed.\n");
goto exit;
}
pr_info("Dynamic sign config saved successfully\n");
exit:
filp_close(fp, 0);
}
static void do_load_dynamic_manager(struct work_struct *work)
{
loff_t off = 0;
ssize_t ret = 0;
struct file *fp = NULL;
u32 magic;
u32 version;
struct dynamic_manager_config loaded_config;
unsigned long flags;
int i;
fp = filp_open(KERNEL_SU_DYNAMIC_MANAGER, O_RDONLY, 0);
if (IS_ERR(fp)) {
if (PTR_ERR(fp) == -ENOENT) {
pr_info("No saved dynamic manager config found\n");
} else {
pr_err("load_dynamic_manager open file failed: %ld\n", PTR_ERR(fp));
}
return;
}
if (kernel_read(fp, &magic, sizeof(magic), &off) != sizeof(magic) ||
magic != DYNAMIC_MANAGER_FILE_MAGIC) {
pr_err("dynamic manager file invalid magic: %x!\n", magic);
goto exit;
}
if (kernel_read(fp, &version, sizeof(version), &off) != sizeof(version)) {
pr_err("dynamic manager read version failed\n");
goto exit;
}
pr_info("dynamic manager file version: %d\n", version);
ret = kernel_read(fp, &loaded_config, sizeof(loaded_config), &off);
if (ret <= 0) {
pr_info("load_dynamic_manager read err: %zd\n", ret);
goto exit;
}
if (ret != sizeof(loaded_config)) {
pr_err("load_dynamic_manager read incomplete config: %zd/%zu\n", ret, sizeof(loaded_config));
goto exit;
}
if (loaded_config.size < 0x100 || loaded_config.size > 0x1000) {
pr_err("Invalid saved config size: 0x%x\n", loaded_config.size);
goto exit;
}
if (strlen(loaded_config.hash) != 64) {
pr_err("Invalid saved config hash length: %zu\n", strlen(loaded_config.hash));
goto exit;
}
// Validate hash format
for (i = 0; i < 64; i++) {
char c = loaded_config.hash[i];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f'))) {
pr_err("Invalid saved config hash character at position %d: %c\n", i, c);
goto exit;
}
}
spin_lock_irqsave(&dynamic_manager_lock, flags);
dynamic_manager = loaded_config;
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
pr_info("Dynamic sign config loaded: size=0x%x, hash=%.16s...\n",
loaded_config.size, loaded_config.hash);
exit:
filp_close(fp, 0);
}
static bool persistent_dynamic_manager(void)
{
return ksu_queue_work(&save_dynamic_manager_work);
}
static void do_clear_dynamic_manager(struct work_struct *work)
{
loff_t off = 0;
struct file *fp;
char zero_buffer[512];
memset(zero_buffer, 0, sizeof(zero_buffer));
fp = filp_open(KERNEL_SU_DYNAMIC_MANAGER, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (IS_ERR(fp)) {
pr_err("clear_dynamic_manager create file failed: %ld\n", PTR_ERR(fp));
return;
}
// Write null bytes to overwrite the file content
if (kernel_write(fp, zero_buffer, sizeof(zero_buffer), &off) != sizeof(zero_buffer)) {
pr_err("clear_dynamic_manager write null bytes failed.\n");
} else {
pr_info("Dynamic sign config file cleared successfully\n");
}
filp_close(fp, 0);
}
static bool clear_dynamic_manager_file(void)
{
return ksu_queue_work(&clear_dynamic_manager_work);
}
int ksu_handle_dynamic_manager(struct dynamic_manager_user_config *config)
{
unsigned long flags;
int ret = 0;
int i;
if (!config) {
return -EINVAL;
}
switch (config->operation) {
case DYNAMIC_MANAGER_OP_SET:
if (config->size < 0x100 || config->size > 0x1000) {
pr_err("invalid size: 0x%x\n", config->size);
return -EINVAL;
}
if (strlen(config->hash) != 64) {
pr_err("invalid hash length: %zu\n", strlen(config->hash));
return -EINVAL;
}
// Validate hash format
for (i = 0; i < 64; i++) {
char c = config->hash[i];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f'))) {
pr_err("invalid hash character at position %d: %c\n", i, c);
return -EINVAL;
}
}
spin_lock_irqsave(&dynamic_manager_lock, flags);
dynamic_manager.size = config->size;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
strscpy(dynamic_manager.hash, config->hash, sizeof(dynamic_manager.hash));
#else
strlcpy(dynamic_manager.hash, config->hash, sizeof(dynamic_manager.hash));
#endif
dynamic_manager.is_set = 1;
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
persistent_dynamic_manager();
pr_info("dynamic manager updated: size=0x%x, hash=%.16s... (multi-manager enabled)\n",
config->size, config->hash);
break;
case DYNAMIC_MANAGER_OP_GET:
spin_lock_irqsave(&dynamic_manager_lock, flags);
if (dynamic_manager.is_set) {
config->size = dynamic_manager.size;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
strscpy(config->hash, dynamic_manager.hash, sizeof(config->hash));
#else
strlcpy(config->hash, dynamic_manager.hash, sizeof(config->hash));
#endif
ret = 0;
} else {
ret = -ENODATA;
}
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
break;
case DYNAMIC_MANAGER_OP_CLEAR:
spin_lock_irqsave(&dynamic_manager_lock, flags);
dynamic_manager.size = 0x300;
strcpy(dynamic_manager.hash, "0000000000000000000000000000000000000000000000000000000000000000");
dynamic_manager.is_set = 0;
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
// Clear only dynamic managers, preserve default manager
clear_dynamic_manager();
// Clear file using the same method as save
clear_dynamic_manager_file();
pr_info("Dynamic sign config cleared (multi-manager disabled)\n");
break;
default:
pr_err("Invalid dynamic manager operation: %d\n", config->operation);
return -EINVAL;
}
return ret;
}
bool ksu_load_dynamic_manager(void)
{
return ksu_queue_work(&load_dynamic_manager_work);
}
void ksu_dynamic_manager_init(void)
{
int i;
INIT_WORK(&save_dynamic_manager_work, do_save_dynamic_manager);
INIT_WORK(&load_dynamic_manager_work, do_load_dynamic_manager);
INIT_WORK(&clear_dynamic_manager_work, do_clear_dynamic_manager);
// Initialize manager slots
for (i = 0; i < MAX_MANAGERS; i++) {
active_managers[i].is_active = false;
}
ksu_load_dynamic_manager();
pr_info("Dynamic sign initialized with conditional multi-manager support\n");
}
void ksu_dynamic_manager_exit(void)
{
clear_dynamic_manager();
// Save current config before exit
do_save_dynamic_manager(NULL);
pr_info("Dynamic sign exited with persistent storage\n");
}
// Get dynamic manager configuration for signature verification
bool ksu_get_dynamic_manager_config(unsigned int *size, const char **hash)
{
unsigned long flags;
bool valid = false;
spin_lock_irqsave(&dynamic_manager_lock, flags);
if (dynamic_manager.is_set) {
if (size) *size = dynamic_manager.size;
if (hash) *hash = dynamic_manager.hash;
valid = true;
}
spin_unlock_irqrestore(&dynamic_manager_lock, flags);
return valid;
}

51
kernel/dynamic_manager.h Normal file
View file

@ -0,0 +1,51 @@
#ifndef __KSU_H_DYNAMIC_MANAGER
#define __KSU_H_DYNAMIC_MANAGER
#include <linux/types.h>
#include "ksu.h"
#define DYNAMIC_MANAGER_FILE_MAGIC 0x7f445347 // 'DSG', u32
#define DYNAMIC_MANAGER_FILE_VERSION 1 // u32
#define KERNEL_SU_DYNAMIC_MANAGER "/data/adb/ksu/.dynamic_manager"
#define DYNAMIC_SIGN_INDEX 100
struct dynamic_sign_key {
unsigned int size;
const char *hash;
};
#define DYNAMIC_SIGN_DEFAULT_CONFIG { \
.size = 0x300, \
.hash = "0000000000000000000000000000000000000000000000000000000000000000" \
}
struct dynamic_manager_config {
unsigned int size;
char hash[65];
int is_set;
};
struct manager_info {
uid_t uid;
int signature_index;
bool is_active;
};
// Dynamic sign operations
void ksu_dynamic_manager_init(void);
void ksu_dynamic_manager_exit(void);
int ksu_handle_dynamic_manager(struct dynamic_manager_user_config *config);
bool ksu_load_dynamic_manager(void);
bool ksu_is_dynamic_manager_enabled(void);
// Multi-manager operations
void ksu_add_manager(uid_t uid, int signature_index);
void ksu_remove_manager(uid_t uid);
bool ksu_is_any_manager(uid_t uid);
int ksu_get_manager_signature_index(uid_t uid);
int ksu_get_active_managers(struct manager_list_info *info);
// Configuration access for signature verification
bool ksu_get_dynamic_manager_config(unsigned int *size, const char **hash);
#endif

5
kernel/embed_ksud.c Normal file
View file

@ -0,0 +1,5 @@
// WARNING: THIS IS A STUB FILE
// This file will be regenerated by CI
unsigned int ksud_size = 0;
const char ksud[0] = {};

2
kernel/export_symbol.txt Normal file
View file

@ -0,0 +1,2 @@
register_kprobe
unregister_kprobe

173
kernel/feature.c Normal file
View file

@ -0,0 +1,173 @@
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include <linux/mutex.h>
static const struct ksu_feature_handler *feature_handlers[KSU_FEATURE_MAX];
static DEFINE_MUTEX(feature_mutex);
int ksu_register_feature_handler(const struct ksu_feature_handler *handler)
{
if (!handler) {
pr_err("feature: register handler is NULL\n");
return -EINVAL;
}
if (handler->feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", handler->feature_id);
return -EINVAL;
}
if (!handler->get_handler && !handler->set_handler) {
pr_err("feature: no handler provided for feature %u\n", handler->feature_id);
return -EINVAL;
}
mutex_lock(&feature_mutex);
if (feature_handlers[handler->feature_id]) {
pr_warn("feature: handler for %u already registered, overwriting\n",
handler->feature_id);
}
feature_handlers[handler->feature_id] = handler;
pr_info("feature: registered handler for %s (id=%u)\n",
handler->name ? handler->name : "unknown", handler->feature_id);
mutex_unlock(&feature_mutex);
return 0;
}
int ksu_unregister_feature_handler(u32 feature_id)
{
int ret = 0;
if (feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", feature_id);
return -EINVAL;
}
mutex_lock(&feature_mutex);
if (!feature_handlers[feature_id]) {
pr_warn("feature: no handler registered for %u\n", feature_id);
ret = -ENOENT;
goto out;
}
feature_handlers[feature_id] = NULL;
pr_info("feature: unregistered handler for id=%u\n", feature_id);
out:
mutex_unlock(&feature_mutex);
return ret;
}
int ksu_get_feature(u32 feature_id, u64 *value, bool *supported)
{
int ret = 0;
const struct ksu_feature_handler *handler;
if (feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", feature_id);
return -EINVAL;
}
if (!value || !supported) {
pr_err("feature: invalid parameters\n");
return -EINVAL;
}
mutex_lock(&feature_mutex);
handler = feature_handlers[feature_id];
if (!handler) {
*supported = false;
*value = 0;
pr_debug("feature: feature %u not supported\n", feature_id);
goto out;
}
*supported = true;
if (!handler->get_handler) {
pr_warn("feature: no get_handler for feature %u\n", feature_id);
ret = -EOPNOTSUPP;
goto out;
}
ret = handler->get_handler(value);
if (ret) {
pr_err("feature: get_handler for %u failed: %d\n", feature_id, ret);
}
out:
mutex_unlock(&feature_mutex);
return ret;
}
int ksu_set_feature(u32 feature_id, u64 value)
{
int ret = 0;
const struct ksu_feature_handler *handler;
if (feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", feature_id);
return -EINVAL;
}
mutex_lock(&feature_mutex);
handler = feature_handlers[feature_id];
if (!handler) {
pr_err("feature: feature %u not registered\n", feature_id);
ret = -EOPNOTSUPP;
goto out;
}
if (!handler->set_handler) {
pr_warn("feature: no set_handler for feature %u\n", feature_id);
ret = -EOPNOTSUPP;
goto out;
}
ret = handler->set_handler(value);
if (ret) {
pr_err("feature: set_handler for %u failed: %d\n", feature_id, ret);
}
out:
mutex_unlock(&feature_mutex);
return ret;
}
void ksu_feature_init(void)
{
int i;
for (i = 0; i < KSU_FEATURE_MAX; i++) {
feature_handlers[i] = NULL;
}
pr_info("feature: feature management initialized\n");
}
void ksu_feature_exit(void)
{
int i;
mutex_lock(&feature_mutex);
for (i = 0; i < KSU_FEATURE_MAX; i++) {
feature_handlers[i] = NULL;
}
mutex_unlock(&feature_mutex);
pr_info("feature: feature management cleaned up\n");
}

37
kernel/feature.h Normal file
View file

@ -0,0 +1,37 @@
#ifndef __KSU_H_FEATURE
#define __KSU_H_FEATURE
#include <linux/types.h>
enum ksu_feature_id {
KSU_FEATURE_SU_COMPAT = 0,
KSU_FEATURE_KERNEL_UMOUNT = 1,
KSU_FEATURE_ENHANCED_SECURITY = 2,
KSU_FEATURE_SULOG = 3,
KSU_FEATURE_MAX
};
typedef int (*ksu_feature_get_t)(u64 *value);
typedef int (*ksu_feature_set_t)(u64 value);
struct ksu_feature_handler {
u32 feature_id;
const char *name;
ksu_feature_get_t get_handler;
ksu_feature_set_t set_handler;
};
int ksu_register_feature_handler(const struct ksu_feature_handler *handler);
int ksu_unregister_feature_handler(u32 feature_id);
int ksu_get_feature(u32 feature_id, u64 *value, bool *supported);
int ksu_set_feature(u32 feature_id, u64 value);
void ksu_feature_init(void);
void ksu_feature_exit(void);
#endif // __KSU_H_FEATURE

341
kernel/file_wrapper.c Normal file
View file

@ -0,0 +1,341 @@
#include <linux/export.h>
#include <linux/anon_inodes.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include "klog.h" // IWYU pragma: keep
#include "selinux/selinux.h"
#include "file_wrapper.h"
static loff_t ksu_wrapper_llseek(struct file *fp, loff_t off, int flags) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->llseek(orig, off, flags);
}
static ssize_t ksu_wrapper_read(struct file *fp, char __user *ptr, size_t sz, loff_t *off) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->read(orig, ptr, sz, off);
}
static ssize_t ksu_wrapper_write(struct file *fp, const char __user *ptr, size_t sz, loff_t *off) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->write(orig, ptr, sz, off);
}
static ssize_t ksu_wrapper_read_iter(struct kiocb *iocb, struct iov_iter *iovi) {
struct ksu_file_wrapper* data = iocb->ki_filp->private_data;
struct file* orig = data->orig;
iocb->ki_filp = orig;
return orig->f_op->read_iter(iocb, iovi);
}
static ssize_t ksu_wrapper_write_iter(struct kiocb *iocb, struct iov_iter *iovi) {
struct ksu_file_wrapper* data = iocb->ki_filp->private_data;
struct file* orig = data->orig;
iocb->ki_filp = orig;
return orig->f_op->write_iter(iocb, iovi);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
static int ksu_wrapper_iopoll(struct kiocb *kiocb, struct io_comp_batch* icb, unsigned int v) {
struct ksu_file_wrapper* data = kiocb->ki_filp->private_data;
struct file* orig = data->orig;
kiocb->ki_filp = orig;
return orig->f_op->iopoll(kiocb, icb, v);
}
#else
static int ksu_wrapper_iopoll(struct kiocb *kiocb, bool spin) {
struct ksu_file_wrapper* data = kiocb->ki_filp->private_data;
struct file* orig = data->orig;
kiocb->ki_filp = orig;
return orig->f_op->iopoll(kiocb, spin);
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
static int ksu_wrapper_iterate (struct file *fp, struct dir_context *dc) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->iterate(orig, dc);
}
#endif
static int ksu_wrapper_iterate_shared(struct file *fp, struct dir_context *dc) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->iterate_shared(orig, dc);
}
static __poll_t ksu_wrapper_poll(struct file *fp, struct poll_table_struct *pts) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->poll(orig, pts);
}
static long ksu_wrapper_unlocked_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->unlocked_ioctl(orig, cmd, arg);
}
static long ksu_wrapper_compat_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->compat_ioctl(orig, cmd, arg);
}
static int ksu_wrapper_mmap(struct file *fp, struct vm_area_struct * vma) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->mmap(orig, vma);
}
// static unsigned long mmap_supported_flags {}
static int ksu_wrapper_open(struct inode *ino, struct file *fp) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
struct inode *orig_ino = file_inode(orig);
return orig->f_op->open(orig_ino, orig);
}
static int ksu_wrapper_flush(struct file *fp, fl_owner_t id) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->flush(orig, id);
}
static int ksu_wrapper_fsync(struct file *fp, loff_t off1, loff_t off2, int datasync) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->fsync(orig, off1, off2, datasync);
}
static int ksu_wrapper_fasync(int arg, struct file *fp, int arg2) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->fasync(arg, orig, arg2);
}
static int ksu_wrapper_lock(struct file *fp, int arg1, struct file_lock *fl) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->lock(orig, arg1, fl);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
static ssize_t ksu_wrapper_sendpage(struct file *fp, struct page *pg, int arg1, size_t sz, loff_t *off, int arg2) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->sendpage) {
return orig->f_op->sendpage(orig, pg, arg1, sz, off, arg2);
}
return -EINVAL;
}
#endif
static unsigned long ksu_wrapper_get_unmapped_area(struct file *fp, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->get_unmapped_area) {
return orig->f_op->get_unmapped_area(orig, arg1, arg2, arg3, arg4);
}
return -EINVAL;
}
// static int ksu_wrapper_check_flags(int arg) {}
static int ksu_wrapper_flock(struct file *fp, int arg1, struct file_lock *fl) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->flock) {
return orig->f_op->flock(orig, arg1, fl);
}
return -EINVAL;
}
static ssize_t ksu_wrapper_splice_write(struct pipe_inode_info * pii, struct file *fp, loff_t *off, size_t sz, unsigned int arg1) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->splice_write) {
return orig->f_op->splice_write(pii, orig, off, sz, arg1);
}
return -EINVAL;
}
static ssize_t ksu_wrapper_splice_read(struct file *fp, loff_t *off, struct pipe_inode_info *pii, size_t sz, unsigned int arg1) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->splice_read) {
return orig->f_op->splice_read(orig, off, pii, sz, arg1);
}
return -EINVAL;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
void ksu_wrapper_splice_eof(struct file *fp) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->splice_eof) {
return orig->f_op->splice_eof(orig);
}
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)
static int ksu_wrapper_setlease(struct file *fp, int arg1, struct file_lease **fl, void **p) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl, p);
}
return -EINVAL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
static int ksu_wrapper_setlease(struct file *fp, int arg1, struct file_lock **fl, void **p) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl, p);
}
return -EINVAL;
}
#else
static int ksu_wrapper_setlease(struct file *fp, long arg1, struct file_lock **fl, void **p) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl, p);
}
return -EINVAL;
}
#endif
static long ksu_wrapper_fallocate(struct file *fp, int mode, loff_t offset, loff_t len) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->fallocate) {
return orig->f_op->fallocate(orig, mode, offset, len);
}
return -EINVAL;
}
static void ksu_wrapper_show_fdinfo(struct seq_file *m, struct file *f) {
struct ksu_file_wrapper* data = f->private_data;
struct file* orig = data->orig;
if (orig->f_op->show_fdinfo) {
orig->f_op->show_fdinfo(m, orig);
}
}
static ssize_t ksu_wrapper_copy_file_range(struct file *f1, loff_t off1, struct file *f2,
loff_t off2, size_t sz, unsigned int flags) {
// TODO: determine which file to use
struct ksu_file_wrapper* data = f1->private_data;
struct file* orig = data->orig;
if (orig->f_op->copy_file_range) {
return orig->f_op->copy_file_range(orig, off1, f2, off2, sz, flags);
}
return -EINVAL;
}
static loff_t ksu_wrapper_remap_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags) {
// TODO: determine which file to use
struct ksu_file_wrapper* data = file_in->private_data;
struct file* orig = data->orig;
if (orig->f_op->remap_file_range) {
return orig->f_op->remap_file_range(orig, pos_in, file_out, pos_out, len, remap_flags);
}
return -EINVAL;
}
static int ksu_wrapper_fadvise(struct file *fp, loff_t off1, loff_t off2, int flags) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->fadvise) {
return orig->f_op->fadvise(orig, off1, off2, flags);
}
return -EINVAL;
}
static int ksu_wrapper_release(struct inode *inode, struct file *filp) {
ksu_delete_file_wrapper(filp->private_data);
return 0;
}
struct ksu_file_wrapper* ksu_create_file_wrapper(struct file* fp) {
struct ksu_file_wrapper* p = kcalloc(1, sizeof(struct ksu_file_wrapper), GFP_KERNEL);
if (!p) {
return NULL;
}
get_file(fp);
p->orig = fp;
p->ops.owner = THIS_MODULE;
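// Forward each file_operation to the wrapped file, installing a
// forwarder only when the original file provides that op so absent
// ops stay NULL for callers.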
p->ops.llseek = fp->f_op->llseek ? ksu_wrapper_llseek : NULL;
p->ops.read = fp->f_op->read ? ksu_wrapper_read : NULL;
p->ops.write = fp->f_op->write ? ksu_wrapper_write : NULL;
p->ops.read_iter = fp->f_op->read_iter ? ksu_wrapper_read_iter : NULL;
p->ops.write_iter = fp->f_op->write_iter ? ksu_wrapper_write_iter : NULL;
p->ops.iopoll = fp->f_op->iopoll ? ksu_wrapper_iopoll : NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
p->ops.iterate = fp->f_op->iterate ? ksu_wrapper_iterate : NULL;
#endif
p->ops.iterate_shared = fp->f_op->iterate_shared ? ksu_wrapper_iterate_shared : NULL;
p->ops.poll = fp->f_op->poll ? ksu_wrapper_poll : NULL;
p->ops.unlocked_ioctl = fp->f_op->unlocked_ioctl ? ksu_wrapper_unlocked_ioctl : NULL;
p->ops.compat_ioctl = fp->f_op->compat_ioctl ? ksu_wrapper_compat_ioctl : NULL;
p->ops.mmap = fp->f_op->mmap ? ksu_wrapper_mmap : NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)
p->ops.fop_flags = fp->f_op->fop_flags;
#else
p->ops.mmap_supported_flags = fp->f_op->mmap_supported_flags;
#endif
p->ops.open = fp->f_op->open ? ksu_wrapper_open : NULL;
p->ops.flush = fp->f_op->flush ? ksu_wrapper_flush : NULL;
p->ops.release = ksu_wrapper_release;
p->ops.fsync = fp->f_op->fsync ? ksu_wrapper_fsync : NULL;
p->ops.fasync = fp->f_op->fasync ? ksu_wrapper_fasync : NULL;
p->ops.lock = fp->f_op->lock ? ksu_wrapper_lock : NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
p->ops.sendpage = fp->f_op->sendpage ? ksu_wrapper_sendpage : NULL;
#endif
p->ops.get_unmapped_area = fp->f_op->get_unmapped_area ? ksu_wrapper_get_unmapped_area : NULL;
p->ops.check_flags = fp->f_op->check_flags;
p->ops.flock = fp->f_op->flock ? ksu_wrapper_flock : NULL;
p->ops.splice_write = fp->f_op->splice_write ? ksu_wrapper_splice_write : NULL;
p->ops.splice_read = fp->f_op->splice_read ? ksu_wrapper_splice_read : NULL;
p->ops.setlease = fp->f_op->setlease ? ksu_wrapper_setlease : NULL;
p->ops.fallocate = fp->f_op->fallocate ? ksu_wrapper_fallocate : NULL;
p->ops.show_fdinfo = fp->f_op->show_fdinfo ? ksu_wrapper_show_fdinfo : NULL;
p->ops.copy_file_range = fp->f_op->copy_file_range ? ksu_wrapper_copy_file_range : NULL;
p->ops.remap_file_range = fp->f_op->remap_file_range ? ksu_wrapper_remap_file_range : NULL;
p->ops.fadvise = fp->f_op->fadvise ? ksu_wrapper_fadvise : NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
p->ops.splice_eof = fp->f_op->splice_eof ? ksu_wrapper_splice_eof : NULL;
#endif
return p;
}
void ksu_delete_file_wrapper(struct ksu_file_wrapper* data) {
fput(data->orig);
kfree(data);
}

14
kernel/file_wrapper.h Normal file
View file

@ -0,0 +1,14 @@
#ifndef KSU_FILE_WRAPPER_H
#define KSU_FILE_WRAPPER_H
#include <linux/file.h>
#include <linux/fs.h>
struct ksu_file_wrapper {
struct file* orig;
struct file_operations ops;
};
struct ksu_file_wrapper* ksu_create_file_wrapper(struct file* fp);
void ksu_delete_file_wrapper(struct ksu_file_wrapper* data);
#endif // KSU_FILE_WRAPPER_H

24
kernel/kernel_compat.h Normal file
View file

@ -0,0 +1,24 @@
#ifndef __KSU_H_KERNEL_COMPAT
#define __KSU_H_KERNEL_COMPAT
#include <linux/fs.h>
#include <linux/version.h>
/*
* ksu_copy_from_user_retry
* Try the nofault copy first; if it faults, fall back to the plain copy.
* Parameters are the same as copy_from_user; returns 0 on success.
*/
static inline long ksu_copy_from_user_retry(void *to,
const void __user *from, unsigned long count)
{
long ret = copy_from_user_nofault(to, from, count);
if (likely(!ret))
return ret;
// we faulted! fallback to slow path
return copy_from_user(to, from, count);
}
#endif

175
kernel/kernel_umount.c Normal file
View file

@ -0,0 +1,175 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/printk.h>
#include <linux/types.h>
#include "kernel_umount.h"
#include "klog.h" // IWYU pragma: keep
#include "allowlist.h"
#include "selinux/selinux.h"
#include "feature.h"
#include "ksud.h"
#include "umount_manager.h"
#include "sulog.h"
static bool ksu_kernel_umount_enabled = true;
static int kernel_umount_feature_get(u64 *value)
{
*value = ksu_kernel_umount_enabled ? 1 : 0;
return 0;
}
static int kernel_umount_feature_set(u64 value)
{
bool enable = value != 0;
ksu_kernel_umount_enabled = enable;
pr_info("kernel_umount: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler kernel_umount_handler = {
.feature_id = KSU_FEATURE_KERNEL_UMOUNT,
.name = "kernel_umount",
.get_handler = kernel_umount_feature_get,
.set_handler = kernel_umount_feature_set,
};
extern int path_umount(struct path *path, int flags);
static void ksu_umount_mnt(struct path *path, int flags)
{
int err = path_umount(path, flags);
if (err) {
pr_info("umount %s failed: %d\n", path->dentry->d_iname, err);
}
}
void try_umount(const char *mnt, int flags)
{
struct path path;
int err = kern_path(mnt, 0, &path);
if (err) {
return;
}
if (path.dentry != path.mnt->mnt_root) {
// it is not root mountpoint, maybe umounted by others already.
path_put(&path);
return;
}
ksu_umount_mnt(&path, flags);
}
struct umount_tw {
struct callback_head cb;
const struct cred *old_cred;
};
static void umount_tw_func(struct callback_head *cb)
{
struct umount_tw *tw = container_of(cb, struct umount_tw, cb);
const struct cred *saved = NULL;
if (tw->old_cred) {
saved = override_creds(tw->old_cred);
}
struct mount_entry *entry;
down_read(&mount_list_lock);
list_for_each_entry(entry, &mount_list, list) {
pr_info("%s: unmounting: %s flags 0x%x\n", __func__, entry->umountable, entry->flags);
try_umount(entry->umountable, entry->flags);
}
up_read(&mount_list_lock);
ksu_umount_manager_execute_all(tw->old_cred);
if (saved)
revert_creds(saved);
if (tw->old_cred)
put_cred(tw->old_cred);
kfree(tw);
}
int ksu_handle_umount(uid_t old_uid, uid_t new_uid)
{
struct umount_tw *tw;
// This hook is used to umount overlayfs for certain uids; if no module is mounted, just ignore it!
if (!ksu_module_mounted) {
return 0;
}
if (!ksu_kernel_umount_enabled) {
return 0;
}
// FIXME: isolated process which directly forks from zygote is not handled
if (!is_appuid(new_uid)) {
return 0;
}
if (!ksu_uid_should_umount(new_uid)) {
return 0;
}
// Check the calling process's SELinux context; if it is not zygote, ignore it,
// because some su apps may setuid to untrusted_app while staying in the global
// mount namespace, and umounting for such a process would be a disaster!
bool is_zygote_child = is_zygote(get_current_cred());
if (!is_zygote_child) {
pr_info("handle umount ignore non zygote child: %d\n", current->pid);
return 0;
}
#if __SULOG_GATE
ksu_sulog_report_syscall(new_uid, NULL, "setuid", NULL);
#endif
// umount the target mnt
pr_info("handle umount for uid: %d, pid: %d\n", new_uid, current->pid);
tw = kzalloc(sizeof(*tw), GFP_ATOMIC);
if (!tw)
return 0;
tw->old_cred = get_current_cred();
tw->cb.func = umount_tw_func;
int err = task_work_add(current, &tw->cb, TWA_RESUME);
if (err) {
if (tw->old_cred) {
put_cred(tw->old_cred);
}
kfree(tw);
pr_warn("unmount add task_work failed\n");
}
return 0;
}
void ksu_kernel_umount_init(void)
{
int rc = 0;
rc = ksu_umount_manager_init();
if (rc) {
pr_err("Failed to initialize umount manager: %d\n", rc);
}
if (ksu_register_feature_handler(&kernel_umount_handler)) {
pr_err("Failed to register kernel_umount feature handler\n");
}
}
void ksu_kernel_umount_exit(void)
{
ksu_unregister_feature_handler(KSU_FEATURE_KERNEL_UMOUNT);
}

25
kernel/kernel_umount.h Normal file
View file

@ -0,0 +1,25 @@
#ifndef __KSU_H_KERNEL_UMOUNT
#define __KSU_H_KERNEL_UMOUNT
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rwsem.h>
void ksu_kernel_umount_init(void);
void ksu_kernel_umount_exit(void);
void try_umount(const char *mnt, int flags);
// Handler function to be called from setresuid hook
int ksu_handle_umount(uid_t old_uid, uid_t new_uid);
// for the umount list
struct mount_entry {
char *umountable;
unsigned int flags;
struct list_head list;
};
extern struct list_head mount_list;
extern struct rw_semaphore mount_list_lock;
#endif

11
kernel/klog.h Normal file
View file

@ -0,0 +1,11 @@
#ifndef __KSU_H_KLOG
#define __KSU_H_KLOG
#include <linux/printk.h>
#ifdef pr_fmt
#undef pr_fmt
#define pr_fmt(fmt) "KernelSU: " fmt
#endif
#endif

6
kernel/kpm/Makefile Normal file
View file

@ -0,0 +1,6 @@
obj-y += kpm.o
obj-y += compact.o
obj-y += super_access.o
ccflags-y += -Wno-implicit-function-declaration -Wno-strict-prototypes -Wno-int-conversion -Wno-gcc-compat
ccflags-y += -Wno-declaration-after-statement -Wno-unused-function

100
kernel/kpm/compact.c Normal file
View file

@ -0,0 +1,100 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/kernfs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/kallsyms.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <asm/elf.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/version.h>
#include <linux/export.h>
#include <linux/slab.h>
#include "kpm.h"
#include "compact.h"
#include "../allowlist.h"
#include "../manager.h"
static int sukisu_is_su_allow_uid(uid_t uid)
{
return ksu_is_allow_uid_for_current(uid) ? 1 : 0;
}
static int sukisu_get_ap_mod_exclude(uid_t uid)
{
return 0; /* Not supported */
}
static int sukisu_is_uid_should_umount(uid_t uid)
{
return ksu_uid_should_umount(uid) ? 1 : 0;
}
static int sukisu_is_current_uid_manager(void)
{
return is_manager();
}
static uid_t sukisu_get_manager_uid(void)
{
return ksu_manager_uid;
}
static void sukisu_set_manager_uid(uid_t uid, int force)
{
if (force || ksu_manager_uid == -1)
ksu_manager_uid = uid;
}
struct CompactAddressSymbol {
const char *symbol_name;
void *addr;
};
unsigned long sukisu_compact_find_symbol(const char *name);
static struct CompactAddressSymbol address_symbol[] = {
{ "kallsyms_lookup_name", &kallsyms_lookup_name },
{ "compact_find_symbol", &sukisu_compact_find_symbol },
{ "is_run_in_sukisu_ultra", (void *)1 },
{ "is_su_allow_uid", &sukisu_is_su_allow_uid },
{ "get_ap_mod_exclude", &sukisu_get_ap_mod_exclude },
{ "is_uid_should_umount", &sukisu_is_uid_should_umount },
{ "is_current_uid_manager", &sukisu_is_current_uid_manager },
{ "get_manager_uid", &sukisu_get_manager_uid },
{ "sukisu_set_manager_uid", &sukisu_set_manager_uid }
};
unsigned long sukisu_compact_find_symbol(const char* name)
{
int i;
unsigned long addr;
for (i = 0; i < (sizeof(address_symbol) / sizeof(struct CompactAddressSymbol)); i++) {
struct CompactAddressSymbol *symbol = &address_symbol[i];
if (strcmp(name, symbol->symbol_name) == 0)
return (unsigned long)symbol->addr;
}
addr = kallsyms_lookup_name(name);
if (addr)
return addr;
return 0;
}
EXPORT_SYMBOL(sukisu_compact_find_symbol);
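/*
 * Usage sketch (illustrative only): a loaded KPM resolves helpers through the
 * compatibility table above first and falls back to kallsyms.
 *
 *	typedef int (*is_allow_fn)(uid_t uid);
 *	is_allow_fn is_allowed =
 *		(is_allow_fn)sukisu_compact_find_symbol("is_su_allow_uid");
 *	if (is_allowed && is_allowed(uid))
 *		...
 */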

6
kernel/kpm/compact.h Normal file
View file

@ -0,0 +1,6 @@
#ifndef __SUKISU_KPM_COMPACT_H
#define __SUKISU_KPM_COMPACT_H
extern unsigned long sukisu_compact_find_symbol(const char *name);
#endif

282
kernel/kpm/kpm.c Normal file
View file

@ -0,0 +1,282 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2025 Liankong (xhsw.new@outlook.com). All Rights Reserved.
* GPL-2
*
* KernelSU的KPM
*
* ELF ARM64
* KernelPatch的标准KPM格式实现加载和控制
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/kernfs.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/kallsyms.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <asm/elf.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/insn.h>
#include <linux/kprobes.h>
#include <linux/stacktrace.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) && defined(CONFIG_MODULES)
#include <linux/moduleloader.h>
#endif
#include "kpm.h"
#include "compact.h"
#define KPM_NAME_LEN 32
#define KPM_ARGS_LEN 1024
#ifndef NO_OPTIMIZE
#if defined(__GNUC__) && !defined(__clang__)
#define NO_OPTIMIZE __attribute__((optimize("O0")))
#elif defined(__clang__)
#define NO_OPTIMIZE __attribute__((optnone))
#else
#define NO_OPTIMIZE
#endif
#endif
noinline NO_OPTIMIZE void sukisu_kpm_load_module_path(const char *path,
const char *args, void *ptr, int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_load_module_path). "
"path=%s args=%s ptr=%p\n", path, args, ptr);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_load_module_path);
noinline NO_OPTIMIZE void sukisu_kpm_unload_module(const char *name,
void *ptr, int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_unload_module). "
"name=%s ptr=%p\n", name, ptr);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_unload_module);
noinline NO_OPTIMIZE void sukisu_kpm_num(int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_num).\n");
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_num);
noinline NO_OPTIMIZE void sukisu_kpm_info(const char *name, char *buf, int bufferSize,
int *size)
{
pr_info("kpm: Stub function called (sukisu_kpm_info). "
"name=%s buffer=%p\n", name, buf);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_info);
noinline NO_OPTIMIZE void sukisu_kpm_list(void *out, int bufferSize,
int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_list). "
"buffer=%p size=%d\n", out, bufferSize);
}
EXPORT_SYMBOL(sukisu_kpm_list);
noinline NO_OPTIMIZE void sukisu_kpm_control(const char *name, const char *args, long arg_len,
int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_control). "
"name=%p args=%p arg_len=%ld\n", name, args, arg_len);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_control);
noinline NO_OPTIMIZE void sukisu_kpm_version(char *buf, int bufferSize)
{
pr_info("kpm: Stub function called (sukisu_kpm_version). "
"buffer=%p\n", buf);
}
EXPORT_SYMBOL(sukisu_kpm_version);
noinline int sukisu_handle_kpm(unsigned long control_code, unsigned long arg1, unsigned long arg2,
unsigned long result_code)
{
int res = -1;
if (control_code == SUKISU_KPM_LOAD) {
char kernel_load_path[256] = { 0 };
char kernel_args_buffer[256] = { 0 };
if (arg1 == 0) {
res = -EINVAL;
goto exit;
}
if (!access_ok(arg1, 255)) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_load_path, (const char *)arg1, 255);
if (arg2 != 0) {
if (!access_ok(arg2, 255)) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_args_buffer, (const char *)arg2, 255);
}
sukisu_kpm_load_module_path((const char *)&kernel_load_path,
(const char *)&kernel_args_buffer, NULL, &res);
} else if (control_code == SUKISU_KPM_UNLOAD) {
char kernel_name_buffer[256] = { 0 };
if (arg1 == 0) {
res = -EINVAL;
goto exit;
}
if (!access_ok(arg1, sizeof(kernel_name_buffer))) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_name_buffer, (const char *)arg1, sizeof(kernel_name_buffer));
sukisu_kpm_unload_module((const char *)&kernel_name_buffer, NULL, &res);
} else if (control_code == SUKISU_KPM_NUM) {
sukisu_kpm_num(&res);
} else if (control_code == SUKISU_KPM_INFO) {
char kernel_name_buffer[256] = { 0 };
char buf[256] = { 0 };
int size;
if (arg1 == 0 || arg2 == 0) {
res = -EINVAL;
goto exit;
}
if (!access_ok(arg1, sizeof(kernel_name_buffer))) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_name_buffer, (const char __user *)arg1, sizeof(kernel_name_buffer));
sukisu_kpm_info((const char *)&kernel_name_buffer, (char *)&buf, sizeof(buf), &size);
if (!access_ok(arg2, size)) {
goto invalid_arg;
}
res = copy_to_user(arg2, &buf, size);
} else if (control_code == SUKISU_KPM_LIST) {
char buf[1024];
int len = (int) arg2;
if (len <= 0) {
res = -EINVAL;
goto exit;
}
if (!access_ok(arg2, len)) {
goto invalid_arg;
}
sukisu_kpm_list((char *)&buf, sizeof(buf), &res);
if (res > len) {
res = -ENOBUFS;
goto exit;
}
if (copy_to_user(arg1, &buf, len) != 0)
pr_info("kpm: Copy to user failed.");
} else if (control_code == SUKISU_KPM_CONTROL) {
char kpm_name[KPM_NAME_LEN] = { 0 };
char kpm_args[KPM_ARGS_LEN] = { 0 };
if (!access_ok(arg1, sizeof(kpm_name))) {
goto invalid_arg;
}
if (!access_ok(arg2, sizeof(kpm_args))) {
goto invalid_arg;
}
long name_len = strncpy_from_user((char *)&kpm_name, (const char __user *)arg1, sizeof(kpm_name));
if (name_len <= 0) {
res = -EINVAL;
goto exit;
}
long arg_len = strncpy_from_user((char *)&kpm_args, (const char __user *)arg2, sizeof(kpm_args));
sukisu_kpm_control((const char *)&kpm_name, (const char *)&kpm_args, arg_len, &res);
} else if (control_code == SUKISU_KPM_VERSION) {
char buffer[256] = {0};
sukisu_kpm_version((char*) &buffer, sizeof(buffer));
unsigned int outlen = (unsigned int) arg2;
int len = strlen(buffer);
if (len >= outlen) len = outlen - 1;
res = copy_to_user(arg1, &buffer, len + 1);
}
exit:
if (copy_to_user(result_code, &res, sizeof(res)) != 0)
pr_info("kpm: Copy to user failed.");
return 0;
invalid_arg:
pr_err("kpm: invalid pointer detected! arg1: %px arg2: %px\n", (void *)arg1, (void *)arg2);
res = -EFAULT;
goto exit;
}
EXPORT_SYMBOL(sukisu_handle_kpm);
int sukisu_is_kpm_control_code(unsigned long control_code) {
return (control_code >= CMD_KPM_CONTROL &&
control_code <= CMD_KPM_CONTROL_MAX) ? 1 : 0;
}
int do_kpm(void __user *arg)
{
struct ksu_kpm_cmd cmd;
if (copy_from_user(&cmd, arg, sizeof(cmd))) {
pr_err("kpm: copy_from_user failed\n");
return -EFAULT;
}
if (!access_ok(cmd.control_code, sizeof(int))) {
pr_err("kpm: invalid control_code pointer %px\n", (void *)cmd.control_code);
return -EFAULT;
}
if (!access_ok(cmd.result_code, sizeof(int))) {
pr_err("kpm: invalid result_code pointer %px\n", (void *)cmd.result_code);
return -EFAULT;
}
return sukisu_handle_kpm(cmd.control_code, cmd.arg1, cmd.arg2, cmd.result_code);
}

70
kernel/kpm/kpm.h Normal file
View file

@ -0,0 +1,70 @@
#ifndef __SUKISU_KPM_H
#define __SUKISU_KPM_H
#include <linux/types.h>
#include <linux/ioctl.h>
struct ksu_kpm_cmd {
__aligned_u64 __user control_code;
__aligned_u64 __user arg1;
__aligned_u64 __user arg2;
__aligned_u64 __user result_code;
};
int sukisu_handle_kpm(unsigned long control_code, unsigned long arg1, unsigned long arg2, unsigned long result_code);
int sukisu_is_kpm_control_code(unsigned long control_code);
int do_kpm(void __user *arg);
#define KSU_IOCTL_KPM _IOC(_IOC_READ|_IOC_WRITE, 'K', 200, 0)
/* KPM Control Code */
#define CMD_KPM_CONTROL 1
#define CMD_KPM_CONTROL_MAX 10
/* Control Code */
/*
* prctl(xxx, 1, "PATH", "ARGS")
* success return 0, error return -N
*/
#define SUKISU_KPM_LOAD 1
/*
* prctl(xxx, 2, "NAME")
* success return 0, error return -N
*/
#define SUKISU_KPM_UNLOAD 2
/*
* num = prctl(xxx, 3)
* error return -N
* success return +num or 0
*/
#define SUKISU_KPM_NUM 3
/*
* prctl(xxx, 4, Buffer, BufferSize)
* success return +out, error return -N
*/
#define SUKISU_KPM_LIST 4
/*
* prctl(xxx, 5, "NAME", Buffer[256])
* success return +out, error return -N
*/
#define SUKISU_KPM_INFO 5
/*
* prctl(xxx, 6, "NAME", "ARGS")
* success return KPM's result value
* error return -N
*/
#define SUKISU_KPM_CONTROL 6
/*
* prctl(xxx, 7, buffer, bufferSize)
* success return KPM's result value
* error return -N
*/
#define SUKISU_KPM_VERSION 7
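/*
 * Call-shape sketch only, mirroring the comments above ("xxx" stands for
 * whatever command word the prctl/ioctl bridge expects and is not defined in
 * this header):
 *
 *	int num = prctl(xxx, SUKISU_KPM_NUM);
 *	prctl(xxx, SUKISU_KPM_LOAD, "/path/to/module.kpm", "args");
 */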
#endif

278
kernel/kpm/super_access.c Normal file
View file

@ -0,0 +1,278 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/kernfs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/kallsyms.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <asm/elf.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/version.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/mount.h>
#include <linux/kprobes.h>
#include <linux/mm_types.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <../fs/mount.h>
#include "kpm.h"
#include "compact.h"
struct DynamicStructMember {
const char *name;
size_t size;
size_t offset;
};
struct DynamicStructInfo {
const char *name;
size_t count;
size_t total_size;
struct DynamicStructMember *members;
};
#define DYNAMIC_STRUCT_BEGIN(struct_name) \
static struct DynamicStructMember struct_name##_members[] = {
#define DEFINE_MEMBER(struct_name, member) \
{ \
.name = #member, \
.size = sizeof(((struct struct_name *)0)->member), \
.offset = offsetof(struct struct_name, member) \
},
#define DYNAMIC_STRUCT_END(struct_name) \
}; \
static struct DynamicStructInfo struct_name##_info = { \
.name = #struct_name, \
.count = sizeof(struct_name##_members) / sizeof(struct DynamicStructMember), \
.total_size = sizeof(struct struct_name), \
.members = struct_name##_members \
};
DYNAMIC_STRUCT_BEGIN(mount)
DEFINE_MEMBER(mount, mnt_parent)
DEFINE_MEMBER(mount, mnt)
DEFINE_MEMBER(mount, mnt_id)
DEFINE_MEMBER(mount, mnt_group_id)
DEFINE_MEMBER(mount, mnt_expiry_mark)
DEFINE_MEMBER(mount, mnt_master)
DEFINE_MEMBER(mount, mnt_devname)
DYNAMIC_STRUCT_END(mount)
DYNAMIC_STRUCT_BEGIN(vfsmount)
DEFINE_MEMBER(vfsmount, mnt_root)
DEFINE_MEMBER(vfsmount, mnt_sb)
DEFINE_MEMBER(vfsmount, mnt_flags)
DYNAMIC_STRUCT_END(vfsmount)
DYNAMIC_STRUCT_BEGIN(mnt_namespace)
DEFINE_MEMBER(mnt_namespace, ns)
DEFINE_MEMBER(mnt_namespace, root)
DEFINE_MEMBER(mnt_namespace, seq)
DEFINE_MEMBER(mnt_namespace, mounts)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
DEFINE_MEMBER(mnt_namespace, count)
#endif
DYNAMIC_STRUCT_END(mnt_namespace)
#ifdef CONFIG_KPROBES
DYNAMIC_STRUCT_BEGIN(kprobe)
DEFINE_MEMBER(kprobe, addr)
DEFINE_MEMBER(kprobe, symbol_name)
DEFINE_MEMBER(kprobe, offset)
DEFINE_MEMBER(kprobe, pre_handler)
DEFINE_MEMBER(kprobe, post_handler)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
DEFINE_MEMBER(kprobe, fault_handler)
#endif
DEFINE_MEMBER(kprobe, flags)
DYNAMIC_STRUCT_END(kprobe)
#endif
DYNAMIC_STRUCT_BEGIN(vm_area_struct)
DEFINE_MEMBER(vm_area_struct,vm_start)
DEFINE_MEMBER(vm_area_struct,vm_end)
DEFINE_MEMBER(vm_area_struct,vm_flags)
DEFINE_MEMBER(vm_area_struct,anon_vma)
DEFINE_MEMBER(vm_area_struct,vm_pgoff)
DEFINE_MEMBER(vm_area_struct,vm_file)
DEFINE_MEMBER(vm_area_struct,vm_private_data)
#ifdef CONFIG_ANON_VMA_NAME
DEFINE_MEMBER(vm_area_struct, anon_name)
#endif
DEFINE_MEMBER(vm_area_struct, vm_ops)
DYNAMIC_STRUCT_END(vm_area_struct)
DYNAMIC_STRUCT_BEGIN(vm_operations_struct)
DEFINE_MEMBER(vm_operations_struct, open)
DEFINE_MEMBER(vm_operations_struct, close)
DEFINE_MEMBER(vm_operations_struct, name)
DEFINE_MEMBER(vm_operations_struct, access)
DYNAMIC_STRUCT_END(vm_operations_struct)
DYNAMIC_STRUCT_BEGIN(netlink_kernel_cfg)
DEFINE_MEMBER(netlink_kernel_cfg, groups)
DEFINE_MEMBER(netlink_kernel_cfg, flags)
DEFINE_MEMBER(netlink_kernel_cfg, input)
DEFINE_MEMBER(netlink_kernel_cfg, cb_mutex)
DEFINE_MEMBER(netlink_kernel_cfg, bind)
DEFINE_MEMBER(netlink_kernel_cfg, unbind)
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)
DEFINE_MEMBER(netlink_kernel_cfg, compare)
#endif
DYNAMIC_STRUCT_END(netlink_kernel_cfg)
DYNAMIC_STRUCT_BEGIN(task_struct)
DEFINE_MEMBER(task_struct, pid)
DEFINE_MEMBER(task_struct, tgid)
DEFINE_MEMBER(task_struct, cred)
DEFINE_MEMBER(task_struct, real_cred)
DEFINE_MEMBER(task_struct, comm)
DEFINE_MEMBER(task_struct, parent)
DEFINE_MEMBER(task_struct, group_leader)
DEFINE_MEMBER(task_struct, mm)
DEFINE_MEMBER(task_struct, active_mm)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
DEFINE_MEMBER(task_struct, pids[PIDTYPE_PID].pid)
#else
DEFINE_MEMBER(task_struct, thread_pid)
#endif
DEFINE_MEMBER(task_struct, files)
DEFINE_MEMBER(task_struct, seccomp)
#ifdef CONFIG_THREAD_INFO_IN_TASK
DEFINE_MEMBER(task_struct, thread_info)
#endif
#ifdef CONFIG_CGROUPS
DEFINE_MEMBER(task_struct, cgroups)
#endif
#ifdef CONFIG_SECURITY
DEFINE_MEMBER(task_struct, security)
#endif
DEFINE_MEMBER(task_struct, thread)
DYNAMIC_STRUCT_END(task_struct)
#define STRUCT_INFO(name) &(name##_info)
static struct DynamicStructInfo *dynamic_struct_infos[] = {
STRUCT_INFO(mount),
STRUCT_INFO(vfsmount),
STRUCT_INFO(mnt_namespace),
#ifdef CONFIG_KPROBES
STRUCT_INFO(kprobe),
#endif
STRUCT_INFO(vm_area_struct),
STRUCT_INFO(vm_operations_struct),
STRUCT_INFO(netlink_kernel_cfg),
STRUCT_INFO(task_struct)
};
/*
* return 0 if successful
* return -1 if struct not defined
*/
int sukisu_super_find_struct(const char *struct_name, size_t *out_size, int *out_members)
{
for (size_t i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) {
struct DynamicStructInfo *info = dynamic_struct_infos[i];
if (strcmp(struct_name, info->name) == 0) {
if (out_size)
*out_size = info->total_size;
if (out_members)
*out_members = info->count;
return 0;
}
}
return -1;
}
EXPORT_SYMBOL(sukisu_super_find_struct);
/*
* Dynamic access struct
* return 0 if successful
* return -1 if struct not defined
* return -2 if member not defined
*/
int sukisu_super_access(const char *struct_name, const char *member_name, size_t *out_offset,
size_t *out_size)
{
for (size_t i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) {
struct DynamicStructInfo *info = dynamic_struct_infos[i];
if (strcmp(struct_name, info->name) == 0) {
for (size_t i1 = 0; i1 < info->count; i1++) {
if (strcmp(info->members[i1].name, member_name) == 0) {
if (out_offset)
*out_offset = info->members[i1].offset;
if (out_size)
*out_size = info->members[i1].size;
return 0;
}
}
return -2;
}
}
return -1;
}
EXPORT_SYMBOL(sukisu_super_access);
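/*
 * Usage sketch (illustrative; "mnt" is a hypothetical struct mount pointer
 * already obtained by the caller):
 *
 *	size_t off, sz;
 *	if (!sukisu_super_access("mount", "mnt_id", &off, &sz) &&
 *	    sz == sizeof(int)) {
 *		int mnt_id = *(int *)((char *)mnt + off);
 *		...
 *	}
 */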
#define DYNAMIC_CONTAINER_OF(offset, member_ptr) ({ \
(offset != (size_t)-1) ? (void*)((char*)(member_ptr) - offset) : NULL; \
})
/*
* Dynamic container_of
* return 0 if success
* return -1 if current struct not defined
* return -2 if target member not defined
*/
int sukisu_super_container_of(const char *struct_name, const char *member_name, void *ptr,
void **out_ptr)
{
if (ptr == NULL)
return -3;
for (size_t i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) {
struct DynamicStructInfo *info = dynamic_struct_infos[i];
if (strcmp(struct_name, info->name) == 0) {
for (size_t i1 = 0; i1 < info->count; i1++) {
if (strcmp(info->members[i1].name, member_name) == 0) {
*out_ptr = (void *)DYNAMIC_CONTAINER_OF(info->members[i1].offset, ptr);
return 0;
}
}
return -2;
}
}
return -1;
}
EXPORT_SYMBOL(sukisu_super_container_of);

15
kernel/kpm/super_access.h Normal file
View file

@ -0,0 +1,15 @@
#ifndef __SUKISU_SUPER_ACCESS_H
#define __SUKISU_SUPER_ACCESS_H
#include <linux/types.h>
#include <linux/stddef.h>
#include "kpm.h"
#include "compact.h"
extern int sukisu_super_find_struct(const char *struct_name, size_t *out_size, int *out_members);
extern int sukisu_super_access(const char *struct_name, const char *member_name, size_t *out_offset,
size_t *out_size);
extern int sukisu_super_container_of(const char *struct_name, const char *member_name, void *ptr,
void **out_ptr);
#endif

116
kernel/ksu.c Normal file
View file

@ -0,0 +1,116 @@
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#include "allowlist.h"
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include "throne_tracker.h"
#include "syscall_hook_manager.h"
#include "ksud.h"
#include "supercalls.h"
#include "sulog.h"
#include "throne_comm.h"
#include "dynamic_manager.h"
static struct workqueue_struct *ksu_workqueue;
bool ksu_queue_work(struct work_struct *work)
{
return queue_work(ksu_workqueue, work);
}
void sukisu_custom_config_init(void)
{
}
void sukisu_custom_config_exit(void)
{
ksu_uid_exit();
ksu_throne_comm_exit();
ksu_dynamic_manager_exit();
#if __SULOG_GATE
ksu_sulog_exit();
#endif
}
int __init kernelsu_init(void)
{
#ifdef CONFIG_KSU_DEBUG
pr_alert("*************************************************************");
pr_alert("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **");
pr_alert("** **");
pr_alert("** You are running KernelSU in DEBUG mode **");
pr_alert("** **");
pr_alert("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **");
pr_alert("*************************************************************");
#endif
ksu_feature_init();
ksu_supercalls_init();
sukisu_custom_config_init();
ksu_syscall_hook_manager_init();
ksu_workqueue = alloc_ordered_workqueue("kernelsu_work_queue", 0);
ksu_allowlist_init();
ksu_throne_tracker_init();
#ifdef KSU_KPROBES_HOOK
ksu_ksud_init();
#else
pr_alert("KPROBES is disabled, KernelSU may not work, please check https://kernelsu.org/guide/how-to-integrate-for-non-gki.html");
#endif
#ifdef MODULE
#ifndef CONFIG_KSU_DEBUG
kobject_del(&THIS_MODULE->mkobj.kobj);
#endif
#endif
return 0;
}
extern void ksu_observer_exit(void);
void kernelsu_exit(void)
{
ksu_allowlist_exit();
ksu_observer_exit();
ksu_throne_tracker_exit();
destroy_workqueue(ksu_workqueue);
#ifdef KSU_KPROBES_HOOK
ksu_ksud_exit();
#endif
ksu_syscall_hook_manager_exit();
sukisu_custom_config_exit();
ksu_supercalls_exit();
ksu_feature_exit();
}
module_init(kernelsu_init);
module_exit(kernelsu_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("weishu");
MODULE_DESCRIPTION("Android KernelSU");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)
MODULE_IMPORT_NS("VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver");
#else
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
#endif

62
kernel/ksu.h Normal file
View file

@ -0,0 +1,62 @@
#ifndef __KSU_H_KSU
#define __KSU_H_KSU
#include <linux/types.h>
#include <linux/workqueue.h>
#define KERNEL_SU_VERSION KSU_VERSION
#define KERNEL_SU_OPTION 0xDEADBEEF
extern bool ksu_uid_scanner_enabled;
#define EVENT_POST_FS_DATA 1
#define EVENT_BOOT_COMPLETED 2
#define EVENT_MODULE_MOUNTED 3
// SukiSU Ultra kernel su version full strings
#ifndef KSU_VERSION_FULL
#define KSU_VERSION_FULL "v3.x-00000000@unknown"
#endif
#define KSU_FULL_VERSION_STRING 255
#define DYNAMIC_MANAGER_OP_SET 0
#define DYNAMIC_MANAGER_OP_GET 1
#define DYNAMIC_MANAGER_OP_CLEAR 2
#define UID_SCANNER_OP_GET_STATUS 0
#define UID_SCANNER_OP_TOGGLE 1
#define UID_SCANNER_OP_CLEAR_ENV 2
struct dynamic_manager_user_config {
unsigned int operation;
unsigned int size;
char hash[65];
};
struct manager_list_info {
int count;
struct {
uid_t uid;
int signature_index;
} managers[2];
};
bool ksu_queue_work(struct work_struct *work);
#if 0
static inline int startswith(char *s, char *prefix)
{
return strncmp(s, prefix, strlen(prefix));
}
static inline int endswith(const char *s, const char *t)
{
size_t slen = strlen(s);
size_t tlen = strlen(t);
if (tlen > slen)
return 1;
return strcmp(s + slen - tlen, t);
}
#endif
#endif

660
kernel/ksud.c Normal file
View file

@ -0,0 +1,660 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <asm/current.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/version.h>
#include <linux/input-event-codes.h>
#include <linux/kprobes.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/namei.h>
#include <linux/workqueue.h>
#include "manager.h"
#include "allowlist.h"
#include "arch.h"
#include "klog.h" // IWYU pragma: keep
#include "ksud.h"
#include "selinux/selinux.h"
#include "throne_tracker.h"
bool ksu_module_mounted __read_mostly = false;
bool ksu_boot_completed __read_mostly = false;
static const char KERNEL_SU_RC[] =
"\n"
"on post-fs-data\n"
" start logd\n"
	// We should wait for post-fs-data to finish
" exec u:r:su:s0 root -- " KSUD_PATH " post-fs-data\n"
"\n"
"on nonencrypted\n"
" exec u:r:su:s0 root -- " KSUD_PATH " services\n"
"\n"
"on property:vold.decrypt=trigger_restart_framework\n"
" exec u:r:su:s0 root -- " KSUD_PATH " services\n"
"\n"
"on property:sys.boot_completed=1\n"
" exec u:r:su:s0 root -- " KSUD_PATH " boot-completed\n"
"\n"
"\n";
static void stop_vfs_read_hook();
static void stop_execve_hook();
static void stop_input_hook();
#ifdef KSU_KPROBES_HOOK
static struct work_struct stop_vfs_read_work;
static struct work_struct stop_execve_hook_work;
static struct work_struct stop_input_hook_work;
#else
bool ksu_vfs_read_hook __read_mostly = true;
bool ksu_execveat_hook __read_mostly = true;
bool ksu_input_hook __read_mostly = true;
#endif
u32 ksu_file_sid;
// Whether we are still in the boot phase
static bool is_boot_phase = true;
void on_post_fs_data(void)
{
static bool done = false;
if (done) {
pr_info("on_post_fs_data already done\n");
return;
}
done = true;
pr_info("on_post_fs_data!\n");
ksu_load_allow_list();
ksu_observer_init();
// sanity check, this may influence the performance
stop_input_hook();
// End of boot state
is_boot_phase = false;
ksu_file_sid = ksu_get_ksu_file_sid();
pr_info("ksu_file sid: %d\n", ksu_file_sid);
}
extern void ext4_unregister_sysfs(struct super_block *sb);
int nuke_ext4_sysfs(const char* mnt)
{
#ifdef CONFIG_EXT4_FS
struct path path;
int err = kern_path(mnt, 0, &path);
if (err) {
pr_err("nuke path err: %d\n", err);
return err;
}
struct super_block *sb = path.dentry->d_inode->i_sb;
const char *name = sb->s_type->name;
if (strcmp(name, "ext4") != 0) {
pr_info("nuke but module aren't mounted\n");
path_put(&path);
return -EINVAL;
}
ext4_unregister_sysfs(sb);
path_put(&path);
return 0;
#else
return 0; /* ext4 is not built in; nothing to unregister */
#endif
}
void on_module_mounted(void){
pr_info("on_module_mounted!\n");
ksu_module_mounted = true;
}
void on_boot_completed(void){
ksu_boot_completed = true;
pr_info("on_boot_completed!\n");
track_throne(true);
}
#define MAX_ARG_STRINGS 0x7FFFFFFF
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
bool is_compat;
#endif
union {
const char __user *const __user *native;
#ifdef CONFIG_COMPAT
const compat_uptr_t __user *compat;
#endif
} ptr;
};
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
const char __user *native;
#ifdef CONFIG_COMPAT
if (unlikely(argv.is_compat)) {
compat_uptr_t compat;
if (get_user(compat, argv.ptr.compat + nr))
return ERR_PTR(-EFAULT);
return compat_ptr(compat);
}
#endif
if (get_user(native, argv.ptr.native + nr))
return ERR_PTR(-EFAULT);
return native;
}
/*
* count() counts the number of strings in array ARGV.
*/
/*
* Make sure old GCC compilers can use __maybe_unused.
* Tested with GCC 4.4.x ~ 4.9.x.
*/
static int __maybe_unused count(struct user_arg_ptr argv, int max)
{
int i = 0;
if (argv.ptr.native != NULL) {
for (;;) {
const char __user *p = get_user_arg_ptr(argv, i);
if (!p)
break;
if (IS_ERR(p))
return -EFAULT;
if (i >= max)
return -E2BIG;
++i;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
}
}
return i;
}
static void on_post_fs_data_cbfun(struct callback_head *cb)
{
on_post_fs_data();
}
static struct callback_head on_post_fs_data_cb = { .func =
on_post_fs_data_cbfun };
// IMPORTANT NOTE: the call from execve_handler_pre WON'T provided correct value for envp and flags in GKI version
int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
struct user_arg_ptr *argv,
struct user_arg_ptr *envp, int *flags)
{
#ifndef KSU_KPROBES_HOOK
if (!ksu_execveat_hook) {
return 0;
}
#endif
struct filename *filename;
static const char app_process[] = "/system/bin/app_process";
static bool first_app_process = true;
/* This applies to versions Android 10+ */
static const char system_bin_init[] = "/system/bin/init";
/* This applies to versions between Android 6 ~ 9 */
static const char old_system_init[] = "/init";
static bool init_second_stage_executed = false;
if (!filename_ptr)
return 0;
filename = *filename_ptr;
if (IS_ERR(filename)) {
return 0;
}
if (unlikely(!memcmp(filename->name, system_bin_init,
sizeof(system_bin_init) - 1) &&
argv)) {
// /system/bin/init executed
int argc = count(*argv, MAX_ARG_STRINGS);
pr_info("/system/bin/init argc: %d\n", argc);
if (argc > 1 && !init_second_stage_executed) {
const char __user *p = get_user_arg_ptr(*argv, 1);
if (p && !IS_ERR(p)) {
char first_arg[16];
strncpy_from_user_nofault(first_arg, p, sizeof(first_arg));
pr_info("/system/bin/init first arg: %s\n", first_arg);
if (!strcmp(first_arg, "second_stage")) {
pr_info("/system/bin/init second_stage executed\n");
apply_kernelsu_rules();
init_second_stage_executed = true;
}
} else {
pr_err("/system/bin/init parse args err!\n");
}
}
} else if (unlikely(!memcmp(filename->name, old_system_init,
sizeof(old_system_init) - 1) &&
argv)) {
// /init executed
int argc = count(*argv, MAX_ARG_STRINGS);
pr_info("/init argc: %d\n", argc);
if (argc > 1 && !init_second_stage_executed) {
/* This applies to versions between Android 6 ~ 7 */
const char __user *p = get_user_arg_ptr(*argv, 1);
if (p && !IS_ERR(p)) {
char first_arg[16];
strncpy_from_user_nofault(first_arg, p, sizeof(first_arg));
pr_info("/init first arg: %s\n", first_arg);
if (!strcmp(first_arg, "--second-stage")) {
pr_info("/init second_stage executed\n");
apply_kernelsu_rules();
init_second_stage_executed = true;
}
} else {
pr_err("/init parse args err!\n");
}
} else if (argc == 1 && !init_second_stage_executed && envp) {
/* This applies to versions between Android 8 ~ 9 */
int envc = count(*envp, MAX_ARG_STRINGS);
if (envc > 0) {
int n;
for (n = 1; n <= envc; n++) {
const char __user *p = get_user_arg_ptr(*envp, n);
if (!p || IS_ERR(p)) {
continue;
}
char env[256];
// Reading environment variable strings from user space
if (strncpy_from_user_nofault(env, p, sizeof(env)) < 0)
continue;
// Parsing environment variable names and values
char *env_name = env;
char *env_value = strchr(env, '=');
if (env_value == NULL)
continue;
// Replace equal sign with string terminator
*env_value = '\0';
env_value++;
// Check if the environment variable name and value are matching
if (!strcmp(env_name, "INIT_SECOND_STAGE") &&
(!strcmp(env_value, "1") ||
!strcmp(env_value, "true"))) {
pr_info("/init second_stage executed\n");
apply_kernelsu_rules();
init_second_stage_executed = true;
}
}
}
}
}
if (unlikely(first_app_process && !memcmp(filename->name, app_process,
sizeof(app_process) - 1))) {
first_app_process = false;
pr_info("exec app_process, /data prepared, second_stage: %d\n",
init_second_stage_executed);
struct task_struct *init_task;
rcu_read_lock();
init_task = rcu_dereference(current->real_parent);
if (init_task) {
task_work_add(init_task, &on_post_fs_data_cb, TWA_RESUME);
}
rcu_read_unlock();
stop_execve_hook();
}
return 0;
}
static ssize_t (*orig_read)(struct file *, char __user *, size_t, loff_t *);
static ssize_t (*orig_read_iter)(struct kiocb *, struct iov_iter *);
static struct file_operations fops_proxy;
static ssize_t read_count_append = 0;
static ssize_t read_proxy(struct file *file, char __user *buf, size_t count,
loff_t *pos)
{
bool first_read = file->f_pos == 0;
ssize_t ret = orig_read(file, buf, count, pos);
if (first_read) {
pr_info("read_proxy append %ld + %ld\n", ret, read_count_append);
ret += read_count_append;
}
return ret;
}
static ssize_t read_iter_proxy(struct kiocb *iocb, struct iov_iter *to)
{
bool first_read = iocb->ki_pos == 0;
ssize_t ret = orig_read_iter(iocb, to);
if (first_read) {
pr_info("read_iter_proxy append %ld + %ld\n", ret, read_count_append);
ret += read_count_append;
}
return ret;
}
static int ksu_handle_vfs_read(struct file **file_ptr, char __user **buf_ptr,
size_t *count_ptr, loff_t **pos)
{
#ifndef KSU_KPROBES_HOOK
if (!ksu_vfs_read_hook) {
return 0;
}
#endif
struct file *file;
char __user *buf;
size_t count;
if (strcmp(current->comm, "init")) {
// we are only interested in the `init` process
return 0;
}
file = *file_ptr;
if (IS_ERR(file)) {
return 0;
}
if (!d_is_reg(file->f_path.dentry)) {
return 0;
}
const char *short_name = file->f_path.dentry->d_name.name;
if (strcmp(short_name, "atrace.rc")) {
// we are only interested in files named `atrace.rc`
return 0;
}
char path[256];
char *dpath = d_path(&file->f_path, path, sizeof(path));
if (IS_ERR(dpath)) {
return 0;
}
if (strcmp(dpath, "/system/etc/init/atrace.rc")) {
return 0;
}
// we only process the first read
static bool rc_inserted = false;
if (rc_inserted) {
// we don't need this kprobe, unregister it!
stop_vfs_read_hook();
return 0;
}
rc_inserted = true;
// now we can be sure that the init process is reading
// `/system/etc/init/atrace.rc`
buf = *buf_ptr;
count = *count_ptr;
size_t rc_count = strlen(KERNEL_SU_RC);
pr_info("vfs_read: %s, comm: %s, count: %zu, rc_count: %zu\n", dpath,
current->comm, count, rc_count);
if (count < rc_count) {
pr_err("count: %zu < rc_count: %zu\n", count, rc_count);
return 0;
}
size_t ret = copy_to_user(buf, KERNEL_SU_RC, rc_count);
if (ret) {
pr_err("copy ksud.rc failed: %zu\n", ret);
return 0;
}
// we've succeeded in inserting ksud.rc; now we need to proxy the read and modify the result!
// But we cannot modify the file_operations directly, because it's in read-only memory.
// We just replace the whole file_operations with a proxy one.
memcpy(&fops_proxy, file->f_op, sizeof(struct file_operations));
orig_read = file->f_op->read;
if (orig_read) {
fops_proxy.read = read_proxy;
}
orig_read_iter = file->f_op->read_iter;
if (orig_read_iter) {
fops_proxy.read_iter = read_iter_proxy;
}
// replace the file_operations
file->f_op = &fops_proxy;
read_count_append = rc_count;
*buf_ptr = buf + rc_count;
*count_ptr = count - rc_count;
return 0;
}
int ksu_handle_sys_read(unsigned int fd, char __user **buf_ptr,
size_t *count_ptr)
{
struct file *file = fget(fd);
if (!file) {
return 0;
}
int result = ksu_handle_vfs_read(&file, buf_ptr, count_ptr, NULL);
fput(file);
return result;
}
static unsigned int volumedown_pressed_count = 0;
static bool is_volumedown_enough(unsigned int count)
{
return count >= 3;
}
int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code,
int *value)
{
#ifndef KSU_KPROBES_HOOK
if (!ksu_input_hook) {
return 0;
}
#endif
if (*type == EV_KEY && *code == KEY_VOLUMEDOWN) {
int val = *value;
pr_info("KEY_VOLUMEDOWN val: %d\n", val);
if (val && is_boot_phase) {
// key pressed, count it
volumedown_pressed_count += 1;
if (is_volumedown_enough(volumedown_pressed_count)) {
stop_input_hook();
}
}
}
return 0;
}
bool ksu_is_safe_mode()
{
static bool safe_mode = false;
if (safe_mode) {
// don't need to check again, userspace may call multiple times
return true;
}
// stop hook first!
stop_input_hook();
pr_info("volumedown_pressed_count: %d\n", volumedown_pressed_count);
if (is_volumedown_enough(volumedown_pressed_count)) {
// pressed over 3 times
pr_info("KEY_VOLUMEDOWN pressed max times, safe mode detected!\n");
safe_mode = true;
return true;
}
return false;
}
#ifdef KSU_KPROBES_HOOK
static int sys_execve_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct pt_regs *real_regs = PT_REAL_REGS(regs);
const char __user **filename_user =
(const char **)&PT_REGS_PARM1(real_regs);
const char __user *const __user *__argv =
(const char __user *const __user *)PT_REGS_PARM2(real_regs);
struct user_arg_ptr argv = { .ptr.native = __argv };
struct filename filename_in, *filename_p;
char path[32];
if (!filename_user)
return 0;
memset(path, 0, sizeof(path));
strncpy_from_user_nofault(path, *filename_user, 32);
filename_in.name = path;
filename_p = &filename_in;
return ksu_handle_execveat_ksud(AT_FDCWD, &filename_p, &argv, NULL, NULL);
}
static int sys_read_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct pt_regs *real_regs = PT_REAL_REGS(regs);
unsigned int fd = PT_REGS_PARM1(real_regs);
char __user **buf_ptr = (char __user **)&PT_REGS_PARM2(real_regs);
size_t count_ptr = (size_t *)&PT_REGS_PARM3(real_regs);
return ksu_handle_sys_read(fd, buf_ptr, count_ptr);
}
static int input_handle_event_handler_pre(struct kprobe *p,
struct pt_regs *regs)
{
unsigned int *type = (unsigned int *)&PT_REGS_PARM2(regs);
unsigned int *code = (unsigned int *)&PT_REGS_PARM3(regs);
int *value = (int *)&PT_REGS_CCALL_PARM4(regs);
return ksu_handle_input_handle_event(type, code, value);
}
static struct kprobe execve_kp = {
.symbol_name = SYS_EXECVE_SYMBOL,
.pre_handler = sys_execve_handler_pre,
};
static struct kprobe vfs_read_kp = {
.symbol_name = SYS_READ_SYMBOL,
.pre_handler = sys_read_handler_pre,
};
static struct kprobe input_event_kp = {
.symbol_name = "input_event",
.pre_handler = input_handle_event_handler_pre,
};
static void do_stop_vfs_read_hook(struct work_struct *work)
{
unregister_kprobe(&vfs_read_kp);
}
static void do_stop_execve_hook(struct work_struct *work)
{
unregister_kprobe(&execve_kp);
}
static void do_stop_input_hook(struct work_struct *work)
{
unregister_kprobe(&input_event_kp);
}
#endif
static void stop_vfs_read_hook()
{
#ifdef KSU_KPROBES_HOOK
bool ret = schedule_work(&stop_vfs_read_work);
pr_info("unregister vfs_read kprobe: %d!\n", ret);
#else
ksu_vfs_read_hook = false;
pr_info("stop vfs_read_hook\n");
#endif
}
static void stop_execve_hook()
{
#ifdef KSU_KPROBES_HOOK
bool ret = schedule_work(&stop_execve_hook_work);
pr_info("unregister execve kprobe: %d!\n", ret);
#else
ksu_execveat_hook = false;
pr_info("stop execve_hook\n");
#endif
}
static void stop_input_hook()
{
static bool input_hook_stopped = false;
if (input_hook_stopped) {
return;
}
input_hook_stopped = true;
#ifdef KSU_KPROBES_HOOK
bool ret = schedule_work(&stop_input_hook_work);
pr_info("unregister input kprobe: %d!\n", ret);
#else
ksu_input_hook = false;
pr_info("stop input_hook\n");
#endif
}
// ksud: module support
void ksu_ksud_init()
{
#ifdef KSU_KPROBES_HOOK
int ret;
ret = register_kprobe(&execve_kp);
pr_info("ksud: execve_kp: %d\n", ret);
ret = register_kprobe(&vfs_read_kp);
pr_info("ksud: vfs_read_kp: %d\n", ret);
ret = register_kprobe(&input_event_kp);
pr_info("ksud: input_event_kp: %d\n", ret);
INIT_WORK(&stop_vfs_read_work, do_stop_vfs_read_hook);
INIT_WORK(&stop_execve_hook_work, do_stop_execve_hook);
INIT_WORK(&stop_input_hook_work, do_stop_input_hook);
#endif
}
void ksu_ksud_exit()
{
#ifdef KSU_KPROBES_HOOK
unregister_kprobe(&execve_kp);
// this should be done before unregister vfs_read_kp
// unregister_kprobe(&vfs_read_kp);
unregister_kprobe(&input_event_kp);
#endif
is_boot_phase = false;
}

21
kernel/ksud.h Normal file
View file

@ -0,0 +1,21 @@
#ifndef __KSU_H_KSUD
#define __KSU_H_KSUD
#define KSUD_PATH "/data/adb/ksud"
void ksu_ksud_init();
void ksu_ksud_exit();
void on_post_fs_data(void);
void on_module_mounted(void);
void on_boot_completed(void);
bool ksu_is_safe_mode(void);
int nuke_ext4_sysfs(const char* mnt);
extern u32 ksu_file_sid;
extern bool ksu_module_mounted;
extern bool ksu_boot_completed;
#endif

43
kernel/manager.h Normal file
View file

@ -0,0 +1,43 @@
#ifndef __KSU_H_KSU_MANAGER
#define __KSU_H_KSU_MANAGER
#include <linux/cred.h>
#include <linux/types.h>
#define KSU_INVALID_UID -1
extern uid_t ksu_manager_uid; // DO NOT USE DIRECTLY
extern bool ksu_is_any_manager(uid_t uid);
extern void ksu_add_manager(uid_t uid, int signature_index);
extern void ksu_remove_manager(uid_t uid);
extern int ksu_get_manager_signature_index(uid_t uid);
static inline bool ksu_is_manager_uid_valid(void)
{
return ksu_manager_uid != KSU_INVALID_UID;
}
static inline bool is_manager(void)
{
return unlikely(ksu_is_any_manager(current_uid().val) ||
(ksu_manager_uid != KSU_INVALID_UID && ksu_manager_uid == current_uid().val));
}
static inline uid_t ksu_get_manager_uid(void)
{
return ksu_manager_uid;
}
static inline void ksu_set_manager_uid(uid_t uid)
{
ksu_manager_uid = uid;
}
static inline void ksu_invalidate_manager_uid(void)
{
ksu_manager_uid = KSU_INVALID_UID;
}
int ksu_observer_init(void);
#endif

17
kernel/manager_sign.h Normal file
View file

@ -0,0 +1,17 @@
#ifndef MANAGER_SIGN_H
#define MANAGER_SIGN_H
// ShirkNeko/SukiSU
#define EXPECTED_SIZE_SHIRKNEKO 0x35c
#define EXPECTED_HASH_SHIRKNEKO "947ae944f3de4ed4c21a7e4f7953ecf351bfa2b36239da37a34111ad29993eef"
// Dynamic Sign
#define EXPECTED_SIZE_OTHER 0x300
#define EXPECTED_HASH_OTHER "0000000000000000000000000000000000000000000000000000000000000000"
typedef struct {
unsigned size;
const char *sha256;
} apk_sign_key_t;
#endif /* MANAGER_SIGN_H */

357
kernel/manual_su.c Normal file
View file

@ -0,0 +1,357 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/printk.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include "manual_su.h"
#include "ksu.h"
#include "allowlist.h"
#include "manager.h"
#include "app_profile.h"
static bool current_verified = false;
static void ksu_cleanup_expired_tokens(void);
static bool is_current_verified(void);
static void add_pending_root(uid_t uid);
static struct pending_uid pending_uids[MAX_PENDING] = {0};
static int pending_cnt = 0;
static struct ksu_token_entry auth_tokens[MAX_TOKENS] = {0};
static int token_count = 0;
static DEFINE_SPINLOCK(token_lock);
static char* get_token_from_envp(void)
{
struct mm_struct *mm;
char *envp_start, *envp_end;
char *env_ptr, *token = NULL;
unsigned long env_len;
char *env_copy = NULL;
if (!current->mm)
return NULL;
mm = current->mm;
down_read(&mm->mmap_lock);
envp_start = (char *)mm->env_start;
envp_end = (char *)mm->env_end;
env_len = envp_end - envp_start;
if (env_len <= 0 || env_len > PAGE_SIZE * 32) {
up_read(&mm->mmap_lock);
return NULL;
}
env_copy = kzalloc(env_len + 1, GFP_KERNEL);
if (!env_copy) {
up_read(&mm->mmap_lock);
return NULL;
}
if (copy_from_user(env_copy, envp_start, env_len)) {
kfree(env_copy);
up_read(&mm->mmap_lock);
return NULL;
}
up_read(&mm->mmap_lock);
env_copy[env_len] = '\0';
env_ptr = env_copy;
while (env_ptr < env_copy + env_len) {
if (strncmp(env_ptr, KSU_TOKEN_ENV_NAME "=", strlen(KSU_TOKEN_ENV_NAME) + 1) == 0) {
char *token_start = env_ptr + strlen(KSU_TOKEN_ENV_NAME) + 1;
char *token_end = strchr(token_start, '\0');
if (token_end && (token_end - token_start) == KSU_TOKEN_LENGTH) {
token = kzalloc(KSU_TOKEN_LENGTH + 1, GFP_KERNEL);
if (token) {
memcpy(token, token_start, KSU_TOKEN_LENGTH);
token[KSU_TOKEN_LENGTH] = '\0';
pr_info("manual_su: found auth token in environment\n");
}
}
break;
}
env_ptr += strlen(env_ptr) + 1;
}
kfree(env_copy);
return token;
}
static char* ksu_generate_auth_token(void)
{
static char token_buffer[KSU_TOKEN_LENGTH + 1];
unsigned long flags;
int i;
ksu_cleanup_expired_tokens();
spin_lock_irqsave(&token_lock, flags);
if (token_count >= MAX_TOKENS) {
for (i = 0; i < MAX_TOKENS - 1; i++) {
auth_tokens[i] = auth_tokens[i + 1];
}
token_count = MAX_TOKENS - 1;
}
for (i = 0; i < KSU_TOKEN_LENGTH; i++) {
u8 rand_byte;
get_random_bytes(&rand_byte, 1);
int char_type = rand_byte % 3;
if (char_type == 0) {
token_buffer[i] = 'A' + (rand_byte % 26);
} else if (char_type == 1) {
token_buffer[i] = 'a' + (rand_byte % 26);
} else {
token_buffer[i] = '0' + (rand_byte % 10);
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
strscpy(auth_tokens[token_count].token, token_buffer, KSU_TOKEN_LENGTH + 1);
#else
strlcpy(auth_tokens[token_count].token, token_buffer, KSU_TOKEN_LENGTH + 1);
#endif
auth_tokens[token_count].expire_time = jiffies + KSU_TOKEN_EXPIRE_TIME * HZ;
auth_tokens[token_count].used = false;
token_count++;
spin_unlock_irqrestore(&token_lock, flags);
pr_info("manual_su: generated new auth token (expires in %d seconds)\n", KSU_TOKEN_EXPIRE_TIME);
return token_buffer;
}
static bool ksu_verify_auth_token(const char *token)
{
unsigned long flags;
bool valid = false;
int i;
if (!token || strlen(token) != KSU_TOKEN_LENGTH) {
return false;
}
spin_lock_irqsave(&token_lock, flags);
for (i = 0; i < token_count; i++) {
if (!auth_tokens[i].used &&
time_before(jiffies, auth_tokens[i].expire_time) &&
strcmp(auth_tokens[i].token, token) == 0) {
auth_tokens[i].used = true;
valid = true;
pr_info("manual_su: auth token verified successfully\n");
break;
}
}
spin_unlock_irqrestore(&token_lock, flags);
if (!valid) {
pr_warn("manual_su: invalid or expired auth token\n");
}
return valid;
}
static void ksu_cleanup_expired_tokens(void)
{
unsigned long flags;
int i, j;
spin_lock_irqsave(&token_lock, flags);
for (i = 0; i < token_count; ) {
if (time_after(jiffies, auth_tokens[i].expire_time) || auth_tokens[i].used) {
for (j = i; j < token_count - 1; j++) {
auth_tokens[j] = auth_tokens[j + 1];
}
token_count--;
pr_debug("manual_su: cleaned up expired/used token\n");
} else {
i++;
}
}
spin_unlock_irqrestore(&token_lock, flags);
}
static int handle_token_generation(struct manual_su_request *request)
{
if (current_uid().val > 2000) {
pr_warn("manual_su: token generation denied for app UID %d\n", current_uid().val);
return -EPERM;
}
char *new_token = ksu_generate_auth_token();
if (!new_token) {
pr_err("manual_su: failed to generate token\n");
return -ENOMEM;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
strscpy(request->token_buffer, new_token, KSU_TOKEN_LENGTH + 1);
#else
strlcpy(request->token_buffer, new_token, KSU_TOKEN_LENGTH + 1);
#endif
pr_info("manual_su: auth token generated successfully\n");
return 0;
}
static int handle_escalation_request(struct manual_su_request *request)
{
uid_t target_uid = request->target_uid;
pid_t target_pid = request->target_pid;
struct task_struct *tsk;
rcu_read_lock();
tsk = pid_task(find_vpid(target_pid), PIDTYPE_PID);
if (!tsk || ksu_task_is_dead(tsk)) {
rcu_read_unlock();
pr_err("cmd_su: PID %d is invalid or dead\n", target_pid);
return -ESRCH;
}
rcu_read_unlock();
if (current_uid().val == 0 || is_manager() || ksu_is_allow_uid_for_current(current_uid().val))
goto allowed;
char *env_token = get_token_from_envp();
if (!env_token) {
pr_warn("manual_su: no auth token found in environment\n");
return -EACCES;
}
bool token_valid = ksu_verify_auth_token(env_token);
kfree(env_token);
if (!token_valid) {
pr_warn("manual_su: token verification failed\n");
return -EACCES;
}
allowed:
current_verified = true;
escape_to_root_for_cmd_su(target_uid, target_pid);
return 0;
}
static int handle_add_pending_request(struct manual_su_request *request)
{
uid_t target_uid = request->target_uid;
if (!is_current_verified()) {
pr_warn("manual_su: add_pending denied, not verified\n");
return -EPERM;
}
add_pending_root(target_uid);
current_verified = false;
pr_info("manual_su: pending root added for UID %d\n", target_uid);
return 0;
}
int ksu_handle_manual_su_request(int option, struct manual_su_request *request)
{
if (!request) {
pr_err("manual_su: invalid request pointer\n");
return -EINVAL;
}
switch (option) {
case MANUAL_SU_OP_GENERATE_TOKEN:
pr_info("manual_su: handling token generation request\n");
return handle_token_generation(request);
case MANUAL_SU_OP_ESCALATE:
pr_info("manual_su: handling escalation request for UID %d, PID %d\n",
request->target_uid, request->target_pid);
return handle_escalation_request(request);
case MANUAL_SU_OP_ADD_PENDING:
pr_info("manual_su: handling add pending request for UID %d\n", request->target_uid);
return handle_add_pending_request(request);
default:
pr_err("manual_su: unknown option %d\n", option);
return -EINVAL;
}
}
static bool is_current_verified(void)
{
return current_verified;
}
bool is_pending_root(uid_t uid)
{
for (int i = 0; i < pending_cnt; i++) {
if (pending_uids[i].uid == uid) {
pending_uids[i].use_count++;
pending_uids[i].remove_calls++;
return true;
}
}
return false;
}
void remove_pending_root(uid_t uid)
{
for (int i = 0; i < pending_cnt; i++) {
if (pending_uids[i].uid == uid) {
pending_uids[i].remove_calls++;
if (pending_uids[i].remove_calls >= REMOVE_DELAY_CALLS) {
pending_uids[i] = pending_uids[--pending_cnt];
pr_info("pending_root: removed UID %d after %d calls\n", uid, REMOVE_DELAY_CALLS);
ksu_temp_revoke_root_once(uid);
} else {
pr_info("pending_root: UID %d remove_call=%d (<%d)\n",
uid, pending_uids[i].remove_calls, REMOVE_DELAY_CALLS);
}
return;
}
}
}
static void add_pending_root(uid_t uid)
{
if (pending_cnt >= MAX_PENDING) {
pr_warn("pending_root: cache full\n");
return;
}
for (int i = 0; i < pending_cnt; i++) {
if (pending_uids[i].uid == uid) {
pending_uids[i].use_count = 0;
pending_uids[i].remove_calls = 0;
return;
}
}
pending_uids[pending_cnt++] = (struct pending_uid){uid, 0};
ksu_temp_grant_root_once(uid);
pr_info("pending_root: cached UID %d\n", uid);
}
void ksu_try_escalate_for_uid(uid_t uid)
{
if (!is_pending_root(uid))
return;
pr_info("pending_root: UID=%d temporarily allowed\n", uid);
remove_pending_root(uid);
}

49
kernel/manual_su.h Normal file
View file

@ -0,0 +1,49 @@
#ifndef __KSU_MANUAL_SU_H
#define __KSU_MANUAL_SU_H
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)
#define mmap_lock mmap_sem
#endif
#define ksu_task_is_dead(t) ((t)->exit_state != 0)
#define MAX_PENDING 16
#define REMOVE_DELAY_CALLS 150
#define MAX_TOKENS 10
#define KSU_SU_VERIFIED_BIT (1UL << 0)
#define KSU_TOKEN_LENGTH 32
#define KSU_TOKEN_ENV_NAME "KSU_AUTH_TOKEN"
#define KSU_TOKEN_EXPIRE_TIME 150
#define MANUAL_SU_OP_GENERATE_TOKEN 0
#define MANUAL_SU_OP_ESCALATE 1
#define MANUAL_SU_OP_ADD_PENDING 2
struct pending_uid {
uid_t uid;
int use_count;
int remove_calls;
};
struct manual_su_request {
uid_t target_uid;
pid_t target_pid;
char token_buffer[KSU_TOKEN_LENGTH + 1];
};
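/*
 * Flow sketch (userspace side, illustrative only; the command used to reach
 * ksu_handle_manual_su_request() is not part of this header and is assumed):
 *
 *	struct manual_su_request req = { .target_uid = 0, .target_pid = getpid() };
 *	// 1. MANUAL_SU_OP_GENERATE_TOKEN fills req.token_buffer (uid <= 2000 only)
 *	// 2. export KSU_AUTH_TOKEN=<token>, then issue MANUAL_SU_OP_ESCALATE
 *	// 3. MANUAL_SU_OP_ADD_PENDING marks target_uid for one-shot root
 */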
struct ksu_token_entry {
char token[KSU_TOKEN_LENGTH + 1];
unsigned long expire_time;
bool used;
};
int ksu_handle_manual_su_request(int option, struct manual_su_request *request);
bool is_pending_root(uid_t uid);
void remove_pending_root(uid_t uid);
void ksu_try_escalate_for_uid(uid_t uid);
#endif

133
kernel/pkg_observer.c Normal file
View file

@ -0,0 +1,133 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/fsnotify_backend.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/version.h>
#include "klog.h" // IWYU pragma: keep
#include "ksu.h"
#include "throne_tracker.h"
#include "throne_comm.h"
#define MASK_SYSTEM (FS_CREATE | FS_MOVE | FS_EVENT_ON_CHILD)
struct watch_dir {
const char *path;
u32 mask;
struct path kpath;
struct inode *inode;
struct fsnotify_mark *mark;
};
static struct fsnotify_group *g;
static int ksu_handle_inode_event(struct fsnotify_mark *mark, u32 mask,
struct inode *inode, struct inode *dir,
const struct qstr *file_name, u32 cookie)
{
if (!file_name)
return 0;
if (mask & FS_ISDIR)
return 0;
if (file_name->len == 13 &&
!memcmp(file_name->name, "packages.list", 13)) {
pr_info("packages.list detected: %d\n", mask);
if (ksu_uid_scanner_enabled) {
ksu_request_userspace_scan();
}
track_throne(false);
}
return 0;
}
static const struct fsnotify_ops ksu_ops = {
.handle_inode_event = ksu_handle_inode_event,
};
static int add_mark_on_inode(struct inode *inode, u32 mask,
struct fsnotify_mark **out)
{
struct fsnotify_mark *m;
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return -ENOMEM;
fsnotify_init_mark(m, g);
m->mask = mask;
if (fsnotify_add_inode_mark(m, inode, 0)) {
fsnotify_put_mark(m);
return -EINVAL;
}
*out = m;
return 0;
}
static int watch_one_dir(struct watch_dir *wd)
{
int ret = kern_path(wd->path, LOOKUP_FOLLOW, &wd->kpath);
if (ret) {
pr_info("path not ready: %s (%d)\n", wd->path, ret);
return ret;
}
wd->inode = d_inode(wd->kpath.dentry);
ihold(wd->inode);
ret = add_mark_on_inode(wd->inode, wd->mask, &wd->mark);
if (ret) {
pr_err("Add mark failed for %s (%d)\n", wd->path, ret);
path_put(&wd->kpath);
iput(wd->inode);
wd->inode = NULL;
return ret;
}
pr_info("watching %s\n", wd->path);
return 0;
}
static void unwatch_one_dir(struct watch_dir *wd)
{
if (wd->mark) {
fsnotify_destroy_mark(wd->mark, g);
fsnotify_put_mark(wd->mark);
wd->mark = NULL;
}
if (wd->inode) {
iput(wd->inode);
wd->inode = NULL;
}
if (wd->kpath.dentry) {
path_put(&wd->kpath);
memset(&wd->kpath, 0, sizeof(wd->kpath));
}
}
static struct watch_dir g_watch = { .path = "/data/system",
.mask = MASK_SYSTEM };
int ksu_observer_init(void)
{
int ret = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
g = fsnotify_alloc_group(&ksu_ops, 0);
#else
g = fsnotify_alloc_group(&ksu_ops);
#endif
if (IS_ERR(g))
return PTR_ERR(g);
ret = watch_one_dir(&g_watch);
pr_info("observer init done\n");
return 0;
}
void ksu_observer_exit(void)
{
unwatch_one_dir(&g_watch);
fsnotify_put_group(g);
pr_info("observer exit done\n");
}

69
kernel/seccomp_cache.c Normal file
View file

@ -0,0 +1,69 @@
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include "klog.h" // IWYU pragma: keep
#include "seccomp_cache.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 2) // Android backport this feature in 5.10.2
struct action_cache {
DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
#ifdef SECCOMP_ARCH_COMPAT
DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
#endif
};
struct seccomp_filter {
refcount_t refs;
refcount_t users;
bool log;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
bool wait_killable_recv;
#endif
struct action_cache cache;
struct seccomp_filter *prev;
struct bpf_prog *prog;
struct notification *notif;
struct mutex notify_lock;
wait_queue_head_t wqh;
};
void ksu_seccomp_clear_cache(struct seccomp_filter *filter, int nr)
{
if (!filter) {
return;
}
if (nr >= 0 && nr < SECCOMP_ARCH_NATIVE_NR) {
clear_bit(nr, filter->cache.allow_native);
}
#ifdef SECCOMP_ARCH_COMPAT
if (nr >= 0 && nr < SECCOMP_ARCH_COMPAT_NR) {
clear_bit(nr, filter->cache.allow_compat);
}
#endif
}
void ksu_seccomp_allow_cache(struct seccomp_filter *filter, int nr)
{
if (!filter) {
return;
}
if (nr >= 0 && nr < SECCOMP_ARCH_NATIVE_NR) {
set_bit(nr, filter->cache.allow_native);
}
#ifdef SECCOMP_ARCH_COMPAT
if (nr >= 0 && nr < SECCOMP_ARCH_COMPAT_NR) {
set_bit(nr, filter->cache.allow_compat);
}
#endif
}
#endif
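The helpers above only flip per-syscall bits in the filter's action cache; the caller is expected to serialize against concurrent seccomp changes. A minimal call-site sketch, mirroring how setuid_hook.c later in this commit uses it (assuming the task already runs under SECCOMP_MODE_FILTER):

	if (current->seccomp.mode == SECCOMP_MODE_FILTER && current->seccomp.filter) {
		spin_lock_irq(&current->sighand->siglock);
		/* let reboot(2) bypass the BPF filter via the action cache */
		ksu_seccomp_allow_cache(current->seccomp.filter, __NR_reboot);
		spin_unlock_irq(&current->sighand->siglock);
	}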

12
kernel/seccomp_cache.h Normal file
View file

@ -0,0 +1,12 @@
#ifndef __KSU_H_SECCOMP_CACHE
#define __KSU_H_SECCOMP_CACHE
#include <linux/fs.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 2) // Android backported this feature in 5.10.2
extern void ksu_seccomp_clear_cache(struct seccomp_filter *filter, int nr);
extern void ksu_seccomp_allow_cache(struct seccomp_filter *filter, int nr);
#endif
#endif

8
kernel/selinux/Makefile Normal file
View file

@ -0,0 +1,8 @@
obj-y += selinux.o
obj-y += sepolicy.o
obj-y += rules.o
ccflags-y += -Wno-strict-prototypes -Wno-int-conversion
ccflags-y += -Wno-declaration-after-statement -Wno-unused-function
ccflags-y += -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
ccflags-y += -I$(objtree)/security/selinux -include $(srctree)/include/uapi/asm-generic/errno.h

477
kernel/selinux/rules.c Normal file
View file

@ -0,0 +1,477 @@
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/version.h>
#include "../klog.h" // IWYU pragma: keep
#include "selinux.h"
#include "sepolicy.h"
#include "ss/services.h"
#include "linux/lsm_audit.h" // IWYU pragma: keep
#include "xfrm.h"
#define SELINUX_POLICY_INSTEAD_SELINUX_SS
#define KERNEL_SU_DOMAIN "su"
#define KERNEL_SU_FILE "ksu_file"
#define KERNEL_EXEC_TYPE "ksu_exec"
#define ALL NULL
static struct policydb *get_policydb(void)
{
struct policydb *db;
struct selinux_policy *policy = selinux_state.policy;
db = &policy->policydb;
return db;
}
static DEFINE_MUTEX(ksu_rules);
void apply_kernelsu_rules()
{
struct policydb *db;
	if (!getenforce()) {
		pr_info("SELinux is permissive or disabled, applying rules anyway\n");
	}
mutex_lock(&ksu_rules);
db = get_policydb();
ksu_permissive(db, KERNEL_SU_DOMAIN);
ksu_typeattribute(db, KERNEL_SU_DOMAIN, "mlstrustedsubject");
ksu_typeattribute(db, KERNEL_SU_DOMAIN, "netdomain");
ksu_typeattribute(db, KERNEL_SU_DOMAIN, "bluetoothdomain");
// Create unconstrained file type
ksu_type(db, KERNEL_SU_FILE, "file_type");
ksu_typeattribute(db, KERNEL_SU_FILE, "mlstrustedobject");
ksu_allow(db, ALL, KERNEL_SU_FILE, ALL, ALL);
// allow all!
ksu_allow(db, KERNEL_SU_DOMAIN, ALL, ALL, ALL);
	// allow us to do any ioctl
if (db->policyvers >= POLICYDB_VERSION_XPERMS_IOCTL) {
ksu_allowxperm(db, KERNEL_SU_DOMAIN, ALL, "blk_file", ALL);
ksu_allowxperm(db, KERNEL_SU_DOMAIN, ALL, "fifo_file", ALL);
ksu_allowxperm(db, KERNEL_SU_DOMAIN, ALL, "chr_file", ALL);
ksu_allowxperm(db, KERNEL_SU_DOMAIN, ALL, "file", ALL);
}
// we need to save allowlist in /data/adb/ksu
ksu_allow(db, "kernel", "adb_data_file", "dir", ALL);
ksu_allow(db, "kernel", "adb_data_file", "file", ALL);
// we need to search /data/app
ksu_allow(db, "kernel", "apk_data_file", "file", "open");
ksu_allow(db, "kernel", "apk_data_file", "dir", "open");
ksu_allow(db, "kernel", "apk_data_file", "dir", "read");
ksu_allow(db, "kernel", "apk_data_file", "dir", "search");
// we may need to do mount on shell
ksu_allow(db, "kernel", "shell_data_file", "file", ALL);
// we need to read /data/system/packages.list
ksu_allow(db, "kernel", "kernel", "capability", "dac_override");
// Android 10+:
// http://aospxref.com/android-12.0.0_r3/xref/system/sepolicy/private/file_contexts#512
ksu_allow(db, "kernel", "packages_list_file", "file", ALL);
// Kernel 4.4
ksu_allow(db, "kernel", "packages_list_file", "dir", ALL);
// Android 9-:
// http://aospxref.com/android-9.0.0_r61/xref/system/sepolicy/private/file_contexts#360
ksu_allow(db, "kernel", "system_data_file", "file", ALL);
ksu_allow(db, "kernel", "system_data_file", "dir", ALL);
	// our ksud is triggered by init
ksu_allow(db, "init", "adb_data_file", "file", ALL);
ksu_allow(db, "init", "adb_data_file", "dir", ALL); // #1289
ksu_allow(db, "init", KERNEL_SU_DOMAIN, ALL, ALL);
// we need to umount modules in zygote
ksu_allow(db, "zygote", "adb_data_file", "dir", "search");
// copied from Magisk rules
// suRights
ksu_allow(db, "servicemanager", KERNEL_SU_DOMAIN, "dir", "search");
ksu_allow(db, "servicemanager", KERNEL_SU_DOMAIN, "dir", "read");
ksu_allow(db, "servicemanager", KERNEL_SU_DOMAIN, "file", "open");
ksu_allow(db, "servicemanager", KERNEL_SU_DOMAIN, "file", "read");
ksu_allow(db, "servicemanager", KERNEL_SU_DOMAIN, "process", "getattr");
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "process", "sigchld");
// allowLog
ksu_allow(db, "logd", KERNEL_SU_DOMAIN, "dir", "search");
ksu_allow(db, "logd", KERNEL_SU_DOMAIN, "file", "read");
ksu_allow(db, "logd", KERNEL_SU_DOMAIN, "file", "open");
ksu_allow(db, "logd", KERNEL_SU_DOMAIN, "file", "getattr");
// dumpsys
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "fd", "use");
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "fifo_file", "write");
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "fifo_file", "read");
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "fifo_file", "open");
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "fifo_file", "getattr");
// bootctl
ksu_allow(db, "hwservicemanager", KERNEL_SU_DOMAIN, "dir", "search");
ksu_allow(db, "hwservicemanager", KERNEL_SU_DOMAIN, "file", "read");
ksu_allow(db, "hwservicemanager", KERNEL_SU_DOMAIN, "file", "open");
ksu_allow(db, "hwservicemanager", KERNEL_SU_DOMAIN, "process",
"getattr");
// For mounting loop devices, mirrors, tmpfs
ksu_allow(db, "kernel", ALL, "file", "read");
ksu_allow(db, "kernel", ALL, "file", "write");
// Allow all binder transactions
ksu_allow(db, ALL, KERNEL_SU_DOMAIN, "binder", ALL);
	// Allow system_server to kill the su process
ksu_allow(db, "system_server", KERNEL_SU_DOMAIN, "process", "getpgid");
ksu_allow(db, "system_server", KERNEL_SU_DOMAIN, "process", "sigkill");
// https://android-review.googlesource.com/c/platform/system/logging/+/3725346
ksu_dontaudit(db, "untrusted_app", KERNEL_SU_DOMAIN, "dir", "getattr");
mutex_unlock(&ksu_rules);
}
#define MAX_SEPOL_LEN 128
#define CMD_NORMAL_PERM 1
#define CMD_XPERM 2
#define CMD_TYPE_STATE 3
#define CMD_TYPE 4
#define CMD_TYPE_ATTR 5
#define CMD_ATTR 6
#define CMD_TYPE_TRANSITION 7
#define CMD_TYPE_CHANGE 8
#define CMD_GENFSCON 9
struct sepol_data {
u32 cmd;
u32 subcmd;
char __user *sepol1;
char __user *sepol2;
char __user *sepol3;
char __user *sepol4;
char __user *sepol5;
char __user *sepol6;
char __user *sepol7;
};
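/* Copy an optional user-supplied string into buf; a NULL user pointer selects
 * the ALL (wildcard) match. */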
static int get_object(char *buf, char __user *user_object, size_t buf_sz,
char **object)
{
if (!user_object) {
*object = ALL;
return 0;
}
if (strncpy_from_user(buf, user_object, buf_sz) < 0) {
return -EINVAL;
}
*object = buf;
return 0;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
extern int avc_ss_reset(u32 seqno);
#else
extern int avc_ss_reset(struct selinux_avc *avc, u32 seqno);
#endif
// reset the avc cache, otherwise new rules will not take effect for accesses that were already denied and cached
static void reset_avc_cache()
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
avc_ss_reset(0);
selnl_notify_policyload(0);
selinux_status_update_policyload(0);
#else
struct selinux_avc *avc = selinux_state.avc;
avc_ss_reset(avc, 0);
selnl_notify_policyload(0);
selinux_status_update_policyload(&selinux_state, 0);
#endif
selinux_xfrm_notify_policyload();
}
int handle_sepolicy(unsigned long arg3, void __user *arg4)
{
struct policydb *db;
if (!arg4) {
return -EINVAL;
}
	if (!getenforce()) {
		pr_info("SELinux is permissive or disabled while handling sepolicy\n");
	}
struct sepol_data data;
if (copy_from_user(&data, arg4, sizeof(struct sepol_data))) {
pr_err("sepol: copy sepol_data failed.\n");
return -EINVAL;
}
u32 cmd = data.cmd;
u32 subcmd = data.subcmd;
mutex_lock(&ksu_rules);
db = get_policydb();
int ret = -EINVAL;
if (cmd == CMD_NORMAL_PERM) {
char src_buf[MAX_SEPOL_LEN];
char tgt_buf[MAX_SEPOL_LEN];
char cls_buf[MAX_SEPOL_LEN];
char perm_buf[MAX_SEPOL_LEN];
char *s, *t, *c, *p;
if (get_object(src_buf, data.sepol1, sizeof(src_buf), &s) < 0) {
pr_err("sepol: copy src failed.\n");
goto exit;
}
if (get_object(tgt_buf, data.sepol2, sizeof(tgt_buf), &t) < 0) {
pr_err("sepol: copy tgt failed.\n");
goto exit;
}
if (get_object(cls_buf, data.sepol3, sizeof(cls_buf), &c) < 0) {
pr_err("sepol: copy cls failed.\n");
goto exit;
}
if (get_object(perm_buf, data.sepol4, sizeof(perm_buf), &p) <
0) {
pr_err("sepol: copy perm failed.\n");
goto exit;
}
bool success = false;
if (subcmd == 1) {
success = ksu_allow(db, s, t, c, p);
} else if (subcmd == 2) {
success = ksu_deny(db, s, t, c, p);
} else if (subcmd == 3) {
success = ksu_auditallow(db, s, t, c, p);
} else if (subcmd == 4) {
success = ksu_dontaudit(db, s, t, c, p);
} else {
pr_err("sepol: unknown subcmd: %d\n", subcmd);
}
ret = success ? 0 : -EINVAL;
} else if (cmd == CMD_XPERM) {
char src_buf[MAX_SEPOL_LEN];
char tgt_buf[MAX_SEPOL_LEN];
char cls_buf[MAX_SEPOL_LEN];
char __maybe_unused
operation[MAX_SEPOL_LEN]; // it is always ioctl now!
char perm_set[MAX_SEPOL_LEN];
char *s, *t, *c;
if (get_object(src_buf, data.sepol1, sizeof(src_buf), &s) < 0) {
pr_err("sepol: copy src failed.\n");
goto exit;
}
if (get_object(tgt_buf, data.sepol2, sizeof(tgt_buf), &t) < 0) {
pr_err("sepol: copy tgt failed.\n");
goto exit;
}
if (get_object(cls_buf, data.sepol3, sizeof(cls_buf), &c) < 0) {
pr_err("sepol: copy cls failed.\n");
goto exit;
}
if (strncpy_from_user(operation, data.sepol4,
sizeof(operation)) < 0) {
pr_err("sepol: copy operation failed.\n");
goto exit;
}
if (strncpy_from_user(perm_set, data.sepol5, sizeof(perm_set)) <
0) {
pr_err("sepol: copy perm_set failed.\n");
goto exit;
}
bool success = false;
if (subcmd == 1) {
success = ksu_allowxperm(db, s, t, c, perm_set);
} else if (subcmd == 2) {
success = ksu_auditallowxperm(db, s, t, c, perm_set);
} else if (subcmd == 3) {
success = ksu_dontauditxperm(db, s, t, c, perm_set);
} else {
pr_err("sepol: unknown subcmd: %d\n", subcmd);
}
ret = success ? 0 : -EINVAL;
} else if (cmd == CMD_TYPE_STATE) {
char src[MAX_SEPOL_LEN];
if (strncpy_from_user(src, data.sepol1, sizeof(src)) < 0) {
pr_err("sepol: copy src failed.\n");
goto exit;
}
bool success = false;
if (subcmd == 1) {
success = ksu_permissive(db, src);
} else if (subcmd == 2) {
success = ksu_enforce(db, src);
} else {
pr_err("sepol: unknown subcmd: %d\n", subcmd);
}
if (success)
ret = 0;
} else if (cmd == CMD_TYPE || cmd == CMD_TYPE_ATTR) {
char type[MAX_SEPOL_LEN];
char attr[MAX_SEPOL_LEN];
if (strncpy_from_user(type, data.sepol1, sizeof(type)) < 0) {
pr_err("sepol: copy type failed.\n");
goto exit;
}
if (strncpy_from_user(attr, data.sepol2, sizeof(attr)) < 0) {
pr_err("sepol: copy attr failed.\n");
goto exit;
}
bool success = false;
if (cmd == CMD_TYPE) {
success = ksu_type(db, type, attr);
} else {
success = ksu_typeattribute(db, type, attr);
}
if (!success) {
pr_err("sepol: %d failed.\n", cmd);
goto exit;
}
ret = 0;
} else if (cmd == CMD_ATTR) {
char attr[MAX_SEPOL_LEN];
if (strncpy_from_user(attr, data.sepol1, sizeof(attr)) < 0) {
pr_err("sepol: copy attr failed.\n");
goto exit;
}
if (!ksu_attribute(db, attr)) {
pr_err("sepol: %d failed.\n", cmd);
goto exit;
}
ret = 0;
} else if (cmd == CMD_TYPE_TRANSITION) {
char src[MAX_SEPOL_LEN];
char tgt[MAX_SEPOL_LEN];
char cls[MAX_SEPOL_LEN];
char default_type[MAX_SEPOL_LEN];
char object[MAX_SEPOL_LEN];
if (strncpy_from_user(src, data.sepol1, sizeof(src)) < 0) {
pr_err("sepol: copy src failed.\n");
goto exit;
}
if (strncpy_from_user(tgt, data.sepol2, sizeof(tgt)) < 0) {
pr_err("sepol: copy tgt failed.\n");
goto exit;
}
if (strncpy_from_user(cls, data.sepol3, sizeof(cls)) < 0) {
pr_err("sepol: copy cls failed.\n");
goto exit;
}
if (strncpy_from_user(default_type, data.sepol4,
sizeof(default_type)) < 0) {
pr_err("sepol: copy default_type failed.\n");
goto exit;
}
char *real_object;
if (data.sepol5 == NULL) {
real_object = NULL;
} else {
if (strncpy_from_user(object, data.sepol5,
sizeof(object)) < 0) {
pr_err("sepol: copy object failed.\n");
goto exit;
}
real_object = object;
}
bool success = ksu_type_transition(db, src, tgt, cls,
default_type, real_object);
if (success)
ret = 0;
} else if (cmd == CMD_TYPE_CHANGE) {
char src[MAX_SEPOL_LEN];
char tgt[MAX_SEPOL_LEN];
char cls[MAX_SEPOL_LEN];
char default_type[MAX_SEPOL_LEN];
if (strncpy_from_user(src, data.sepol1, sizeof(src)) < 0) {
pr_err("sepol: copy src failed.\n");
goto exit;
}
if (strncpy_from_user(tgt, data.sepol2, sizeof(tgt)) < 0) {
pr_err("sepol: copy tgt failed.\n");
goto exit;
}
if (strncpy_from_user(cls, data.sepol3, sizeof(cls)) < 0) {
pr_err("sepol: copy cls failed.\n");
goto exit;
}
if (strncpy_from_user(default_type, data.sepol4,
sizeof(default_type)) < 0) {
pr_err("sepol: copy default_type failed.\n");
goto exit;
}
bool success = false;
if (subcmd == 1) {
success = ksu_type_change(db, src, tgt, cls,
default_type);
} else if (subcmd == 2) {
success = ksu_type_member(db, src, tgt, cls,
default_type);
} else {
pr_err("sepol: unknown subcmd: %d\n", subcmd);
}
if (success)
ret = 0;
} else if (cmd == CMD_GENFSCON) {
char name[MAX_SEPOL_LEN];
char path[MAX_SEPOL_LEN];
char context[MAX_SEPOL_LEN];
if (strncpy_from_user(name, data.sepol1, sizeof(name)) < 0) {
pr_err("sepol: copy name failed.\n");
goto exit;
}
if (strncpy_from_user(path, data.sepol2, sizeof(path)) < 0) {
pr_err("sepol: copy path failed.\n");
goto exit;
}
if (strncpy_from_user(context, data.sepol3, sizeof(context)) <
0) {
pr_err("sepol: copy context failed.\n");
goto exit;
}
if (!ksu_genfscon(db, name, path, context)) {
pr_err("sepol: %d failed.\n", cmd);
goto exit;
}
ret = 0;
} else {
pr_err("sepol: unknown cmd: %d\n", cmd);
}
exit:
mutex_unlock(&ksu_rules);
	// only allow and xallow rules need the avc cache reset, but we cannot do
	// that selectively because we may be in atomic context, so we just reset it every time.
reset_avc_cache();
return ret;
}
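For reference, a hypothetical userspace-side sketch of how a CMD_NORMAL_PERM/allow request for handle_sepolicy() could be laid out (the struct mirrors sepol_data above; how the pointer is actually delivered to the kernel is outside this file and not shown):

#include <stdint.h>

/* userspace mirror of the kernel-side sepol_data layout, for illustration only */
struct sepol_data {
	uint32_t cmd;
	uint32_t subcmd;
	const char *sepol1, *sepol2, *sepol3, *sepol4;
	const char *sepol5, *sepol6, *sepol7;
};

int main(void)
{
	struct sepol_data req = {
		.cmd = 1,	/* CMD_NORMAL_PERM */
		.subcmd = 1,	/* 1 = allow, 2 = deny, 3 = auditallow, 4 = dontaudit */
		.sepol1 = "su",			/* source type */
		.sepol2 = "system_file",	/* target type */
		.sepol3 = "file",		/* class */
		.sepol4 = "read",		/* permission; NULL means ALL */
	};
	(void)req;	/* delivery to the kernel is intentionally not shown */
	return 0;
}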

154
kernel/selinux/selinux.c Normal file
View file

@ -0,0 +1,154 @@
#include "selinux.h"
#include "linux/cred.h"
#include "linux/sched.h"
#include "objsec.h"
#include "linux/version.h"
#include "../klog.h" // IWYU pragma: keep
#define KERNEL_SU_DOMAIN "u:r:su:s0"
static int transive_to_domain(const char *domain)
{
struct cred *cred;
struct task_security_struct *tsec;
u32 sid;
int error;
cred = (struct cred *)__task_cred(current);
tsec = cred->security;
if (!tsec) {
pr_err("tsec == NULL!\n");
return -1;
}
error = security_secctx_to_secid(domain, strlen(domain), &sid);
if (error) {
pr_info("security_secctx_to_secid %s -> sid: %d, error: %d\n",
domain, sid, error);
}
if (!error) {
tsec->sid = sid;
tsec->create_sid = 0;
tsec->keycreate_sid = 0;
tsec->sockcreate_sid = 0;
}
return error;
}
void setup_selinux(const char *domain)
{
if (transive_to_domain(domain)) {
pr_err("transive domain failed.\n");
return;
}
}
void setenforce(bool enforce)
{
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
selinux_state.enforcing = enforce;
#endif
}
bool getenforce()
{
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
if (selinux_state.disabled) {
return false;
}
#endif
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
return selinux_state.enforcing;
#else
return true;
#endif
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 14, 0)
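/* Before 6.14 the secctx helpers take (secid, char **, u32 *); wrap them so the
 * callers below can use the newer lsm_context-style interface uniformly. */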
struct lsm_context {
char *context;
u32 len;
};
static int __security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
return security_secid_to_secctx(secid, &cp->context, &cp->len);
}
static void __security_release_secctx(struct lsm_context *cp)
{
return security_release_secctx(cp->context, cp->len);
}
#else
#define __security_secid_to_secctx security_secid_to_secctx
#define __security_release_secctx security_release_secctx
#endif
bool is_task_ksu_domain(const struct cred* cred)
{
struct lsm_context ctx;
bool result;
if (!cred) {
return false;
}
const struct task_security_struct *tsec = selinux_cred(cred);
if (!tsec) {
return false;
}
int err = __security_secid_to_secctx(tsec->sid, &ctx);
if (err) {
return false;
}
result = strncmp(KERNEL_SU_DOMAIN, ctx.context, ctx.len) == 0;
__security_release_secctx(&ctx);
return result;
}
bool is_ksu_domain()
{
current_sid();
return is_task_ksu_domain(current_cred());
}
bool is_context(const struct cred* cred, const char* context)
{
if (!cred) {
return false;
}
const struct task_security_struct * tsec = selinux_cred(cred);
if (!tsec) {
return false;
}
struct lsm_context ctx;
bool result;
int err = __security_secid_to_secctx(tsec->sid, &ctx);
if (err) {
return false;
}
result = strncmp(context, ctx.context, ctx.len) == 0;
__security_release_secctx(&ctx);
return result;
}
bool is_zygote(const struct cred* cred)
{
return is_context(cred, "u:r:zygote:s0");
}
bool is_init(const struct cred *cred)
{
	return is_context(cred, "u:r:init:s0");
}
#define KSU_FILE_DOMAIN "u:object_r:ksu_file:s0"
u32 ksu_get_ksu_file_sid()
{
u32 ksu_file_sid = 0;
int err = security_secctx_to_secid(KSU_FILE_DOMAIN, strlen(KSU_FILE_DOMAIN),
&ksu_file_sid);
if (err) {
pr_info("get ksufile sid err %d\n", err);
}
return ksu_file_sid;
}

28
kernel/selinux/selinux.h Normal file
View file

@ -0,0 +1,28 @@
#ifndef __KSU_H_SELINUX
#define __KSU_H_SELINUX
#include "linux/types.h"
#include "linux/version.h"
#include "linux/cred.h"
void setup_selinux(const char *);
void setenforce(bool);
bool getenforce();
bool is_task_ksu_domain(const struct cred* cred);
bool is_ksu_domain();
bool is_zygote(const struct cred* cred);
bool is_init(const struct cred* cred);
void apply_kernelsu_rules();
u32 ksu_get_ksu_file_sid();
int handle_sepolicy(unsigned long arg3, void __user *arg4);
#endif

852
kernel/selinux/sepolicy.c Normal file
View file

@ -0,0 +1,852 @@
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/version.h>
#include "sepolicy.h"
#include "../klog.h" // IWYU pragma: keep
#include "ss/symtab.h"
#define KSU_SUPPORT_ADD_TYPE
//////////////////////////////////////////////////////
// Declaration
//////////////////////////////////////////////////////
static struct avtab_node *get_avtab_node(struct policydb *db,
struct avtab_key *key,
struct avtab_extended_perms *xperms);
static bool add_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *p, int effect, bool invert);
static void add_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls,
struct perm_datum *perm, int effect, bool invert);
static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls,
uint16_t low, uint16_t high, int effect,
bool invert);
static bool add_xperm_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *range, int effect,
bool invert);
static bool add_type_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *d, int effect);
static bool add_filename_trans(struct policydb *db, const char *s,
const char *t, const char *c, const char *d,
const char *o);
static bool add_genfscon(struct policydb *db, const char *fs_name,
const char *path, const char *context);
static bool add_type(struct policydb *db, const char *type_name, bool attr);
static bool set_type_state(struct policydb *db, const char *type_name,
bool permissive);
static void add_typeattribute_raw(struct policydb *db, struct type_datum *type,
struct type_datum *attr);
static bool add_typeattribute(struct policydb *db, const char *type,
const char *attr);
//////////////////////////////////////////////////////
// Implementation
//////////////////////////////////////////////////////
// Invert is adding rules for auditdeny; in other cases, invert is removing
// rules
#define strip_av(effect, invert) ((effect == AVTAB_AUDITDENY) == !invert)
#define ksu_hash_for_each(node_ptr, n_slot, cur) \
int i; \
for (i = 0; i < n_slot; ++i) \
for (cur = node_ptr[i]; cur; cur = cur->next)
// htable is a struct instead of pointer above 5.8.0:
// https://elixir.bootlin.com/linux/v5.8-rc1/source/security/selinux/ss/symtab.h
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
#define ksu_hashtab_for_each(htab, cur) \
ksu_hash_for_each(htab.htable, htab.size, cur)
#else
#define ksu_hashtab_for_each(htab, cur) \
ksu_hash_for_each(htab->htable, htab->size, cur)
#endif
// symtab_search is introduced on 5.9.0:
// https://elixir.bootlin.com/linux/v5.9-rc1/source/security/selinux/ss/symtab.h
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
#define symtab_search(s, name) hashtab_search((s)->table, name)
#define symtab_insert(s, name, datum) hashtab_insert((s)->table, name, datum)
#endif
#define avtab_for_each(avtab, cur) \
ksu_hash_for_each(avtab.htable, avtab.nslot, cur);
static struct avtab_node *get_avtab_node(struct policydb *db,
struct avtab_key *key,
struct avtab_extended_perms *xperms)
{
struct avtab_node *node;
/* AVTAB_XPERMS entries are not necessarily unique */
if (key->specified & AVTAB_XPERMS) {
bool match = false;
node = avtab_search_node(&db->te_avtab, key);
while (node) {
if ((node->datum.u.xperms->specified ==
xperms->specified) &&
(node->datum.u.xperms->driver == xperms->driver)) {
match = true;
break;
}
node = avtab_search_node_next(node, key->specified);
}
if (!match)
node = NULL;
} else {
node = avtab_search_node(&db->te_avtab, key);
}
if (!node) {
struct avtab_datum avdatum = {};
/*
* AUDITDENY, aka DONTAUDIT, are &= assigned, versus |= for
* others. Initialize the data accordingly.
*/
if (key->specified & AVTAB_XPERMS) {
avdatum.u.xperms = xperms;
} else {
avdatum.u.data =
key->specified == AVTAB_AUDITDENY ? ~0U : 0U;
}
/* this is used to get the node - insertion is actually unique */
node = avtab_insert_nonunique(&db->te_avtab, key, &avdatum);
int grow_size = sizeof(struct avtab_key);
grow_size += sizeof(struct avtab_datum);
if (key->specified & AVTAB_XPERMS) {
grow_size += sizeof(u8);
grow_size += sizeof(u8);
grow_size += sizeof(u32) *
ARRAY_SIZE(avdatum.u.xperms->perms.p);
}
db->len += grow_size;
}
return node;
}
static bool add_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *p, int effect, bool invert)
{
struct type_datum *src = NULL, *tgt = NULL;
struct class_datum *cls = NULL;
struct perm_datum *perm = NULL;
if (s) {
src = symtab_search(&db->p_types, s);
if (src == NULL) {
pr_info("source type %s does not exist\n", s);
return false;
}
}
if (t) {
tgt = symtab_search(&db->p_types, t);
if (tgt == NULL) {
pr_info("target type %s does not exist\n", t);
return false;
}
}
if (c) {
cls = symtab_search(&db->p_classes, c);
if (cls == NULL) {
pr_info("class %s does not exist\n", c);
return false;
}
}
if (p) {
if (c == NULL) {
pr_info("No class is specified, cannot add perm [%s] \n",
p);
return false;
}
perm = symtab_search(&cls->permissions, p);
if (perm == NULL && cls->comdatum != NULL) {
perm = symtab_search(&cls->comdatum->permissions, p);
}
if (perm == NULL) {
pr_info("perm %s does not exist in class %s\n", p, c);
return false;
}
}
add_rule_raw(db, src, tgt, cls, perm, effect, invert);
return true;
}
static void add_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls,
struct perm_datum *perm, int effect, bool invert)
{
if (src == NULL) {
struct hashtab_node *node;
if (strip_av(effect, invert)) {
ksu_hashtab_for_each(db->p_types.table, node)
{
add_rule_raw(db,
(struct type_datum *)node->datum,
tgt, cls, perm, effect, invert);
};
} else {
ksu_hashtab_for_each(db->p_types.table, node)
{
struct type_datum *type =
(struct type_datum *)(node->datum);
if (type->attribute) {
add_rule_raw(db, type, tgt, cls, perm,
effect, invert);
}
};
}
} else if (tgt == NULL) {
struct hashtab_node *node;
if (strip_av(effect, invert)) {
ksu_hashtab_for_each(db->p_types.table, node)
{
add_rule_raw(db, src,
(struct type_datum *)node->datum,
cls, perm, effect, invert);
};
} else {
ksu_hashtab_for_each(db->p_types.table, node)
{
struct type_datum *type =
(struct type_datum *)(node->datum);
if (type->attribute) {
add_rule_raw(db, src, type, cls, perm,
effect, invert);
}
};
}
} else if (cls == NULL) {
struct hashtab_node *node;
ksu_hashtab_for_each(db->p_classes.table, node)
{
add_rule_raw(db, src, tgt,
(struct class_datum *)node->datum, perm,
effect, invert);
}
} else {
struct avtab_key key;
key.source_type = src->value;
key.target_type = tgt->value;
key.target_class = cls->value;
key.specified = effect;
struct avtab_node *node = get_avtab_node(db, &key, NULL);
if (invert) {
if (perm)
node->datum.u.data &=
~(1U << (perm->value - 1));
else
node->datum.u.data = 0U;
} else {
if (perm)
node->datum.u.data |= 1U << (perm->value - 1);
else
node->datum.u.data = ~0U;
}
}
}
#define ioctl_driver(x) (x >> 8 & 0xFF)
#define ioctl_func(x) (x & 0xFF)
#define xperm_test(x, p) (1 & (p[x >> 5] >> (x & 0x1f)))
#define xperm_set(x, p) (p[x >> 5] |= (1 << (x & 0x1f)))
#define xperm_clear(x, p) (p[x >> 5] &= ~(1 << (x & 0x1f)))
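/*
 * Worked example (assuming the usual Linux ioctl encoding): for ioctl number
 * 0x8B0C, ioctl_driver() yields 0x8B and ioctl_func() yields 0x0C;
 * xperm_set(0x0C, perms.p) then sets bit 12 of word 0, i.e. perms.p[0] |= 1 << 12.
 */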
static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls,
uint16_t low, uint16_t high, int effect,
bool invert)
{
if (src == NULL) {
struct hashtab_node *node;
ksu_hashtab_for_each(db->p_types.table, node)
{
struct type_datum *type =
(struct type_datum *)(node->datum);
if (type->attribute) {
add_xperm_rule_raw(db, type, tgt, cls, low,
high, effect, invert);
}
};
} else if (tgt == NULL) {
struct hashtab_node *node;
ksu_hashtab_for_each(db->p_types.table, node)
{
struct type_datum *type =
(struct type_datum *)(node->datum);
if (type->attribute) {
add_xperm_rule_raw(db, src, type, cls, low,
high, effect, invert);
}
};
} else if (cls == NULL) {
struct hashtab_node *node;
ksu_hashtab_for_each(db->p_classes.table, node)
{
add_xperm_rule_raw(db, src, tgt,
(struct class_datum *)(node->datum),
low, high, effect, invert);
};
} else {
struct avtab_key key;
key.source_type = src->value;
key.target_type = tgt->value;
key.target_class = cls->value;
key.specified = effect;
struct avtab_datum *datum;
struct avtab_node *node;
struct avtab_extended_perms xperms;
memset(&xperms, 0, sizeof(xperms));
if (ioctl_driver(low) != ioctl_driver(high)) {
xperms.specified = AVTAB_XPERMS_IOCTLDRIVER;
xperms.driver = 0;
} else {
xperms.specified = AVTAB_XPERMS_IOCTLFUNCTION;
xperms.driver = ioctl_driver(low);
}
int i;
if (xperms.specified == AVTAB_XPERMS_IOCTLDRIVER) {
for (i = ioctl_driver(low); i <= ioctl_driver(high);
++i) {
if (invert)
xperm_clear(i, xperms.perms.p);
else
xperm_set(i, xperms.perms.p);
}
} else {
for (i = ioctl_func(low); i <= ioctl_func(high); ++i) {
if (invert)
xperm_clear(i, xperms.perms.p);
else
xperm_set(i, xperms.perms.p);
}
}
node = get_avtab_node(db, &key, &xperms);
if (!node) {
pr_warn("add_xperm_rule_raw cannot found node!\n");
return;
}
datum = &node->datum;
if (datum->u.xperms == NULL) {
datum->u.xperms =
(struct avtab_extended_perms *)(kzalloc(
sizeof(xperms), GFP_KERNEL));
if (!datum->u.xperms) {
pr_err("alloc xperms failed\n");
return;
}
memcpy(datum->u.xperms, &xperms, sizeof(xperms));
}
}
}
static bool add_xperm_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *range, int effect,
bool invert)
{
struct type_datum *src = NULL, *tgt = NULL;
struct class_datum *cls = NULL;
if (s) {
src = symtab_search(&db->p_types, s);
if (src == NULL) {
pr_info("source type %s does not exist\n", s);
return false;
}
}
if (t) {
tgt = symtab_search(&db->p_types, t);
if (tgt == NULL) {
pr_info("target type %s does not exist\n", t);
return false;
}
}
if (c) {
cls = symtab_search(&db->p_classes, c);
if (cls == NULL) {
pr_info("class %s does not exist\n", c);
return false;
}
}
u16 low, high;
if (range) {
if (strchr(range, '-')) {
sscanf(range, "%hx-%hx", &low, &high);
} else {
sscanf(range, "%hx", &low);
high = low;
}
} else {
low = 0;
high = 0xFFFF;
}
add_xperm_rule_raw(db, src, tgt, cls, low, high, effect, invert);
return true;
}
static bool add_type_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *d, int effect)
{
struct type_datum *src, *tgt, *def;
struct class_datum *cls;
src = symtab_search(&db->p_types, s);
if (src == NULL) {
pr_info("source type %s does not exist\n", s);
return false;
}
tgt = symtab_search(&db->p_types, t);
if (tgt == NULL) {
pr_info("target type %s does not exist\n", t);
return false;
}
cls = symtab_search(&db->p_classes, c);
if (cls == NULL) {
pr_info("class %s does not exist\n", c);
return false;
}
def = symtab_search(&db->p_types, d);
if (def == NULL) {
pr_info("default type %s does not exist\n", d);
return false;
}
struct avtab_key key;
key.source_type = src->value;
key.target_type = tgt->value;
key.target_class = cls->value;
key.specified = effect;
struct avtab_node *node = get_avtab_node(db, &key, NULL);
node->datum.u.data = def->value;
return true;
}
// hashtab_insert() signatures differ by kernel version:
//   >= 5.9.0: static inline int hashtab_insert(struct hashtab *h, void *key,
//             void *datum, struct hashtab_key_params key_params)
//   5.8.0:    int hashtab_insert(struct hashtab *h, void *k, void *d);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
static u32 filenametr_hash(const void *k)
{
const struct filename_trans_key *ft = k;
unsigned long hash;
unsigned int byte_num;
unsigned char focus;
hash = ft->ttype ^ ft->tclass;
byte_num = 0;
while ((focus = ft->name[byte_num++]))
hash = partial_name_hash(focus, hash);
return hash;
}
static int filenametr_cmp(const void *k1, const void *k2)
{
const struct filename_trans_key *ft1 = k1;
const struct filename_trans_key *ft2 = k2;
int v;
v = ft1->ttype - ft2->ttype;
if (v)
return v;
v = ft1->tclass - ft2->tclass;
if (v)
return v;
return strcmp(ft1->name, ft2->name);
}
static const struct hashtab_key_params filenametr_key_params = {
.hash = filenametr_hash,
.cmp = filenametr_cmp,
};
#endif
static bool add_filename_trans(struct policydb *db, const char *s,
const char *t, const char *c, const char *d,
const char *o)
{
struct type_datum *src, *tgt, *def;
struct class_datum *cls;
src = symtab_search(&db->p_types, s);
if (src == NULL) {
pr_warn("source type %s does not exist\n", s);
return false;
}
tgt = symtab_search(&db->p_types, t);
if (tgt == NULL) {
pr_warn("target type %s does not exist\n", t);
return false;
}
cls = symtab_search(&db->p_classes, c);
if (cls == NULL) {
pr_warn("class %s does not exist\n", c);
return false;
}
def = symtab_search(&db->p_types, d);
if (def == NULL) {
pr_warn("default type %s does not exist\n", d);
return false;
}
struct filename_trans_key key;
key.ttype = tgt->value;
key.tclass = cls->value;
key.name = (char *)o;
struct filename_trans_datum *last = NULL;
struct filename_trans_datum *trans =
policydb_filenametr_search(db, &key);
while (trans) {
if (ebitmap_get_bit(&trans->stypes, src->value - 1)) {
// Duplicate, overwrite existing data and return
trans->otype = def->value;
return true;
}
if (trans->otype == def->value)
break;
last = trans;
trans = trans->next;
}
	if (trans == NULL) {
		struct filename_trans_key *new_key;

		trans = kcalloc(1, sizeof(*trans), GFP_ATOMIC);
		new_key = kzalloc(sizeof(*new_key), GFP_ATOMIC);
		if (!trans || !new_key) {
			pr_err("add_filename_trans: alloc failed\n");
			kfree(trans);
			kfree(new_key);
			return false;
		}
		*new_key = key;
		new_key->name = kstrdup(key.name, GFP_ATOMIC);
		trans->next = last;
		trans->otype = def->value;
		hashtab_insert(&db->filename_trans, new_key, trans,
			       filenametr_key_params);
	}
db->compat_filename_trans_count++;
return ebitmap_set_bit(&trans->stypes, src->value - 1, 1) == 0;
}
static bool add_genfscon(struct policydb *db, const char *fs_name,
const char *path, const char *context)
{
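	/* genfscon patching is not implemented in-kernel yet; always report failure */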
return false;
}
static void *ksu_realloc(void *old, size_t new_size, size_t old_size)
{
// we can't use krealloc, because it may be read-only
void *new = kzalloc(new_size, GFP_ATOMIC);
if (!new) {
return NULL;
}
if (old_size) {
memcpy(new, old, old_size);
}
	// we can't use kfree either, because the old buffer may be read-only
	// this may leak a little; we could check whether the pointer is writable first, but it's not a big deal
// kfree(old);
return new;
}
static bool add_type(struct policydb *db, const char *type_name, bool attr)
{
struct type_datum *type = symtab_search(&db->p_types, type_name);
if (type) {
pr_warn("Type %s already exists\n", type_name);
return true;
}
u32 value = ++db->p_types.nprim;
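	/* type values are 1-based; the new value is also the new length of the per-type arrays grown below */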
type = (struct type_datum *)kzalloc(sizeof(struct type_datum),
GFP_ATOMIC);
if (!type) {
pr_err("add_type: alloc type_datum failed.\n");
return false;
}
type->primary = 1;
type->value = value;
type->attribute = attr;
char *key = kstrdup(type_name, GFP_ATOMIC);
if (!key) {
pr_err("add_type: alloc key failed.\n");
return false;
}
if (symtab_insert(&db->p_types, key, type)) {
pr_err("add_type: insert symtab failed.\n");
return false;
}
struct ebitmap *new_type_attr_map_array =
ksu_realloc(db->type_attr_map_array,
value * sizeof(struct ebitmap),
(value - 1) * sizeof(struct ebitmap));
if (!new_type_attr_map_array) {
pr_err("add_type: alloc type_attr_map_array failed\n");
return false;
}
struct type_datum **new_type_val_to_struct =
ksu_realloc(db->type_val_to_struct,
sizeof(*db->type_val_to_struct) * value,
sizeof(*db->type_val_to_struct) * (value - 1));
if (!new_type_val_to_struct) {
pr_err("add_type: alloc type_val_to_struct failed\n");
return false;
}
char **new_val_to_name_types =
ksu_realloc(db->sym_val_to_name[SYM_TYPES],
sizeof(char *) * value,
sizeof(char *) * (value - 1));
if (!new_val_to_name_types) {
pr_err("add_type: alloc val_to_name failed\n");
return false;
}
db->type_attr_map_array = new_type_attr_map_array;
ebitmap_init(&db->type_attr_map_array[value - 1]);
ebitmap_set_bit(&db->type_attr_map_array[value - 1], value - 1, 1);
db->type_val_to_struct = new_type_val_to_struct;
db->type_val_to_struct[value - 1] = type;
db->sym_val_to_name[SYM_TYPES] = new_val_to_name_types;
db->sym_val_to_name[SYM_TYPES][value - 1] = key;
int i;
for (i = 0; i < db->p_roles.nprim; ++i) {
ebitmap_set_bit(&db->role_val_to_struct[i]->types, value - 1,
1);
}
return true;
}
static bool set_type_state(struct policydb *db, const char *type_name,
bool permissive)
{
struct type_datum *type;
if (type_name == NULL) {
struct hashtab_node *node;
ksu_hashtab_for_each(db->p_types.table, node)
{
type = (struct type_datum *)(node->datum);
if (ebitmap_set_bit(&db->permissive_map, type->value,
permissive))
pr_info("Could not set bit in permissive map\n");
};
} else {
type = (struct type_datum *)symtab_search(&db->p_types,
type_name);
if (type == NULL) {
pr_info("type %s does not exist\n", type_name);
return false;
}
if (ebitmap_set_bit(&db->permissive_map, type->value,
permissive)) {
pr_info("Could not set bit in permissive map\n");
return false;
}
}
return true;
}
static void add_typeattribute_raw(struct policydb *db, struct type_datum *type,
struct type_datum *attr)
{
struct ebitmap *sattr = &db->type_attr_map_array[type->value - 1];
ebitmap_set_bit(sattr, attr->value - 1, 1);
struct hashtab_node *node;
struct constraint_node *n;
struct constraint_expr *e;
ksu_hashtab_for_each(db->p_classes.table, node)
{
struct class_datum *cls = (struct class_datum *)(node->datum);
for (n = cls->constraints; n; n = n->next) {
for (e = n->expr; e; e = e->next) {
if (e->expr_type == CEXPR_NAMES &&
ebitmap_get_bit(&e->type_names->types,
attr->value - 1)) {
ebitmap_set_bit(&e->names,
type->value - 1, 1);
}
}
}
};
}
static bool add_typeattribute(struct policydb *db, const char *type,
const char *attr)
{
struct type_datum *type_d = symtab_search(&db->p_types, type);
if (type_d == NULL) {
pr_info("type %s does not exist\n", type);
return false;
} else if (type_d->attribute) {
pr_info("type %s is an attribute\n", attr);
return false;
}
struct type_datum *attr_d = symtab_search(&db->p_types, attr);
if (attr_d == NULL) {
pr_info("attribute %s does not exist\n", type);
return false;
} else if (!attr_d->attribute) {
pr_info("type %s is not an attribute \n", attr);
return false;
}
add_typeattribute_raw(db, type_d, attr_d);
return true;
}
//////////////////////////////////////////////////////////////////////////
// Operation on types
bool ksu_type(struct policydb *db, const char *name, const char *attr)
{
return add_type(db, name, false) && add_typeattribute(db, name, attr);
}
bool ksu_attribute(struct policydb *db, const char *name)
{
return add_type(db, name, true);
}
bool ksu_permissive(struct policydb *db, const char *type)
{
return set_type_state(db, type, true);
}
bool ksu_enforce(struct policydb *db, const char *type)
{
return set_type_state(db, type, false);
}
bool ksu_typeattribute(struct policydb *db, const char *type, const char *attr)
{
return add_typeattribute(db, type, attr);
}
bool ksu_exists(struct policydb *db, const char *type)
{
return symtab_search(&db->p_types, type) != NULL;
}
// Access vector rules
bool ksu_allow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm)
{
return add_rule(db, src, tgt, cls, perm, AVTAB_ALLOWED, false);
}
bool ksu_deny(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm)
{
return add_rule(db, src, tgt, cls, perm, AVTAB_ALLOWED, true);
}
bool ksu_auditallow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm)
{
return add_rule(db, src, tgt, cls, perm, AVTAB_AUDITALLOW, false);
}
bool ksu_dontaudit(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm)
{
return add_rule(db, src, tgt, cls, perm, AVTAB_AUDITDENY, true);
}
// Extended permissions access vector rules
bool ksu_allowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range)
{
return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_ALLOWED,
false);
}
bool ksu_auditallowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range)
{
return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_AUDITALLOW,
false);
}
bool ksu_dontauditxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range)
{
return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_DONTAUDIT,
false);
}
// Type rules
bool ksu_type_transition(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def, const char *obj)
{
if (obj) {
return add_filename_trans(db, src, tgt, cls, def, obj);
} else {
return add_type_rule(db, src, tgt, cls, def, AVTAB_TRANSITION);
}
}
bool ksu_type_change(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def)
{
return add_type_rule(db, src, tgt, cls, def, AVTAB_CHANGE);
}
bool ksu_type_member(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def)
{
return add_type_rule(db, src, tgt, cls, def, AVTAB_MEMBER);
}
// File system labeling
bool ksu_genfscon(struct policydb *db, const char *fs_name, const char *path,
const char *ctx)
{
return add_genfscon(db, fs_name, path, ctx);
}
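As a quick orientation to the API exported above, a hedged usage sketch (the type and class names are illustrative and must already exist in the loaded policydb; return values are ignored for brevity):

/* sketch only: the caller must already hold a valid policydb pointer,
 * e.g. via rules.c's get_policydb() */
static void example_patch_policy(struct policydb *db)
{
	ksu_permissive(db, "su");				/* make the su domain permissive */
	ksu_allow(db, "su", "system_file", "file", "read");	/* a single permission */
	ksu_allow(db, "su", "system_file", "file", NULL);	/* NULL means all permissions */
	ksu_allowxperm(db, "su", "block_device", "blk_file", "1260-12ff"); /* ioctl range */
}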

46
kernel/selinux/sepolicy.h Normal file
View file

@ -0,0 +1,46 @@
#ifndef __KSU_H_SEPOLICY
#define __KSU_H_SEPOLICY
#include <linux/types.h>
#include "ss/policydb.h"
// Operation on types
bool ksu_type(struct policydb *db, const char *name, const char *attr);
bool ksu_attribute(struct policydb *db, const char *name);
bool ksu_permissive(struct policydb *db, const char *type);
bool ksu_enforce(struct policydb *db, const char *type);
bool ksu_typeattribute(struct policydb *db, const char *type, const char *attr);
bool ksu_exists(struct policydb *db, const char *type);
// Access vector rules
bool ksu_allow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
bool ksu_deny(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
bool ksu_auditallow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
bool ksu_dontaudit(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
// Extended permissions access vector rules
bool ksu_allowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range);
bool ksu_auditallowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range);
bool ksu_dontauditxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range);
// Type rules
bool ksu_type_transition(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def, const char *obj);
bool ksu_type_change(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def);
bool ksu_type_member(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def);
// File system labeling
bool ksu_genfscon(struct policydb *db, const char *fs_name, const char *path,
const char *ctx);
#endif

171
kernel/setuid_hook.c Normal file
View file

@ -0,0 +1,171 @@
#include <linux/compiler.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/thread_info.h>
#include <linux/seccomp.h>
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uidgid.h>
#include <linux/version.h>
#include <linux/binfmts.h>
#include <linux/tty.h>
#include "allowlist.h"
#include "setuid_hook.h"
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include "manager.h"
#include "selinux/selinux.h"
#include "seccomp_cache.h"
#include "supercalls.h"
#include "syscall_hook_manager.h"
#include "kernel_umount.h"
#include "app_profile.h"
static bool ksu_enhanced_security_enabled = false;
static int enhanced_security_feature_get(u64 *value)
{
*value = ksu_enhanced_security_enabled ? 1 : 0;
return 0;
}
static int enhanced_security_feature_set(u64 value)
{
bool enable = value != 0;
ksu_enhanced_security_enabled = enable;
pr_info("enhanced_security: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler enhanced_security_handler = {
.feature_id = KSU_FEATURE_ENHANCED_SECURITY,
.name = "enhanced_security",
.get_handler = enhanced_security_feature_get,
.set_handler = enhanced_security_feature_set,
};
static inline bool is_allow_su()
{
if (is_manager()) {
// we are manager, allow!
return true;
}
return ksu_is_allow_uid_for_current(current_uid().val);
}
int ksu_handle_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
uid_t new_uid = ruid;
uid_t old_uid = current_uid().val;
pr_info("handle_setresuid from %d to %d\n", old_uid, new_uid);
// if old process is root, ignore it.
if (old_uid != 0 && ksu_enhanced_security_enabled) {
// disallow any non-ksu domain escalation from non-root to root!
// euid is what we care about here as it controls permission
if (unlikely(euid == 0)) {
if (!is_ksu_domain()) {
pr_warn("find suspicious EoP: %d %s, from %d to %d\n",
current->pid, current->comm, old_uid, new_uid);
force_sig(SIGKILL);
return 0;
}
}
		// disallow an app uid from dropping to a lower uid unless it is allowed to su
if (is_appuid(old_uid)) {
if (euid < current_euid().val && !ksu_is_allow_uid_for_current(old_uid)) {
pr_warn("find suspicious EoP: %d %s, from %d to %d\n",
current->pid, current->comm, old_uid, new_uid);
force_sig(SIGKILL);
return 0;
}
}
return 0;
}
	// if running in a private space (secondary user), check whether it is possibly the manager
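	// e.g. with PER_USER_RANGE == 100000 and a manager uid of 10200, uid 1010200 (user 10)
	// matches, assuming the usual Android layout uid = user_id * PER_USER_RANGE + app_id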
if (new_uid > PER_USER_RANGE && new_uid % PER_USER_RANGE == ksu_get_manager_uid()) {
ksu_set_manager_uid(new_uid);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
if (ksu_get_manager_uid() == new_uid) {
pr_info("install fd for manager: %d\n", new_uid);
ksu_install_fd();
spin_lock_irq(&current->sighand->siglock);
ksu_seccomp_allow_cache(current->seccomp.filter, __NR_reboot);
ksu_set_task_tracepoint_flag(current);
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
if (ksu_is_allow_uid_for_current(new_uid)) {
if (current->seccomp.mode == SECCOMP_MODE_FILTER &&
current->seccomp.filter) {
spin_lock_irq(&current->sighand->siglock);
ksu_seccomp_allow_cache(current->seccomp.filter, __NR_reboot);
spin_unlock_irq(&current->sighand->siglock);
}
ksu_set_task_tracepoint_flag(current);
} else {
ksu_clear_task_tracepoint_flag_if_needed(current);
}
#else
if (ksu_is_allow_uid_for_current(new_uid)) {
spin_lock_irq(&current->sighand->siglock);
disable_seccomp();
spin_unlock_irq(&current->sighand->siglock);
if (ksu_get_manager_uid() == new_uid) {
pr_info("install fd for ksu manager(uid=%d)\n",
new_uid);
ksu_install_fd();
}
return 0;
}
#endif
// Handle kernel umount
ksu_handle_umount(old_uid, new_uid);
return 0;
}
void ksu_setuid_hook_init(void)
{
ksu_kernel_umount_init();
if (ksu_register_feature_handler(&enhanced_security_handler)) {
pr_err("Failed to register enhanced security feature handler\n");
}
}
void ksu_setuid_hook_exit(void)
{
pr_info("ksu_core_exit\n");
ksu_kernel_umount_exit();
ksu_unregister_feature_handler(KSU_FEATURE_ENHANCED_SECURITY);
}

14
kernel/setuid_hook.h Normal file
View file

@ -0,0 +1,14 @@
#ifndef __KSU_H_KSU_CORE
#define __KSU_H_KSU_CORE
#include <linux/init.h>
#include <linux/types.h>
#include "apk_sign.h"
#include <linux/thread_info.h>
void ksu_setuid_hook_init(void);
void ksu_setuid_hook_exit(void);
int ksu_handle_setresuid(uid_t ruid, uid_t euid, uid_t suid);
#endif

79
kernel/setup.sh Executable file
View file

@ -0,0 +1,79 @@
#!/bin/sh
set -eu
KERNEL_ROOT=$(pwd)
display_usage() {
echo "Usage: $0 [--cleanup | <commit-or-tag>]"
echo " --cleanup: Cleans up previous modifications made by the script."
echo " <commit-or-tag>: Sets up or updates the KernelSU to specified tag or commit."
echo " -h, --help: Displays this usage information."
echo " (no args): Sets up or updates the KernelSU environment to the latest tagged version."
}
initialize_variables() {
if test -d "$KERNEL_ROOT/common/drivers"; then
DRIVER_DIR="$KERNEL_ROOT/common/drivers"
elif test -d "$KERNEL_ROOT/drivers"; then
DRIVER_DIR="$KERNEL_ROOT/drivers"
else
echo '[ERROR] "drivers/" directory not found.'
exit 127
fi
DRIVER_MAKEFILE=$DRIVER_DIR/Makefile
DRIVER_KCONFIG=$DRIVER_DIR/Kconfig
}
# Reverts modifications made by this script
perform_cleanup() {
echo "[+] Cleaning up..."
[ -L "$DRIVER_DIR/kernelsu" ] && rm "$DRIVER_DIR/kernelsu" && echo "[-] Symlink removed."
grep -q "kernelsu" "$DRIVER_MAKEFILE" && sed -i '/kernelsu/d' "$DRIVER_MAKEFILE" && echo "[-] Makefile reverted."
grep -q "kernelsu" "$DRIVER_KCONFIG" && sed -i '/kernelsu/d' "$DRIVER_KCONFIG" && echo "[-] Kconfig reverted."
if [ -d "$KERNEL_ROOT/KernelSU" ]; then
rm -rf "$KERNEL_ROOT/KernelSU" && echo "[-] KernelSU directory deleted."
fi
}
# Sets up or updates the KernelSU environment
setup_kernelsu() {
echo "[+] Setting up KernelSU..."
# Clone the repository
if [ ! -d "$KERNEL_ROOT/KernelSU" ]; then
git clone https://github.com/SukiSU-Ultra/SukiSU-Ultra KernelSU
echo "[+] Repository cloned."
fi
cd "$KERNEL_ROOT/KernelSU"
git stash && echo "[-] Stashed current changes."
if [ "$(git status | grep -Po 'v\d+(\.\d+)*' | head -n1)" ]; then
git checkout main && echo "[-] Switched to main branch."
fi
git pull && echo "[+] Repository updated."
if [ -z "${1-}" ]; then
git checkout "$(git describe --abbrev=0 --tags)" && echo "[-] Checked out latest tag."
else
git checkout "$1" && echo "[-] Checked out $1." || echo "[-] Checkout default branch"
fi
cd "$DRIVER_DIR"
ln -sf "$(realpath --relative-to="$DRIVER_DIR" "$KERNEL_ROOT/KernelSU/kernel")" "kernelsu" && echo "[+] Symlink created."
# Add entries in Makefile and Kconfig if not already existing
grep -q "kernelsu" "$DRIVER_MAKEFILE" || echo 'obj-$(CONFIG_KSU) += kernelsu/' >> "$DRIVER_MAKEFILE" && echo "[+] Modified Makefile."
grep -q 'source "drivers/kernelsu/Kconfig"' "$DRIVER_KCONFIG" || sed -i '/endmenu/i\source "drivers/kernelsu/Kconfig"' "$DRIVER_KCONFIG" && echo "[+] Modified Kconfig."
echo '[+] Done.'
}
# Process command-line arguments
if [ "$#" -eq 0 ]; then
initialize_variables
setup_kernelsu
elif [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
display_usage
elif [ "$1" = "--cleanup" ]; then
initialize_variables
perform_cleanup
else
initialize_variables
setup_kernelsu "$@"
fi

187
kernel/sucompat.c Normal file
View file

@ -0,0 +1,187 @@
#include "linux/compiler.h"
#include "linux/printk.h"
#include <asm/current.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include "allowlist.h"
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include "ksud.h"
#include "sucompat.h"
#include "app_profile.h"
#include "syscall_hook_manager.h"
#include "sulog.h"
#define SU_PATH "/system/bin/su"
#define SH_PATH "/system/bin/sh"
bool ksu_su_compat_enabled __read_mostly = true;
static int su_compat_feature_get(u64 *value)
{
*value = ksu_su_compat_enabled ? 1 : 0;
return 0;
}
static int su_compat_feature_set(u64 value)
{
bool enable = value != 0;
ksu_su_compat_enabled = enable;
pr_info("su_compat: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler su_compat_handler = {
.feature_id = KSU_FEATURE_SU_COMPAT,
.name = "su_compat",
.get_handler = su_compat_feature_get,
.set_handler = su_compat_feature_set,
};
static void __user *userspace_stack_buffer(const void *d, size_t len)
{
// To avoid having to mmap a page in userspace, just write below the stack
// pointer.
char __user *p = (void __user *)current_user_stack_pointer() - len;
return copy_to_user(p, d, len) ? NULL : p;
}
static char __user *sh_user_path(void)
{
static const char sh_path[] = "/system/bin/sh";
return userspace_stack_buffer(sh_path, sizeof(sh_path));
}
static char __user *ksud_user_path(void)
{
static const char ksud_path[] = KSUD_PATH;
return userspace_stack_buffer(ksud_path, sizeof(ksud_path));
}
int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
int *__unused_flags)
{
const char su[] = SU_PATH;
if (!ksu_is_allow_uid_for_current(current_uid().val)) {
return 0;
}
char path[sizeof(su) + 1];
memset(path, 0, sizeof(path));
strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
ksu_sulog_report_syscall(current_uid().val, NULL, "faccessat", path);
#endif
pr_info("faccessat su->sh!\n");
*filename_user = sh_user_path();
}
return 0;
}
int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags)
{
// const char sh[] = SH_PATH;
const char su[] = SU_PATH;
if (!ksu_is_allow_uid_for_current(current_uid().val)) {
return 0;
}
if (unlikely(!filename_user)) {
return 0;
}
char path[sizeof(su) + 1];
memset(path, 0, sizeof(path));
	// TODO: remove this later; with the syscall hook in place this path should never be taken
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) && 0
// it becomes a `struct filename *` after 5.18
// https://elixir.bootlin.com/linux/v5.18/source/fs/stat.c#L216
const char sh[] = SH_PATH;
struct filename *filename = *((struct filename **)filename_user);
if (IS_ERR(filename)) {
return 0;
}
if (likely(memcmp(filename->name, su, sizeof(su))))
return 0;
pr_info("vfs_statx su->sh!\n");
memcpy((void *)filename->name, sh, sizeof(sh));
#else
strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
ksu_sulog_report_syscall(current_uid().val, NULL, "newfstatat", path);
#endif
pr_info("newfstatat su->sh!\n");
*filename_user = sh_user_path();
}
#endif
return 0;
}
int ksu_handle_execve_sucompat(const char __user **filename_user,
void *__never_use_argv, void *__never_use_envp,
int *__never_use_flags)
{
const char su[] = SU_PATH;
char path[sizeof(su) + 1];
if (unlikely(!filename_user))
return 0;
memset(path, 0, sizeof(path));
strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (likely(memcmp(path, su, sizeof(su))))
return 0;
#if __SULOG_GATE
bool is_allowed = ksu_is_allow_uid_for_current(current_uid().val);
ksu_sulog_report_syscall(current_uid().val, NULL, "execve", path);
if (!is_allowed)
return 0;
ksu_sulog_report_su_attempt(current_uid().val, NULL, path, is_allowed);
#else
if (!ksu_is_allow_uid_for_current(current_uid().val)) {
return 0;
}
#endif
pr_info("sys_execve su found\n");
*filename_user = ksud_user_path();
escape_with_root_profile();
return 0;
}
// sucompat: permitted process can execute 'su' to gain root access.
void ksu_sucompat_init()
{
if (ksu_register_feature_handler(&su_compat_handler)) {
pr_err("Failed to register su_compat feature handler\n");
}
}
void ksu_sucompat_exit()
{
ksu_unregister_feature_handler(KSU_FEATURE_SU_COMPAT);
}

18
kernel/sucompat.h Normal file
View file

@ -0,0 +1,18 @@
#ifndef __KSU_H_SUCOMPAT
#define __KSU_H_SUCOMPAT
#include <linux/types.h>
extern bool ksu_su_compat_enabled;
void ksu_sucompat_init(void);
void ksu_sucompat_exit(void);
// Handler functions exported for hook_manager
int ksu_handle_faccessat(int *dfd, const char __user **filename_user,
int *mode, int *__unused_flags);
int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags);
int ksu_handle_execve_sucompat(const char __user **filename_user,
void *__never_use_argv, void *__never_use_envp,
int *__never_use_flags);
#endif

369
kernel/sulog.c Normal file
View file

@ -0,0 +1,369 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "klog.h"
#include "sulog.h"
#include "ksu.h"
#include "feature.h"
#if __SULOG_GATE
static struct dedup_entry dedup_tbl[SULOG_COMM_LEN];
static DEFINE_SPINLOCK(dedup_lock);
static LIST_HEAD(sulog_queue);
static struct workqueue_struct *sulog_workqueue;
static struct work_struct sulog_work;
static bool sulog_enabled __read_mostly = true;
static int sulog_feature_get(u64 *value)
{
*value = sulog_enabled ? 1 : 0;
return 0;
}
static int sulog_feature_set(u64 value)
{
bool enable = value != 0;
sulog_enabled = enable;
pr_info("sulog: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler sulog_handler = {
.feature_id = KSU_FEATURE_SULOG,
.name = "sulog",
.get_handler = sulog_feature_get,
.set_handler = sulog_feature_set,
};
static void get_timestamp(char *buf, size_t len)
{
struct timespec64 ts;
struct tm tm;
ktime_get_real_ts64(&ts);
time64_to_tm(ts.tv_sec - sys_tz.tz_minuteswest * 60, 0, &tm);
snprintf(buf, len, "%04ld-%02d-%02d %02d:%02d:%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
}
static void ksu_get_cmdline(char *full_comm, const char *comm, size_t buf_len)
{
if (!full_comm || buf_len <= 0)
return;
if (comm && strlen(comm) > 0) {
KSU_STRSCPY(full_comm, comm, buf_len);
return;
}
if (in_atomic() || in_interrupt() || irqs_disabled()) {
KSU_STRSCPY(full_comm, current->comm, buf_len);
return;
}
if (!current->mm) {
KSU_STRSCPY(full_comm, current->comm, buf_len);
return;
}
int n = get_cmdline(current, full_comm, buf_len);
if (n <= 0) {
KSU_STRSCPY(full_comm, current->comm, buf_len);
return;
}
for (int i = 0; i < n && i < buf_len - 1; i++) {
if (full_comm[i] == '\0')
full_comm[i] = ' ';
}
full_comm[n < buf_len ? n : buf_len - 1] = '\0';
}
static void sanitize_string(char *str, size_t len)
{
if (!str || len == 0)
return;
size_t read_pos = 0, write_pos = 0;
while (read_pos < len && str[read_pos] != '\0') {
char c = str[read_pos];
if (c == '\n' || c == '\r') {
read_pos++;
continue;
}
if (c == ' ' && write_pos > 0 && str[write_pos - 1] == ' ') {
read_pos++;
continue;
}
str[write_pos++] = c;
read_pos++;
}
str[write_pos] = '\0';
}
static bool dedup_should_print(uid_t uid, u8 type, const char *content, size_t len)
{
struct dedup_key key = {
.crc = dedup_calc_hash(content, len),
.uid = uid,
.type = type,
};
u64 now = ktime_get_ns();
u64 delta_ns = DEDUP_SECS * NSEC_PER_SEC;
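	/* direct-mapped dedup table; the mask below assumes SULOG_COMM_LEN is a power of two */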
u32 idx = key.crc & (SULOG_COMM_LEN - 1);
spin_lock(&dedup_lock);
struct dedup_entry *e = &dedup_tbl[idx];
if (e->key.crc == key.crc &&
e->key.uid == key.uid &&
e->key.type == key.type &&
(now - e->ts_ns) < delta_ns) {
spin_unlock(&dedup_lock);
return false;
}
e->key = key;
e->ts_ns = now;
spin_unlock(&dedup_lock);
return true;
}
static void sulog_work_handler(struct work_struct *work)
{
struct file *fp;
struct sulog_entry *entry, *tmp;
LIST_HEAD(local_queue);
loff_t pos = 0;
unsigned long flags;
spin_lock_irqsave(&dedup_lock, flags);
list_splice_init(&sulog_queue, &local_queue);
spin_unlock_irqrestore(&dedup_lock, flags);
if (list_empty(&local_queue))
return;
fp = filp_open(SULOG_PATH, O_WRONLY | O_CREAT | O_APPEND, 0640);
if (IS_ERR(fp)) {
pr_err("sulog: failed to open log file: %ld\n", PTR_ERR(fp));
goto cleanup;
}
if (fp->f_inode->i_size > SULOG_MAX_SIZE) {
if (vfs_truncate(&fp->f_path, 0))
pr_err("sulog: failed to truncate log file\n");
pos = 0;
} else {
pos = fp->f_inode->i_size;
}
list_for_each_entry(entry, &local_queue, list)
kernel_write(fp, entry->content, strlen(entry->content), &pos);
vfs_fsync(fp, 0);
filp_close(fp, 0);
cleanup:
list_for_each_entry_safe(entry, tmp, &local_queue, list) {
list_del(&entry->list);
kfree(entry);
}
}
static void sulog_add_entry(char *log_buf, size_t len, uid_t uid, u8 dedup_type)
{
struct sulog_entry *entry;
unsigned long flags;
if (!sulog_enabled || !log_buf || len == 0)
return;
if (!dedup_should_print(uid, dedup_type, log_buf, len))
return;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
KSU_STRSCPY(entry->content, log_buf, SULOG_ENTRY_MAX_LEN);
spin_lock_irqsave(&dedup_lock, flags);
list_add_tail(&entry->list, &sulog_queue);
spin_unlock_irqrestore(&dedup_lock, flags);
if (sulog_workqueue)
queue_work(sulog_workqueue, &sulog_work);
}
void ksu_sulog_report_su_grant(uid_t uid, const char *comm, const char *method)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] SU_GRANT: UID=%d COMM=%s METHOD=%s PID=%d\n",
timestamp, uid, full_comm, method ? method : "unknown", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_SU_GRANT);
}
void ksu_sulog_report_su_attempt(uid_t uid, const char *comm, const char *target_path, bool success)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] SU_EXEC: UID=%d COMM=%s TARGET=%s RESULT=%s PID=%d\n",
timestamp, uid, full_comm, target_path ? target_path : "unknown",
success ? "SUCCESS" : "DENIED", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_SU_ATTEMPT);
}
void ksu_sulog_report_permission_check(uid_t uid, const char *comm, bool allowed)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] PERM_CHECK: UID=%d COMM=%s RESULT=%s PID=%d\n",
timestamp, uid, full_comm, allowed ? "ALLOWED" : "DENIED", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_PERM_CHECK);
}
void ksu_sulog_report_manager_operation(const char *operation, uid_t manager_uid, uid_t target_uid)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, NULL, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] MANAGER_OP: OP=%s MANAGER_UID=%d TARGET_UID=%d COMM=%s PID=%d\n",
timestamp, operation ? operation : "unknown", manager_uid, target_uid, full_comm, current->pid);
sulog_add_entry(log_buf, strlen(log_buf), manager_uid, DEDUP_MANAGER_OP);
}
void ksu_sulog_report_syscall(uid_t uid, const char *comm, const char *syscall, const char *args)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] SYSCALL: UID=%d COMM=%s SYSCALL=%s ARGS=%s PID=%d\n",
timestamp, uid, full_comm, syscall ? syscall : "unknown",
args ? args : "none", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_SYSCALL);
}
int ksu_sulog_init(void)
{
if (ksu_register_feature_handler(&sulog_handler)) {
pr_err("Failed to register sulog feature handler\n");
}
sulog_workqueue = alloc_workqueue("ksu_sulog", WQ_UNBOUND | WQ_HIGHPRI, 1);
if (!sulog_workqueue) {
pr_err("sulog: failed to create workqueue\n");
return -ENOMEM;
}
INIT_WORK(&sulog_work, sulog_work_handler);
pr_info("sulog: initialized successfully\n");
return 0;
}
void ksu_sulog_exit(void)
{
struct sulog_entry *entry, *tmp;
unsigned long flags;
ksu_unregister_feature_handler(KSU_FEATURE_SULOG);
sulog_enabled = false;
if (sulog_workqueue) {
flush_workqueue(sulog_workqueue);
destroy_workqueue(sulog_workqueue);
sulog_workqueue = NULL;
}
spin_lock_irqsave(&dedup_lock, flags);
list_for_each_entry_safe(entry, tmp, &sulog_queue, list) {
list_del(&entry->list);
kfree(entry);
}
spin_unlock_irqrestore(&dedup_lock, flags);
pr_info("sulog: cleaned up successfully\n");
}
#endif // __SULOG_GATE

93
kernel/sulog.h Normal file
View file

@ -0,0 +1,93 @@
#ifndef __KSU_SULOG_H
#define __KSU_SULOG_H
#include <linux/types.h>
#include <linux/version.h>
#include <linux/crc32.h> // needed for function dedup_calc_hash
#define __SULOG_GATE 1
#if __SULOG_GATE
extern struct timezone sys_tz;
#define SULOG_PATH "/data/adb/ksu/log/sulog.log"
#define SULOG_MAX_SIZE (32 * 1024 * 1024) // 32MB
#define SULOG_ENTRY_MAX_LEN 512
#define SULOG_COMM_LEN 256
#define DEDUP_SECS 10
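// Recent kernels no longer provide strlcpy(); map it onto strscpy() so the
// KSU_STRSCPY() macro below keeps building there.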
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0)
static inline size_t strlcpy(char *dest, const char *src, size_t size)
{
return strscpy(dest, src, size);
}
#endif
#define KSU_STRSCPY(dst, src, size) \
do { \
if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) { \
strscpy(dst, src, size); \
} else { \
strlcpy(dst, src, size); \
} \
} while (0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
#include <linux/rtc.h>
static inline void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
struct rtc_time rtc_tm;
rtc_time64_to_tm(totalsecs, &rtc_tm);
result->tm_sec = rtc_tm.tm_sec;
result->tm_min = rtc_tm.tm_min;
result->tm_hour = rtc_tm.tm_hour;
result->tm_mday = rtc_tm.tm_mday;
result->tm_mon = rtc_tm.tm_mon;
result->tm_year = rtc_tm.tm_year;
}
#endif
struct dedup_key {
u32 crc;
uid_t uid;
u8 type;
u8 _pad[1];
};
struct dedup_entry {
struct dedup_key key;
u64 ts_ns;
};
enum {
DEDUP_SU_GRANT = 0,
DEDUP_SU_ATTEMPT,
DEDUP_PERM_CHECK,
DEDUP_MANAGER_OP,
DEDUP_SYSCALL,
};
static inline u32 dedup_calc_hash(const char *content, size_t len)
{
return crc32(0, content, len);
}
struct sulog_entry {
struct list_head list;
char content[SULOG_ENTRY_MAX_LEN];
};
void ksu_sulog_report_su_grant(uid_t uid, const char *comm, const char *method);
void ksu_sulog_report_su_attempt(uid_t uid, const char *comm, const char *target_path, bool success);
void ksu_sulog_report_permission_check(uid_t uid, const char *comm, bool allowed);
void ksu_sulog_report_manager_operation(const char *operation, uid_t manager_uid, uid_t target_uid);
void ksu_sulog_report_syscall(uid_t uid, const char *comm, const char *syscall, const char *args);
int ksu_sulog_init(void);
void ksu_sulog_exit(void);
#endif // __SULOG_GATE
#endif /* __KSU_SULOG_H */

1072
kernel/supercalls.c Normal file

File diff suppressed because it is too large

197
kernel/supercalls.h Normal file
View file

@ -0,0 +1,197 @@
#ifndef __KSU_H_SUPERCALLS
#define __KSU_H_SUPERCALLS
#include <linux/types.h>
#include <linux/ioctl.h>
#include "ksu.h"
#include "app_profile.h"
#ifdef CONFIG_KPM
#include "kpm/kpm.h"
#endif
// Magic numbers for reboot hook to install fd
#define KSU_INSTALL_MAGIC1 0xDEADBEEF
#define KSU_INSTALL_MAGIC2 0xCAFEBABE
// Command structures for ioctl
struct ksu_become_daemon_cmd {
__u8 token[65]; // Input: daemon token (null-terminated)
};
struct ksu_get_info_cmd {
__u32 version; // Output: KERNEL_SU_VERSION
__u32 flags; // Output: flags (bit 0: MODULE mode)
__u32 features; // Output: max feature ID supported
};
struct ksu_report_event_cmd {
__u32 event; // Input: EVENT_POST_FS_DATA, EVENT_BOOT_COMPLETED, etc.
};
struct ksu_set_sepolicy_cmd {
__u64 cmd; // Input: sepolicy command
__aligned_u64 arg; // Input: sepolicy argument pointer
};
struct ksu_check_safemode_cmd {
__u8 in_safe_mode; // Output: true if in safe mode, false otherwise
};
struct ksu_get_allow_list_cmd {
__u32 uids[128]; // Output: array of allowed/denied UIDs
__u32 count; // Output: number of UIDs in array
__u8 allow; // Input: true for allow list, false for deny list
};
struct ksu_uid_granted_root_cmd {
__u32 uid; // Input: target UID to check
__u8 granted; // Output: true if granted, false otherwise
};
struct ksu_uid_should_umount_cmd {
__u32 uid; // Input: target UID to check
__u8 should_umount; // Output: true if should umount, false otherwise
};
struct ksu_get_manager_uid_cmd {
__u32 uid; // Output: manager UID
};
struct ksu_get_app_profile_cmd {
struct app_profile profile; // Input/Output: app profile structure
};
struct ksu_set_app_profile_cmd {
struct app_profile profile; // Input: app profile structure
};
struct ksu_get_feature_cmd {
__u32 feature_id; // Input: feature ID (enum ksu_feature_id)
__u64 value; // Output: feature value/state
__u8 supported; // Output: true if feature is supported, false otherwise
};
struct ksu_set_feature_cmd {
__u32 feature_id; // Input: feature ID (enum ksu_feature_id)
__u64 value; // Input: feature value/state to set
};
struct ksu_get_wrapper_fd_cmd {
__u32 fd; // Input: userspace fd
__u32 flags; // Input: flags of userspace fd
};
struct ksu_manage_mark_cmd {
__u32 operation; // Input: KSU_MARK_*
__s32 pid; // Input: target pid (0 for all processes)
__u32 result; // Output: for get operation - mark status or reg_count
};
#define KSU_MARK_GET 1
#define KSU_MARK_MARK 2
#define KSU_MARK_UNMARK 3
#define KSU_MARK_REFRESH 4
struct ksu_nuke_ext4_sysfs_cmd {
__aligned_u64 arg; // Input: mnt pointer
};
struct ksu_add_try_umount_cmd {
__aligned_u64 arg; // char ptr, this is the mountpoint
__u32 flags; // this is the flag we use for it
__u8 mode; // what to do with it: 0 = wipe list, 1 = add to list, 2 = delete entry
};
#define KSU_UMOUNT_WIPE 0 // ignore everything and wipe list
#define KSU_UMOUNT_ADD 1 // add entry (path + flags)
#define KSU_UMOUNT_DEL 2 // delete entry, strcmp
// Other command structures
struct ksu_get_full_version_cmd {
char version_full[KSU_FULL_VERSION_STRING]; // Output: full version string
};
struct ksu_hook_type_cmd {
char hook_type[32]; // Output: hook type string
};
struct ksu_enable_kpm_cmd {
__u8 enabled; // Output: true if KPM is enabled
};
struct ksu_dynamic_manager_cmd {
struct dynamic_manager_user_config config; // Input/Output: dynamic manager config
};
struct ksu_get_managers_cmd {
struct manager_list_info manager_info; // Output: manager list information
};
struct ksu_enable_uid_scanner_cmd {
__u32 operation; // Input: operation type (UID_SCANNER_OP_GET_STATUS, UID_SCANNER_OP_TOGGLE, UID_SCANNER_OP_CLEAR_ENV)
__u32 enabled; // Input: enable or disable (for UID_SCANNER_OP_TOGGLE)
void __user *status_ptr; // Input: pointer to store status (for UID_SCANNER_OP_GET_STATUS)
};
#ifdef CONFIG_KSU_MANUAL_SU
struct ksu_manual_su_cmd {
__u32 option; // Input: operation type (MANUAL_SU_OP_GENERATE_TOKEN, MANUAL_SU_OP_ESCALATE, MANUAL_SU_OP_ADD_PENDING)
__u32 target_uid; // Input: target UID
__u32 target_pid; // Input: target PID
char token_buffer[33]; // Input/Output: token buffer
};
#endif
// IOCTL command definitions
#define KSU_IOCTL_GRANT_ROOT _IOC(_IOC_NONE, 'K', 1, 0)
#define KSU_IOCTL_GET_INFO _IOC(_IOC_READ, 'K', 2, 0)
#define KSU_IOCTL_REPORT_EVENT _IOC(_IOC_WRITE, 'K', 3, 0)
#define KSU_IOCTL_SET_SEPOLICY _IOC(_IOC_READ|_IOC_WRITE, 'K', 4, 0)
#define KSU_IOCTL_CHECK_SAFEMODE _IOC(_IOC_READ, 'K', 5, 0)
#define KSU_IOCTL_GET_ALLOW_LIST _IOC(_IOC_READ|_IOC_WRITE, 'K', 6, 0)
#define KSU_IOCTL_GET_DENY_LIST _IOC(_IOC_READ|_IOC_WRITE, 'K', 7, 0)
#define KSU_IOCTL_UID_GRANTED_ROOT _IOC(_IOC_READ|_IOC_WRITE, 'K', 8, 0)
#define KSU_IOCTL_UID_SHOULD_UMOUNT _IOC(_IOC_READ|_IOC_WRITE, 'K', 9, 0)
#define KSU_IOCTL_GET_MANAGER_UID _IOC(_IOC_READ, 'K', 10, 0)
#define KSU_IOCTL_GET_APP_PROFILE _IOC(_IOC_READ|_IOC_WRITE, 'K', 11, 0)
#define KSU_IOCTL_SET_APP_PROFILE _IOC(_IOC_WRITE, 'K', 12, 0)
#define KSU_IOCTL_GET_FEATURE _IOC(_IOC_READ|_IOC_WRITE, 'K', 13, 0)
#define KSU_IOCTL_SET_FEATURE _IOC(_IOC_WRITE, 'K', 14, 0)
#define KSU_IOCTL_GET_WRAPPER_FD _IOC(_IOC_WRITE, 'K', 15, 0)
#define KSU_IOCTL_MANAGE_MARK _IOC(_IOC_READ|_IOC_WRITE, 'K', 16, 0)
#define KSU_IOCTL_NUKE_EXT4_SYSFS _IOC(_IOC_WRITE, 'K', 17, 0)
#define KSU_IOCTL_ADD_TRY_UMOUNT _IOC(_IOC_WRITE, 'K', 18, 0)
// Other IOCTL command definitions
#define KSU_IOCTL_GET_FULL_VERSION _IOC(_IOC_READ, 'K', 100, 0)
#define KSU_IOCTL_HOOK_TYPE _IOC(_IOC_READ, 'K', 101, 0)
#define KSU_IOCTL_ENABLE_KPM _IOC(_IOC_READ, 'K', 102, 0)
#define KSU_IOCTL_DYNAMIC_MANAGER _IOC(_IOC_READ|_IOC_WRITE, 'K', 103, 0)
#define KSU_IOCTL_GET_MANAGERS _IOC(_IOC_READ|_IOC_WRITE, 'K', 104, 0)
#define KSU_IOCTL_ENABLE_UID_SCANNER _IOC(_IOC_READ|_IOC_WRITE, 'K', 105, 0)
#ifdef CONFIG_KSU_MANUAL_SU
#define KSU_IOCTL_MANUAL_SU _IOC(_IOC_READ|_IOC_WRITE, 'K', 106, 0)
#endif
#define KSU_IOCTL_UMOUNT_MANAGER _IOC(_IOC_READ|_IOC_WRITE, 'K', 107, 0)
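/*
 * Illustrative userspace sketch (not part of this interface definition):
 * once a KSU fd has been installed into the process (see ksu_install_fd()
 * and the KSU_INSTALL_MAGIC* reboot hook above), a client could query basic
 * info roughly like this; the name ksu_fd is hypothetical and how the fd is
 * obtained is defined by the supercalls implementation, not by this header:
 *
 *     struct ksu_get_info_cmd info = {0};
 *     if (ioctl(ksu_fd, KSU_IOCTL_GET_INFO, &info) == 0)
 *         printf("version=%u flags=%u\n", info.version, info.flags);
 */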
// IOCTL handler types
typedef int (*ksu_ioctl_handler_t)(void __user *arg);
typedef bool (*ksu_perm_check_t)(void);
// IOCTL command mapping
struct ksu_ioctl_cmd_map {
unsigned int cmd;
const char *name;
ksu_ioctl_handler_t handler;
ksu_perm_check_t perm_check; // Permission check function
};
// Install KSU fd to current process
int ksu_install_fd(void);
void ksu_supercalls_init(void);
void ksu_supercalls_exit(void);
#endif // __KSU_H_SUPERCALLS

374
kernel/syscall_hook_manager.c Normal file
View file

@ -0,0 +1,374 @@
#include "linux/compiler.h"
#include "linux/cred.h"
#include "linux/printk.h"
#include "selinux/selinux.h"
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/tracepoint.h>
#include <asm/syscall.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <trace/events/syscalls.h>
#include <linux/namei.h>
#include "allowlist.h"
#include "arch.h"
#include "klog.h" // IWYU pragma: keep
#include "syscall_hook_manager.h"
#include "sucompat.h"
#include "setuid_hook.h"
#include "selinux/selinux.h"
// Tracepoint registration count management
// == 1: just us
// > 1: someone else is also using syscall tracepoint e.g. ftrace
static int tracepoint_reg_count = 0;
static DEFINE_SPINLOCK(tracepoint_reg_lock);
void ksu_clear_task_tracepoint_flag_if_needed(struct task_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
if (tracepoint_reg_count <= 1) {
ksu_clear_task_tracepoint_flag(t);
}
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
}
// Process marking management
static void handle_process_mark(bool mark)
{
struct task_struct *p, *t;
read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
if (mark)
ksu_set_task_tracepoint_flag(t);
else
ksu_clear_task_tracepoint_flag(t);
}
read_unlock(&tasklist_lock);
}
void ksu_mark_all_process(void)
{
handle_process_mark(true);
pr_info("hook_manager: mark all user process done!\n");
}
void ksu_unmark_all_process(void)
{
handle_process_mark(false);
pr_info("hook_manager: unmark all user process done!\n");
}
static void ksu_mark_running_process_locked(void)
{
struct task_struct *p, *t;
read_lock(&tasklist_lock);
for_each_process_thread (p, t) {
if (!t->mm) { // only user processes
continue;
}
int uid = task_uid(t).val;
const struct cred *cred = get_task_cred(t);
bool ksu_root_process =
uid == 0 && is_task_ksu_domain(cred);
bool is_zygote_process = is_zygote(cred);
bool is_shell = uid == 2000;
// before boot completes, init must also be marked so that zygote gets marked
bool is_init = t->pid == 1;
if (ksu_root_process || is_zygote_process || is_shell || is_init
|| ksu_is_allow_uid(uid)) {
ksu_set_task_tracepoint_flag(t);
pr_info("hook_manager: mark process: pid:%d, uid: %d, comm:%s\n",
t->pid, uid, t->comm);
} else {
ksu_clear_task_tracepoint_flag(t);
pr_info("hook_manager: unmark process: pid:%d, uid: %d, comm:%s\n",
t->pid, uid, t->comm);
}
put_cred(cred);
}
read_unlock(&tasklist_lock);
}
void ksu_mark_running_process(void)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
if (tracepoint_reg_count <= 1) {
ksu_mark_running_process_locked();
} else {
pr_info("hook_manager: not mark running process since syscall tracepoint is in use\n");
}
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
}
// Get task mark status
// Returns: 1 if marked, 0 if not marked, -ESRCH if task not found
int ksu_get_task_mark(pid_t pid)
{
struct task_struct *task;
int marked = -ESRCH;
rcu_read_lock();
task = find_task_by_vpid(pid);
if (task) {
get_task_struct(task);
rcu_read_unlock();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
marked = test_task_syscall_work(task, SYSCALL_TRACEPOINT) ? 1 : 0;
#else
marked = test_tsk_thread_flag(task, TIF_SYSCALL_TRACEPOINT) ? 1 : 0;
#endif
put_task_struct(task);
} else {
rcu_read_unlock();
}
return marked;
}
// Set task mark status
// Returns: 0 on success, -ESRCH if task not found
int ksu_set_task_mark(pid_t pid, bool mark)
{
struct task_struct *task;
int ret = -ESRCH;
rcu_read_lock();
task = find_task_by_vpid(pid);
if (task) {
get_task_struct(task);
rcu_read_unlock();
if (mark) {
ksu_set_task_tracepoint_flag(task);
pr_info("hook_manager: marked task pid=%d comm=%s\n", pid, task->comm);
} else {
ksu_clear_task_tracepoint_flag(task);
pr_info("hook_manager: unmarked task pid=%d comm=%s\n", pid, task->comm);
}
put_task_struct(task);
ret = 0;
} else {
rcu_read_unlock();
}
return ret;
}
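/*
 * kretprobes on syscall_regfunc()/syscall_unregfunc() let us notice when
 * other users (e.g. ftrace) register or unregister syscall tracepoints, so
 * tracepoint_reg_count stays accurate and the per-task marks can be adjusted
 * accordingly.
 */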
#ifdef CONFIG_KRETPROBES
static struct kretprobe *init_kretprobe(const char *name,
kretprobe_handler_t handler)
{
struct kretprobe *rp = kzalloc(sizeof(struct kretprobe), GFP_KERNEL);
if (!rp)
return NULL;
rp->kp.symbol_name = name;
rp->handler = handler;
rp->data_size = 0;
rp->maxactive = 0;
int ret = register_kretprobe(rp);
pr_info("hook_manager: register_%s kretprobe: %d\n", name, ret);
if (ret) {
kfree(rp);
return NULL;
}
return rp;
}
static void destroy_kretprobe(struct kretprobe **rp_ptr)
{
struct kretprobe *rp = *rp_ptr;
if (!rp)
return;
unregister_kretprobe(rp);
synchronize_rcu();
kfree(rp);
*rp_ptr = NULL;
}
static int syscall_regfunc_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
if (tracepoint_reg_count < 1) {
// when our own tracepoint is being installed, mark our processes
ksu_mark_running_process_locked();
} else if (tracepoint_reg_count == 1) {
// when another tracepoint user registers on top of ours, mark all processes
ksu_mark_all_process();
}
tracepoint_reg_count++;
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
return 0;
}
static int syscall_unregfunc_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
tracepoint_reg_count--;
if (tracepoint_reg_count <= 0) {
// when no tracepoint users are left, unmark all processes
ksu_unmark_all_process();
} else if (tracepoint_reg_count == 1) {
// when only our tracepoint is left, unmark processes we do not track
ksu_mark_running_process_locked();
}
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
return 0;
}
static struct kretprobe *syscall_regfunc_rp = NULL;
static struct kretprobe *syscall_unregfunc_rp = NULL;
#endif
static inline bool check_syscall_fastpath(int nr)
{
switch (nr) {
case __NR_newfstatat:
case __NR_faccessat:
case __NR_execve:
case __NR_setresuid:
case __NR_clone:
case __NR_clone3:
return true;
default:
return false;
}
}
// Unmark init's children that are not zygote, adbd or ksud
int ksu_handle_init_mark_tracker(const char __user **filename_user)
{
char path[64];
if (unlikely(!filename_user))
return 0;
memset(path, 0, sizeof(path));
strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (likely(strstr(path, "/app_process") == NULL && strstr(path, "/adbd") == NULL && strstr(path, "/ksud") == NULL)) {
pr_info("hook_manager: unmark %d exec %s", current->pid, path);
ksu_clear_task_tracepoint_flag_if_needed(current);
}
return 0;
}
#ifdef CONFIG_KSU_MANUAL_SU
#include "manual_su.h"
static inline void ksu_handle_task_alloc(struct pt_regs *regs)
{
ksu_try_escalate_for_uid(current_uid().val);
}
#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
// Generic sys_enter handler that dispatches to specific handlers
static void ksu_sys_enter_handler(void *data, struct pt_regs *regs, long id)
{
if (unlikely(check_syscall_fastpath(id))) {
#ifdef KSU_TP_HOOK
if (ksu_su_compat_enabled) {
// Handle newfstatat
if (id == __NR_newfstatat) {
int *dfd = (int *)&PT_REGS_PARM1(regs);
const char __user **filename_user =
(const char __user **)&PT_REGS_PARM2(regs);
int *flags = (int *)&PT_REGS_SYSCALL_PARM4(regs);
ksu_handle_stat(dfd, filename_user, flags);
return;
}
// Handle faccessat
if (id == __NR_faccessat) {
int *dfd = (int *)&PT_REGS_PARM1(regs);
const char __user **filename_user =
(const char __user **)&PT_REGS_PARM2(regs);
int *mode = (int *)&PT_REGS_PARM3(regs);
ksu_handle_faccessat(dfd, filename_user, mode, NULL);
return;
}
// Handle execve
if (id == __NR_execve) {
const char __user **filename_user =
(const char __user **)&PT_REGS_PARM1(regs);
if (current->pid != 1 && is_init(get_current_cred())) {
ksu_handle_init_mark_tracker(filename_user);
} else {
ksu_handle_execve_sucompat(filename_user, NULL, NULL, NULL);
}
return;
}
}
#endif
// Handle setresuid
if (id == __NR_setresuid) {
uid_t ruid = (uid_t)PT_REGS_PARM1(regs);
uid_t euid = (uid_t)PT_REGS_PARM2(regs);
uid_t suid = (uid_t)PT_REGS_PARM3(regs);
ksu_handle_setresuid(ruid, euid, suid);
return;
}
#ifdef CONFIG_KSU_MANUAL_SU
// Handle task_alloc via clone/fork
if (id == __NR_clone || id == __NR_clone3)
return ksu_handle_task_alloc(regs);
#endif
}
}
#endif
void ksu_syscall_hook_manager_init(void)
{
int ret;
pr_info("hook_manager: ksu_hook_manager_init called\n");
#ifdef CONFIG_KRETPROBES
// Register kretprobe for syscall_regfunc
syscall_regfunc_rp = init_kretprobe("syscall_regfunc", syscall_regfunc_handler);
// Register kretprobe for syscall_unregfunc
syscall_unregfunc_rp = init_kretprobe("syscall_unregfunc", syscall_unregfunc_handler);
#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
ret = register_trace_sys_enter(ksu_sys_enter_handler, NULL);
#ifndef CONFIG_KRETPROBES
ksu_mark_running_process_locked();
#endif
if (ret) {
pr_err("hook_manager: failed to register sys_enter tracepoint: %d\n", ret);
} else {
pr_info("hook_manager: sys_enter tracepoint registered\n");
}
#endif
ksu_setuid_hook_init();
ksu_sucompat_init();
}
void ksu_syscall_hook_manager_exit(void)
{
pr_info("hook_manager: ksu_hook_manager_exit called\n");
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
unregister_trace_sys_enter(ksu_sys_enter_handler, NULL);
tracepoint_synchronize_unregister();
pr_info("hook_manager: sys_enter tracepoint unregistered\n");
#endif
#ifdef CONFIG_KRETPROBES
destroy_kretprobe(&syscall_regfunc_rp);
destroy_kretprobe(&syscall_unregfunc_rp);
#endif
ksu_sucompat_exit();
ksu_setuid_hook_exit();
}

47
kernel/syscall_hook_manager.h Normal file
View file

@ -0,0 +1,47 @@
#ifndef __KSU_H_HOOK_MANAGER
#define __KSU_H_HOOK_MANAGER
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/tty.h>
#include <linux/fs.h>
#include "selinux/selinux.h"
// Hook manager initialization and cleanup
void ksu_syscall_hook_manager_init(void);
void ksu_syscall_hook_manager_exit(void);
// Process marking for tracepoint
void ksu_mark_all_process(void);
void ksu_unmark_all_process(void);
void ksu_mark_running_process(void);
// Per-task mark operations
int ksu_get_task_mark(pid_t pid);
int ksu_set_task_mark(pid_t pid, bool mark);
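// Kernels >= 5.11 track syscall tracing via per-task syscall_work bits;
// older kernels use the TIF_SYSCALL_TRACEPOINT thread flag.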
static inline void ksu_set_task_tracepoint_flag(struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
set_task_syscall_work(t, SYSCALL_TRACEPOINT);
#else
set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
#endif
}
static inline void ksu_clear_task_tracepoint_flag(struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
#else
clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
#endif
}
void ksu_clear_task_tracepoint_flag_if_needed(struct task_struct *t);
#endif

214
kernel/throne_comm.c Normal file
View file

@ -0,0 +1,214 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#include "klog.h"
#include "throne_comm.h"
#include "ksu.h"
#define PROC_UID_SCANNER "ksu_uid_scanner"
#define UID_SCANNER_STATE_FILE "/data/adb/ksu/.uid_scanner"
static struct proc_dir_entry *proc_entry = NULL;
static struct workqueue_struct *scanner_wq = NULL;
static struct work_struct scan_work;
static struct work_struct ksu_state_save_work;
static struct work_struct ksu_state_load_work;
// Signal userspace to rescan
static bool need_rescan = false;
static void rescan_work_fn(struct work_struct *work)
{
// Signal userspace through proc interface
need_rescan = true;
pr_info("requested userspace uid rescan\n");
}
void ksu_request_userspace_scan(void)
{
if (scanner_wq) {
queue_work(scanner_wq, &scan_work);
}
}
void ksu_handle_userspace_update(void)
{
// Called when userspace notifies update complete
need_rescan = false;
pr_info("userspace uid list updated\n");
}
static void do_save_throne_state(struct work_struct *work)
{
struct file *fp;
char state_char = ksu_uid_scanner_enabled ? '1' : '0';
loff_t off = 0;
fp = filp_open(UID_SCANNER_STATE_FILE, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (IS_ERR(fp)) {
pr_err("save_throne_state create file failed: %ld\n", PTR_ERR(fp));
return;
}
if (kernel_write(fp, &state_char, sizeof(state_char), &off) != sizeof(state_char)) {
pr_err("save_throne_state write failed\n");
goto exit;
}
pr_info("throne state saved: %s\n", ksu_uid_scanner_enabled ? "enabled" : "disabled");
exit:
filp_close(fp, 0);
}
void do_load_throne_state(struct work_struct *work)
{
struct file *fp;
char state_char;
loff_t off = 0;
ssize_t ret;
fp = filp_open(UID_SCANNER_STATE_FILE, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_info("throne state file not found, using default: disabled\n");
ksu_uid_scanner_enabled = false;
return;
}
ret = kernel_read(fp, &state_char, sizeof(state_char), &off);
if (ret != sizeof(state_char)) {
pr_err("load_throne_state read err: %zd\n", ret);
ksu_uid_scanner_enabled = false;
goto exit;
}
ksu_uid_scanner_enabled = (state_char == '1');
pr_info("throne state loaded: %s\n", ksu_uid_scanner_enabled ? "enabled" : "disabled");
exit:
filp_close(fp, 0);
}
bool ksu_throne_comm_load_state(void)
{
return ksu_queue_work(&ksu_state_load_work);
}
void ksu_throne_comm_save_state(void)
{
ksu_queue_work(&ksu_state_save_work);
}
static int uid_scanner_show(struct seq_file *m, void *v)
{
if (need_rescan) {
seq_puts(m, "RESCAN\n");
} else {
seq_puts(m, "OK\n");
}
return 0;
}
static int uid_scanner_open(struct inode *inode, struct file *file)
{
return single_open(file, uid_scanner_show, NULL);
}
static ssize_t uid_scanner_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
char cmd[16];
if (count >= sizeof(cmd))
return -EINVAL;
if (copy_from_user(cmd, buffer, count))
return -EFAULT;
cmd[count] = '\0';
// Remove newline if present
if (count > 0 && cmd[count-1] == '\n')
cmd[count-1] = '\0';
if (strcmp(cmd, "UPDATED") == 0) {
ksu_handle_userspace_update();
pr_info("received userspace update notification\n");
}
return count;
}
#ifdef KSU_COMPAT_HAS_PROC_OPS
static const struct proc_ops uid_scanner_proc_ops = {
.proc_open = uid_scanner_open,
.proc_read = seq_read,
.proc_write = uid_scanner_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
#else
static const struct file_operations uid_scanner_proc_ops = {
.owner = THIS_MODULE,
.open = uid_scanner_open,
.read = seq_read,
.write = uid_scanner_write,
.llseek = seq_lseek,
.release = single_release,
};
#endif
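/*
 * Illustrative usage from a shell (sketch): userspace can poll the proc node
 * and acknowledge a completed rescan, e.g.
 *
 *     cat /proc/ksu_uid_scanner        # prints "RESCAN" or "OK"
 *     echo UPDATED > /proc/ksu_uid_scanner
 *
 * Writes with any other payload are accepted but ignored.
 */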
int ksu_throne_comm_init(void)
{
// Create workqueue
scanner_wq = alloc_workqueue("ksu_scanner", WQ_UNBOUND, 1);
if (!scanner_wq) {
pr_err("failed to create scanner workqueue\n");
return -ENOMEM;
}
INIT_WORK(&scan_work, rescan_work_fn);
// Create proc entry
proc_entry = proc_create(PROC_UID_SCANNER, 0600, NULL, &uid_scanner_proc_ops);
if (!proc_entry) {
pr_err("failed to create proc entry\n");
destroy_workqueue(scanner_wq);
return -ENOMEM;
}
pr_info("throne communication initialized\n");
return 0;
}
void ksu_throne_comm_exit(void)
{
if (proc_entry) {
proc_remove(proc_entry);
proc_entry = NULL;
}
if (scanner_wq) {
destroy_workqueue(scanner_wq);
scanner_wq = NULL;
}
pr_info("throne communication cleaned up\n");
}
int ksu_uid_init(void)
{
INIT_WORK(&ksu_state_save_work, do_save_throne_state);
INIT_WORK(&ksu_state_load_work, do_load_throne_state);
return 0;
}
void ksu_uid_exit(void)
{
do_save_throne_state(NULL);
}

22
kernel/throne_comm.h Normal file
View file

@ -0,0 +1,22 @@
#ifndef __KSU_H_THRONE_COMM
#define __KSU_H_THRONE_COMM
#include <linux/types.h>
#include <linux/workqueue.h>
void ksu_request_userspace_scan(void);
void ksu_handle_userspace_update(void);
int ksu_throne_comm_init(void);
void ksu_throne_comm_exit(void);
int ksu_uid_init(void);
void ksu_uid_exit(void);
bool ksu_throne_comm_load_state(void);
void ksu_throne_comm_save_state(void);
void do_load_throne_state(struct work_struct *work);
#endif

567
kernel/throne_tracker.c Normal file
View file

@ -0,0 +1,567 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/stat.h>
#include <linux/namei.h>
#include "allowlist.h"
#include "klog.h" // IWYU pragma: keep
#include "manager.h"
#include "throne_tracker.h"
#include "apk_sign.h"
#include "dynamic_manager.h"
#include "throne_comm.h"
uid_t ksu_manager_uid = KSU_INVALID_UID;
static uid_t locked_manager_uid = KSU_INVALID_UID;
static uid_t locked_dynamic_manager_uid = KSU_INVALID_UID;
#define KSU_UID_LIST_PATH "/data/misc/user_uid/uid_list"
#define SYSTEM_PACKAGES_LIST_PATH "/data/system/packages.list"
struct uid_data {
struct list_head list;
u32 uid;
char package[KSU_MAX_PACKAGE_NAME];
};
// Try to read /data/misc/user_uid/uid_list
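// Each non-empty line is expected to look like "<uid> <package>", e.g.
// "10123 com.example.app"; leading whitespace is skipped and malformed
// lines are ignored.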
static int uid_from_um_list(struct list_head *uid_list)
{
struct file *fp;
char *buf = NULL;
loff_t size, pos = 0;
ssize_t nr;
int cnt = 0;
fp = filp_open(KSU_UID_LIST_PATH, O_RDONLY, 0);
if (IS_ERR(fp))
return -ENOENT;
size = fp->f_inode->i_size;
if (size <= 0) {
filp_close(fp, NULL);
return -ENODATA;
}
buf = kzalloc(size + 1, GFP_ATOMIC);
if (!buf) {
pr_err("uid_list: OOM %lld B\n", size);
filp_close(fp, NULL);
return -ENOMEM;
}
nr = kernel_read(fp, buf, size, &pos);
filp_close(fp, NULL);
if (nr != size) {
pr_err("uid_list: short read %zd/%lld\n", nr, size);
kfree(buf);
return -EIO;
}
buf[size] = '\0';
for (char *line = buf, *next; line; line = next) {
next = strchr(line, '\n');
if (next) *next++ = '\0';
while (*line == ' ' || *line == '\t' || *line == '\r') ++line;
if (!*line) continue;
char *uid_str = strsep(&line, " \t");
char *pkg = line;
if (!pkg) continue;
while (*pkg == ' ' || *pkg == '\t') ++pkg;
if (!*pkg) continue;
u32 uid;
if (kstrtou32(uid_str, 10, &uid)) {
pr_warn_once("uid_list: bad uid <%s>\n", uid_str);
continue;
}
struct uid_data *d = kzalloc(sizeof(*d), GFP_ATOMIC);
if (unlikely(!d)) {
pr_err("uid_list: OOM uid=%u\n", uid);
continue;
}
d->uid = uid;
strscpy(d->package, pkg, KSU_MAX_PACKAGE_NAME);
list_add_tail(&d->list, uid_list);
++cnt;
}
kfree(buf);
pr_info("uid_list: loaded %d entries\n", cnt);
return cnt > 0 ? 0 : -ENODATA;
}
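/*
 * Extract the package name from an apk path of the form
 * ".../<package>-<suffix>/base.apk", i.e. the text between the second-to-last
 * slash and the following '-', e.g. ".../com.example.app-XYZ==/base.apk"
 * yields "com.example.app".
 */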
static int get_pkg_from_apk_path(char *pkg, const char *path)
{
int len = strlen(path);
if (len >= KSU_MAX_PACKAGE_NAME || len < 1)
return -1;
const char *last_slash = NULL;
const char *second_last_slash = NULL;
int i;
for (i = len - 1; i >= 0; i--) {
if (path[i] == '/') {
if (!last_slash) {
last_slash = &path[i];
} else {
second_last_slash = &path[i];
break;
}
}
}
if (!last_slash || !second_last_slash)
return -1;
const char *last_hyphen = strchr(second_last_slash, '-');
if (!last_hyphen || last_hyphen > last_slash)
return -1;
int pkg_len = last_hyphen - second_last_slash - 1;
if (pkg_len >= KSU_MAX_PACKAGE_NAME || pkg_len <= 0)
return -1;
// Copying the package name
strncpy(pkg, second_last_slash + 1, pkg_len);
pkg[pkg_len] = '\0';
return 0;
}
static void crown_manager(const char *apk, struct list_head *uid_data, int signature_index)
{
char pkg[KSU_MAX_PACKAGE_NAME];
if (get_pkg_from_apk_path(pkg, apk) < 0) {
pr_err("Failed to get package name from apk path: %s\n", apk);
return;
}
pr_info("manager pkg: %s, signature_index: %d\n", pkg, signature_index);
#ifdef KSU_MANAGER_PACKAGE
// pkg is `/<real package>`
if (strncmp(pkg, KSU_MANAGER_PACKAGE, sizeof(KSU_MANAGER_PACKAGE))) {
pr_info("manager package is inconsistent with kernel build: %s\n",
KSU_MANAGER_PACKAGE);
return;
}
#endif
struct uid_data *np;
list_for_each_entry(np, uid_data, list) {
if (strncmp(np->package, pkg, KSU_MAX_PACKAGE_NAME) == 0) {
bool is_dynamic = (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2);
if (is_dynamic) {
if (locked_dynamic_manager_uid != KSU_INVALID_UID && locked_dynamic_manager_uid != np->uid) {
pr_info("Unlocking previous dynamic manager UID: %d\n", locked_dynamic_manager_uid);
ksu_remove_manager(locked_dynamic_manager_uid);
locked_dynamic_manager_uid = KSU_INVALID_UID;
}
} else {
if (locked_manager_uid != KSU_INVALID_UID && locked_manager_uid != np->uid) {
pr_info("Unlocking previous manager UID: %d\n", locked_manager_uid);
ksu_invalidate_manager_uid(); // unlock old one
locked_manager_uid = KSU_INVALID_UID;
}
}
pr_info("Crowning %s manager: %s (uid=%d, signature_index=%d)\n",
is_dynamic ? "dynamic" : "traditional", pkg, np->uid, signature_index);
if (is_dynamic) {
ksu_add_manager(np->uid, signature_index);
locked_dynamic_manager_uid = np->uid;
// If there is no traditional manager, set it to the current UID
if (!ksu_is_manager_uid_valid()) {
ksu_set_manager_uid(np->uid);
locked_manager_uid = np->uid;
}
} else {
ksu_set_manager_uid(np->uid); // throne new UID
locked_manager_uid = np->uid; // store locked UID
}
break;
}
}
}
#define DATA_PATH_LEN 384 // 384 is enough for /data/app/<package>/base.apk
struct data_path {
char dirpath[DATA_PATH_LEN];
int depth;
struct list_head list;
};
struct apk_path_hash {
unsigned int hash;
bool exists;
struct list_head list;
};
static struct list_head apk_path_hash_list = LIST_HEAD_INIT(apk_path_hash_list);
struct my_dir_context {
struct dir_context ctx;
struct list_head *data_path_list;
char *parent_dir;
void *private_data;
int depth;
int *stop;
};
// https://docs.kernel.org/filesystems/porting.html
// filldir_t (readdir callbacks) calling conventions have changed: instead of
// returning 0 or -E... they now return bool. false means "no more" (as -E...
// used to) and true means "keep going" (as 0 did in the old convention).
// Rationale: callers never looked at specific -E... values anyway.
// iterate_shared() instances require no changes at all; all in-tree filldir_t
// instances were converted.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true
#define FILLDIR_ACTOR_STOP false
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP -EINVAL
#endif
FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino,
unsigned int d_type)
{
struct my_dir_context *my_ctx =
container_of(ctx, struct my_dir_context, ctx);
char dirpath[DATA_PATH_LEN];
if (!my_ctx) {
pr_err("Invalid context\n");
return FILLDIR_ACTOR_STOP;
}
if (my_ctx->stop && *my_ctx->stop) {
pr_info("Stop searching\n");
return FILLDIR_ACTOR_STOP;
}
if (!strncmp(name, "..", namelen) || !strncmp(name, ".", namelen))
return FILLDIR_ACTOR_CONTINUE; // Skip "." and ".."
if (d_type == DT_DIR && namelen >= 8 && !strncmp(name, "vmdl", 4) &&
!strncmp(name + namelen - 4, ".tmp", 4)) {
pr_info("Skipping directory: %.*s\n", namelen, name);
return FILLDIR_ACTOR_CONTINUE; // Skip staging package
}
if (snprintf(dirpath, DATA_PATH_LEN, "%s/%.*s", my_ctx->parent_dir,
namelen, name) >= DATA_PATH_LEN) {
pr_err("Path too long: %s/%.*s\n", my_ctx->parent_dir, namelen,
name);
return FILLDIR_ACTOR_CONTINUE;
}
if (d_type == DT_DIR && my_ctx->depth > 0 &&
(my_ctx->stop && !*my_ctx->stop)) {
struct data_path *data = kzalloc(sizeof(struct data_path), GFP_ATOMIC);
if (!data) {
pr_err("Failed to allocate memory for %s\n", dirpath);
return FILLDIR_ACTOR_CONTINUE;
}
strscpy(data->dirpath, dirpath, DATA_PATH_LEN);
data->depth = my_ctx->depth - 1;
list_add_tail(&data->list, my_ctx->data_path_list);
} else {
if ((namelen == 8) && (strncmp(name, "base.apk", namelen) == 0)) {
struct apk_path_hash *pos, *n;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
unsigned int hash = full_name_hash(dirpath, strlen(dirpath));
#else
unsigned int hash = full_name_hash(NULL, dirpath, strlen(dirpath));
#endif
list_for_each_entry(pos, &apk_path_hash_list, list) {
if (hash == pos->hash) {
pos->exists = true;
return FILLDIR_ACTOR_CONTINUE;
}
}
int signature_index = -1;
bool is_multi_manager = is_dynamic_manager_apk(
dirpath, &signature_index);
pr_info("Found new base.apk at path: %s, is_multi_manager: %d, signature_index: %d\n",
dirpath, is_multi_manager, signature_index);
// Check for dynamic sign or multi-manager signatures
if (is_multi_manager && (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2)) {
crown_manager(dirpath, my_ctx->private_data, signature_index);
} else if (is_manager_apk(dirpath)) {
crown_manager(dirpath, my_ctx->private_data, 0);
*my_ctx->stop = 1;
}
struct apk_path_hash *apk_data = kzalloc(sizeof(*apk_data), GFP_ATOMIC);
if (apk_data) {
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
}
if (is_manager_apk(dirpath)) {
// Manager found, clear APK cache list
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
list_del(&pos->list);
kfree(pos);
}
}
}
}
return FILLDIR_ACTOR_CONTINUE;
}
void search_manager(const char *path, int depth, struct list_head *uid_data)
{
int i, stop = 0;
struct list_head data_path_list;
INIT_LIST_HEAD(&data_path_list);
unsigned long data_app_magic = 0;
// Initialize APK cache list
struct apk_path_hash *pos, *n;
list_for_each_entry (pos, &apk_path_hash_list, list) {
pos->exists = false;
}
// First depth
struct data_path data;
strscpy(data.dirpath, path, DATA_PATH_LEN);
data.depth = depth;
list_add_tail(&data.list, &data_path_list);
for (i = depth; i >= 0; i--) {
struct data_path *pos, *n;
list_for_each_entry_safe (pos, n, &data_path_list, list) {
struct my_dir_context ctx = { .ctx.actor = my_actor,
.data_path_list = &data_path_list,
.parent_dir = pos->dirpath,
.private_data = uid_data,
.depth = pos->depth,
.stop = &stop };
struct file *file;
if (!stop) {
file = filp_open(pos->dirpath, O_RDONLY | O_NOFOLLOW, 0);
if (IS_ERR(file)) {
pr_err("Failed to open directory: %s, err: %ld\n",
pos->dirpath, PTR_ERR(file));
goto skip_iterate;
}
// grab magic on first folder, which is /data/app
if (!data_app_magic) {
if (file->f_inode->i_sb->s_magic) {
data_app_magic = file->f_inode->i_sb->s_magic;
pr_info("%s: dir: %s got magic! 0x%lx\n", __func__,
pos->dirpath, data_app_magic);
} else {
filp_close(file, NULL);
goto skip_iterate;
}
}
if (file->f_inode->i_sb->s_magic != data_app_magic) {
pr_info("%s: skip: %s magic: 0x%lx expected: 0x%lx\n",
__func__, pos->dirpath,
file->f_inode->i_sb->s_magic, data_app_magic);
filp_close(file, NULL);
goto skip_iterate;
}
iterate_dir(file, &ctx.ctx);
filp_close(file, NULL);
}
skip_iterate:
list_del(&pos->list);
if (pos != &data)
kfree(pos);
}
}
// Remove stale cached APK entries
list_for_each_entry_safe (pos, n, &apk_path_hash_list, list) {
if (!pos->exists) {
list_del(&pos->list);
kfree(pos);
}
}
}
static bool is_uid_exist(uid_t uid, char *package, void *data)
{
struct list_head *list = (struct list_head *)data;
struct uid_data *np;
bool exist = false;
list_for_each_entry (np, list, list) {
if (np->uid == uid % 100000 &&
strncmp(np->package, package, KSU_MAX_PACKAGE_NAME) == 0) {
exist = true;
break;
}
}
return exist;
}
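/*
 * Resolve the current manager(s) and prune the allowlist:
 * 1. load the uid/package list (the uid scanner file when enabled, otherwise
 *    packages.list),
 * 2. check that the locked manager / dynamic manager UIDs still exist and
 *    unlock them if their APKs were removed,
 * 3. rescan /data/app for manager APKs when needed,
 * 4. prune allowlist entries whose uid/package pair no longer exists.
 */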
void track_throne(bool prune_only)
{
struct list_head uid_list;
struct uid_data *np, *n;
struct file *fp;
char chr = 0;
loff_t pos = 0;
loff_t line_start = 0;
char buf[KSU_MAX_PACKAGE_NAME];
static bool manager_exist = false;
static bool dynamic_manager_exist = false;
int current_manager_uid = ksu_get_manager_uid() % 100000;
// init uid list head
INIT_LIST_HEAD(&uid_list);
if (ksu_uid_scanner_enabled) {
pr_info("Scanning %s directory..\n", KSU_UID_LIST_PATH);
if (uid_from_um_list(&uid_list) == 0) {
pr_info("Loaded UIDs from %s success\n", KSU_UID_LIST_PATH);
goto uid_ready;
}
pr_warn("%s read failed, fallback to %s\n",
KSU_UID_LIST_PATH, SYSTEM_PACKAGES_LIST_PATH);
}
{
fp = filp_open(SYSTEM_PACKAGES_LIST_PATH, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("%s: open " SYSTEM_PACKAGES_LIST_PATH " failed: %ld\n", __func__, PTR_ERR(fp));
return;
}
for (;;) {
ssize_t count =
kernel_read(fp, &chr, sizeof(chr), &pos);
if (count != sizeof(chr))
break;
if (chr != '\n')
continue;
count = kernel_read(fp, buf, sizeof(buf),
&line_start);
struct uid_data *data =
kzalloc(sizeof(struct uid_data), GFP_ATOMIC);
if (!data) {
filp_close(fp, 0);
goto out;
}
char *tmp = buf;
const char *delim = " ";
char *package = strsep(&tmp, delim);
char *uid = strsep(&tmp, delim);
if (!uid || !package) {
pr_err("update_uid: package or uid is NULL!\n");
break;
}
u32 res;
if (kstrtou32(uid, 10, &res)) {
pr_err("update_uid: uid parse err\n");
break;
}
data->uid = res;
strscpy(data->package, package, KSU_MAX_PACKAGE_NAME);
list_add_tail(&data->list, &uid_list);
// reset line start
line_start = pos;
}
filp_close(fp, 0);
}
uid_ready:
if (prune_only)
goto prune;
// first, check if manager_uid exist!
list_for_each_entry(np, &uid_list, list) {
if (np->uid == current_manager_uid) {
manager_exist = true;
break;
}
}
if (!manager_exist && locked_manager_uid != KSU_INVALID_UID) {
pr_info("Manager APK removed, unlock previous UID: %d\n",
locked_manager_uid);
ksu_invalidate_manager_uid();
locked_manager_uid = KSU_INVALID_UID;
}
// Check if the Dynamic Manager exists (only check locked UIDs)
if (ksu_is_dynamic_manager_enabled() &&
locked_dynamic_manager_uid != KSU_INVALID_UID) {
list_for_each_entry(np, &uid_list, list) {
if (np->uid == locked_dynamic_manager_uid) {
dynamic_manager_exist = true;
break;
}
}
if (!dynamic_manager_exist) {
pr_info("Dynamic manager APK removed, unlock previous UID: %d\n",
locked_dynamic_manager_uid);
ksu_remove_manager(locked_dynamic_manager_uid);
locked_dynamic_manager_uid = KSU_INVALID_UID;
}
}
bool need_search = !manager_exist;
if (ksu_is_dynamic_manager_enabled() && !dynamic_manager_exist)
need_search = true;
if (need_search) {
pr_info("Searching for manager(s)...\n");
search_manager("/data/app", 2, &uid_list);
pr_info("Manager search finished\n");
}
prune:
// then prune the allowlist
ksu_prune_allowlist(is_uid_exist, &uid_list);
out:
// free uid_list
list_for_each_entry_safe(np, n, &uid_list, list) {
list_del(&np->list);
kfree(np);
}
}
void ksu_throne_tracker_init(void)
{
// nothing to do
}
void ksu_throne_tracker_exit(void)
{
// nothing to do
}

10
kernel/throne_tracker.h Normal file
View file

@ -0,0 +1,10 @@
#ifndef __KSU_H_UID_OBSERVER
#define __KSU_H_UID_OBSERVER
#include <linux/types.h>
void ksu_throne_tracker_init(void);
void ksu_throne_tracker_exit(void);
void track_throne(bool prune_only);
#endif

242
kernel/umount_manager.c Normal file
View file

@ -0,0 +1,242 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/mount.h>
#include <linux/cred.h>
#include "klog.h"
#include "kernel_umount.h"
#include "umount_manager.h"
static struct umount_manager g_umount_mgr = {
.entry_count = 0,
.max_entries = 64,
};
static void try_umount_path(struct umount_entry *entry)
{
try_umount(entry->path, entry->flags);
}
static struct umount_entry *find_entry_locked(const char *path)
{
struct umount_entry *entry;
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (strcmp(entry->path, path) == 0) {
return entry;
}
}
return NULL;
}
int ksu_umount_manager_init(void)
{
INIT_LIST_HEAD(&g_umount_mgr.entry_list);
spin_lock_init(&g_umount_mgr.lock);
return 0;
}
void ksu_umount_manager_exit(void)
{
struct umount_entry *entry, *tmp;
unsigned long flags;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry_safe(entry, tmp, &g_umount_mgr.entry_list, list) {
list_del(&entry->list);
kfree(entry);
g_umount_mgr.entry_count--;
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
pr_info("Umount manager cleaned up\n");
}
int ksu_umount_manager_add(const char *path, int flags, bool is_default)
{
struct umount_entry *entry;
unsigned long irqflags;
int ret = 0;
if (flags == -1)
flags = MNT_DETACH;
if (!path || strlen(path) == 0 || strlen(path) >= 256) {
return -EINVAL;
}
spin_lock_irqsave(&g_umount_mgr.lock, irqflags);
if (g_umount_mgr.entry_count >= g_umount_mgr.max_entries) {
pr_err("Umount manager: max entries reached\n");
ret = -ENOMEM;
goto out;
}
if (find_entry_locked(path)) {
pr_warn("Umount manager: path already exists: %s\n", path);
ret = -EEXIST;
goto out;
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
ret = -ENOMEM;
goto out;
}
strncpy(entry->path, path, sizeof(entry->path) - 1);
entry->flags = flags;
entry->state = UMOUNT_STATE_IDLE;
entry->is_default = is_default;
entry->ref_count = 0;
list_add_tail(&entry->list, &g_umount_mgr.entry_list);
g_umount_mgr.entry_count++;
pr_info("Umount manager: added %s entry: %s\n",
is_default ? "default" : "custom", path);
out:
spin_unlock_irqrestore(&g_umount_mgr.lock, irqflags);
return ret;
}
int ksu_umount_manager_remove(const char *path)
{
struct umount_entry *entry;
unsigned long flags;
int ret = 0;
if (!path) {
return -EINVAL;
}
spin_lock_irqsave(&g_umount_mgr.lock, flags);
entry = find_entry_locked(path);
if (!entry) {
ret = -ENOENT;
goto out;
}
if (entry->is_default) {
pr_err("Umount manager: cannot remove default entry: %s\n", path);
ret = -EPERM;
goto out;
}
if (entry->state == UMOUNT_STATE_BUSY || entry->ref_count > 0) {
pr_err("Umount manager: entry is busy: %s\n", path);
ret = -EBUSY;
goto out;
}
list_del(&entry->list);
g_umount_mgr.entry_count--;
kfree(entry);
pr_info("Umount manager: removed entry: %s\n", path);
out:
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
return ret;
}
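/*
 * Three-phase walk: grab a reference on each idle entry under the lock,
 * call try_umount() for those entries without holding the spinlock, then
 * drop the references again under the lock.
 */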
void ksu_umount_manager_execute_all(const struct cred *cred)
{
struct umount_entry *entry;
unsigned long flags;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (entry->state == UMOUNT_STATE_IDLE) {
entry->ref_count++;
}
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (entry->ref_count > 0 && entry->state == UMOUNT_STATE_IDLE) {
try_umount_path(entry);
}
}
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (entry->ref_count > 0) {
entry->ref_count--;
}
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
}
int ksu_umount_manager_get_entries(struct ksu_umount_entry_info __user *entries, u32 *count)
{
struct umount_entry *entry;
struct ksu_umount_entry_info *kbuf;
unsigned long flags;
u32 idx = 0;
u32 max_count = *count;
if (max_count > g_umount_mgr.max_entries)
max_count = g_umount_mgr.max_entries;
if (!max_count) {
*count = 0;
return 0;
}
/* copy_to_user() may fault and sleep, so it must not run under the
 * spinlock; snapshot the list into a kernel buffer first. */
kbuf = kcalloc(max_count, sizeof(*kbuf), GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (idx >= max_count)
break;
strncpy(kbuf[idx].path, entry->path, sizeof(kbuf[idx].path) - 1);
kbuf[idx].flags = entry->flags;
kbuf[idx].is_default = entry->is_default;
kbuf[idx].state = entry->state;
kbuf[idx].ref_count = entry->ref_count;
idx++;
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
if (idx && copy_to_user(entries, kbuf, idx * sizeof(*kbuf))) {
kfree(kbuf);
return -EFAULT;
}
kfree(kbuf);
*count = idx;
return 0;
}
int ksu_umount_manager_clear_custom(void)
{
struct umount_entry *entry, *tmp;
unsigned long flags;
u32 cleared = 0;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry_safe(entry, tmp, &g_umount_mgr.entry_list, list) {
if (!entry->is_default && entry->state == UMOUNT_STATE_IDLE && entry->ref_count == 0) {
list_del(&entry->list);
kfree(entry);
g_umount_mgr.entry_count--;
cleared++;
}
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
pr_info("Umount manager: cleared %u custom entries\n", cleared);
return 0;
}

63
kernel/umount_manager.h Normal file
View file

@ -0,0 +1,63 @@
#ifndef __KSU_H_UMOUNT_MANAGER
#define __KSU_H_UMOUNT_MANAGER
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
struct cred;
enum umount_entry_state {
UMOUNT_STATE_IDLE = 0,
UMOUNT_STATE_ACTIVE = 1,
UMOUNT_STATE_BUSY = 2,
};
struct umount_entry {
struct list_head list;
char path[256];
int flags;
enum umount_entry_state state;
bool is_default;
u32 ref_count;
};
struct umount_manager {
struct list_head entry_list;
spinlock_t lock;
u32 entry_count;
u32 max_entries;
};
enum umount_manager_op {
UMOUNT_OP_ADD = 0,
UMOUNT_OP_REMOVE = 1,
UMOUNT_OP_LIST = 2,
UMOUNT_OP_CLEAR_CUSTOM = 3,
};
struct ksu_umount_manager_cmd {
__u32 operation;
char path[256];
__s32 flags;
__u32 count;
__aligned_u64 entries_ptr;
};
struct ksu_umount_entry_info {
char path[256];
__s32 flags;
__u8 is_default;
__u32 state;
__u32 ref_count;
};
int ksu_umount_manager_init(void);
void ksu_umount_manager_exit(void);
int ksu_umount_manager_add(const char *path, int flags, bool is_default);
int ksu_umount_manager_remove(const char *path);
void ksu_umount_manager_execute_all(const struct cred *cred);
int ksu_umount_manager_get_entries(struct ksu_umount_entry_info __user *entries, u32 *count);
int ksu_umount_manager_clear_custom(void);
#endif // __KSU_H_UMOUNT_MANAGER