Repository: matsuzaka-yuki/FolkPatch Branch: main Commit: f62ce3b9a7ce Files: 309 Total size: 3.7 MB Directory structure: gitextract_v7dadxbv/ ├── .gitattributes ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.yml │ │ ├── config.yml │ │ └── feature_request.yml │ ├── actions/ │ │ └── setup-build-env/ │ │ └── action.yml │ ├── dependabot.yml │ └── workflows/ │ ├── CI_up.yml │ └── build.yml ├── .gitignore ├── Build-Debug.bat ├── Build-Release.bat ├── LICENSE ├── README.md ├── README_EN.md ├── README_JA.md ├── apd/ │ ├── .gitignore │ ├── Cargo.toml │ ├── build.rs │ └── src/ │ ├── apd.rs │ ├── assets.rs │ ├── banner │ ├── cli.rs │ ├── defs.rs │ ├── event.rs │ ├── install_jq.sh │ ├── installer.sh │ ├── installer_bind.sh │ ├── lua.rs │ ├── magic_mount.rs │ ├── main.rs │ ├── metamodule.rs │ ├── module.rs │ ├── module_config.rs │ ├── package.rs │ ├── pty.rs │ ├── resetprop.rs │ ├── restorecon.rs │ ├── sepolicy.rs │ ├── supercall.rs │ └── utils.rs ├── app/ │ ├── .gitignore │ ├── build.gradle.kts │ ├── libs/ │ │ └── arm64-v8a/ │ │ ├── .gitignore │ │ ├── libkpatch.so.version │ │ └── libkptools.so.version │ ├── proguard-rules.pro │ └── src/ │ └── main/ │ ├── AndroidManifest.xml │ ├── aidl/ │ │ └── me/ │ │ └── bmax/ │ │ └── apatch/ │ │ └── IAPRootService.aidl │ ├── assets/ │ │ ├── .gitignore │ │ ├── InstallAP.sh │ │ ├── UninstallAP.sh │ │ ├── boot_extract.sh │ │ ├── boot_flash.sh │ │ ├── boot_patch.sh │ │ ├── boot_unpatch.sh │ │ ├── jq/ │ │ │ └── jq │ │ ├── kpimg.version │ │ └── util_functions.sh │ ├── cpp/ │ │ ├── CMakeLists.txt │ │ ├── apjni.cpp │ │ ├── apjni.hpp │ │ ├── jni_helper.hpp │ │ ├── security.cpp │ │ ├── security.hpp │ │ ├── supercall.h │ │ ├── type_traits.hpp │ │ ├── uapi/ │ │ │ └── scdefs.h │ │ └── version │ ├── java/ │ │ └── me/ │ │ └── bmax/ │ │ └── apatch/ │ │ ├── APatchApp.kt │ │ ├── Natives.kt │ │ ├── data/ │ │ │ └── ScriptInfo.kt │ │ ├── services/ │ │ │ └── RootServices.java │ │ ├── ui/ │ │ │ ├── CrashHandleActivity.kt │ │ │ ├── MainActivity.kt │ │ │ ├── 
WebUIActivity.kt │ │ │ ├── component/ │ │ │ │ ├── Dialog.kt │ │ │ │ ├── DropdownMenu.kt │ │ │ │ ├── ExpressiveCard.kt │ │ │ │ ├── ExpressiveSwitch.kt │ │ │ │ ├── FilePicker.kt │ │ │ │ ├── KeyEventBlocker.kt │ │ │ │ ├── KpmAutoLoadConfig.kt │ │ │ │ ├── LoadingIndicator.kt │ │ │ │ ├── ModuleCardComponents.kt │ │ │ │ ├── SearchBar.kt │ │ │ │ ├── SectionHeader.kt │ │ │ │ ├── SegmentedControl.kt │ │ │ │ ├── SettingsItem.kt │ │ │ │ ├── SplicedColumnGroup.kt │ │ │ │ ├── SplicedLazyColumn.kt │ │ │ │ ├── ThemeColorPicker.kt │ │ │ │ ├── ThemeModeSelector.kt │ │ │ │ ├── ToggleSettingCard.kt │ │ │ │ ├── TwoColumnGrid.kt │ │ │ │ ├── UmountConfig.kt │ │ │ │ ├── UpdateDialog.kt │ │ │ │ ├── WarningCard.kt │ │ │ │ └── chart/ │ │ │ │ ├── ChartUtils.kt │ │ │ │ ├── ModulePieChart.kt │ │ │ │ ├── StorageColumnChart.kt │ │ │ │ ├── SystemAreaChart.kt │ │ │ │ └── SystemLineChart.kt │ │ │ ├── model/ │ │ │ │ └── ApiMarketplaceItem.kt │ │ │ ├── screen/ │ │ │ │ ├── APM.kt │ │ │ │ ├── AboutScreen.kt │ │ │ │ ├── ApiMarketplace.kt │ │ │ │ ├── ApmBulkInstallScreen.kt │ │ │ │ ├── AppProfile.kt │ │ │ │ ├── BannerApiService.kt │ │ │ │ ├── BottomBarDestination.kt │ │ │ │ ├── ExecuteAPMAction.kt │ │ │ │ ├── Home.kt │ │ │ │ ├── HomeCircle.kt │ │ │ │ ├── HomeStats.kt │ │ │ │ ├── HomeV2.kt │ │ │ │ ├── HomeV3.kt │ │ │ │ ├── HomeV4.kt │ │ │ │ ├── Install.kt │ │ │ │ ├── InstallModeSelect.kt │ │ │ │ ├── KPM.kt │ │ │ │ ├── KpmAutoLoadConfigScreen.kt │ │ │ │ ├── MyThemesScreen.kt │ │ │ │ ├── OnlineKPMScreen.kt │ │ │ │ ├── OnlineModuleScreen.kt │ │ │ │ ├── OnlineScriptScreen.kt │ │ │ │ ├── Patches.kt │ │ │ │ ├── ScriptExecutionLogScreen.kt │ │ │ │ ├── ScriptLibrary.kt │ │ │ │ ├── Settings.kt │ │ │ │ ├── SuAuditLogScreen.kt │ │ │ │ ├── SuperUser.kt │ │ │ │ ├── ThemeStore.kt │ │ │ │ └── settings/ │ │ │ │ ├── AppearanceSettings.kt │ │ │ │ ├── AppearanceSettingsScreen.kt │ │ │ │ ├── BackupSettings.kt │ │ │ │ ├── BackupSettingsScreen.kt │ │ │ │ ├── BehaviorSettings.kt │ │ │ │ ├── BehaviorSettingsScreen.kt │ │ │ │ ├── 
FunctionSettings.kt │ │ │ │ ├── FunctionSettingsScreen.kt │ │ │ │ ├── GeneralSettings.kt │ │ │ │ ├── GeneralSettingsScreen.kt │ │ │ │ ├── LanguagePickerScreen.kt │ │ │ │ ├── ModuleSettings.kt │ │ │ │ ├── ModuleSettingsScreen.kt │ │ │ │ ├── MultimediaSettings.kt │ │ │ │ ├── MultimediaSettingsScreen.kt │ │ │ │ ├── SecuritySettings.kt │ │ │ │ ├── SecuritySettingsScreen.kt │ │ │ │ └── SettingsShared.kt │ │ │ ├── theme/ │ │ │ │ ├── AmberTheme.kt │ │ │ │ ├── BackgroundConfig.kt │ │ │ │ ├── BackgroundLayer.kt │ │ │ │ ├── BackupConfig.kt │ │ │ │ ├── BlueGreyTheme.kt │ │ │ │ ├── BlueTheme.kt │ │ │ │ ├── BrownTheme.kt │ │ │ │ ├── CardManage.kt │ │ │ │ ├── CyanTheme.kt │ │ │ │ ├── DeepOrangeTheme.kt │ │ │ │ ├── DeepPurpleTheme.kt │ │ │ │ ├── FontConfig.kt │ │ │ │ ├── GreenTheme.kt │ │ │ │ ├── IndigoTheme.kt │ │ │ │ ├── InkWashTheme.kt │ │ │ │ ├── LightBlueTheme.kt │ │ │ │ ├── LightGreenTheme.kt │ │ │ │ ├── LimeTheme.kt │ │ │ │ ├── MusicConfig.kt │ │ │ │ ├── OrangeTheme.kt │ │ │ │ ├── PinkTheme.kt │ │ │ │ ├── PurpleTheme.kt │ │ │ │ ├── RedTheme.kt │ │ │ │ ├── SakuraTheme.kt │ │ │ │ ├── SoundEffectConfig.kt │ │ │ │ ├── TealTheme.kt │ │ │ │ ├── Theme.kt │ │ │ │ ├── ThemeManager.kt │ │ │ │ ├── Type.kt │ │ │ │ ├── VibrationConfig.kt │ │ │ │ └── YellowTheme.kt │ │ │ ├── viewmodel/ │ │ │ │ ├── APModuleViewModel.kt │ │ │ │ ├── ApiMarketplaceViewModel.kt │ │ │ │ ├── DashboardViewModel.kt │ │ │ │ ├── KPModel.kt │ │ │ │ ├── KPModuleViewModel.kt │ │ │ │ ├── OnlineKPMViewModel.kt │ │ │ │ ├── OnlineModuleViewModel.kt │ │ │ │ ├── OnlineScriptViewModel.kt │ │ │ │ ├── PatchesViewModel.kt │ │ │ │ ├── ScriptLibraryViewModel.kt │ │ │ │ ├── SuperUserViewModel.kt │ │ │ │ └── ThemeStoreViewModel.kt │ │ │ └── webui/ │ │ │ ├── AppIconUtil.kt │ │ │ ├── MimeUtil.java │ │ │ ├── MonetColorsProvider.kt │ │ │ ├── SuFilePathHandler.java │ │ │ └── WebViewInterface.kt │ │ └── util/ │ │ ├── APatchCli.kt │ │ ├── APatchKeyHelper.java │ │ ├── AppData.kt │ │ ├── BackupLogManager.kt │ │ ├── BiometricUtils.kt │ │ 
├── BulkInstallManager.kt │ │ ├── ComposePrefs.kt │ │ ├── DPIUtils.kt │ │ ├── DeviceInfoUtils.kt │ │ ├── Downloader.kt │ │ ├── FolkApiClient.kt │ │ ├── HanziToPinyin.java │ │ ├── HardwareMonitor.kt │ │ ├── IOStreamUtils.kt │ │ ├── LauncherIconUtils.kt │ │ ├── LogEvent.kt │ │ ├── ModuleBackupUtils.kt │ │ ├── ModuleShortcut.kt │ │ ├── MusicManager.kt │ │ ├── PermissionUtils.kt │ │ ├── PkgConfig.kt │ │ ├── SafeFileProvider.kt │ │ ├── SafeUriResolver.kt │ │ ├── ScriptLibraryManager.kt │ │ ├── SoundEffectManager.kt │ │ ├── SuAuditLog.kt │ │ ├── ThemeDownloader.kt │ │ ├── UpdateChecker.kt │ │ ├── Version.kt │ │ ├── VibrationManager.kt │ │ ├── WebDavUtils.kt │ │ └── ui/ │ │ ├── APDialogBlurBehindUtils.kt │ │ ├── AnsiUtils.kt │ │ ├── CompositionProvider.kt │ │ ├── GlassEffectHelper.kt │ │ ├── HomeBottomSpacer.kt │ │ ├── HyperlinkText.kt │ │ ├── NavigationBarsSpacer.kt │ │ └── ToastExt.kt │ └── res/ │ ├── drawable/ │ │ ├── device_mobile_down.xml │ │ ├── github.xml │ │ ├── ic_clear_background.xml │ │ ├── ic_custom_background.xml │ │ ├── ic_launcher_foreground_alt.xml │ │ ├── ic_launcher_monochrome.xml │ │ ├── ic_launcher_monochrome_alt.xml │ │ ├── info_circle_filled.xml │ │ ├── package_import.xml │ │ ├── play_circle.xml │ │ ├── settings.xml │ │ ├── telegram.xml │ │ ├── trash.xml │ │ └── webui.xml │ ├── mipmap-anydpi-v26/ │ │ ├── ic_launcher.xml │ │ ├── ic_launcher_alt.xml │ │ ├── ic_launcher_alt_round.xml │ │ └── ic_launcher_round.xml │ ├── resources.properties │ ├── values/ │ │ ├── arrays.xml │ │ ├── colors.xml │ │ ├── ic_launcher_background.xml │ │ ├── strings.xml │ │ └── themes.xml │ ├── values-ar/ │ │ └── strings.xml │ ├── values-es/ │ │ └── strings.xml │ ├── values-in/ │ │ └── strings.xml │ ├── values-ja/ │ │ └── strings.xml │ ├── values-ko/ │ │ └── strings.xml │ ├── values-mgl/ │ │ └── strings.xml │ ├── values-night/ │ │ └── themes.xml │ ├── values-pl/ │ │ └── strings.xml │ ├── values-pt-rBR/ │ │ └── strings.xml │ ├── values-ru/ │ │ └── strings.xml │ ├── values-tr-rTR/ 
│ │ └── strings.xml │ ├── values-vi/ │ │ └── strings.xml │ ├── values-zh-rAG/ │ │ └── strings.xml │ ├── values-zh-rAT/ │ │ └── strings.xml │ ├── values-zh-rCK/ │ │ └── strings.xml │ ├── values-zh-rCN/ │ │ └── strings.xml │ ├── values-zh-rMC/ │ │ └── strings.xml │ ├── values-zh-rTW/ │ │ └── strings.xml │ ├── values-zh-rWC/ │ │ └── strings.xml │ └── xml/ │ ├── backup_rules.xml │ ├── data_extraction_rules.xml │ ├── file_paths.xml │ └── network_security_config.xml ├── auth.properties.template ├── build.gradle.kts ├── fastlane/ │ └── metadata/ │ └── android/ │ ├── en-US/ │ │ ├── full_description.txt │ │ └── short_description.txt │ └── pl-PL/ │ ├── full_description.txt │ └── short_description.txt ├── fpd/ │ ├── Cargo.toml │ └── src/ │ ├── main.rs │ ├── prop_patch.rs │ └── umount.rs ├── gradle/ │ ├── libs.versions.toml │ └── wrapper/ │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradle.properties ├── gradlew ├── gradlew.bat ├── keystore.properties.template ├── local.properties.template ├── scripts/ │ ├── Build-Debug.sh │ ├── Build-Release.sh │ ├── init-wsl.sh │ ├── setup-wsl.sh │ ├── update_binary.sh │ └── update_script.sh └── settings.gradle.kts ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ # Set the default behavior, in case people don't have core.autocrlf set. * text eol=lf # Explicitly declare text files you want to always be normalized and converted # to native line endings on checkout. # *.c text # *.h text # Declare files that will always have CRLF line endings on checkout. *.cmd text eol=crlf *.bat text eol=crlf # Denote all files that are truly binary and should not be modified. 
tools/** binary *.jar binary *.exe binary *.apk binary *.png binary *.jpg binary *.ttf binary *.so binary *.wav binary # Help GitHub detect languages native/jni/external/** linguist-vendored native/jni/systemproperties/** linguist-language=C++ ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.yml ================================================ name: Bug report | 反馈 Bug description: Report bugs or unexpected behavior | 报告错误或未预料的行为 labels: [bug] body: - type: markdown attributes: value: | Thanks for reporting issues of APatch! To better assist you, please provide the following information. To avoid duplicate issues, please use English in the title. 感谢给 FolkPatch 汇报问题! 为了使我们更好地帮助你,请提供以下信息。 为了防止重复汇报,标题请务必使用英文。 - type: checkboxes attributes: label: Please check before submitting an issue | 在提交 Issue 前请检查 options: - label: I searched the issues and didn't found anything relevant | 我已经搜索了 Issues 列表,没有发现于本问题相关内容 required: true - label: If the patch fails or the image cannot be booted after flashing the new boot.img, visit KernelPatch to clarify your doubts | 修复失败或刷入修补后镜像不能启动,请前往 KernelPatch 提问 required: true - label: I will upload the bug report file in APatch Manager > Settings > Send logs | 我会上传 Bug Report 文件从 APatch 管理器 > 设置 > 发送日志 required: true - label: I know how to reproduce the issue, which might not be specific to my device | 我知道如何重新复现这个问题 required: false - type: checkboxes id: latest attributes: label: Version requirements | 版本要求 options: - label: I'm using the latest CI version of APatch Manager | 我正在使用最新 CI 版本 required: true - type: textarea attributes: label: Bug description | 描述 Bug description: | Please enter a clear and concise description of the bug. 对 Bug 的清晰简洁的描述。 validations: required: true - type: textarea attributes: label: Reproduce method | 复现方法 description: | Steps to reproduce the bug. 复现的步骤。 placeholder: | - 1. Go to... - 2. Click on... - 3. Scroll down to... - 4. 
See error validations: required: true - type: textarea attributes: label: Expected behavior | 预期行为 description: | Please enter a clear and concise description of what you expected to happen. 对你期望发生的行为进行清晰简洁的描述。 validations: required: true - type: textarea attributes: label: Actual behavior | 实际行为 description: | Tell us what actually happened. 告诉我们实际发生了什么。 validations: required: true - type: textarea attributes: label: Screenshots | 截图 description: | If possible, add screenshots to help explain your issue. 如果可以的话,添加截图可以帮你解释问题。 - type: textarea attributes: label: Logs | 日志 description: | If possible, add the crash log to help us find your issue. 如果可以的话,添加崩溃日志可以帮助我们找到问题。 - type: input attributes: label: Device name | 设备名称 validations: required: true - type: input attributes: label: OS version | 系统版本 validations: required: true - type: input attributes: label: APatch version | FolkPatch 版本 validations: required: true - type: input attributes: label: Kernel version | 内核版本 validations: required: true - type: input attributes: label: KernelPatch version | KernelPatch 版本 validations: required: true - type: textarea attributes: label: Other information | 其他信息 description: | Add any information about the issue. 添加关于问题的任何信息。 placeholder: | Upload logs in .zip format by clicking the bottom bar. 
Uploading logs to other websites or using external links isn't allowed 点击文本框底栏上传日志压缩包,禁止上传到其它网站或使用外链提供日志 validations: required: true ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: Ask a question | 提问 url: https://github.com/bmax121/APatch/discussions/new?category=Q-A about: If you've any questions, ask them here | 如果有任何疑问请在这里提问 - name: Official Telegram channel | 官方 Telegram 频道 url: https://t.me/FolkPatch about: Subscribe to receive releases and announcements | 可以订阅通知和发行版 ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.yml ================================================ --- name: Feature request | 新特性请求 description: Suggest an idea for this project | 提出建议 labels: [enhancement] body: - type: textarea attributes: label: Is your request related to a specific issue? | 你的请求是否与某个问题相关? description: | Please enter a clear and concise description of the issue. 请清晰准确表述该问题。 validations: required: true - type: textarea attributes: label: Describe the solution you'd like | 描述你想要的解决方案 description: | Please enter a clear and concise description of what you'd like. 请清晰准确描述新特性的预期行为。 validations: required: true - type: textarea attributes: label: Describe the alternatives you've considered | 描述您考虑过的备选方案 description: | Please enter a clear and concise description of any alternative solutions or features you've considered. 对您考虑过的任何替代解决方案或功能的清晰简洁的描述。 validations: required: true - type: textarea attributes: label: Other information | 其他信息 description: | Add any information or screenshots about the feature request. 
其他关于新特性的信息或者截图。 validations: required: false ================================================ FILE: .github/actions/setup-build-env/action.yml ================================================ name: Setup Build Environment description: Install all build dependencies (Java, Android SDK, Gradle, Rust toolchain) runs: using: composite steps: - uses: actions/setup-java@v4 with: distribution: temurin java-version: '21' - uses: android-actions/setup-android@v3 - uses: gradle/actions/setup-gradle@v3 - uses: actions-rs/toolchain@v1 with: toolchain: stable target: aarch64-linux-android override: true - name: Install cargo-ndk shell: bash run: cargo install cargo-ndk - name: Grant execute permission shell: bash run: chmod +x gradlew - uses: actions/cache@v4 with: path: | ~/.gradle/caches ~/.gradle/wrapper key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }} restore-keys: ${{ runner.os }}-gradle- ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: gradle directory: "/" schedule: interval: daily target-branch: main registries: - maven-google - gradle-plugin groups: maven-dependencies: patterns: - "*" - package-ecosystem: github-actions target-branch: main directory: / schedule: interval: daily groups: action-dependencies: patterns: - "*" - package-ecosystem: cargo target-branch: main directory: apd/ schedule: interval: daily allow: - dependency-type: "all" groups: rust-dependencies: patterns: - "*" registries: maven-google: type: maven-repository url: "https://dl.google.com/dl/android/maven2/" gradle-plugin: type: maven-repository url: "https://plugins.gradle.org/m2/" ================================================ FILE: .github/workflows/CI_up.yml ================================================ name: CI Upload to Telegram on: workflow_call: inputs: artifact_names: required: true type: string description: 'Comma-separated 
artifact names to download' message: required: false type: string default: 'FolkPatch Build' description: 'Telegram message caption' jobs: upload: name: Upload to Telegram runs-on: ubuntu-latest steps: - name: Check Telegram config id: tg_check run: | if [ -n "${{ secrets.TELEGRAM_BOT_TOKEN }}" ] && [ -n "${{ secrets.TELEGRAM_CHAT_ID }}" ]; then echo "enabled=true" >> "$GITHUB_OUTPUT" else echo "::notice::Telegram secrets not configured, skipping upload" fi - name: Checkout repository if: steps.tg_check.outputs.enabled == 'true' uses: actions/checkout@v5 - name: Download artifacts if: steps.tg_check.outputs.enabled == 'true' uses: actions/download-artifact@v4 with: pattern: folkpatch-* path: artifacts merge-multiple: true - name: Send to Telegram if: steps.tg_check.outputs.enabled == 'true' run: | FILES=$(find artifacts -type f 2>/dev/null | sort) FILE_COUNT=$(echo "$FILES" | grep -c . || true) if [ "$FILE_COUNT" -eq 0 ]; then echo "::error::No files found in artifacts" exit 1 fi CAPTION="${{ inputs.message }} Branch: ${{ github.ref_name }} Commit: ${{ github.sha }} Run: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" if [ "$FILE_COUNT" -eq 1 ]; then curl -sf -X POST "https://api.telegram.org/bot${{ secrets.TELEGRAM_BOT_TOKEN }}/sendDocument" \ -F "chat_id=${{ secrets.TELEGRAM_CHAT_ID }}" \ -F "document=@$FILES" \ -F "caption=${CAPTION}" else FILE_ARR=() while IFS= read -r f; do FILE_ARR+=("$f") done <<< "$FILES" TOTAL=${#FILE_ARR[@]} MEDIA_JSON="[" for i in "${!FILE_ARR[@]}"; do FNAME=$(basename "${FILE_ARR[$i]}") if [ $i -gt 0 ]; then MEDIA_JSON+="," fi if [ $((i + 1)) -eq $TOTAL ]; then MEDIA_JSON+="{\"type\":\"document\",\"media\":\"attach://$FNAME\",\"caption\":\"${CAPTION}\"}" else MEDIA_JSON+="{\"type\":\"document\",\"media\":\"attach://$FNAME\"}" fi done MEDIA_JSON+="]" ARGS=(-X POST "https://api.telegram.org/bot${{ secrets.TELEGRAM_BOT_TOKEN }}/sendMediaGroup") ARGS+=(-F "chat_id=${{ secrets.TELEGRAM_CHAT_ID }}") 
ARGS+=(-F "media=${MEDIA_JSON}") while IFS= read -r f; do FNAME=$(basename "$f") ARGS+=(-F "$FNAME=@$f") done <<< "$FILES" curl -sf "${ARGS[@]}" fi ================================================ FILE: .github/workflows/build.yml ================================================ name: Build FolkPatch on: push: branches: [ main, develop ] paths: [ 'app/**', 'fpd/**', '.github/workflows/build.yml' ] pull_request: branches: [ main, develop ] paths: [ 'app/**', 'fpd/**', '.github/workflows/build.yml' ] workflow_dispatch: inputs: build_type: description: 'Build type' required: true default: 'both' type: choice options: [ both, debug, release ] env: KEYSTORE_FILE: debug.keystore KEYSTORE_PASSWORD: android KEY_ALIAS: androiddebugkey KEY_PASSWORD: android jobs: prepare: runs-on: ubuntu-latest outputs: build_debug: ${{ steps.set.outputs.debug }} build_release: ${{ steps.set.outputs.release }} commit_msg: ${{ steps.msg.outputs.text }} commit_author: ${{ steps.msg.outputs.author }} artifact_list: ${{ steps.set.outputs.artifact_list }} steps: - id: set run: | TYPE="${{ inputs.build_type }}" [ "${{ github.event_name }}" != "workflow_dispatch" ] && TYPE="both" DEBUG="false"; RELEASE="false"; LIST="" [ "$TYPE" = "both" -o "$TYPE" = "debug" ] && { DEBUG="true"; LIST="folkpatch-debug-${{ github.sha }}"; } [ "$TYPE" = "both" -o "$TYPE" = "release" ] && { RELEASE="true" [ -n "$LIST" ] && LIST="$LIST," || true LIST="${LIST}folkpatch-release-${{ github.sha }}" } echo "debug=$DEBUG" >> "$GITHUB_OUTPUT" echo "release=$RELEASE" >> "$GITHUB_OUTPUT" echo "artifact_list=$LIST" >> "$GITHUB_OUTPUT" - id: msg run: | case "${{ github.event_name }}" in push) TEXT="${{ github.event.head_commit.message }}" AUTHOR="${{ github.event.head_commit.author.name }}" ;; pull_request) TEXT="PR: ${{ github.event.pull_request.title }}" AUTHOR="${{ github.event.pull_request.user.login }}" ;; *) TEXT="FolkPatch Build" AUTHOR="${{ github.actor }}" ;; esac TEXT="${TEXT%%$'\n'*}" echo "text=$TEXT" >> 
"$GITHUB_OUTPUT" echo "author=$AUTHOR" >> "$GITHUB_OUTPUT" build_debug: name: Build Debug needs: prepare if: needs.prepare.outputs.build_debug == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 with: submodules: recursive - uses: ./.github/actions/setup-build-env - run: ./gradlew assembleDebug --no-daemon - name: Rename APK run: | SHORT="${GITHUB_SHA::7}" APK=$(find app/build/outputs/apk/debug -name "*.apk" -type f | head -1) [ -n "$APK" ] && mv "$APK" "FolkPatch-Debug-${SHORT}.apk" - uses: actions/upload-artifact@v4 with: name: folkpatch-debug-${{ github.sha }} path: FolkPatch-Debug-*.apk retention-days: 30 build_release: name: Build Release needs: prepare if: needs.prepare.outputs.build_release == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 with: submodules: recursive - uses: ./.github/actions/setup-build-env - name: Decode keystore run: | echo "${{ secrets.KEYSTORE_BASE64 }}" | base64 -d > FolkPatch.jks - name: Create keystore.properties run: | cat > keystore.properties << 'EOF' KEYSTORE_FILE=../FolkPatch.jks KEYSTORE_PASSWORD=${{ secrets.KEYSTORE_PASSWORD }} KEY_ALIAS=${{ secrets.KEY_ALIAS }} KEY_PASSWORD=${{ secrets.KEY_PASSWORD }} EOF - run: ./gradlew assembleRelease --no-daemon - name: Debug APK location run: find app/build/outputs -name "*.apk" -type f || echo "No APK found" - name: Rename APK run: | SHORT="${GITHUB_SHA::7}" APK=$(find app/build/outputs/apk/release -name "*.apk" -type f | head -1) [ -n "$APK" ] && mv "$APK" "FolkPatch-Release-${SHORT}.apk" - uses: actions/upload-artifact@v4 with: name: folkpatch-release-${{ github.sha }} path: FolkPatch-Release-*.apk retention-days: 30 upload: name: Upload to Telegram needs: [prepare, build_debug, build_release] if: always() && needs.prepare.result == 'success' && (needs.build_debug.result == 'success' || needs.build_release.result == 'success') && github.actor != 'dependabot[bot]' runs-on: ubuntu-latest steps: - name: Check Telegram config id: tg_check run: | if [ -n 
"${{ secrets.TELEGRAM_BOT_TOKEN }}" ] && [ -n "${{ secrets.TELEGRAM_CHAT_ID }}" ]; then echo "enabled=true" >> "$GITHUB_OUTPUT" else echo "::notice::Telegram secrets not configured, skipping upload" fi - name: Download artifacts if: steps.tg_check.outputs.enabled == 'true' uses: actions/download-artifact@v4 with: pattern: folkpatch-* path: artifacts merge-multiple: true - name: Send to Telegram if: steps.tg_check.outputs.enabled == 'true' run: | FILES=$(find artifacts -type f 2>/dev/null | sort) FILE_COUNT=$(echo "$FILES" | grep -c . || true) if [ "$FILE_COUNT" -eq 0 ]; then echo "::error::No files found in artifacts" exit 1 fi CAPTION="${{ needs.prepare.outputs.commit_msg }}\nBranch: ${{ github.ref_name }}\nCommit: ${{ github.sha }}\nAuthor: ${{ needs.prepare.outputs.commit_author }}\nRun: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" if [ "$FILE_COUNT" -eq 1 ]; then curl -sf -X POST "https://api.telegram.org/bot${{ secrets.TELEGRAM_BOT_TOKEN }}/sendDocument" \ -F "chat_id=${{ secrets.TELEGRAM_CHAT_ID }}" \ -F "document=@$FILES" \ -F "caption=${CAPTION}" else FILE_ARR=() while IFS= read -r f; do FILE_ARR+=("$f") done <<< "$FILES" TOTAL=${#FILE_ARR[@]} MEDIA_JSON="[" for i in "${!FILE_ARR[@]}"; do FNAME=$(basename "${FILE_ARR[$i]}") if [ $i -gt 0 ]; then MEDIA_JSON+="," fi if [ $((i + 1)) -eq $TOTAL ]; then MEDIA_JSON+="{\"type\":\"document\",\"media\":\"attach://$FNAME\",\"caption\":\"${CAPTION}\"}" else MEDIA_JSON+="{\"type\":\"document\",\"media\":\"attach://$FNAME\"}" fi done MEDIA_JSON+="]" ARGS=(-X POST "https://api.telegram.org/bot${{ secrets.TELEGRAM_BOT_TOKEN }}/sendMediaGroup") ARGS+=(-F "chat_id=${{ secrets.TELEGRAM_CHAT_ID }}") ARGS+=(-F "media=${MEDIA_JSON}") while IFS= read -r f; do FNAME=$(basename "$f") ARGS+=(-F "$FNAME=@$f") done <<< "$FILES" curl -sf "${ARGS[@]}" fi ================================================ FILE: .gitignore ================================================ *.iml .gradle .idea .DS_Store 
build captures .cxx *.keystore *.jks keystore.properties local.properties auth.properties # FolkPatch specific signing files app/folkpatch-release*.jks keystore.properties .vscode .kotlin app/src/main/resources/ private .claude/ .opencode # Built native binaries app/src/main/assets/Service/ fpd/target/ fpd/.cargo/ # demo project demo KernelPatch ================================================ FILE: Build-Debug.bat ================================================ @echo off chcp 65001 > nul 2>&1 setlocal enabledelayedexpansion echo [1/4] Entering apd directory... cd /d apd if errorlevel 1 ( echo Error: Failed to enter apd directory, please check if the directory exists! pause exit /b 1 ) echo [2/4] Executing cargo clean... cargo clean if errorlevel 1 ( echo Error: cargo clean execution failed! pause exit /b 1 ) echo [3/4] Returning to parent directory... cd .. if errorlevel 1 ( echo Error: Failed to return to parent directory! pause exit /b 1 ) echo [4/4] Executing gradlew.bat assembleDebug... .\gradlew.bat assembleDebug if errorlevel 1 ( echo Error: gradlew.bat assembleDebug execution failed! pause exit /b 1 ) echo. echo All commands executed successfully! pause endlocal ================================================ FILE: Build-Release.bat ================================================ @echo off chcp 65001 > nul 2>&1 setlocal enabledelayedexpansion echo [1/4] Entering apd directory... cd /d apd if errorlevel 1 ( echo Error: Failed to enter apd directory, please check if the directory exists! pause exit /b 1 ) echo [2/4] Executing cargo clean... cargo clean if errorlevel 1 ( echo Error: cargo clean execution failed! pause exit /b 1 ) echo [3/4] Returning to parent directory... cd .. if errorlevel 1 ( echo Error: Failed to return to parent directory! pause exit /b 1 ) echo [4/4] Executing gradlew.bat assembleRelease... .\gradlew.bat assembleRelease if errorlevel 1 ( echo Error: gradlew.bat assembleDebug execution failed! pause exit /b 1 ) echo. 
echo All commands executed successfully! pause endlocal ================================================ FILE: LICENSE ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. 
Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. 
A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. 
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>. ================================================ FILE: README.md ================================================
FolkPatch logo [![Latest Release](https://img.shields.io/github/v/release/matsuzaka-yuki/FolkPatch?label=Release&logo=github)](https://github.com/matsuzaka-yuki/FolkPatch/releases/latest) [![Channel](https://img.shields.io/badge/Follow-Telegram-blue.svg?logo=telegram)](https://t.me/FolkPatch) [![GitHub License](https://img.shields.io/github/license/matsuzaka-yuki/FolkPatch?logo=gnu)](/LICENSE)
🌏 **README 语言:** [**English**](./README_EN.md) / [**中文**](./README.md) / [**日本語**](./README_JA.md) FolkPatch - 专注界面优化与功能扩展的Root管理工具 通过我们的综合文档快速开始。无论是安装使用、模块管理,还是自定义设置,文档涵盖了您成功使用FolkPatch所需的所有内容。 [📚 阅读完整文档](https://fp.mysqil.com/) →
--- ## ✨ 介绍 ### 🎨 核心功能 - [x] 基于 KernelPatch 的 Root 实现 - [x] 无需重新编译内核即可 Hook 内核函数 ### 📱 前置要求 - **必须:** 基于 ARM64 架构且 Linux 内核版本 3.18 至 6.15 的 Android 设备 ### 🎨 管理器的界面与设计 - [x] 全新的 UI 与交互体验优化 - [x] 个性化壁纸支持 - [x] 国际化支持 - [x] 动画性能与交互流畅度优化 - [x] 界面视觉细节与动态效果提升 - [x] 支持手动关闭自动更新检查,将版本升级的主导权交还给用户 ### 📦 模块相关 - [x] APM: 类 Magisk 模块系统 , 支持批量刷入与全量备份 - [x] KPM: 内核模块系统(支持 inline-hook 与 syscall-table-hook) , 支持自动加载 - [x] 通过商店可以下载热门的 APM 或 KPM ### ⚡ 技术特性 - [x] 基于 [KernelPatch](https://github.com/bmax121/KernelPatch/) ## 🚀 下载安装 ### 📦 使用指导 1. **下载安装:** 从 [发布页面](https://github.com/LyraVoid/FolkPatch/releases/latest) 下载最新版安装包 2. **安装应用:** 安装最新版安装包到你的 Android 设备 3. **开始使用:** 阅读 https://fp.mysqil.com/ ## 🙏 开源致谢 本项目基于以下开源项目: - [KernelPatch](https://github.com/bmax121/KernelPatch/) - 核心组件 - [Magisk](https://github.com/topjohnwu/Magisk) - magiskpolicy - [KernelSU](https://github.com/tiann/KernelSU) - 应用UI和类似Magisk的模块支持 - [Sukisu-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra) - 参考一些界面的设计 - [APatch](https://github.com/bmax121/APatch) - 上游分支 ## 📄 许可证 - FolkPatch 遵循 [GNU General Public License v3 (GPL-3)](http://www.gnu.org/copyleft/gpl.html) 许可证开源 , 作为二改者或分发者 , 您需遵守以下标准: - 若您修改了代码或在项目中集成了 FolkPatch 并向第三方分发 , 您的整个项目必须同样采用 GPLv3 协议开源 - 分发二进制文件时 , 必须主动提供或承诺提供完整且可读的源代码 - 严禁对软件授权本身收取许可费 , 您可以针对分发、技术支持或定制开发收费 - 分发行为即代表您授予所有用户使用该项目涉及的您的相关专利 - 本软件“按原样”提供 , 不含任何担保 , 原作者不对因使用本软件造成的任何损失负责 - 任何违反上述条款的行为将导致您的 GPLv3 授权自动终止 , 届时 , 您将失去分发 FolkPatch 的合法权利 , 原作者保留依法追究著作权侵权责任(包括但不限于申请停止侵权禁令、经济赔偿及下架违规项目)的权利 ## 💬 社区交流 ### FolkPatch讨论交流 - Telegram 频道: [@FolkPatch](https://t.me/FolkPatch) ================================================ FILE: README_EN.md ================================================
FolkPatch logo [![Latest Release](https://img.shields.io/github/v/release/matsuzaka-yuki/FolkPatch?label=Release&logo=github)](https://github.com/matsuzaka-yuki/FolkPatch/releases/latest) [![Channel](https://img.shields.io/badge/Follow-Telegram-blue.svg?logo=telegram)](https://t.me/FolkPatch) [![GitHub License](https://img.shields.io/github/license/matsuzaka-yuki/FolkPatch?logo=gnu)](/LICENSE)
🌏 **README Language:** [**English**](./README_EN.md) / [**中文**](./README.md) / [**日本語**](./README_JA.md) FolkPatch - A Root management tool focused on interface optimization and feature extension Get started quickly with our comprehensive documentation. Whether it's installation, module management, or custom settings, the documentation covers everything you need to successfully use FolkPatch. [📚 Read Full Documentation](https://fp.mysqil.com/) →
--- ## ✨ Introduction ### 🎨 Core Features - [x] Root implementation based on KernelPatch - [x] Hook kernel functions without recompiling the kernel ### 📱 Prerequisites - **Required:** ARM64 architecture Android device with Linux kernel version 3.18 to 6.15 ### 🎨 Manager Interface & Design - [x] Brand new UI and interaction experience optimization - [x] Personalized wallpaper support - [x] Internationalization support - [x] Animation performance and interaction fluency optimization - [x] Interface visual details and dynamic effects enhancement - [x] Support for manually disabling automatic update checks, giving users control over version upgrades ### 📦 Module Related - [x] APM: Magisk-like module system, supports batch flashing and full backup - [x] KPM: Kernel module system (supports inline-hook and syscall-table-hook), supports automatic loading - [x] Download popular APM or KPM through the store ### ⚡ Technical Features - [x] Based on [KernelPatch](https://github.com/bmax121/KernelPatch/) ## 🚀 Download & Install ### 📦 Installation Guide 1. **Download & Install:** Download the latest installation package from the [Releases page](https://github.com/LyraVoid/FolkPatch/releases/latest) 2. **Install App:** Install the latest installation package to your Android device 3. **Get Started:** Read https://fp.mysqil.com/ ## 🙏 Open Source Credits This project is based on the following open source projects: - [KernelPatch](https://github.com/bmax121/KernelPatch/) - Core component - [Magisk](https://github.com/topjohnwu/Magisk) - magiskpolicy - [KernelSU](https://github.com/tiann/KernelSU) - App UI and Magisk-like module support - [Sukisu-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra) - Referenced some interface designs - [APatch](https://github.com/bmax121/APatch) - Upstream branch ## 📄 License - FolkPatch is open sourced under the [GNU General Public License v3 (GPL-3)](http://www.gnu.org/copyleft/gpl.html) license. 
As a modifier or distributor, you must comply with the following standards: - If you modify the code or integrate FolkPatch into your project and distribute it to a third party, your entire project must also be open sourced under the GPLv3 license - When distributing binary files, you must actively provide or promise to provide complete and readable source code - Strictly prohibit charging licensing fees for the software license itself. You may charge for distribution, technical support, or customized development - Distribution implies that you grant all users a license to the relevant patents involved in the use of the project - This software is provided "as is", without any warranty. The original author is not responsible for any losses caused by using this software - Any violation of the above terms will automatically terminate your GPLv3 license. At that time, you will lose the legal right to distribute FolkPatch. The original author reserves the right to pursue copyright infringement liability (including but not limited to applying for injunctions to stop infringement, economic compensation, and removing infringing projects) ## 💬 Community & Discussion ### FolkPatch Discussion & Communication - Telegram Channel: [@FolkPatch](https://t.me/FolkPatch) ================================================ FILE: README_JA.md ================================================
FolkPatch logo [![Latest Release](https://img.shields.io/github/v/release/matsuzaka-yuki/FolkPatch?label=Release&logo=github)](https://github.com/LyraVoid/FolkPatch/releases/latest) [![Channel](https://img.shields.io/badge/Follow-Telegram-blue.svg?logo=telegram)](https://t.me/FolkPatch) [![GitHub License](https://img.shields.io/github/license/matsuzaka-yuki/FolkPatch?logo=gnu)](/LICENSE)
🌏 **README の言語:** [**English**](./README_EN.md) / [**中文**](./README.md) / [**日本語**](./README_JA.md) FolkPatch - インターフェースの最適化と拡張機能を重視した Root 管理ツール 包括的なドキュメントですぐに開始しましょう。インストール、モジュールの管理、カスタム設定など FolkPatch を快適に使用するための情報はドキュメントに網羅されています。 [📚 完全なドキュメントを読む](https://fp.mysqil.com/) →
--- ## ✨ 紹介 ### 🎨 コア機能 - [x] KernelPatch ベースの Root 実装 - [x] カーネルの再コンパイルなしでカーネル関数をフック可能 ### 📱 前提条件 - **必須:** ARM64 アーキテクチャベースで Linux カーネルバージョン 3.18 から 6.15 の Android デバイス ### 🎨 マネージャーのインターフェースとデザイン - [x] 全く新しい UI とインタラクションエクスペリエンスの最適化 - [x] パーソナライズされた壁紙サポート - [x] 国際化サポート - [x] アニメーションパフォーマンスとインタラクションの滑らかさの最適化 - [x] インターフェースの視覚的詳細と動的効果の向上 - [x] 自動更新チェックの手動無効化をサポートし、バージョンアップグレードの主導権をユーザーに返還 ### 📦 モジュール関連 - [x] APM: Magisk ライクなモジュールシステム、一括フラッシュとフルバックアップをサポート - [x] KPM: カーネルモジュールシステム(inline-hook と syscall-table-hook をサポート)、自動ロードをサポート - [x] ストアから人気のある APM または KPM をダウンロード可能 ### ⚡ 技術的特徴 - [x] [KernelPatch](https://github.com/bmax121/KernelPatch/) に基づいています ## 🚀 ダウンロードとインストール ### 📦 インストールガイド 1. **ダウンロードとインストール:** [リリースページ](https://github.com/LyraVoid/FolkPatch/releases/latest)から最新のインストールパッケージをダウンロード 2. **アプリをインストール:** 最新のインストールパッケージをあなたの Android デバイスにインストール 3. **使用開始:** https://fp.mysqil.com/ を読んでください ## 🙏 オープンソースクレジット このプロジェクトは以下のオープンソースプロジェクトに基づいています: - [KernelPatch](https://github.com/bmax121/KernelPatch/) - コアコンポーネント - [Magisk](https://github.com/topjohnwu/Magisk) - magiskpolicy - [KernelSU](https://github.com/tiann/KernelSU) - アプリ UI と Magisk ライクなモジュールサポート - [Sukisu-Ultra](https://github.com/SukiSU-Ultra/SukiSU-Ultra) - 一部のインターフェースデザインを参照 - [APatch](https://github.com/bmax121/APatch) - 上流ブランチ ## 📄 ライセンス - FolkPatch は [GNU General Public License v3 (GPL-3)](http://www.gnu.org/copyleft/gpl.html) ライセンスの下でオープンソースされています。変更者または配布者として、以下の基準を遵守する必要があります: - コードを変更した場合、またはプロジェクトに FolkPatch を統合して第三者に配布する場合、プロジェクト全体も GPLv3 ライセンスの下でオープンソースする必要があります - バイナリファイルを配布する場合、完全かつ読み取り可能なソースコードを積極的に提供するか、提供することを約束する必要があります - ソフトウェアライセンス自体に対するライセンス料の徴収を厳禁します。配布、技術サポート、カスタム開発に対して料金を請求できます - 配布行為は、プロジェクトに関連するすべてのユーザーにあなたの関連特許の使用権を付与することを意味します - 本ソフトウェアは「現状のまま」提供され、いかなる保証もありません。原作者は本ソフトウェアの使用による損失について責任を負いません - 上記の条項に違反すると GPLv3 ライセンスは自動的に終了します。その際、FolkPatch を配布する正当な権利を失い、原作者は著作権侵害の責任を追求する権利(侵害停止命令の申請、経済的賠償、違反プロジェクトの削除を含むがこれらに限定されない)を留保します ## 💬 コミュニティとディスカッション ### FolkPatch ディスカッションとコミュニケーション - 
Telegram チャンネル: [@FolkPatch](https://t.me/FolkPatch) ================================================ FILE: apd/.gitignore ================================================ /target .cargo/ ================================================ FILE: apd/Cargo.toml ================================================ [package] name = "apd" version = "0.1.0" edition = "2024" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] mlua = { version = "0.11.5", features = ["lua54","vendored"] } anyhow = "1" csv = "1.3.1" clap = { version = "4", features = ["derive"] } const_format = "0.2" zip = { version = "7.2.0",features = [ "deflate", "deflate64", "time", "lzma", "xz", ], default-features = false } zip-extensions = { git = "https://github.com/AndroidPatch/zip-extensions-rs.git", branch = "master", features = [ "deflate", "lzma", "xz", ], default-features = false } java-properties = { git = "https://github.com/AndroidPatch/java-properties.git", branch = "master", default-features = false } log = "0.4" env_logger = "0.11" serde = { version = "1", features = ["derive"] } serde_json = "1" encoding_rs = "0.8" walkdir="2.4" retry = "2" libc = "0.2" extattr = "1" jwalk = "0.8" is_executable = "1" nom = "8" derive-new = "0.7.0" which = "8" getopts = "0.2" errno = "0.3.14" notify = "8.2" signal-hook = "0.4" regex-lite = "0.1.9" [target.'cfg(any(target_os = "android", target_os = "linux"))'.dependencies] rustix = { version = "1", features = ["all-apis"] } # some android specific dependencies which compiles under unix are also listed here for convenience of coding android-properties = { version = "0.2.2", features = ["bionic-deprecated"] } procfs = "0.18" loopdev = { git = "https://github.com/AndroidPatch/loopdev" } prop-rs-android = { git = "https://github.com/Kernel-SU/ksu_props" } policy = {git = "https://github.com/AndroidPatch/ap_policy"} [target.'cfg(target_os = "android")'.dependencies] android_logger = { version = "0.15", 
default-features = false } [profile.release] strip = true overflow-checks = false rpath = false opt-level = 3 codegen-units = 1 panic = "abort" lto = "fat" ================================================ FILE: apd/build.rs ================================================ use std::{env, fs::File, io::Write, path::Path, process::Command}; fn get_git_version() -> Result<(u32, String), std::io::Error> { // Try to get version code from environment variable first let version_code: u32 = if let Ok(env_version_code) = env::var("APATCH_VERSION_CODE") { env_version_code.parse().map_err(|_| { std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse {version_code}") })? } else { // Fallback to git-based calculation let output = Command::new("git") .args(["rev-list", "--count", "HEAD"]) .output()?; let output = output.stdout; let git_count = String::from_utf8(output).expect("Failed to read git count stdout"); let git_count: u32 = git_count.trim().parse().map_err(|_| { std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse git count") })?; std::cmp::max(11000 + 200 + git_count, 10762) // For historical reasons and ensure minimum version }; let version_name = if let Ok(env_version_name) = env::var("APATCH_VERSION_NAME") { env_version_name } else { "113005-Matsuzaka-yuki".to_string() }; Ok((version_code, version_name)) } fn main() { // update VersionCode when git repository change println!("cargo:rerun-if-changed=../.git/HEAD"); println!("cargo:rerun-if-changed=../.git/refs/"); let (code, name) = match get_git_version() { Ok((code, name)) => (code, name), Err(_) => { // show warning if git is not installed println!("cargo:warning=Failed to get git version, using 0.0.0"); (0, "0.0.0".to_string()) } }; let out_dir = env::var("OUT_DIR").expect("Failed to get $OUT_DIR"); println!("out_dir: ${out_dir}"); println!("code: ${code}"); let out_dir = Path::new(&out_dir); File::create(Path::new(out_dir).join("VERSION_CODE")) .expect("Failed to create VERSION_CODE") 
.write_all(code.to_string().as_bytes()) .expect("Failed to write VERSION_CODE"); File::create(Path::new(out_dir).join("VERSION_NAME")) .expect("Failed to create VERSION_NAME") .write_all(name.trim().as_bytes()) .expect("Failed to write VERSION_NAME"); } ================================================ FILE: apd/src/apd.rs ================================================ #[cfg(unix)] use std::os::unix::process::CommandExt; use std::{env, ffi::CStr, path::PathBuf, process::Command}; use anyhow::{Ok, Result}; #[cfg(unix)] use getopts::Options; use rustix::thread::{Gid, Uid, set_thread_res_gid, set_thread_res_uid}; #[cfg(any(target_os = "linux", target_os = "android"))] use crate::pty::prepare_pty; use crate::{ defs, utils::{self, umask}, }; fn print_usage(opts: Options) { let brief = "APatch\n\nUsage: [options] [-] [user [argument...]]".to_string(); print!("{}", opts.usage(&brief)); } fn set_identity(uid: u32, gid: u32) { #[cfg(any(target_os = "linux", target_os = "android"))] let gid = Gid::from_raw(gid); let uid = Uid::from_raw(uid); set_thread_res_gid(gid, gid, gid).ok(); set_thread_res_uid(uid, uid, uid).ok(); } #[cfg(not(unix))] pub fn root_shell() -> Result<()> { unimplemented!() } #[cfg(unix)] pub fn root_shell() -> Result<()> { // we are root now, this was set in kernel! 
let env_args: Vec = env::args().collect(); let args = env_args .iter() .position(|arg| arg == "-c") .map(|i| { let rest = env_args[i + 1..].to_vec(); let mut new_args = env_args[..i].to_vec(); new_args.push("-c".to_string()); if !rest.is_empty() { new_args.push(rest.join(" ")); } new_args }) .unwrap_or_else(|| env_args.clone()); let mut opts = Options::new(); opts.optopt( "c", "command", "pass COMMAND to the invoked shell", "COMMAND", ); opts.optflag("h", "help", "display this help message and exit"); opts.optflag("l", "login", "pretend the shell to be a login shell"); opts.optflag( "p", "preserve-environment", "preserve the entire environment", ); opts.optopt( "s", "shell", "use SHELL instead of the default /system/bin/sh", "SHELL", ); opts.optflag("v", "version", "display version number and exit"); opts.optflag("V", "", "display version code and exit"); opts.optflag( "M", "mount-master", "force run in the global mount namespace", ); opts.optflag("", "no-pty", "Do not allocate a new pseudo terminal."); // Replace -cn with -z, -mm with -M for supporting getopt_long let args = args .into_iter() .map(|e| { if e == "-mm" { "-M".to_string() } else if e == "-cn" { "-z".to_string() } else { e } }) .collect::>(); let matches = match opts.parse(&args[1..]) { Result::Ok(m) => m, Err(f) => { println!("{f}"); print_usage(opts); std::process::exit(-1); } }; if matches.opt_present("h") { print_usage(opts); return Ok(()); } if matches.opt_present("v") { println!("{}:APatch(FolkPatch)", defs::VERSION_NAME); return Ok(()); } if matches.opt_present("V") { println!("{}", defs::VERSION_CODE); return Ok(()); } let shell = matches.opt_str("s").unwrap_or("/system/bin/sh".to_string()); let mut is_login = matches.opt_present("l"); let preserve_env = matches.opt_present("p"); let mount_master = matches.opt_present("M"); // we've made sure that -c is the last option and it already contains the whole command, no need to construct it again let args = matches .opt_str("c") .map(|cmd| 
vec!["-c".to_string(), cmd]) .unwrap_or_default(); let mut free_idx = 0; if !matches.free.is_empty() && matches.free[free_idx] == "-" { is_login = true; free_idx += 1; } // use current uid if no user specified, these has been done in kernel! let mut uid = unsafe { libc::getuid() }; let gid = unsafe { libc::getgid() }; if free_idx < matches.free.len() { let name = &matches.free[free_idx]; uid = { #[cfg(target_arch = "aarch64")] let pw = unsafe { libc::getpwnam(name.as_ptr()).as_ref() }; #[cfg(target_arch = "x86_64")] let pw = unsafe { libc::getpwnam(name.as_ptr() as *const i8).as_ref() }; match pw { Some(pw) => pw.pw_uid, None => name.parse::().unwrap_or(0), } } } // https://github.com/topjohnwu/Magisk/blob/master/native/src/core/su/su_daemon.cpp#L408 let arg0 = if is_login { "-" } else { &shell }; let mut command = &mut Command::new(&shell); if !preserve_env { // This is actually incorrect, I don't know why. // command = command.env_clear(); let pw = unsafe { libc::getpwuid(uid).as_ref() }; if let Some(pw) = pw { let home = unsafe { CStr::from_ptr(pw.pw_dir) }; let pw_name = unsafe { CStr::from_ptr(pw.pw_name) }; let home = home.to_string_lossy(); let pw_name = pw_name.to_string_lossy(); command = command .env("HOME", home.as_ref() as &str) .env("USER", pw_name.as_ref() as &str) .env("LOGNAME", pw_name.as_ref() as &str) .env("SHELL", &shell); } } // add /data/adb/ap/bin to PATH #[cfg(any(target_os = "linux", target_os = "android"))] add_path_to_env(defs::BINARY_DIR)?; // when AP_RC_PATH exists and ENV is not set, set ENV to AP_RC_PATH if PathBuf::from(defs::AP_RC_PATH).exists() && env::var("ENV").is_err() { command = command.env("ENV", defs::AP_RC_PATH); } #[cfg(target_os = "android")] if !matches.opt_present("no-pty") { if let Err(e) = prepare_pty() { log::error!("failed to prepare pty: {:?}", e); } } // escape from the current cgroup and become session leader // WARNING!!! This cause some root shell hang forever! 
// command = command.process_group(0); command = unsafe { command.pre_exec(move || { umask(0o22); utils::switch_cgroups(); // switch to global mount namespace #[cfg(any(target_os = "linux", target_os = "android"))] let global_namespace_enable = std::fs::read_to_string(defs::GLOBAL_NAMESPACE_FILE).unwrap_or("0".to_string()); if global_namespace_enable.trim() == "1" || mount_master { let _ = utils::switch_mnt_ns(1); } set_identity(uid, gid); Result::Ok(()) }) }; command = command.args(args).arg0(arg0); Err(command.exec().into()) } fn add_path_to_env(path: &str) -> Result<()> { let mut paths = env::var_os("PATH").map_or(Vec::new(), |val| env::split_paths(&val).collect::>()); let new_path = PathBuf::from(path.trim_end_matches('/')); paths.push(new_path); let new_path_env = env::join_paths(paths)?; unsafe { env::set_var("PATH", new_path_env) }; Ok(()) } ================================================ FILE: apd/src/assets.rs ================================================ use anyhow::Result; use const_format::concatcp; use crate::{defs::BINARY_DIR, utils}; pub const RESETPROP_PATH: &str = concatcp!(BINARY_DIR, "resetprop"); pub const BUSYBOX_PATH: &str = concatcp!(BINARY_DIR, "busybox"); pub const MAGISKPOLICY_PATH: &str = concatcp!(BINARY_DIR, "magiskpolicy"); pub fn ensure_binaries() -> Result<()> { utils::ensure_binary(BUSYBOX_PATH)?; let resetprop_link = RESETPROP_PATH; let _ = std::fs::remove_file(resetprop_link); std::os::unix::fs::symlink("/data/adb/apd", resetprop_link)?; let magiskpolicy_link = MAGISKPOLICY_PATH; let _ = std::fs::remove_file(magiskpolicy_link); std::os::unix::fs::symlink("/data/adb/apd", magiskpolicy_link)?; Ok(()) } ================================================ FILE: apd/src/banner ================================================ ___ ___ __ ___ _ _____ ___ / __\/___\/ / /\ /\ / _ \/_\ /__ \/ __\ /\ /\ / _\ // // / / //_// /_)//_\\ / /\/ / / /_/ / / / / \_// /___/ __ \/ ___/ _ \/ / / /___/ __ / \/ \___/\____/\/ \/\/ \_/ \_/\/ \____/\/ /_/ 
================================================ FILE: apd/src/cli.rs ================================================ use crate::{defs, event, lua, module, module_config, supercall, utils}; #[cfg(target_os = "android")] use android_logger::Config; use anyhow::{Context, Result}; use clap::Parser; #[cfg(target_os = "android")] use log::LevelFilter; /// APatch cli #[derive(Parser, Debug)] #[command(author, version = defs::VERSION_CODE, about, long_about = None)] struct Args { #[arg( short, long, value_name = "KEY", help = "Super key for authentication root" )] superkey: Option, #[command(subcommand)] command: Commands, } #[derive(clap::Subcommand, Debug)] enum Commands { /// Manage APatch modules Module { #[command(subcommand)] command: Module, }, /// Trigger `post-fs-data` event PostFsData, /// Trigger `service` event Services, /// Trigger `boot-complete` event BootCompleted, /// Start uid listener for synchronizing root list UidListener, /// Resetprop - Magisk-compatible system property tool Resetprop(crate::resetprop::Args), /// MagiskPolicy - SELinux Policy Patch Tool Sepolicy(crate::sepolicy::Args), } #[derive(clap::Subcommand, Debug)] enum Module { /// Install module Install { /// module zip file path zip: String, }, /// Uninstall module Uninstall { /// module id id: String, }, /// Undo uninstall module UndoUninstall { /// module id id: String, }, /// enable module Enable { /// module id id: String, }, /// disable module Disable { // module id id: String, }, /// run action for module Action { // module id id: String, }, /// module lua runner Lua { // module id id: String, // lua function function: String, }, /// list all modules List, /// manage module configuration Config { /// target internal module name (resolved as internal.) 
#[arg(long)] internal: Option, #[command(subcommand)] command: ModuleConfigCmd, }, } #[derive(clap::Subcommand, Debug)] enum ModuleConfigCmd { /// Get a config value Get { /// config key key: String, }, /// Set a config value Set { /// config key key: String, /// config value (omit to read from stdin) value: Option, /// read value from stdin (default if value not provided) #[arg(long)] stdin: bool, /// use temporary config (cleared on reboot) #[arg(short, long)] temp: bool, }, /// List all config entries List, /// Delete a config entry Delete { /// config key key: String, /// delete from temporary config #[arg(short, long)] temp: bool, }, /// Clear all config entries Clear { /// clear temporary config #[arg(short, long)] temp: bool, }, } pub fn run() -> Result<()> { #[cfg(target_os = "android")] android_logger::init_once( Config::default() .with_max_level(LevelFilter::Trace) // limit log level .with_tag("APatchD") .with_filter( android_logger::FilterBuilder::new() .filter_level(LevelFilter::Trace) .filter_module("notify", LevelFilter::Warn) .build(), ), ); #[cfg(not(target_os = "android"))] env_logger::init(); // the kernel executes su with argv[0] = "/system/bin/kp" or "/system/bin/su" or "su" or "kp" and replace it with us let arg0 = std::env::args().next().unwrap_or_default(); if arg0.ends_with("kp") || arg0.ends_with("su") { return crate::apd::root_shell(); } if arg0.ends_with("resetprop") { let all_args: Vec = std::env::args().collect(); crate::resetprop::resetprop_main(&all_args) } if arg0.ends_with("magiskpolicy") { let all_args: Vec = std::env::args().collect(); crate::sepolicy::policy_main(&all_args) } let cli = Args::parse(); log::info!("command: {:?}", cli.command); if let Some(ref _superkey) = cli.superkey { supercall::privilege_apd_profile(&cli.superkey); } let result = match cli.command { Commands::PostFsData => event::on_post_data_fs(cli.superkey), Commands::BootCompleted => event::on_boot_completed(cli.superkey), Commands::UidListener => 
event::start_uid_listener(), Commands::Module { command } => { #[cfg(any(target_os = "linux", target_os = "android"))] { utils::switch_mnt_ns(1)?; } match command { Module::Install { zip } => module::install_module(&zip), Module::Uninstall { id } => module::uninstall_module(&id), Module::UndoUninstall { id } => module::undo_uninstall_module(&id), Module::Action { id } => module::run_action(&id), Module::Lua { id, function } => { lua::run_lua(&id, &function, false, true).map_err(|e| anyhow::anyhow!("{}", e)) } Module::Enable { id } => module::enable_module(&id), Module::Disable { id } => module::disable_module(&id), Module::List => module::list_modules(), Module::Config { internal, command } => { let module_id = match internal { Some(internal_name) => format!("internal.{internal_name}"), None => std::env::var("AP_MODULE").map_err(|_| { anyhow::anyhow!( "This command must be run in the context of a module or passed --internal " ) })?, }; match command { ModuleConfigCmd::Get { key } => { // Use merge_configs to respect priority (temp overrides persist) let config = module_config::merge_configs(&module_id)?; match config.get(&key) { Some(value) => { println!("{value}"); Ok(()) } None => anyhow::bail!("Key '{key}' not found"), } } ModuleConfigCmd::Set { key, value, stdin, temp, } => { // Validate key at CLI layer for better user experience module_config::validate_config_key(&key)?; // Read value from stdin or argument let value_str = match value { Some(v) if !stdin => v, _ => { // Read from stdin use std::io::Read; let mut buffer = String::new(); std::io::stdin() .read_to_string(&mut buffer) .context("Failed to read from stdin")?; buffer } }; // Validate value module_config::validate_config_value(&value_str)?; let config_type = if temp { module_config::ConfigType::Temp } else { module_config::ConfigType::Persist }; module_config::set_config_value( &module_id, &key, &value_str, config_type, ) } ModuleConfigCmd::List => { let config = 
module_config::merge_configs(&module_id)?; if config.is_empty() { println!("No config entries found"); } else { for (key, value) in config { println!("{key}={value}"); } } Ok(()) } ModuleConfigCmd::Delete { key, temp } => { let config_type = if temp { module_config::ConfigType::Temp } else { module_config::ConfigType::Persist }; module_config::delete_config_value(&module_id, &key, config_type) } ModuleConfigCmd::Clear { temp } => { let config_type = if temp { module_config::ConfigType::Temp } else { module_config::ConfigType::Persist }; module_config::clear_config(&module_id, config_type) } } } } } Commands::Services => event::on_services(cli.superkey), Commands::Resetprop(resetprop_args) => crate::resetprop::execute(&resetprop_args) .inspect_err(|e| { if e.downcast_ref::() .is_some() { std::process::exit(2); } }), Commands::Sepolicy(sepolicy_args) => crate::sepolicy::execute(&sepolicy_args), }; if let Err(e) = &result { log::error!("Error: {:?}", e); } result } ================================================ FILE: apd/src/defs.rs ================================================ use const_format::concatcp; pub const ADB_DIR: &str = "/data/adb/"; pub const WORKING_DIR: &str = concatcp!(ADB_DIR, "ap/"); pub const BINARY_DIR: &str = concatcp!(WORKING_DIR, "bin/"); pub const APATCH_LOG_FOLDER: &str = concatcp!(WORKING_DIR, "log/"); pub const AP_RC_PATH: &str = concatcp!(WORKING_DIR, ".aprc"); pub const GLOBAL_NAMESPACE_FILE: &str = concatcp!(ADB_DIR, ".global_namespace_enable"); pub const MAGIC_MOUNT_FILE: &str = concatcp!(ADB_DIR, ".magic_mount_enable"); pub const HIDE_SERVICE_FILE: &str = concatcp!(ADB_DIR, ".hide_service_enable"); pub const HIDE_BINARY_PATH: &str = concatcp!(ADB_DIR, "fp/bin/fpd"); pub const UMOUNT_SERVICE_FILE: &str = concatcp!(ADB_DIR, ".umount_service_enable"); pub const UMOUNT_BINARY_PATH: &str = concatcp!(ADB_DIR, "fp/bin/fpd"); pub const UTS_SPOOF_ENABLE_FILE: &str = concatcp!(ADB_DIR, ".uts_spoof_enable"); pub const UTS_SPOOF_CONFIG_FILE: 
&str = concatcp!(ADB_DIR, ".uts_spoof_config"); pub const UTS_SPOOF_BOOT_PENDING: &str = concatcp!(ADB_DIR, ".uts_spoof_boot_pending"); pub const DAEMON_PATH: &str = concatcp!(ADB_DIR, "apd"); pub const AUTO_EXCLUDE_KNOWN_PACKAGES_FILE: &str = concatcp!(WORKING_DIR, "auto_exclude_known_packages"); pub const PATHHIDE_DIR: &str = concatcp!(ADB_DIR, "fp/pathhide/"); pub const PATHHIDE_ENABLE_FILE: &str = concatcp!(ADB_DIR, "fp/pathhide/enabled"); pub const PATHHIDE_PATHS_FILE: &str = concatcp!(ADB_DIR, "fp/pathhide/paths"); pub const PATHHIDE_UIDS_FILE: &str = concatcp!(ADB_DIR, "fp/pathhide/uids"); pub const PATHHIDE_UID_MODE_FILE: &str = concatcp!(ADB_DIR, "fp/pathhide/uid_mode"); pub const PATHHIDE_FILTER_SYSTEM_FILE: &str = concatcp!(ADB_DIR, "fp/pathhide/filter_system"); pub const NETISOLATE_DIR: &str = concatcp!(ADB_DIR, "fp/netisolate/"); pub const NETISOLATE_ENABLE_FILE: &str = concatcp!(ADB_DIR, "fp/netisolate/enabled"); pub const NETISOLATE_UIDS_FILE: &str = concatcp!(ADB_DIR, "fp/netisolate/uids"); pub const MODULE_DIR: &str = concatcp!(ADB_DIR, "modules/"); pub const AP_MAGIC_MOUNT_SOURCE: &str = concatcp!(WORKING_DIR, "magic_mount"); // warning: this directory should not change, or you need to change the code in module_installer.sh!!! 
pub const MODULE_UPDATE_DIR: &str = concatcp!(ADB_DIR, "modules_update/"); pub const TEMP_DIR: &str = "/debug_ramdisk"; pub const TEMP_DIR_LEGACY: &str = "/sbin"; pub const MODULE_WEB_DIR: &str = "webroot"; pub const MODULE_ACTION_SH: &str = "action.sh"; pub const DISABLE_FILE_NAME: &str = "disable"; pub const SKIP_MOUNT_FILE_NAME: &str = "skip_mount"; pub const UPDATE_FILE_NAME: &str = "update"; pub const REMOVE_FILE_NAME: &str = "remove"; // Metamodule support pub const METAMODULE_MOUNT_SCRIPT: &str = "metamount.sh"; pub const METAMODULE_METAINSTALL_SCRIPT: &str = "metainstall.sh"; pub const METAMODULE_METAUNINSTALL_SCRIPT: &str = "metauninstall.sh"; pub const METAMODULE_DIR: &str = concatcp!(ADB_DIR, "metamodule/"); pub const FP_KPMS_DIR: &str = concatcp!(ADB_DIR, "fp/kpms/"); pub const FP_KPMS_AUTOLOAD_DIR: &str = concatcp!(ADB_DIR, "fp/kpms/autoload/"); pub const KPM_AUTOLOAD_CONFIG: &str = concatcp!(ADB_DIR, "fp/kpms/kpm_autoload_config.json"); // Module config pub const MODULE_CONFIG_DIR: &str = concatcp!(WORKING_DIR, "module_configs/"); pub const PERSIST_CONFIG_NAME: &str = "persist.config"; pub const TEMP_CONFIG_NAME: &str = "tmp.config"; pub const PTS_NAME: &str = "pts"; pub const VERSION_CODE: &str = include_str!(concat!(env!("OUT_DIR"), "/VERSION_CODE")); pub const VERSION_NAME: &str = include_str!(concat!(env!("OUT_DIR"), "/VERSION_NAME")); ================================================ FILE: apd/src/event.rs ================================================ use crate::sepolicy::get_policy_main; use anyhow::{Context, Result}; use libc::SIGPWR; use log::{info, warn}; use notify::{ Config, Event, EventKind, INotifyWatcher, RecursiveMode, Watcher, event::{ModifyKind, RenameMode}, }; use signal_hook::{consts::signal::*, iterator::Signals}; use std::process::Stdio; use std::{ env, ffi::CStr, fs, os::unix::{fs::PermissionsExt, process::CommandExt}, path::{Path, PathBuf}, process::Command, sync::{Arc, Mutex}, thread, time::Duration, }; use crate::{ assets, 
defs, lua, metamodule, module, restorecon, supercall, supercall::{init_load_su_path, refresh_ap_package_list}, utils::{self, switch_cgroups}, }; pub fn report_kernel(superkey: Option, event: &str, state: &str) -> Result<()> { let args = vec![ superkey.unwrap_or_default(), "event".to_string(), event.to_string(), state.to_string(), ]; let args_ref: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); let _ = utils::run_command("truncate", &args_ref, None)?.wait()?; Ok(()) } fn setup_fp_directories() -> Result<()> { utils::ensure_dir_with_perms( Path::new("/data/adb/fp/bin"), Path::new("/data/adb/fp"), 0o755, )?; utils::ensure_dir_with_perms( Path::new(defs::FP_KPMS_AUTOLOAD_DIR), Path::new(defs::FP_KPMS_DIR), 0o755, )?; Ok(()) } fn setup_logging() -> Result<()> { let log_dir = Path::new(defs::APATCH_LOG_FOLDER); if !log_dir.exists() { fs::create_dir(log_dir).expect("Failed to create log folder"); let permissions = fs::Permissions::from_mode(0o700); fs::set_permissions(log_dir, permissions).expect("Failed to set permissions"); } let command_string = format!( "cd {}; rm -f *.last; [ -f dmesg.log ] && mv dmesg.log dmesg.last; [ -f logcat.log ] && mv logcat.log logcat.last; [ -f locat.log ] && mv locat.log logcat.last; rm -f *.log *.old.log", defs::APATCH_LOG_FOLDER ); let result = utils::run_command("sh", &["-c", &command_string], None)?.wait()?; if result.success() { info!("Successfully rotated logs."); } else { info!("Failed to rotate logs."); } let logcat_path = format!("{}logcat.log", defs::APATCH_LOG_FOLDER); let dmesg_path = format!("{}dmesg.log", defs::APATCH_LOG_FOLDER); let bootlog = fs::File::create(&dmesg_path)?; let _ = unsafe { Command::new("timeout") .process_group(0) .pre_exec(|| { switch_cgroups(); Ok(()) }) .args(vec![ "-s", "9", "45s", "logcat", "-b", "main,system,crash", "DrmLibFs:S", "-f", &logcat_path, "logcatcher-bootlog:S", "&", ]) .spawn() }; let _ = unsafe { Command::new("timeout") .process_group(0) .pre_exec(|| { switch_cgroups(); Ok(()) }) 
.args(["-s", "9", "120s", "dmesg", "-w"]) .stdout(Stdio::from(bootlog)) .spawn() }; Ok(()) } fn disable_all_modules_safe() { if let Err(e) = module::disable_all_modules() { warn!("disable all modules failed: {e}"); } } pub fn on_post_data_fs(superkey: Option) -> Result<()> { utils::umask(0); report_kernel(superkey.clone(), "post-fs-data", "before")?; setup_fp_directories()?; supercall::autoload_kpm_modules(&superkey, "post-fs-data"); init_load_su_path(&superkey); let mut sepol = get_policy_main(&["magiskpolicy".to_string(), "--live".to_string()])?; sepol.magisk_rules(); sepol .to_file("/sys/fs/selinux/load") .context("Cannot apply policy")?; info!("Re-privilege apd profile after injecting sepolicy"); supercall::privilege_apd_profile(&superkey); // Apply UTS namespace spoofing if configured supercall::apply_uts_spoof(&superkey); // Apply pathhide config if enabled supercall::apply_pathhide(&superkey); // Apply netisolate config if enabled supercall::apply_netisolate(&superkey); // Clear all temporary module configs early if let Err(e) = crate::module_config::clear_all_temp_configs() { warn!("clear temp configs failed: {e}"); } if utils::has_magisk() { warn!("Magisk detected, skip post-fs-data!"); report_kernel(superkey.clone(), "post-fs-data", "after")?; return Ok(()); } setup_logging()?; for key in ["KERNELPATCH_VERSION", "KERNEL_VERSION"] { match env::var(key) { Ok(value) => println!("{key}: {value}"), Err(_) => println!("{key} not found"), } } let safe_mode = utils::is_safe_mode(superkey.clone()); if safe_mode { // we should still mount modules.img to `/data/adb/modules` in safe mode // becuase we may need to operate the module dir in safe mode warn!("safe mode, skip common post-fs-data.d scripts"); disable_all_modules_safe(); } else { // Then exec common post-fs-data scripts if let Err(e) = module::exec_common_scripts("post-fs-data.d", true) { warn!("exec common post-fs-data scripts failed: {}", e); } } let module_update_dir = defs::MODULE_UPDATE_DIR; //save 
module place let module_dir = defs::MODULE_DIR; // run modules place let module_update_flag = Path::new(defs::WORKING_DIR).join(defs::UPDATE_FILE_NAME); // if update ,there will be renewed modules file assets::ensure_binaries().with_context(|| "binary missing")?; if Path::new(defs::MODULE_UPDATE_DIR).exists() { module::handle_updated_modules()?; fs::remove_dir_all(module_update_dir)?; } if safe_mode { warn!("safe mode, skip post-fs-data scripts and disable all modules!"); disable_all_modules_safe(); return Ok(()); } if let Err(e) = module::prune_modules() { warn!("prune modules failed: {}", e); } if let Err(e) = restorecon::restorecon() { warn!("restorecon failed: {}", e); } // load sepolicy.rule if module::load_sepolicy_rule().is_err() { warn!("load sepolicy.rule failed"); } if Path::new(defs::MAGIC_MOUNT_FILE).exists() { info!("Magic Mount mode enabled"); if let Err(e) = crate::magic_mount::magic_mount() { log::error!("Magic Mount failed: {}", e); } } else { info!("Magic Mount disabled"); if let Err(e) = metamodule::exec_mount_script(module_dir) { warn!("execute metamodule mount failed: {e}"); } } // Execute Hide Service if enabled if Path::new(defs::HIDE_SERVICE_FILE).exists() { info!("Hide Service enabled, executing fpd -hide..."); if Path::new(defs::HIDE_BINARY_PATH).exists() { let result = Command::new(defs::HIDE_BINARY_PATH).arg("-hide").status(); match result { Ok(status) => { if status.success() { info!("fpd -hide executed successfully"); } else { warn!("fpd -hide exited with status: {:?}", status.code()); } } Err(e) => { warn!("Failed to execute fpd -hide: {}", e); } } } else { warn!( "fpd binary not found at {}, please copy it manually", defs::HIDE_BINARY_PATH ); } } else { info!("Hide Service disabled"); } // exec modules post-fs-data scripts // TODO: Add timeout if let Err(e) = module::exec_stage_script("post-fs-data", true) { warn!("exec post-fs-data scripts failed: {}", e); } if let Err(e) = lua::exec_stage_lua("post-fs-data", true, 
superkey.as_deref().unwrap_or("")) { warn!("Failed to exec post-fs-data lua: {}", e); } // load system.prop if let Err(e) = module::load_system_prop() { warn!("load system.prop failed: {}", e); } info!("remove update flag"); let _ = fs::remove_file(module_update_flag); run_stage("post-mount", superkey.clone(), true); report_kernel(superkey, "post-fs-data", "after")?; env::set_current_dir("/").with_context(|| "failed to chdir to /")?; Ok(()) } fn run_stage(stage: &str, superkey: Option, block: bool) { utils::umask(0); if utils::has_magisk() { warn!("Magisk detected, skip {stage}"); return; } if utils::is_safe_mode(superkey.clone()) { warn!("safe mode, skip {stage} scripts"); disable_all_modules_safe(); return; } // execute metamodule stage script first (priority) if let Err(e) = metamodule::exec_stage_script(stage, block) { warn!("Failed to exec metamodule {stage} script: {e}"); } if let Err(e) = module::exec_common_scripts(&format!("{stage}.d"), block) { warn!("Failed to exec common {stage} scripts: {e}"); } if let Err(e) = module::exec_stage_script(stage, block) { warn!("Failed to exec {stage} scripts: {e}"); } if let Err(e) = lua::exec_stage_lua(stage, block, superkey.as_deref().unwrap_or("")) { warn!("Failed to exec {stage} lua: {e}"); } } pub fn on_services(superkey: Option) -> Result<()> { info!("on_services triggered!"); supercall::autoload_kpm_modules(&superkey, "service"); run_stage("service", superkey, false); Ok(()) } fn run_uid_monitor() { info!("Trigger run_uid_monitor!"); let mut command = &mut Command::new("/data/adb/apd"); { command = command.process_group(0); command = unsafe { command.pre_exec(|| { // ignore the error? 
switch_cgroups(); Ok(()) }) }; } command = command.arg("uid-listener"); command .spawn() .map(|_| ()) .expect("[run_uid_monitor] Failed to run uid monitor"); } pub fn on_boot_completed(superkey: Option) -> Result<()> { info!("on_boot_completed triggered!"); // Clear UTS spoof boot safety flag — boot completed successfully if Path::new(defs::UTS_SPOOF_BOOT_PENDING).exists() { let _ = std::fs::remove_file(defs::UTS_SPOOF_BOOT_PENDING); info!("UTS spoof boot safety flag cleared"); } run_stage("boot-completed", superkey, false); // Execute Umount Service if enabled // Run at boot-completed (latest possible stage) to ensure all mount // points — including those created by system_server and Zygote hooks // (e.g. ReZygisk module.prop bind mounts) — are fully established. if Path::new(defs::UMOUNT_SERVICE_FILE).exists() { info!("Umount Service enabled, executing fpd -umount..."); if Path::new(defs::UMOUNT_BINARY_PATH).exists() { let result = unsafe { Command::new(defs::UMOUNT_BINARY_PATH) .arg("-umount") .stdout(Stdio::piped()) .stderr(Stdio::piped()) .pre_exec(|| { let _ = utils::switch_mnt_ns(1); Ok(()) }) .output() }; match result { Ok(output) => { let stdout = String::from_utf8_lossy(&output.stdout); let stderr = String::from_utf8_lossy(&output.stderr); if output.status.success() { info!("fpd -umount executed successfully"); } else { warn!("fpd -umount exited with status: {:?}", output.status.code()); } if !stdout.trim().is_empty() { info!("fpd -umount stdout: {}", stdout.trim()); } if !stderr.trim().is_empty() { info!("fpd -umount stderr: {}", stderr.trim()); } } Err(e) => { warn!("Failed to execute fpd -umount: {}", e); } } } else { warn!( "fpd binary not found at {}, please copy it manually", defs::UMOUNT_BINARY_PATH ); } } else { info!("Umount Service disabled"); } run_uid_monitor(); Ok(()) } pub fn start_uid_listener() -> Result<()> { info!("start_uid_listener triggered!"); println!("[start_uid_listener] Registering..."); // create inotify instance const 
SYS_PACKAGES_LIST_TMP: &str = "/data/system/packages.list.tmp"; let sys_packages_list_tmp = PathBuf::from(&SYS_PACKAGES_LIST_TMP); let dir: PathBuf = sys_packages_list_tmp.parent().unwrap().into(); let (tx, rx) = std::sync::mpsc::channel(); let tx_clone = tx.clone(); let mutex = Arc::new(Mutex::new(())); { let mutex_clone = mutex.clone(); thread::spawn(move || { let mut signals = Signals::new(&[SIGTERM, SIGINT, SIGPWR]).unwrap(); for sig in signals.forever() { log::warn!("[shutdown] Caught signal {sig}, refreshing package list..."); let skey = CStr::from_bytes_with_nul(b"su\0") .expect("[shutdown_listener] CStr::from_bytes_with_nul failed"); refresh_ap_package_list(&skey, &mutex_clone); break; } }); } let mut watcher = INotifyWatcher::new( move |ev: notify::Result| match ev { Ok(Event { kind: EventKind::Modify(ModifyKind::Name(RenameMode::Both)), paths, .. }) => { if paths.contains(&sys_packages_list_tmp) { info!("[uid_monitor] System packages list changed, sending to tx..."); tx_clone.send(false).unwrap() } } Err(err) => warn!("inotify error: {err}"), _ => (), }, Config::default(), )?; watcher.watch(dir.as_ref(), RecursiveMode::NonRecursive)?; { let skey = CStr::from_bytes_with_nul(b"su\0") .expect("[start_uid_listener] CStr::from_bytes_with_nul failed"); info!("[uid_monitor] Performing initial refresh on startup..."); refresh_ap_package_list(&skey, &mutex); } let mut debounce = false; while let Ok(delayed) = rx.recv() { if delayed { debounce = false; let skey = CStr::from_bytes_with_nul(b"su\0") .expect("[start_uid_listener] CStr::from_bytes_with_nul failed"); refresh_ap_package_list(&skey, &mutex); report_kernel(None, "uid_listener", "package-list-updated").unwrap_or_else(|e| { warn!("Failed to report kernel about package list update: {e}"); }); } else if !debounce { thread::sleep(Duration::from_secs(1)); debounce = true; tx.send(true)?; } } Ok(()) } ================================================ FILE: apd/src/install_jq.sh 
================================================ #!/system/bin/sh # Install jq binary to /data/adb/jq # This script is called during APatch installation JQ_DIR="/data/adb" JQ_BIN="$JQ_DIR/jq" # Check if jq already exists and is up to date if [ -f "$JQ_BIN" ]; then # jq already installed, skip exit 0 fi # Extract jq from assets if [ -f "$APATCH_ASSETS_DIR/jq/jq" ]; then cp "$APATCH_ASSETS_DIR/jq/jq" "$JQ_BIN" chmod 755 "$JQ_BIN" echo "jq installed to $JQ_BIN" else echo "jq binary not found in assets" exit 1 fi ================================================ FILE: apd/src/installer.sh ================================================ #!/system/bin/sh ############################################ # APatch Module installer script # mostly from module_installer.sh # and util_functions.sh in Magisk ############################################ umask 022 ui_print() { if $BOOTMODE; then echo "$1" else echo -e "ui_print $1\nui_print" >> /proc/self/fd/$OUTFD fi } toupper() { echo "$@" | tr '[:lower:]' '[:upper:]' } grep_cmdline() { local REGEX="s/^$1=//p" { echo $(cat /proc/cmdline)$(sed -e 's/[^"]//g' -e 's/""//g' /proc/cmdline) | xargs -n 1; \ sed -e 's/ = /=/g' -e 's/, /,/g' -e 's/"//g' /proc/bootconfig; \ } 2>/dev/null | sed -n "$REGEX" } grep_prop() { local REGEX="s/^$1=//p" shift local FILES=$@ [ -z "$FILES" ] && FILES='/system/build.prop' cat $FILES 2>/dev/null | dos2unix | sed -n "$REGEX" | head -n 1 } grep_get_prop() { local result=$(grep_prop $@) if [ -z "$result" ]; then # Fallback to getprop getprop "$1" else echo $result fi } is_mounted() { grep -q " $(readlink -f $1) " /proc/mounts 2>/dev/null return $? } abort() { ui_print "$1" $BOOTMODE || recovery_cleanup [ ! 
-z $MODPATH ] && rm -rf $MODPATH rm -rf $TMPDIR exit 1 } print_title() { local len line1len line2len bar line1len=$(echo -n $1 | wc -c) line2len=$(echo -n $2 | wc -c) len=$line2len [ $line1len -gt $line2len ] && len=$line1len len=$((len + 2)) bar=$(printf "%${len}s" | tr ' ' '*') ui_print "$bar" ui_print " $1 " [ "$2" ] && ui_print " $2 " ui_print "$bar" } ###################### # Environment Related ###################### setup_flashable() { ensure_bb $BOOTMODE && return if [ -z $OUTFD ] || readlink /proc/$$/fd/$OUTFD | grep -q /tmp; then # We will have to manually find out OUTFD for FD in `ls /proc/$$/fd`; do if readlink /proc/$$/fd/$FD | grep -q pipe; then if ps | grep -v grep | grep -qE " 3 $FD |status_fd=$FD"; then OUTFD=$FD break fi fi done fi recovery_actions } ensure_bb() { : } recovery_actions() { : } recovery_cleanup() { : } ####################### # Installation Related ####################### # find_block [partname...] find_block() { local BLOCK DEV DEVICE DEVNAME PARTNAME UEVENT for BLOCK in "$@"; do DEVICE=`find /dev/block \( -type b -o -type c -o -type l \) -iname $BLOCK | head -n 1` 2>/dev/null if [ ! -z $DEVICE ]; then readlink -f $DEVICE return 0 fi done # Fallback by parsing sysfs uevents for UEVENT in /sys/dev/block/*/uevent; do DEVNAME=`grep_prop DEVNAME $UEVENT` PARTNAME=`grep_prop PARTNAME $UEVENT` for BLOCK in "$@"; do if [ "$(toupper $BLOCK)" = "$(toupper $PARTNAME)" ]; then echo /dev/block/$DEVNAME return 0 fi done done # Look just in /dev in case we're dealing with MTD/NAND without /dev/block devices/links for DEV in "$@"; do DEVICE=`find /dev \( -type b -o -type c -o -type l \) -maxdepth 1 -iname $DEV | head -n 1` 2>/dev/null if [ ! -z $DEVICE ]; then readlink -f $DEVICE return 0 fi done return 1 } # setup_mntpoint setup_mntpoint() { local POINT=$1 [ -L $POINT ] && mv -f $POINT ${POINT}_link if [ ! 
-d $POINT ]; then rm -f $POINT mkdir -p $POINT fi } # mount_name mount_name() { local PART=$1 local POINT=$2 local FLAG=$3 setup_mntpoint $POINT is_mounted $POINT && return # First try mounting with fstab mount $FLAG $POINT 2>/dev/null if ! is_mounted $POINT; then local BLOCK=$(find_block $PART) mount $FLAG $BLOCK $POINT || return fi ui_print "- Mounting $POINT" } # mount_ro_ensure mount_ro_ensure() { # We handle ro partitions only in recovery $BOOTMODE && return local PART=$1 local POINT=$2 mount_name "$PART" $POINT '-o ro' is_mounted $POINT || abort "! Cannot mount $POINT" } mount_partitions() { # Check A/B slot SLOT=`grep_cmdline androidboot.slot_suffix` if [ -z $SLOT ]; then SLOT=`grep_cmdline androidboot.slot` [ -z $SLOT ] || SLOT=_${SLOT} fi [ -z $SLOT ] || ui_print "- Current boot slot: $SLOT" # Mount ro partitions if is_mounted /system_root; then umount /system 2&>/dev/null umount /system_root 2&>/dev/null fi mount_ro_ensure "system$SLOT app$SLOT" /system if [ -f /system/init -o -L /system/init ]; then SYSTEM_ROOT=true setup_mntpoint /system_root if ! mount --move /system /system_root; then umount /system umount -l /system 2>/dev/null mount_ro_ensure "system$SLOT app$SLOT" /system_root fi mount -o bind /system_root/system /system else SYSTEM_ROOT=false grep ' / ' /proc/mounts | grep -qv 'rootfs' || grep -q ' /system_root ' /proc/mounts && SYSTEM_ROOT=true fi # /vendor is used only on some older devices for recovery AVBv1 signing so is not critical if fails [ -L /system/vendor ] && mount_name vendor$SLOT /vendor '-o ro' $SYSTEM_ROOT && ui_print "- Device is system-as-root" # Mount sepolicy rules dir locations in recovery (best effort) if ! 
$BOOTMODE; then mount_name "cache cac" /cache mount_name metadata /metadata mount_name persist /persist fi } api_level_arch_detect() { API=$(grep_get_prop ro.build.version.sdk) ABI=$(grep_get_prop ro.product.cpu.abi) if [ "$ABI" = "x86" ]; then ARCH=x86 ABI32=x86 IS64BIT=false elif [ "$ABI" = "arm64-v8a" ]; then ARCH=arm64 ABI32=armeabi-v7a IS64BIT=true elif [ "$ABI" = "x86_64" ]; then ARCH=x64 ABI32=x86 IS64BIT=true else ARCH=arm ABI=armeabi-v7a ABI32=armeabi-v7a IS64BIT=false fi } ################# # Module Related ################# set_perm() { chown $2:$3 $1 || return 1 chmod $4 $1 || return 1 local CON=$5 [ -z $CON ] && CON=u:object_r:system_file:s0 chcon $CON $1 || return 1 } set_perm_recursive() { find $1 -type d 2>/dev/null | while read dir; do set_perm $dir $2 $3 $4 $6 done find $1 -type f -o -type l 2>/dev/null | while read file; do set_perm $file $2 $3 $5 $6 done } mktouch() { mkdir -p ${1%/*} 2>/dev/null [ -z $2 ] && touch $1 || echo $2 > $1 chmod 644 $1 } mark_remove() { mkdir -p ${1%/*} 2>/dev/null mknod $1 c 0 0 chmod 644 $1 } mark_replace() { # REPLACE must be directory!!! # https://docs.kernel.org/filesystems/overlayfs.html#whiteouts-and-opaque-directories mkdir -p $1 2>/dev/null setfattr -n trusted.overlay.opaque -v y $1 chmod 644 $1 } request_size_check() { reqSizeM=`du -ms "$1" | cut -f1` } request_zip_size_check() { reqSizeM=`unzip -l "$1" | tail -n 1 | awk '{ print int(($1 - 1) / 1048576 + 1) }'` } boot_actions() { if [ ! 
-f "$NVBASE/jq" ]; then
    # jq missing from $NVBASE: try to recover the bundled binary from the
    # manager APK's assets.
    local apk_path=$(find /data/app -name "base.apk" -path "*/me.yuki.folk-*" 2>/dev/null | head -n 1)
    if [ -n "$apk_path" ] && [ -f "$apk_path" ]; then
      # Extract jq from APK assets
      mkdir -p /data/local/tmp/jq_extract
      if unzip -o "$apk_path" "jq/jq" -d /data/local/tmp/jq_extract >&2; then
        if [ -f "/data/local/tmp/jq_extract/jq/jq" ]; then
          cp /data/local/tmp/jq_extract/jq/jq "$NVBASE/jq"
          chmod 755 "$NVBASE/jq"
        fi
      fi
      # FIX: always remove the scratch dir. Previously it was only removed
      # when unzip failed or when jq/jq was found, leaking the directory
      # when unzip succeeded without producing jq/jq.
      rm -rf /data/local/tmp/jq_extract
    fi
  fi
  return
}

# Require ZIPFILE to be set.
# A "legacy" module zip ships its own install.sh instead of customize.sh.
is_legacy_script() {
  unzip -l "$ZIPFILE" install.sh | grep -q install.sh
  return $?
}

handle_partition() {
  # if /system/vendor is a symlink, we need to move it out of $MODPATH/system, otherwise it will be overlayed
  # if /system/vendor is a normal directory, it is ok to overlay it and we don't need to overlay it separately.
  if [ ! -e $MODPATH/system/$1 ]; then
    # no partition found
    return;
  fi
  if [ -L "/system/$1" ] && [ "$(readlink -f /system/$1)" = "/$1" ]; then
    ui_print "- Handle partition /$1"
    ln -sf "./system/$1" "$MODPATH/$1"
  fi
}

# Require OUTFD, ZIPFILE to be set
install_module() {
  rm -rf $TMPDIR
  mkdir -p $TMPDIR
  chcon u:object_r:system_file:s0 $TMPDIR
  cd $TMPDIR

  mount_partitions
  api_level_arch_detect

  # Setup busybox and binaries
  if $BOOTMODE; then
    boot_actions
  else
    recovery_actions
  fi

  # Extract prop file
  unzip -o "$ZIPFILE" module.prop -d $TMPDIR >&2
  [ ! -f $TMPDIR/module.prop ] && abort "! Unable to extract zip file!"

  # In boot mode modules are staged in modules_update and promoted later.
  local MODDIRNAME=modules
  $BOOTMODE && MODDIRNAME=modules_update
  local MODULEROOT=$NVBASE/$MODDIRNAME
  MODID=`grep_prop id $TMPDIR/module.prop`
  MODNAME=`grep_prop name $TMPDIR/module.prop`
  MODAUTH=`grep_prop author $TMPDIR/module.prop`
  MODPATH=$MODULEROOT/$MODID

  # Create mod paths
  rm -rf $MODPATH
  mkdir -p $MODPATH

  if is_legacy_script; then
    unzip -oj "$ZIPFILE" module.prop install.sh uninstall.sh 'common/*' -d $TMPDIR >&2
    # Load install script
    .
$TMPDIR/install.sh # Callbacks print_modname on_install [ -f $TMPDIR/uninstall.sh ] && cp -af $TMPDIR/uninstall.sh $MODPATH/uninstall.sh $SKIPMOUNT && touch $MODPATH/skip_mount $PROPFILE && cp -af $TMPDIR/system.prop $MODPATH/system.prop cp -af $TMPDIR/module.prop $MODPATH/module.prop $POSTFSDATA && cp -af $TMPDIR/post-fs-data.sh $MODPATH/post-fs-data.sh $LATESTARTSERVICE && cp -af $TMPDIR/service.sh $MODPATH/service.sh ui_print "- Setting permissions" set_permissions else print_title "$MODNAME" "by $MODAUTH" print_title "Powered by APatch" unzip -o "$ZIPFILE" customize.sh -d $MODPATH >&2 if ! grep -q '^SKIPUNZIP=1$' $MODPATH/customize.sh 2>/dev/null; then ui_print "- Extracting module files" unzip -o "$ZIPFILE" -x 'META-INF/*' -d $MODPATH >&2 # Default permissions set_perm_recursive $MODPATH 0 0 0755 0644 set_perm_recursive $MODPATH/system/bin 0 2000 0755 0755 set_perm_recursive $MODPATH/system/xbin 0 2000 0755 0755 set_perm_recursive $MODPATH/system/system_ext/bin 0 2000 0755 0755 set_perm_recursive $MODPATH/system/vendor 0 2000 0755 0755 u:object_r:vendor_file:s0 fi # Load customization script [ -f $MODPATH/customize.sh ] && . 
$MODPATH/customize.sh fi # Handle replace folders for TARGET in $REPLACE; do ui_print "- Replace target: $TARGET" mark_replace $MODPATH$TARGET done # Handle remove files for TARGET in $REMOVE; do ui_print "- Remove target: $TARGET" mark_remove $MODPATH$TARGET done handle_partition vendor handle_partition system_ext handle_partition product if $BOOTMODE; then mktouch $NVBASE/modules/$MODID/update rm -rf $NVBASE/modules/$MODID/remove 2>/dev/null rm -rf $NVBASE/modules/$MODID/disable 2>/dev/null cp -af $MODPATH/module.prop $NVBASE/modules/$MODID/module.prop fi # Remove stuff that doesn't belong to modules and clean up any empty directories rm -rf \ $MODPATH/system/placeholder $MODPATH/customize.sh \ $MODPATH/README.md $MODPATH/.git* rmdir -p $MODPATH 2>/dev/null cd / $BOOTMODE || recovery_cleanup rm -rf $TMPDIR ui_print "- Done" } ########## # Presets ########## # Detect whether in boot mode [ -z $BOOTMODE ] && ps | grep zygote | grep -qv grep && BOOTMODE=true [ -z $BOOTMODE ] && ps -A 2>/dev/null | grep zygote | grep -qv grep && BOOTMODE=true [ -z $BOOTMODE ] && BOOTMODE=false NVBASE=/data/adb TMPDIR=/dev/tmp POSTFSDATAD=$NVBASE/post-fs-data.d SERVICED=$NVBASE/service.d # Some modules dependents on this export MAGISK_VER=27.0 export MAGISK_VER_CODE=27000 ================================================ FILE: apd/src/installer_bind.sh ================================================ #!/system/bin/sh ############################################ # APatch Module installer script # mostly from module_installer.sh # and util_functions.sh in Magisk ############################################ umask 022 ui_print() { if $BOOTMODE; then echo "$1" else echo -e "ui_print $1\nui_print" >> /proc/self/fd/$OUTFD fi } toupper() { echo "$@" | tr '[:lower:]' '[:upper:]' } grep_cmdline() { local REGEX="s/^$1=//p" { echo $(cat /proc/cmdline)$(sed -e 's/[^"]//g' -e 's/""//g' /proc/cmdline) | xargs -n 1; \ sed -e 's/ = /=/g' -e 's/, /,/g' -e 's/"//g' /proc/bootconfig; \ } 2>/dev/null | sed 
-n "$REGEX" } grep_prop() { local REGEX="s/$1=//p" shift local FILES=$@ [ -z "$FILES" ] && FILES='/system/build.prop' cat $FILES 2>/dev/null | dos2unix | sed -n "$REGEX" | head -n 1 | xargs } grep_get_prop() { local result=$(grep_prop $@) if [ -z "$result" ]; then # Fallback to getprop getprop "$1" else echo $result fi } is_mounted() { grep -q " $(readlink -f $1) " /proc/mounts 2>/dev/null return $? } abort() { ui_print "$1" $BOOTMODE || recovery_cleanup [ ! -z $MODPATH ] && rm -rf $MODPATH rm -rf $TMPDIR exit 1 } print_title() { local len line1len line2len bar line1len=$(echo -n $1 | wc -c) line2len=$(echo -n $2 | wc -c) len=$line2len [ $line1len -gt $line2len ] && len=$line1len len=$((len + 2)) bar=$(printf "%${len}s" | tr ' ' '*') ui_print "$bar" ui_print " $1 " [ "$2" ] && ui_print " $2 " ui_print "$bar" } check_sepolicy() { /data/adb/apd sepolicy check "$1" return $? } ###################### # Environment Related ###################### setup_flashable() { ensure_bb $BOOTMODE && return if [ -z $OUTFD ] || readlink /proc/$$/fd/$OUTFD | grep -q /tmp; then # We will have to manually find out OUTFD for FD in /proc/$$/fd/*; do if readlink /proc/$$/fd/$FD | grep -q pipe; then if ps | grep -v grep | grep -qE " 3 $FD |status_fd=$FD"; then OUTFD=$FD break fi fi done fi recovery_actions } ensure_bb() { : } recovery_actions() { : } recovery_cleanup() { : } ####################### # Installation Related ####################### # find_block [partname...] find_block() { local BLOCK DEV DEVICE DEVNAME PARTNAME UEVENT for BLOCK in "$@"; do DEVICE=`find /dev/block \( -type b -o -type c -o -type l \) -iname $BLOCK | head -n 1` 2>/dev/null if [ ! 
-z $DEVICE ]; then readlink -f $DEVICE return 0 fi done # Fallback by parsing sysfs uevents for UEVENT in /sys/dev/block/*/uevent; do DEVNAME=`grep_prop DEVNAME $UEVENT` PARTNAME=`grep_prop PARTNAME $UEVENT` for BLOCK in "$@"; do if [ "$(toupper $BLOCK)" = "$(toupper $PARTNAME)" ]; then echo /dev/block/$DEVNAME return 0 fi done done # Look just in /dev in case we're dealing with MTD/NAND without /dev/block devices/links for DEV in "$@"; do DEVICE=`find /dev \( -type b -o -type c -o -type l \) -maxdepth 1 -iname $DEV | head -n 1` 2>/dev/null if [ ! -z $DEVICE ]; then readlink -f $DEVICE return 0 fi done return 1 } # setup_mntpoint setup_mntpoint() { local POINT=$1 [ -L $POINT ] && mv -f $POINT ${POINT}_link if [ ! -d $POINT ]; then rm -f $POINT mkdir -p $POINT fi } # mount_name mount_name() { local PART=$1 local POINT=$2 local FLAG=$3 setup_mntpoint $POINT is_mounted $POINT && return # First try mounting with fstab mount $FLAG $POINT 2>/dev/null if ! is_mounted $POINT; then local BLOCK=$(find_block $PART) mount $FLAG $BLOCK $POINT || return fi ui_print "- Mounting $POINT" } # mount_ro_ensure mount_ro_ensure() { # We handle ro partitions only in recovery $BOOTMODE && return local PART=$1 local POINT=$2 mount_name "$PART" $POINT '-o ro' is_mounted $POINT || abort "! Cannot mount $POINT" } mount_partitions() { # Check A/B slot SLOT=`grep_cmdline androidboot.slot_suffix` if [ -z $SLOT ]; then SLOT=`grep_cmdline androidboot.slot` [ -z $SLOT ] || SLOT=_${SLOT} fi [ -z $SLOT ] || ui_print "- Current boot slot: $SLOT" # Mount ro partitions if is_mounted /system_root; then umount /system 2&>/dev/null umount /system_root 2&>/dev/null fi mount_ro_ensure "system$SLOT app$SLOT" /system if [ -f /system/init -o -L /system/init ]; then SYSTEM_ROOT=true setup_mntpoint /system_root if ! 
mount --move /system /system_root; then umount /system umount -l /system 2>/dev/null mount_ro_ensure "system$SLOT app$SLOT" /system_root fi mount -o bind /system_root/system /system else SYSTEM_ROOT=false grep ' / ' /proc/mounts | grep -qv 'rootfs' || grep -q ' /system_root ' /proc/mounts && SYSTEM_ROOT=true fi # /vendor is used only on some older devices for recovery AVBv1 signing so is not critical if fails [ -L /system/vendor ] && mount_name vendor$SLOT /vendor '-o ro' $SYSTEM_ROOT && ui_print "- Device is system-as-root" # Mount sepolicy rules dir locations in recovery (best effort) if ! $BOOTMODE; then mount_name "cache cac" /cache mount_name metadata /metadata mount_name persist /persist fi } api_level_arch_detect() { API=$(grep_get_prop ro.build.version.sdk) ABI=$(grep_get_prop ro.product.cpu.abi) if [ "$ABI" = "x86" ]; then ARCH=x86 ABI32=x86 IS64BIT=false elif [ "$ABI" = "arm64-v8a" ]; then ARCH=arm64 ABI32=armeabi-v7a IS64BIT=true elif [ "$ABI" = "x86_64" ]; then ARCH=x64 ABI32=x86 IS64BIT=true else ARCH=arm ABI=armeabi-v7a ABI32=armeabi-v7a IS64BIT=false fi } ################# # Module Related ################# set_perm() { chown $2:$3 $1 || return 1 chmod $4 $1 || return 1 local CON=$5 [ -z $CON ] && CON=u:object_r:system_file:s0 chcon $CON $1 || return 1 } set_perm_recursive() { find $1 -type d 2>/dev/null | while read dir; do set_perm $dir $2 $3 $4 $6 done find $1 -type f -o -type l 2>/dev/null | while read file; do set_perm $file $2 $3 $5 $6 done } mktouch() { mkdir -p ${1%/*} 2>/dev/null [ -z $2 ] && touch $1 || echo $2 > $1 chmod 644 $1 } mark_remove() { mkdir -p ${1%/*} 2>/dev/null mknod $1 c 0 0 chmod 644 $1 } mark_replace() { # REPLACE must be directory!!! 
# https://docs.kernel.org/filesystems/overlayfs.html#whiteouts-and-opaque-directories mkdir -p $1 2>/dev/null setfattr -n trusted.overlay.opaque -v y $1 chmod 644 $1 } request_size_check() { reqSizeM=`du -ms "$1" | cut -f1` } request_zip_size_check() { reqSizeM=`unzip -l "$1" | tail -n 1 | awk '{ print int(($1 - 1) / 1048576 + 1) }'` } boot_actions() { return; } # Require ZIPFILE to be set is_legacy_script() { unzip -l "$ZIPFILE" install.sh | grep -q install.sh return $? } handle_partition() { PARTITION="$1" REQUIRE_SYMLINK="$2" if [ ! -e "$MODPATH/system/$PARTITION" ]; then # no partition found return; fi if [ "$REQUIRE_SYMLINK" = "false" ] || [ -L "/system/$PARTITION" ] && [ "$(readlink -f "/system/$PARTITION")" = "/$PARTITION" ]; then ui_print "- Handle partition /$PARTITION" ln -sf "./system/$PARTITION" "$MODPATH/$PARTITION" fi } # Require OUTFD, ZIPFILE to be set install_module() { rm -rf $TMPDIR mkdir -p $TMPDIR chcon u:object_r:system_file:s0 $TMPDIR cd $TMPDIR mount_partitions api_level_arch_detect # Setup busybox and binaries if $BOOTMODE; then boot_actions else recovery_actions fi # Extract prop file unzip -o "$ZIPFILE" module.prop -d $TMPDIR >&2 [ ! -f $TMPDIR/module.prop ] && abort "! Unable to extract zip file!" local MODDIRNAME=modules $BOOTMODE && MODDIRNAME=modules_update local MODULEROOT=$NVBASE/$MODDIRNAME MODID=`grep_prop id $TMPDIR/module.prop` MODNAME=`grep_prop name $TMPDIR/module.prop` MODAUTH=`grep_prop author $TMPDIR/module.prop` MODPATH=$MODULEROOT/$MODID # Create mod paths rm -rf $MODPATH mkdir -p $MODPATH if is_legacy_script; then unzip -oj "$ZIPFILE" module.prop install.sh uninstall.sh 'common/*' -d $TMPDIR >&2 # Load install script . 
$TMPDIR/install.sh # Callbacks print_modname on_install [ -f $TMPDIR/uninstall.sh ] && cp -af $TMPDIR/uninstall.sh $MODPATH/uninstall.sh $SKIPMOUNT && touch $MODPATH/skip_mount $PROPFILE && cp -af $TMPDIR/system.prop $MODPATH/system.prop cp -af $TMPDIR/module.prop $MODPATH/module.prop $POSTFSDATA && cp -af $TMPDIR/post-fs-data.sh $MODPATH/post-fs-data.sh $LATESTARTSERVICE && cp -af $TMPDIR/service.sh $MODPATH/service.sh ui_print "- Setting permissions" set_permissions else print_title "$MODNAME" "by $MODAUTH" print_title "Powered by APatch" unzip -o "$ZIPFILE" customize.sh -d $MODPATH >&2 if ! grep -q '^SKIPUNZIP=1$' $MODPATH/customize.sh 2>/dev/null; then ui_print "- Extracting module files" unzip -o "$ZIPFILE" -x 'META-INF/*' -d $MODPATH >&2 # Default permissions set_perm_recursive $MODPATH 0 0 0755 0644 set_perm_recursive $MODPATH/system/bin 0 2000 0755 0755 set_perm_recursive $MODPATH/system/xbin 0 2000 0755 0755 set_perm_recursive $MODPATH/system/system_ext/bin 0 2000 0755 0755 set_perm_recursive $MODPATH/system/vendor 0 2000 0755 0755 u:object_r:vendor_file:s0 fi # Load customization script [ -f $MODPATH/customize.sh ] && . 
$MODPATH/customize.sh fi handle_partition vendor true handle_partition system_ext true handle_partition product true handle_partition odm false # Handle replace folders for TARGET in $REPLACE; do ui_print "- Replace target: $TARGET" mark_replace "$MODPATH$TARGET" done # Handle remove files for TARGET in $REMOVE; do ui_print "- Remove target: $TARGET" mark_remove "$MODPATH$TARGET" done if $BOOTMODE; then mktouch $NVBASE/modules/$MODID/update rm -rf $NVBASE/modules/$MODID/remove 2>/dev/null rm -rf $NVBASE/modules/$MODID/disable 2>/dev/null cp -af $MODPATH/module.prop $NVBASE/modules/$MODID/module.prop fi # Remove stuff that doesn't belong to modules and clean up any empty directories rm -rf \ $MODPATH/system/placeholder $MODPATH/customize.sh \ $MODPATH/README.md $MODPATH/.git* rmdir -p $MODPATH 2>/dev/null cd / $BOOTMODE || recovery_cleanup rm -rf $TMPDIR ui_print "- Done" } ########## # Presets ########## # Detect whether in boot mode [ -z $BOOTMODE ] && ps | grep zygote | grep -qv grep && BOOTMODE=true [ -z $BOOTMODE ] && ps -A 2>/dev/null | grep zygote | grep -qv grep && BOOTMODE=true [ -z $BOOTMODE ] && BOOTMODE=false NVBASE=/data/adb TMPDIR=/dev/tmp POSTFSDATAD=$NVBASE/post-fs-data.d SERVICED=$NVBASE/service.d # Some modules dependents on this export MAGISK_VER=27.0 export MAGISK_VER_CODE=27000 ================================================ FILE: apd/src/lua.rs ================================================ use crate::module::*; use crate::utils::*; use anyhow::Result; use log::{info, warn}; use mlua::{Function, Lua, Result as LuaResult, Table}; use std::{fs, path::Path}; pub fn save_text>(filename: P, content: &str) -> std::io::Result<()> { let _ = ensure_dir_exists("/data/adb/config"); let path = format!("/data/adb/config/{}", filename.as_ref().display()); fs::write(&path, content)?; Ok(()) } pub fn load_text>(filename: P) -> std::io::Result { let _ = ensure_dir_exists("/data/adb/config"); let path = format!("/data/adb/config/{}", 
filename.as_ref().display());
    fs::read_to_string(path)
}

/// Load every enabled module's `<id>.lua` entry script from
/// `/data/adb/modules/<id>/<id>.lua`, evaluate it, and store the resulting
/// table in the Lua global `modules` keyed by module id.
/// Each module's directory is prepended to `package.cpath` first so the
/// script can `require` native `.so` companions next to it.
pub fn load_all_lua_modules(lua: &Lua) -> LuaResult<()> {
    let modules_dir = Path::new("/data/adb/modules");

    // Fetch (or lazily create) the global `modules` table.
    let modules: Table = match lua.globals().get("modules") {
        Ok(t) => t,
        Err(_) => {
            let t = lua.create_table()?;
            lua.globals().set("modules", t.clone())?;
            t
        }
    };

    if modules_dir.exists() {
        // FIX: the previous fallback `fs::read_dir("/dev/null").unwrap()`
        // panics ("/dev/null" is not a directory); a read_dir failure now
        // simply loads nothing instead of crashing the daemon.
        let entries = match fs::read_dir(modules_dir) {
            Ok(rd) => rd,
            Err(_) => return Ok(()),
        };
        for entry in entries.flatten() {
            let path = entry.path();
            if !path.is_dir() {
                continue;
            }
            let id = path.file_name().unwrap().to_string_lossy().to_string();

            // Prepend the module dir so its native libraries resolve first.
            let package: Table = lua.globals().get("package")?;
            let old_cpath: String = package.get("cpath")?;
            let new_cpath = format!("{}/?.so;{}", path.to_string_lossy(), old_cpath);
            package.set("cpath", new_cpath)?;

            let lua_file = path.join(format!("{}.lua", id));
            if !lua_file.exists() {
                continue;
            }
            match fs::read_to_string(&lua_file) {
                Ok(code) => {
                    // A failing script is reported but must not abort the
                    // loading of the remaining modules.
                    match lua
                        .load(&code)
                        .set_name(&*lua_file.to_string_lossy())
                        .eval::<Table>()
                    {
                        Ok(module) => {
                            modules.set(id.clone(), module.clone())?;
                        }
                        Err(e) => {
                            eprintln!("Failed to eval Lua {}: {}", lua_file.display(), e);
                        }
                    }
                }
                Err(e) => {
                    eprintln!("Failed to read Lua {}: {}", lua_file.display(), e);
                }
            }
        }
    }
    Ok(())
}

/// Build the `info(msg)` callback exposed to Lua; forwards to the `log` crate.
pub fn info_lua(lua: &Lua) -> LuaResult<Function> {
    lua.create_function(|_, msg: String| {
        info!("[Lua] {}", msg);
        Ok(())
    })
}

/// Build the `warn(msg)` callback exposed to Lua; forwards to the `log` crate.
pub fn warn_lua(lua: &Lua) -> LuaResult<Function> {
    lua.create_function(|_, msg: String| {
        warn!("[Lua] {}", msg);
        Ok(())
    })
}

/// Build the `install_module(zip)` callback exposed to Lua.
pub fn install_module_lua(lua: &Lua) -> LuaResult<Function> {
    lua.create_function(|_, zip: String| {
        install_module(&zip)
            .map_err(|e| mlua::Error::external(format!("install_module failed: {}", e)))
    })
}

/// Build the `setConfig(filename, content)` callback exposed to Lua;
/// persists under /data/adb/config via `save_text`.
pub fn save_text_lua(lua: &Lua) -> LuaResult<Function> {
    lua.create_function(|_, (filename, content): (String, String)| {
        save_text(&filename, &content)
            // FIX: typo in error message ("save filed" -> "save failed").
            .map_err(|e| mlua::Error::external(format!("save failed: {}", e)))?;
        Ok(())
    })
}

/// Build the `getConfig(filename)` callback exposed to Lua; a missing config
/// file yields an empty string rather than an error.
pub fn read_text_lua(lua: &Lua) -> LuaResult<Function> {
    lua.create_function(|_, filename: String| {
        let content = match
load_text(&filename) { Ok(s) => s, Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => String::new(), Err(e) => return Err(mlua::Error::external(format!("read failed: {}", e))), }; Ok(content) }) } pub fn exec_stage_lua(stage: &str, wait: bool, superkey: &str) -> Result<()> { let stage_safe = stage.replace('-', "_"); run_lua(&superkey, &stage_safe, true, wait).map_err(|e| anyhow::anyhow!("{}", e))?; Ok(()) } pub fn run_lua(id: &str, function: &str, on_each_module: bool, _wait: bool) -> mlua::Result<()> { let lua = unsafe { Lua::unsafe_new() }; let func = install_module_lua(&lua)?; lua.globals().set("install_module", func)?; lua.globals().set("info", info_lua(&lua)?)?; lua.globals().set("warn", warn_lua(&lua)?)?; lua.globals().set("setConfig", save_text_lua(&lua)?)?; lua.globals().set("getConfig", read_text_lua(&lua)?)?; load_all_lua_modules(&lua)?; let modules: mlua::Table = lua.globals().get("modules")?; if on_each_module { for pair in modules.pairs::() { let (_, module_table) = pair?; if let Ok(func_obj) = module_table.get::(function) { func_obj.call::<()>(id)?; } } } else { let module_table: mlua::Table = modules.get(id)?; let func_obj: mlua::Function = module_table.get(function)?; func_obj.call::<()>(())?; } Ok(()) } ================================================ FILE: apd/src/magic_mount.rs ================================================ use std::{ cmp::PartialEq, collections::{HashMap, hash_map::Entry}, fs, fs::{DirEntry, FileType, create_dir, create_dir_all, read_dir, read_link}, os::unix::fs::{FileTypeExt, symlink}, path::{Path, PathBuf}, }; use anyhow::{Context, Result, bail}; use extattr::lgetxattr; use rustix::{ fs::{Gid, MetadataExt, Mode, Uid, chmod, chown}, mount::{ MountFlags, MountPropagationFlags, UnmountFlags, mount, mount_bind, mount_change, mount_move, unmount, }, }; use crate::{ defs::{ AP_MAGIC_MOUNT_SOURCE, DISABLE_FILE_NAME, MODULE_DIR, REMOVE_FILE_NAME, SKIP_MOUNT_FILE_NAME, }, magic_mount::NodeFileType::{Directory, RegularFile, 
Symlink, Whiteout}, restorecon::{lgetfilecon, lsetfilecon}, utils::ensure_dir_exists, }; const REPLACE_DIR_FILE_NAME: &str = ".replace"; const REPLACE_DIR_XATTR: &str = "trusted.overlay.opaque"; #[derive(PartialEq, Eq, Hash, Clone, Debug)] enum NodeFileType { RegularFile, Directory, Symlink, Whiteout, } impl NodeFileType { fn from_file_type(file_type: FileType) -> Self { if file_type.is_file() { RegularFile } else if file_type.is_dir() { Directory } else if file_type.is_symlink() { Symlink } else { Whiteout } } /// Check if mounting this node type over `real_path` requires a tmpfs overlay /// due to type mismatch or missing file. fn needs_tmpfs_vs_real(&self, real_path: &Path) -> bool { match self { Symlink => true, Whiteout => real_path.exists(), _ => match real_path.symlink_metadata() { Ok(metadata) => { let real_type = Self::from_file_type(metadata.file_type()); real_type != *self || real_type == Symlink } Err(_) => true, }, } } } #[derive(Debug, Clone)] struct Node { name: String, file_type: NodeFileType, children: HashMap, // the module that owned this node module_path: Option, replace: bool, skip: bool, } impl Node { fn collect_module_files

(&mut self, module_dir: P) -> Result where P: AsRef, { let dir = module_dir.as_ref(); let mut has_file = false; for entry in dir.read_dir()?.flatten() { let name = entry.file_name().to_string_lossy().to_string(); let node = match self.children.entry(name.clone()) { Entry::Occupied(o) => Some(o.into_mut()), Entry::Vacant(v) => Self::new_module(&name, &entry).map(|it| v.insert(it)), }; if let Some(node) = node { has_file |= if node.file_type == NodeFileType::Directory { node.collect_module_files(dir.join(&node.name))? || node.replace } else { true } } } Ok(has_file) } fn dir_is_replace

(path: P) -> bool where P: AsRef, { if let Ok(v) = lgetxattr(&path, REPLACE_DIR_XATTR) && String::from_utf8_lossy(&v) == "y" { return true; } path.as_ref().join(REPLACE_DIR_FILE_NAME).exists() } fn new_root(name: T) -> Self { Node { name: name.to_string(), file_type: Directory, children: Default::default(), module_path: None, replace: false, skip: false, } } fn new_module(name: &S, entry: &DirEntry) -> Option where S: ToString, { if let Ok(metadata) = entry.metadata() { let path = entry.path(); let file_type = if metadata.file_type().is_char_device() && metadata.rdev() == 0 { NodeFileType::Whiteout } else { NodeFileType::from_file_type(metadata.file_type()) }; let replace = file_type == NodeFileType::Directory && Self::dir_is_replace(&path); if replace { log::debug!("{} need replace", path.display()); } return Some(Self { name: name.to_string(), file_type, children: HashMap::default(), module_path: Some(path), replace, skip: false, }); } None } } fn collect_module_files() -> Result> { let mut root = Node::new_root(""); let mut system = Node::new_root("system"); let module_root = Path::new(MODULE_DIR); let mut has_file = false; log::debug!("begin collect module files: {}", module_root.display()); for entry in module_root.read_dir()?.flatten() { if !entry.file_type()?.is_dir() { continue; } let id = entry.file_name().to_str().unwrap().to_string(); log::debug!("processing new module: {id}"); let prop = entry.path().join("module.prop"); if !prop.exists() { log::debug!("skipped module {id}, because not found module.prop"); continue; } if entry.path().join(DISABLE_FILE_NAME).exists() || entry.path().join(REMOVE_FILE_NAME).exists() || entry.path().join(SKIP_MOUNT_FILE_NAME).exists() { log::debug!("skipped module {id}, due to disable/remove/skip_mount"); continue; } let mod_system = entry.path().join("system"); if !mod_system.is_dir() { continue; } log::debug!("collecting {}", entry.path().display()); has_file |= system.collect_module_files(mod_system)?; } if has_file { 
const BUILTIN_PARTITIONS: [(&str, bool); 5] = [ ("vendor", true), ("system_ext", true), ("product", true), ("odm", false), ("oem", false), ]; for (partition, require_symlink) in BUILTIN_PARTITIONS { let path_of_root = Path::new("/").join(partition); let path_of_system = Path::new("/system").join(partition); if path_of_root.is_dir() && (!require_symlink || path_of_system.is_symlink()) { let name = partition.to_string(); if let Some(node) = system.children.remove(&name) { root.children.insert(name, node); } } } root.children.insert("system".to_string(), system); Ok(Some(root)) } else { Ok(None) } } fn clone_symlink, Dst: AsRef>(src: Src, dst: Dst) -> Result<()> { let src_symlink = read_link(src.as_ref())?; symlink(&src_symlink, dst.as_ref())?; lsetfilecon(dst.as_ref(), lgetfilecon(src.as_ref())?.as_str())?; log::debug!( "clone symlink {} -> {}({})", dst.as_ref().display(), dst.as_ref().display(), src_symlink.display() ); Ok(()) } fn mount_mirror, WP: AsRef>( path: P, work_dir_path: WP, entry: &DirEntry, ) -> Result<()> { let path = path.as_ref().join(entry.file_name()); let work_dir_path = work_dir_path.as_ref().join(entry.file_name()); let file_type = entry.file_type()?; if file_type.is_file() { log::debug!( "mount mirror file {} -> {}", path.display(), work_dir_path.display() ); fs::File::create(&work_dir_path)?; mount_bind(&path, &work_dir_path)?; } else if file_type.is_dir() { log::debug!( "mount mirror dir {} -> {}", path.display(), work_dir_path.display() ); create_dir(&work_dir_path)?; let metadata = entry.metadata()?; chmod(&work_dir_path, Mode::from_raw_mode(metadata.mode()))?; chown( &work_dir_path, Some(Uid::from_raw(metadata.uid())), Some(Gid::from_raw(metadata.gid())), )?; lsetfilecon(&work_dir_path, lgetfilecon(&path)?.as_str())?; for entry in read_dir(&path)?.flatten() { mount_mirror(&path, &work_dir_path, &entry)?; } } else if file_type.is_symlink() { log::debug!( "create mirror symlink {} -> {}", path.display(), work_dir_path.display() ); 
clone_symlink(&path, &work_dir_path)?; } Ok(()) } fn should_create_tmpfs(path: &Path, current: &mut Node, has_tmpfs: bool) -> bool { if has_tmpfs { return false; } if current.replace && current.module_path.is_some() { return true; } for (name, node) in &mut current.children { let real_path = path.join(name); if node.file_type.needs_tmpfs_vs_real(&real_path) { if current.module_path.is_none() { log::error!("cannot create tmpfs on {}, ignore: {name}", path.display()); node.skip = true; continue; } return true; } } false } fn prepare_tmpfs_skeleton( path: &Path, work_dir_path: &Path, module_path: Option<&PathBuf>, ) -> Result<()> { log::debug!( "creating tmpfs skeleton for {} at {}", path.display(), work_dir_path.display() ); create_dir_all(work_dir_path)?; let source: &Path = if path.exists() { path } else if let Some(mp) = module_path { mp } else { bail!("cannot mount root dir {}!", path.display()); }; let metadata = source.metadata()?; chmod(work_dir_path, Mode::from_raw_mode(metadata.mode()))?; chown( work_dir_path, Some(Uid::from_raw(metadata.uid())), Some(Gid::from_raw(metadata.gid())), )?; lsetfilecon(work_dir_path, lgetfilecon(source)?.as_str())?; Ok(()) } fn handle_mount_result(result: Result<()>, path: &Path, name: &str, has_tmpfs: bool) -> Result<()> { if let Err(e) = result { if has_tmpfs { return Err(e); } log::error!("mount child {}/{} failed: {}", path.display(), name, e); } Ok(()) } fn process_existing_entries( path: &Path, work_dir_path: &Path, children: &mut HashMap, has_tmpfs: bool, ) -> Result<()> { for entry in path.read_dir()?.flatten() { let name = entry.file_name().to_string_lossy().to_string(); let result = if let Some(node) = children.remove(&name) { if node.skip { continue; } do_magic_mount(path, work_dir_path, node, has_tmpfs) .with_context(|| format!("magic mount {}/{name}", path.display())) } else if has_tmpfs { mount_mirror(path, work_dir_path, &entry) .with_context(|| format!("mount mirror {}/{name}", path.display())) } else { Ok(()) }; 
handle_mount_result(result, path, &name, has_tmpfs)?; } Ok(()) } fn process_remaining_children( path: &Path, work_dir_path: &Path, children: HashMap, has_tmpfs: bool, ) -> Result<()> { for (name, node) in children { if node.skip { continue; } let result = do_magic_mount(path, work_dir_path, node, has_tmpfs) .with_context(|| format!("magic mount {}/{name}", path.display())); handle_mount_result(result, path, &name, has_tmpfs)?; } Ok(()) } fn move_tmpfs_to_target(work_dir_path: &Path, target: &Path) -> Result<()> { log::debug!( "moving tmpfs {} -> {}", work_dir_path.display(), target.display() ); mount_move(work_dir_path, target).context("move self")?; mount_change(target, MountPropagationFlags::PRIVATE).context("make self private")?; Ok(()) } fn do_magic_mount, WP: AsRef>( path: P, work_dir_path: WP, mut current: Node, has_tmpfs: bool, ) -> Result<()> { let path = path.as_ref().join(¤t.name); let work_dir_path = work_dir_path.as_ref().join(¤t.name); match current.file_type { RegularFile => { let target_path = if has_tmpfs { fs::File::create(&work_dir_path)?; &work_dir_path } else { &path }; if let Some(module_path) = ¤t.module_path { log::debug!( "mount module file {} -> {}", module_path.display(), work_dir_path.display() ); mount_bind(module_path, target_path)?; } else { bail!("cannot mount root file {}!", path.display()); } } Symlink => { if let Some(module_path) = ¤t.module_path { log::debug!( "create module symlink {} -> {}", module_path.display(), work_dir_path.display() ); clone_symlink(module_path, &work_dir_path)?; } else { bail!("cannot mount root symlink {}!", path.display()); } } Directory => { let create_tmpfs = should_create_tmpfs(&path, &mut current, has_tmpfs); let has_tmpfs = has_tmpfs || create_tmpfs; if has_tmpfs { prepare_tmpfs_skeleton(&path, &work_dir_path, current.module_path.as_ref())?; } if create_tmpfs { log::debug!( "creating tmpfs for {} at {}", path.display(), work_dir_path.display() ); mount_bind(&work_dir_path, 
&work_dir_path).context("bind self")?; } if path.exists() && !current.replace { process_existing_entries( &path, &work_dir_path, &mut current.children, has_tmpfs, )?; } if current.replace { if current.module_path.is_none() { bail!("dir {} is declared as replaced but it is root!", path.display()); } log::debug!("dir {} is replaced", path.display()); } process_remaining_children(&path, &work_dir_path, current.children, has_tmpfs)?; if create_tmpfs { move_tmpfs_to_target(&work_dir_path, &path)?; } } Whiteout => { log::debug!("file {} is removed", path.display()); } } Ok(()) } pub fn magic_mount() -> Result<()> { if let Some(root) = collect_module_files()? { log::debug!("collected: {:#?}", root); let tmp_dir = PathBuf::from(AP_MAGIC_MOUNT_SOURCE); ensure_dir_exists(&tmp_dir)?; mount("tmpfs", &tmp_dir, "tmpfs", MountFlags::empty(), None).context("mount tmp")?; mount_change(&tmp_dir, MountPropagationFlags::PRIVATE).context("make tmp private")?; let result = do_magic_mount("/", &tmp_dir, root, false); if let Err(e) = unmount(&tmp_dir, UnmountFlags::DETACH) { log::error!("failed to unmount tmp {}", e); } fs::remove_dir(tmp_dir).ok(); result } else { log::info!("no modules to mount, skipping!"); Ok(()) } } ================================================ FILE: apd/src/main.rs ================================================ mod apd; mod assets; mod cli; mod defs; mod event; mod lua; mod magic_mount; mod metamodule; mod module; mod module_config; mod package; #[cfg(any(target_os = "linux", target_os = "android"))] mod pty; mod resetprop; mod restorecon; mod sepolicy; mod supercall; mod utils; fn main() -> anyhow::Result<()> { cli::run() } ================================================ FILE: apd/src/metamodule.rs ================================================ //! Metamodule management //! //! This module handles all metamodule-related functionality. //! Metamodules are special modules that manage how regular modules are mounted //! 
and provide hooks for module installation/uninstallation. use std::{ collections::HashMap, path::{Path, PathBuf}, process::Command, }; use anyhow::{Context, Result, ensure}; use log::{info, warn}; use crate::{assets, defs, module::ModuleType::All}; /// Determine whether the provided module properties mark it as a metamodule pub fn is_metamodule(props: &HashMap) -> bool { props.get("metamodule").is_some_and(|s| { let trimmed = s.trim(); trimmed == "1" || trimmed.eq_ignore_ascii_case("true") }) } /// Get metamodule path if it exists /// The metamodule is stored in /data/adb/modules/{id} with a symlink at /data/adb/metamodule pub fn get_metamodule_path() -> Option { let path = Path::new(defs::METAMODULE_DIR); // Check if symlink exists and resolve it if path.is_symlink() && let Ok(target) = std::fs::read_link(path) { // If target is relative, resolve it let resolved = if target.is_absolute() { target } else { path.parent()?.join(target) }; if resolved.exists() && resolved.is_dir() { return Some(resolved); } warn!( "Metamodule symlink points to non-existent path: {}", resolved.display() ); } // Fallback: search for metamodule=1 in modules directory let mut result = None; let _ = crate::module::foreach_module(All, |module_path| { if let Ok(props) = crate::module::read_module_prop(module_path) && is_metamodule(&props) { info!( "Found metamodule in modules directory: {}", module_path.display() ); result = Some(module_path.to_path_buf()); } Ok(()) }); result } /// Check if metamodule exists pub fn has_metamodule() -> bool { get_metamodule_path().is_some() } /// Check if it's safe to install a regular module /// Returns Ok(()) if safe, Err(is_disabled) if blocked /// - Err(true) means metamodule is disabled /// - Err(false) means metamodule is in other unstable state pub fn check_install_safety() -> Result<(), bool> { // No metamodule → safe let Some(metamodule_path) = get_metamodule_path() else { return Ok(()); }; // No metainstall.sh → safe (uses default installer) // The 
staged update directory may contain the latest scripts, so check both locations let has_metainstall = metamodule_path .join(defs::METAMODULE_METAINSTALL_SCRIPT) .exists() || metamodule_path.file_name().is_some_and(|module_id| { Path::new(defs::MODULE_UPDATE_DIR) .join(module_id) .join(defs::METAMODULE_METAINSTALL_SCRIPT) .exists() }); if !has_metainstall { return Ok(()); } // Check for marker files let has_update = metamodule_path.join(defs::UPDATE_FILE_NAME).exists(); let has_remove = metamodule_path.join(defs::REMOVE_FILE_NAME).exists(); let has_disable = metamodule_path.join(defs::DISABLE_FILE_NAME).exists(); // Stable state (no markers) → safe if !has_update && !has_remove && !has_disable { return Ok(()); } // Return true if disabled, false for other unstable states Err(has_disable && !has_update && !has_remove) } /// Create or update the metamodule symlink /// Points /data/adb/metamodule -> /data/adb/modules/{module_id} pub fn ensure_symlink

(module_path: P) -> Result<()> where P: AsRef, { // METAMODULE_DIR might have trailing slash, so we need to trim it let symlink_path = Path::new(defs::METAMODULE_DIR.trim_end_matches('/')); let module_path = module_path.as_ref(); info!( "Creating metamodule symlink: {} -> {}", symlink_path.display(), module_path.display() ); // Remove existing symlink if it exists if symlink_path.exists() || symlink_path.is_symlink() { info!("Removing old metamodule symlink/path"); if symlink_path.is_symlink() { std::fs::remove_file(symlink_path).with_context(|| "Failed to remove old symlink")?; } else { // Could be a directory, remove it std::fs::remove_dir_all(symlink_path) .with_context(|| "Failed to remove old directory")?; } } // Create symlink #[cfg(unix)] std::os::unix::fs::symlink(module_path, symlink_path) .with_context(|| format!("Failed to create symlink to {}", module_path.display()))?; info!("Metamodule symlink created successfully"); Ok(()) } /// Remove the metamodule symlink pub fn remove_symlink() -> Result<()> { let symlink_path = Path::new(defs::METAMODULE_DIR.trim_end_matches('/')); if symlink_path.is_symlink() { std::fs::remove_file(symlink_path) .with_context(|| "Failed to remove metamodule symlink")?; info!("Metamodule symlink removed"); } Ok(()) } /// Get the install script content, using metainstall.sh from metamodule if available /// Returns the script content to be executed pub fn get_install_script( is_metamodule: bool, installer_content: &str, install_module_script: &str, ) -> Result { // Check if there's a metamodule with metainstall.sh // Only apply this logic for regular modules (not when installing metamodule itself) let install_script = if is_metamodule { info!("Installing metamodule, using default installer"); install_module_script.to_string() } else if let Some(metamodule_path) = get_metamodule_path() { if metamodule_path.join(defs::DISABLE_FILE_NAME).exists() { info!("Metamodule is disabled, using default installer"); 
install_module_script.to_string() } else { let metainstall_path = metamodule_path.join(defs::METAMODULE_METAINSTALL_SCRIPT); if metainstall_path.exists() { info!("Using metainstall.sh from metamodule"); let metamodule_content = std::fs::read_to_string(&metainstall_path) .with_context(|| "Failed to read metamodule metainstall.sh")?; format!("{installer_content}\n{metamodule_content}\nexit 0\n") } else { info!("Metamodule exists but has no metainstall.sh, using default installer"); install_module_script.to_string() } } } else { info!("No metamodule found, using default installer"); install_module_script.to_string() }; Ok(install_script) } /// Check if metamodule script exists and is ready to execute /// Returns None if metamodule doesn't exist, is disabled, or script is missing /// Returns Some(script_path) if script is ready to execute fn check_metamodule_script(script_name: &str) -> Option { // Check if metamodule exists let metamodule_path = get_metamodule_path()?; // Check if metamodule is disabled if metamodule_path.join(defs::DISABLE_FILE_NAME).exists() { info!("Metamodule is disabled, skipping {script_name}"); return None; } // Check if script exists let script_path = metamodule_path.join(script_name); if !script_path.exists() { return None; } Some(script_path) } /// Execute metamodule's metauninstall.sh for a specific module pub fn exec_metauninstall_script(module_id: &str) -> Result<()> { let Some(metauninstall_path) = check_metamodule_script(defs::METAMODULE_METAUNINSTALL_SCRIPT) else { return Ok(()); }; info!("Executing metamodule metauninstall.sh for module: {module_id}",); let result = Command::new(assets::BUSYBOX_PATH) .args(["sh", metauninstall_path.to_str().unwrap()]) .current_dir(metauninstall_path.parent().unwrap()) .envs(crate::module::get_common_script_envs()) .env("MODULE_ID", module_id) .status()?; ensure!( result.success(), "Metamodule metauninstall.sh failed for module {module_id}: {:?}", result ); info!("Metamodule metauninstall.sh executed 
successfully for {module_id}",); Ok(()) } /// Execute metamodule mount script pub fn exec_mount_script(module_dir: &str) -> Result<()> { let Some(mount_script) = check_metamodule_script(defs::METAMODULE_MOUNT_SCRIPT) else { return Ok(()); }; info!("Executing mount script for metamodule"); let result = Command::new(assets::BUSYBOX_PATH) .args(["sh", mount_script.to_str().unwrap()]) .envs(crate::module::get_common_script_envs()) .env("MODULE_DIR", module_dir) .status()?; ensure!( result.success(), "Metamodule mount script failed with status: {:?}", result ); info!("Metamodule mount script executed successfully"); Ok(()) } /// Execute metamodule script for a specific stage pub fn exec_stage_script(stage: &str, block: bool) -> Result<()> { let Some(script_path) = check_metamodule_script(&format!("{stage}.sh")) else { return Ok(()); }; info!("Executing metamodule {stage}.sh"); crate::module::exec_script(&script_path, block)?; info!("Metamodule {stage}.sh executed successfully"); Ok(()) } ================================================ FILE: apd/src/module.rs ================================================ use crate::sepolicy::get_policy_main; use crate::{lua, module_config}; use anyhow::{Context, Result, anyhow, bail, ensure}; use const_format::concatcp; use is_executable::is_executable; use java_properties::PropertiesIter; use log::{debug, info, warn}; #[cfg(unix)] use std::os::unix::{prelude::PermissionsExt, process::CommandExt}; use std::{ collections::HashMap, env::var as env_var, fs::{self, remove_dir_all}, io::Cursor, path::{Path, PathBuf, Component}, process::Command, str::FromStr, }; use zip_extensions::zip_extract_file_to_memory; #[allow(clippy::wildcard_imports)] use crate::utils::*; use crate::{ assets, defs::{self, MODULE_DIR, MODULE_UPDATE_DIR}, metamodule, restorecon, }; const INSTALLER_CONTENT: &str = include_str!("./installer.sh"); const INSTALL_MODULE_SCRIPT: &str = concatcp!( INSTALLER_CONTENT, "\n", "install_module", "\n", "exit 0", "\n" ); 
#[derive(PartialEq, Eq)] pub enum ModuleType { All, Active, Updated, } fn exec_install_script(module_file: &str, is_metamodule: bool) -> Result<()> { let realpath = std::fs::canonicalize(module_file) .with_context(|| format!("realpath: {module_file} failed"))?; // Get install script from metamodule module let install_script = metamodule::get_install_script(is_metamodule, INSTALLER_CONTENT, INSTALL_MODULE_SCRIPT)?; let result = Command::new(assets::BUSYBOX_PATH) .args(["sh", "-c", &install_script]) .envs(get_common_script_envs()) .env("OUTFD", "1") .env("ZIPFILE", realpath) .status()?; ensure!(result.success(), "Failed to install module script"); Ok(()) } pub fn handle_updated_modules() -> Result<()> { let modules_root = Path::new(MODULE_DIR); foreach_module(ModuleType::Updated, |updated_module| { if !updated_module.is_dir() { return Ok(()); } if let Some(name) = updated_module.file_name() { let module_dir = modules_root.join(name); let mut disabled = false; let mut removed = false; if module_dir.exists() { // If the old module is disabled, we need to also disable the new one disabled = module_dir.join(defs::DISABLE_FILE_NAME).exists(); removed = module_dir.join(defs::REMOVE_FILE_NAME).exists(); remove_dir_all(&module_dir)?; } std::fs::rename(updated_module, &module_dir)?; if removed { let path = module_dir.join(defs::REMOVE_FILE_NAME); if let Err(e) = ensure_file_exists(&path) { warn!("Failed to create {}: {e}", path.display()); } } else if disabled { let path = module_dir.join(defs::DISABLE_FILE_NAME); if let Err(e) = ensure_file_exists(&path) { warn!("Failed to create {}: {e}", path.display()); } } } Ok(()) })?; Ok(()) } /// Get common environment variables for script execution pub fn get_common_script_envs() -> Vec<(&'static str, String)> { vec![ ("ASH_STANDALONE", "1".to_string()), ("APATCH", "true".to_string()), ("APATCH_VER", defs::VERSION_NAME.to_string()), ("APATCH_VER_CODE", defs::VERSION_CODE.to_string()), ( "PATH", format!( "/data/adb:{}:{}", 
defs::BINARY_DIR.trim_end_matches('/'), env_var("PATH").unwrap_or_default() ), ), ] } // because we use something like A-B update // we need to update the module state after the boot_completed // if someone(such as the module) install a module before the boot_completed // then it may cause some problems, just forbid it fn ensure_boot_completed() -> Result<()> { // ensure getprop sys.boot_completed == 1 if getprop("sys.boot_completed").as_deref() != Some("1") { bail!("Android is Booting!"); } Ok(()) } fn mark_update() -> Result<()> { ensure_file_exists(concatcp!(defs::WORKING_DIR, defs::UPDATE_FILE_NAME)) } fn mark_module_state(module: &str, flag_file: &str, create_or_delete: bool) -> Result<()> { let module_state_file = Path::new(defs::MODULE_DIR).join(module).join(flag_file); if create_or_delete { ensure_file_exists(module_state_file) } else { if module_state_file.exists() { fs::remove_file(module_state_file)?; } Ok(()) } } pub fn foreach_module( module_type: ModuleType, mut f: impl FnMut(&Path) -> Result<()>, ) -> Result<()> { let modules_dir = Path::new(match module_type { ModuleType::Updated => MODULE_UPDATE_DIR, _ => defs::MODULE_DIR, }); let dir = std::fs::read_dir(modules_dir)?; for entry in dir.flatten() { let path = entry.path(); if !path.is_dir() { warn!("{} is not a directory, skip", path.display()); continue; } if module_type == ModuleType::Active && path.join(defs::DISABLE_FILE_NAME).exists() { info!("{} is disabled, skip", path.display()); continue; } if module_type == ModuleType::Active && path.join(defs::REMOVE_FILE_NAME).exists() { warn!("{} is removed, skip", path.display()); continue; } f(&path)?; } Ok(()) } fn foreach_active_module(f: impl FnMut(&Path) -> Result<()>) -> Result<()> { foreach_module(ModuleType::Active, f) } pub fn load_sepolicy_rule() -> Result<()> { foreach_active_module(|path| { let rule_file = path.join("sepolicy.rule"); if !rule_file.exists() { return Ok(()); } info!("load policy: {}", &rule_file.display()); let mut _sepol = 
get_policy_main(&[ "magiskpolicy".to_string(), "--live".to_string(), "--apply".to_string(), rule_file.display().to_string(), ])?; Ok(()) })?; Ok(()) } pub fn exec_script>(path: T, wait: bool) -> Result<()> { info!("exec {}", path.as_ref().display()); let is_module_script = path.as_ref().starts_with(defs::MODULE_DIR); // Extract module_id from path if it matches /data/adb/modules/{id}/... let module_id = if is_module_script { path.as_ref() .strip_prefix(defs::MODULE_DIR) .ok() .and_then(|p| p.components().next()) .and_then(|c| c.as_os_str().to_str()) .map(ToString::to_string) } else { None }; if is_module_script && module_id.is_none() { debug!( "Failed to extract module_id from script path '{}'. Script will run without AP_MODULE environment variable.", path.as_ref().display() ); } let is_elf = fs::read(path.as_ref()) .ok() .and_then(|bytes| bytes.get(..4).map(|b| b.to_vec())) .map_or(false, |magic| magic == [0x7f, b'E', b'L', b'F']); let mut command = Command::new(if is_elf { path.as_ref().as_os_str().to_owned() } else { assets::BUSYBOX_PATH.into() }); #[cfg(unix)] { command.process_group(0); unsafe { command.pre_exec(|| { switch_cgroups(); Ok(()) }); } } command .current_dir(path.as_ref().parent().unwrap()) .env("APATCH", "true") .env("APATCH_VER", defs::VERSION_NAME) .env("APATCH_VER_CODE", defs::VERSION_CODE) .env( "PATH", format!( "{}:{}", env_var("PATH")?, defs::BINARY_DIR.trim_end_matches('/') ), ); if !is_elf { command .arg("sh") .arg(path.as_ref()) .env("ASH_STANDALONE", "1"); } if let Some(id) = module_id { command.env("AP_MODULE", id); } let result = if wait { command.status().map(|_| ()) } else { command.spawn().map(|_| ()) }; result.map_err(|err| anyhow!("Failed to exec {}: {}", path.as_ref().display(), err)) } pub fn exec_stage_script(stage: &str, block: bool) -> Result<()> { foreach_active_module(|module| { let script_path = module.join(format!("{stage}.sh")); if !script_path.exists() { return Ok(()); } exec_script(&script_path, block) })?; Ok(()) } 
pub fn exec_common_scripts(dir: &str, wait: bool) -> Result<()> { let script_dir = Path::new(defs::ADB_DIR).join(dir); if !script_dir.exists() { info!("{} not exists, skip", script_dir.display()); return Ok(()); } let dir = fs::read_dir(&script_dir)?; for entry in dir.flatten() { let path = entry.path(); if !is_executable(&path) { warn!("{} is not executable, skip", path.display()); continue; } exec_script(path, wait)?; } Ok(()) } pub fn load_system_prop() -> Result<()> { foreach_active_module(|module| { let system_prop = module.join("system.prop"); if !system_prop.exists() { return Ok(()); } info!("load {} system.prop", module.display()); crate::resetprop::load_system_prop_file(&system_prop)?; Ok(()) })?; Ok(()) } pub fn prune_modules() -> Result<()> { foreach_module(ModuleType::All, |module| { fs::remove_file(module.join(defs::UPDATE_FILE_NAME)).ok(); if !module.join(defs::REMOVE_FILE_NAME).exists() { return Ok(()); } info!("remove module: {}", module.display()); // Execute metamodule's metauninstall.sh first let module_id = module.file_name().and_then(|n| n.to_str()).unwrap_or(""); // Check if this is a metamodule let is_metamodule = read_module_prop(module) .map(|props| metamodule::is_metamodule(&props)) .unwrap_or(false); if is_metamodule { info!("Removing metamodule symlink"); if let Err(e) = metamodule::remove_symlink() { warn!("Failed to remove metamodule symlink: {e}"); } } else if let Err(e) = metamodule::exec_metauninstall_script(module_id) { warn!("Failed to exec metamodule uninstall for {module_id}: {e}",); } // Then execute module's own uninstall.sh let uninstaller = module.join("uninstall.sh"); if uninstaller.exists() && let Err(e) = exec_script(uninstaller, true) { warn!("Failed to exec uninstaller: {e}"); } // Clear module configs before removing module directory if let Err(e) = module_config::clear_module_configs(module_id) { warn!("Failed to clear configs for {module_id}: {e}"); } // Finally remove the module directory if let Err(e) = 
remove_dir_all(module) { warn!("Failed to remove {}: {e}", module.display()); } Ok(()) })?; // collect remaining modules, if none, clean up metamodule record let remaining_modules: Vec<_> = std::fs::read_dir(defs::MODULE_DIR)? .filter_map(std::result::Result::ok) .filter(|entry| entry.path().join("module.prop").exists()) .collect(); if remaining_modules.is_empty() { info!("no remaining modules."); } Ok(()) } fn _install_module(zip: &str) -> Result<()> { ensure_boot_completed()?; // print banner println!(include_str!("banner")); assets::ensure_binaries().with_context(|| "binary missing")?; // first check if workding dir is usable ensure_dir_exists(defs::WORKING_DIR).with_context(|| "Failed to create working dir")?; ensure_dir_exists(defs::BINARY_DIR).with_context(|| "Failed to create bin dir")?; // read the module_id from zip let mut buffer: Vec = Vec::new(); let entry_path = PathBuf::from_str("module.prop")?; let zip_path = PathBuf::from_str(zip)?; let zip_path = zip_path.canonicalize()?; zip_extract_file_to_memory(&zip_path, &entry_path, &mut buffer)?; let mut module_prop = HashMap::new(); PropertiesIter::new_with_encoding(Cursor::new(buffer), encoding_rs::UTF_8).read_into( |k, v| { module_prop.insert(k, v); }, )?; info!("module prop: {:?}", module_prop); let Some(module_id) = module_prop.get("id") else { bail!("module id not found in module.prop!"); }; let module_id = module_id.trim(); // Check if this module is a metamodule let is_metamodule = metamodule::is_metamodule(&module_prop); // Check if it's safe to install regular module if !is_metamodule && let Err(is_disabled) = metamodule::check_install_safety() { println!("\n❌ Installation Blocked"); println!("┌────────────────────────────────"); println!("│ A metamodule with custom installer is active"); println!("│"); if is_disabled { println!("│ Current state: Disabled"); println!("│ Action required: Re-enable or uninstall it, then reboot"); } else { println!("│ Current state: Pending changes"); println!("│ 
Action required: Reboot to apply changes first"); } println!("└─────────────────────────────────\n"); bail!("Metamodule installation blocked"); } let modules_dir = Path::new(defs::MODULE_DIR); let modules_update_dir = Path::new(defs::MODULE_UPDATE_DIR); if !Path::new(modules_dir).exists() { fs::create_dir(modules_dir).expect("Failed to create modules folder"); let permissions = fs::Permissions::from_mode(0o700); fs::set_permissions(modules_dir, permissions).expect("Failed to set permissions"); } if is_metamodule { info!("Installing metamodule: {module_id}"); // Check if there's already a metamodule installed if metamodule::has_metamodule() && let Some(existing_path) = metamodule::get_metamodule_path() { let existing_id = read_module_prop(&existing_path) .ok() .and_then(|m| m.get("id").cloned()) .unwrap_or_else(|| "unknown".to_string()); if existing_id != module_id { println!("\n❌ Installation Failed"); println!("┌────────────────────────────────"); println!("│ A metamodule is already installed"); println!("│ Current metamodule: {existing_id}"); println!("│"); println!("│ Only one metamodule can be active at a time."); println!("│"); println!("│ To install this metamodule:"); println!("│ 1. Uninstall the current metamodule"); println!("│ 2. Reboot your device"); println!("│ 3. 
Install the new metamodule"); println!("└─────────────────────────────────\n"); bail!("Cannot install multiple metamodules"); } } } let module_dir = format!("{}{}", modules_dir.display(), module_id); let _module_update_dir = format!("{}{}", modules_update_dir.display(), module_id); info!("module dir: {}", module_dir); if !Path::new(&module_dir.clone()).exists() { fs::create_dir(&module_dir.clone()).expect("Failed to create module folder"); let permissions = fs::Permissions::from_mode(0o700); fs::set_permissions(module_dir.clone(), permissions).expect("Failed to set permissions"); } // unzip the image and move it to modules_update/ dir let file = fs::File::open(zip)?; let mut archive = zip::ZipArchive::new(file)?; archive.extract(&_module_update_dir)?; // Set SELinux context for module root directory and special files // This is critical for .img files that need to be loop-mounted #[cfg(unix)] { let module_update_path = Path::new(&_module_update_dir); if module_update_path.exists() { // Set adb_data_file context for the module root directory restorecon::lsetfilecon(&_module_update_dir, restorecon::ADB_CON)?; // Process special files like .img that need proper permissions for mounting if let Ok(entries) = fs::read_dir(&_module_update_dir) { for entry in entries.flatten() { let path = entry.path(); if let Some(extension) = path.extension() { if extension == "img" { // Set proper permissions for image files (readable by all) fs::set_permissions(&path, fs::Permissions::from_mode(0o644))?; // Set SELinux context to allow loop mounting restorecon::lsetfilecon(&path, restorecon::ADB_CON)?; info!("Set permissions and SELinux context for: {:?}", path); } } } } } } println!("- Running module installer"); exec_install_script(zip, is_metamodule)?; // set permission and selinux context for $MOD/system let module_system_dir = PathBuf::from(module_dir.clone()).join("system"); if module_system_dir.exists() { #[cfg(unix)] fs::set_permissions(&module_system_dir, 
fs::Permissions::from_mode(0o755))?;
        restorecon::restore_syscon(&module_system_dir)?;
    }

    // Create symlink for metamodule
    if is_metamodule {
        println!("- Creating metamodule symlink");
        metamodule::ensure_symlink(&module_dir)?;
    }

    mark_update()?;
    Ok(())
}

/// Install a module from the zip file at `zip`.
pub fn install_module(zip: &str) -> Result<()> {
    _install_module(zip)
}

/// Mark the module `id` under `update_dir` for removal on next reboot.
///
/// Scans every module directory's `module.prop` for a matching `id` and
/// drops a remove-marker file into it; also marks the directory named
/// after `id` directly as a fallback.
pub fn _uninstall_module(id: &str, update_dir: &str) -> Result<()> {
    let dir = Path::new(update_dir);
    ensure!(dir.exists(), "No module installed");

    // iterate the modules_update dir, find the module to be removed
    let dir = fs::read_dir(dir)?;
    for entry in dir.flatten() {
        let path = entry.path();
        let module_prop = path.join("module.prop");
        if !module_prop.exists() {
            continue;
        }
        let content = fs::read(module_prop)?;
        let mut module_id: String = String::new();
        PropertiesIter::new_with_encoding(Cursor::new(content), encoding_rs::UTF_8).read_into(
            |k, v| {
                if k.eq("id") {
                    module_id = v;
                }
            },
        )?;
        if module_id.eq(id) {
            let remove_file = path.join(defs::REMOVE_FILE_NAME);
            fs::File::create(remove_file).with_context(|| "Failed to create remove file.")?;
            break;
        }
    }

    // sanity check: also mark the directory named after the id directly
    let target_module_path = format!("{update_dir}/{id}");
    let target_module = Path::new(&target_module_path);
    if target_module.exists() {
        let remove_file = target_module.join(defs::REMOVE_FILE_NAME);
        if !remove_file.exists() {
            fs::File::create(remove_file).with_context(|| "Failed to create remove file.")?;
        }
    }

    let _ = mark_module_state(id, defs::REMOVE_FILE_NAME, true);

    Ok(())
}

/// Uninstall module `id` from the live module directory and flag a pending update.
pub fn uninstall_module(id: &str) -> Result<()> {
    _uninstall_module(id, defs::MODULE_DIR)?;
    mark_update()?;
    Ok(())
}

/// Cancel a pending uninstall for module `id` by deleting its remove marker.
pub fn undo_uninstall_module(id: &str) -> Result<()> {
    let module_path = Path::new(defs::MODULE_DIR).join(id);
    ensure!(module_path.exists(), "Module {id} not found");

    // Remove the remove mark
    let remove_file = module_path.join(defs::REMOVE_FILE_NAME);
    if remove_file.exists() {
        fs::remove_file(&remove_file)
            .with_context(|| format!("Failed to delete remove file for module '{id}'"))?;
        info!("Removed the remove mark for module {id}");
    }
    Ok(())
}

/// Read module.prop from the given module path and return as a HashMap
pub fn read_module_prop(module_path: &Path) -> Result<HashMap<String, String>> {
    let module_prop = module_path.join("module.prop");
    ensure!(
        module_prop.exists(),
        "module.prop not found in {}",
        module_path.display()
    );

    let content = std::fs::read(&module_prop)
        .with_context(|| format!("Failed to read module.prop: {}", module_prop.display()))?;

    let mut prop_map: HashMap<String, String> = HashMap::new();
    PropertiesIter::new_with_encoding(Cursor::new(content), encoding_rs::UTF_8)
        .read_into(|k, v| {
            prop_map.insert(k, v);
        })
        .with_context(|| format!("Failed to parse module.prop: {}", module_prop.display()))?;

    Ok(prop_map)
}

/// Run the module's action: `action.sh` if present, otherwise its Lua action.
pub fn run_action(id: &str) -> Result<()> {
    let action_script_path = format!("/data/adb/modules/{}/action.sh", id);
    if Path::new(&action_script_path).exists() {
        let _ = exec_script(&action_script_path, true);
    } else {
        //if no action.sh, try to run lua action
        lua::run_lua(&id, "action", false, true).map_err(|e| anyhow::anyhow!("{}", e))?;
    }
    Ok(())
}

/// Toggle the disable marker for module `mid` under `module_dir`.
fn _change_module_state(module_dir: &str, mid: &str, enable: bool) -> Result<()> {
    let src_module_path = format!("{module_dir}/{mid}");
    let src_module = Path::new(&src_module_path);
    ensure!(src_module.exists(), "module: {} not found!", mid);

    let disable_path = src_module.join(defs::DISABLE_FILE_NAME);
    if enable {
        if disable_path.exists() {
            fs::remove_file(&disable_path).with_context(|| {
                format!("Failed to remove disable file: {}", &disable_path.display())
            })?;
        }
    } else {
        ensure_file_exists(disable_path)?;
    }

    let _ = mark_module_state(mid, defs::DISABLE_FILE_NAME, !enable);

    Ok(())
}

pub fn _enable_module(id: &str, update_dir: &Path) -> Result<()> {
    if let Some(module_dir_str) = update_dir.to_str() {
        _change_module_state(module_dir_str, id, true)
    } else {
        info!("Enable module failed: Invalid path");
        Err(anyhow::anyhow!("Invalid module directory"))
    }
}

pub fn enable_module(id: &str) -> Result<()> {
    let update_dir = Path::new(defs::MODULE_DIR);
    _enable_module(id, update_dir)?;
    Ok(())
}

pub fn _disable_module(id: &str, update_dir: &Path) -> Result<()> {
    if let Some(module_dir_str) = update_dir.to_str() {
        _change_module_state(module_dir_str, id, false)
    } else {
        info!("Disable module failed: Invalid path");
        Err(anyhow::anyhow!("Invalid module directory"))
    }
}

pub fn disable_module(id: &str) -> Result<()> {
    let module_dir = Path::new(defs::MODULE_DIR);
    _disable_module(id, module_dir)?;
    Ok(())
}

/// Drop a disable marker into every module directory under `dir` (best effort).
pub fn _disable_all_modules(dir: &str) -> Result<()> {
    let dir = fs::read_dir(dir)?;
    for entry in dir.flatten() {
        let path = entry.path();
        let disable_flag = path.join(defs::DISABLE_FILE_NAME);
        if let Err(e) = ensure_file_exists(disable_flag) {
            warn!("Failed to disable module: {}: {}", path.display(), e);
        }
    }
    Ok(())
}

pub fn disable_all_modules() -> Result<()> {
    // Skip disabling modules since boot completed
    if getprop("sys.boot_completed").as_deref() == Some("1") {
        info!("System boot completed, no need to disable all modules");
        return Ok(());
    }
    mark_update()?;
    _disable_all_modules(defs::MODULE_DIR)?;
    Ok(())
}

// Resolve a module icon path to an absolute on-disk path
fn resolve_module_icon_path(
    module_prop_map: &mut HashMap<String, String>,
    key: &str,
    module_path: &Path,
) {
    let module_id = module_prop_map.get("id").map(|s| s.as_str()).unwrap_or("");
    if let Some(icon_value) = module_prop_map
        .get(key)
        .map(|v| v.trim())
        .filter(|v| !v.is_empty())
    {
        let path = Path::new(icon_value);
        // Reject absolute paths and parent-dir traversal so lookups stay inside the module dir.
        if path.is_absolute() || path.components().any(|c| matches!(c, Component::ParentDir)) {
            log::warn!("Rejected {} (invalid path) for module {}: {}", key, module_id, icon_value);
            return;
        }
        let candidate = module_path.join(path);
        if candidate.exists() && candidate.is_file() {
            if let Some(full_path) = candidate.to_str() {
                module_prop_map.insert(key.to_string(), full_path.to_string());
            }
        } else {
            log::debug!("{} not found for module {}: {}", key, module_id, candidate.display());
        }
    }
}

fn _list_modules(path: &str) -> Vec<HashMap<String, String>> {
// Load all module configs once to minimize I/O overhead let all_configs = match module_config::get_all_module_configs() { Ok(configs) => configs, Err(e) => { warn!("Failed to load module configs: {e}"); HashMap::new() } }; // first check enabled modules let dir = fs::read_dir(path); let Ok(dir) = dir else { return Vec::new(); }; let mut modules: Vec> = Vec::new(); for entry in dir.flatten() { let path = entry.path(); info!("path: {}", path.display()); let module_prop = path.join("module.prop"); if !module_prop.exists() { continue; } let content = fs::read(&module_prop); let Ok(content) = content else { warn!("Failed to read file: {}", module_prop.display()); continue; }; let mut module_prop_map: HashMap = HashMap::new(); let encoding = encoding_rs::UTF_8; if PropertiesIter::new_with_encoding(Cursor::new(content), encoding) .read_into(|k, v| { module_prop_map.insert(k, v); }) .is_err() { warn!("Failed to parse module.prop: {}", module_prop.display()); continue; } if !module_prop_map.contains_key("id") || module_prop_map["id"].is_empty() { match entry.file_name().to_str() { Some(id) => { info!("Use dir name as module id: {}", id); module_prop_map.insert("id".to_owned(), id.to_owned()); } _ => { info!("Failed to get module id: {:?}", module_prop); continue; } } } // Add enabled, update, remove flags let enabled = !path.join(defs::DISABLE_FILE_NAME).exists(); let update = path.join(defs::UPDATE_FILE_NAME).exists(); let remove = path.join(defs::REMOVE_FILE_NAME).exists(); let web = path.join(defs::MODULE_WEB_DIR).exists(); let id = module_prop_map.get("id").map(|s| s.as_str()).unwrap_or(""); let id_lua_file = format!("{}.lua", id); let action = path.join(defs::MODULE_ACTION_SH).exists() || path.join(&id_lua_file).exists(); module_prop_map.insert("enabled".to_owned(), enabled.to_string()); module_prop_map.insert("update".to_owned(), update.to_string()); module_prop_map.insert("remove".to_owned(), remove.to_string()); module_prop_map.insert("web".to_owned(), 
web.to_string()); module_prop_map.insert("action".to_owned(), action.to_string()); // Resolve and validate module icon paths for action and webui icons resolve_module_icon_path(&mut module_prop_map, "actionIcon", &path); resolve_module_icon_path(&mut module_prop_map, "webuiIcon", &path); // Apply module config overrides and extract managed features if let Some(module_id) = module_prop_map.get("id") && let Some(config) = all_configs.get(module_id.as_str()) { // Apply override.description if let Some(desc) = config.get("override.description") { module_prop_map.insert("description".to_owned(), desc.clone()); } } modules.push(module_prop_map); } modules } pub fn list_modules() -> Result<()> { let modules = _list_modules(defs::MODULE_DIR); println!("{}", serde_json::to_string_pretty(&modules)?); Ok(()) } ================================================ FILE: apd/src/module_config.rs ================================================ use std::{ collections::HashMap, fs::{self, File}, io::{Read, Write}, path::{Path, PathBuf}, }; use anyhow::{Context, Result, bail}; use log::{debug, warn}; use crate::{defs, utils::ensure_dir_exists}; #[allow(clippy::unreadable_literal)] const MODULE_CONFIG_MAGIC: u32 = 0x4150544D; // "APTM" const MODULE_CONFIG_VERSION: u32 = 1; // Validation limits pub const MAX_CONFIG_KEY_LEN: usize = 256; pub const MAX_CONFIG_VALUE_LEN: usize = 1024 * 1024; // 1MB pub const MAX_CONFIG_COUNT: usize = 32; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ConfigType { Persist, Temp, } impl ConfigType { const fn filename(self) -> &'static str { match self { Self::Persist => defs::PERSIST_CONFIG_NAME, Self::Temp => defs::TEMP_CONFIG_NAME, } } } /// Validate config key /// Uses the same validation rules as module_id: ^[a-zA-Z][a-zA-Z0-9._-]+$ /// - Must start with a letter (a-zA-Z) /// - Followed by one or more alphanumeric, dot, underscore, or hyphen characters /// - Minimum length: 2 characters pub fn validate_config_key(key: &str) -> Result<()> { if 
key.is_empty() { bail!("Config key cannot be empty"); } if key.len() > MAX_CONFIG_KEY_LEN { bail!( "Config key too long: {} bytes (max: {MAX_CONFIG_KEY_LEN})", key.len(), ); } // Use same pattern as module_id for consistency let re = regex_lite::Regex::new(r"^[a-zA-Z][a-zA-Z0-9._-]+$")?; if !re.is_match(key) { bail!("Invalid config key: '{key}'. Must match /^[a-zA-Z][a-zA-Z0-9._-]+$/"); } Ok(()) } /// Validate config value /// Only enforces maximum length - no character restrictions /// Values are stored in binary format with length prefix, so any UTF-8 data is safe pub fn validate_config_value(value: &str) -> Result<()> { if value.len() > MAX_CONFIG_VALUE_LEN { bail!( "Config value too long: {} bytes (max: {})", value.len(), MAX_CONFIG_VALUE_LEN ); } // No character restrictions - binary storage format handles all UTF-8 safely Ok(()) } /// Validate config count fn validate_config_count(config: &HashMap) -> Result<()> { if config.len() > MAX_CONFIG_COUNT { bail!( "Too many config entries: {} (max: {MAX_CONFIG_COUNT})", config.len(), ); } Ok(()) } /// Get the config directory path for a module fn get_config_dir(module_id: &str) -> PathBuf { Path::new(defs::MODULE_CONFIG_DIR).join(module_id) } /// Get the config file path for a module fn get_config_path(module_id: &str, config_type: ConfigType) -> PathBuf { get_config_dir(module_id).join(config_type.filename()) } /// Ensure the config directory exists fn ensure_config_dir(module_id: &str) -> Result { let dir = get_config_dir(module_id); ensure_dir_exists(&dir)?; Ok(dir) } /// Load config from binary file pub fn load_config(module_id: &str, config_type: ConfigType) -> Result> { let config_path = get_config_path(module_id, config_type); if !config_path.exists() { debug!("Config file not found: {}", config_path.display()); return Ok(HashMap::new()); } let mut file = File::open(&config_path) .with_context(|| format!("Failed to open config file: {}", config_path.display()))?; // Read magic let mut magic_buf = [0u8; 4]; 
file.read_exact(&mut magic_buf) .with_context(|| "Failed to read magic")?; let magic = u32::from_le_bytes(magic_buf); if magic != MODULE_CONFIG_MAGIC { bail!("Invalid config magic: expected 0x{MODULE_CONFIG_MAGIC:08x}, got 0x{magic:08x}"); } // Read version let mut version_buf = [0u8; 4]; file.read_exact(&mut version_buf) .with_context(|| "Failed to read version")?; let version = u32::from_le_bytes(version_buf); if version != MODULE_CONFIG_VERSION { bail!("Unsupported config version: expected {MODULE_CONFIG_VERSION}, got {version}"); } // Read count let mut count_buf = [0u8; 4]; file.read_exact(&mut count_buf) .with_context(|| "Failed to read count")?; let count = u32::from_le_bytes(count_buf); // Read entries let mut config = HashMap::new(); for i in 0..count { // Read key length let mut key_len_buf = [0u8; 4]; file.read_exact(&mut key_len_buf) .with_context(|| format!("Failed to read key length for entry {i}"))?; let key_len = u32::from_le_bytes(key_len_buf) as usize; // Read key data let mut key_buf = vec![0u8; key_len]; file.read_exact(&mut key_buf) .with_context(|| format!("Failed to read key data for entry {i}"))?; let key = String::from_utf8(key_buf) .with_context(|| format!("Invalid UTF-8 in key for entry {i}"))?; // Read value length let mut value_len_buf = [0u8; 4]; file.read_exact(&mut value_len_buf) .with_context(|| format!("Failed to read value length for entry {i}"))?; let value_len = u32::from_le_bytes(value_len_buf) as usize; // Read value data let mut value_buf = vec![0u8; value_len]; file.read_exact(&mut value_buf) .with_context(|| format!("Failed to read value data for entry {i}"))?; let value = String::from_utf8(value_buf) .with_context(|| format!("Invalid UTF-8 in value for entry {i}"))?; config.insert(key, value); } debug!( "Loaded {} entries from {}", config.len(), config_path.display() ); Ok(config) } /// Save config to binary file pub fn save_config( module_id: &str, config_type: ConfigType, config: &HashMap, ) -> Result<()> { // Validate 
config count validate_config_count(config)?; // Validate all keys and values for (key, value) in config { validate_config_key(key).with_context(|| format!("Invalid config key: '{key}'"))?; validate_config_value(value) .with_context(|| format!("Invalid config value for key '{key}'"))?; } ensure_config_dir(module_id)?; let config_path = get_config_path(module_id, config_type); let temp_path = config_path.with_extension("tmp"); // Write to temporary file first let mut file = File::create(&temp_path) .with_context(|| format!("Failed to create temp config file: {}", temp_path.display()))?; // Write magic file.write_all(&MODULE_CONFIG_MAGIC.to_le_bytes()) .with_context(|| "Failed to write magic")?; // Write version file.write_all(&MODULE_CONFIG_VERSION.to_le_bytes()) .with_context(|| "Failed to write version")?; // Write count let count = config.len() as u32; file.write_all(&count.to_le_bytes()) .with_context(|| "Failed to write count")?; // Write entries for (key, value) in config { // Write key length let key_bytes = key.as_bytes(); let key_len = key_bytes.len() as u32; file.write_all(&key_len.to_le_bytes()) .with_context(|| format!("Failed to write key length for '{key}'"))?; // Write key data file.write_all(key_bytes) .with_context(|| format!("Failed to write key data for '{key}'"))?; // Write value length let value_bytes = value.as_bytes(); let value_len = value_bytes.len() as u32; file.write_all(&value_len.to_le_bytes()) .with_context(|| format!("Failed to write value length for '{key}'"))?; // Write value data file.write_all(value_bytes) .with_context(|| format!("Failed to write value data for '{key}'"))?; } file.sync_all() .with_context(|| "Failed to sync config file")?; // Atomic rename fs::rename(&temp_path, &config_path).with_context(|| { format!( "Failed to rename config file: {} -> {}", temp_path.display(), config_path.display() ) })?; debug!( "Saved {} entries to {}", config.len(), config_path.display() ); Ok(()) } /// Get a single config value 
#[allow(dead_code)] pub fn get_config_value( module_id: &str, key: &str, config_type: ConfigType, ) -> Result> { let config = load_config(module_id, config_type)?; Ok(config.get(key).cloned()) } /// Set a single config value pub fn set_config_value( module_id: &str, key: &str, value: &str, config_type: ConfigType, ) -> Result<()> { // Validate input early for better error messages validate_config_key(key)?; validate_config_value(value)?; let mut config = load_config(module_id, config_type)?; config.insert(key.to_string(), value.to_string()); // Note: save_config will also validate, but this provides earlier feedback save_config(module_id, config_type, &config)?; Ok(()) } /// Delete a single config value pub fn delete_config_value(module_id: &str, key: &str, config_type: ConfigType) -> Result<()> { let mut config = load_config(module_id, config_type)?; if config.remove(key).is_none() { bail!("Key '{key}' not found in config"); } save_config(module_id, config_type, &config)?; Ok(()) } /// Clear all config values pub fn clear_config(module_id: &str, config_type: ConfigType) -> Result<()> { let config_path = get_config_path(module_id, config_type); if config_path.exists() { fs::remove_file(&config_path) .with_context(|| format!("Failed to remove config file: {}", config_path.display()))?; debug!("Cleared config: {}", config_path.display()); } Ok(()) } /// Merge persist and temp configs (temp takes priority) pub fn merge_configs(module_id: &str) -> Result> { let mut merged = match load_config(module_id, ConfigType::Persist) { Ok(config) => config, Err(e) => { warn!("Failed to load persist config for module '{module_id}': {e}"); HashMap::new() } }; let temp = match load_config(module_id, ConfigType::Temp) { Ok(config) => config, Err(e) => { warn!("Failed to load temp config for module '{module_id}': {e}"); HashMap::new() } }; // Temp config overrides persist config for (key, value) in temp { merged.insert(key, value); } Ok(merged) } /// Get all module configs (for 
iteration) /// Loads all configs in a single pass to minimize I/O overhead pub fn get_all_module_configs() -> Result>> { let config_root = Path::new(defs::MODULE_CONFIG_DIR); if !config_root.exists() { return Ok(HashMap::new()); } let mut all_configs = HashMap::new(); for entry in fs::read_dir(config_root) .with_context(|| format!("Failed to read config directory: {}", config_root.display()))? { let entry = entry?; let path = entry.path(); if !path.is_dir() { continue; } if let Some(module_id) = path.file_name().and_then(|n| n.to_str()) { match merge_configs(module_id) { Ok(config) => { if !config.is_empty() { all_configs.insert(module_id.to_string(), config); } } Err(e) => { warn!("Failed to load config for module '{module_id}': {e}"); // Continue processing other modules } } } } Ok(all_configs) } /// Clear all temporary configs (called during post-fs-data) pub fn clear_all_temp_configs() -> Result<()> { let config_root = Path::new(defs::MODULE_CONFIG_DIR); if !config_root.exists() { debug!("Config directory does not exist, nothing to clear"); return Ok(()); } let mut cleared_count = 0; for entry in fs::read_dir(config_root) .with_context(|| format!("Failed to read config directory: {}", config_root.display()))? 
{ let entry = entry?; let path = entry.path(); if !path.is_dir() { continue; } let temp_config = path.join(defs::TEMP_CONFIG_NAME); if temp_config.exists() { match fs::remove_file(&temp_config) { Ok(()) => { debug!("Cleared temp config: {}", temp_config.display()); cleared_count += 1; } Err(e) => { warn!("Failed to clear temp config {}: {e}", temp_config.display()); } } } } if cleared_count > 0 { debug!("Cleared {cleared_count} temp config file(s)"); } Ok(()) } /// Clear all configs for a module (called during uninstall) pub fn clear_module_configs(module_id: &str) -> Result<()> { let config_dir = get_config_dir(module_id); if config_dir.exists() { fs::remove_dir_all(&config_dir).with_context(|| { format!( "Failed to remove config directory: {}", config_dir.display() ) })?; debug!("Cleared all configs for module: {module_id}"); } Ok(()) } ================================================ FILE: apd/src/package.rs ================================================ use std::{ collections::{HashMap, HashSet}, fs::File, io::{self, BufRead}, path::Path, process::Command, thread, time::Duration, }; use log::{info, warn}; use serde::{Deserialize, Serialize}; use crate::defs; const DEFAULT_SCONTEXT: &str = "u:r:untrusted_app:s0"; const MAGISK_SCONTEXT: &str = "u:r:magisk:s0"; #[derive(Deserialize, Serialize, Clone)] pub struct PackageConfig { pub pkg: String, pub exclude: i32, pub allow: i32, pub uid: i32, pub to_uid: i32, pub sctx: String, } fn read_known_user_packages() -> HashSet { std::fs::read_to_string(defs::AUTO_EXCLUDE_KNOWN_PACKAGES_FILE) .map(|content| { content .lines() .map(str::trim) .filter(|line| !line.is_empty()) .map(ToOwned::to_owned) .collect() }) .unwrap_or_default() } fn write_known_user_packages(packages: &HashSet) -> io::Result<()> { let mut sorted_packages: Vec<_> = packages.iter().cloned().collect(); sorted_packages.sort(); let mut content = sorted_packages.join("\n"); if !content.is_empty() { content.push('\n'); } 
std::fs::write(defs::AUTO_EXCLUDE_KNOWN_PACKAGES_FILE, content)
}

/// List installed third-party packages via `cmd package` with a `pm` fallback.
/// Returns an empty set when both queries fail.
fn list_user_packages() -> HashSet<String> {
    let commands: [(&str, &[&str]); 2] = [
        ("cmd", &["package", "list", "packages", "-3"]),
        ("pm", &["list", "packages", "-3"]),
    ];
    for (program, args) in commands {
        let output = match Command::new(program).args(args).output() {
            Ok(output) if output.status.success() => output,
            Ok(output) => {
                warn!("User package query {} {:?} failed: {:?}", program, args, output.status.code());
                continue;
            }
            Err(e) => {
                warn!("User package query {} {:?} failed: {}", program, args, e);
                continue;
            }
        };
        return String::from_utf8_lossy(&output.stdout)
            .lines()
            .filter_map(|line| line.strip_prefix("package:"))
            .map(str::trim)
            .filter(|pkg| !pkg.is_empty())
            .map(ToOwned::to_owned)
            .collect();
    }
    HashSet::new()
}

/// Apply the default new-app profile (`mode`: 1 = root, 2 = exclude) to user
/// packages installed since the last sync, then record the current package set.
///
/// Returns `Ok(true)` when `package_configs` changed or the known-package file
/// was just initialized, so the caller knows to persist the config.
pub fn sync_auto_exclude_new_apps(
    package_configs: &mut Vec<PackageConfig>,
    uid_map: &HashMap<String, i32>,
    mode: i32,
) -> io::Result<bool> {
    let current_user_packages = list_user_packages();
    if current_user_packages.is_empty() {
        return Ok(false);
    }

    let known_user_packages = read_known_user_packages();
    let known_initialized = Path::new(defs::AUTO_EXCLUDE_KNOWN_PACKAGES_FILE).exists();
    let mut changed = false;

    if mode != 0 && known_initialized {
        let new_packages: Vec<_> = current_user_packages
            .difference(&known_user_packages)
            .cloned()
            .collect();
        for pkg in new_packages {
            let Some(&uid) = uid_map.get(&pkg) else {
                warn!("[auto_exclude] Missing uid for package {}, skip", pkg);
                continue;
            };
            let exists = package_configs
                .iter()
                .any(|config| config.pkg == pkg || config.uid == uid);
            if exists {
                continue;
            }
            let (allow, exclude, sctx, mode_name) = match mode {
                1 => (1, 0, MAGISK_SCONTEXT.to_string(), "root"),
                2 => (0, 1, DEFAULT_SCONTEXT.to_string(), "exclude"),
                _ => continue,
            };
            info!(
                "[new_app_profile] New package detected, apply {} by default: {} ({})",
                mode_name, pkg, uid
            );
            package_configs.push(PackageConfig {
                pkg,
                exclude,
                allow,
                uid,
                to_uid: 0,
                sctx,
            });
            changed = true;
        }
    }

    write_known_user_packages(&current_user_packages)?;
    Ok(changed || !known_initialized)
}

/// Read /data/adb/ap/package_config (CSV), retrying up to 5 times.
/// Returns an empty Vec when all attempts fail.
pub fn read_ap_package_config() -> Vec<PackageConfig> {
    let max_retry = 5;
    for _ in 0..max_retry {
        let file = match File::open("/data/adb/ap/package_config") {
            Ok(file) => file,
            Err(e) => {
                warn!("Error opening file: {}", e);
                thread::sleep(Duration::from_secs(1));
                continue;
            }
        };
        let mut reader = csv::Reader::from_reader(file);
        let mut package_configs = Vec::new();
        let mut success = true;
        for record in reader.deserialize() {
            match record {
                Ok(config) => package_configs.push(config),
                Err(e) => {
                    warn!("Error deserializing record: {}", e);
                    success = false;
                    break;
                }
            }
        }
        if success {
            return package_configs;
        }
        thread::sleep(Duration::from_secs(1));
    }
    Vec::new()
}

/// Write the package config CSV atomically (temp file + rename), retrying up to 5 times.
pub fn write_ap_package_config(package_configs: &[PackageConfig]) -> io::Result<()> {
    let max_retry = 5;
    for _ in 0..max_retry {
        let temp_path = "/data/adb/ap/package_config.tmp";
        let file = match File::create(temp_path) {
            Ok(file) => file,
            Err(e) => {
                warn!("Error creating temp file: {}", e);
                thread::sleep(Duration::from_secs(1));
                continue;
            }
        };
        let mut writer = csv::Writer::from_writer(file);
        let mut success = true;
        for config in package_configs {
            if let Err(e) = writer.serialize(config) {
                warn!("Error serializing record: {}", e);
                success = false;
                break;
            }
        }
        if !success {
            thread::sleep(Duration::from_secs(1));
            continue;
        }
        if let Err(e) = writer.flush() {
            warn!("Error flushing writer: {}", e);
            thread::sleep(Duration::from_secs(1));
            continue;
        }
        if let Err(e) = std::fs::rename(temp_path, "/data/adb/ap/package_config") {
            warn!("Error renaming temp file: {}", e);
            thread::sleep(Duration::from_secs(1));
            continue;
        }
        return Ok(());
    }
    Err(io::Error::new(
        io::ErrorKind::Other,
        "Failed after max retries",
    ))
}

fn read_lines
(filename: P) -> io::Result>> where P: AsRef, { File::open(filename).map(|file| io::BufReader::new(file).lines()) } pub fn synchronize_package_uid() -> io::Result<()> { info!("[synchronize_package_uid] Start synchronizing root list with system packages..."); let max_retry = 5; for _ in 0..max_retry { match read_lines("/data/system/packages.list") { Ok(lines) => { let lines: Vec<_> = lines.filter_map(|line| line.ok()).collect(); let mut package_configs = read_ap_package_config(); let uid_map: HashMap = lines .iter() .filter_map(|line| { let words: Vec<&str> = line.split_whitespace().collect(); if words.len() < 2 { return None; } words[1] .parse::() .ok() .map(|uid| (words[0].to_string(), uid)) }) .collect(); let system_packages: Vec = lines .iter() .filter_map(|line| line.split_whitespace().next()) .map(|pkg| pkg.to_string()) .collect(); let original_len = package_configs.len(); package_configs.retain(|config| system_packages.contains(&config.pkg)); let removed_count = original_len - package_configs.len(); if removed_count > 0 { info!( "Removed {} uninstalled package configurations", removed_count ); } let mut updated = false; let new_app_profile_mode = crate::supercall::get_new_app_profile_mode(); if sync_auto_exclude_new_apps(&mut package_configs, &uid_map, new_app_profile_mode)? 
{ updated = true; } for line in &lines { let words: Vec<&str> = line.split_whitespace().collect(); if words.len() >= 2 { let pkg_name = words[0]; if let Ok(uid) = words[1].parse::() { if let Some(config) = package_configs .iter_mut() .find(|config| config.pkg == pkg_name) { if config.uid % 100000 != uid % 100000 { let uid = config.uid / 100000 * 100000 + uid % 100000; info!( "Updating uid for package {}: {} -> {}", pkg_name, config.uid, uid ); config.uid = uid; updated = true; } } } else { warn!("Error parsing uid: {}", words[1]); } } } if updated || removed_count > 0 { write_ap_package_config(&package_configs)?; } return Ok(()); } Err(e) => { warn!("Error reading packages.list: {}", e); thread::sleep(Duration::from_secs(1)); } } } Err(io::Error::new( io::ErrorKind::Other, "Failed after max retries", )) } ================================================ FILE: apd/src/pty.rs ================================================ use std::{ ffi::c_int, fs::File, io::{Read, Write, stderr, stdin, stdout}, mem::MaybeUninit, os::fd::{AsFd, AsRawFd, OwnedFd, RawFd}, process::exit, ptr::null_mut, sync::Mutex, thread, }; use anyhow::{Ok, Result, bail}; use libc::{ EINTR, SIG_BLOCK, SIG_UNBLOCK, SIGWINCH, TIOCGWINSZ, TIOCSWINSZ, fork, pthread_sigmask, sigaddset, sigemptyset, sigset_t, sigwait, waitpid, winsize, }; use rustix::{ fs::{Mode, OFlags, open}, io::dup, ioctl::{Getter, Opcode, ioctl, opcode}, process::setsid, pty::{grantpt, unlockpt}, stdio::{dup2_stderr, dup2_stdin, dup2_stdout}, termios::{OptionalActions, Termios, isatty, tcgetattr, tcsetattr}, }; use crate::{defs::PTS_NAME, utils::get_tmp_path}; // https://github.com/topjohnwu/Magisk/blob/5627053b7481618adfdf8fa3569b48275589915b/native/src/core/su/pts.cpp fn get_pty_num(fd: F) -> Result { // TIOCGPTN: Get the PTY number const TIOCGPTN: Opcode = opcode::read::(b'T', 0x30); Ok(unsafe { let tiocgptn = Getter::::new(); ioctl(fd, tiocgptn)? 
})
}

// Original stdin termios, saved so raw mode can be undone on exit.
static OLD_STDIN: Mutex<Option<Termios>> = Mutex::new(None);

/// Forward terminal size changes (SIGWINCH) from our tty to the pty slave.
fn watch_sigwinch_async(slave: RawFd) {
    let mut winch = MaybeUninit::<sigset_t>::uninit();
    unsafe {
        sigemptyset(winch.as_mut_ptr());
        sigaddset(winch.as_mut_ptr(), SIGWINCH);
        pthread_sigmask(SIG_BLOCK, winch.as_mut_ptr(), null_mut());
    }

    thread::spawn(move || unsafe {
        let mut winch = MaybeUninit::<sigset_t>::uninit();
        sigemptyset(winch.as_mut_ptr());
        sigaddset(winch.as_mut_ptr(), SIGWINCH);
        pthread_sigmask(SIG_UNBLOCK, winch.as_mut_ptr(), null_mut());
        let mut sig: c_int = 0;
        loop {
            let mut w = MaybeUninit::<winsize>::uninit();
            if libc::ioctl(1, TIOCGWINSZ, w.as_mut_ptr()) < 0 {
                continue;
            }
            libc::ioctl(slave, TIOCSWINSZ, w.as_mut_ptr());
            if sigwait(winch.as_mut_ptr(), &mut sig) != 0 {
                break;
            }
        }
    });
}

/// Put stdin into raw mode, remembering the previous termios for restore_stdin().
fn set_stdin_raw() -> rustix::io::Result<()> {
    let mut termios = tcgetattr(stdin())?;

    let mut guard = OLD_STDIN.lock().unwrap();
    *guard = Some(termios.clone());
    drop(guard);

    termios.make_raw();
    tcsetattr(stdin(), OptionalActions::Flush, &termios)
}

/// Restore stdin termios saved by set_stdin_raw(), if any.
fn restore_stdin() -> Result<()> {
    let mut guard = OLD_STDIN.lock().unwrap();
    if let Some(original_termios) = guard.take() {
        tcsetattr(stdin(), OptionalActions::Flush, &original_termios)?;
    }
    Ok(())
}

/// Copy bytes from `from` to `to` until EOF or a write error, flushing each chunk.
fn pump<R: Read, W: Write>(mut from: R, mut to: W) {
    let mut buf = [0u8; 4096];
    loop {
        match from.read(&mut buf) {
            Result::Ok(len) => {
                if len == 0 {
                    return;
                }
                if to.write_all(&buf[0..len]).is_err() {
                    return;
                }
                if to.flush().is_err() {
                    return;
                }
            }
            Err(_) => {
                return;
            }
        }
    }
}

fn pump_stdin_async(mut ptmx: File) {
    let _ = set_stdin_raw();
    thread::spawn(move || {
        let mut stdin = stdin();
        pump(&mut stdin, &mut ptmx);
    });
}

fn pump_stdout_blocking(mut ptmx: File) {
    let mut stdout = stdout();
    pump(&mut ptmx, &mut stdout);
    let _ = restore_stdin();
}

/// Fork: the child returns to run the payload while the parent shuttles
/// bytes between our tty and the ptmx, then exits with the child's status.
fn create_transfer(ptmx: OwnedFd) -> Result<()> {
    let pid = unsafe { fork() };
    match pid {
        d if d < 0 => bail!("fork"),
        0 => return Ok(()),
        _ => {}
    }

    let ptmx_r = ptmx;
    let ptmx_w = dup(&ptmx_r)?;

    let ptmx_r = File::from(ptmx_r);
    let ptmx_w = File::from(ptmx_w);

    watch_sigwinch_async(ptmx_w.as_raw_fd());
    pump_stdin_async(ptmx_r);
    pump_stdout_blocking(ptmx_w);

    let mut status: c_int = -1;
    unsafe {
        loop {
            // Retry waitpid on EINTR only.
            if waitpid(pid, &mut status, 0) == -1
                && std::io::Error::last_os_error().raw_os_error() != Some(EINTR)
            {
                continue;
            }
            break;
        }
    }
    exit(status)
}

/// Allocate a pty and re-point whichever of our stdio fds are ttys at it,
/// with a forked parent relaying I/O to the real terminal. No-op when none
/// of stdin/stdout/stderr is a tty.
pub fn prepare_pty() -> Result<()> {
    let tty_in = isatty(stdin());
    let tty_out = isatty(stdout());
    let tty_err = isatty(stderr());
    if !tty_in && !tty_out && !tty_err {
        return Ok(());
    }

    let mut pts_path = format!("{}/{}", get_tmp_path(), PTS_NAME);
    if !std::path::Path::new(&pts_path).exists() {
        pts_path = "/dev/pts".to_string();
    }
    let ptmx_path = format!("{}/ptmx", pts_path);
    let ptmx_fd = open(ptmx_path, OFlags::RDWR, Mode::empty())?;
    grantpt(&ptmx_fd)?;
    unlockpt(&ptmx_fd)?;
    let pty_num = get_pty_num(&ptmx_fd)?;
    create_transfer(ptmx_fd)?;
    setsid()?;
    let pty_fd = open(format!("{pts_path}/{pty_num}"), OFlags::RDWR, Mode::empty())?;
    if tty_in {
        dup2_stdin(&pty_fd)?;
    }
    if tty_out {
        dup2_stdout(&pty_fd)?;
    }
    if tty_err {
        dup2_stderr(&pty_fd)?;
    }
    Ok(())
}

================================================
FILE: apd/src/resetprop.rs
================================================
use anyhow::{bail, Context, Result};
use clap::error::ErrorKind;
use clap::Parser;
use log::info;
use prop_rs_android::resetprop::ResetProp;
use prop_rs_android::sys_prop;
use std::fmt;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::Duration;

/// Error returned when `--wait` times out before the property appears/matches.
#[derive(Debug)]
pub struct WaitTimeoutError {
    name: String,
}

impl fmt::Display for WaitTimeoutError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "timeout waiting for {}", self.name)
    }
}

impl std::error::Error for WaitTimeoutError {}

/// Magisk-compatible Android system property tool.
#[derive(Debug, clap::Args)]
#[allow(clippy::struct_excessive_bools)]
pub struct Args {
    /// Skip property_service (force direct mmap operation).
#[arg(short = 'n', long = "skip-svc")] skip_svc: bool, /// Also operate on persistent property storage (persist.* files). #[arg(short = 'p', long = "persistent")] persistent: bool, /// Only read persistent properties from storage. #[arg(short = 'P')] persist_only: bool, /// Delete the named property. #[arg(short = 'd', long = "delete")] delete: bool, /// Verbose output. #[arg(short = 'v', long = "verbose")] verbose: bool, /// Wait for a property to exist or match a value. #[arg(short = 'w', long = "wait")] wait: bool, /// Timeout in seconds for --wait (default: wait forever). #[arg(long = "timeout")] timeout: Option, /// Load and set properties from FILE. #[arg(short = 'f', long = "file")] file: Option, /// Compact property area memory (reclaim holes left by deleted properties). /// Optionally pass a SELinux context name to compact only that area. #[arg(short = 'c', long = "compact")] compact: bool, /// Show SELinux context when listing properties. #[arg(short = 'Z')] show_context: bool, /// Property name. name: Option, /// Property value (for set or wait-for-value). value: Option, } #[derive(Parser)] #[command( name = "resetprop", version, about = "Magisk-compatible system property tool", disable_help_subcommand = true )] struct ResetPropParser { #[command(flatten)] arg: Args, } pub fn resetprop_main(args: &[String]) -> ! { if let Err(err) = run_from_args(args) { let code = if err.downcast_ref::().is_some() { 2 } else { 1 }; eprintln!("resetprop: {err:#}"); std::process::exit(code); } std::process::exit(0); } /// Entry point for resetprop multicall. /// /// `args` should include argv[0] (the program name). 
fn run_from_args(args: &[String]) -> Result<()> {
    match ResetPropParser::try_parse_from(args) {
        Ok(cli) => execute(&cli.arg),
        Err(err)
            if matches!(
                err.kind(),
                ErrorKind::DisplayHelp | ErrorKind::DisplayVersion
            ) =>
        {
            // --help / --version are not failures: print and succeed.
            err.print()?;
            Ok(())
        }
        Err(err) => Err(anyhow::anyhow!("{err}")),
    }
}

/// Execute resetprop logic
/// Subcommand will direct call that, skip run_from_args
pub fn execute(cli: &Args) -> Result<()> {
    sys_prop::init().context("Failed to initialize system property API")?;

    let rp = ResetProp {
        skip_svc: cli.skip_svc,
        persistent: cli.persistent,
        persist_only: cli.persist_only,
        verbose: cli.verbose,
        show_context: cli.show_context,
    };

    // Validate: at most one special mode
    let mode_flags = [cli.wait, cli.delete, cli.compact, cli.file.is_some()];
    if mode_flags.iter().filter(|&&on| on).count() > 1 {
        bail!("multiple operation modes detected");
    }

    // -w: wait mode
    if cli.wait {
        let name = cli
            .name
            .as_deref()
            .context("--wait requires a property name")?;
        let timeout = cli.timeout.map(Duration::from_secs_f64);
        let satisfied = rp
            .wait(name, cli.value.as_deref(), timeout)
            .context("wait failed")?;
        return if satisfied {
            Ok(())
        } else {
            Err(WaitTimeoutError {
                name: name.to_owned(),
            }
            .into())
        };
    }

    // -c: compact property area memory
    // When a positional argument is given, treat it as a SELinux context name.
    if cli.compact {
        if !sys_prop::compact(cli.name.as_deref()).context("compact failed")? {
            bail!("nothing to compact");
        }
        return Ok(());
    }

    // -f: load from file
    if let Some(path) = &cli.file {
        let lines = BufReader::new(
            File::open(path).with_context(|| format!("Failed to open {path}"))?,
        )
        .lines();
        rp.load_props(lines)
            .context("Failed to load properties from file")?;
        return Ok(());
    }

    // -d: delete
    if cli.delete {
        let name = cli
            .name
            .as_deref()
            .context("--delete requires a property name")?;
        if !rp.delete(name).context("delete failed")? {
            bail!("{name} not found");
        }
        return Ok(());
    }

    match (cli.name.as_deref(), cli.value.as_deref()) {
        // resetprop name value (set)
        (Some(name), Some(value)) => rp
            .set(name, value)
            .with_context(|| format!("Failed to set {name}"))?,
        // resetprop name (get)
        (Some(name), None) => match rp.get(name) {
            Some(val) => println!("{val}"),
            None => bail!("{name} not found"),
        },
        // resetprop (list all)
        (None, None) => {
            for (name, value) in &rp.list_all().context("Failed to list properties")? {
                println!("[{name}]: [{value}]");
            }
        }
        // a value without a name — invalid
        (None, Some(_)) => bail!("property name is required"),
    }
    Ok(())
}

/// Load system.prop file using internal resetprop API.
///
/// Equivalent to `resetprop -n --file <FILE>`.
#[allow(dead_code)] pub fn load_system_prop_file(path: &Path) -> Result<()> { sys_prop::init().context("Failed to initialize system property API")?; let rp = ResetProp { skip_svc: true, persistent: false, persist_only: false, verbose: false, show_context: false, }; let file = File::open(path).with_context(|| format!("Failed to open {}", path.display()))?; let reader = BufReader::new(file); rp.load_props(reader.lines()) .with_context(|| format!("Failed to load properties from {}", path.display()))?; info!("Loaded system.prop from {}", path.display()); Ok(()) } ================================================ FILE: apd/src/restorecon.rs ================================================ use std::path::Path; use anyhow::Result; #[cfg(any(target_os = "linux", target_os = "android"))] use anyhow::{Context, Ok}; #[cfg(any(target_os = "linux", target_os = "android"))] use extattr::{Flags as XattrFlags, lsetxattr}; use jwalk::{Parallelism::Serial, WalkDir}; use crate::defs; pub const SYSTEM_CON: &str = "u:object_r:system_file:s0"; pub const ADB_CON: &str = "u:object_r:adb_data_file:s0"; pub const UNLABEL_CON: &str = "u:object_r:unlabeled:s0"; const SELINUX_XATTR: &str = "security.selinux"; pub fn lsetfilecon>(path: P, con: &str) -> Result<()> { #[cfg(any(target_os = "linux", target_os = "android"))] lsetxattr(&path, SELINUX_XATTR, con, XattrFlags::empty()).with_context(|| { format!( "Failed to change SELinux context for {}", path.as_ref().display() ) })?; Ok(()) } #[cfg(any(target_os = "linux", target_os = "android"))] pub fn lgetfilecon>(path: P) -> Result { let con = extattr::lgetxattr(&path, SELINUX_XATTR).with_context(|| { format!( "Failed to get SELinux context for {}", path.as_ref().display() ) })?; let con = String::from_utf8_lossy(&con); Ok(con.to_string()) } #[cfg(any(target_os = "linux", target_os = "android"))] pub fn setsyscon>(path: P) -> Result<()> { lsetfilecon(path, SYSTEM_CON) } #[cfg(not(any(target_os = "linux", target_os = "android")))] pub fn 
setsyscon>(path: P) -> Result<()> { unimplemented!() } #[cfg(not(any(target_os = "linux", target_os = "android")))] pub fn lgetfilecon>(path: P) -> Result { unimplemented!() } pub fn restore_syscon>(dir: P) -> Result<()> { for dir_entry in WalkDir::new(dir).parallelism(Serial) { if let Some(path) = dir_entry.ok().map(|dir_entry| dir_entry.path()) { setsyscon(&path)?; } } Ok(()) } fn restore_syscon_if_unlabeled>(dir: P) -> Result<()> { for dir_entry in WalkDir::new(dir).parallelism(Serial) { if let Some(path) = dir_entry.ok().map(|dir_entry| dir_entry.path()) { if let Result::Ok(con) = lgetfilecon(&path) { if con == UNLABEL_CON || con.is_empty() { lsetfilecon(&path, SYSTEM_CON)?; } } } } Ok(()) } pub fn restorecon() -> Result<()> { lsetfilecon(defs::DAEMON_PATH, ADB_CON)?; restore_syscon_if_unlabeled(defs::MODULE_DIR)?; Ok(()) } ================================================ FILE: apd/src/sepolicy.rs ================================================ use anyhow::{Context, Result, bail}; use clap::Parser; use policy::{SePolicy, format_statement_help}; use std::io::{self, Write}; use std::path::PathBuf; /// Write adapter for formatting struct WriteAdapter(T); impl std::fmt::Write for WriteAdapter { fn write_str(&mut self, s: &str) -> std::fmt::Result { self.0.write_all(s.as_bytes()).map_err(|_| std::fmt::Error) } } /// MagiskPolicy - SELinux Policy Patch Tool #[derive(Debug, clap::Args)] #[allow(clippy::struct_excessive_bools)] pub struct Args { /// Load monolithic sepolicy from FILE #[arg(long = "load", value_name = "FILE")] load: Option, /// Load from precompiled sepolicy or compile split cil policies #[arg(long = "load-split")] load_split: bool, /// Compile split cil policies #[arg(long = "compile-split")] compile_split: bool, /// Dump monolithic sepolicy to FILE #[arg(long = "save", value_name = "FILE")] save: Option, /// Immediately load sepolicy into the kernel #[arg(long = "live")] live: bool, /// Apply built-in Magisk sepolicy rules #[arg(long = "magisk")] 
magisk: bool, /// Apply rules from FILE, read and parsed line by line as policy statements #[arg(long = "apply", value_name = "FILE")] apply: Vec, /// Print all rules in the loaded sepolicy #[arg(long = "print-rules")] print_rules: bool, /// Policy statements to apply #[arg(required = false)] policies: Vec, } #[derive(Parser)] #[command( name = "magiskpolicy", version, about = "SELinux Policy Patch Tool", disable_help_subcommand = true )] struct MagiskPolicyParser { #[command(flatten)] arg: Args, } pub fn policy_main(args: &[String]) -> ! { if let Err(err) = run_from_args(args) { eprintln!("magiskpolicy: {err:#}"); std::process::exit(1); } std::process::exit(0); } /// Entry point for magiskpolicy multicall. /// /// `args` should include argv[0] (the program name). fn run_from_args(args: &[String]) -> Result<()> { let parser = match MagiskPolicyParser::try_parse_from(args) { Ok(cli) => cli, Err(err) => { if err.kind() == clap::error::ErrorKind::DisplayHelp { print_usage(args.first().map(|s| s.as_str()).unwrap_or("magiskpolicy")); return Ok(()); } if err.kind() == clap::error::ErrorKind::DisplayVersion { err.print()?; return Ok(()); } return Err(anyhow::anyhow!("{err}")); } }; execute(&parser.arg) } pub fn get_policy_main(args: &[String]) -> Result { let parser = MagiskPolicyParser::try_parse_from(args)?; let cli = parser.arg; // Validate mutually exclusive options let load_count = cli.load.iter().count() + cli.compile_split as usize + cli.load_split as usize; if load_count > 1 { bail!("Multiple load source supplied"); } // Load policy let mut sepol = if let Some(ref file) = cli.load { SePolicy::from_file(file) .with_context(|| format!("Cannot load policy from {}", file.display()))? } else if cli.load_split { SePolicy::from_split().context("Cannot load split policy")? } else if cli.compile_split { SePolicy::compile_split().context("Cannot compile split policy")? } else { SePolicy::from_file("/sys/fs/selinux/policy").context("Cannot load live policy")? 
}; execute_next(&cli, &mut sepol)?; Ok(sepol) } /// Execute magiskpolicy logic /// Subcommand will direct call that, skip run_from_args pub fn execute(cli: &Args) -> Result<()> { // Validate mutually exclusive options let load_count = cli.load.iter().count() + cli.compile_split as usize + cli.load_split as usize; if load_count > 1 { bail!("Multiple load source supplied"); } // Load policy let mut sepol = if let Some(ref file) = cli.load { SePolicy::from_file(file) .with_context(|| format!("Cannot load policy from {}", file.display()))? } else if cli.load_split { SePolicy::from_split().context("Cannot load split policy")? } else if cli.compile_split { SePolicy::compile_split().context("Cannot compile split policy")? } else { SePolicy::from_file("/sys/fs/selinux/policy").context("Cannot load live policy")? }; execute_next(cli, &mut sepol)?; Ok(()) } fn execute_next(cli: &Args, sepol: &mut SePolicy) -> Result<()> { if cli.print_rules { if cli.magisk || !cli.apply.is_empty() || !cli.policies.is_empty() || cli.live || cli.save.is_some() { bail!("Cannot print rules with other options"); } sepol.print_rules(); return Ok(()); } if cli.magisk { sepol.magisk_rules(); } for file in &cli.apply { sepol .load_rule_file(file) .with_context(|| format!("Cannot load rule file {}", file.display()))?; } for statement in &cli.policies { sepol.load_rules(statement); } if cli.live { sepol .to_file("/sys/fs/selinux/load") .context("Cannot apply policy")?; } if let Some(ref file) = cli.save { sepol .to_file(file) .with_context(|| format!("Cannot dump policy to {}", file.display()))?; } Ok(()) } /// Print usage information fn print_usage(cmd: &str) { eprintln!( r#"MagiskPolicy - SELinux Policy Patch Tool Usage: {cmd} [--options...] [policy statements...] 
Options: --help show help message for policy statements --load FILE load monolithic sepolicy from FILE --load-split load from precompiled sepolicy or compile split cil policies --compile-split compile split cil policies --save FILE dump monolithic sepolicy to FILE --live immediately load sepolicy into the kernel --magisk apply built-in Magisk sepolicy rules --apply FILE apply rules from FILE, read and parsed line by line as policy statements (multiple --apply are allowed) --print-rules print all rules in the loaded sepolicy If neither --load, --load-split, nor --compile-split is specified, it will load from current live policies (/sys/fs/selinux/policy) "#
    );
    // Append the policy-statement grammar help after the option summary.
    let _ = format_statement_help(&mut WriteAdapter(io::stderr()));
    eprintln!();
}

================================================
FILE: apd/src/supercall.rs
================================================
use std::{
    ffi::{CStr, CString},
    fs::File,
    io::{self, Read},
    process,
    sync::{Arc, Mutex},
};

use libc::{c_long, c_void, syscall, uid_t, EINVAL};
use log::{error, info, warn};

use crate::package::{read_ap_package_config, synchronize_package_uid};

// Version components encoded into every supercall (see ver_and_cmd below).
const MAJOR: c_long = 0;
const MINOR: c_long = 13;
const PATCH: c_long = 1;

// Kernel-storage group ids used with the KSTORAGE read/write supercalls.
const KSTORAGE_EXCLUDE_LIST_GROUP: i32 = 1;
const KSTORAGE_AUTO_EXCLUDE_GROUP: i32 = 3;

// Syscall number used to issue supercalls.
const __NR_SUPERCALL: c_long = 45;

// Supercall command codes (low 16 bits of the packed command word).
const SUPERCALL_SU: c_long = 0x1010;
const SUPERCALL_KSTORAGE_WRITE: c_long = 0x1041;
const SUPERCALL_KSTORAGE_READ: c_long = 0x1042;
const SUPERCALL_SU_GRANT_UID: c_long = 0x1100;
const SUPERCALL_SU_REVOKE_UID: c_long = 0x1101;
const SUPERCALL_SU_NUMS: c_long = 0x1102;
const SUPERCALL_SU_LIST: c_long = 0x1103;
const SUPERCALL_SU_RESET_PATH: c_long = 0x1111;
const SUPERCALL_SU_GET_SAFEMODE: c_long = 0x1112;
const SUPERCALL_KPM_LOAD: c_long = 0x1020;
const SUPERCALL_UTS_SET: c_long = 0x1050;
const SUPERCALL_UTS_RESET: c_long = 0x1051;
const SUPERCALL_PATHHIDE_ENABLE: c_long = 0x1064;
const SUPERCALL_PATHHIDE_ADD: c_long = 0x1060;
const SUPERCALL_PATHHIDE_CLEAR: c_long =
0x1063;
const SUPERCALL_PATHHIDE_UID_MODE: c_long = 0x106A;
const SUPERCALL_PATHHIDE_UID_ADD: c_long = 0x1066;
const SUPERCALL_PATHHIDE_UID_CLEAR: c_long = 0x1069;
const SUPERCALL_PATHHIDE_FILTER_SYSTEM: c_long = 0x106B;
const SUPERCALL_NETISOLATE_ENABLE: c_long = 0x1070;
const SUPERCALL_NETISOLATE_UID_ADD: c_long = 0x1072;
const SUPERCALL_NETISOLATE_UID_REMOVE: c_long = 0x1073;
const SUPERCALL_NETISOLATE_UID_LIST: c_long = 0x1074;
const SUPERCALL_NETISOLATE_UID_CLEAR: c_long = 0x1075;

// Fixed size of the SELinux context buffer inside SuProfile.
const SUPERCALL_SCONTEXT_LEN: usize = 0x60;

/// Root-grant profile passed to the kernel: which uid is granted, which uid
/// it becomes, and the (NUL-padded) SELinux context it runs under.
#[repr(C)]
struct SuProfile {
    uid: i32,
    to_uid: i32,
    scontext: [u8; SUPERCALL_SCONTEXT_LEN],
}

/// Pack the compiled-in version, a magic tag (0x1158) and `cmd` into the
/// second supercall argument.
fn ver_and_cmd(cmd: c_long) -> c_long {
    let version_code: u32 = ((MAJOR << 16) + (MINOR << 8) + PATCH).try_into().unwrap();
    ((version_code as c_long) << 32) | (0x1158 << 16) | (cmd & 0xFFFF)
}

/// Revoke a previously granted root uid. Returns 0 on success or a negative
/// errno-style value.
fn sc_su_revoke_uid(key: &CStr, uid: uid_t) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_SU_REVOKE_UID),
            uid,
        ) as c_long
    }
}

/// Grant root to the uid described by `profile`.
fn sc_su_grant_uid(key: &CStr, profile: &SuProfile) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_SU_GRANT_UID),
            profile,
        ) as c_long
    }
}

/// Write `dlen` bytes at `offset` into kernel storage group `gid`, slot `did`.
/// Offset and length are packed into a single 64-bit argument.
fn sc_kstorage_write(
    key: &CStr,
    gid: i32,
    did: i64,
    data: *mut c_void,
    offset: i32,
    dlen: i32,
) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_KSTORAGE_WRITE),
            gid as c_long,
            did as c_long,
            data,
            (((offset as i64) << 32) | (dlen as i64)) as c_long,
        ) as c_long
    }
}

/// Read `dlen` bytes at `offset` from kernel storage group `gid`, slot `did`.
fn sc_kstorage_read(
    key: &CStr,
    gid: i32,
    did: i64,
    out_data: *mut c_void,
    offset: i32,
    dlen: i32,
) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_KSTORAGE_READ),
            gid as c_long,
            did as c_long,
            out_data,
            (((offset as i64) << 32) | (dlen as i64)) as c_long,
        ) as c_long
    }
}

/// Write an exclude flag for `uid` into the kernel exclude-list storage group.
fn sc_set_ap_mod_exclude(key: &CStr, uid: i64, exclude: i32) -> c_long {
    sc_kstorage_write(
        key,
        KSTORAGE_EXCLUDE_LIST_GROUP,
        uid,
        &exclude as *const i32 as *mut c_void,
        0,
        size_of::<i32>() as i32,
    )
}

/// Read the "auto exclude new apps" flag from kernel storage; returns 0 when
/// the read fails or the flag is unset.
pub fn get_new_app_profile_mode() -> i32 {
    let key = CStr::from_bytes_with_nul(b"su\0").expect("auto exclude key init failed");
    let mut enabled = 0_i32;
    let rc = sc_kstorage_read(
        key,
        KSTORAGE_AUTO_EXCLUDE_GROUP,
        0,
        &mut enabled as *mut i32 as *mut c_void,
        0,
        size_of::<i32>() as i32,
    );
    if rc < 0 {
        return 0;
    }
    enabled
}

/// Query kernel safemode. Returns 0 (not in safemode) when the key is absent.
pub fn sc_su_get_safemode(key: &CStr) -> c_long {
    if key.to_bytes().is_empty() {
        warn!("[sc_su_get_safemode] null superkey, tell apd we are not in safemode!");
        return 0;
    }
    // Defensive check; CStr::as_ptr() never actually returns null.
    let key_ptr = key.as_ptr();
    if key_ptr.is_null() {
        warn!("[sc_su_get_safemode] superkey pointer is null!");
        return 0;
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key_ptr,
            ver_and_cmd(SUPERCALL_SU_GET_SAFEMODE),
        ) as c_long
    }
}

/// Elevate the calling process according to `profile`.
fn sc_su(key: &CStr, profile: &SuProfile) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_SU),
            profile,
        ) as c_long
    }
}

/// Change the kernel-installed su binary path.
fn sc_su_reset_path(key: &CStr, path: &CStr) -> c_long {
    if key.to_bytes().is_empty() || path.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_SU_RESET_PATH),
            path.as_ptr(),
        ) as c_long
    }
}

/// Load a KPM kernel module from `path` with argument string `args`.
fn sc_kpm_load(key: &CStr, path: &CStr, args: &CStr) -> c_long {
    if key.to_bytes().is_empty() || path.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_KPM_LOAD),
            path.as_ptr(),
            args.as_ptr(),
            std::ptr::null::<c_void>(),
        ) as c_long
    }
}

/// Number of uids currently granted su, or a negative errno-style value.
fn sc_su_uid_nums(key: &CStr) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe { syscall(__NR_SUPERCALL, key.as_ptr(), ver_and_cmd(SUPERCALL_SU_NUMS)) as c_long }
}

/// Fill `buf` with the uids currently granted su.
fn sc_su_allow_uids(key: &CStr, buf: &mut [uid_t]) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
if buf.is_empty() { return (-EINVAL).into(); } unsafe { syscall( __NR_SUPERCALL, key.as_ptr(), ver_and_cmd(SUPERCALL_SU_LIST), buf.as_mut_ptr(), buf.len() as i32, ) as c_long } } fn read_file_to_string(path: &str) -> io::Result { let mut file = File::open(path)?; let mut content = String::new(); file.read_to_string(&mut content)?; Ok(content) } fn convert_string_to_u8_array(s: &str) -> [u8; SUPERCALL_SCONTEXT_LEN] { let mut u8_array = [0u8; SUPERCALL_SCONTEXT_LEN]; let bytes = s.as_bytes(); let len = usize::min(SUPERCALL_SCONTEXT_LEN, bytes.len()); u8_array[..len].copy_from_slice(&bytes[..len]); u8_array } fn convert_superkey(s: &Option) -> Option { s.as_ref().and_then(|s| CString::new(s.clone()).ok()) } pub fn refresh_ap_package_list(skey: &CStr, mutex: &Arc>) { let _lock = mutex.lock().unwrap(); if let Err(e) = synchronize_package_uid() { error!("Failed to synchronize package UIDs: {}", e); } let package_configs = read_ap_package_config(); let num = sc_su_uid_nums(skey); if num < 0 { error!("[refresh_su_list] Error getting number of UIDs: {}", num); return; } let num = num as usize; let mut uids = vec![0 as uid_t; num]; let n = sc_su_allow_uids(skey, &mut uids); if n < 0 { error!("[refresh_su_list] Error getting su list"); return; } let granted_uids: std::collections::HashSet = package_configs .iter() .filter(|c| c.allow == 1 && c.exclude == 0) .map(|c| c.uid as uid_t) .collect(); for uid in &uids { if *uid == 0 || *uid == 2000 { continue; } if granted_uids.contains(uid) { continue; } info!( "[refresh_ap_package_list] Revoking {} root permission...", uid ); let rc = sc_su_revoke_uid(skey, *uid); if rc != 0 { error!("[refresh_ap_package_list] Error revoking UID: {}", rc); } } for config in &package_configs { if config.allow == 1 && config.exclude == 0 { let profile = SuProfile { uid: config.uid, to_uid: config.to_uid, scontext: convert_string_to_u8_array(&config.sctx), }; let result = sc_su_grant_uid(skey, &profile); info!( "[refresh_ap_package_list] Loading {}: 
result = {}", config.pkg, result ); } if config.allow == 0 && config.exclude == 1 { let result = sc_set_ap_mod_exclude(skey, config.uid as i64, 1); info!( "[refresh_ap_package_list] Loading exclude {}: result = {}", config.pkg, result ); } } } pub fn privilege_apd_profile(superkey: &Option) { let key = convert_superkey(superkey); let all_allow_ctx = "u:r:magisk:s0"; let profile = SuProfile { uid: process::id().try_into().expect("PID conversion failed"), to_uid: 0, scontext: convert_string_to_u8_array(all_allow_ctx), }; if let Some(ref key) = key { let result = sc_su(key, &profile); info!("[privilege_apd_profile] result = {}", result); } } pub fn init_load_su_path(superkey: &Option) { let su_path_file = "/data/adb/ap/su_path"; match read_file_to_string(su_path_file) { Ok(su_path) => { let superkey_cstr = convert_superkey(superkey); match superkey_cstr { Some(superkey_cstr) => match CString::new(su_path.trim()) { Ok(su_path_cstr) => { let result = sc_su_reset_path(&superkey_cstr, &su_path_cstr); if result == 0 { info!("suPath load successfully"); } else { warn!("Failed to load su path, error code: {}", result); } } Err(e) => { warn!("Failed to convert su_path: {}", e); } }, _ => { warn!("Superkey is None, skipping..."); } } } Err(e) => { warn!("Failed to read su_path file: {}", e); } } } pub fn autoload_kpm_modules(superkey: &Option, event_filter: &str) { use serde::Deserialize; #[derive(Deserialize, Default)] struct KpmAutoLoadEntry { path: String, #[serde(default = "default_event")] event: String, #[serde(default)] args: String, } fn default_event() -> String { "service".to_string() } #[derive(Deserialize, Default)] struct KpmAutoLoadConfig { enabled: bool, #[serde(default, rename = "kpmEntries")] kpm_entries: Vec, } let config_path = crate::defs::KPM_AUTOLOAD_CONFIG; let content = match std::fs::read_to_string(config_path) { Ok(c) => c, Err(e) => { info!("[kpm_autoload] config not found or unreadable ({}): {}", config_path, e); return; } }; let config: 
KpmAutoLoadConfig = match serde_json::from_str(&content) {
        Ok(c) => c,
        Err(e) => {
            warn!("[kpm_autoload] failed to parse config: {}", e);
            return;
        }
    };
    if !config.enabled || config.kpm_entries.is_empty() {
        info!("[kpm_autoload] disabled or no entries configured, skipping");
        return;
    }
    let key = convert_superkey(superkey);
    let key = match key {
        Some(k) => k,
        None => {
            warn!("[kpm_autoload] no superkey available");
            return;
        }
    };
    // Hard cap on how many entries are processed per invocation.
    const MAX_KPM_MODULES: usize = 64;
    if config.kpm_entries.len() > MAX_KPM_MODULES {
        warn!(
            "[kpm_autoload] too many entries ({}), truncating to {}",
            config.kpm_entries.len(),
            MAX_KPM_MODULES
        );
    }
    let mut success = 0u32;
    let mut fail = 0u32;
    for entry in config.kpm_entries.iter().take(MAX_KPM_MODULES) {
        // Only load entries registered for the current boot event.
        if entry.event != event_filter {
            info!("[kpm_autoload] skipping '{}' (event='{}', expected='{}')", entry.path, entry.event, event_filter);
            continue;
        }
        let path_str = &entry.path;
        if !std::path::Path::new(path_str).exists() {
            warn!("[kpm_autoload] file not found: {}", path_str);
            fail += 1;
            continue;
        }
        // Canonicalize and confine to the allowed autoload directory so a
        // config entry cannot point the kernel at an arbitrary file.
        let canonical = match std::fs::canonicalize(path_str) {
            Ok(p) => p,
            Err(e) => {
                warn!("[kpm_autoload] cannot canonicalize '{}': {}", path_str, e);
                fail += 1;
                continue;
            }
        };
        let allowed_dir = std::path::Path::new(crate::defs::FP_KPMS_AUTOLOAD_DIR);
        if !canonical.starts_with(allowed_dir) {
            warn!(
                "[kpm_autoload] path '{}' outside allowed directory '{}', skipping",
                path_str, crate::defs::FP_KPMS_AUTOLOAD_DIR
            );
            fail += 1;
            continue;
        }
        let path_cstr = match CString::new(canonical.to_string_lossy().into_owned()) {
            Ok(c) => c,
            Err(e) => {
                warn!("[kpm_autoload] invalid canonical path: {}", e);
                fail += 1;
                continue;
            }
        };
        let args_cstr = match CString::new(entry.args.clone()) {
            Ok(c) => c,
            Err(e) => {
                warn!("[kpm_autoload] invalid args for '{}': {}", path_str, e);
                fail += 1;
                continue;
            }
        };
        info!("[kpm_autoload] loading '{}' with event='{}' args='{}'", path_str, entry.event, entry.args);
        let rc = sc_kpm_load(&key, &path_cstr, &args_cstr);
        if rc == 0 {
            success += 1;
            info!("[kpm_autoload] loaded: {}", path_str);
        } else {
            fail += 1;
            warn!("[kpm_autoload] failed to load '{}', rc={}", path_str, rc);
        }
    }
    info!("[kpm_autoload] done: success={}, fail={}", success, fail);
}

// Toggle the kernel path-hiding feature on or off.
fn sc_pathhide_enable(key: &CStr, enable: bool) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_ENABLE),
            if enable { 1i64 } else { 0i64 },
        ) as c_long
    }
}

// Add a path to the kernel path-hiding blocklist.
fn sc_pathhide_add(key: &CStr, path: &CStr) -> c_long {
    if key.to_bytes().is_empty() || path.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_ADD),
            path.as_ptr(),
        ) as c_long
    }
}

// Clear the kernel path-hiding blocklist.
fn sc_pathhide_clear(key: &CStr) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_CLEAR),
        ) as c_long
    }
}

// Toggle the per-uid filter mode for path hiding.
fn sc_pathhide_uid_mode(key: &CStr, enable: bool) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_UID_MODE),
            if enable { 1i64 } else { 0i64 },
        ) as c_long
    }
}

// Toggle the "filter system" option of path hiding.
fn sc_pathhide_filter_system(key: &CStr, enable: bool) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_FILTER_SYSTEM),
            if enable { 1i64 } else { 0i64 },
        ) as c_long
    }
}

// Add a uid to the path-hiding uid list.
fn sc_pathhide_uid_add(key: &CStr, uid: i32) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_UID_ADD),
            uid,
        ) as c_long
    }
}

// Clear the path-hiding uid list.
fn sc_pathhide_uid_clear(key: &CStr) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_PATHHIDE_UID_CLEAR),
        ) as c_long
    }
}

// Toggle the kernel network-isolation feature.
fn sc_netisolate_enable(key: &CStr, enable: bool) -> c_long {
    if key.to_bytes().is_empty() {
        return
(-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_NETISOLATE_ENABLE),
            if enable { 1i64 } else { 0i64 },
        ) as c_long
    }
}

/// Add `uid` to the kernel network-isolation uid list.
fn sc_netisolate_uid_add(key: &CStr, uid: i32) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_NETISOLATE_UID_ADD),
            uid,
        ) as c_long
    }
}

/// Clear the kernel network-isolation uid list.
fn sc_netisolate_uid_clear(key: &CStr) -> c_long {
    if key.to_bytes().is_empty() {
        return (-EINVAL).into();
    }
    unsafe {
        syscall(
            __NR_SUPERCALL,
            key.as_ptr(),
            ver_and_cmd(SUPERCALL_NETISOLATE_UID_CLEAR),
        ) as c_long
    }
}

/// Restore network-isolation state from the on-disk config, then enable it.
/// The uid list is populated first so the feature only switches on once the
/// configuration is fully in place.
pub fn apply_netisolate(superkey: &Option<String>) {
    if !std::path::Path::new(crate::defs::NETISOLATE_ENABLE_FILE).exists() {
        info!("[netisolate] disabled, skipping");
        return;
    }
    let key = convert_superkey(superkey);
    let key = match key {
        Some(k) => k,
        None => {
            warn!("[netisolate] no superkey available");
            return;
        }
    };

    // Step 1: Clear and populate UID blocklist (netisolate not yet enabled)
    match std::fs::read_to_string(crate::defs::NETISOLATE_UIDS_FILE) {
        Ok(uids) => {
            sc_netisolate_uid_clear(&key);
            let mut count = 0u32;
            for uid_str in uids.lines() {
                let uid_str = uid_str.trim();
                if uid_str.is_empty() {
                    continue;
                }
                match uid_str.parse::<i32>() {
                    Ok(uid) => {
                        let rc = sc_netisolate_uid_add(&key, uid);
                        if rc < 0 {
                            warn!("[netisolate] add uid {} failed: {}", uid, rc);
                        } else {
                            count += 1;
                        }
                    }
                    Err(_) => {
                        warn!("[netisolate] invalid uid: '{}'", uid_str);
                    }
                }
            }
            info!("[netisolate] {} uids restored", count);
        }
        Err(_) => {
            info!("[netisolate] no uids file");
        }
    }

    // Step 2: Enable netisolate LAST
    let rc = sc_netisolate_enable(&key, true);
    if rc < 0 {
        warn!("[netisolate] enable failed: {}", rc);
        return;
    }
    info!("[netisolate] auto-apply completed");
}

/// Restore path-hiding state from the on-disk config and enable it:
/// blocklisted paths, optional uid whitelist mode, optional system filter,
/// and finally the enable flag.
pub fn apply_pathhide(superkey: &Option<String>) {
    if !std::path::Path::new(crate::defs::PATHHIDE_ENABLE_FILE).exists() {
        info!("[pathhide] disabled, skipping");
        return;
    }
    let key = convert_superkey(superkey);
    let key = match key {
        Some(k) => k,
        None => {
            warn!("[pathhide] no superkey available");
            return;
        }
    };

    // Step 1: Clear and populate blocklist (pathhide not yet enabled, hooks are no-ops)
    match std::fs::read_to_string(crate::defs::PATHHIDE_PATHS_FILE) {
        Ok(paths) => {
            sc_pathhide_clear(&key);
            let mut count = 0u32;
            for path in paths.lines() {
                let path = path.trim();
                if path.is_empty() {
                    continue;
                }
                match CString::new(path) {
                    Ok(path_cstr) => {
                        let rc = sc_pathhide_add(&key, &path_cstr);
                        if rc < 0 {
                            warn!("[pathhide] add path '{}' failed: {}", path, rc);
                        } else {
                            count += 1;
                        }
                    }
                    Err(e) => {
                        warn!("[pathhide] invalid path '{}': {}", path, e);
                    }
                }
            }
            info!("[pathhide] {} paths restored", count);
        }
        Err(_) => {
            info!("[pathhide] no paths file, clearing blocklist");
            sc_pathhide_clear(&key);
        }
    }

    // Step 2: Configure UID whitelist BEFORE enabling (so filters are ready)
    if std::path::Path::new(crate::defs::PATHHIDE_UID_MODE_FILE).exists() {
        match std::fs::read_to_string(crate::defs::PATHHIDE_UIDS_FILE) {
            Ok(uids) => {
                sc_pathhide_uid_clear(&key);
                let mut count = 0u32;
                for uid_str in uids.lines() {
                    let uid_str = uid_str.trim();
                    if uid_str.is_empty() {
                        continue;
                    }
                    match uid_str.parse::<i32>() {
                        Ok(uid) => {
                            let rc = sc_pathhide_uid_add(&key, uid);
                            if rc < 0 {
                                warn!("[pathhide] add uid {} failed: {}", uid, rc);
                            } else {
                                count += 1;
                            }
                        }
                        Err(_) => {
                            warn!("[pathhide] invalid uid: '{}'", uid_str);
                        }
                    }
                }
                info!("[pathhide] {} uids restored", count);
            }
            Err(_) => {
                info!("[pathhide] no uids file");
            }
        }
        let rc = sc_pathhide_uid_mode(&key, true);
        if rc < 0 {
            warn!("[pathhide] uid mode enable failed: {}", rc);
        }
    }

    // Step 2.5: Configure filter_system (allow hiding from system/root UIDs)
    if std::path::Path::new(crate::defs::PATHHIDE_FILTER_SYSTEM_FILE).exists() {
        let rc = sc_pathhide_filter_system(&key, true);
        if rc < 0 {
            warn!("[pathhide] filter_system enable failed: {}", rc);
        }
    }

    // Step 3: Enable pathhide LAST (all config is now in place)
    let rc = sc_pathhide_enable(&key, true);
    if rc < 0 {
        warn!("[pathhide] enable failed: {}", rc);
return; } info!("[pathhide] auto-apply completed"); } fn sc_uts_set(key: &CStr, release: Option<&CStr>, version: Option<&CStr>) -> c_long { if key.to_bytes().is_empty() { return (-EINVAL).into(); } let release_ptr = match release { Some(r) => r.as_ptr(), None => std::ptr::null(), }; let version_ptr = match version { Some(v) => v.as_ptr(), None => std::ptr::null(), }; unsafe { syscall( __NR_SUPERCALL, key.as_ptr(), ver_and_cmd(SUPERCALL_UTS_SET), release_ptr, version_ptr, ) as c_long } } fn sc_uts_reset(key: &CStr) -> c_long { if key.to_bytes().is_empty() { return (-EINVAL).into(); } unsafe { syscall( __NR_SUPERCALL, key.as_ptr(), ver_and_cmd(SUPERCALL_UTS_RESET), ) as c_long } } pub fn apply_uts_spoof(superkey: &Option) { use std::path::Path; const MAX_BOOT_RETRIES: u32 = 3; if !Path::new(crate::defs::UTS_SPOOF_ENABLE_FILE).exists() { info!("[uts_spoof] disabled, skipping"); return; } let config_content = match std::fs::read_to_string(crate::defs::UTS_SPOOF_CONFIG_FILE) { Ok(c) => c, Err(e) => { warn!("[uts_spoof] failed to read config: {}", e); return; } }; let config: serde_json::Value = match serde_json::from_str(&config_content) { Ok(v) => v, Err(e) => { warn!("[uts_spoof] failed to parse config: {}", e); return; } }; let release = config.get("release").and_then(|v| v.as_str()).unwrap_or(""); let version = config.get("version").and_then(|v| v.as_str()).unwrap_or(""); let key = convert_superkey(superkey); let key = match key { Some(k) => k, None => { warn!("[uts_spoof] no superkey available"); return; } }; let _ = sc_uts_reset(&key); let retries = match std::fs::read_to_string(crate::defs::UTS_SPOOF_BOOT_PENDING) { Ok(s) => s.trim().parse::().unwrap_or(0), Err(_) => 0, }; if retries >= MAX_BOOT_RETRIES { warn!( "[uts_spoof] boot pending retries ({}) >= max ({}), skipping spoof to prevent bootloop", retries, MAX_BOOT_RETRIES ); let _ = std::fs::remove_file(crate::defs::UTS_SPOOF_BOOT_PENDING); return; } if let Err(e) = 
std::fs::write(crate::defs::UTS_SPOOF_BOOT_PENDING, (retries + 1).to_string()) { warn!("[uts_spoof] failed to write boot pending flag: {}", e); } // Only set if we have values to spoof let release_cstr = if !release.is_empty() { match CString::new(release) { Ok(c) => Some(c), Err(e) => { warn!("[uts_spoof] invalid release string: {}", e); None } } } else { None }; let version_cstr = if !version.is_empty() { match CString::new(version) { Ok(c) => Some(c), Err(e) => { warn!("[uts_spoof] invalid version string: {}", e); None } } } else { None }; if release_cstr.is_some() || version_cstr.is_some() { let rc = sc_uts_set( &key, release_cstr.as_deref(), version_cstr.as_deref(), ); if rc == 0 { info!("[uts_spoof] applied: release='{}' version='{}'", release, version); } else { warn!("[uts_spoof] set failed: {}", rc); } } else { info!("[uts_spoof] config has empty values, skipping set"); } } ================================================ FILE: apd/src/utils.rs ================================================ #[allow(unused_imports)] use std::fs::{Permissions, set_permissions}; #[cfg(unix)] use std::os::unix::prelude::PermissionsExt; use std::{ ffi::CString, fs::{File, OpenOptions, create_dir_all, metadata}, io::{ErrorKind::AlreadyExists, Write}, path::Path, process::{Command, Stdio}, }; use anyhow::{Context, Error, Ok, Result, bail}; use log::{info, warn}; use crate::{defs, supercall::sc_su_get_safemode}; pub fn ensure_file_exists>(file: T) -> Result<()> { match File::options().write(true).create_new(true).open(&file) { Result::Ok(_) => Ok(()), Err(err) => { if err.kind() == AlreadyExists && file.as_ref().is_file() { Ok(()) } else { Err(Error::from(err)) .with_context(|| format!("{} is not a regular file", file.as_ref().display())) } } } } pub fn ensure_dir_exists>(dir: T) -> Result<()> { let result = create_dir_all(&dir).map_err(Error::from); if dir.as_ref().is_dir() { result } else if result.is_ok() { bail!("{} is not a regular directory", dir.as_ref().display()) } else 
{ result } }

/// Create `dir` (including any missing parents) and apply `mode` to both
/// `parent` and `dir`. If `dir` already exists this is a no-op — permissions
/// are NOT re-applied in that case.
///
/// NOTE(review): generic/type parameters in this section were destroyed by
/// text extraction; they are reconstructed here from the call sites visible
/// in the bodies (`AsRef<Path>`, `Option<String>`, `Option<Stdio>`).
pub fn ensure_dir_with_perms(dir: &Path, parent: &Path, mode: u32) -> Result<()> {
    if dir.exists() {
        return Ok(());
    }
    create_dir_all(dir)
        .with_context(|| format!("Failed to create {} directory", dir.display()))?;
    let permissions = Permissions::from_mode(mode);
    set_permissions(parent, permissions.clone())
        .with_context(|| format!("Failed to set permissions for {}", parent.display()))?;
    set_permissions(dir, permissions)
        .with_context(|| format!("Failed to set permissions for {}", dir.display()))?;
    info!("Created directory: {}", dir.display());
    Ok(())
}

// todo: ensure
/// Mark the file at `path` executable (mode 0o755).
pub fn ensure_binary<T: AsRef<Path>>(path: T) -> Result<()> {
    set_permissions(&path, Permissions::from_mode(0o755))?;
    Ok(())
}

/// The daemon working directory (see `defs::WORKING_DIR`).
pub fn get_work_dir() -> &'static str {
    defs::WORKING_DIR
}

/// Read an Android system property; `None` when the property is unset.
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn getprop(prop: &str) -> Option<String> {
    android_properties::getprop(prop).value()
}

#[cfg(not(any(target_os = "linux", target_os = "android")))]
pub fn getprop(_prop: &str) -> Option<String> {
    unimplemented!()
}

/// Spawn `command` with `args`, optionally redirecting its stdout.
/// Returns the running child handle without waiting for completion.
pub fn run_command(
    command: &str,
    args: &[&str],
    stdout: Option<Stdio>,
) -> Result<std::process::Child> {
    let mut command_builder = Command::new(command);
    command_builder.args(args);
    if let Some(out) = stdout {
        command_builder.stdout(out);
    }
    let child = command_builder.spawn()?;
    Ok(child)
}

/// True when Android safe mode is requested, either via the system
/// properties `persist.sys.safemode` / `ro.sys.safemode` or via the kernel
/// (queried through the superkey supercall). With no valid superkey the
/// kernel query is skipped and treated as "not safe mode".
pub fn is_safe_mode(superkey: Option<String>) -> bool {
    let safemode = getprop("persist.sys.safemode")
        .filter(|prop| prop == "1")
        .is_some()
        || getprop("ro.sys.safemode")
            .filter(|prop| prop == "1")
            .is_some();
    info!("safemode: {}", safemode);
    if safemode {
        return true;
    }
    let safemode = superkey
        .as_ref()
        .and_then(|key_str| CString::new(key_str.as_str()).ok())
        .map_or_else(
            || {
                warn!("[is_safe_mode] No valid superkey provided, assuming safemode as false.");
                false
            },
            |cstr| sc_su_get_safemode(&cstr) == 1,
        );
    info!("kernel_safemode: {}", safemode);
    safemode
}

/// Enter the mount namespace of `pid` via setns(2), preserving the current
/// working directory across the switch.
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn switch_mnt_ns(pid: i32) -> Result<()> {
    use std::os::fd::AsRawFd;

    use anyhow::ensure;

    let
path = format!("/proc/{pid}/ns/mnt");
    let fd = File::open(path)?;
    // Remember the cwd: setns() into another mount namespace invalidates it.
    let current_dir = std::env::current_dir();
    let ret = unsafe { libc::setns(fd.as_raw_fd(), libc::CLONE_NEWNS) };
    if let Result::Ok(current_dir) = current_dir {
        let _ = std::env::set_current_dir(current_dir);
    }
    ensure!(ret == 0, "switch mnt ns failed");
    Ok(())
}

// Best-effort: append `pid` to `grp`/cgroup.procs. Silently does nothing if
// the cgroup does not exist or the file cannot be opened.
fn switch_cgroup(grp: &str, pid: u32) {
    let path = Path::new(grp).join("cgroup.procs");
    if !path.exists() {
        return;
    }
    let fp = OpenOptions::new().append(true).open(path);
    if let Result::Ok(mut fp) = fp {
        let _ = write!(fp, "{pid}");
    }
}

// Move the current process into the standard Android cgroup hierarchies.
// The memcg move is skipped only when per-app memcg is explicitly "false".
pub fn switch_cgroups() {
    let pid = std::process::id();
    switch_cgroup("/acct", pid);
    switch_cgroup("/dev/cg2_bpf", pid);
    switch_cgroup("/sys/fs/cgroup", pid);
    if getprop("ro.config.per_app_memcg")
        .filter(|prop| prop == "false")
        .is_none()
    {
        switch_cgroup("/dev/memcg/apps", pid);
    }
}

// Set the process file-mode creation mask (umask(2)); previous mask is discarded.
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn umask(mask: u32) {
    unsafe { libc::umask(mask) };
}

#[cfg(not(any(target_os = "linux", target_os = "android")))]
pub fn umask(_mask: u32) {
    unimplemented!("umask is not supported on this platform")
}

// True when a `magisk` binary is reachable via PATH.
pub fn has_magisk() -> bool {
    which::which("magisk").is_ok()
}

// Pick the existing temp dir, preferring the legacy location; returns an
// empty string when neither exists.
pub fn get_tmp_path() -> &'static str {
    if metadata(defs::TEMP_DIR_LEGACY).is_ok() {
        return defs::TEMP_DIR_LEGACY;
    }
    if metadata(defs::TEMP_DIR).is_ok() {
        return defs::TEMP_DIR;
    }
    ""
}


================================================
FILE: app/.gitignore
================================================
/build
/release/
*.jks
*.keystore
keystore.properties


================================================
FILE: app/build.gradle.kts
================================================
@file:Suppress("UnstableApiUsage")

import com.android.build.gradle.tasks.PackageAndroidArtifact
import org.jetbrains.kotlin.gradle.dsl.JvmTarget
import java.net.URI
import java.util.Properties
import java.io.File
import java.io.FileInputStream

plugins {
    alias(libs.plugins.agp.app)
    alias(libs.plugins.kotlin)
    alias(libs.plugins.kotlin.compose.compiler)
    alias(libs.plugins.ksp)
    alias(libs.plugins.lsplugin.apksign)
    alias(libs.plugins.lsplugin.resopt)
    alias(libs.plugins.lsplugin.cmaker)
    id("kotlin-parcelize")
}

// Version numbers and shared build settings injected by the root build script.
val androidCompileSdkVersion: Int by rootProject.extra
val androidCompileNdkVersion: String by rootProject.extra
val androidBuildToolsVersion: String by rootProject.extra
val androidMinSdkVersion: Int by rootProject.extra
val androidTargetSdkVersion: Int by rootProject.extra
val androidSourceCompatibility: JavaVersion by rootProject.extra
val androidTargetCompatibility: JavaVersion by rootProject.extra
val managerVersionCode: Int by rootProject.extra
val managerVersionName: String by rootProject.extra
val branchName: String by rootProject.extra
val kernelPatchVersion: String by rootProject.extra

// Load keystore properties
val keystoreProperties = Properties()
val keystorePropertiesFile = rootProject.file("keystore.properties")
if (keystorePropertiesFile.exists()) {
    keystoreProperties.load(FileInputStream(keystorePropertiesFile))
}

// Load local properties
val localProperties = Properties()
val localPropertiesFile = rootProject.file("local.properties")
if (localPropertiesFile.exists()) {
    localProperties.load(FileInputStream(localPropertiesFile))
}

// Property names the apksign plugin resolves (from keystore.properties or env).
apksign {
    storeFileProperty = "KEYSTORE_FILE"
    storePasswordProperty = "KEYSTORE_PASSWORD"
    keyAliasProperty = "KEY_ALIAS"
    keyPasswordProperty = "KEY_PASSWORD"
}

// First `ccache` binary found on PATH (if any), used to speed up native builds.
val ccache = System.getenv("PATH")?.split(File.pathSeparator)
    ?.map { File(it, "ccache") }?.firstOrNull { it.exists() }?.absolutePath

// Compiler flags shared by every native target (debug and release).
val baseFlags = listOf(
    "-Wall",
    "-Qunused-arguments",
    "-fno-rtti",
    "-fvisibility=hidden",
    "-fvisibility-inlines-hidden",
    "-fno-exceptions",
    "-fno-stack-protector",
    "-fomit-frame-pointer",
    "-Wno-builtin-macro-redefined",
    "-Wno-unused-value",
    "-D__FILE__=__FILE_NAME__",
    "-DANDROID_SUPPORT_FLEXIBLE_PAGE_SIZES=ON",
    "-Wno-unused",
    "-Wno-unused-parameter",
    "-Wno-unused-command-line-argument",
    "-Wno-incompatible-function-pointer-types",
"-U_FORTIFY_SOURCE", "-D_FORTIFY_SOURCE=0" ) val baseArgs = mutableListOf( "-DANDROID_STL=none", "-DANDROID_SUPPORT_FLEXIBLE_PAGE_SIZES=ON", "-DCMAKE_CXX_STANDARD=23", "-DCMAKE_C_STANDARD=23", "-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON", "-DCMAKE_VISIBILITY_INLINES_HIDDEN=ON", "-DCMAKE_CXX_VISIBILITY_PRESET=hidden", "-DCMAKE_C_VISIBILITY_PRESET=hidden" ).apply { if (ccache != null) add("-DANDROID_CCACHE=$ccache") } android { namespace = "me.bmax.apatch" signingConfigs { create("release") { storeFile = file(keystoreProperties.getProperty("KEYSTORE_FILE") ?: "debug.keystore") storePassword = keystoreProperties.getProperty("KEYSTORE_PASSWORD") ?: "android" keyAlias = keystoreProperties.getProperty("KEY_ALIAS") ?: "androiddebugkey" keyPassword = keystoreProperties.getProperty("KEY_PASSWORD") ?: "android" enableV1Signing = true enableV2Signing = true enableV3Signing = true enableV4Signing = true } } buildTypes { debug { isDebuggable = true isMinifyEnabled = false isShrinkResources = false proguardFiles( getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro" ) externalNativeBuild { cmake { arguments += listOf("-DCMAKE_CXX_FLAGS_DEBUG=-Og", "-DCMAKE_C_FLAGS_DEBUG=-Og") } } } release { isMinifyEnabled = true isShrinkResources = true isDebuggable = false multiDexEnabled = true vcsInfo.include = false if (keystorePropertiesFile.exists()) { signingConfig = signingConfigs.getByName("release") } proguardFiles( getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro" ) externalNativeBuild { cmake { val relFlags = listOf( "-flto", "-ffunction-sections", "-fdata-sections", "-Wl,--gc-sections", "-fno-unwind-tables", "-fno-asynchronous-unwind-tables", "-Wl,--exclude-libs,ALL", "-Ofast", "-fmerge-all-constants", "-flto=full", "-ffat-lto-objects", "-fno-semantic-interposition", "-fno-threadsafe-statics" ) cppFlags += relFlags cFlags += relFlags arguments += listOf("-DCMAKE_BUILD_TYPE=Release", "-DCMAKE_CXX_FLAGS_RELEASE=-O3 -DNDEBUG", 
"-DCMAKE_C_FLAGS_RELEASE=-O3 -DNDEBUG") } } } } dependenciesInfo.includeInApk = false buildFeatures { aidl = true buildConfig = true compose = true prefab = true } defaultConfig { applicationId = "me.yuki.folk" minSdk = androidMinSdkVersion targetSdk = androidTargetSdkVersion versionCode = managerVersionCode versionName = managerVersionName buildConfigField("String", "buildKPV", "\"$kernelPatchVersion\"") buildConfigField("boolean", "DEBUG_FAKE_ROOT", localProperties.getProperty("debug.fake_root", "false")) base.archivesName = "FolkPatch_${managerVersionCode}_${managerVersionName}_on_${branchName}" ndk.abiFilters.addAll(arrayOf("arm64-v8a")) externalNativeBuild { cmake { cppFlags += baseFlags + "-std=c++2b" cFlags += baseFlags + "-std=c2x" arguments += baseArgs // Pass Token and Signature Hash to CMake val authProps = Properties() val authFile = rootProject.file("auth.properties") if (authFile.exists()) { authProps.load(FileInputStream(authFile)) } val token = authProps.getProperty("api.token", "") val signatureHash = authProps.getProperty("app.signature.hash", "") // Pass to C++ compiler directly via flags // Only add flags if values are non-empty to avoid compiler errors if (token.isNotEmpty()) { cppFlags += "-DAPI_TOKEN=\"$token\"" } if (signatureHash.isNotEmpty()) { cppFlags += "-DAPP_SIGNATURE_HASH=\"$signatureHash\"" } cppFlags += "-DAPP_PACKAGE_NAME=\"$applicationId\"" abiFilters("arm64-v8a") } } } compileOptions { sourceCompatibility = JavaVersion.VERSION_21 targetCompatibility = JavaVersion.VERSION_21 } packaging { jniLibs { useLegacyPackaging = true } resources { excludes += "**" merges += "META-INF/com/google/android/**" } } externalNativeBuild { cmake { version = "3.28.0+" path("src/main/cpp/CMakeLists.txt") } } androidResources { generateLocaleConfig = true } compileSdk = androidCompileSdkVersion ndkVersion = androidCompileNdkVersion buildToolsVersion = androidBuildToolsVersion lint { abortOnError = false checkReleaseBuilds = false } 
android.sourceSets.named("main") { kotlin.directories += "build/generated/ksp/$name/kotlin" jniLibs.directories += "libs" } } // https://stackoverflow.com/a/77745844 tasks.withType { doFirst { appMetadata.asFile.orNull?.writeText("") } } java { toolchain { languageVersion = JavaLanguageVersion.of(21) } } kotlin { jvmToolchain(21) compilerOptions { jvmTarget = JvmTarget.JVM_21 } } fun registerDownloadTask( taskName: String, srcUrl: String, destPath: String, project: Project, version: String? = null ) { project.tasks.register(taskName) { val destFile = File(destPath) val versionFile = File("$destPath.version") doLast { var forceDownload = false if (version != null) { if (!versionFile.exists() || versionFile.readText().trim() != version) { forceDownload = true } } if (!destFile.exists() || forceDownload || isFileUpdated(srcUrl, destFile)) { println(" - Downloading $srcUrl to ${destFile.absolutePath}") downloadFile(srcUrl, destFile) if (version != null) { versionFile.writeText(version) } println(" - Download completed.") } else { println(" - File is up-to-date, skipping download.") } } } } fun isFileUpdated(url: String, localFile: File): Boolean { val connection = URI.create(url).toURL().openConnection() val remoteLastModified = connection.getHeaderFieldDate("Last-Modified", 0L) return remoteLastModified > localFile.lastModified() } fun downloadFile(url: String, destFile: File) { URI.create(url).toURL().openStream().use { input -> destFile.outputStream().use { output -> input.copyTo(output) } } } registerDownloadTask( taskName = "downloadKpimg", srcUrl = "https://github.com/LyraVoid/KernelPatch/releases/download/$kernelPatchVersion/kpimg-android", destPath = "${project.projectDir}/src/main/assets/kpimg", project = project, version = kernelPatchVersion ) registerDownloadTask( taskName = "downloadKptools", srcUrl = "https://github.com/LyraVoid/KernelPatch/releases/download/$kernelPatchVersion/kptools-android", destPath = 
"${project.projectDir}/libs/arm64-v8a/libkptools.so", project = project, version = kernelPatchVersion ) // Compat kp version less than 0.10.7 // TODO: Remove in future registerDownloadTask( taskName = "downloadCompatKpatch", srcUrl = "https://github.com/bmax121/KernelPatch/releases/download/0.10.7/kpatch-android", destPath = "${project.projectDir}/libs/arm64-v8a/libkpatch.so", project = project, version = "0.10.7" ) tasks.register("mergeScripts") { into("${project.projectDir}/src/main/resources/META-INF/com/google/android") from(rootProject.file("${project.rootDir}/scripts/update_binary.sh")) { rename { "update-binary" } } from(rootProject.file("${project.rootDir}/scripts/update_script.sh")) { rename { "updater-script" } } } // Build fpd (FolkPatch service binary) for arm64 tasks.register("buildFpd") { executable("cargo") args("ndk", "-t", "arm64-v8a", "build", "--release") workingDir("${project.rootDir}/fpd") doFirst { println("Building fpd for arm64...") } doLast { val fpdBinary = file("${project.rootDir}/fpd/target/aarch64-linux-android/release/fpd") val serviceDir = file("src/main/assets/Service") serviceDir.mkdirs() fpdBinary.copyTo(file("${serviceDir}/fpd"), overwrite = true) println("fpd binary built and copied to Service/fpd") } } tasks.getByName("preBuild").dependsOn( "downloadKpimg", "downloadKptools", "downloadCompatKpatch", "mergeScripts", "buildFpd", ) // https://github.com/bbqsrc/cargo-ndk // cargo ndk -t arm64-v8a build --release tasks.register("cargoBuild") { executable("cargo") args("ndk", "-t", "arm64-v8a", "build", "--release") workingDir("${project.rootDir}/apd") environment("APATCH_VERSION_CODE", "${managerVersionCode}") environment("APATCH_VERSION_NAME", "${managerVersionCode}-Matsuzaka-yuki") } tasks.register("buildApd") { dependsOn("cargoBuild") from("${project.rootDir}/apd/target/aarch64-linux-android/release/apd") into("${project.projectDir}/libs/arm64-v8a") rename("apd", "libapd.so") } tasks.configureEach { if (name == 
"mergeDebugJniLibFolders" || name == "mergeReleaseJniLibFolders") { dependsOn("buildApd") } } tasks.register("cargoClean") { executable("cargo") args("clean") workingDir("${project.rootDir}/apd") } tasks.register("apdClean") { dependsOn("cargoClean") delete(file("${project.projectDir}/libs/arm64-v8a/libapd.so")) } tasks.clean { dependsOn("apdClean") } ksp { arg("compose-destinations.defaultTransitions", "none") } dependencies { implementation(libs.androidx.appcompat) implementation(libs.androidx.activity.compose) implementation(libs.androidx.core.splashscreen) implementation(libs.androidx.webkit) implementation(libs.androidx.biometric) implementation(platform(libs.androidx.compose.bom)) implementation(libs.androidx.compose.material.icons.extended) implementation(libs.androidx.compose.material) implementation(libs.androidx.compose.material3) implementation("androidx.compose.material3:material3-android:1.5.0-alpha17") implementation(libs.androidx.compose.ui) implementation(libs.androidx.compose.ui.tooling.preview) implementation(libs.androidx.compose.runtime.livedata) debugImplementation(libs.androidx.compose.ui.test.manifest) debugImplementation(libs.androidx.compose.ui.tooling) implementation(libs.androidx.lifecycle.runtime.compose) implementation(libs.androidx.lifecycle.runtime.ktx) implementation(libs.androidx.lifecycle.viewmodel.compose) implementation("androidx.lifecycle:lifecycle-process:2.8.7") implementation(libs.compose.destinations.core) ksp(libs.compose.destinations.ksp) implementation(libs.com.github.topjohnwu.libsu.core) implementation(libs.com.github.topjohnwu.libsu.service) implementation(libs.com.github.topjohnwu.libsu.nio) implementation(libs.com.github.topjohnwu.libsu.io) implementation(libs.dev.rikka.rikkax.parcelablelist) implementation(libs.io.coil.kt.coil.compose) implementation(libs.io.coil.kt.coil.gif) implementation(libs.kotlinx.coroutines.core) implementation(libs.me.zhanghai.android.appiconloader.coil) 
implementation(libs.sheet.compose.dialogs.core) implementation(libs.sheet.compose.dialogs.list) implementation(libs.sheet.compose.dialogs.input) implementation(libs.markdown) implementation(libs.ini4j) implementation(libs.google.code.gson) implementation(libs.liquid) compileOnly(libs.cxx) } ================================================ FILE: app/libs/arm64-v8a/.gitignore ================================================ libkptools.so libapjni.so libkpatch.so libapd.so ================================================ FILE: app/libs/arm64-v8a/libkpatch.so.version ================================================ 0.10.7 ================================================ FILE: app/libs/arm64-v8a/libkptools.so.version ================================================ 0.13.1 ================================================ FILE: app/proguard-rules.pro ================================================ -dontwarn org.bouncycastle.jsse.BCSSLParameters -dontwarn org.bouncycastle.jsse.BCSSLSocket -dontwarn org.bouncycastle.jsse.provider.BouncyCastleJsseProvider -dontwarn org.conscrypt.Conscrypt$Version -dontwarn org.conscrypt.Conscrypt -dontwarn org.conscrypt.ConscryptHostnameVerifier -dontwarn org.openjsse.javax.net.ssl.SSLParameters -dontwarn org.openjsse.javax.net.ssl.SSLSocket -dontwarn org.openjsse.net.ssl.OpenJSSE -dontwarn java.beans.Introspector -dontwarn java.beans.VetoableChangeListener -dontwarn java.beans.VetoableChangeSupport -dontwarn java.beans.BeanInfo -dontwarn java.beans.IntrospectionException -dontwarn java.beans.PropertyDescriptor # Keep ini4j Service Provider Interface -keep,allowobfuscation,allowoptimization class org.ini4j.spi.** { *; } # Keep native methods and JNI classes -keep class me.bmax.apatch.Natives { *; } -keepclasseswithmembernames class * { native ; } -keep class me.bmax.apatch.Natives$Profile { *; } -keep class me.bmax.apatch.Natives$KPMCtlRes { *; } # Keep RootServices -keep class me.bmax.apatch.services.RootServices { *; } # Keep AIDL 
interfaces -keep class me.bmax.apatch.IAPRootService { *; } -keep class me.bmax.apatch.IAPRootService$Stub { *; } -keep class rikka.parcelablelist.ParcelableListSlice { *; } # Keep ScriptInfo for Gson serialization in release -keep class me.bmax.apatch.data.ScriptInfo { *; } -keepclassmembers class me.bmax.apatch.data.ScriptInfo { *; } # Gson -keepattributes Signature -keepattributes *Annotation* -keep class sun.misc.Unsafe { *; } -keep class com.google.gson.** { *; } -keep class * extends com.google.gson.reflect.TypeToken # Kotlin -assumenosideeffects class kotlin.jvm.internal.Intrinsics { public static void check*(...); public static void throw*(...); } # Keep Umount configuration classes -keep class me.bmax.apatch.ui.component.UmountConfig { *; } -keep class me.bmax.apatch.ui.component.UmountConfigManager { *; } -keep class me.bmax.apatch.ui.screen.UmountConfigScreen { *; } -keepclassmembers class me.bmax.apatch.ui.component.UmountConfigManager { public static *; } -keepclassmembers class me.bmax.apatch.ui.component.UmountConfig { public (boolean, java.lang.String); } # Keep Umount destination -keep class me.bmax.apatch.ui.screen.destinations.UmountConfigScreenDestination { *; } -repackageclasses -allowaccessmodification -overloadaggressively -renamesourcefileattribute SourceFile ================================================ FILE: app/src/main/AndroidManifest.xml ================================================ ================================================ FILE: app/src/main/aidl/me/bmax/apatch/IAPRootService.aidl ================================================ // IAPRootService.aidl package me.bmax.apatch; import android.content.pm.PackageInfo; import rikka.parcelablelist.ParcelableListSlice; interface IAPRootService { ParcelableListSlice getPackages(int flags); } ================================================ FILE: app/src/main/assets/.gitignore ================================================ kpimg *.kpm 
================================================ FILE: app/src/main/assets/InstallAP.sh ================================================ #!/bin/sh # By SakuraKyuo OUTFD=/proc/self/fd/$2 function ui_print() { echo -e "ui_print $1\nui_print" >> $OUTFD } function ui_printfile() { while IFS='' read -r line || $BB [[ -n "$line" ]]; do ui_print "$line"; done < $1; } function kernelFlagsErr(){ ui_print "- Installation has Aborted!" ui_print "- APatch requires CONFIG_KALLSYMS to be Enabled." ui_print "- But your kernel seems NOT enabled it." exit } function apatchNote(){ ui_print "- APatch Patch Done" ui_print "- APatch Key is: Ap$skey" ui_print "- We do have saved Origin Boot image to /data" ui_print "- If you encounter bootloop, reboot into Recovery and flash it" exit } function failed(){ ui_printfile /dev/tmp/install/log ui_print "- APatch Patch Failed." ui_print "- Please feedback to the developer with the screenshots." exit } function boot_execute_ab(){ ./lib/arm64-v8a/libkptools.so unpack boot.img if [[ ! $(./lib/arm64-v8a/libkptools.so -i ./kernel -f | grep CONFIG_KALLSYMS=y) ]]; then kernelFlagsErr fi mv kernel kernel-origin ./lib/arm64-v8a/libkptools.so -p --image kernel-origin --skey "Ap$skey" --kpimg ./assets/kpimg --out ./kernel 2>&1 | tee /dev/tmp/install/log if [[ ! $(cat /dev/tmp/install/log | grep "patch done") ]]; then failed fi ui_printfile /dev/tmp/install/log ./lib/arm64-v8a/libkptools.so repack boot.img dd if=/dev/tmp/install/new-boot.img of=/dev/block/by-name/boot$slot mv boot.img /data/boot.img apatchNote } function boot_execute(){ ./lib/arm64-v8a/libkptools.so unpack boot.img if [[ ! $(./lib/arm64-v8a/libkptools.so -i ./kernel -f | grep CONFIG_KALLSYMS=y) ]]; then kernelFlagsErr fi mv kernel kernel-origin ./lib/arm64-v8a/libkptools.so -p --image kernel-origin --skey "Ap$skey" --kpimg ./assets/kpimg --out ./kernel 2>&1 | tee /dev/tmp/install/log if [[ ! 
$(cat /dev/tmp/install/log | grep "patch done") ]]; then failed fi ui_printfile /dev/tmp/install/log ./lib/arm64-v8a/libkptools.so repack boot.img dd if=/dev/tmp/install/new-boot.img of=/dev/block/by-name/boot$slot mv boot.img /data/boot.img apatchNote } function main(){ cd /dev/tmp/install chmod a+x ./assets/kpimg chmod a+x ./lib/arm64-v8a/libkptools.so slot=$(getprop ro.boot.slot_suffix) skey=$(cat /proc/sys/kernel/random/uuid | cut -d \- -f1) if [[ ! "$slot" == "" ]]; then ui_print "" ui_print "- You are using A/B device." # Script author ui_print "- Install Script by SakuraKyuo" # Get kernel ui_print "" dd if=/dev/block/by-name/boot$slot of=/dev/tmp/install/boot.img if [[ "$?" == 0 ]]; then ui_print "- Detected boot partition." boot_execute_ab fi else ui_print "You are using A Only device." # Get kernel ui_print "" dd if=/dev/block/by-name/boot of=/dev/tmp/install/boot.img if [[ "$?" == 0 ]]; then ui_print "- Detected boot partition." boot_execute fi fi } main ================================================ FILE: app/src/main/assets/UninstallAP.sh ================================================ #!/bin/sh # By SakuraKyuo OUTFD=/proc/self/fd/$2 function ui_print() { echo -e "ui_print $1\nui_print" >> $OUTFD } function ui_printfile() { while IFS='' read -r line || $BB [[ -n "$line" ]]; do ui_print "$line"; done < $1; } function apatchNote(){ ui_print "- APatch Unpatch Done" exit } function failed(){ ui_print "- APatch Unpatch Failed." ui_print "- Please feedback to the developer with the screenshots." exit } function boot_execute_ab(){ ./lib/arm64-v8a/libkptools.so unpack boot.img mv kernel kernel-origin ./lib/arm64-v8a/libkptools.so -u --image kernel-origin --out ./kernel if [[ ! "$?" 
== 0 ]]; then failed fi ./lib/arm64-v8a/libkptools.so repack boot.img dd if=/dev/tmp/install/new-boot.img of=/dev/block/by-name/boot$slot apatchNote } function boot_execute(){ ./lib/arm64-v8a/libkptools.so unpack boot.img mv kernel kernel-origin ./lib/arm64-v8a/libkptools.so -u --image kernel-origin --out ./kernel if [[ ! "$?" == 0 ]]; then failed fi ./lib/arm64-v8a/libkptools.so repack boot.img dd if=/dev/tmp/install/new-boot.img of=/dev/block/by-name/boot apatchNote } function main(){ cd /dev/tmp/install chmod a+x ./lib/arm64-v8a/libkptools.so slot=$(getprop ro.boot.slot_suffix) if [[ ! "$slot" == "" ]]; then ui_print "" ui_print "- You are using A/B device." # Get kernel ui_print "" dd if=/dev/block/by-name/boot$slot of=/dev/tmp/install/boot.img if [[ "$?" == 0 ]]; then ui_print "- Detected boot partition." boot_execute_ab fi else ui_print "You are using A Only device." # Get kernel ui_print "" dd if=/dev/block/by-name/boot of=/dev/tmp/install/boot.img if [[ "$?" == 0 ]]; then ui_print "- Detected boot partition." boot_execute fi fi } main ================================================ FILE: app/src/main/assets/boot_extract.sh ================================================ #!/system/bin/sh ARCH=$(getprop ro.product.cpu.abi) IS_INSTALL_NEXT_SLOT=$1 # Load utility functions . ./util_functions.sh if [ "$IS_INSTALL_NEXT_SLOT" = "true" ]; then get_next_slot else get_current_slot fi find_boot_image [ -e "$BOOTIMAGE" ] || { >&2 echo "- can't find boot.img!"; exit 1; } true ================================================ FILE: app/src/main/assets/boot_flash.sh ================================================ #!/system/bin/sh ####################################################################################### # APatch Boot Image Flasher ####################################################################################### ARCH=$(getprop ro.product.cpu.abi) # Load utility functions . 
./util_functions.sh echo "****************************" echo " FolkPatch Boot Image Flasher" echo "****************************" BOOTIMAGE=$1 SOURCE_IMAGE=$2 [ -e "$SOURCE_IMAGE" ] || { echo "- $SOURCE_IMAGE does not exist!"; exit 1; } echo "- Target image: $BOOTIMAGE" echo "- Source image: $SOURCE_IMAGE" echo "- Flashing boot image" flash_image "$SOURCE_IMAGE" "$BOOTIMAGE" if [ $? -ne 0 ]; then >&2 echo "- Flash error: $?" exit $? fi echo "- Flash successful" ================================================ FILE: app/src/main/assets/boot_patch.sh ================================================ #!/system/bin/sh ####################################################################################### # APatch Boot Image Patcher ####################################################################################### # # Usage: boot_patch.sh [ARGS_PASS_TO_KPTOOLS] # # This script should be placed in a directory with the following files: # # File name Type Description # # boot_patch.sh script A script to patch boot image for APatch. # (this file) The script will use files in its same # directory to complete the patching process. # bootimg binary The target boot image # kpimg binary KernelPatch core Image # kptools executable The KernelPatch tools binary to inject kpimg to kernel Image # ####################################################################################### ARCH=$(getprop ro.product.cpu.abi) # Load utility functions . ./util_functions.sh echo "****************************" echo " FolkPatch Boot Image Patcher" echo "****************************" SUPERKEY="$1" BOOTIMAGE=$2 FLASH_TO_DEVICE=$3 shift 2 [ -z "$SUPERKEY" ] && { >&2 echo "- SuperKey empty!"; exit 1; } [ -e "$BOOTIMAGE" ] || { >&2 echo "- $BOOTIMAGE does not exist!"; exit 1; } # Check for dependencies command -v ./kptools >/dev/null 2>&1 || { >&2 echo "- Command kptools not found!"; exit 1; } if [ ! 
-f kernel ]; then echo "- Unpacking boot image" set -x ./kptools unpack "$BOOTIMAGE" "$@" patch_rc=$? set +x if [ $patch_rc -ne 0 ]; then >&2 echo "- Unpack error: $patch_rc" exit $? fi fi if [ ! $(./kptools -i kernel -f | grep CONFIG_KALLSYMS=y) ]; then echo "- Patcher has Aborted!" echo "- APatch requires CONFIG_KALLSYMS to be Enabled." echo "- But your kernel seems NOT enabled it." exit 0 fi if [ $(./kptools -i kernel -l | grep patched=false) ]; then echo "- Backing boot.img " cp "$BOOTIMAGE" "ori.img" >/dev/null 2>&1 fi mv kernel kernel.ori echo "- Patching kernel" set -x ./kptools -p -i kernel.ori -S "$SUPERKEY" -k kpimg -o kernel "$@" patch_rc=$? set +x if [ $patch_rc -ne 0 ]; then >&2 echo "- Patch kernel error: $patch_rc" exit $? fi echo "- Repacking boot image" ./kptools repack "$BOOTIMAGE" if [ ! $(./kptools -i kernel.ori -f | grep CONFIG_KALLSYMS_ALL=y) ]; then echo "- Detected CONFIG_KALLSYMS_ALL is not set!" echo "- APatch has patched but maybe your device won't boot." echo "- Make sure you have original boot image backup." fi if [ $? -ne 0 ]; then >&2 echo "- Repack error: $?" exit $? fi if [ "$FLASH_TO_DEVICE" = "true" ]; then # flash if [ -b "$BOOTIMAGE" ] || [ -c "$BOOTIMAGE" ] && [ -f "new-boot.img" ]; then echo "- Flashing new boot image" flash_image new-boot.img "$BOOTIMAGE" if [ $? -ne 0 ]; then >&2 echo "- Flash error: $?" exit $? fi fi echo "- Successfully Flashed!" else echo "- Successfully Patched!" fi ================================================ FILE: app/src/main/assets/boot_unpatch.sh ================================================ #!/system/bin/sh ####################################################################################### # APatch Boot Image Unpatcher ####################################################################################### ARCH=$(getprop ro.product.cpu.abi) # Load utility functions . 
./util_functions.sh echo "****************************" echo " FolkPatch Boot Image Unpatcher" echo "****************************" BOOTIMAGE=$1 [ -e "$BOOTIMAGE" ] || { echo "- $BOOTIMAGE does not exist!"; exit 1; } echo "- Target image: $BOOTIMAGE" # Check for dependencies command -v ./kptools >/dev/null 2>&1 || { echo "- Command kptools not found!"; exit 1; } if [ ! -f kernel ]; then echo "- Unpacking boot image" set -x ./kptools unpack "$BOOTIMAGE" "$@" patch_rc=$? if [ $patch_rc -ne 0 ]; then >&2 echo "- Unpack error: $patch_rc" exit $patch_rc fi fi if [ ! $(./kptools -i kernel -l | grep patched=false) ]; then echo "- kernel has been patched " if [ -f "new-boot.img" ]; then echo "- found backup boot.img ,use it for recovery" else mv kernel kernel.ori echo "- Unpatching kernel" ./kptools -u --image kernel.ori --out kernel "$@" if [ $? -ne 0 ]; then >&2 echo "- Unpatch error: $?" exit $? fi echo "- Repacking boot image" ./kptools repack "$BOOTIMAGE" if [ $? -ne 0 ]; then >&2 echo "- Repack error: $?" exit $? fi fi else echo "- no need unpatch" exit 0 fi if [ -f "new-boot.img" ]; then echo "- Flashing boot image" flash_image new-boot.img "$BOOTIMAGE" if [ $? -ne 0 ]; then >&2 echo "- Flash error: $?" exit $? 
fi fi echo "- Flash successful" # Reset any error code true ================================================ FILE: app/src/main/assets/kpimg.version ================================================ 0.13.1 ================================================ FILE: app/src/main/assets/util_functions.sh ================================================ #!/system/bin/sh ####################################################################################### # Helper Functions (credits to topjohnwu) ####################################################################################### APATCH_VER='0.10.4' APATCH_VER_CODE=12146 ui_print() { if $BOOTMODE; then echo "$1" else echo -e "ui_print $1\nui_print" >> /proc/self/fd/$OUTFD fi } toupper() { echo "$@" | tr '[:lower:]' '[:upper:]' } grep_cmdline() { local REGEX="s/^$1=//p" { echo $(cat /proc/cmdline)$(sed -e 's/[^"]//g' -e 's/""//g' /proc/cmdline) | xargs -n 1; \ sed -e 's/ = /=/g' -e 's/, /,/g' -e 's/"//g' /proc/bootconfig; \ } 2>/dev/null | sed -n "$REGEX" } grep_prop() { local REGEX="s/^$1=//p" shift local FILES=$@ [ -z "$FILES" ] && FILES='/system/build.prop' cat $FILES 2>/dev/null | dos2unix | sed -n "$REGEX" | head -n 1 } getvar() { local VARNAME=$1 local VALUE local PROPPATH='/data/.magisk /cache/.magisk' [ ! -z $MAGISKTMP ] && PROPPATH="$MAGISKTMP/.magisk/config $PROPPATH" VALUE=$(grep_prop $VARNAME $PROPPATH) [ ! -z $VALUE ] && eval $VARNAME=\$VALUE } is_mounted() { grep -q " $(readlink -f $1) " /proc/mounts 2>/dev/null return $? } abort() { ui_print "$1" $BOOTMODE || recovery_cleanup [ ! 
-z $MODPATH ] && rm -rf $MODPATH rm -rf $TMPDIR exit 1 } set_nvbase() { NVBASE="$1" MAGISKBIN="$1/magisk" } print_title() { local len line1len line2len bar line1len=$(echo -n $1 | wc -c) line2len=$(echo -n $2 | wc -c) len=$line2len [ $line1len -gt $line2len ] && len=$line1len len=$((len + 2)) bar=$(printf "%${len}s" | tr ' ' '*') ui_print "$bar" ui_print " $1 " [ "$2" ] && ui_print " $2 " ui_print "$bar" } setup_flashable() { ensure_bb $BOOTMODE && return if [ -z $OUTFD ] || readlink /proc/$$/fd/$OUTFD | grep -q /tmp; then # We will have to manually find out OUTFD for FD in $(ls /proc/$$/fd); do if readlink /proc/$$/fd/$FD | grep -q pipe; then if ps | grep -v grep | grep -qE " 3 $FD |status_fd=$FD"; then OUTFD=$FD break fi fi done fi recovery_actions } ensure_bb() { if set -o | grep -q standalone; then # We are definitely in busybox ash set -o standalone return fi # Find our busybox binary local bb if [ -f $TMPDIR/busybox ]; then bb=$TMPDIR/busybox elif [ -f $MAGISKBIN/busybox ]; then bb=$MAGISKBIN/busybox else abort "! Cannot find BusyBox" fi chmod 755 $bb # Busybox could be a script, make sure /system/bin/sh exists if [ ! -f /system/bin/sh ]; then umount -l /system 2>/dev/null mkdir -p /system/bin ln -s $(command -v sh) /system/bin/sh fi export ASH_STANDALONE=1 # Find our current arguments # Run in busybox environment to ensure consistent results # /proc//cmdline shall be