Repository: luicfrr/react-native-vision-camera-face-detector Branch: main Commit: f392e3f25b70 Files: 46 Total size: 118.9 KB Directory structure: gitextract_hbn6p4vk/ ├── .github/ │ └── ISSUE_TEMPLATE/ │ ├── but_report.yml │ ├── config.yml │ └── feature_request.md ├── .gitignore ├── .yarnrc ├── LICENSE ├── README.md ├── VisionCameraFaceDetector.podspec ├── android/ │ ├── build.gradle │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── src/ │ └── main/ │ ├── AndroidManifest.xml │ └── java/ │ └── com/ │ └── visioncamerafacedetector/ │ ├── FaceDetectorCommon.kt │ ├── ImageFaceDetectorModule.kt │ ├── VisionCameraFaceDetectorOrientation.kt │ ├── VisionCameraFaceDetectorPlugin.kt │ └── VisionCameraFaceDetectorPluginPackage.kt ├── babel.config.js ├── example/ │ ├── README.md │ ├── app.config.js │ ├── babel.config.js │ ├── index.js │ ├── metro.config.js │ ├── package.json │ ├── src/ │ │ └── index.tsx │ ├── tsconfig.dev.json │ └── tsconfig.json ├── ios/ │ ├── FaceDetectorCommon.swift │ ├── ImageFaceDetectorModule.m │ ├── ImageFaceDetectorModule.swift │ ├── VisionCameraFaceDetector-Bridging-Header.h │ ├── VisionCameraFaceDetector.m │ ├── VisionCameraFaceDetector.swift │ ├── VisionCameraFaceDetector.xcodeproj/ │ │ ├── project.pbxproj │ │ └── project.xcworkspace/ │ │ └── xcshareddata/ │ │ └── IDEWorkspaceChecks.plist │ └── VisionCameraFaceDetectorOrientation.swift ├── package.json ├── scripts/ │ └── bootstrap.js ├── src/ │ ├── Camera.tsx │ ├── FaceDetector.ts │ ├── ImageFaceDetector.ts │ └── index.ts ├── tsconfig.build.json └── tsconfig.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/ISSUE_TEMPLATE/but_report.yml ================================================ name: 🐛 Bug Report description: File a bug report title: '🐛 ' labels: [🐛 bug] body: - type: textarea attributes: label: What's happening? description: Explain what you are trying to do and what happened instead. Be as precise as possible, I can't help you if I don't understand your issue. placeholder: I wanted to take a picture, but the method failed with this error "[capture/photo-not-enabled] Failed to take photo, photo is not enabled!" validations: required: true - type: textarea attributes: label: Reproducible Code description: > Share a small reproducible code snippet here (or the entire file if necessary). Most importantly, share how you use the `<Camera>` component and what props you pass to it. This will be automatically formatted into code, so no need for backticks. render: tsx placeholder: > const faceDetectionOptions = useRef( { } ).current // ... validations: required: true - type: textarea attributes: label: Relevant log output description: > Paste any relevant **native log output** (Xcode Logs/Android Studio Logcat) here. This will be automatically formatted into code, so no need for backticks. * For iOS, run the project through Xcode and copy the logs from the log window. * For Android, either open the project through Android Studio and paste the logs from the logcat window, or run `adb logcat` in terminal. render: shell placeholder: > 09:03:46 I ReactNativeJS: Running "FaceDetectorExample" with {"rootTag":11} 09:03:47 I ReactNativeJS: Re-rendering App. Camera: undefined | Microphone: undefined 09:03:47 I VisionCamera: Installing JSI bindings... 09:03:47 I VisionCamera: Finished installing JSI bindings! ...
validations: required: true - type: input attributes: label: Device description: > Which device are you seeing this problem on? Mention the full name of the phone, as well as the operating system and version. If you have tested this on multiple devices (ex. Android and iOS) then mention all of those devices (comma separated) placeholder: ex. iPhone 11 Pro (iOS 14.3), Galaxy S24 (Android 16) validations: required: true - type: input attributes: label: VisionCamera Version description: Which version of react-native-vision-camera are you using? placeholder: ex. 4.7.2 validations: required: true - type: input attributes: label: VisionCameraFaceDetector Version description: Which version of react-native-vision-camera-face-detector are you using? placeholder: ex. 1.10.1 validations: required: true - type: dropdown attributes: label: Can you reproduce this issue in the VisionCameraFaceDetector Example app? description: > Try to build the example app (`example/`) and see if the issue is reproducible there. **Note:** If you don't try this in the example app, I most likely won't help you with your issue. options: - I didn't try (⚠️ your issue might get ignored & closed if you don't try this) - Yes, I can reproduce the same issue in the Example app here - No, I cannot reproduce the issue in the Example app default: 0 validations: required: true - type: checkboxes attributes: label: Additional information description: Please check all the boxes that apply options: - label: I am using Expo - label: I have enabled Frame Processors (react-native-worklets-core) - label: I have read the [VisionCamera's Troubleshooting Guide](https://react-native-vision-camera.com/docs/guides/troubleshooting) required: true - label: I have read the entire [VisionCameraFaceDetector README file](https://github.com/luicfrr/react-native-vision-camera-face-detector) required: true - label: I searched for [similar issues in this repository](https://github.com/luicfrr/react-native-vision-camera-face-detector/issues) and found none. required: true - label: I understand this is an open-source project and that I am not paying anything to use this package, so I do not expect an urgent fix, a custom feature, or a tutorial on how to do something. required: true ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: "[FEAT ✨] Replace this with your title" labels: '' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. **Describe the solution you'd like** A clear and concise description of what you want to happen. **Additional context** Add any other context, logs or screenshots about the feature request here.
================================================ FILE: .gitignore ================================================ # OSX # .DS_Store # node.js # node_modules/ npm-debug.log yarn-error.log lib/ # Android/IntelliJ # build/ .idea .gradle local.properties *.iml *.hprof .cxx/ *.keystore !debug.keystore # Xcode # build/ *.pbxuser !default.pbxuser *.mode1v3 !default.mode1v3 *.mode2v3 !default.mode2v3 *.perspectivev3 !default.perspectivev3 xcuserdata *.xccheckout *.moved-aside DerivedData *.hmap *.ipa *.xcuserstate ios/.xcode.env.local # Example # example/android example/ios example/.expo ================================================ FILE: .yarnrc ================================================ # Override Yarn command so we can automatically setup the repo on running `yarn` yarn-path "scripts/bootstrap.js" ================================================ FILE: LICENSE ================================================ MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ ## 📚 Introduction `react-native-vision-camera-face-detector` is a React Native library that integrates with the Vision Camera module to provide face detection functionality. It allows you to easily detect faces in real time using the device's front or back camera. It also supports face detection on static images (thanks to @XChikuX). Is this package useful to you? Buy Me A Coffee or give it a ⭐ on [GitHub](https://github.com/luicfrr/react-native-vision-camera-face-detector). ## 🏗️ Features - Real-time face detection using the front or back camera - Adjustable face detection settings - Optional native-side auto scaling of face bounds, contours and landmarks - Can be combined with the [Skia Frame Processor](https://react-native-vision-camera.com/docs/guides/skia-frame-processors) ## 🧰 Installation ```bash yarn add react-native-vision-camera-face-detector ``` Then you need to add the `react-native-worklets-core` plugin to `babel.config.js`. More details [here](https://react-native-vision-camera.com/docs/guides/frame-processors#react-native-worklets-core). ## 🪲 Known Bugs There are open issues ([here](https://github.com/mrousavy/react-native-vision-camera/issues/3362), [here](https://github.com/mrousavy/react-native-vision-camera/issues/3034), and [here](https://github.com/mrousavy/react-native-vision-camera/issues/2951)) about a bug in the Skia Frame Processor that may cause a black screen on some Android devices.
This bug can be easily fixed with [this trick](https://github.com/mrousavy/react-native-vision-camera/issues/3362#issuecomment-2624299305), but it causes frame drawings to render in the wrong orientation. ## 💡 Usage Recommended way (see the [Example App](https://github.com/luicfrr/react-native-vision-camera-face-detector/blob/main/example/src/index.tsx) for Skia usage): ```jsx import { StyleSheet, Text, View } from 'react-native' import { useEffect, useState, useRef } from 'react' import { Frame, useCameraDevice } from 'react-native-vision-camera' import { Face, Camera, FaceDetectionOptions } from 'react-native-vision-camera-face-detector' export default function App() { const faceDetectionOptions = useRef<FaceDetectionOptions>( { // detection options } ).current const device = useCameraDevice('front') useEffect(() => { (async () => { const status = await Camera.requestCameraPermission() console.log({ status }) })() }, [device]) function handleFacesDetection( faces: Face[], frame: Frame ) { console.log( 'faces', faces.length, 'frame', frame.toString() ) } return ( <View style={ { flex: 1 } }> { !!device ? <Camera style={ StyleSheet.absoluteFill } device={ device } isActive={ true } faceDetectionCallback={ handleFacesDetection } faceDetectionOptions={ faceDetectionOptions } /> : <Text> No Device </Text> } </View> ) } ``` Or use it following [vision-camera docs](https://react-native-vision-camera.com/docs/guides/frame-processors-interacting): ```jsx import { StyleSheet, Text, View, NativeModules, Platform } from 'react-native' import { useEffect, useState, useRef } from 'react' import { Camera, runAsync, useCameraDevice, useFrameProcessor } from 'react-native-vision-camera' import { Face, useFaceDetector, FaceDetectionOptions } from 'react-native-vision-camera-face-detector' import { Worklets } from 'react-native-worklets-core' export default function App() { const faceDetectionOptions = useRef<FaceDetectionOptions>( { // detection options } ).current const device = useCameraDevice('front') const { detectFaces, stopListeners } = useFaceDetector( faceDetectionOptions ) useEffect( () => { return () => { // you must call `stopListeners` when the current component is unmounted stopListeners() } }, [] ) useEffect(() => { if(!device) { // you must call `stopListeners` when the `Camera` component is unmounted stopListeners() return } (async () => { const status = await Camera.requestCameraPermission() console.log({ status }) })() }, [device]) const handleDetectedFaces = Worklets.createRunOnJS( ( faces: Face[] ) => { console.log( 'faces detected', faces ) }) const frameProcessor = useFrameProcessor((frame) => { 'worklet' runAsync(frame, () => { 'worklet' const faces = detectFaces(frame) // ... chain some asynchronous frame processor // ... do something asynchronously with frame handleDetectedFaces(faces) }) // ... chain frame processors // ... do something with frame }, [handleDetectedFaces]) return ( <View style={ { flex: 1 } }> { !!device ? <Camera style={ StyleSheet.absoluteFill } device={ device } isActive={ true } frameProcessor={ frameProcessor } /> : <Text> No Device </Text> } </View> ) } ``` As face detection is a heavy process, you should run it on an asynchronous thread so it can finish without blocking your camera preview. You should read the `vision-camera` [docs](https://react-native-vision-camera.com/docs/guides/frame-processors-interacting#running-asynchronously) about this feature.
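If you prefer throttling over a fully separate thread, `vision-camera` also exposes `runAtTargetFps`. Here is a minimal sketch under the same setup as the hook example above (the `5` FPS target is an arbitrary value picked for illustration; the callback still runs synchronously on the frame processor thread, so keep the target low):

```tsx
import { runAtTargetFps, useFrameProcessor } from 'react-native-vision-camera'

// inside your component, assuming `detectFaces` comes from `useFaceDetector`
// and `handleDetectedFaces` was created with `Worklets.createRunOnJS`
const frameProcessor = useFrameProcessor((frame) => {
  'worklet'
  // run detection at most ~5 times per second instead of on every frame
  runAtTargetFps(5, () => {
    'worklet'
    const faces = detectFaces(frame)
    handleDetectedFaces(faces)
  })
}, [handleDetectedFaces])
```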
## 🖼️ Static Image Face Detection You can detect faces in static images without the camera (e.g. images picked from your gallery/files), or you can use it to detect faces in photos taken with the camera (see the [Example App](https://github.com/luicfrr/react-native-vision-camera-face-detector/blob/main/example/src/index.tsx)): Supported image sources: - Required assets (`require('path/to/file')`) - URI string (`file://`, `content://`, `http(s)://`) - Object (`{ uri: string }`) ```ts import { detectFaces, ImageFaceDetectionOptions } from 'react-native-vision-camera-face-detector' const detectionOptions: ImageFaceDetectionOptions = { // detection options } // Using a bundled asset const faces1 = await detectFaces({ image: require('./assets/photo.jpg'), options: detectionOptions }) // Using a local file path or content URI (e.g. from an image picker) const faces2 = await detectFaces({ image: 'file:///storage/emulated/0/Download/pic.jpg', options: detectionOptions }) const faces3 = await detectFaces({ image: { uri: 'content://media/external/images/media/12345' }, options: detectionOptions }) console.log({ faces1, faces2, faces3 }) ``` ## Face Detection Options #### Common (Frame Processor and Static Images) | Option | Description | Default | Options | | ------------- | ------------- | ------------- | ------------- | | `performanceMode` | Favor speed or accuracy when detecting faces. | `fast` | `fast`, `accurate` | | `landmarkMode` | Whether to attempt to identify facial `landmarks`: eyes, ears, nose, cheeks, mouth, and so on. | `none` | `none`, `all` | | `contourMode` | Whether to detect the contours of facial features. Contours are detected for only the most prominent face in an image. | `none` | `none`, `all` | | `classificationMode` | Whether or not to classify faces into categories such as 'smiling' and 'eyes open'. | `none` | `none`, `all` | | `minFaceSize` | Sets the smallest desired face size, expressed as the ratio of the width of the head to the width of the image. | `0.15` | `number` | | `trackingEnabled` | Whether or not to assign faces an ID, which can be used to track faces across images. Note that when contour detection is enabled, only one face is detected, so face tracking doesn't produce useful results. For this reason, and to improve detection speed, don't enable both contour detection and face tracking. | `false` | `boolean` | #### Frame Processor | Option | Description | Default | Options | | ------------- | ------------- | ------------- | ------------- | | `cameraFacing` | Currently active camera. | `front` | `front`, `back` | | `autoMode` | Should auto scaling (of face bounds, contours and landmarks) and rotation be handled on the native side? If this option is disabled, all detection results are relative to frame coordinates, not to the screen/preview. You shouldn't use this option if you want to draw on screen using the `Skia Frame Processor`. See [this](https://github.com/luicfrr/react-native-vision-camera-face-detector/issues/30#issuecomment-2058805546) and [this](https://github.com/luicfrr/react-native-vision-camera-face-detector/issues/35) for more details. | `false` | `boolean` | | `windowWidth` | Required if you want to use `autoMode`. You must handle your own logic for getting the screen size (with or without the status bar, etc.). | `1.0` | `number` | | `windowHeight` | Required if you want to use `autoMode`. You must handle your own logic for getting the screen size (with or without the status bar, etc.). | `1.0` | `number` | #### Static Images | Option | Description | Default | Options | | ------------- | ------------- | ------------- | ------------- | | `image` | Image source | - | `number`, `string`, `{ uri: string }` | ## 🔧 Troubleshooting Here are common issues you may run into when using this package and how you can try to fix them: - `Regular javascript function cannot be shared. Try decorating the function with the 'worklet' keyword...`: - If you're using `react-native-reanimated` maybe you're missing [this](https://github.com/mrousavy/react-native-vision-camera/issues/1791#issuecomment-1892130378) step. - `Execution failed for task ':react-native-vision-camera-face-detector:compileDebugKotlin'...`: - This error is probably related to the gradle cache. Try [this](https://github.com/luicfrr/react-native-vision-camera-face-detector/issues/71#issuecomment-2186614831) solution first. - Also check [this](https://github.com/luicfrr/react-native-vision-camera-face-detector/issues/90#issuecomment-2358160166) comment. If you find other errors while using this package you're welcome to open a new issue or create a PR with the fix. ## 👷 Built With - [React Native](https://reactnative.dev/) - [Google MLKit](https://developers.google.com/ml-kit) - [Vision Camera](https://react-native-vision-camera.com/) ## 🔎 About This package was tested using the following: - `react-native`: `0.79.5` (new arch disabled) - `react-native-vision-camera`: `4.7.2` - `react-native-worklets-core`: `1.6.2` - `@shopify/react-native-skia`: `2.2.19` - `react-native-reanimated`: `~3.17.4` - `@react-native-firebase`: `^22.2.1` - `expo`: `^53` Minimum OS version: - `Android`: `SDK 26` (Android 8) - `iOS`: `15.5` Make sure you follow the tested versions and that your device meets the minimum OS version before opening issues. ## 📚 Author Made with ❤️ by [luicfrr](https://github.com/luicfrr) ================================================ FILE: VisionCameraFaceDetector.podspec ================================================ require "json" package = JSON.parse(File.read(File.join(__dir__, "package.json"))) Pod::Spec.new do |s| s.name = "VisionCameraFaceDetector" s.version = package["version"] s.summary = package["description"] s.homepage = package["homepage"] s.license = package["license"] s.authors = package["author"] s.platforms = { :ios => "15.5" } # 15.5 is the minimum version for GoogleMLKit/FaceDetection 7.0.0 s.source = { :git => "https://github.com/luicfrr/react-native-vision-camera-face-detector.git", :tag => "#{s.version}" } s.source_files = "ios/**/*.{h,m,mm,swift}" s.dependency "React-Core" s.dependency "GoogleMLKit/FaceDetection" , "8.0.0" s.dependency "VisionCamera" end ================================================ FILE: android/build.gradle ================================================ def safeExtGet(prop, fallback) { rootProject.ext.has(prop) ?
rootProject.ext.get(prop) : fallback } def kotlinVersion = safeExtGet("VisionCameraFaceDetector_kotlinVersion", "2.1.20") apply plugin: "com.android.library" apply plugin: "kotlin-android" buildscript { repositories { google() mavenCentral() } dependencies { classpath "com.android.tools.build:gradle:8.13.0" classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:${kotlinVersion}" } } android { buildToolsVersion = safeExtGet("VisionCameraFaceDetector_buildToolsVersion", "35.0.0") ndkVersion safeExtGet("VisionCameraFaceDetector_ndkVersion", "27.3.13750724") defaultConfig { minSdkVersion safeExtGet("VisionCameraFaceDetector_minSdkVersion", 26) compileSdkVersion safeExtGet("VisionCameraFaceDetector_compileSdkVersion", 35) targetSdkVersion safeExtGet("VisionCameraFaceDetector_targetSdkVersion", 35) versionCode 1 versionName "1.0" } buildTypes { release { minifyEnabled false } } lintOptions { disable "GradleCompatible" } } repositories { mavenLocal() maven { // All of React Native (JS, Obj-C sources, Android binaries) is installed from npm url("$rootDir/../node_modules/react-native/android") } google() mavenCentral() } dependencies { //noinspection GradleDynamicVersion implementation "com.facebook.react:react-native:+" // From node_modules api project(":react-native-vision-camera") implementation "androidx.annotation:annotation:1.9.1" implementation "androidx.camera:camera-core:1.5.1" implementation "com.google.mlkit:face-detection:16.1.7" } ================================================ FILE: android/gradle/wrapper/gradle-wrapper.properties ================================================ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-8.13-bin.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists ================================================ FILE: android/gradlew ================================================ #!/usr/bin/env sh # # Copyright 2015 the original author or authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. 
MAX_FD="maximum" warn () { echo "$*" } die () { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? -ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin or MSYS, switch paths to Windows format before running java if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=`expr $i + 1` done case $i in 0) set -- ;; 1) set -- "$args0" ;; 2) set -- "$args0" "$args1" ;; 3) set -- "$args0" "$args1" "$args2" ;; 4) set -- "$args0" "$args1" "$args2" "$args3" ;; 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Escape application args save () { for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done echo " " } APP_ARGS=`save "$@"` # Collect all arguments for the java command, following the shell quoting and substitution rules eval set -- 
$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" exec "$JAVACMD" "$@" ================================================ FILE: android/gradlew.bat ================================================ @rem @rem Copyright 2015 the original author or authors. @rem @rem Licensed under the Apache License, Version 2.0 (the "License"); @rem you may not use this file except in compliance with the License. @rem You may obtain a copy of the License at @rem @rem https://www.apache.org/licenses/LICENSE-2.0 @rem @rem Unless required by applicable law or agreed to in writing, software @rem distributed under the License is distributed on an "AS IS" BASIS, @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @rem See the License for the specific language governing permissions and @rem limitations under the License. @rem @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Resolve any "." and ".." in APP_HOME to make it shorter. for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto execute echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* :end @rem End local scope for the variables with windows NT shell if "%ERRORLEVEL%"=="0" goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! 
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 exit /b 1 :mainEnd if "%OS%"=="Windows_NT" endlocal :omega ================================================ FILE: android/src/main/AndroidManifest.xml ================================================ ================================================ FILE: android/src/main/java/com/visioncamerafacedetector/FaceDetectorCommon.kt ================================================ package com.visioncamerafacedetector import android.graphics.Rect import android.view.Surface import com.mrousavy.camera.core.types.Position import com.google.mlkit.vision.face.Face import com.google.mlkit.vision.face.FaceLandmark import com.google.mlkit.vision.face.FaceContour import com.google.mlkit.vision.face.FaceDetection import com.google.mlkit.vision.face.FaceDetector import com.google.mlkit.vision.face.FaceDetectorOptions data class FaceDetectorResult( val runContours: Boolean = false, val runClassifications: Boolean = false, val runLandmarks: Boolean = false, val trackingEnabled: Boolean = false, val faceDetector: FaceDetector ) class FaceDetectorCommon() { private fun processBoundingBox( boundingBox: Rect, sourceWidth: Double = 0.0, sourceHeight: Double = 0.0, scaleX: Double = 1.0, scaleY: Double = 1.0, autoMode: Boolean = false, cameraFacing: Position = Position.FRONT, orientation: Int? = Surface.ROTATION_0 ): Map { val bounds: MutableMap = HashMap() val width = boundingBox.width().toDouble() * scaleX val height = boundingBox.height().toDouble() * scaleY val x = boundingBox.left.toDouble() val y = boundingBox.top.toDouble() bounds["width"] = width bounds["height"] = height bounds["x"] = x * scaleX bounds["y"] = y * scaleY if(!autoMode) return bounds // using front camera if(cameraFacing == Position.FRONT) { when (orientation) { // device is portrait Surface.ROTATION_0 -> { bounds["x"] = ((-x * scaleX) + sourceWidth * scaleX) - width bounds["y"] = y * scaleY } // device is landscape right Surface.ROTATION_270 -> { bounds["x"] = y * scaleX bounds["y"] = x * scaleY } // device is upside down Surface.ROTATION_180 -> { bounds["x"] = x * scaleX bounds["y"] = ((-y * scaleY) + sourceHeight * scaleY) - height } // device is landscape left Surface.ROTATION_90 -> { bounds["x"] = ((-y * scaleX) + sourceWidth * scaleX) - width bounds["y"] = ((-x * scaleY) + sourceHeight * scaleY) - height } } return bounds } // using back camera when (orientation) { // device is portrait Surface.ROTATION_0 -> { bounds["x"] = x * scaleX bounds["y"] = y * scaleY } // device is landscape right Surface.ROTATION_270 -> { bounds["x"] = y * scaleX bounds["y"] = ((-x * scaleY) + sourceHeight * scaleY) - height } // device is upside down Surface.ROTATION_180 -> { bounds["x"] =((-x * scaleX) + sourceWidth * scaleX) - width bounds["y"] = ((-y * scaleY) + sourceHeight * scaleY) - height } // device is landscape left Surface.ROTATION_90 -> { bounds["x"] = ((-y * scaleX) + sourceWidth * scaleX) - width bounds["y"] = x * scaleY } } return bounds } private fun processLandmarks( face: Face, sourceWidth: Double = 0.0, sourceHeight: Double = 0.0, scaleX: Double = 1.0, scaleY: Double = 1.0, autoMode: Boolean = false, cameraFacing: Position = Position.FRONT, orientation: Int? 
= Surface.ROTATION_0 ): Map { val faceLandmarksTypes = intArrayOf( FaceLandmark.LEFT_CHEEK, FaceLandmark.LEFT_EAR, FaceLandmark.LEFT_EYE, FaceLandmark.MOUTH_BOTTOM, FaceLandmark.MOUTH_LEFT, FaceLandmark.MOUTH_RIGHT, FaceLandmark.NOSE_BASE, FaceLandmark.RIGHT_CHEEK, FaceLandmark.RIGHT_EAR, FaceLandmark.RIGHT_EYE ) val faceLandmarksTypesStrings = arrayOf( "LEFT_CHEEK", "LEFT_EAR", "LEFT_EYE", "MOUTH_BOTTOM", "MOUTH_LEFT", "MOUTH_RIGHT", "NOSE_BASE", "RIGHT_CHEEK", "RIGHT_EAR", "RIGHT_EYE" ) val faceLandmarksTypesMap: MutableMap = HashMap() for (i in faceLandmarksTypesStrings.indices) { val landmark = face.getLandmark(faceLandmarksTypes[i]) val landmarkName = faceLandmarksTypesStrings[i] if (landmark == null) continue val point = landmark.position val currentPointsMap: MutableMap = HashMap() val x = point.x.toDouble() val y = point.y.toDouble() currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY if(autoMode) { if(cameraFacing == Position.FRONT) { // using front camera when (orientation) { // device is portrait Surface.ROTATION_0 -> { currentPointsMap["x"] = ((-x * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = y * scaleY } // device is landscape right Surface.ROTATION_270 -> { currentPointsMap["x"] = y * scaleX currentPointsMap["y"] = x * scaleY } // device is upside down Surface.ROTATION_180 -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = ((-y * scaleY) + sourceHeight * scaleY) } // device is landscape left Surface.ROTATION_90 -> { currentPointsMap["x"] = ((-y * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = ((-x * scaleY) + sourceHeight * scaleY) } else -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY } } } else { // using back camera when (orientation) { // device is portrait Surface.ROTATION_0 -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY } // device is landscape right Surface.ROTATION_270 -> { currentPointsMap["x"] = y * scaleX currentPointsMap["y"] = ((-x * scaleY) + sourceHeight * scaleY) } // device is upside down Surface.ROTATION_180 -> { currentPointsMap["x"] =((-x * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = ((-y * scaleY) + sourceHeight * scaleY) } // device is landscape left Surface.ROTATION_90 -> { currentPointsMap["x"] = ((-y * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = x * scaleY } else -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY } } } } faceLandmarksTypesMap[landmarkName] = currentPointsMap } return faceLandmarksTypesMap } private fun processFaceContours( face: Face, sourceWidth: Double = 0.0, sourceHeight: Double = 0.0, scaleX: Double = 1.0, scaleY: Double = 1.0, autoMode: Boolean = false, cameraFacing: Position = Position.FRONT, orientation: Int? 
= Surface.ROTATION_0 ): Map { val faceContoursTypes = intArrayOf( FaceContour.FACE, FaceContour.LEFT_CHEEK, FaceContour.LEFT_EYE, FaceContour.LEFT_EYEBROW_BOTTOM, FaceContour.LEFT_EYEBROW_TOP, FaceContour.LOWER_LIP_BOTTOM, FaceContour.LOWER_LIP_TOP, FaceContour.NOSE_BOTTOM, FaceContour.NOSE_BRIDGE, FaceContour.RIGHT_CHEEK, FaceContour.RIGHT_EYE, FaceContour.RIGHT_EYEBROW_BOTTOM, FaceContour.RIGHT_EYEBROW_TOP, FaceContour.UPPER_LIP_BOTTOM, FaceContour.UPPER_LIP_TOP ) val faceContoursTypesStrings = arrayOf( "FACE", "LEFT_CHEEK", "LEFT_EYE", "LEFT_EYEBROW_BOTTOM", "LEFT_EYEBROW_TOP", "LOWER_LIP_BOTTOM", "LOWER_LIP_TOP", "NOSE_BOTTOM", "NOSE_BRIDGE", "RIGHT_CHEEK", "RIGHT_EYE", "RIGHT_EYEBROW_BOTTOM", "RIGHT_EYEBROW_TOP", "UPPER_LIP_BOTTOM", "UPPER_LIP_TOP" ) val faceContoursTypesMap: MutableMap = HashMap() for (i in faceContoursTypesStrings.indices) { val contour = face.getContour(faceContoursTypes[i]) val contourName = faceContoursTypesStrings[i] if (contour == null) continue val points = contour.points val pointsMap: MutableList> = mutableListOf() for (j in points.indices) { val currentPointsMap: MutableMap = HashMap() val x = points[j].x.toDouble() val y = points[j].y.toDouble() currentPointsMap["x"] = points[j].x.toDouble() * scaleX currentPointsMap["y"] = points[j].y.toDouble() * scaleY if(autoMode) { if(cameraFacing == Position.FRONT) { // using front camera when (orientation) { // device is portrait Surface.ROTATION_0 -> { currentPointsMap["x"] = ((-x * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = y * scaleY } // device is landscape right Surface.ROTATION_270 -> { currentPointsMap["x"] = y * scaleX currentPointsMap["y"] = x * scaleY } // device is upside down Surface.ROTATION_180 -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = ((-y * scaleY) + sourceHeight * scaleY) } // device is landscape left Surface.ROTATION_90 -> { currentPointsMap["x"] = ((-y * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = ((-x * scaleY) + sourceHeight * scaleY) } else -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY } } } else { // using back camera when (orientation) { // device is portrait Surface.ROTATION_0 -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY } // device is landscape right Surface.ROTATION_270 -> { currentPointsMap["x"] = y * scaleX currentPointsMap["y"] = ((-x * scaleY) + sourceHeight * scaleY) } // device is upside down Surface.ROTATION_180 -> { currentPointsMap["x"] =((-x * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = ((-y * scaleY) + sourceHeight * scaleY) } // device is landscape left Surface.ROTATION_90 -> { currentPointsMap["x"] = ((-y * scaleX) + sourceWidth * scaleX) currentPointsMap["y"] = x * scaleY } else -> { currentPointsMap["x"] = x * scaleX currentPointsMap["y"] = y * scaleY } } } } pointsMap.add(currentPointsMap) } faceContoursTypesMap[contourName] = pointsMap } return faceContoursTypesMap } fun getFaceDetector( options: Map? 
): FaceDetectorResult { var performanceModeValue = FaceDetectorOptions.PERFORMANCE_MODE_FAST var landmarkModeValue = FaceDetectorOptions.LANDMARK_MODE_NONE var classificationModeValue = FaceDetectorOptions.CLASSIFICATION_MODE_NONE var contourModeValue = FaceDetectorOptions.CONTOUR_MODE_NONE var runLandmarks = false var runClassifications = false var runContours = false var trackingEnabled = false if (options?.get("performanceMode").toString() == "accurate") { performanceModeValue = FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE } if (options?.get("landmarkMode").toString() == "all") { runLandmarks = true landmarkModeValue = FaceDetectorOptions.LANDMARK_MODE_ALL } if (options?.get("classificationMode").toString() == "all") { runClassifications = true classificationModeValue = FaceDetectorOptions.CLASSIFICATION_MODE_ALL } if (options?.get("contourMode").toString() == "all") { runContours = true contourModeValue = FaceDetectorOptions.CONTOUR_MODE_ALL } val minFaceSize = (options?.get("minFaceSize") ?: 0.15) as Double val optionsBuilder = FaceDetectorOptions.Builder() .setPerformanceMode(performanceModeValue) .setLandmarkMode(landmarkModeValue) .setContourMode(contourModeValue) .setClassificationMode(classificationModeValue) .setMinFaceSize(minFaceSize.toFloat()) if (options?.get("trackingEnabled").toString() == "true") { trackingEnabled = true optionsBuilder.enableTracking() } val faceDetector = FaceDetection.getClient( optionsBuilder.build() ) return FaceDetectorResult( runContours = runContours, runClassifications = runClassifications, runLandmarks = runLandmarks, trackingEnabled = trackingEnabled, faceDetector = faceDetector ) } fun processFaces( faces: List, runLandmarks: Boolean, runClassifications: Boolean, runContours: Boolean, trackingEnabled: Boolean, sourceWidth: Double = 0.0, sourceHeight: Double = 0.0, scaleX: Double = 1.0, scaleY: Double = 1.0, autoMode: Boolean = false, cameraFacing: Position = Position.FRONT, orientation: Int? 
= Surface.ROTATION_0 ): ArrayList> { val result = ArrayList>() faces.forEach{face -> val map: MutableMap = HashMap() if (runLandmarks) { map["landmarks"] = processLandmarks( face, sourceWidth, sourceHeight, scaleX, scaleY, autoMode, cameraFacing, orientation ) } if (runClassifications) { map["leftEyeOpenProbability"] = face.leftEyeOpenProbability?.toDouble() ?: -1 map["rightEyeOpenProbability"] = face.rightEyeOpenProbability?.toDouble() ?: -1 map["smilingProbability"] = face.smilingProbability?.toDouble() ?: -1 } if (runContours) { map["contours"] = processFaceContours( face, sourceWidth, sourceHeight, scaleX, scaleY, autoMode, cameraFacing, orientation ) } if (trackingEnabled) { map["trackingId"] = face.trackingId ?: -1 } map["rollAngle"] = face.headEulerAngleZ.toDouble() map["pitchAngle"] = face.headEulerAngleX.toDouble() map["yawAngle"] = face.headEulerAngleY.toDouble() map["bounds"] = processBoundingBox( face.boundingBox, sourceWidth, sourceHeight, scaleX, scaleY, autoMode, cameraFacing, orientation ) result.add(map) } return result } } ================================================ FILE: android/src/main/java/com/visioncamerafacedetector/ImageFaceDetectorModule.kt ================================================ package com.visioncamerafacedetector import android.util.Log import android.graphics.Bitmap import android.graphics.BitmapFactory import android.net.Uri import com.facebook.react.bridge.* import com.google.mlkit.vision.common.InputImage import java.io.InputStream import java.net.HttpURLConnection import java.net.URL private const val TAG = "ImageFaceDetector" class ImageFaceDetectorModule( private val reactContext: ReactApplicationContext ): ReactContextBaseJavaModule(reactContext) { override fun getName(): String = "ImageFaceDetector" private fun toWritableArray( list: ArrayList> ): WritableArray { val array = Arguments.createArray() for (map in list) { val writableMap = Arguments.createMap() for ((key, value) in map) { @Suppress("UNCHECKED_CAST") when (value) { is Boolean -> writableMap.putBoolean(key, value) is Int -> writableMap.putInt(key, value) is Double -> writableMap.putDouble(key, value) is Float -> writableMap.putDouble(key, value.toDouble()) is String -> writableMap.putString(key, value) is Map<*, *> -> writableMap.putMap(key, toWritableMap(value as Map)) is ArrayList<*> -> writableMap.putArray(key, toWritableArray(value as ArrayList>)) else -> writableMap.putNull(key) } } array.pushMap(writableMap) } return array } private fun toWritableMap( map: Map ): WritableMap { val writableMap = Arguments.createMap() for ((key, value) in map) { @Suppress("UNCHECKED_CAST") when (value) { is Boolean -> writableMap.putBoolean(key, value) is Int -> writableMap.putInt(key, value) is Double -> writableMap.putDouble(key, value) is Float -> writableMap.putDouble(key, value.toDouble()) is String -> writableMap.putString(key, value) is Map<*, *> -> writableMap.putMap(key, toWritableMap(value as Map)) is ArrayList<*> -> writableMap.putArray(key, toWritableArray(value as ArrayList>)) else -> writableMap.putNull(key) } } return writableMap } private fun toMap( readableMap: ReadableMap? 
): Map { val map = mutableMapOf() if (readableMap == null) return map val iterator = readableMap.keySetIterator() while (iterator.hasNextKey()) { val key = iterator.nextKey() when (readableMap.getType(key)) { ReadableType.Null -> map[key] = "" ReadableType.Boolean -> map[key] = readableMap.getBoolean(key) ReadableType.Number -> map[key] = readableMap.getDouble(key) ReadableType.String -> map[key] = readableMap.getString(key) ?: "" ReadableType.Map -> map[key] = toMap(readableMap.getMap(key)) ReadableType.Array -> map[key] = toList(readableMap.getArray(key)) } } return map } private fun toList( readableArray: ReadableArray? ): ArrayList { val list = arrayListOf() if (readableArray == null) return list for (i in 0 until readableArray.size()) { when (readableArray.getType(i)) { ReadableType.Null -> list.add("") ReadableType.Boolean -> list.add(readableArray.getBoolean(i)) ReadableType.Number -> list.add(readableArray.getDouble(i)) ReadableType.String -> list.add(readableArray.getString(i) ?: "") ReadableType.Map -> list.add(toMap(readableArray.getMap(i))) ReadableType.Array -> list.add(toList(readableArray.getArray(i))) } } return list } @ReactMethod fun detectFaces( uri: String, options: ReadableMap?, promise: Promise ) { try { val common = FaceDetectorCommon() val ( runContours, runClassifications, runLandmarks, trackingEnabled, faceDetector ) = common.getFaceDetector( toMap(options) ) val image = InputImage.fromBitmap( loadBitmapFromUri(uri)!!, 0 ) faceDetector.process(image) .addOnSuccessListener { faces -> val result = common.processFaces( faces, runLandmarks, runClassifications, runContours, trackingEnabled ) promise.resolve( toWritableArray(result) ) } .addOnFailureListener { e -> Log.e(TAG, "Error processing image face detection: ", e) // resolve empty list on error promise.resolve(Arguments.createArray()) } } catch (e: Exception) { Log.e(TAG, "Error preparing face detection: ", e) // resolve empty list on error promise.resolve(Arguments.createArray()) } } private fun loadBitmapFromUri(uriString: String): Bitmap? { return try { val uri = Uri.parse(uriString) when (uri.scheme?.lowercase()) { "content", "android.resource" -> { val stream = reactContext.contentResolver.openInputStream(uri) stream.useDecode() } "file" -> { val path = uri.path ?: return null if (path.startsWith("/android_asset/")) { val assetPath = path.removePrefix("/android_asset/") reactContext.assets.open(assetPath).useDecode() } else { BitmapFactory.decodeFile(path) } } "asset" -> { val assetPath = uriString.removePrefix("asset:/").removePrefix("asset:///") reactContext.assets.open(assetPath).useDecode() } "http", "https" -> { val url = URL(uriString) val conn = url.openConnection() as HttpURLConnection conn.connect() val input = conn.inputStream input.useDecode() } else -> { // Fallback try as file path BitmapFactory.decodeFile(uriString) } } } catch (e: Exception) { null } } } private fun InputStream?.useDecode(): Bitmap? 
{ if (this == null) return null return try { this.use { BitmapFactory.decodeStream(it) } } catch (e: Exception) { null } } ================================================ FILE: android/src/main/java/com/visioncamerafacedetector/VisionCameraFaceDetectorOrientation.kt ================================================ package com.visioncamerafacedetector import android.util.Log import android.view.OrientationEventListener import android.view.Surface import com.facebook.react.bridge.ReactApplicationContext private const val TAG = "FaceDetectorOrientation" class VisionCameraFaceDetectorOrientation( private val context: ReactApplicationContext ) { var orientation = Surface.ROTATION_0 private var orientationListener: OrientationEventListener? = null init { if (orientationListener == null) { Log.d(TAG, "Assigning new device orientation listener") orientationListener = object : OrientationEventListener(context) { override fun onOrientationChanged(rotationDegrees: Int) { orientation = degreesToSurfaceRotation(rotationDegrees) } } } orientation = Surface.ROTATION_0 startDeviceOrientationListener() } private fun startDeviceOrientationListener() { if ( orientationListener != null && orientationListener!!.canDetectOrientation() ) { Log.d(TAG, "Enabling device orientation listener") orientationListener!!.enable() } } fun stopDeviceOrientationListener() { orientationListener?.disable() orientationListener = null Log.d(TAG, "Disabled device orientation listener") } private fun degreesToSurfaceRotation(degrees: Int): Int = when (degrees) { in 45..135 -> Surface.ROTATION_270 in 135..225 -> Surface.ROTATION_180 in 225..315 -> Surface.ROTATION_90 else -> Surface.ROTATION_0 } } ================================================ FILE: android/src/main/java/com/visioncamerafacedetector/VisionCameraFaceDetectorPlugin.kt ================================================ package com.visioncamerafacedetector import android.util.Log import android.view.Surface import com.google.android.gms.tasks.Tasks import com.google.mlkit.vision.common.InputImage import com.google.mlkit.vision.face.FaceDetector import com.mrousavy.camera.core.FrameInvalidError import com.mrousavy.camera.core.types.Position import com.mrousavy.camera.frameprocessors.Frame import com.mrousavy.camera.frameprocessors.FrameProcessorPlugin private const val TAG = "FaceDetector" class VisionCameraFaceDetectorPlugin( options: Map?, private val orientationManager: VisionCameraFaceDetectorOrientation ) : FrameProcessorPlugin() { // detection props private var autoMode = false private var faceDetector: FaceDetector? = null private var runLandmarks = false private var runClassifications = false private var runContours = false private var trackingEnabled = false private var windowWidth = 1.0 private var windowHeight = 1.0 private var cameraFacing: Position = Position.FRONT private val common = FaceDetectorCommon() init { // handle auto scaling autoMode = options?.get("autoMode").toString() == "true" windowWidth = (options?.get("windowWidth") ?: 1.0) as Double windowHeight = (options?.get("windowHeight") ?: 1.0) as Double if (options?.get("cameraFacing").toString() == "back") { cameraFacing = Position.BACK } val faceDetectorResult = common.getFaceDetector(options) runLandmarks = faceDetectorResult.runLandmarks runClassifications = faceDetectorResult.runClassifications runContours = faceDetectorResult.runContours trackingEnabled = faceDetectorResult.trackingEnabled faceDetector = faceDetectorResult.faceDetector } override fun callback( frame: Frame, params: Map? 
): ArrayList<Map<String, Any>> { try { val image = InputImage.fromMediaImage( frame.image, frame.imageProxy.imageInfo.rotationDegrees ) // we need to invert sizes as frame is always -90deg rotated val width = image.height.toDouble() val height = image.width.toDouble() val scaleX = if(autoMode) windowWidth / width else 1.0 val scaleY = if(autoMode) windowHeight / height else 1.0 val task = faceDetector!!.process(image) val faces = Tasks.await(task) return common.processFaces( faces, runLandmarks, runClassifications, runContours, trackingEnabled, width, height, scaleX, scaleY, autoMode, cameraFacing, orientationManager.orientation ) } catch (e: Exception) { Log.e(TAG, "Error processing face detection: ", e) } catch (e: FrameInvalidError) { Log.e(TAG, "Frame invalid error: ", e) } return ArrayList() } } ================================================ FILE: android/src/main/java/com/visioncamerafacedetector/VisionCameraFaceDetectorPluginPackage.kt ================================================ package com.visioncamerafacedetector import com.facebook.react.ReactPackage import com.facebook.react.bridge.NativeModule import com.facebook.react.bridge.ReactApplicationContext import com.facebook.react.bridge.ReactContextBaseJavaModule import com.facebook.react.bridge.ReactMethod import com.facebook.react.uimanager.ViewManager import com.mrousavy.camera.frameprocessors.FrameProcessorPluginRegistry class VisionCameraFaceDetectorPluginPackage: ReactPackage { companion object { private var orientationManager: VisionCameraFaceDetectorOrientation? = null init { FrameProcessorPluginRegistry.addFrameProcessorPlugin("detectFaces") { proxy, options -> if(orientationManager == null) { orientationManager = VisionCameraFaceDetectorOrientation(proxy.context) } VisionCameraFaceDetectorPlugin(options, orientationManager!!) } } fun stopDeviceOrientationListener() { orientationManager?.stopDeviceOrientationListener() orientationManager = null } } override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> { return listOf( VisionCameraFaceDetectorOrientationManager(reactContext), ImageFaceDetectorModule(reactContext) ) } override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> { return emptyList() } } class VisionCameraFaceDetectorOrientationManager(context: ReactApplicationContext) : ReactContextBaseJavaModule(context) { override fun getName(): String { return "VisionCameraFaceDetectorOrientationManager" } @ReactMethod fun stopDeviceOrientationListener() { VisionCameraFaceDetectorPluginPackage.stopDeviceOrientationListener() } } ================================================ FILE: babel.config.js ================================================ module.exports = { presets: ['module:metro-react-native-babel-preset'], }; ================================================ FILE: example/README.md ================================================ ## Getting Started Install all dependencies with: ```bash npm install # or yarn ``` Before running the example app you need to delete everything inside `/node_modules/react-native-vision-camera-face-detector/node_modules` except the `.bin` folder.
Then run the app in development mode: ```bash npm run android # or npm run ios # or yarn android # or yarn ios ``` Or in production mode: ```bash npm run android:prod # or npm run ios:prod # or yarn android:prod # or yarn ios:prod ``` ## Cleaning If, for some reason, you need to clean the project, just run: ```bash npm run prebuild:clean # or yarn prebuild:clean ``` ================================================ FILE: example/app.config.js ================================================ export default { expo: { newArchEnabled: false, name: 'Face Detector Example', slug: 'face-detector-example', version: '1.0.0', jsEngine: 'hermes', orientation: 'portrait', icon: './assets/icon.png', splash: { 'image': './assets/splash.png', 'resizeMode': 'contain', 'backgroundColor': '#ffffff' }, assetBundlePatterns: [ '**/*' ], ios: { bundleIdentifier: 'com.facedetector.example', buildNumber: '1', privacyManifests: { NSPrivacyAccessedAPITypes: [ { NSPrivacyAccessedAPIType: 'NSPrivacyAccessedAPICategoryUserDefaults', NSPrivacyAccessedAPITypeReasons: [ 'CA92.1' ] } ] } }, android: { package: 'com.facedetector.example', versionCode: 1, adaptiveIcon: { foregroundImage: './assets/adaptive-icon.png', backgroundColor: '#ffffff' } }, plugins: [ [ 'react-native-vision-camera', { cameraPermissionText: '$(PRODUCT_NAME) needs to access your device\'s camera.' } ], [ 'expo-image-picker', { photosPermission: 'The app accesses your photos to let you share them with your friends.' } ], [ 'expo-build-properties', { android: { // android 8 minSdkVersion: 26, // android 15 compileSdkVersion: 35, targetSdkVersion: 35, buildToolsVersion: '35.0.0' }, ios: { deploymentTarget: '15.5', useFrameworks: 'static' } } ] ] } } ================================================ FILE: example/babel.config.js ================================================ const path = require( "path" ) const pak = require( "../package.json" ) module.exports = { presets: [ 'babel-preset-expo' ], plugins: [ [ 'module-resolver', { alias: { [ pak.name ]: path.join( __dirname, "..", pak.source ) }, root: [ './src' ], 'extensions': [ '.tsx', '.ts', '.js', '.json' ] } ], [ 'react-native-reanimated/plugin', { processNestedWorklets: true } ], [ 'react-native-worklets-core/plugin' ] ] } ================================================ FILE: example/index.js ================================================ import { registerRootComponent } from 'expo' import App from './src' registerRootComponent( App ) ================================================ FILE: example/metro.config.js ================================================ // Learn more https://docs.expo.io/guides/customizing-metro const { getDefaultConfig } = require( 'expo/metro-config' ) const blacklist = require( 'metro-config/src/defaults/exclusionList' ) const path = require( 'path' ) const escape = require( 'escape-string-regexp' ) const pak = require( '../package.json' ) const root = path.resolve( __dirname, '..'
) const defaultConfig = getDefaultConfig( __dirname ) const modules = Object.keys( { ...pak.peerDependencies } ) module.exports = { ...defaultConfig, projectRoot: __dirname, watchFolders: [ root ], // We need to make sure that only one version is loaded for peerDependencies // So we blacklist them at the root, and alias them to the versions in example's node_modules resolver: { ...defaultConfig.resolver, blacklistRE: blacklist( modules.map( ( m ) => ( new RegExp( `^${ escape( path.join( root, 'node_modules', m ) ) }\\/.*$` ) ) ) ), extraNodeModules: modules.reduce( ( acc, name ) => { acc[ name ] = path.join( __dirname, 'node_modules', name ) return acc }, {} ) }, transformer: { ...defaultConfig.transformer, getTransformOptions: async () => ( { transform: { experimentalImportSupport: false, inlineRequires: true } } ) } } ================================================ FILE: example/package.json ================================================ { "name": "face-detector-example", "version": "1.0.0", "private": true, "scripts": { "pods": "pod-install --quiet", "lint": "yarn test && eslint --quiet --fix --ext .js,.ts,.tsx,.jsx .", "test": "tsc", "prebuild": "npx expo prebuild", "prebuild:clean": "npx expo prebuild --clean", "android": "yarn prebuild && npx expo run:android -d", "android:prod": "yarn prebuild && npx expo run:android -d --variant release", "ios": "yarn prebuild && npx expo run:ios -d", "ios:prod": "yarn prebuild && npx expo run:ios -d --configuration Release", "start": "expo start --dev-client" }, "main": "index.js", "dependencies": { "@react-native-community/hooks": "^100.1.0", "@react-native-firebase/app": "^22.2.1", "@react-native-firebase/messaging": "^22.2.1", "@react-navigation/native": "^7.1.10", "@shopify/react-native-skia": "2.2.19", "expo": "^53", "expo-application": "~6.1.5", "expo-build-properties": "~0.14.8", "expo-dev-client": "~5.2.4", "expo-image-picker": "~16.1.4", "react": "19.0.0", "react-native": "../node_modules/react-native", "react-native-reanimated": "~3.17.4", "react-native-safe-area-context": "5.4.0", "react-native-vision-camera": "../node_modules/react-native-vision-camera", "react-native-vision-camera-face-detector": "link:../", "react-native-worklets-core": "../node_modules/react-native-worklets-core" }, "devDependencies": { "@babel/core": "^7.28.4", "@babel/preset-env": "^7.28.3", "@babel/runtime": "^7.28.3", "@types/react": "~19.0.10", "babel-plugin-module-resolver": "^5.0.2", "eslint": "../node_modules/eslint", "metro-react-native-babel-preset": "^0.77.0", "pod-install": "^0.3.7", "typescript": "~5.8.3" } } ================================================ FILE: example/src/index.tsx ================================================ import React, { ReactNode, useEffect, useRef, useState } from 'react' import { StyleSheet, Text, Button, View, useWindowDimensions } from 'react-native' import { CameraPosition, DrawableFrame, Frame, Camera as VisionCamera, useCameraDevice, useCameraPermission } from 'react-native-vision-camera' import { launchImageLibraryAsync } from 'expo-image-picker' import { useIsFocused } from '@react-navigation/core' import { useAppState } from '@react-native-community/hooks' import { SafeAreaProvider } from 'react-native-safe-area-context' import { NavigationContainer } from '@react-navigation/native' import { Face, Camera, Contours, Landmarks, detectFaces, FrameFaceDetectionOptions } from 'react-native-vision-camera-face-detector' import { ClipOp, Skia, TileMode } from '@shopify/react-native-skia' import Animated, { 
/**
 * Entry point component
 *
 * @return {ReactNode} Component
 */
function Index(): ReactNode {
  return (
    <SafeAreaProvider>
      <NavigationContainer>
        <FaceDetection/>
      </NavigationContainer>
    </SafeAreaProvider>
  )
}

/**
 * Face detection component
 *
 * @return {ReactNode} Component
 */
function FaceDetection(): ReactNode {
  const {
    width,
    height
  } = useWindowDimensions()
  const {
    hasPermission,
    requestPermission
  } = useCameraPermission()
  const [ cameraMounted, setCameraMounted ] = useState( false )
  const [ cameraPaused, setCameraPaused ] = useState( false )
  const [ autoMode, setAutoMode ] = useState( true )
  const [ cameraFacing, setCameraFacing ] = useState<CameraPosition>( 'front' )
  const faceDetectionOptions = useRef<FrameFaceDetectionOptions>( {
    performanceMode: 'fast',
    classificationMode: 'all',
    contourMode: 'all',
    landmarkMode: 'all',
    windowWidth: width,
    windowHeight: height
  } ).current
  const isFocused = useIsFocused()
  const appState = useAppState()
  const isCameraActive = (
    !cameraPaused &&
    isFocused &&
    appState === 'active'
  )
  const cameraDevice = useCameraDevice( cameraFacing )
  //
  // vision camera ref
  //
  const camera = useRef<VisionCamera>( null )
  //
  // face rectangle position
  //
  const aFaceW = useSharedValue( 0 )
  const aFaceH = useSharedValue( 0 )
  const aFaceX = useSharedValue( 0 )
  const aFaceY = useSharedValue( 0 )
  const aRot = useSharedValue( 0 )
  const boundingBoxStyle = useAnimatedStyle( () => ( {
    position: 'absolute',
    borderWidth: 4,
    borderLeftColor: 'rgb(0,255,0)',
    borderRightColor: 'rgb(0,255,0)',
    borderBottomColor: 'rgb(0,255,0)',
    borderTopColor: 'rgb(255,0,0)',
    width: withTiming( aFaceW.value, { duration: 100 } ),
    height: withTiming( aFaceH.value, { duration: 100 } ),
    left: withTiming( aFaceX.value, { duration: 100 } ),
    top: withTiming( aFaceY.value, { duration: 100 } ),
    transform: [ {
      rotate: `${ aRot.value }deg`
    } ]
  } ) )

  useEffect( () => {
    if ( hasPermission ) return
    requestPermission()
  }, [] )

  /**
   * Handle camera UI rotation
   *
   * @param {number} rotation Camera rotation
   */
  function handleUiRotation( rotation: number ) {
    aRot.value = rotation
  }

  /**
   * Handles camera mount error event
   *
   * @param {any} error Error event
   */
  function handleCameraMountError( error: any ) {
    console.error( 'camera mount error', error )
  }

  /**
   * Handle detection result
   *
   * @param {Face[]} faces Detection result
   * @param {Frame} frame Current frame
   * @returns {void}
   */
  function handleFacesDetected( faces: Face[], frame: Frame ): void {
    // if no faces are detected we do nothing
    if ( faces.length <= 0 ) {
      aFaceW.value = 0
      aFaceH.value = 0
      aFaceX.value = 0
      aFaceY.value = 0
      return
    }

    console.log(
      'faces', faces.length,
      'frame', frame.toString(),
      'faces', JSON.stringify( faces )
    )

    const { bounds } = faces[ 0 ]
    const {
      width,
      height,
      x,
      y
    } = bounds
    aFaceW.value = width
    aFaceH.value = height
    aFaceX.value = x
    aFaceY.value = y

    // only call camera methods if ref is defined
    if ( camera.current ) {
      // take photo, capture video, etc...
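      //
      // a hedged sketch, not part of the original example: the ref is a plain
      // VisionCamera instance, so its documented capture methods apply, e.g.:
      //
      //   const photo = await camera.current.takePhoto()
      //   console.log( 'captured photo at', photo.path )
      //
      // ( handleFacesDetected would need to be async to use await here )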
    }
  }

  /**
   * Handle skia frame actions
   *
   * @param {Face[]} faces Detection result
   * @param {DrawableFrame} frame Current frame
   * @returns {void}
   */
  function handleSkiaActions(
    faces: Face[],
    frame: DrawableFrame
  ): void {
    'worklet'
    // if no faces are detected we do nothing
    if ( faces.length <= 0 ) return

    console.log(
      'SKIA - faces', faces.length,
      'frame', frame.toString()
    )

    const {
      bounds,
      contours,
      landmarks
    } = faces[ 0 ]

    // draw a blur shape around the face points
    const blurRadius = 25
    const blurFilter = Skia.ImageFilter.MakeBlur(
      blurRadius,
      blurRadius,
      TileMode.Repeat,
      null
    )
    const blurPaint = Skia.Paint()
    blurPaint.setImageFilter( blurFilter )
    const contourPath = Skia.Path.Make()
    const necessaryContours: ( keyof Contours )[] = [
      'FACE',
      'LEFT_CHEEK',
      'RIGHT_CHEEK'
    ]

    necessaryContours.map( ( key ) => {
      contours?.[ key ]?.map( ( point, index ) => {
        if ( index === 0 ) {
          // it's a starting point
          contourPath.moveTo( point.x, point.y )
        } else {
          // it's a continuation
          contourPath.lineTo( point.x, point.y )
        }
      } )
      contourPath.close()
    } )

    frame.save()
    frame.clipPath( contourPath, ClipOp.Intersect, true )
    frame.render( blurPaint )
    frame.restore()

    // draw mouth shape
    const mouthPath = Skia.Path.Make()
    const mouthPaint = Skia.Paint()
    mouthPaint.setColor( Skia.Color( 'red' ) )
    const necessaryLandmarks: ( keyof Landmarks )[] = [
      'MOUTH_BOTTOM',
      'MOUTH_LEFT',
      'MOUTH_RIGHT'
    ]

    necessaryLandmarks.map( ( key, index ) => {
      const point = landmarks?.[ key ]
      if ( !point ) return

      if ( index === 0 ) {
        // it's a starting point
        mouthPath.moveTo( point.x, point.y )
      } else {
        // it's a continuation
        mouthPath.lineTo( point.x, point.y )
      }
    } )
    mouthPath.close()
    frame.drawPath( mouthPath, mouthPaint )

    // draw a rectangle around the face
    const rectPaint = Skia.Paint()
    rectPaint.setColor( Skia.Color( 'blue' ) )
    rectPaint.setStyle( 1 )
    rectPaint.setStrokeWidth( 5 )
    frame.drawRect( bounds, rectPaint )
  }

  /**
   * Detect faces from image
   *
   * @returns {Promise<void>} Promise
   */
  async function detectFacesFromImage(): Promise<void> {
    // No permissions request is necessary for launching the image library
    let result = await launchImageLibraryAsync( {
      mediaTypes: [ 'images' ],
      allowsEditing: true,
      aspect: [ 4, 3 ],
      quality: 1
    } )

    if ( result.canceled ) return
    const faces = await detectFaces( {
      image: result.assets[ 0 ].uri
    } )
    console.log( 'image detected faces', faces )
  }

  /**
   * Detect faces from photo
   *
   * @returns {Promise<void>} Promise
   */
  async function detectFacesFromPhoto(): Promise<void> {
    if ( !camera.current ) return
    // taking a snapshot is faster than taking a photo,
    // but it does not process the captured image
    const { path } = await camera.current?.takeSnapshot()
    const faces = await detectFaces( {
      image: `file://${ path }`
    } )
    console.log( 'photo detected faces', faces )
  }

  return ( <>
    { hasPermission && cameraDevice ? <>
      { cameraMounted && <>
        <Camera
          ref={ camera }
          style={ StyleSheet.absoluteFill }
          isActive={ isCameraActive }
          device={ cameraDevice }
          onError={ handleCameraMountError }
          faceDetectionCallback={ handleFacesDetected }
          onUIRotationChanged={ handleUiRotation }
          faceDetectionOptions={ {
            ...faceDetectionOptions,
            autoMode
          } }
        />

        <Animated.View style={ boundingBoxStyle } />

        { cameraPaused && <Text>
          Camera is PAUSED
        </Text> }
      </> }

      { !cameraMounted && <Text>
        Camera is NOT mounted
      </Text> }
    </> : <Text>
      No camera device or permission
    </Text> }