diff --git a/.cspell-wordlist.txt b/.cspell-wordlist.txt
index 2e5092801..f9ee818fb 100644
--- a/.cspell-wordlist.txt
+++ b/.cspell-wordlist.txt
@@ -95,6 +95,7 @@ Português
codegen
cstdint
ocurred
+RNFS
libfbjni
libc
gradlew
@@ -104,3 +105,4 @@ POTTEDPLANT
TVMONITOR
sublist
TTFT
+worklet
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7989ebb5d..dd9585bc2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -24,7 +24,7 @@ jobs:
run: yarn lint
- name: Typecheck files
- run: yarn typecheck
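+        # Workspace packages are built in dependency order first so typecheck can resolve their emitted types.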
+ run: yarn workspaces foreach --all --topological-dev run prepare && yarn typecheck
build-library:
runs-on: ubuntu-latest
@@ -35,7 +35,5 @@ jobs:
- name: Setup
uses: ./.github/actions/setup
- - name: Build package
- run: |
- cd packages/react-native-executorch
- yarn prepare
+ - name: Build all packages
+ run: yarn workspaces foreach --all --topological-dev run prepare
diff --git a/.nvmrc b/.nvmrc
index 9a2a0e219..53d1c14db 100644
--- a/.nvmrc
+++ b/.nvmrc
@@ -1 +1 @@
-v20
+v22
diff --git a/apps/computer-vision/app.json b/apps/computer-vision/app.json
index 4d68c039b..65b8d1081 100644
--- a/apps/computer-vision/app.json
+++ b/apps/computer-vision/app.json
@@ -18,7 +18,8 @@
"bundleIdentifier": "com.anonymous.computervision",
"infoPlist": {
"NSCameraUsageDescription": "Process photo from camera"
- }
+ },
+ "appleTeamId": "B357MU264T"
},
"android": {
"adaptiveIcon": {
@@ -30,6 +31,17 @@
"web": {
"favicon": "./assets/icons/favicon.png"
},
- "plugins": ["expo-font", "expo-router"]
+ "plugins": [
+ "expo-font",
+ "expo-router",
+ [
+ "expo-build-properties",
+ {
+ "android": {
+ "minSdkVersion": 26
+ }
+ }
+ ]
+ ]
}
}
diff --git a/apps/computer-vision/app/_layout.tsx b/apps/computer-vision/app/_layout.tsx
index 5914d2fe8..35fba7fb1 100644
--- a/apps/computer-vision/app/_layout.tsx
+++ b/apps/computer-vision/app/_layout.tsx
@@ -1,4 +1,7 @@
import { Drawer } from 'expo-router/drawer';
+import { initExecutorch } from 'react-native-executorch';
+import { ExpoResourceFetcher } from '@react-native-executorch/expo-resource-fetcher';
+
import ColorPalette from '../colors';
import React, { useState } from 'react';
import { Text, StyleSheet, View } from 'react-native';
@@ -10,6 +13,10 @@ import {
} from '@react-navigation/drawer';
import { GeneratingContext } from '../context';
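+// Register the Expo resource fetcher once, at module load, before any ExecuTorch models are used.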
+initExecutorch({
+ resourceFetcher: ExpoResourceFetcher,
+});
+
interface CustomDrawerProps extends DrawerContentComponentProps {
isGenerating: boolean;
}
diff --git a/apps/computer-vision/app/camera_object_detection/index.tsx b/apps/computer-vision/app/camera_object_detection/index.tsx
new file mode 100644
index 000000000..74947bc80
--- /dev/null
+++ b/apps/computer-vision/app/camera_object_detection/index.tsx
@@ -0,0 +1,265 @@
+import React, { useEffect, useMemo, useState } from 'react';
+import {
+ View,
+ StyleSheet,
+ Text,
+ ActivityIndicator,
+ TouchableOpacity,
+} from 'react-native';
+import {
+ Camera,
+ useCameraDevices,
+ getCameraFormat,
+ Templates,
+ useFrameProcessor,
+ useCameraPermission,
+ type Frame,
+} from 'react-native-vision-camera';
+import { useResizePlugin } from 'vision-camera-resize-plugin';
+import {
+ ObjectDetectionModule,
+ SSDLITE_320_MOBILENET_V3_LARGE,
+} from 'react-native-executorch';
+import ScreenWrapper from '../../ScreenWrapper';
+import ColorPalette from '../../colors';
+
+export default function CameraObjectDetectionScreen() {
+ // Model state
+ const detectionModel = useMemo(() => new ObjectDetectionModule(), []);
+ const [isModelReady, setIsModelReady] = useState(false);
+
+ // Screen dimensions
+  // Camera setup (VisionCamera v4 API)
+ const devices = useCameraDevices();
+ const device = devices.find((d) => d.position === 'back') ?? devices[0];
+
+ const format = useMemo(() => {
+ if (device == null) return undefined;
+ return getCameraFormat(device, Templates.Video);
+ }, [device]);
+
+ const { hasPermission, requestPermission } = useCameraPermission();
+
+ // Resize plugin for efficient frame scaling
+ const { resize } = useResizePlugin();
+
+ // Load model
+ useEffect(() => {
+ (async () => {
+ try {
+ await detectionModel.load(SSDLITE_320_MOBILENET_V3_LARGE);
+ setIsModelReady(true);
+ } catch (error) {
+ console.error('Failed to load model:', error);
+ }
+ })();
+
+ return () => {
+ detectionModel.delete();
+ };
+ }, [detectionModel]);
+
+ // Frame processing with Vision Camera v4
+ const frameProcessor = useFrameProcessor(
+ (frame: Frame) => {
+ 'worklet';
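+      // Frame processors run as worklets on the camera thread, so everything
+      // captured here (the model, the resize plugin, component state) must be
+      // worklet-compatible.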
+
+ if (!isModelReady) {
+ return;
+ }
+
+ try {
+ // Use VisionCamera's resize plugin for better performance
+ const resized = resize(frame, {
+ scale: {
+ width: 640,
+ height: 640,
+ },
+ pixelFormat: 'rgb',
+ dataType: 'uint8',
+ });
+
+ // Prepare frame data for model
+ const frameData = {
+ data: resized.buffer,
+ width: 640,
+ height: 640,
+ };
+
+ // Run inference - generateFromFrame is JSI-bound for worklet compatibility
+ const result = detectionModel.generateFromFrame(frameData, 0.5);
+ console.log(result);
+ } catch (error: any) {
+ console.log(
+ 'Frame processing error:',
+ error?.message || 'Unknown error'
+ );
+ }
+ },
+ [isModelReady, detectionModel, resize]
+ );
+
+ // Loading state
+ if (!isModelReady) {
+ return (
+      <ScreenWrapper>
+        <View style={styles.loadingContainer}>
+          <ActivityIndicator size="large" color={ColorPalette.strongPrimary} />
+          <Text style={styles.loadingText}>Loading model...</Text>
+        </View>
+      </ScreenWrapper>
+ );
+ }
+
+ // Permission request
+ if (!hasPermission) {
+ return (
+      <ScreenWrapper>
+        <View style={styles.permissionContainer}>
+          <Text style={styles.permissionText}>
+            Camera permission is required
+          </Text>
+          <TouchableOpacity
+            style={styles.permissionButton}
+            onPress={requestPermission}
+          >
+            <Text style={styles.permissionButtonText}>Grant Permission</Text>
+          </TouchableOpacity>
+        </View>
+      </ScreenWrapper>
+ );
+ }
+
+ // No camera device
+ if (device == null) {
+ return (
+      <ScreenWrapper>
+        <View style={styles.errorContainer}>
+          <Text style={styles.errorText}>No camera device found</Text>
+        </View>
+      </ScreenWrapper>
+ );
+ }
+
+ return (
+    <ScreenWrapper>
+      <View style={styles.container}>
+        {/* Camera View */}
+        <Camera
+          style={StyleSheet.absoluteFill}
+          device={device}
+          format={format}
+          isActive={true}
+          frameProcessor={frameProcessor}
+        />
+      </View>
+    </ScreenWrapper>
+ );
+}
+
+const styles = StyleSheet.create({
+ container: {
+ flex: 1,
+ },
+ statsContainer: {
+ position: 'absolute',
+ top: 20,
+ right: 20,
+ flexDirection: 'row',
+ gap: 12,
+ },
+ statBox: {
+ backgroundColor: 'rgba(0, 0, 0, 0.7)',
+ paddingHorizontal: 16,
+ paddingVertical: 8,
+ borderRadius: 12,
+ alignItems: 'center',
+ minWidth: 70,
+ },
+ statLabel: {
+ color: '#888',
+ fontSize: 11,
+ fontWeight: '600',
+ textTransform: 'uppercase',
+ },
+ statValue: {
+ color: '#fff',
+ fontSize: 24,
+ fontWeight: 'bold',
+ marginTop: 2,
+ },
+ detectionList: {
+ position: 'absolute',
+ bottom: 20,
+ left: 20,
+ right: 20,
+ gap: 8,
+ },
+ detectionItem: {
+ backgroundColor: 'rgba(0, 0, 0, 0.8)',
+ paddingHorizontal: 16,
+ paddingVertical: 12,
+ borderRadius: 12,
+ flexDirection: 'row',
+ justifyContent: 'space-between',
+ alignItems: 'center',
+ borderLeftWidth: 4,
+ },
+ detectionLabel: {
+ color: 'white',
+ fontSize: 16,
+ fontWeight: '600',
+ textTransform: 'capitalize',
+ },
+ detectionScore: {
+ color: '#4ECDC4',
+ fontSize: 16,
+ fontWeight: 'bold',
+ },
+ loadingContainer: {
+ flex: 1,
+ justifyContent: 'center',
+ alignItems: 'center',
+ },
+ loadingText: {
+ marginTop: 16,
+ fontSize: 16,
+ color: ColorPalette.strongPrimary,
+ },
+ errorContainer: {
+ flex: 1,
+ justifyContent: 'center',
+ alignItems: 'center',
+ padding: 20,
+ },
+ errorText: {
+ fontSize: 16,
+ color: '#d32f2f',
+ textAlign: 'center',
+ },
+ permissionContainer: {
+ flex: 1,
+ justifyContent: 'center',
+ alignItems: 'center',
+ padding: 20,
+ },
+ permissionText: {
+ fontSize: 18,
+ color: ColorPalette.strongPrimary,
+ marginBottom: 20,
+ textAlign: 'center',
+ },
+ permissionButton: {
+ backgroundColor: ColorPalette.strongPrimary,
+ paddingHorizontal: 24,
+ paddingVertical: 12,
+ borderRadius: 8,
+ },
+ permissionButtonText: {
+ color: 'white',
+ fontSize: 16,
+ fontWeight: 'bold',
+ },
+});
diff --git a/apps/computer-vision/app/index.tsx b/apps/computer-vision/app/index.tsx
index 38a77fc27..e35f0ef48 100644
--- a/apps/computer-vision/app/index.tsx
+++ b/apps/computer-vision/app/index.tsx
@@ -53,6 +53,12 @@ export default function Home() {
>
Image Generation
+      <TouchableOpacity
+        style={[styles.button, styles.cameraButton]}
+        onPress={() => router.navigate('camera_object_detection/')}
+      >
+        <Text style={styles.buttonText}>🎥 Camera Object Detection</Text>
+      </TouchableOpacity>
);
@@ -92,6 +98,12 @@ const styles = StyleSheet.create({
alignItems: 'center',
marginBottom: 10,
},
+ cameraButton: {
+ backgroundColor: '#2563eb',
+ },
+ testButton: {
+ backgroundColor: '#10b981',
+ },
buttonText: {
color: 'white',
fontSize: fontSizes.md,
diff --git a/apps/computer-vision/app/object_detection/index.tsx b/apps/computer-vision/app/object_detection/index.tsx
index 6a43dd920..9e60589fb 100644
--- a/apps/computer-vision/app/object_detection/index.tsx
+++ b/apps/computer-vision/app/object_detection/index.tsx
@@ -1,16 +1,66 @@
import Spinner from '../../components/Spinner';
-import { BottomBar } from '../../components/BottomBar';
import { getImage } from '../../utils';
import {
Detection,
useObjectDetection,
SSDLITE_320_MOBILENET_V3_LARGE,
} from 'react-native-executorch';
-import { View, StyleSheet, Image } from 'react-native';
+import { View, StyleSheet, Image, TouchableOpacity, Text } from 'react-native';
import ImageWithBboxes from '../../components/ImageWithBboxes';
import React, { useContext, useEffect, useState } from 'react';
import { GeneratingContext } from '../../context';
import ScreenWrapper from '../../ScreenWrapper';
+import ColorPalette from '../../colors';
+import { Images } from 'react-native-nitro-image';
+
+// Helper function to convert image URI to raw pixel data using NitroImage
+async function imageUriToPixelData(
+ uri: string,
+ targetWidth: number,
+ targetHeight: number
+): Promise<{
+ data: ArrayBuffer;
+ width: number;
+ height: number;
+ channels: number;
+}> {
+ try {
+ // Load image and resize to target dimensions
+ const image = await Images.loadFromFileAsync(uri);
+ const resized = image.resize(targetWidth, targetHeight);
+
+ // Get pixel data as ArrayBuffer (RGBA format)
+ const pixelData = resized.toRawPixelData();
+ const buffer =
+ pixelData instanceof ArrayBuffer ? pixelData : pixelData.buffer;
+
+ // Calculate actual buffer dimensions (accounts for device pixel ratio)
+ const bufferSize = buffer?.byteLength || 0;
+ const totalPixels = bufferSize / 4; // RGBA = 4 bytes per pixel
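+    // The decoded buffer can be scaled by the device pixel ratio, so derive
+    // the real dimensions from the byte count: with totalPixels = w * h and
+    // aspectRatio = w / h, we get h = sqrt(totalPixels / aspectRatio) and
+    // w = totalPixels / h.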
+ const aspectRatio = targetWidth / targetHeight;
+ const actualHeight = Math.sqrt(totalPixels / aspectRatio);
+ const actualWidth = totalPixels / actualHeight;
+
+ console.log('Requested:', targetWidth, 'x', targetHeight);
+ console.log('Buffer size:', bufferSize);
+ console.log(
+ 'Actual dimensions:',
+ Math.round(actualWidth),
+ 'x',
+ Math.round(actualHeight)
+ );
+
+ return {
+ data: buffer,
+ width: Math.round(actualWidth),
+ height: Math.round(actualHeight),
+ channels: 4, // RGBA
+ };
+ } catch (error) {
+ console.error('Error loading image with NitroImage:', error);
+ throw error;
+ }
+}
export default function ObjectDetectionScreen() {
const [imageUri, setImageUri] = useState('');
@@ -42,10 +92,41 @@ export default function ObjectDetectionScreen() {
const runForward = async () => {
if (imageUri) {
try {
- const output = await ssdLite.forward(imageUri);
+ console.log('Running forward with string URI...');
+ const output = await ssdLite.forward(imageUri, 0.5);
+ console.log('String URI result:', output.length, 'detections');
setResults(output);
} catch (e) {
- console.error(e);
+ console.error('Error in runForward:', e);
+ }
+ }
+ };
+
+ const runForwardPixels = async () => {
+ if (imageUri && imageDimensions) {
+ try {
+ console.log('Converting image to pixel data...');
+ // Resize to 640x640 to avoid memory issues
+ const intermediateSize = 640;
+ const pixelData = await imageUriToPixelData(
+ imageUri,
+ intermediateSize,
+ intermediateSize
+ );
+
+ console.log('Running forward with pixel data...', {
+ width: pixelData.width,
+ height: pixelData.height,
+ channels: pixelData.channels,
+ dataSize: pixelData.data.byteLength,
+ });
+
+ // Run inference using unified forward() API
+ const output = await ssdLite.forward(pixelData, 0.5);
+ console.log('Pixel data result:', output.length, 'detections');
+ setResults(output);
+ } catch (e) {
+ console.error('Error in runForwardPixels:', e);
}
}
};
@@ -81,10 +162,41 @@ export default function ObjectDetectionScreen() {
)}
-
+
+      {/* Custom bottom bar with two buttons */}
+      <View style={styles.bottomContainer}>
+        <View style={styles.bottomIconsContainer}>
+          <TouchableOpacity onPress={() => handleCameraPress(false)}>
+            <Text style={styles.iconText}>📷 Gallery</Text>
+          </TouchableOpacity>
+        </View>
+        <View style={styles.buttonsRow}>
+          <TouchableOpacity
+            style={[
+              styles.button,
+              styles.halfButton,
+              !imageUri && styles.buttonDisabled,
+            ]}
+            onPress={runForward}
+            disabled={!imageUri}
+          >
+            <Text style={styles.buttonText}>Run (String)</Text>
+          </TouchableOpacity>
+          <TouchableOpacity
+            style={[
+              styles.button,
+              styles.halfButton,
+              !imageUri && styles.buttonDisabled,
+            ]}
+            onPress={runForwardPixels}
+            disabled={!imageUri}
+          >
+            <Text style={styles.buttonText}>Run (Pixels)</Text>
+          </TouchableOpacity>
+        </View>
+      </View>
);
}
@@ -129,4 +241,43 @@ const styles = StyleSheet.create({
width: '100%',
height: '100%',
},
+ bottomContainer: {
+ width: '100%',
+ gap: 15,
+ alignItems: 'center',
+ padding: 16,
+ flex: 1,
+ },
+ bottomIconsContainer: {
+ flexDirection: 'row',
+ justifyContent: 'center',
+ width: '100%',
+ },
+ iconText: {
+ fontSize: 16,
+ color: ColorPalette.primary,
+ },
+ buttonsRow: {
+ flexDirection: 'row',
+ width: '100%',
+ gap: 10,
+ },
+ button: {
+ height: 50,
+ justifyContent: 'center',
+ alignItems: 'center',
+ backgroundColor: ColorPalette.primary,
+ color: '#fff',
+ borderRadius: 8,
+ },
+ halfButton: {
+ flex: 1,
+ },
+ buttonDisabled: {
+ opacity: 0.5,
+ },
+ buttonText: {
+ color: '#fff',
+ fontSize: 16,
+ },
});
diff --git a/apps/computer-vision/package.json b/apps/computer-vision/package.json
index 63885109a..6fe0ffe97 100644
--- a/apps/computer-vision/package.json
+++ b/apps/computer-vision/package.json
@@ -11,17 +11,19 @@
"lint": "eslint . --ext .ts,.tsx --fix"
},
"dependencies": {
- "@react-native/metro-config": "^0.76.3",
+ "@react-native-executorch/expo-resource-fetcher": "workspace:*",
+ "@react-native/metro-config": "^0.81.5",
"@react-navigation/drawer": "^7.3.9",
"@react-navigation/native": "^7.1.6",
"@shopify/react-native-skia": "2.2.12",
"expo": "^54.0.27",
+ "expo-build-properties": "~1.0.10",
"expo-constants": "~18.0.11",
"expo-font": "~14.0.10",
"expo-linking": "~8.0.10",
"expo-router": "~6.0.17",
"expo-status-bar": "~3.0.9",
- "metro-config": "^0.81.0",
+ "metro-config": "^0.81.5",
"react": "19.1.0",
"react-native": "0.81.5",
"react-native-device-info": "^14.0.4",
@@ -29,17 +31,20 @@
"react-native-gesture-handler": "~2.28.0",
"react-native-image-picker": "^7.2.2",
"react-native-loading-spinner-overlay": "^3.0.1",
- "react-native-reanimated": "~4.1.1",
+ "react-native-reanimated": "~4.2.1",
"react-native-safe-area-context": "~5.6.0",
"react-native-screens": "~4.16.0",
"react-native-svg": "15.12.1",
"react-native-svg-transformer": "^1.5.0",
- "react-native-worklets": "0.5.1"
+ "react-native-vision-camera": "4.7.3",
+ "react-native-worklets": "^0.7.2",
+ "react-native-worklets-core": "^1.6.2",
+ "vision-camera-resize-plugin": "^3.2.0"
},
"devDependencies": {
"@babel/core": "^7.25.2",
"@types/pngjs": "^6.0.5",
- "@types/react": "~19.1.10"
+ "@types/react": "~19.2.0"
},
"private": true
}
diff --git a/apps/computer-vision/tsconfig.json b/apps/computer-vision/tsconfig.json
index 47026ce43..a08f2140a 100644
--- a/apps/computer-vision/tsconfig.json
+++ b/apps/computer-vision/tsconfig.json
@@ -9,7 +9,10 @@
"customConditions": ["react-native"],
"noEmit": true,
"paths": {
- "react-native-executorch": ["../../packages/react-native-executorch/src"]
+ "react-native-executorch": ["../../packages/react-native-executorch/src"],
+ "@react-native-executorch/expo-resource-fetcher": [
+ "../../packages/expo-resource-fetcher/src"
+ ]
}
}
}
diff --git a/apps/llm/app/_layout.tsx b/apps/llm/app/_layout.tsx
index 68c715a80..5ece80f1f 100644
--- a/apps/llm/app/_layout.tsx
+++ b/apps/llm/app/_layout.tsx
@@ -1,8 +1,9 @@
import { Drawer } from 'expo-router/drawer';
+import { initExecutorch } from 'react-native-executorch';
+import { ExpoResourceFetcher } from '@react-native-executorch/expo-resource-fetcher';
import ColorPalette from '../colors';
import React, { useState } from 'react';
import { Text, StyleSheet, View } from 'react-native';
-
import {
DrawerContentComponentProps,
DrawerContentScrollView,
@@ -10,6 +11,10 @@ import {
} from '@react-navigation/drawer';
import { GeneratingContext } from '../context';
+initExecutorch({
+ resourceFetcher: ExpoResourceFetcher,
+});
+
interface CustomDrawerProps extends DrawerContentComponentProps {
isGenerating: boolean;
}
diff --git a/apps/llm/package.json b/apps/llm/package.json
index de046a299..04597d963 100644
--- a/apps/llm/package.json
+++ b/apps/llm/package.json
@@ -11,7 +11,8 @@
"lint": "eslint . --ext .ts,.tsx --fix"
},
"dependencies": {
- "@react-native/metro-config": "^0.76.3",
+ "@react-native-executorch/expo-resource-fetcher": "workspace:*",
+ "@react-native/metro-config": "^0.81.5",
"@react-navigation/drawer": "^7.3.9",
"@react-navigation/native": "^7.1.6",
"expo": "^54.0.27",
@@ -22,7 +23,7 @@
"expo-linking": "~8.0.10",
"expo-router": "~6.0.17",
"expo-status-bar": "~3.0.9",
- "metro-config": "^0.81.0",
+ "metro-config": "^0.81.5",
"react": "19.1.0",
"react-native": "0.81.5",
"react-native-audio-api": "^0.8.2",
diff --git a/apps/llm/tsconfig.json b/apps/llm/tsconfig.json
index 47026ce43..a08f2140a 100644
--- a/apps/llm/tsconfig.json
+++ b/apps/llm/tsconfig.json
@@ -9,7 +9,10 @@
"customConditions": ["react-native"],
"noEmit": true,
"paths": {
- "react-native-executorch": ["../../packages/react-native-executorch/src"]
+ "react-native-executorch": ["../../packages/react-native-executorch/src"],
+ "@react-native-executorch/expo-resource-fetcher": [
+ "../../packages/expo-resource-fetcher/src"
+ ]
}
}
}
diff --git a/apps/speech/App.tsx b/apps/speech/App.tsx
index af0598b59..ab036678e 100644
--- a/apps/speech/App.tsx
+++ b/apps/speech/App.tsx
@@ -5,6 +5,12 @@ import { SpeechToTextScreen } from './screens/SpeechToTextScreen';
import ColorPalette from './colors';
import ExecutorchLogo from './assets/executorch.svg';
import { Quiz } from './screens/Quiz';
+import { initExecutorch } from 'react-native-executorch';
+import { ExpoResourceFetcher } from '@react-native-executorch/expo-resource-fetcher';
+
+initExecutorch({
+ resourceFetcher: ExpoResourceFetcher,
+});
export default function App() {
const [currentScreen, setCurrentScreen] = useState<
diff --git a/apps/speech/package.json b/apps/speech/package.json
index 094fa2b78..e6047a385 100644
--- a/apps/speech/package.json
+++ b/apps/speech/package.json
@@ -11,12 +11,13 @@
"lint": "eslint . --ext .ts,.tsx --fix"
},
"dependencies": {
- "@react-native/metro-config": "^0.76.3",
+ "@react-native-executorch/expo-resource-fetcher": "workspace:*",
+ "@react-native/metro-config": "^0.81.5",
"buffer": "^6.0.3",
"expo": "^54.0.27",
"expo-font": "~14.0.10",
"expo-status-bar": "~3.0.9",
- "metro-config": "^0.81.0",
+ "metro-config": "^0.81.5",
"react": "19.1.0",
"react-native": "0.81.5",
"react-native-audio-api": "0.6.5",
diff --git a/apps/speech/tsconfig.json b/apps/speech/tsconfig.json
index 47026ce43..a08f2140a 100644
--- a/apps/speech/tsconfig.json
+++ b/apps/speech/tsconfig.json
@@ -9,7 +9,10 @@
"customConditions": ["react-native"],
"noEmit": true,
"paths": {
- "react-native-executorch": ["../../packages/react-native-executorch/src"]
+ "react-native-executorch": ["../../packages/react-native-executorch/src"],
+ "@react-native-executorch/expo-resource-fetcher": [
+ "../../packages/expo-resource-fetcher/src"
+ ]
}
}
}
diff --git a/apps/text-embeddings/app/_layout.tsx b/apps/text-embeddings/app/_layout.tsx
index 16bf0e87a..c0633a993 100644
--- a/apps/text-embeddings/app/_layout.tsx
+++ b/apps/text-embeddings/app/_layout.tsx
@@ -1,4 +1,6 @@
import { Drawer } from 'expo-router/drawer';
+import { initExecutorch } from 'react-native-executorch';
+import { ExpoResourceFetcher } from '@react-native-executorch/expo-resource-fetcher';
import ColorPalette from '../colors';
import React, { useState } from 'react';
import { Text, StyleSheet, View } from 'react-native';
@@ -10,6 +12,10 @@ import {
} from '@react-navigation/drawer';
import { GeneratingContext } from '../context';
+initExecutorch({
+ resourceFetcher: ExpoResourceFetcher,
+});
+
interface CustomDrawerProps extends DrawerContentComponentProps {
isGenerating: boolean;
}
diff --git a/apps/text-embeddings/package.json b/apps/text-embeddings/package.json
index cbf0da96c..64093b919 100644
--- a/apps/text-embeddings/package.json
+++ b/apps/text-embeddings/package.json
@@ -14,6 +14,7 @@
"@react-navigation/native": "*"
},
"dependencies": {
+ "@react-native-executorch/expo-resource-fetcher": "workspace:*",
"@react-navigation/drawer": "^7.3.9",
"expo": "^54.0.27",
"expo-constants": "~18.0.11",
diff --git a/apps/text-embeddings/tsconfig.json b/apps/text-embeddings/tsconfig.json
index 47026ce43..a08f2140a 100644
--- a/apps/text-embeddings/tsconfig.json
+++ b/apps/text-embeddings/tsconfig.json
@@ -9,7 +9,10 @@
"customConditions": ["react-native"],
"noEmit": true,
"paths": {
- "react-native-executorch": ["../../packages/react-native-executorch/src"]
+ "react-native-executorch": ["../../packages/react-native-executorch/src"],
+ "@react-native-executorch/expo-resource-fetcher": [
+ "../../packages/expo-resource-fetcher/src"
+ ]
}
}
}
diff --git a/docs/docs/06-api-reference/index.md b/docs/docs/06-api-reference/index.md
index 1415421de..debc55540 100644
--- a/docs/docs/06-api-reference/index.md
+++ b/docs/docs/06-api-reference/index.md
@@ -20,159 +20,159 @@
## Models - Classification
-- [EFFICIENTNET\_V2\_S](variables/EFFICIENTNET_V2_S.md)
+- [EFFICIENTNET_V2_S](variables/EFFICIENTNET_V2_S.md)
## Models - Image Embeddings
-- [CLIP\_VIT\_BASE\_PATCH32\_IMAGE](variables/CLIP_VIT_BASE_PATCH32_IMAGE.md)
+- [CLIP_VIT_BASE_PATCH32_IMAGE](variables/CLIP_VIT_BASE_PATCH32_IMAGE.md)
## Models - Image Generation
-- [BK\_SDM\_TINY\_VPRED\_256](variables/BK_SDM_TINY_VPRED_256.md)
-- [BK\_SDM\_TINY\_VPRED\_512](variables/BK_SDM_TINY_VPRED_512.md)
+- [BK_SDM_TINY_VPRED_256](variables/BK_SDM_TINY_VPRED_256.md)
+- [BK_SDM_TINY_VPRED_512](variables/BK_SDM_TINY_VPRED_512.md)
## Models - Image Segmentation
-- [DEEPLAB\_V3\_RESNET50](variables/DEEPLAB_V3_RESNET50.md)
+- [DEEPLAB_V3_RESNET50](variables/DEEPLAB_V3_RESNET50.md)
## Models - LMM
-- [HAMMER2\_1\_0\_5B](variables/HAMMER2_1_0_5B.md)
-- [HAMMER2\_1\_0\_5B\_QUANTIZED](variables/HAMMER2_1_0_5B_QUANTIZED.md)
-- [HAMMER2\_1\_1\_5B](variables/HAMMER2_1_1_5B.md)
-- [HAMMER2\_1\_1\_5B\_QUANTIZED](variables/HAMMER2_1_1_5B_QUANTIZED.md)
-- [HAMMER2\_1\_3B](variables/HAMMER2_1_3B.md)
-- [HAMMER2\_1\_3B\_QUANTIZED](variables/HAMMER2_1_3B_QUANTIZED.md)
-- [LLAMA3\_2\_1B](variables/LLAMA3_2_1B.md)
-- [LLAMA3\_2\_1B\_QLORA](variables/LLAMA3_2_1B_QLORA.md)
-- [LLAMA3\_2\_1B\_SPINQUANT](variables/LLAMA3_2_1B_SPINQUANT.md)
-- [LLAMA3\_2\_3B](variables/LLAMA3_2_3B.md)
-- [LLAMA3\_2\_3B\_QLORA](variables/LLAMA3_2_3B_QLORA.md)
-- [LLAMA3\_2\_3B\_SPINQUANT](variables/LLAMA3_2_3B_SPINQUANT.md)
-- [PHI\_4\_MINI\_4B](variables/PHI_4_MINI_4B.md)
-- [PHI\_4\_MINI\_4B\_QUANTIZED](variables/PHI_4_MINI_4B_QUANTIZED.md)
-- [QWEN2\_5\_0\_5B](variables/QWEN2_5_0_5B.md)
-- [QWEN2\_5\_0\_5B\_QUANTIZED](variables/QWEN2_5_0_5B_QUANTIZED.md)
-- [QWEN2\_5\_1\_5B](variables/QWEN2_5_1_5B.md)
-- [QWEN2\_5\_1\_5B\_QUANTIZED](variables/QWEN2_5_1_5B_QUANTIZED.md)
-- [QWEN2\_5\_3B](variables/QWEN2_5_3B.md)
-- [QWEN2\_5\_3B\_QUANTIZED](variables/QWEN2_5_3B_QUANTIZED.md)
-- [QWEN3\_0\_6B](variables/QWEN3_0_6B.md)
-- [QWEN3\_0\_6B\_QUANTIZED](variables/QWEN3_0_6B_QUANTIZED.md)
-- [QWEN3\_1\_7B](variables/QWEN3_1_7B.md)
-- [QWEN3\_1\_7B\_QUANTIZED](variables/QWEN3_1_7B_QUANTIZED.md)
-- [QWEN3\_4B](variables/QWEN3_4B.md)
-- [QWEN3\_4B\_QUANTIZED](variables/QWEN3_4B_QUANTIZED.md)
-- [SMOLLM2\_1\_1\_7B](variables/SMOLLM2_1_1_7B.md)
-- [SMOLLM2\_1\_1\_7B\_QUANTIZED](variables/SMOLLM2_1_1_7B_QUANTIZED.md)
-- [SMOLLM2\_1\_135M](variables/SMOLLM2_1_135M.md)
-- [SMOLLM2\_1\_135M\_QUANTIZED](variables/SMOLLM2_1_135M_QUANTIZED.md)
-- [SMOLLM2\_1\_360M](variables/SMOLLM2_1_360M.md)
-- [SMOLLM2\_1\_360M\_QUANTIZED](variables/SMOLLM2_1_360M_QUANTIZED.md)
+- [HAMMER2_1_0_5B](variables/HAMMER2_1_0_5B.md)
+- [HAMMER2_1_0_5B_QUANTIZED](variables/HAMMER2_1_0_5B_QUANTIZED.md)
+- [HAMMER2_1_1_5B](variables/HAMMER2_1_1_5B.md)
+- [HAMMER2_1_1_5B_QUANTIZED](variables/HAMMER2_1_1_5B_QUANTIZED.md)
+- [HAMMER2_1_3B](variables/HAMMER2_1_3B.md)
+- [HAMMER2_1_3B_QUANTIZED](variables/HAMMER2_1_3B_QUANTIZED.md)
+- [LLAMA3_2_1B](variables/LLAMA3_2_1B.md)
+- [LLAMA3_2_1B_QLORA](variables/LLAMA3_2_1B_QLORA.md)
+- [LLAMA3_2_1B_SPINQUANT](variables/LLAMA3_2_1B_SPINQUANT.md)
+- [LLAMA3_2_3B](variables/LLAMA3_2_3B.md)
+- [LLAMA3_2_3B_QLORA](variables/LLAMA3_2_3B_QLORA.md)
+- [LLAMA3_2_3B_SPINQUANT](variables/LLAMA3_2_3B_SPINQUANT.md)
+- [PHI_4_MINI_4B](variables/PHI_4_MINI_4B.md)
+- [PHI_4_MINI_4B_QUANTIZED](variables/PHI_4_MINI_4B_QUANTIZED.md)
+- [QWEN2_5_0_5B](variables/QWEN2_5_0_5B.md)
+- [QWEN2_5_0_5B_QUANTIZED](variables/QWEN2_5_0_5B_QUANTIZED.md)
+- [QWEN2_5_1_5B](variables/QWEN2_5_1_5B.md)
+- [QWEN2_5_1_5B_QUANTIZED](variables/QWEN2_5_1_5B_QUANTIZED.md)
+- [QWEN2_5_3B](variables/QWEN2_5_3B.md)
+- [QWEN2_5_3B_QUANTIZED](variables/QWEN2_5_3B_QUANTIZED.md)
+- [QWEN3_0_6B](variables/QWEN3_0_6B.md)
+- [QWEN3_0_6B_QUANTIZED](variables/QWEN3_0_6B_QUANTIZED.md)
+- [QWEN3_1_7B](variables/QWEN3_1_7B.md)
+- [QWEN3_1_7B_QUANTIZED](variables/QWEN3_1_7B_QUANTIZED.md)
+- [QWEN3_4B](variables/QWEN3_4B.md)
+- [QWEN3_4B_QUANTIZED](variables/QWEN3_4B_QUANTIZED.md)
+- [SMOLLM2_1_1_7B](variables/SMOLLM2_1_1_7B.md)
+- [SMOLLM2_1_1_7B_QUANTIZED](variables/SMOLLM2_1_1_7B_QUANTIZED.md)
+- [SMOLLM2_1_135M](variables/SMOLLM2_1_135M.md)
+- [SMOLLM2_1_135M_QUANTIZED](variables/SMOLLM2_1_135M_QUANTIZED.md)
+- [SMOLLM2_1_360M](variables/SMOLLM2_1_360M.md)
+- [SMOLLM2_1_360M_QUANTIZED](variables/SMOLLM2_1_360M_QUANTIZED.md)
## Models - Object Detection
-- [SSDLITE\_320\_MOBILENET\_V3\_LARGE](variables/SSDLITE_320_MOBILENET_V3_LARGE.md)
+- [SSDLITE_320_MOBILENET_V3_LARGE](variables/SSDLITE_320_MOBILENET_V3_LARGE.md)
## Models - Speech To Text
-- [WHISPER\_BASE](variables/WHISPER_BASE.md)
-- [WHISPER\_BASE\_EN](variables/WHISPER_BASE_EN.md)
-- [WHISPER\_SMALL](variables/WHISPER_SMALL.md)
-- [WHISPER\_SMALL\_EN](variables/WHISPER_SMALL_EN.md)
-- [WHISPER\_TINY](variables/WHISPER_TINY.md)
-- [WHISPER\_TINY\_EN](variables/WHISPER_TINY_EN.md)
-- [WHISPER\_TINY\_EN\_QUANTIZED](variables/WHISPER_TINY_EN_QUANTIZED.md)
+- [WHISPER_BASE](variables/WHISPER_BASE.md)
+- [WHISPER_BASE_EN](variables/WHISPER_BASE_EN.md)
+- [WHISPER_SMALL](variables/WHISPER_SMALL.md)
+- [WHISPER_SMALL_EN](variables/WHISPER_SMALL_EN.md)
+- [WHISPER_TINY](variables/WHISPER_TINY.md)
+- [WHISPER_TINY_EN](variables/WHISPER_TINY_EN.md)
+- [WHISPER_TINY_EN_QUANTIZED](variables/WHISPER_TINY_EN_QUANTIZED.md)
## Models - Style Transfer
-- [STYLE\_TRANSFER\_CANDY](variables/STYLE_TRANSFER_CANDY.md)
-- [STYLE\_TRANSFER\_MOSAIC](variables/STYLE_TRANSFER_MOSAIC.md)
-- [STYLE\_TRANSFER\_RAIN\_PRINCESS](variables/STYLE_TRANSFER_RAIN_PRINCESS.md)
-- [STYLE\_TRANSFER\_UDNIE](variables/STYLE_TRANSFER_UDNIE.md)
+- [STYLE_TRANSFER_CANDY](variables/STYLE_TRANSFER_CANDY.md)
+- [STYLE_TRANSFER_MOSAIC](variables/STYLE_TRANSFER_MOSAIC.md)
+- [STYLE_TRANSFER_RAIN_PRINCESS](variables/STYLE_TRANSFER_RAIN_PRINCESS.md)
+- [STYLE_TRANSFER_UDNIE](variables/STYLE_TRANSFER_UDNIE.md)
## Models - Text Embeddings
-- [ALL\_MINILM\_L6\_V2](variables/ALL_MINILM_L6_V2.md)
-- [ALL\_MPNET\_BASE\_V2](variables/ALL_MPNET_BASE_V2.md)
-- [CLIP\_VIT\_BASE\_PATCH32\_TEXT](variables/CLIP_VIT_BASE_PATCH32_TEXT.md)
-- [MULTI\_QA\_MINILM\_L6\_COS\_V1](variables/MULTI_QA_MINILM_L6_COS_V1.md)
-- [MULTI\_QA\_MPNET\_BASE\_DOT\_V1](variables/MULTI_QA_MPNET_BASE_DOT_V1.md)
+- [ALL_MINILM_L6_V2](variables/ALL_MINILM_L6_V2.md)
+- [ALL_MPNET_BASE_V2](variables/ALL_MPNET_BASE_V2.md)
+- [CLIP_VIT_BASE_PATCH32_TEXT](variables/CLIP_VIT_BASE_PATCH32_TEXT.md)
+- [MULTI_QA_MINILM_L6_COS_V1](variables/MULTI_QA_MINILM_L6_COS_V1.md)
+- [MULTI_QA_MPNET_BASE_DOT_V1](variables/MULTI_QA_MPNET_BASE_DOT_V1.md)
## Models - Text to Speech
-- [KOKORO\_MEDIUM](variables/KOKORO_MEDIUM.md)
-- [KOKORO\_SMALL](variables/KOKORO_SMALL.md)
+- [KOKORO_MEDIUM](variables/KOKORO_MEDIUM.md)
+- [KOKORO_SMALL](variables/KOKORO_SMALL.md)
## Models - Voice Activity Detection
-- [FSMN\_VAD](variables/FSMN_VAD.md)
+- [FSMN_VAD](variables/FSMN_VAD.md)
## OCR Supported Alphabets
-- [OCR\_ABAZA](variables/OCR_ABAZA.md)
-- [OCR\_ADYGHE](variables/OCR_ADYGHE.md)
-- [OCR\_AFRIKAANS](variables/OCR_AFRIKAANS.md)
-- [OCR\_ALBANIAN](variables/OCR_ALBANIAN.md)
-- [OCR\_AVAR](variables/OCR_AVAR.md)
-- [OCR\_AZERBAIJANI](variables/OCR_AZERBAIJANI.md)
-- [OCR\_BELARUSIAN](variables/OCR_BELARUSIAN.md)
-- [OCR\_BOSNIAN](variables/OCR_BOSNIAN.md)
-- [OCR\_BULGARIAN](variables/OCR_BULGARIAN.md)
-- [OCR\_CHECHEN](variables/OCR_CHECHEN.md)
-- [OCR\_CROATIAN](variables/OCR_CROATIAN.md)
-- [OCR\_CZECH](variables/OCR_CZECH.md)
-- [OCR\_DANISH](variables/OCR_DANISH.md)
-- [OCR\_DARGWA](variables/OCR_DARGWA.md)
-- [OCR\_DUTCH](variables/OCR_DUTCH.md)
-- [OCR\_ENGLISH](variables/OCR_ENGLISH.md)
-- [OCR\_ESTONIAN](variables/OCR_ESTONIAN.md)
-- [OCR\_FRENCH](variables/OCR_FRENCH.md)
-- [OCR\_GERMAN](variables/OCR_GERMAN.md)
-- [OCR\_HUNGARIAN](variables/OCR_HUNGARIAN.md)
-- [OCR\_ICELANDIC](variables/OCR_ICELANDIC.md)
-- [OCR\_INDONESIAN](variables/OCR_INDONESIAN.md)
-- [OCR\_INGUSH](variables/OCR_INGUSH.md)
-- [OCR\_IRISH](variables/OCR_IRISH.md)
-- [OCR\_ITALIAN](variables/OCR_ITALIAN.md)
-- [OCR\_JAPANESE](variables/OCR_JAPANESE.md)
-- [OCR\_KANNADA](variables/OCR_KANNADA.md)
-- [OCR\_KARBADIAN](variables/OCR_KARBADIAN.md)
-- [OCR\_KOREAN](variables/OCR_KOREAN.md)
-- [OCR\_KURDISH](variables/OCR_KURDISH.md)
-- [OCR\_LAK](variables/OCR_LAK.md)
-- [OCR\_LATIN](variables/OCR_LATIN.md)
-- [OCR\_LATVIAN](variables/OCR_LATVIAN.md)
-- [OCR\_LEZGHIAN](variables/OCR_LEZGHIAN.md)
-- [OCR\_LITHUANIAN](variables/OCR_LITHUANIAN.md)
-- [OCR\_MALAY](variables/OCR_MALAY.md)
-- [OCR\_MALTESE](variables/OCR_MALTESE.md)
-- [OCR\_MAORI](variables/OCR_MAORI.md)
-- [OCR\_MONGOLIAN](variables/OCR_MONGOLIAN.md)
-- [OCR\_NORWEGIAN](variables/OCR_NORWEGIAN.md)
-- [OCR\_OCCITAN](variables/OCR_OCCITAN.md)
-- [OCR\_PALI](variables/OCR_PALI.md)
-- [OCR\_POLISH](variables/OCR_POLISH.md)
-- [OCR\_PORTUGUESE](variables/OCR_PORTUGUESE.md)
-- [OCR\_ROMANIAN](variables/OCR_ROMANIAN.md)
-- [OCR\_RUSSIAN](variables/OCR_RUSSIAN.md)
-- [OCR\_SERBIAN\_CYRILLIC](variables/OCR_SERBIAN_CYRILLIC.md)
-- [OCR\_SERBIAN\_LATIN](variables/OCR_SERBIAN_LATIN.md)
-- [OCR\_SIMPLIFIED\_CHINESE](variables/OCR_SIMPLIFIED_CHINESE.md)
-- [OCR\_SLOVAK](variables/OCR_SLOVAK.md)
-- [OCR\_SLOVENIAN](variables/OCR_SLOVENIAN.md)
-- [OCR\_SPANISH](variables/OCR_SPANISH.md)
-- [OCR\_SWAHILI](variables/OCR_SWAHILI.md)
-- [OCR\_SWEDISH](variables/OCR_SWEDISH.md)
-- [OCR\_TABASSARAN](variables/OCR_TABASSARAN.md)
-- [OCR\_TAGALOG](variables/OCR_TAGALOG.md)
-- [OCR\_TAJIK](variables/OCR_TAJIK.md)
-- [OCR\_TELUGU](variables/OCR_TELUGU.md)
-- [OCR\_TURKISH](variables/OCR_TURKISH.md)
-- [OCR\_UKRAINIAN](variables/OCR_UKRAINIAN.md)
-- [OCR\_UZBEK](variables/OCR_UZBEK.md)
-- [OCR\_VIETNAMESE](variables/OCR_VIETNAMESE.md)
-- [OCR\_WELSH](variables/OCR_WELSH.md)
+- [OCR_ABAZA](variables/OCR_ABAZA.md)
+- [OCR_ADYGHE](variables/OCR_ADYGHE.md)
+- [OCR_AFRIKAANS](variables/OCR_AFRIKAANS.md)
+- [OCR_ALBANIAN](variables/OCR_ALBANIAN.md)
+- [OCR_AVAR](variables/OCR_AVAR.md)
+- [OCR_AZERBAIJANI](variables/OCR_AZERBAIJANI.md)
+- [OCR_BELARUSIAN](variables/OCR_BELARUSIAN.md)
+- [OCR_BOSNIAN](variables/OCR_BOSNIAN.md)
+- [OCR_BULGARIAN](variables/OCR_BULGARIAN.md)
+- [OCR_CHECHEN](variables/OCR_CHECHEN.md)
+- [OCR_CROATIAN](variables/OCR_CROATIAN.md)
+- [OCR_CZECH](variables/OCR_CZECH.md)
+- [OCR_DANISH](variables/OCR_DANISH.md)
+- [OCR_DARGWA](variables/OCR_DARGWA.md)
+- [OCR_DUTCH](variables/OCR_DUTCH.md)
+- [OCR_ENGLISH](variables/OCR_ENGLISH.md)
+- [OCR_ESTONIAN](variables/OCR_ESTONIAN.md)
+- [OCR_FRENCH](variables/OCR_FRENCH.md)
+- [OCR_GERMAN](variables/OCR_GERMAN.md)
+- [OCR_HUNGARIAN](variables/OCR_HUNGARIAN.md)
+- [OCR_ICELANDIC](variables/OCR_ICELANDIC.md)
+- [OCR_INDONESIAN](variables/OCR_INDONESIAN.md)
+- [OCR_INGUSH](variables/OCR_INGUSH.md)
+- [OCR_IRISH](variables/OCR_IRISH.md)
+- [OCR_ITALIAN](variables/OCR_ITALIAN.md)
+- [OCR_JAPANESE](variables/OCR_JAPANESE.md)
+- [OCR_KANNADA](variables/OCR_KANNADA.md)
+- [OCR_KARBADIAN](variables/OCR_KARBADIAN.md)
+- [OCR_KOREAN](variables/OCR_KOREAN.md)
+- [OCR_KURDISH](variables/OCR_KURDISH.md)
+- [OCR_LAK](variables/OCR_LAK.md)
+- [OCR_LATIN](variables/OCR_LATIN.md)
+- [OCR_LATVIAN](variables/OCR_LATVIAN.md)
+- [OCR_LEZGHIAN](variables/OCR_LEZGHIAN.md)
+- [OCR_LITHUANIAN](variables/OCR_LITHUANIAN.md)
+- [OCR_MALAY](variables/OCR_MALAY.md)
+- [OCR_MALTESE](variables/OCR_MALTESE.md)
+- [OCR_MAORI](variables/OCR_MAORI.md)
+- [OCR_MONGOLIAN](variables/OCR_MONGOLIAN.md)
+- [OCR_NORWEGIAN](variables/OCR_NORWEGIAN.md)
+- [OCR_OCCITAN](variables/OCR_OCCITAN.md)
+- [OCR_PALI](variables/OCR_PALI.md)
+- [OCR_POLISH](variables/OCR_POLISH.md)
+- [OCR_PORTUGUESE](variables/OCR_PORTUGUESE.md)
+- [OCR_ROMANIAN](variables/OCR_ROMANIAN.md)
+- [OCR_RUSSIAN](variables/OCR_RUSSIAN.md)
+- [OCR_SERBIAN_CYRILLIC](variables/OCR_SERBIAN_CYRILLIC.md)
+- [OCR_SERBIAN_LATIN](variables/OCR_SERBIAN_LATIN.md)
+- [OCR_SIMPLIFIED_CHINESE](variables/OCR_SIMPLIFIED_CHINESE.md)
+- [OCR_SLOVAK](variables/OCR_SLOVAK.md)
+- [OCR_SLOVENIAN](variables/OCR_SLOVENIAN.md)
+- [OCR_SPANISH](variables/OCR_SPANISH.md)
+- [OCR_SWAHILI](variables/OCR_SWAHILI.md)
+- [OCR_SWEDISH](variables/OCR_SWEDISH.md)
+- [OCR_TABASSARAN](variables/OCR_TABASSARAN.md)
+- [OCR_TAGALOG](variables/OCR_TAGALOG.md)
+- [OCR_TAJIK](variables/OCR_TAJIK.md)
+- [OCR_TELUGU](variables/OCR_TELUGU.md)
+- [OCR_TURKISH](variables/OCR_TURKISH.md)
+- [OCR_UKRAINIAN](variables/OCR_UKRAINIAN.md)
+- [OCR_UZBEK](variables/OCR_UZBEK.md)
+- [OCR_VIETNAMESE](variables/OCR_VIETNAMESE.md)
+- [OCR_WELSH](variables/OCR_WELSH.md)
## Other
@@ -181,14 +181,14 @@
## TTS Supported Voices
-- [KOKORO\_VOICE\_AF\_HEART](variables/KOKORO_VOICE_AF_HEART.md)
-- [KOKORO\_VOICE\_AF\_RIVER](variables/KOKORO_VOICE_AF_RIVER.md)
-- [KOKORO\_VOICE\_AF\_SARAH](variables/KOKORO_VOICE_AF_SARAH.md)
-- [KOKORO\_VOICE\_AM\_ADAM](variables/KOKORO_VOICE_AM_ADAM.md)
-- [KOKORO\_VOICE\_AM\_MICHAEL](variables/KOKORO_VOICE_AM_MICHAEL.md)
-- [KOKORO\_VOICE\_AM\_SANTA](variables/KOKORO_VOICE_AM_SANTA.md)
-- [KOKORO\_VOICE\_BF\_EMMA](variables/KOKORO_VOICE_BF_EMMA.md)
-- [KOKORO\_VOICE\_BM\_DANIEL](variables/KOKORO_VOICE_BM_DANIEL.md)
+- [KOKORO_VOICE_AF_HEART](variables/KOKORO_VOICE_AF_HEART.md)
+- [KOKORO_VOICE_AF_RIVER](variables/KOKORO_VOICE_AF_RIVER.md)
+- [KOKORO_VOICE_AF_SARAH](variables/KOKORO_VOICE_AF_SARAH.md)
+- [KOKORO_VOICE_AM_ADAM](variables/KOKORO_VOICE_AM_ADAM.md)
+- [KOKORO_VOICE_AM_MICHAEL](variables/KOKORO_VOICE_AM_MICHAEL.md)
+- [KOKORO_VOICE_AM_SANTA](variables/KOKORO_VOICE_AM_SANTA.md)
+- [KOKORO_VOICE_BF_EMMA](variables/KOKORO_VOICE_BF_EMMA.md)
+- [KOKORO_VOICE_BM_DANIEL](variables/KOKORO_VOICE_BM_DANIEL.md)
## Types
@@ -251,7 +251,7 @@
- [SpeechToTextLanguage](type-aliases/SpeechToTextLanguage.md)
- [TensorBuffer](type-aliases/TensorBuffer.md)
- [TextToSpeechLanguage](type-aliases/TextToSpeechLanguage.md)
-- [SPECIAL\_TOKENS](variables/SPECIAL_TOKENS.md)
+- [SPECIAL_TOKENS](variables/SPECIAL_TOKENS.md)
## Typescript API
@@ -277,11 +277,11 @@
## Utilities - LLM
-- [DEFAULT\_CHAT\_CONFIG](variables/DEFAULT_CHAT_CONFIG.md)
-- [DEFAULT\_CONTEXT\_WINDOW\_LENGTH](variables/DEFAULT_CONTEXT_WINDOW_LENGTH.md)
-- [DEFAULT\_MESSAGE\_HISTORY](variables/DEFAULT_MESSAGE_HISTORY.md)
-- [DEFAULT\_SYSTEM\_PROMPT](variables/DEFAULT_SYSTEM_PROMPT.md)
+- [DEFAULT_CHAT_CONFIG](variables/DEFAULT_CHAT_CONFIG.md)
+- [DEFAULT_CONTEXT_WINDOW_LENGTH](variables/DEFAULT_CONTEXT_WINDOW_LENGTH.md)
+- [DEFAULT_MESSAGE_HISTORY](variables/DEFAULT_MESSAGE_HISTORY.md)
+- [DEFAULT_SYSTEM_PROMPT](variables/DEFAULT_SYSTEM_PROMPT.md)
- [parseToolCall](variables/parseToolCall.md)
-- [DEFAULT\_STRUCTURED\_OUTPUT\_PROMPT](functions/DEFAULT_STRUCTURED_OUTPUT_PROMPT.md)
+- [DEFAULT_STRUCTURED_OUTPUT_PROMPT](functions/DEFAULT_STRUCTURED_OUTPUT_PROMPT.md)
- [fixAndValidateStructuredOutput](functions/fixAndValidateStructuredOutput.md)
- [getStructuredOutputPrompt](functions/getStructuredOutputPrompt.md)
diff --git a/docs/src/theme/SearchBar.tsx b/docs/src/theme/SearchBar.tsx
index 7536c3716..8dd05bdab 100644
--- a/docs/src/theme/SearchBar.tsx
+++ b/docs/src/theme/SearchBar.tsx
@@ -2,4 +2,4 @@ import React from 'react';
export default function SearchBar() {
return
;
-}
\ No newline at end of file
+}
diff --git a/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useOCR.md b/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useOCR.md
index 0c804e1a0..d491ed65b 100644
--- a/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useOCR.md
+++ b/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useOCR.md
@@ -309,7 +309,7 @@ You need to make sure the recognizer models you pass in `recognizerSources` matc
|  |  |
| ----------------------------------------------- | ----------------------------------------------------- |
-| Original Image | Image with detected Text Boxes |
+| Original Image | Image with detected Text Boxes |
:::warning
Times presented in the tables are measured as consecutive runs of the model. Initial run times may be up to 2x longer due to model loading and initialization.
diff --git a/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useTextToImage.md b/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useTextToImage.md
index 1d35d7a3a..476f8d95d 100644
--- a/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useTextToImage.md
+++ b/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useTextToImage.md
@@ -94,7 +94,7 @@ function App() {
|  |  |
| ------------------------------------------------------- | ------------------------------------------------------- |
-| Image of size 256×256 | Image of size 512×512 |
+| Image of size 256×256 | Image of size 512×512 |
## Supported models
diff --git a/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useVerticalOCR.md b/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useVerticalOCR.md
index 33f4d935f..88934fd2e 100644
--- a/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useVerticalOCR.md
+++ b/docs/versioned_docs/version-0.6.x/02-hooks/02-computer-vision/useVerticalOCR.md
@@ -324,7 +324,7 @@ You need to make sure the recognizer models you pass in `recognizerSources` matc
|  |  |
| ------------------------------------------------------- | ------------------------------------------------------------ |
-| Original Image | Image with detected Text Boxes |
+| Original Image | Image with detected Text Boxes |
:::warning
Times presented in the tables are measured as consecutive runs of the model. Initial run times may be up to 2x longer due to model loading and initialization.
diff --git a/package.json b/package.json
index e4ee020b2..85ecd0d76 100644
--- a/package.json
+++ b/package.json
@@ -4,7 +4,7 @@
"packageManager": "yarn@4.1.1",
"workspaces": {
"packages": [
- "packages/react-native-executorch",
+ "packages/*",
"apps/*"
]
},
@@ -15,6 +15,7 @@
},
"private": true,
"devDependencies": {
+ "@babel/plugin-transform-export-namespace-from": "^7.27.1",
"@cspell/eslint-plugin": "^8.19.0",
"@evilmartians/lefthook": "^1.5.0",
"@react-native/eslint-config": "^0.79.0",
diff --git a/packages/bare-resource-fetcher/README.md b/packages/bare-resource-fetcher/README.md
new file mode 100644
index 000000000..9b1493f1d
--- /dev/null
+++ b/packages/bare-resource-fetcher/README.md
@@ -0,0 +1,37 @@
+# @react-native-executorch/bare-resource-fetcher
+
+Bare React Native adapter for `react-native-executorch` that provides resource fetching capabilities using native filesystem libraries.
+
+## Installation
+
+```bash
+yarn add @react-native-executorch/bare-resource-fetcher
+yarn add @dr.pogodin/react-native-fs @kesha-antonov/react-native-background-downloader
+```
+
+### Native Dependencies Setup
+
+After installing, follow the setup guides for the native dependencies:
+
+- **[@dr.pogodin/react-native-fs](https://github.com/birdofpreyru/react-native-fs#getting-started)** - Filesystem operations
+- **[@kesha-antonov/react-native-background-downloader](https://github.com/kesha-antonov/react-native-background-downloader#bare-react-native-projects)** - Background download support
+
+> **Note**: Make sure to complete the native setup (iOS/Android configuration) for both dependencies before using this adapter.
+
+## Usage
+
+```typescript
+import { initExecutorch } from 'react-native-executorch';
+import { BareResourceFetcher } from '@react-native-executorch/bare-resource-fetcher';
+
+initExecutorch({
+ resourceFetcher: BareResourceFetcher,
+});
+```
+
+## When to Use
+
+Use this adapter if you're working with:
+- Bare React Native projects (created with `npx @react-native-community/cli@latest init`)
+- Projects that need true background downloads
+- Projects requiring direct native filesystem access
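+
+## Pausing and Resuming Downloads
+
+`BareResourceFetcher` also exposes `pauseFetching`, `resumeFetching` and
+`cancelFetching`, which operate on the active download among the sources you
+pass in. A minimal sketch, assuming the fetcher is registered as above (the
+model URL is a placeholder):
+
+```typescript
+import { BareResourceFetcher } from '@react-native-executorch/bare-resource-fetcher';
+
+const MODEL_URL = 'https://example.com/model.pte'; // placeholder URL
+
+// Start a download; the promise resolves with local file paths when done.
+const fetchPromise = BareResourceFetcher.fetch(
+  (progress) => console.log(`download: ${Math.round(progress * 100)}%`),
+  MODEL_URL
+);
+
+// Pause and later resume the same download by passing the same source.
+await BareResourceFetcher.pauseFetching(MODEL_URL);
+await BareResourceFetcher.resumeFetching(MODEL_URL);
+```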
diff --git a/packages/bare-resource-fetcher/package.json b/packages/bare-resource-fetcher/package.json
new file mode 100644
index 000000000..b5533ac48
--- /dev/null
+++ b/packages/bare-resource-fetcher/package.json
@@ -0,0 +1,43 @@
+{
+ "name": "@react-native-executorch/bare-resource-fetcher",
+ "version": "0.1.0",
+ "description": "Bare React Native resource fetcher for react-native-executorch",
+ "main": "lib/index.js",
+ "types": "lib/index.d.ts",
+ "exports": {
+ ".": {
+ "import": "./lib/index.js",
+ "types": "./lib/index.d.ts"
+ }
+ },
+ "files": [
+ "lib"
+ ],
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/software-mansion/react-native-executorch.git",
+ "directory": "packages/bare-resource-fetcher"
+ },
+ "scripts": {
+ "prepare": "tsc",
+ "typecheck": "tsc --noEmit",
+ "lint": "eslint \"**/*.{js,ts,tsx}\"",
+ "clean": "del-cli lib"
+ },
+ "peerDependencies": {
+ "@dr.pogodin/react-native-fs": "^2.0.0",
+ "@kesha-antonov/react-native-background-downloader": "^4.0.0",
+ "react-native": "*",
+ "react-native-executorch": "*"
+ },
+ "devDependencies": {
+ "@dr.pogodin/react-native-fs": "^2.36.2",
+ "@kesha-antonov/react-native-background-downloader": "^4.4.5",
+ "@types/react": "~19.1.10",
+ "react": "19.1.0",
+ "react-native": "0.81.5",
+ "react-native-executorch": "workspace:*",
+ "typescript": "~5.9.2"
+ }
+}
diff --git a/packages/bare-resource-fetcher/src/ResourceFetcher.ts b/packages/bare-resource-fetcher/src/ResourceFetcher.ts
new file mode 100644
index 000000000..bc7d45a51
--- /dev/null
+++ b/packages/bare-resource-fetcher/src/ResourceFetcher.ts
@@ -0,0 +1,476 @@
+import {
+ createDownloadTask,
+ completeHandler,
+ DownloadTask,
+ BeginHandlerParams,
+ ProgressHandlerParams,
+} from '@kesha-antonov/react-native-background-downloader';
+import * as RNFS from '@dr.pogodin/react-native-fs';
+import { Image } from 'react-native';
+import { RNEDirectory } from './constants/directories';
+import {
+ ResourceSource,
+ ResourceFetcherAdapter,
+ RnExecutorchErrorCode,
+ RnExecutorchError,
+} from 'react-native-executorch';
+import {
+ ResourceFetcherUtils,
+ DownloadStatus,
+ SourceType,
+ ResourceSourceExtended,
+} from './ResourceFetcherUtils';
+
+interface DownloadResource {
+ task: DownloadTask;
+ status: DownloadStatus;
+ extendedInfo: ResourceSourceExtended;
+}
+
+interface BareResourceFetcherInterface extends ResourceFetcherAdapter {
+  downloads: Map<ResourceSource, DownloadResource>;
+  singleFetch(
+    sourceExtended: ResourceSourceExtended
+  ): Promise<string[] | null>;
+  returnOrStartNext(
+    sourceExtended: ResourceSourceExtended,
+    result: string | string[]
+  ): string[] | Promise<string[] | null>;
+  completeDownload(
+    extendedInfo: ResourceSourceExtended,
+    source: ResourceSource
+  ): Promise<string[] | null>;
+  pause(source: ResourceSource): Promise<void>;
+  resume(source: ResourceSource): Promise<string[] | null>;
+  cancel(source: ResourceSource): Promise<void>;
+  findActive(sources: ResourceSource[]): ResourceSource;
+  pauseFetching(...sources: ResourceSource[]): Promise<void>;
+  resumeFetching(...sources: ResourceSource[]): Promise<void>;
+  cancelFetching(...sources: ResourceSource[]): Promise<void>;
+  listDownloadedFiles(): Promise<string[]>;
+  listDownloadedModels(): Promise<string[]>;
+  deleteResources(...sources: ResourceSource[]): Promise<void>;
+  getFilesTotalSize(...sources: ResourceSource[]): Promise<number>;
+  handleObject(source: ResourceSource): Promise<string>;
+  handleLocalFile(source: ResourceSource): string;
+  handleReleaseModeFile(
+    sourceExtended: ResourceSourceExtended
+  ): Promise<string>;
+  handleDevModeFile(
+    sourceExtended: ResourceSourceExtended
+  ): Promise<string | string[] | null>;
+  handleRemoteFile(
+    sourceExtended: ResourceSourceExtended
+  ): Promise<string | string[] | null>;
+}
+
+export const BareResourceFetcher: BareResourceFetcherInterface = {
+  downloads: new Map(), // Files currently downloading (or paused), when the download was started via the fetch() method.
+
+ async fetch(
+ callback: (downloadProgress: number) => void = () => {},
+ ...sources: ResourceSource[]
+ ) {
+ if (sources.length === 0) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidUserInput,
+ 'Empty list given as an argument'
+ );
+ }
+ const { results: info, totalLength } =
+ await ResourceFetcherUtils.getFilesSizes(sources);
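+    // Build a linked list of sources so each completed fetch kicks off the
+    // next one; remote files get a progress callback weighted by their share
+    // of the total download size, while other source types report nothing.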
+ const head: ResourceSourceExtended = {
+ source: info[0]!.source,
+ sourceType: info[0]!.type,
+ callback:
+ info[0]!.type === SourceType.REMOTE_FILE
+ ? ResourceFetcherUtils.calculateDownloadProgress(
+ totalLength,
+ info[0]!.previousFilesTotalLength,
+ info[0]!.length,
+ callback
+ )
+ : () => {},
+ results: [],
+ };
+
+ let node = head;
+ for (let idx = 1; idx < sources.length; idx++) {
+ node.next = {
+ source: info[idx]!.source,
+ sourceType: info[idx]!.type,
+ callback:
+ info[idx]!.type === SourceType.REMOTE_FILE
+ ? ResourceFetcherUtils.calculateDownloadProgress(
+ totalLength,
+ info[idx]!.previousFilesTotalLength,
+ info[idx]!.length,
+ callback
+ )
+ : () => {},
+ results: [],
+ };
+ node = node.next;
+ }
+ return this.singleFetch(head);
+ },
+
+ async singleFetch(
+ sourceExtended: ResourceSourceExtended
+  ): Promise<string[] | null> {
+ const source = sourceExtended.source;
+ switch (sourceExtended.sourceType) {
+ case SourceType.OBJECT: {
+ return this.returnOrStartNext(
+ sourceExtended,
+ await this.handleObject(source)
+ );
+ }
+ case SourceType.LOCAL_FILE: {
+ return this.returnOrStartNext(
+ sourceExtended,
+ this.handleLocalFile(source)
+ );
+ }
+ case SourceType.RELEASE_MODE_FILE: {
+ return this.returnOrStartNext(
+ sourceExtended,
+ await this.handleReleaseModeFile(sourceExtended)
+ );
+ }
+ case SourceType.DEV_MODE_FILE: {
+ const result = await this.handleDevModeFile(sourceExtended);
+ if (result !== null) {
+ return this.returnOrStartNext(sourceExtended, result);
+ }
+ return null;
+ }
+ default: {
+ //case SourceType.REMOTE_FILE
+ const result = await this.handleRemoteFile(sourceExtended);
+ if (result !== null) {
+ return this.returnOrStartNext(sourceExtended, result);
+ }
+ return null;
+ }
+ }
+ },
+
+  // Called whenever a download finishes successfully; either starts the next download in the chain or returns the list of paths.
+ returnOrStartNext(sourceExtended: ResourceSourceExtended, result: string) {
+ sourceExtended.results.push(result);
+
+ if (sourceExtended.next) {
+ const nextSource = sourceExtended.next;
+ nextSource.results.push(...sourceExtended.results);
+ return this.singleFetch(nextSource);
+ }
+ sourceExtended.callback!(1);
+ return sourceExtended.results;
+ },
+
+ async pause(source: ResourceSource) {
+ const resource = this.downloads.get(source);
+ if (!resource) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.NotFound,
+ 'No active download found for the given source'
+ );
+ }
+ switch (resource.status) {
+ case DownloadStatus.PAUSED:
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherAlreadyPaused,
+ "The file download is currently paused. Can't pause the download of the same file twice."
+ );
+ default: {
+ resource.status = DownloadStatus.PAUSED;
+ resource.task.pause();
+ }
+ }
+ },
+
+ async resume(source: ResourceSource) {
+ const resource = this.downloads.get(source)!;
+ if (
+ !resource.extendedInfo.fileUri ||
+ !resource.extendedInfo.cacheFileUri ||
+ !resource.extendedInfo.uri
+ ) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherMissingUri,
+ 'Something went wrong. File uri info is not specified'
+ );
+ }
+ switch (resource.status) {
+ case DownloadStatus.ONGOING:
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherAlreadyOngoing,
+ "The file download is currently ongoing. Can't resume the ongoing download."
+ );
+ default: {
+ resource.status = DownloadStatus.ONGOING;
+ resource.task.resume();
+
+ return new Promise((resolve, reject) => {
+ resource.task
+ .done(async () => {
+ const result = await this.completeDownload(
+ resource.extendedInfo,
+ source
+ );
+ resolve(result);
+ })
+ .error((e) => {
+ reject(e);
+ });
+ });
+ }
+ }
+ },
+
+ async cancel(source: ResourceSource) {
+ const resource = this.downloads.get(source);
+ if (!resource) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.NotFound,
+ 'No active download found for the given source'
+ );
+ }
+ resource.task.stop();
+ this.downloads.delete(source);
+ },
+
+ async completeDownload(
+ extendedInfo: ResourceSourceExtended,
+ source: ResourceSource
+  ): Promise<string[] | null> {
+ // Check if download was cancelled or paused
+ if (
+ !this.downloads.has(source) ||
+ this.downloads.get(source)!.status === DownloadStatus.PAUSED
+ ) {
+ return null;
+ }
+
+ await RNFS.moveFile(extendedInfo.cacheFileUri!, extendedInfo.fileUri!);
+ this.downloads.delete(source);
+ ResourceFetcherUtils.triggerHuggingFaceDownloadCounter(extendedInfo.uri!);
+
+ const filename = extendedInfo.fileUri!.split('/').pop();
+ if (filename) {
+ await completeHandler(filename);
+ }
+
+ const result = this.returnOrStartNext(
+ extendedInfo,
+ ResourceFetcherUtils.removeFilePrefix(extendedInfo.fileUri!)
+ );
+ return result instanceof Promise ? await result : result;
+ },
+
+ async pauseFetching(...sources: ResourceSource[]) {
+ const source = this.findActive(sources);
+ await this.pause(source);
+ },
+
+ async resumeFetching(...sources: ResourceSource[]) {
+ const source = this.findActive(sources);
+ await this.resume(source);
+ },
+
+ async cancelFetching(...sources: ResourceSource[]) {
+ const source = this.findActive(sources);
+ await this.cancel(source);
+ },
+
+ findActive(sources: ResourceSource[]) {
+ for (const source of sources) {
+ if (this.downloads.has(source)) {
+ return source;
+ }
+ }
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherNotActive,
+      'None of the given sources is currently being downloaded.'
+ );
+ },
+
+ async listDownloadedFiles() {
+ const files = await RNFS.readDir(RNEDirectory);
+ return files.map((file) => file.path);
+ },
+
+ async listDownloadedModels() {
+ const files = await this.listDownloadedFiles();
+ return files.filter((file: string) => file.endsWith('.pte'));
+ },
+
+ async deleteResources(...sources: ResourceSource[]) {
+ for (const source of sources) {
+ const filename = ResourceFetcherUtils.getFilenameFromUri(
+ source as string
+ );
+ const fileUri = `${RNEDirectory}${filename}`;
+ if (await ResourceFetcherUtils.checkFileExists(fileUri)) {
+ await RNFS.unlink(fileUri);
+ }
+ }
+ },
+
+ async getFilesTotalSize(...sources: ResourceSource[]) {
+ return (await ResourceFetcherUtils.getFilesSizes(sources)).totalLength;
+ },
+
+ async handleObject(source: ResourceSource) {
+ if (typeof source !== 'object') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+        'Source is expected to be an object'
+ );
+ }
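+    // Persist the object under a hash of its contents so identical objects
+    // map to the same cached file and are only written once.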
+ const jsonString = JSON.stringify(source);
+ const digest = ResourceFetcherUtils.hashObject(jsonString);
+ const filename = `${digest}.json`;
+ const path = `${RNEDirectory}${filename}`;
+
+ if (await ResourceFetcherUtils.checkFileExists(path)) {
+ return ResourceFetcherUtils.removeFilePrefix(path);
+ }
+
+ await ResourceFetcherUtils.createDirectoryIfNoExists();
+ await RNFS.writeFile(path, jsonString, 'utf8');
+
+ return ResourceFetcherUtils.removeFilePrefix(path);
+ },
+
+ handleLocalFile(source: ResourceSource) {
+ if (typeof source !== 'string') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+        'Source is expected to be a string'
+ );
+ }
+ return ResourceFetcherUtils.removeFilePrefix(source);
+ },
+
+ async handleReleaseModeFile(sourceExtended: ResourceSourceExtended) {
+ const source = sourceExtended.source;
+ if (typeof source !== 'number') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+        'Source is expected to be a number'
+ );
+ }
+ const assetSource = Image.resolveAssetSource(source);
+ const uri = assetSource.uri;
+ const filename = ResourceFetcherUtils.getFilenameFromUri(uri);
+ const fileUri = `${RNEDirectory}${filename}`;
+
+ if (await ResourceFetcherUtils.checkFileExists(fileUri)) {
+ return ResourceFetcherUtils.removeFilePrefix(fileUri);
+ }
+ await ResourceFetcherUtils.createDirectoryIfNoExists();
+
+ if (uri.startsWith('http') || uri.startsWith('file')) {
+ await RNFS.copyFile(uri, fileUri);
+ }
+ return ResourceFetcherUtils.removeFilePrefix(fileUri);
+ },
+
+ async handleDevModeFile(sourceExtended: ResourceSourceExtended) {
+ const source = sourceExtended.source;
+ if (typeof source !== 'number') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a number'
+ );
+ }
+ sourceExtended.uri = Image.resolveAssetSource(source).uri;
+ return await this.handleRemoteFile(sourceExtended);
+ },
+
+ async handleRemoteFile(sourceExtended: ResourceSourceExtended) {
+ const source = sourceExtended.source;
+ if (typeof source === 'object') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a string or a number'
+ );
+ }
+ if (this.downloads.has(source)) {
+ const resource = this.downloads.get(source)!;
+ if (resource.status === DownloadStatus.PAUSED) {
+ // if the download is paused, `fetch` is treated like `resume`
+ return this.resume(source);
+ }
+ // if the download is ongoing, throw error.
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadInProgress,
+ 'Already downloading this file'
+ );
+ }
+ if (typeof source === 'number' && !sourceExtended.uri) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherMissingUri,
+ 'Source Uri is expected to be available here'
+ );
+ }
+ if (typeof source === 'string') {
+ sourceExtended.uri = source;
+ }
+ const uri = sourceExtended.uri!;
+ const filename = ResourceFetcherUtils.getFilenameFromUri(uri);
+ sourceExtended.fileUri = `${RNEDirectory}${filename}`;
+ sourceExtended.cacheFileUri = `${RNFS.CachesDirectoryPath}/${filename}`;
+
+ if (await ResourceFetcherUtils.checkFileExists(sourceExtended.fileUri)) {
+ return ResourceFetcherUtils.removeFilePrefix(sourceExtended.fileUri);
+ }
+ await ResourceFetcherUtils.createDirectoryIfNoExists();
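+    // Download into the cache directory first; completeDownload moves the
+    // finished file into RNEDirectory, so a partially downloaded file never
+    // appears at the final path.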
+
+ return new Promise((resolve, reject) => {
+ const task = createDownloadTask({
+ id: filename,
+ url: uri,
+ destination: sourceExtended.cacheFileUri!,
+ })
+ .begin((_: BeginHandlerParams) => {
+ sourceExtended.callback!(0);
+ })
+ .progress((progress: ProgressHandlerParams) => {
+ sourceExtended.callback!(
+ progress.bytesDownloaded / progress.bytesTotal
+ );
+ })
+ .done(async () => {
+ const nextResult = await this.completeDownload(
+ sourceExtended,
+ source
+ );
+ resolve(nextResult);
+ })
+ .error((error) => {
+ this.downloads.delete(source);
+ reject(
+ new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadFailed,
+ `Failed to fetch resource from '${source}', context: ${error}`
+ )
+ );
+ });
+
+ // Start the download task
+ task.start();
+
+ const downloadResource: DownloadResource = {
+ task: task,
+ status: DownloadStatus.ONGOING,
+ extendedInfo: sourceExtended,
+ };
+ this.downloads.set(source, downloadResource);
+ });
+ },
+
+ async readAsString(path: string) {
+ return await RNFS.readFile(path, 'utf8');
+ },
+};
diff --git a/packages/bare-resource-fetcher/src/ResourceFetcherUtils.ts b/packages/bare-resource-fetcher/src/ResourceFetcherUtils.ts
new file mode 100644
index 000000000..681c6e3b4
--- /dev/null
+++ b/packages/bare-resource-fetcher/src/ResourceFetcherUtils.ts
@@ -0,0 +1,102 @@
+import { RNEDirectory } from './constants/directories';
+import {
+ ResourceSource,
+ Logger,
+ ResourceFetcherUtils as CoreUtils,
+ HTTP_CODE,
+ DownloadStatus,
+ SourceType,
+ ResourceSourceExtended,
+ RnExecutorchError,
+ RnExecutorchErrorCode,
+} from 'react-native-executorch';
+import { Image } from 'react-native';
+import * as RNFS from '@dr.pogodin/react-native-fs';
+
+export { HTTP_CODE, DownloadStatus, SourceType };
+export type { ResourceSourceExtended };
+
+export namespace ResourceFetcherUtils {
+ export const removeFilePrefix = CoreUtils.removeFilePrefix;
+ export const hashObject = CoreUtils.hashObject;
+ export const calculateDownloadProgress = CoreUtils.calculateDownloadProgress;
+ export const triggerHuggingFaceDownloadCounter =
+ CoreUtils.triggerHuggingFaceDownloadCounter;
+ export const getFilenameFromUri = CoreUtils.getFilenameFromUri;
+
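+  // Classify a source: numeric sources are bundled assets, which resolve to
+  // an http uri in dev mode (served by Metro) and to an embedded asset in
+  // release builds; strings are either local 'file://' paths or remote urls.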
+ export function getType(source: ResourceSource): SourceType {
+ if (typeof source === 'object') {
+ return SourceType.OBJECT;
+ } else if (typeof source === 'number') {
+ const uri = Image.resolveAssetSource(source).uri;
+ if (uri.startsWith('http')) {
+ return SourceType.DEV_MODE_FILE;
+ }
+ return SourceType.RELEASE_MODE_FILE;
+ }
+ // typeof source == 'string'
+ if (source.startsWith('file://')) {
+ return SourceType.LOCAL_FILE;
+ }
+ return SourceType.REMOTE_FILE;
+ }
+
+ export async function getFilesSizes(sources: ResourceSource[]) {
+ const results: Array<{
+ source: ResourceSource;
+ type: SourceType;
+ length: number;
+ previousFilesTotalLength: number;
+ }> = [];
+ let totalLength = 0;
+ let previousFilesTotalLength = 0;
+ for (const source of sources) {
+ const type = ResourceFetcherUtils.getType(source);
+ let length = 0;
+ try {
+ if (type === SourceType.REMOTE_FILE && typeof source === 'string') {
+ const response = await fetch(source, { method: 'HEAD' });
+ if (!response.ok) {
+ Logger.warn(
+ `Failed to fetch HEAD for ${source}: ${response.status}`
+ );
+ continue;
+ }
+
+ const contentLength = response.headers.get('content-length');
+ if (!contentLength) {
+ Logger.warn(`No content-length header for ${source}`);
+ }
+
+ length = contentLength ? parseInt(contentLength, 10) : 0;
+ previousFilesTotalLength = totalLength;
+ totalLength += length;
+ }
+ } catch (error) {
+ Logger.warn(`Error fetching HEAD for ${source}:`, error);
+ continue;
+ } finally {
+ results.push({ source, type, length, previousFilesTotalLength });
+ }
+ }
+ return { results, totalLength };
+ }
+
+ export async function createDirectoryIfNoExists() {
+ if (!(await checkFileExists(RNEDirectory))) {
+ try {
+ await RNFS.mkdir(RNEDirectory);
+ } catch (error) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.AccessFailed,
+ `Failed to create directory at ${RNEDirectory}`,
+ error
+ );
+ }
+ }
+ }
+
+ export async function checkFileExists(fileUri: string) {
+ return await RNFS.exists(fileUri);
+ }
+}
diff --git a/packages/bare-resource-fetcher/src/constants/directories.ts b/packages/bare-resource-fetcher/src/constants/directories.ts
new file mode 100644
index 000000000..4ba8b8401
--- /dev/null
+++ b/packages/bare-resource-fetcher/src/constants/directories.ts
@@ -0,0 +1,3 @@
+import { directories } from '@kesha-antonov/react-native-background-downloader';
+
+export const RNEDirectory = `${directories.documents}/react-native-executorch/`;
diff --git a/packages/bare-resource-fetcher/src/index.ts b/packages/bare-resource-fetcher/src/index.ts
new file mode 100644
index 000000000..c0ec4024f
--- /dev/null
+++ b/packages/bare-resource-fetcher/src/index.ts
@@ -0,0 +1 @@
+export * from './ResourceFetcher';
diff --git a/packages/bare-resource-fetcher/tsconfig.json b/packages/bare-resource-fetcher/tsconfig.json
new file mode 100644
index 000000000..cadd2509a
--- /dev/null
+++ b/packages/bare-resource-fetcher/tsconfig.json
@@ -0,0 +1,28 @@
+{
+ "extends": "../../tsconfig.json",
+ "compilerOptions": {
+ "rootDir": "src",
+ "outDir": "lib",
+ "declaration": true,
+ "declarationMap": true,
+ "tsBuildInfoFile": "./lib/typescript/tsconfig.tsbuildinfo",
+ "composite": true,
+ "allowJs": false,
+ "allowUnreachableCode": false,
+ "allowUnusedLabels": false,
+ "esModuleInterop": true,
+ "forceConsistentCasingInFileNames": true,
+ "module": "esnext",
+ "moduleResolution": "node",
+ "noFallthroughCasesInSwitch": true,
+ "noImplicitReturns": true,
+ "noStrictGenericChecks": false,
+ "noUnusedLocals": true,
+ "noUnusedParameters": true,
+ "noUncheckedIndexedAccess": true,
+ "strict": true,
+ "types": ["react", "node"]
+ },
+ "include": ["src"],
+ "exclude": ["node_modules", "lib"]
+}
diff --git a/packages/expo-resource-fetcher/README.md b/packages/expo-resource-fetcher/README.md
new file mode 100644
index 000000000..279853434
--- /dev/null
+++ b/packages/expo-resource-fetcher/README.md
@@ -0,0 +1,30 @@
+# @react-native-executorch/expo-resource-fetcher
+
+Expo adapter for `react-native-executorch` that provides resource fetching capabilities using Expo's filesystem APIs.
+
+## Installation
+
+```bash
+yarn add @react-native-executorch/expo-resource-fetcher
+yarn add expo-file-system expo-asset
+```
+
+## Usage
+
+```typescript
+import { initExecutorch } from 'react-native-executorch';
+import { ExpoResourceFetcher } from '@react-native-executorch/expo-resource-fetcher';
+
+initExecutorch({
+ resourceFetcher: ExpoResourceFetcher,
+});
+```
+
+## When to Use
+
+Use this adapter if you're working with:
+- Expo projects
+- Expo Router projects
+- Projects using the Expo managed workflow
+
+This adapter leverages `expo-file-system` and `expo-asset` to handle file operations and downloads.
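+
+## Example: Tracking Download Progress
+
+A minimal sketch of downloading a remote model with a progress callback, assuming the adapter's `fetch(callback, ...sources)` entry point from this package; the URL is a placeholder, and your app may reach this through a higher-level API instead:
+
+```typescript
+import { ExpoResourceFetcher } from '@react-native-executorch/expo-resource-fetcher';
+
+// fetch() resolves with the local paths of the downloaded resources,
+// or null if the download was paused or canceled mid-flight.
+async function downloadModel() {
+  const paths = await ExpoResourceFetcher.fetch(
+    (progress) => console.log(`Downloaded ${(progress * 100).toFixed(0)}%`),
+    'https://example.com/model.pte' // placeholder URL
+  );
+  if (paths) {
+    console.log('Model stored at', paths[0]);
+  }
+}
+```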
diff --git a/packages/expo-resource-fetcher/package.json b/packages/expo-resource-fetcher/package.json
new file mode 100644
index 000000000..7cf89487f
--- /dev/null
+++ b/packages/expo-resource-fetcher/package.json
@@ -0,0 +1,45 @@
+{
+ "name": "@react-native-executorch/expo-resource-fetcher",
+ "version": "0.1.0",
+ "description": "Expo resource fetcher for react-native-executorch",
+ "main": "lib/index.js",
+ "types": "lib/index.d.ts",
+ "exports": {
+ ".": {
+ "import": "./lib/index.js",
+ "types": "./lib/index.d.ts"
+ }
+ },
+ "files": [
+ "lib"
+ ],
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/software-mansion/react-native-executorch.git",
+ "directory": "packages/expo-resource-fetcher"
+ },
+ "scripts": {
+ "prepare": "tsc",
+ "typecheck": "tsc --noEmit",
+ "lint": "eslint \"**/*.{js,ts,tsx}\"",
+ "clean": "del-cli lib"
+ },
+ "peerDependencies": {
+ "expo": ">=54.0.0",
+ "expo-asset": "^12.0.0",
+ "expo-file-system": "^19.0.0",
+ "react-native": "*",
+ "react-native-executorch": "*"
+ },
+ "devDependencies": {
+ "@types/react": "~19.1.10",
+ "expo": "^54.0.0",
+ "expo-asset": "12.0.11",
+ "expo-file-system": "^19.0.20",
+ "react": "19.1.0",
+ "react-native": "0.81.5",
+ "react-native-executorch": "workspace:*",
+ "typescript": "~5.9.2"
+ }
+}
diff --git a/packages/expo-resource-fetcher/src/ResourceFetcher.ts b/packages/expo-resource-fetcher/src/ResourceFetcher.ts
new file mode 100644
index 000000000..28baa87fc
--- /dev/null
+++ b/packages/expo-resource-fetcher/src/ResourceFetcher.ts
@@ -0,0 +1,462 @@
+import {
+ cacheDirectory,
+ copyAsync,
+ createDownloadResumable,
+ moveAsync,
+ FileSystemSessionType,
+ writeAsStringAsync,
+ EncodingType,
+ deleteAsync,
+ readDirectoryAsync,
+ readAsStringAsync,
+} from 'expo-file-system/legacy';
+import { Asset } from 'expo-asset';
+import { Platform } from 'react-native';
+import { RNEDirectory } from './constants/directories';
+import {
+ ResourceSource,
+ ResourceFetcherAdapter,
+ RnExecutorchErrorCode,
+ RnExecutorchError,
+} from 'react-native-executorch';
+import {
+ ResourceFetcherUtils,
+ HTTP_CODE,
+ DownloadStatus,
+ SourceType,
+ ResourceSourceExtended,
+ DownloadResource,
+} from './ResourceFetcherUtils';
+
+interface ExpoResourceFetcherInterface extends ResourceFetcherAdapter {
+ downloads: Map<ResourceSource, DownloadResource>;
+ singleFetch(sourceExtended: ResourceSourceExtended): Promise<string[] | null>;
+ returnOrStartNext(
+ sourceExtended: ResourceSourceExtended,
+ result: string | string[]
+ ): string[] | Promise<string[] | null>;
+ pause(source: ResourceSource): Promise<void>;
+ resume(source: ResourceSource): Promise<string[] | null>;
+ cancel(source: ResourceSource): Promise<void>;
+ findActive(sources: ResourceSource[]): ResourceSource;
+ pauseFetching(...sources: ResourceSource[]): Promise<void>;
+ resumeFetching(...sources: ResourceSource[]): Promise<void>;
+ cancelFetching(...sources: ResourceSource[]): Promise<void>;
+ listDownloadedFiles(): Promise<string[]>;
+ listDownloadedModels(): Promise<string[]>;
+ deleteResources(...sources: ResourceSource[]): Promise<void>;
+ getFilesTotalSize(...sources: ResourceSource[]): Promise<number>;
+ handleObject(source: ResourceSource): Promise<string>;
+ handleLocalFile(source: ResourceSource): string;
+ handleReleaseModeFile(sourceExtended: ResourceSourceExtended): Promise<string>;
+ handleDevModeFile(
+ sourceExtended: ResourceSourceExtended
+ ): Promise<string | string[] | null>;
+ handleRemoteFile(
+ sourceExtended: ResourceSourceExtended
+ ): Promise<string | string[] | null>;
+}
+
+export const ExpoResourceFetcher: ExpoResourceFetcherInterface = {
+ downloads: new Map(), // map of currently downloading (or paused) files, keyed by source, for downloads started via the fetch() method
+
+ async fetch(
+ callback: (downloadProgress: number) => void = () => {},
+ ...sources: ResourceSource[]
+ ) {
+ if (sources.length === 0) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidUserInput,
+ 'Empty list given as an argument'
+ );
+ }
+ const { results: info, totalLength } =
+ await ResourceFetcherUtils.getFilesSizes(sources);
+ const head: ResourceSourceExtended = {
+ source: info[0]!.source,
+ sourceType: info[0]!.type,
+ callback:
+ info[0]!.type === SourceType.REMOTE_FILE
+ ? ResourceFetcherUtils.calculateDownloadProgress(
+ totalLength,
+ info[0]!.previousFilesTotalLength,
+ info[0]!.length,
+ callback
+ )
+ : () => {},
+ results: [],
+ };
+
+ let node = head;
+ for (let idx = 1; idx < sources.length; idx++) {
+ node.next = {
+ source: info[idx]!.source,
+ sourceType: info[idx]!.type,
+ callback:
+ info[idx]!.type === SourceType.REMOTE_FILE
+ ? ResourceFetcherUtils.calculateDownloadProgress(
+ totalLength,
+ info[idx]!.previousFilesTotalLength,
+ info[idx]!.length,
+ callback
+ )
+ : () => {},
+ results: [],
+ };
+ node = node.next;
+ }
+ return this.singleFetch(head);
+ },
+
+ async singleFetch(
+ sourceExtended: ResourceSourceExtended
+ ): Promise<string[] | null> {
+ const source = sourceExtended.source;
+ switch (sourceExtended.sourceType) {
+ case SourceType.OBJECT: {
+ return this.returnOrStartNext(
+ sourceExtended,
+ await this.handleObject(source)
+ );
+ }
+ case SourceType.LOCAL_FILE: {
+ return this.returnOrStartNext(
+ sourceExtended,
+ this.handleLocalFile(source)
+ );
+ }
+ case SourceType.RELEASE_MODE_FILE: {
+ return this.returnOrStartNext(
+ sourceExtended,
+ await this.handleReleaseModeFile(sourceExtended)
+ );
+ }
+ case SourceType.DEV_MODE_FILE: {
+ const result = await this.handleDevModeFile(sourceExtended);
+ if (result !== null) {
+ return this.returnOrStartNext(sourceExtended, result);
+ }
+ return null;
+ }
+ default: {
+ //case SourceType.REMOTE_FILE
+ const result = await this.handleRemoteFile(sourceExtended);
+ if (result !== null) {
+ return this.returnOrStartNext(sourceExtended, result);
+ }
+ return null;
+ }
+ }
+ },
+
+ // Called whenever a download finishes successfully; decides whether to trigger the next download in the chain or resolve with the list of paths.
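+ // Illustration (not executed): fetch(cb, A, B, C) builds the chain
+ // A -> B -> C; each node pushes its own path into `results`, forwards the
+ // accumulated array to the next node, and only the final node reports
+ // progress 1 and resolves with the full list of paths.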
+ returnOrStartNext(sourceExtended: ResourceSourceExtended, result: string) {
+ sourceExtended.results.push(result);
+
+ if (sourceExtended.next) {
+ const nextSource = sourceExtended.next;
+ nextSource.results.push(...sourceExtended.results);
+ return this.singleFetch(nextSource);
+ }
+ sourceExtended.callback!(1);
+ return sourceExtended.results;
+ },
+
+ async pause(source: ResourceSource) {
+ const resource = this.downloads.get(source)!;
+ switch (resource.status) {
+ case DownloadStatus.PAUSED:
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherAlreadyPaused,
+ "The file download is currently paused. Can't pause the download of the same file twice."
+ );
+ default: {
+ resource.status = DownloadStatus.PAUSED;
+ await resource.downloadResumable.pauseAsync();
+ }
+ }
+ },
+
+ async resume(source: ResourceSource) {
+ const resource = this.downloads.get(source)!;
+ if (
+ !resource.extendedInfo.fileUri ||
+ !resource.extendedInfo.cacheFileUri ||
+ !resource.extendedInfo.uri
+ ) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherMissingUri,
+ 'Something went wrong: file URI info is not specified'
+ );
+ }
+ switch (resource.status) {
+ case DownloadStatus.ONGOING:
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherAlreadyOngoing,
+ "The file download is currently ongoing. Can't resume the ongoing download."
+ );
+ default: {
+ resource.status = DownloadStatus.ONGOING;
+ const result = await resource.downloadResumable.resumeAsync();
+ if (
+ !this.downloads.has(source) ||
+ this.downloads.get(source)!.status === DownloadStatus.PAUSED
+ ) {
+ // if canceled or paused again after this resume
+ return null;
+ }
+ if (
+ !result ||
+ (result.status !== HTTP_CODE.OK &&
+ result.status !== HTTP_CODE.PARTIAL_CONTENT)
+ ) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadFailed,
+ `Failed to fetch resource from '${resource.extendedInfo.uri}', context: ${result}`
+ );
+ }
+ await moveAsync({
+ from: resource.extendedInfo.cacheFileUri,
+ to: resource.extendedInfo.fileUri,
+ });
+ this.downloads.delete(source);
+ ResourceFetcherUtils.triggerHuggingFaceDownloadCounter(
+ resource.extendedInfo.uri
+ );
+
+ return this.returnOrStartNext(
+ resource.extendedInfo,
+ ResourceFetcherUtils.removeFilePrefix(resource.extendedInfo.fileUri)
+ );
+ }
+ }
+ },
+
+ async cancel(source: ResourceSource) {
+ const resource = this.downloads.get(source)!;
+ await resource.downloadResumable.cancelAsync();
+ this.downloads.delete(source);
+ },
+
+ async pauseFetching(...sources: ResourceSource[]) {
+ const source = this.findActive(sources);
+ await this.pause(source);
+ },
+
+ async resumeFetching(...sources: ResourceSource[]) {
+ const source = this.findActive(sources);
+ await this.resume(source);
+ },
+
+ async cancelFetching(...sources: ResourceSource[]) {
+ const source = this.findActive(sources);
+ await this.cancel(source);
+ },
+
+ findActive(sources: ResourceSource[]) {
+ for (const source of sources) {
+ if (this.downloads.has(source)) {
+ return source;
+ }
+ }
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherNotActive,
+ 'None of the given sources are currently being downloaded.'
+ );
+ },
+
+ async listDownloadedFiles() {
+ const files = await readDirectoryAsync(RNEDirectory);
+ return files.map((file: string) => `${RNEDirectory}${file}`);
+ },
+
+ async listDownloadedModels() {
+ const files = await this.listDownloadedFiles();
+ return files.filter((file: string) => file.endsWith('.pte'));
+ },
+
+ async deleteResources(...sources: ResourceSource[]) {
+ for (const source of sources) {
+ const filename = ResourceFetcherUtils.getFilenameFromUri(
+ source as string
+ );
+ const fileUri = `${RNEDirectory}${filename}`;
+ if (await ResourceFetcherUtils.checkFileExists(fileUri)) {
+ await deleteAsync(fileUri);
+ }
+ }
+ },
+
+ async getFilesTotalSize(...sources: ResourceSource[]) {
+ return (await ResourceFetcherUtils.getFilesSizes(sources)).totalLength;
+ },
+
+ async handleObject(source: ResourceSource) {
+ if (typeof source !== 'object') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be an object'
+ );
+ }
+ const jsonString = JSON.stringify(source);
+ const digest = ResourceFetcherUtils.hashObject(jsonString);
+ const filename = `${digest}.json`;
+ const path = `${RNEDirectory}${filename}`;
+
+ if (await ResourceFetcherUtils.checkFileExists(path)) {
+ return ResourceFetcherUtils.removeFilePrefix(path);
+ }
+
+ await ResourceFetcherUtils.createDirectoryIfNoExists();
+ await writeAsStringAsync(path, jsonString, {
+ encoding: EncodingType.UTF8,
+ });
+
+ return ResourceFetcherUtils.removeFilePrefix(path);
+ },
+
+ handleLocalFile(source: ResourceSource) {
+ if (typeof source !== 'string') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a string'
+ );
+ }
+ return ResourceFetcherUtils.removeFilePrefix(source);
+ },
+
+ async handleReleaseModeFile(sourceExtended: ResourceSourceExtended) {
+ const source = sourceExtended.source;
+ if (typeof source !== 'number') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a number'
+ );
+ }
+ const asset = Asset.fromModule(source);
+ const uri = asset.uri;
+ const filename = ResourceFetcherUtils.getFilenameFromUri(uri);
+ const fileUri = `${RNEDirectory}${filename}`;
+ // On Android, file uri does not contain file extension, so we add it manually
+ const fileUriWithType =
+ Platform.OS === 'android' ? `${fileUri}.${asset.type}` : fileUri;
+ if (await ResourceFetcherUtils.checkFileExists(fileUri)) {
+ return ResourceFetcherUtils.removeFilePrefix(fileUri);
+ }
+ await ResourceFetcherUtils.createDirectoryIfNoExists();
+ await copyAsync({
+ from: asset.uri,
+ to: fileUriWithType,
+ });
+ return ResourceFetcherUtils.removeFilePrefix(fileUriWithType);
+ },
+
+ async handleDevModeFile(sourceExtended: ResourceSourceExtended) {
+ const source = sourceExtended.source;
+ if (typeof source !== 'number') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a number'
+ );
+ }
+ sourceExtended.uri = Asset.fromModule(source).uri;
+ return await this.handleRemoteFile(sourceExtended);
+ },
+
+ async handleRemoteFile(sourceExtended: ResourceSourceExtended) {
+ const source = sourceExtended.source;
+ if (typeof source === 'object') {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.InvalidModelSource,
+ 'Source is expected to be a string or a number'
+ );
+ }
+ if (this.downloads.has(source)) {
+ const resource = this.downloads.get(source)!;
+ if (resource.status === DownloadStatus.PAUSED) {
+ // if the download is paused, `fetch` is treated like `resume`
+ return this.resume(source);
+ }
+ // if the download is ongoing, throw an error.
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadInProgress,
+ 'Already downloading this file'
+ );
+ }
+ if (typeof source === 'number' && !sourceExtended.uri) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherMissingUri,
+ 'Source Uri is expected to be available here'
+ );
+ }
+ if (typeof source === 'string') {
+ sourceExtended.uri = source;
+ }
+ const uri = sourceExtended.uri!;
+ const filename = ResourceFetcherUtils.getFilenameFromUri(uri);
+ sourceExtended.fileUri = `${RNEDirectory}${filename}`;
+ sourceExtended.cacheFileUri = `${cacheDirectory}${filename}`;
+
+ if (await ResourceFetcherUtils.checkFileExists(sourceExtended.fileUri)) {
+ return ResourceFetcherUtils.removeFilePrefix(sourceExtended.fileUri);
+ }
+ await ResourceFetcherUtils.createDirectoryIfNoExists();
+
+ const downloadResumable = createDownloadResumable(
+ uri,
+ sourceExtended.cacheFileUri,
+ { sessionType: FileSystemSessionType.BACKGROUND },
+ ({
+ totalBytesWritten,
+ totalBytesExpectedToWrite,
+ }: {
+ totalBytesWritten: number;
+ totalBytesExpectedToWrite: number;
+ }) => {
+ if (totalBytesExpectedToWrite === -1) {
+ // If totalBytesExpectedToWrite is -1, it means the server does not provide content length.
+ sourceExtended.callback!(0);
+ return;
+ }
+ sourceExtended.callback!(totalBytesWritten / totalBytesExpectedToWrite);
+ }
+ );
+ // create the entry for the this.downloads map
+ const downloadResource: DownloadResource = {
+ downloadResumable: downloadResumable,
+ status: DownloadStatus.ONGOING,
+ extendedInfo: sourceExtended,
+ };
+ // add the key-value pair to the map
+ this.downloads.set(source, downloadResource);
+ const result = await downloadResumable.downloadAsync();
+ if (
+ !this.downloads.has(source) ||
+ this.downloads.get(source)!.status === DownloadStatus.PAUSED
+ ) {
+ // if canceled or paused during the download
+ return null;
+ }
+ if (!result || result.status !== HTTP_CODE.OK) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.ResourceFetcherDownloadFailed,
+ `Failed to fetch resource from '${source}', context: ${result}`
+ );
+ }
+ await moveAsync({
+ from: sourceExtended.cacheFileUri,
+ to: sourceExtended.fileUri,
+ });
+ this.downloads.delete(source);
+ ResourceFetcherUtils.triggerHuggingFaceDownloadCounter(uri);
+ return ResourceFetcherUtils.removeFilePrefix(sourceExtended.fileUri);
+ },
+
+ async readAsString(path: string) {
+ // Expo needs URI
+ const uri = path.startsWith('file://') ? path : `file://${path}`;
+ return await readAsStringAsync(uri);
+ },
+};
diff --git a/packages/expo-resource-fetcher/src/ResourceFetcherUtils.ts b/packages/expo-resource-fetcher/src/ResourceFetcherUtils.ts
new file mode 100644
index 000000000..1ff7c0e1b
--- /dev/null
+++ b/packages/expo-resource-fetcher/src/ResourceFetcherUtils.ts
@@ -0,0 +1,116 @@
+import { RNEDirectory } from './constants/directories';
+import {
+ ResourceSource,
+ Logger,
+ ResourceFetcherUtils as CoreUtils,
+ HTTP_CODE,
+ DownloadStatus,
+ SourceType,
+ ResourceSourceExtended,
+ RnExecutorchError,
+ RnExecutorchErrorCode,
+} from 'react-native-executorch';
+import { Asset } from 'expo-asset';
+
+/**
+ * @internal
+ */
+import {
+ getInfoAsync,
+ makeDirectoryAsync,
+ type DownloadResumable,
+} from 'expo-file-system/legacy';
+
+export { HTTP_CODE, DownloadStatus, SourceType, ResourceSourceExtended };
+
+export interface DownloadResource {
+ downloadResumable: DownloadResumable;
+ status: DownloadStatus;
+ extendedInfo: ResourceSourceExtended;
+}
+
+export namespace ResourceFetcherUtils {
+ export const removeFilePrefix = CoreUtils.removeFilePrefix;
+ export const hashObject = CoreUtils.hashObject;
+ export const calculateDownloadProgress = CoreUtils.calculateDownloadProgress;
+ export const triggerHuggingFaceDownloadCounter =
+ CoreUtils.triggerHuggingFaceDownloadCounter;
+ export const getFilenameFromUri = CoreUtils.getFilenameFromUri;
+
+ export function getType(source: ResourceSource): SourceType {
+ if (typeof source === 'object') {
+ return SourceType.OBJECT;
+ } else if (typeof source === 'number') {
+ const uri = Asset.fromModule(source).uri;
+ if (uri.startsWith('http')) {
+ return SourceType.DEV_MODE_FILE;
+ }
+ return SourceType.RELEASE_MODE_FILE;
+ }
+ // typeof source == 'string'
+ if (source.startsWith('file://')) {
+ return SourceType.LOCAL_FILE;
+ }
+ return SourceType.REMOTE_FILE;
+ }
+
+ export async function getFilesSizes(sources: ResourceSource[]) {
+ const results: Array<{
+ source: ResourceSource;
+ type: SourceType;
+ length: number;
+ previousFilesTotalLength: number;
+ }> = [];
+ let totalLength = 0;
+ let previousFilesTotalLength = 0;
+ for (const source of sources) {
+ const type = ResourceFetcherUtils.getType(source);
+ let length = 0;
+ try {
+ if (type === SourceType.REMOTE_FILE && typeof source === 'string') {
+ const response = await fetch(source, { method: 'HEAD' });
+ if (!response.ok) {
+ Logger.warn(
+ `Failed to fetch HEAD for ${source}: ${response.status}`
+ );
+ continue;
+ }
+
+ const contentLength = response.headers.get('content-length');
+ if (!contentLength) {
+ Logger.warn(`No content-length header for ${source}`);
+ }
+
+ length = contentLength ? parseInt(contentLength, 10) : 0;
+ previousFilesTotalLength = totalLength;
+ totalLength += length;
+ }
+ } catch (error) {
+ Logger.warn(`Error fetching HEAD for ${source}:`, error);
+ continue;
+ } finally {
+ results.push({ source, type, length, previousFilesTotalLength });
+ }
+ }
+ return { results, totalLength };
+ }
+
+ export async function createDirectoryIfNoExists() {
+ if (!(await checkFileExists(RNEDirectory))) {
+ try {
+ await makeDirectoryAsync(RNEDirectory, { intermediates: true });
+ } catch (error) {
+ throw new RnExecutorchError(
+ RnExecutorchErrorCode.AccessFailed,
+ `Failed to create directory at ${RNEDirectory}`,
+ error
+ );
+ }
+ }
+ }
+
+ export async function checkFileExists(fileUri: string) {
+ const fileInfo = await getInfoAsync(fileUri);
+ return fileInfo.exists;
+ }
+}
diff --git a/packages/react-native-executorch/src/constants/directories.ts b/packages/expo-resource-fetcher/src/constants/directories.ts
similarity index 100%
rename from packages/react-native-executorch/src/constants/directories.ts
rename to packages/expo-resource-fetcher/src/constants/directories.ts
diff --git a/packages/expo-resource-fetcher/src/index.ts b/packages/expo-resource-fetcher/src/index.ts
new file mode 100644
index 000000000..c0ec4024f
--- /dev/null
+++ b/packages/expo-resource-fetcher/src/index.ts
@@ -0,0 +1 @@
+export * from './ResourceFetcher';
diff --git a/packages/expo-resource-fetcher/tsconfig.json b/packages/expo-resource-fetcher/tsconfig.json
new file mode 100644
index 000000000..cadd2509a
--- /dev/null
+++ b/packages/expo-resource-fetcher/tsconfig.json
@@ -0,0 +1,28 @@
+{
+ "extends": "../../tsconfig.json",
+ "compilerOptions": {
+ "rootDir": "src",
+ "outDir": "lib",
+ "declaration": true,
+ "declarationMap": true,
+ "tsBuildInfoFile": "./lib/typescript/tsconfig.tsbuildinfo",
+ "composite": true,
+ "allowJs": false,
+ "allowUnreachableCode": false,
+ "allowUnusedLabels": false,
+ "esModuleInterop": true,
+ "forceConsistentCasingInFileNames": true,
+ "module": "esnext",
+ "moduleResolution": "node",
+ "noFallthroughCasesInSwitch": true,
+ "noImplicitReturns": true,
+ "noStrictGenericChecks": false,
+ "noUnusedLocals": true,
+ "noUnusedParameters": true,
+ "noUncheckedIndexedAccess": true,
+ "strict": true,
+ "types": ["react", "node"]
+ },
+ "include": ["src"],
+ "exclude": ["node_modules", "lib"]
+}
diff --git a/packages/react-native-executorch/android/gradle.properties b/packages/react-native-executorch/android/gradle.properties
index b30a8b11d..97cdd1854 100644
--- a/packages/react-native-executorch/android/gradle.properties
+++ b/packages/react-native-executorch/android/gradle.properties
@@ -1,5 +1,5 @@
RnExecutorch_kotlinVersion=1.7.0
-RnExecutorch_minSdkVersion=21
+RnExecutorch_minSdkVersion=26
RnExecutorch_targetSdkVersion=31
RnExecutorch_compileSdkVersion=31
-RnExecutorch_ndkversion=21.4.7075529
+RnExecutorch_ndkversion=21.4.7075529
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h b/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h
index 6ca7b8581..67748d716 100644
--- a/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h
+++ b/packages/react-native-executorch/common/rnexecutorch/ErrorCodes.h
@@ -9,27 +9,33 @@ namespace rnexecutorch {
enum class RnExecutorchErrorCode : int32_t {
/**
- * An umbrella-error that is thrown usually when something unexpected happens, for example a 3rd-party library error.
+ * An umbrella-error that is thrown usually when something unexpected happens,
+ * for example a 3rd-party library error.
*/
UnknownError = 101,
/**
- * Thrown when a user tries to run a model that is not yet downloaded or loaded into memory.
+ * Thrown when a user tries to run a model that is not yet downloaded or
+ * loaded into memory.
*/
ModuleNotLoaded = 102,
/**
- * An error ocurred when saving a file. This could be, for instance a result image from an image model.
+ * An error occurred when saving a file. This could be, for instance, a result
+ * image from an image model.
*/
FileWriteFailed = 103,
/**
- * Thrown when a user tries to run a model that is currently processing. It is only allowed to run a single model prediction at a time.
+ * Thrown when a user tries to run a model that is currently processing. It is
+ * only allowed to run a single model prediction at a time.
*/
ModelGenerating = 104,
/**
- * Thrown when a language is passed to a multi-language model that is not supported. For example OCR or Speech To Text.
+ * Thrown when a language is passed to a multi-language model that is not
+ * supported. For example OCR or Speech To Text.
*/
LanguageNotSupported = 105,
/**
- * Thrown when config parameters passed to a model are invalid. For example, when LLM's topp is outside of range [0, 1].
+ * Thrown when config parameters passed to a model are invalid. For example,
+ * when LLM's topp is outside of range [0, 1].
*/
InvalidConfig = 112,
/**
@@ -37,7 +43,8 @@ enum class RnExecutorchErrorCode : int32_t {
*/
InvalidModelSource = 255,
/**
- * Thrown when the number of passed inputs to the model is different than the model metadata specifies.
+ * Thrown when the number of passed inputs to the model is different than the
+ * model metadata specifies.
*/
UnexpectedNumInputs = 97,
/**
@@ -45,7 +52,8 @@ enum class RnExecutorchErrorCode : int32_t {
*/
ThreadPoolError = 113,
/**
- * Thrown when a file read operation failed. This could be invalid image url passed to image models, or unsupported format.
+ * Thrown when a file read operation failed. This could be an invalid image
+ * URL passed to image models, or an unsupported format.
*/
FileReadFailed = 114,
/**
@@ -53,7 +61,8 @@ enum class RnExecutorchErrorCode : int32_t {
*/
InvalidModelOutput = 115,
/**
- * Thrown when the dimensions of input tensors don't match the model's expected dimensions.
+ * Thrown when the dimensions of input tensors don't match the model's
+ * expected dimensions.
*/
WrongDimensions = 116,
/**
@@ -62,7 +71,8 @@ enum class RnExecutorchErrorCode : int32_t {
*/
InvalidUserInput = 117,
/**
- * Thrown when the number of downloaded files is unexpected, due to download interruptions.
+ * Thrown when the number of downloaded files is unexpected, due to download
+ * interruptions.
*/
DownloadInterrupted = 118,
/**
@@ -75,19 +85,23 @@ enum class RnExecutorchErrorCode : int32_t {
*/
MultilingualConfiguration = 160,
/**
- * Thrown when streaming transcription is attempted but audio data chunk is missing.
+ * Thrown when streaming transcription is attempted but audio data chunk is
+ * missing.
*/
MissingDataChunk = 161,
/**
- * Thrown when trying to stop or insert data into a stream that hasn't been started.
+ * Thrown when trying to stop or insert data into a stream that hasn't been
+ * started.
*/
StreamingNotStarted = 162,
/**
- * Thrown when trying to start a new streaming session while another is already in progress.
+ * Thrown when trying to start a new streaming session while another is
+ * already in progress.
*/
StreamingInProgress = 163,
/**
- * Thrown when a resource fails to download. This could be due to invalid URL, or for example a network problem.
+ * Thrown when a resource fails to download. This could be due to an invalid
+ * URL or, for example, a network problem.
*/
ResourceFetcherDownloadFailed = 180,
/**
@@ -103,7 +117,8 @@ enum class RnExecutorchErrorCode : int32_t {
*/
ResourceFetcherAlreadyOngoing = 183,
/**
- * Thrown when trying to pause, resume, or cancel a download that is not active.
+ * Thrown when trying to pause, resume, or cancel a download that is not
+ * active.
*/
ResourceFetcherNotActive = 184,
/**
diff --git a/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.cpp b/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.cpp
index 7a4426e06..8f9fbb041 100644
--- a/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.cpp
@@ -2,6 +2,7 @@
#include
#include
+#include
#include
#include
#include
@@ -10,9 +11,9 @@
#include
#include
#include
-#include
#include
#include
+#include
#include
#include
#include
diff --git a/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h b/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h
index d5c98763d..d299e34d1 100644
--- a/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h
+++ b/packages/react-native-executorch/common/rnexecutorch/RnExecutorchInstaller.h
@@ -40,7 +40,7 @@ class RnExecutorchInstaller {
const jsi::Value *args, size_t count) -> jsi::Value {
constexpr std::size_t expectedCount = std::tuple_size_v<
typename meta::ConstructorTraits::arg_types>;
- // count doesn't account for the JSCallInvoker
+
if (count != expectedCount - 1) {
char errorMessage[100];
std::snprintf(
@@ -54,8 +54,16 @@ class RnExecutorchInstaller {
meta::createConstructorArgsWithCallInvoker(
args, runtime, jsCallInvoker);
- auto modelImplementationPtr = std::make_shared(
- std::make_from_tuple(constructorArgs));
+ // This unpacks the tuple and calls the constructor directly inside
+ // make_shared. It avoids creating a temporary object, so no
+ // move/copy is required.
+ auto modelImplementationPtr = std::apply(
+ [](auto &&...unpackedArgs) {
+ return std::make_shared(
+ std::forward<decltype(unpackedArgs)>(unpackedArgs)...);
+ },
+ std::move(constructorArgs));
+
auto modelHostObject = std::make_shared>(
modelImplementationPtr, jsCallInvoker);
diff --git a/packages/react-native-executorch/common/rnexecutorch/host_objects/JsiConversions.h b/packages/react-native-executorch/common/rnexecutorch/host_objects/JsiConversions.h
index 2baf922db..59e598d11 100644
--- a/packages/react-native-executorch/common/rnexecutorch/host_objects/JsiConversions.h
+++ b/packages/react-native-executorch/common/rnexecutorch/host_objects/JsiConversions.h
@@ -15,6 +15,7 @@
#include
#include
+#include
#include
#include
#include
@@ -24,7 +25,9 @@ namespace rnexecutorch::jsi_conversion {
using namespace facebook;
-// Conversion from jsi to C++ types --------------------------------------------
+// =========================================================================
+// Conversion from jsi to C++ types (Input)
+// =========================================================================
template T getValue(const jsi::Value &val, jsi::Runtime &runtime);
@@ -50,7 +53,6 @@ inline std::u32string getValue(const jsi::Value &val,
jsi::Runtime &runtime) {
std::string utf8 = getValue(val, runtime);
std::wstring_convert, char32_t> conv;
-
return conv.from_bytes(utf8);
}
@@ -118,8 +120,7 @@ inline JSTensorViewIn getValue(const jsi::Value &val,
return tensorView;
}
-// C++ set from JS array. Set with heterogenerous look-up (adding std::less<>
-// enables querying with std::string_view).
+// C++ set from JS array
template <>
inline std::set>
getValue>>(const jsi::Value &val,
@@ -218,6 +219,7 @@ getValue>(const jsi::Value &val, jsi::Runtime &runtime) {
return getArrayAsVector(val, runtime);
}
+// ✅ Fix: Add support for uint64_t vectors (fixes Undefined Symbol error)
template <>
inline std::vector<uint64_t>
getValue<std::vector<uint64_t>>(const jsi::Value &val, jsi::Runtime &runtime) {
@@ -279,17 +281,16 @@ inline std::span getValue>(const jsi::Value &val,
return getTypedArrayAsSpan(val, runtime);
}
+// ✅ Fix: Add support for uint64_t spans (fixes Undefined Symbol error)
template <>
inline std::span<uint64_t>
getValue<std::span<uint64_t>>(const jsi::Value &val, jsi::Runtime &runtime) {
return getTypedArrayAsSpan<uint64_t>(val, runtime);
}
-// Conversion from C++ types to jsi --------------------------------------------
-
-// Implementation functions might return any type, but in a promise we can only
-// return jsi::Value or jsi::Object. For each type being returned
-// we add a function here.
+// =========================================================================
+// Conversion from C++ types to jsi (Output)
+// =========================================================================
inline jsi::Value getJsiValue(std::shared_ptr valuePtr,
jsi::Runtime &runtime) {
@@ -305,16 +306,16 @@ inline jsi::Value getJsiValue(const std::vector &vec,
return {runtime, array};
}
-inline jsi::Value getJsiValue(const std::vector &vec,
+inline jsi::Value getJsiValue(const std::vector &vec,
jsi::Runtime &runtime) {
jsi::Array array(runtime, vec.size());
for (size_t i = 0; i < vec.size(); i++) {
- array.setValueAtIndex(runtime, i, jsi::Value(static_cast(vec[i])));
+ array.setValueAtIndex(runtime, i, jsi::Value(vec[i]));
}
return {runtime, array};
}
-inline jsi::Value getJsiValue(const std::vector &vec,
+inline jsi::Value getJsiValue(const std::vector &vec,
jsi::Runtime &runtime) {
jsi::Array array(runtime, vec.size());
for (size_t i = 0; i < vec.size(); i++) {
@@ -323,47 +324,32 @@ inline jsi::Value getJsiValue(const std::vector &vec,
return {runtime, array};
}
-inline jsi::Value getJsiValue(const std::vector &vec,
+// ✅ Fix: Add support for uint64_t (unsigned long long) vectors
+// This fixes the error in TokenizerModule::encode/decode
+inline jsi::Value getJsiValue(const std::vector<uint64_t> &vec,
jsi::Runtime &runtime) {
jsi::Array array(runtime, vec.size());
for (size_t i = 0; i < vec.size(); i++) {
- array.setValueAtIndex(runtime, i,
- jsi::String::createFromUtf8(runtime, vec[i]));
+ // JS numbers are doubles. Large uint64s > 2^53 will lose precision.
+ array.setValueAtIndex(runtime, i, jsi::Value(static_cast<double>(vec[i])));
}
return {runtime, array};
}
-inline jsi::Value getJsiValue(const std::vector &vec,
+// ✅ Fix: Add support for int64_t vectors
+inline jsi::Value getJsiValue(const std::vector<int64_t> &vec,
jsi::Runtime &runtime) {
jsi::Array array(runtime, vec.size());
for (size_t i = 0; i < vec.size(); i++) {
- array.setValueAtIndex(runtime, i, jsi::Value(vec[i]));
+ array.setValueAtIndex(runtime, i, jsi::Value(static_cast<double>(vec[i])));
}
return {runtime, array};
}
-// Conditional as on android, size_t and uint64_t reduce to the same type,
-// introducing ambiguity
-template &&
- !std::is_same_v>>
-inline jsi::Value getJsiValue(T val, jsi::Runtime &runtime) {
- return jsi::Value(static_cast(val));
-}
-
-inline jsi::Value getJsiValue(uint64_t val, jsi::Runtime &runtime) {
- jsi::BigInt bigInt = jsi::BigInt::fromUint64(runtime, val);
- return {runtime, bigInt};
-}
-
inline jsi::Value getJsiValue(int val, jsi::Runtime &runtime) {
return {runtime, val};
}
-inline jsi::Value getJsiValue(bool val, jsi::Runtime &runtime) {
- return jsi::Value(val);
-}
-
inline jsi::Value getJsiValue(const std::shared_ptr &buf,
jsi::Runtime &runtime) {
jsi::ArrayBuffer arrayBuffer(runtime, buf);
@@ -401,7 +387,7 @@ inline jsi::Value getJsiValue(const std::vector &vec,
}
inline jsi::Value getJsiValue(const std::string &str, jsi::Runtime &runtime) {
- return jsi::String::createFromUtf8(runtime, str);
+ return jsi::String::createFromAscii(runtime, str);
}
inline jsi::Value
@@ -483,4 +469,39 @@ getJsiValue(const std::vector
return jsiSegments;
}
+// ✅ Fix: Use 'const &' to match template expansion rules
+inline jsi::Value
+getJsiValue(const models::image_segmentation::types::SegmentationResult &result,
+ jsi::Runtime &runtime) {
+
+ // Handle empty result (e.g. dropped frame)
+ if (!result.argmax || !result.buffers) {
+ return jsi::Value::undefined();
+ }
+
+ jsi::Object dict(runtime);
+
+ // 1. Create Argmax
+ auto argmaxBuf = jsi::ArrayBuffer(runtime, result.argmax);
+ auto int32Ctor =
+ runtime.global().getPropertyAsFunction(runtime, "Int32Array");
+ auto int32Arr =
+ int32Ctor.callAsConstructor(runtime, argmaxBuf).getObject(runtime);
+ dict.setProperty(runtime, "ARGMAX", int32Arr);
+
+ // 2. Create Class Arrays
+ for (auto &[label, buffer] : *result.buffers) {
+ auto floatBuf = jsi::ArrayBuffer(runtime, buffer);
+ auto floatCtor =
+ runtime.global().getPropertyAsFunction(runtime, "Float32Array");
+ auto floatArr =
+ floatCtor.callAsConstructor(runtime, floatBuf).getObject(runtime);
+
+ dict.setProperty(
+ runtime, jsi::String::createFromAscii(runtime, label.data()), floatArr);
+ }
+
+ return jsi::Value(std::move(dict));
+}
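+
+// Resulting JS shape (illustration only):
+//   { ARGMAX: Int32Array, "<class label>": Float32Array, ... }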
+
} // namespace rnexecutorch::jsi_conversion
diff --git a/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h b/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h
index a1ce8e8e8..e2f3e50a5 100644
--- a/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h
+++ b/packages/react-native-executorch/common/rnexecutorch/host_objects/ModelHostObject.h
@@ -9,6 +9,7 @@
#include
#include
+#include
#include
#include
#include
@@ -17,6 +18,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -45,10 +47,11 @@ template class ModelHostObject : public JsiHostObject {
"getInputShape"));
}
- if constexpr (meta::HasGenerate) {
- addFunctions(JSI_EXPORT_FUNCTION(ModelHostObject,
- promiseHostFunction<&Model::generate>,
- "generate"));
+ if constexpr (meta::HasGenerateFromString<Model>) {
+ addFunctions(
+ JSI_EXPORT_FUNCTION(ModelHostObject,
+ promiseHostFunction<&Model::generateFromString>,
+ "generateFromString"));
}
if constexpr (meta::HasEncode) {
@@ -107,11 +110,6 @@ template class ModelHostObject : public JsiHostObject {
synchronousHostFunction<&Model::getGeneratedTokenCount>,
"getGeneratedTokenCount"));
- addFunctions(JSI_EXPORT_FUNCTION(
- ModelHostObject,
- synchronousHostFunction<&Model::getPromptTokenCount>,
- "getPromptTokenCount"));
-
addFunctions(
JSI_EXPORT_FUNCTION(ModelHostObject,
synchronousHostFunction<&Model::setCountInterval>,
@@ -155,14 +153,29 @@ template class ModelHostObject : public JsiHostObject {
addFunctions(JSI_EXPORT_FUNCTION(ModelHostObject,
promiseHostFunction<&Model::stream>,
"stream"));
+ // addFunctions(JSI_EXPORT_FUNCTION(
+ // ModelHostObject, promiseHostFunction<&Model::setFixedModel>,
+ // "setFixedModel"));
+ }
+
+ // Register generateFromFrame for all VisionModel subclasses
+ if constexpr (meta::DerivedFromOrSameAs<Model, models::VisionModel>) {
addFunctions(JSI_EXPORT_FUNCTION(
- ModelHostObject, synchronousHostFunction<&Model::streamStop>,
- "streamStop"));
+ ModelHostObject, visionHostFunction<&Model::generateFromFrame>,
+ "generateFromFrame"));
+ }
+
+ // Register generateFromPixels for models that support it
+ if constexpr (meta::HasGenerateFromPixels<Model>) {
+ addFunctions(
+ JSI_EXPORT_FUNCTION(ModelHostObject,
+ visionHostFunction<&Model::generateFromPixels>,
+ "generateFromPixels"));
}
}
- // A generic host function that runs synchronously, works analogously to the
- // generic promise host function.
+ // A generic host function that runs synchronously, works analogously to
+ // the generic promise host function.
template JSI_HOST_FUNCTION(synchronousHostFunction) {
constexpr std::size_t functionArgCount = meta::getArgumentCount(FnPtr);
if (functionArgCount != count) {
@@ -208,9 +221,70 @@ template class ModelHostObject : public JsiHostObject {
}
}
+ template <auto FnPtr> JSI_HOST_FUNCTION(visionHostFunction) {
+ // 1. Check Argument Count
+ // (We rely on our new FunctionTraits)
+ constexpr std::size_t cppArgCount =
+ meta::FunctionTraits<decltype(FnPtr)>::arity;
+
+ // JS supplies the frame (args[0]) plus the tail arguments; only the
+ // Runtime is injected, so we expect (Total C++ Args) - 1 JS arguments.
+ constexpr std::size_t expectedJsArgs = cppArgCount - 1;
+ log(LOG_LEVEL::Debug, cppArgCount, count);
+ if (count != expectedJsArgs) {
+ throw jsi::JSError(runtime, "Argument count mismatch in vision function");
+ }
+
+ try {
+ // 2. The Magic Trick 🪄
+ // We get a pointer to a dummy function: void dummy(Rest...) {}
+ // This function has exactly the signature of the arguments we want to
+ // parse.
+ auto dummyFuncPtr = &meta::TailSignature<decltype(FnPtr)>::dummy;
+
+ // 3. Let existing helpers do the work
+ // We pass the dummy pointer. The helper inspects its arguments (Rest...)
+ // and converts args[0]...args[N] accordingly.
+ // Note: We pass (args + 1) because JS args[0] is the PixelData, which we
+ // handle manually; the dummy signature then covers the remaining
+ // expectedJsArgs - 1 arguments.
+ auto tailArgsTuple =
+ meta::createArgsTupleFromJsi(dummyFuncPtr, args + 1, runtime);
+
+ // 4. Invoke
+ using ReturnType =
+ typename meta::FunctionTraits<decltype(FnPtr)>::return_type;
+
+ if constexpr (std::is_void_v<ReturnType>) {
+ std::apply(
+ [&](auto &&...tailArgs) {
+ (model.get()->*FnPtr)(
+ runtime,
+ args[0], // 1. PixelData (Manually passed)
+ std::forward<decltype(tailArgs)>(
+ tailArgs)...); // 2. The rest (Auto parsed)
+ },
+ std::move(tailArgsTuple));
+ return jsi::Value::undefined();
+ } else {
+ auto result = std::apply(
+ [&](auto &&...tailArgs) {
+ return (model.get()->*FnPtr)(
+ runtime, args[0],
+ std::forward<decltype(tailArgs)>(tailArgs)...);
+ },
+ std::move(tailArgsTuple));
+
+ return jsi_conversion::getJsiValue(std::move(result), runtime);
+ }
+ } catch (const std::exception &e) {
+ throw jsi::JSError(runtime, e.what());
+ }
+ }
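+ // Illustration (not executed): a JS call
+ //   model.generateFromFrame(frame, 0.5)
+ // reaches a C++ method of the form
+ //   Result Model::generateFromFrame(jsi::Runtime &, const jsi::Value &,
+ //                                   double);
+ // where `frame` becomes args[0] (handled manually) and the tail (0.5) is
+ // parsed automatically via TailSignature. Method name and arity are
+ // illustrative only.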
+
// A generic host function that resolves a promise with a result of a
- // function. JSI arguments are converted to the types provided in the function
- // signature, and the return value is converted back to JSI before resolving.
+ // function. JSI arguments are converted to the types provided in the
+ // function signature, and the return value is converted back to JSI
+ // before resolving.
template JSI_HOST_FUNCTION(promiseHostFunction) {
auto promise = Promise::createPromise(
runtime, callInvoker,
@@ -231,8 +305,8 @@ template class ModelHostObject : public JsiHostObject {
meta::createArgsTupleFromJsi(FnPtr, args, runtime);
// We need to dispatch a thread if we want the function to be
- // asynchronous. In this thread all accesses to jsi::Runtime need to
- // be done via the callInvoker.
+ // asynchronous. In this thread all accesses to jsi::Runtime
+ // need to be done via the callInvoker.
threads::GlobalThreadPool::detach([this, promise,
argsConverted =
std::move(argsConverted)]() {
@@ -240,16 +314,16 @@ template class ModelHostObject : public JsiHostObject {
if constexpr (std::is_void_v) {
- // For void functions, just call the function and resolve
- // with undefined
+ // For void functions, just call the function and
+ // resolve with undefined
std::apply(std::bind_front(FnPtr, model),
std::move(argsConverted));
callInvoker->invokeAsync([promise](jsi::Runtime &runtime) {
promise->resolve(jsi::Value::undefined());
});
} else {
- // For non-void functions, capture the result and convert
- // it
+ // For non-void functions, capture the result and
+ // convert it
auto result = std::apply(std::bind_front(FnPtr, model),
std::move(argsConverted));
// The result is copied. It should either be quickly
@@ -277,8 +351,8 @@ template class ModelHostObject : public JsiHostObject {
// This catch should be merged with the next two
// (std::runtime_error and jsi::JSError inherits from
// std::exception) HOWEVER react native has broken RTTI
- // which breaks proper exception type checking. Remove when
- // the following change is present in our version:
+ // which breaks proper exception type checking. Remove
+ // when the following change is present in our version:
// https://github.com/facebook/react-native/commit/3132cc88dd46f95898a756456bebeeb6c248f20e
callInvoker->invokeAsync([e = std::move(e), promise]() {
promise->reject(std::string(e.what()));
@@ -339,5 +413,4 @@ template class ModelHostObject : public JsiHostObject {
std::shared_ptr model;
std::shared_ptr callInvoker;
};
-
} // namespace rnexecutorch
diff --git a/packages/react-native-executorch/common/rnexecutorch/metaprogramming/FunctionHelpers.h b/packages/react-native-executorch/common/rnexecutorch/metaprogramming/FunctionHelpers.h
index 8290a810b..29ed41d9d 100644
--- a/packages/react-native-executorch/common/rnexecutorch/metaprogramming/FunctionHelpers.h
+++ b/packages/react-native-executorch/common/rnexecutorch/metaprogramming/FunctionHelpers.h
@@ -3,12 +3,39 @@
#include
#include
#include
+#include
#include
namespace rnexecutorch::meta {
using namespace facebook;
+// =========================================================================
+// 1. Function Traits (Extracts Arity, Return Type, Args)
+// =========================================================================
+
+template <typename T> struct FunctionTraits;
+
+// Specialization for Member Functions
+template <typename R, typename C, typename... Args>
+struct FunctionTraits<R (C::*)(Args...)> {
+ static constexpr std::size_t arity = sizeof...(Args);
+ using return_type = R;
+ using args_tuple = std::tuple;
+};
+
+// Specialization for const Member Functions
+template <typename R, typename C, typename... Args>
+struct FunctionTraits<R (C::*)(Args...) const> {
+ static constexpr std::size_t arity = sizeof...(Args);
+ using return_type = R;
+ using args_tuple = std::tuple;
+};
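+
+// Example (illustration only, for a hypothetical member
+// `void Foo::bar(int, float)`):
+//   static_assert(FunctionTraits<decltype(&Foo::bar)>::arity == 2);
+//   using Ret = FunctionTraits<decltype(&Foo::bar)>::return_type; // void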
+
+// =========================================================================
+// 2. Argument Counting Helpers
+// =========================================================================
+
template
constexpr std::size_t getArgumentCount(R (Model::*f)(Types...)) {
return sizeof...(Types);
@@ -19,6 +46,10 @@ constexpr std::size_t getArgumentCount(R (Model::*f)(Types...) const) {
return sizeof...(Types);
}
+// =========================================================================
+// 3. JSI -> Tuple Conversion Logic
+// =========================================================================
+
template
std::tuple fillTupleFromArgs(std::index_sequence,
const jsi::Value *args,
@@ -31,7 +62,6 @@ std::tuple fillTupleFromArgs(std::index_sequence,
* arguments for method supplied with a pointer. The types in the tuple are
* inferred from the method pointer.
*/
-
template
std::tuple createArgsTupleFromJsi(R (Model::*f)(Types...),
const jsi::Value *args,
@@ -47,4 +77,37 @@ std::tuple createArgsTupleFromJsi(R (Model::*f)(Types...) const,
return fillTupleFromArgs(std::index_sequence_for{}, args,
runtime);
}
+
+// Overload for free functions (used by TailSignature dummy)
+template
+std::tuple createArgsTupleFromJsi(void (*f)(Types...),
+ const jsi::Value *args,
+ jsi::Runtime &runtime) {
+ return fillTupleFromArgs(std::index_sequence_for{}, args,
+ runtime);
+}
+
+// =========================================================================
+// 4. Tail Signature Helper (Crucial for Vision Functions)
+// =========================================================================
+
+// Extracts the "Tail" arguments of a function signature, skipping the first
+// two arguments (Runtime and FrameValue).
+template <typename T> struct TailSignature;
+
+// Non-const member function specialization
+template <typename R, typename C, typename RuntimeT, typename FrameT,
+ typename... Rest>
+struct TailSignature<R (C::*)(RuntimeT, FrameT, Rest...)> {
+ // A dummy function that has the signature of just the "Rest" arguments.
+ static void dummy(Rest...) {}
+};
+
+// Const member function specialization
+template <typename R, typename C, typename RuntimeT, typename FrameT,
+ typename... Rest>
+struct TailSignature<R (C::*)(RuntimeT, FrameT, Rest...) const> {
+ static void dummy(Rest...) {}
+};
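+
+// Example (illustration only): for a hypothetical member
+//   void Model::generateFromFrame(jsi::Runtime &, const jsi::Value &, float);
+// TailSignature<decltype(&Model::generateFromFrame)>::dummy has the signature
+//   void dummy(float);
+// which createArgsTupleFromJsi then uses to parse the remaining JS arguments.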
+
} // namespace rnexecutorch::meta
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/metaprogramming/TypeConcepts.h b/packages/react-native-executorch/common/rnexecutorch/metaprogramming/TypeConcepts.h
index 85a3db449..8100a471b 100644
--- a/packages/react-native-executorch/common/rnexecutorch/metaprogramming/TypeConcepts.h
+++ b/packages/react-native-executorch/common/rnexecutorch/metaprogramming/TypeConcepts.h
@@ -12,8 +12,13 @@ template
concept SameAs = std::is_same_v;
template
-concept HasGenerate = requires(T t) {
- { &T::generate };
+concept HasGenerateFromString = requires(T t) {
+ { &T::generateFromString };
+};
+
+template <typename T>
+concept HasGenerateFromPixels = requires(T t) {
+ { &T::generateFromPixels };
};
template
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/VisionModel.cpp b/packages/react-native-executorch/common/rnexecutorch/models/VisionModel.cpp
new file mode 100644
index 000000000..54c0adfd2
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/models/VisionModel.cpp
@@ -0,0 +1,63 @@
+#include "VisionModel.h"
+#include
+
+namespace rnexecutorch {
+namespace models {
+
+using namespace facebook;
+
+cv::Mat VisionModel::extractFromFrame(jsi::Runtime &runtime,
+ const jsi::Value &frameData) const {
+ // Extract frame using FrameProcessor utility
+ auto frameObj = frameData.asObject(runtime);
+ cv::Mat frame = utils::FrameProcessor::extractFrame(runtime, frameObj);
+
+ // Apply model-specific preprocessing
+ return preprocessFrame(frame);
+}
+
+cv::Mat VisionModel::extractFromPixels(jsi::Runtime &runtime,
+ const jsi::Object &pixelData) const {
+ // Extract width, height, and channels
+ if (!pixelData.hasProperty(runtime, "width") ||
+ !pixelData.hasProperty(runtime, "height") ||
+ !pixelData.hasProperty(runtime, "channels") ||
+ !pixelData.hasProperty(runtime, "data")) {
+ throw std::runtime_error(
+ "Invalid pixel data: must contain width, height, channels, and data");
+ }
+
+ int width = pixelData.getProperty(runtime, "width").asNumber();
+ int height = pixelData.getProperty(runtime, "height").asNumber();
+ int channels = pixelData.getProperty(runtime, "channels").asNumber();
+
+ // Get the ArrayBuffer
+ auto dataValue = pixelData.getProperty(runtime, "data");
+ if (!dataValue.isObject() ||
+ !dataValue.asObject(runtime).isArrayBuffer(runtime)) {
+ throw std::runtime_error(
+ "pixel data 'data' property must be an ArrayBuffer");
+ }
+
+ auto arrayBuffer = dataValue.asObject(runtime).getArrayBuffer(runtime);
+ size_t expectedSize = width * height * channels;
+
+ if (arrayBuffer.size(runtime) != expectedSize) {
+ throw std::runtime_error(
+ "ArrayBuffer size does not match width * height * channels");
+ }
+
+ // Create cv::Mat and copy the data
+ // OpenCV uses BGR/BGRA format internally, but we'll create as-is and let
+ // preprocessFrame handle conversion
+ int cvType = (channels == 3) ? CV_8UC3 : CV_8UC4;
+ cv::Mat image(height, width, cvType);
+
+ // Copy data from ArrayBuffer to cv::Mat
+ std::memcpy(image.data, arrayBuffer.data(runtime), expectedSize);
+
+ return image;
+}
+
+} // namespace models
+} // namespace rnexecutorch
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/VisionModel.h b/packages/react-native-executorch/common/rnexecutorch/models/VisionModel.h
new file mode 100644
index 000000000..9ba5cf7e4
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/models/VisionModel.h
@@ -0,0 +1,175 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+namespace rnexecutorch {
+namespace models {
+
+/**
+ * @brief Base class for computer vision models that support real-time camera
+ * input
+ *
+ * VisionModel extends BaseModel with thread-safe inference and automatic frame
+ * extraction from VisionCamera. This class is designed for models that need to
+ * process camera frames in real-time (e.g., at 30fps).
+ *
+ * Thread Safety:
+ * - All inference operations are protected by a mutex
+ * - generateFromFrame() uses try_lock() to skip frames when the model is busy
+ * - This prevents blocking the camera thread and maintains smooth frame rates
+ *
+ * Usage:
+ * Subclasses should:
+ * 1. Inherit from VisionModel instead of BaseModel
+ * 2. Implement preprocessFrame() with model-specific preprocessing
+ * 3. Use inference_mutex_ when calling forward() in custom generate methods
+ * 4. Use lock_guard for blocking operations (JS API)
+ * 5. Use try_lock() for non-blocking operations (camera API)
+ *
+ * Example:
+ * @code
+ * class Classification : public VisionModel {
+ * public:
+ * std::unordered_map<std::string, float>
+ * generateFromFrame(jsi::Runtime& runtime, const jsi::Value& frameValue) {
+ * // try_lock is handled automatically
+ * auto frameObject = frameValue.asObject(runtime);
+ * cv::Mat frame = FrameExtractor::extractFrame(runtime, frameObject);
+ *
+ * // Lock before inference
+ * if (!inference_mutex_.try_lock()) {
+ * return {}; // Skip frame if busy
+ * }
+ * std::lock_guard<std::mutex> lock(inference_mutex_, std::adopt_lock);
+ *
+ * auto preprocessed = preprocessFrame(frame);
+ * // ... run inference
+ * }
+ * };
+ * @endcode
+ */
+class VisionModel : public BaseModel {
+public:
+ /**
+ * @brief Construct a VisionModel with the same parameters as BaseModel
+ *
+ * VisionModel uses the same construction pattern as BaseModel, just adding
+ * thread-safety on top.
+ */
+ VisionModel(const std::string &modelSource,
+ std::shared_ptr<react::CallInvoker> callInvoker)
+ : BaseModel(modelSource, callInvoker) {}
+
+ /**
+ * @brief Virtual destructor for proper cleanup in derived classes
+ */
+ virtual ~VisionModel() = default;
+
+protected:
+ /**
+ * @brief Mutex to ensure thread-safe inference
+ *
+ * This mutex protects against race conditions when:
+ * - generateFromFrame() is called from VisionCamera worklet thread (30fps)
+ * - generate() is called from JavaScript thread simultaneously
+ *
+ * Usage guidelines:
+ * - Use std::lock_guard for blocking operations (JS API can wait)
+ * - Use try_lock() for non-blocking operations (camera should skip frames)
+ *
+ * @note Marked mutable to allow locking in const methods if needed
+ */
+ mutable std::mutex inference_mutex_;
+
+ /**
+ * @brief Preprocess a camera frame for model input
+ *
+ * This method should implement model-specific preprocessing such as:
+ * - Resizing to the model's expected input size
+ * - Color space conversion (e.g., BGR to RGB)
+ * - Normalization
+ * - Any other model-specific transformations
+ *
+ * @param frame Input frame from camera (already extracted and rotated by
+ * FrameExtractor)
+ * @return Preprocessed cv::Mat ready for tensor conversion
+ *
+ * @note The input frame is already in RGB format and rotated 90° clockwise
+ * @note This method is called under mutex protection in generateFromFrame()
+ */
+ virtual cv::Mat preprocessFrame(const cv::Mat &frame) const = 0;
+
+ /**
+ * @brief Extract and preprocess frame from VisionCamera in one call
+ *
+ * This is a convenience method that combines frame extraction and
+ * preprocessing. It handles both nativeBuffer (zero-copy) and ArrayBuffer
+ * paths automatically.
+ *
+ * @param runtime JSI runtime
+ * @param frameData JSI value containing frame data from VisionCamera
+ *
+ * @return Preprocessed cv::Mat ready for tensor conversion
+ *
+ * @throws std::runtime_error if frame extraction fails
+ *
+ * @note This method does NOT acquire the inference mutex - caller is
+ * responsible
+ * @note Typical usage:
+ * @code
+ * cv::Mat preprocessed = extractFromFrame(runtime, frameData);
+ * auto tensor = image_processing::getTensorFromMatrix(dims, preprocessed);
+ * @endcode
+ */
+ cv::Mat extractFromFrame(jsi::Runtime &runtime,
+ const jsi::Value &frameData) const;
+
+ /**
+ * @brief Extract cv::Mat from raw pixel data (ArrayBuffer) sent from
+ * JavaScript
+ *
+ * This method enables users to run inference on raw pixel data without file
+ * I/O. Useful for processing images already in memory (e.g., from a canvas
+ * or an image library).
+ *
+ * @param runtime JSI runtime
+ * @param pixelData JSI object containing:
+ * - data: ArrayBuffer with raw pixel values
+ * - width: number - image width
+ * - height: number - image height
+ * - channels: number - number of channels (3 for RGB, 4 for
+ * RGBA)
+ *
+ * @return cv::Mat containing the pixel data
+ *
+ * @throws std::runtime_error if pixelData format is invalid
+ *
+ * @note The returned cv::Mat owns a copy of the data
+ * @note Expected pixel format: RGB or RGBA, row-major order
+ * @note Typical usage from JS:
+ * @code
+ * const pixels = new Uint8Array([...]); // Raw pixel data
+ * const result = model.generateFromPixels({
+ * data: pixels.buffer,
+ * width: 640,
+ * height: 480,
+ * channels: 3
+ * }, 0.5);
+ * @endcode
+ */
+ cv::Mat extractFromPixels(jsi::Runtime &runtime,
+ const jsi::Object &pixelData) const;
+};
+
+} // namespace models
+// Register VisionModel constructor traits
+// Even though VisionModel is abstract, the metaprogramming system needs to know
+// its constructor signature for derived classes
+REGISTER_CONSTRUCTOR(models::VisionModel, std::string,
+                     std::shared_ptr<react::CallInvoker>);
+
+} // namespace rnexecutorch
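
To make the contract above concrete, here is a minimal sketch of a derived model. The class name, the return type, and the 320x320 input size are illustrative assumptions rather than part of this diff; only the locking pattern and the preprocessFrame()/extractFromFrame() contract come from the header above.

    // Hypothetical subclass (illustration only; assumes the VisionModel
    // header above is included)
    class MyDetector : public models::VisionModel {
    public:
      using models::VisionModel::VisionModel; // inherit the BaseModel-style ctor

      std::vector<float> generateFromFrame(jsi::Runtime &runtime,
                                           const jsi::Value &frameData) {
        // Non-blocking: drop the frame if an inference is already running
        if (!inference_mutex_.try_lock()) {
          return {};
        }
        std::lock_guard<std::mutex> lock(inference_mutex_, std::adopt_lock);

        // Extraction plus model-specific preprocessing in one call
        cv::Mat preprocessed = extractFromFrame(runtime, frameData);
        // ... convert to a tensor, forward(), postprocess ...
        return {};
      }

    protected:
      cv::Mat preprocessFrame(const cv::Mat &frame) const override {
        cv::Mat resized;
        cv::resize(frame, resized, cv::Size(320, 320)); // assumed input size
        return resized;
      }
    };
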
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp b/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp
index 0fba07108..b9fad1b88 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/models/classification/Classification.cpp
@@ -73,4 +73,4 @@ Classification::postprocess(const Tensor &tensor) {
return probs;
}
-} // namespace rnexecutorch::models::classification
+} // namespace rnexecutorch::models::classification
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp
index ec3129e76..bb3b5ffbc 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.cpp
@@ -48,4 +48,4 @@ ImageEmbeddings::generate(std::string imageSource) {
return BaseEmbeddings::postprocess(forwardResult);
}
-} // namespace rnexecutorch::models::embeddings
+} // namespace rnexecutorch::models::embeddings
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.h b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.h
index 7e114e939..9a1d6429b 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.h
+++ b/packages/react-native-executorch/common/rnexecutorch/models/embeddings/image/ImageEmbeddings.h
@@ -27,4 +27,4 @@ class ImageEmbeddings final : public BaseEmbeddings {
REGISTER_CONSTRUCTOR(models::embeddings::ImageEmbeddings, std::string,
std::shared_ptr<react::CallInvoker>);
-} // namespace rnexecutorch
+} // namespace rnexecutorch
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp
index a2c1ae865..08f2a4683 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.cpp
@@ -167,4 +167,4 @@ std::shared_ptr ImageSegmentation::populateDictionary(
return dictPtr;
}
-} // namespace rnexecutorch::models::image_segmentation
+} // namespace rnexecutorch::models::image_segmentation
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h
index 301833ce8..ba154898f 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h
+++ b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/ImageSegmentation.h
@@ -45,4 +45,4 @@ class ImageSegmentation : public BaseModel {
REGISTER_CONSTRUCTOR(models::image_segmentation::ImageSegmentation, std::string,
std::shared_ptr<react::CallInvoker>);
-} // namespace rnexecutorch
+} // namespace rnexecutorch
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/Types.h b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/Types.h
new file mode 100644
index 000000000..7606c464b
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/models/image_segmentation/Types.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace rnexecutorch::models::image_segmentation::types {
+
+// Result struct shared by the image segmentation pipeline
+struct SegmentationResult {
+  std::shared_ptr<std::vector<float>> argmax;
+  std::shared_ptr<
+      std::unordered_map<std::string, std::shared_ptr<std::vector<float>>>>
+      buffers;
+};
+
+} // namespace rnexecutorch::models::image_segmentation::types
\ No newline at end of file
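
A quick sketch of how this struct is meant to be filled: argmax holds one value per pixel, while buffers maps each class label to its own score plane. The float-vector payload type and the "background" key are illustrative assumptions here.

    // Hypothetical usage (illustration only, not part of this diff)
    #include <cstddef>
    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <vector>

    using rnexecutorch::models::image_segmentation::types::SegmentationResult;

    SegmentationResult makeResult(std::size_t numPixels) {
      SegmentationResult result;
      // Per-pixel winning class
      result.argmax = std::make_shared<std::vector<float>>(numPixels, 0.0f);
      // One score plane per class label
      result.buffers = std::make_shared<std::unordered_map<
          std::string, std::shared_ptr<std::vector<float>>>>();
      (*result.buffers)["background"] =
          std::make_shared<std::vector<float>>(numPixels, 0.0f);
      return result;
    }
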
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp b/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp
index 8b5bc022f..969f85cc3 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.cpp
@@ -2,14 +2,16 @@
#include
#include
+#include "rnexecutorch/utils/FrameExtractor.h"
#include
+#include <stdexcept>
namespace rnexecutorch::models::object_detection {
ObjectDetection::ObjectDetection(
const std::string &modelSource,
std::shared_ptr<react::CallInvoker> callInvoker)
- : BaseModel(modelSource, callInvoker) {
+ : VisionModel(modelSource, callInvoker) {
auto inputTensors = getAllInputShapes();
if (inputTensors.size() == 0) {
throw RnExecutorchError(RnExecutorchErrorCode::UnexpectedNumInputs,
@@ -32,11 +34,6 @@ ObjectDetection::ObjectDetection(
std::vector<Detection>
ObjectDetection::postprocess(const std::vector<EValue> &tensors,
cv::Size originalSize, double detectionThreshold) {
- if (detectionThreshold <= 0 || detectionThreshold > 1) {
- throw RnExecutorchError(RnExecutorchErrorCode::InvalidConfig,
- "Detection threshold must be greater than 0 "
- "and less than or equal to 1.");
- }
float widthRatio =
static_cast<float>(originalSize.width) / modelImageSize.width;
float heightRatio =
@@ -75,7 +72,10 @@ ObjectDetection::postprocess(const std::vector &tensors,
}
std::vector<Detection>
-ObjectDetection::generate(std::string imageSource, double detectionThreshold) {
+ObjectDetection::generateFromString(std::string imageSource,
+ double detectionThreshold) {
+  std::lock_guard<std::mutex> lock(inference_mutex_);
+
auto [inputTensor, originalSize] =
image_processing::readImageToTensor(imageSource, getAllInputShapes()[0]);
@@ -88,4 +88,98 @@ ObjectDetection::generate(std::string imageSource, double detectionThreshold) {
return postprocess(forwardResult.get(), originalSize, detectionThreshold);
}
+
+std::vector<Detection>
+ObjectDetection::runInference(cv::Mat image, double detectionThreshold) {
+  std::lock_guard<std::mutex> lock(inference_mutex_);
+
+ // Store original size for postprocessing
+ cv::Size originalSize = image.size();
+
+ // Preprocess the image using model-specific preprocessing
+ cv::Mat preprocessed = preprocessFrame(image);
+
+ // Create tensor and run inference
+  const std::vector<int32_t> tensorDims = getAllInputShapes()[0];
+ auto inputTensor =
+ image_processing::getTensorFromMatrix(tensorDims, preprocessed);
+
+ auto forwardResult = BaseModel::forward(inputTensor);
+ if (!forwardResult.ok()) {
+ throw RnExecutorchError(forwardResult.error(),
+ "The model's forward function did not succeed. "
+ "Ensure the model input is correct.");
+ }
+
+ return postprocess(forwardResult.get(), originalSize, detectionThreshold);
+}
+
+std::vector<Detection>
+ObjectDetection::generateFromFrame(jsi::Runtime &runtime,
+ const jsi::Value &frameData,
+ double detectionThreshold) {
+ // Try-lock: skip frame if model is busy (non-blocking for camera)
+ if (!inference_mutex_.try_lock()) {
+ return {}; // Return empty vector, don't block camera thread
+ }
+  std::unique_lock<std::mutex> lock(inference_mutex_, std::adopt_lock);
+
+  // Extract frame using the FrameExtractor utility
+  auto frameObj = frameData.asObject(runtime);
+  cv::Mat frame =
+      rnexecutorch::utils::FrameExtractor::extractFrame(runtime, frameObj);
+
+  // Release the lock before calling runInference (which will re-acquire it)
+  lock.unlock();
+
+ // Use the internal helper - it handles preprocessing and inference
+ return runInference(frame, detectionThreshold);
+}
+
+std::vector<Detection>
+ObjectDetection::generateFromPixels(jsi::Runtime &runtime,
+ const jsi::Value &pixelData,
+ double detectionThreshold) {
+ // Extract raw pixel data from JavaScript
+ auto pixelObj = pixelData.asObject(runtime);
+ cv::Mat image = extractFromPixels(runtime, pixelObj);
+
+ // Use the internal helper - it handles locking, preprocessing, and inference
+ return runInference(image, detectionThreshold);
+}
+
+cv::Mat ObjectDetection::preprocessFrame(const cv::Mat &frame) const {
+ // Get target size from model input shape
+  const std::vector<int32_t> tensorDims = getAllInputShapes()[0];
+ cv::Size tensorSize = cv::Size(tensorDims[tensorDims.size() - 1],
+ tensorDims[tensorDims.size() - 2]);
+
+ cv::Mat processed;
+
+ // Convert RGBA/BGRA to RGB if needed
+ if (frame.channels() == 4) {
+ cv::Mat rgb;
+
+// Platform-specific color conversion:
+// iOS uses BGRA format, Android uses RGBA format
+#ifdef __APPLE__
+ // iOS: BGRA → RGB
+ cv::cvtColor(frame, rgb, cv::COLOR_BGRA2RGB);
+#else
+ // Android: RGBA → RGB
+ cv::cvtColor(frame, rgb, cv::COLOR_RGBA2RGB);
+#endif
+
+ cv::resize(rgb, processed, tensorSize);
+ } else if (frame.channels() == 3) {
+ // Already RGB, just resize
+ cv::resize(frame, processed, tensorSize);
+ } else {
+ throw std::runtime_error("Unsupported frame format: " +
+ std::to_string(frame.channels()) + " channels");
+ }
+
+ return processed;
+}
} // namespace rnexecutorch::models::object_detection
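
The file above mixes two locking modes, and the distinction matters: the camera path must never block the frame-processor thread, while the JS path may simply wait. A standalone sketch of both patterns (names are illustrative):

    #include <mutex>

    std::mutex inference_mutex_;

    // Camera path: at ~30 fps, skip the frame instead of blocking
    bool tryRunOnFrame() {
      if (!inference_mutex_.try_lock()) {
        return false; // model busy -> drop this frame
      }
      std::lock_guard<std::mutex> lock(inference_mutex_, std::adopt_lock);
      // ... run inference ...
      return true;
    }

    // JS path: the caller can afford to wait for the current frame to finish
    void runBlocking() {
      std::lock_guard<std::mutex> lock(inference_mutex_);
      // ... run inference ...
    }
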
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.h b/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.h
index bba09a6d8..fc554003b 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.h
+++ b/packages/react-native-executorch/common/rnexecutorch/models/object_detection/ObjectDetection.h
@@ -8,7 +8,7 @@
#include "Types.h"
#include "rnexecutorch/metaprogramming/ConstructorHelpers.h"
-#include "rnexecutorch/models/BaseModel.h"
+#include "rnexecutorch/models/VisionModel.h"
#include
namespace rnexecutorch {
@@ -16,12 +16,24 @@ namespace models::object_detection {
using executorch::extension::TensorPtr;
using executorch::runtime::EValue;
-class ObjectDetection : public BaseModel {
+class ObjectDetection : public VisionModel {
public:
ObjectDetection(const std::string &modelSource,
std::shared_ptr<react::CallInvoker> callInvoker);
[[nodiscard("Registered non-void function")]] std::vector<Detection>
- generate(std::string imageSource, double detectionThreshold);
+ generateFromString(std::string imageSource, double detectionThreshold);
+  [[nodiscard("Registered non-void function")]] std::vector<Detection>
+ generateFromFrame(jsi::Runtime &runtime, const jsi::Value &frameData,
+ double detectionThreshold);
+  [[nodiscard("Registered non-void function")]] std::vector<Detection>
+ generateFromPixels(jsi::Runtime &runtime, const jsi::Value &pixelData,
+ double detectionThreshold);
+
+protected:
+ // Internal helper for shared preprocessing and inference logic
+  std::vector<Detection> runInference(cv::Mat image,
+                                      double detectionThreshold);
+ cv::Mat preprocessFrame(const cv::Mat &frame) const override;
private:
std::vector<Detection> postprocess(const std::vector<EValue> &tensors,
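
One detail of preprocessFrame() that is easy to miss: the input shape is NCHW, so width is the last dimension and height the second-to-last. A minimal sketch of that index arithmetic (the helper name is illustrative):

    #include <cstdint>
    #include <opencv2/opencv.hpp>
    #include <vector>

    // e.g. dims = {1, 3, 480, 640} -> cv::Size(width = 640, height = 480)
    cv::Size inputSizeFromDims(const std::vector<int32_t> &dims) {
      return cv::Size(dims[dims.size() - 1], dims[dims.size() - 2]);
    }
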
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp b/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp
index 3b9c0187b..2d04ea208 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.cpp
@@ -55,4 +55,4 @@ std::string StyleTransfer::generate(std::string imageSource) {
return postprocess(forwardResult->at(0).toTensor(), originalSize);
}
-} // namespace rnexecutorch::models::style_transfer
+} // namespace rnexecutorch::models::style_transfer
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.h b/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.h
index 73744c4d8..8eed3c888 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.h
+++ b/packages/react-native-executorch/common/rnexecutorch/models/style_transfer/StyleTransfer.h
@@ -33,4 +33,4 @@ class StyleTransfer : public BaseModel {
REGISTER_CONSTRUCTOR(models::style_transfer::StyleTransfer, std::string,
std::shared_ptr<react::CallInvoker>);
-} // namespace rnexecutorch
+} // namespace rnexecutorch
\ No newline at end of file
diff --git a/packages/react-native-executorch/common/rnexecutorch/tests/integration/ObjectDetectionTest.cpp b/packages/react-native-executorch/common/rnexecutorch/tests/integration/ObjectDetectionTest.cpp
index ae80208a6..074ee0751 100644
--- a/packages/react-native-executorch/common/rnexecutorch/tests/integration/ObjectDetectionTest.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/tests/integration/ObjectDetectionTest.cpp
@@ -29,7 +29,7 @@ template <> struct ModelTraits<ObjectDetection> {
}
static void callGenerate(ModelType &model) {
- (void)model.generate(kValidTestImagePath, 0.5);
+ (void)model.generateFromString(kValidTestImagePath, 0.5);
}
};
} // namespace model_tests
@@ -43,49 +43,50 @@ INSTANTIATE_TYPED_TEST_SUITE_P(ObjectDetection, CommonModelTest,
// ============================================================================
TEST(ObjectDetectionGenerateTests, InvalidImagePathThrows) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- EXPECT_THROW((void)model.generate("nonexistent_image.jpg", 0.5),
+ EXPECT_THROW((void)model.generateFromString("nonexistent_image.jpg", 0.5),
RnExecutorchError);
}
TEST(ObjectDetectionGenerateTests, EmptyImagePathThrows) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- EXPECT_THROW((void)model.generate("", 0.5), RnExecutorchError);
+ EXPECT_THROW((void)model.generateFromString("", 0.5), RnExecutorchError);
}
TEST(ObjectDetectionGenerateTests, MalformedURIThrows) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- EXPECT_THROW((void)model.generate("not_a_valid_uri://bad", 0.5),
+ EXPECT_THROW((void)model.generateFromString("not_a_valid_uri://bad", 0.5),
RnExecutorchError);
}
TEST(ObjectDetectionGenerateTests, NegativeThresholdThrows) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- EXPECT_THROW((void)model.generate(kValidTestImagePath, -0.1),
+ EXPECT_THROW((void)model.generateFromString(kValidTestImagePath, -0.1),
RnExecutorchError);
}
TEST(ObjectDetectionGenerateTests, ThresholdAboveOneThrows) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- EXPECT_THROW((void)model.generate(kValidTestImagePath, 1.1),
+ EXPECT_THROW((void)model.generateFromString(kValidTestImagePath, 1.1),
RnExecutorchError);
}
TEST(ObjectDetectionGenerateTests, ValidImageReturnsResults) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- auto results = model.generate(kValidTestImagePath, 0.3);
+ auto results = model.generateFromString(kValidTestImagePath, 0.3);
EXPECT_GE(results.size(), 0u);
}
TEST(ObjectDetectionGenerateTests, HighThresholdReturnsFewerResults) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- auto lowThresholdResults = model.generate(kValidTestImagePath, 0.1);
- auto highThresholdResults = model.generate(kValidTestImagePath, 0.9);
+ auto lowThresholdResults = model.generateFromString(kValidTestImagePath, 0.1);
+ auto highThresholdResults =
+ model.generateFromString(kValidTestImagePath, 0.9);
EXPECT_GE(lowThresholdResults.size(), highThresholdResults.size());
}
TEST(ObjectDetectionGenerateTests, DetectionsHaveValidBoundingBoxes) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- auto results = model.generate(kValidTestImagePath, 0.3);
+ auto results = model.generateFromString(kValidTestImagePath, 0.3);
for (const auto &detection : results) {
EXPECT_LE(detection.x1, detection.x2);
@@ -97,7 +98,7 @@ TEST(ObjectDetectionGenerateTests, DetectionsHaveValidBoundingBoxes) {
TEST(ObjectDetectionGenerateTests, DetectionsHaveValidScores) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- auto results = model.generate(kValidTestImagePath, 0.3);
+ auto results = model.generateFromString(kValidTestImagePath, 0.3);
for (const auto &detection : results) {
EXPECT_GE(detection.score, 0.0f);
@@ -107,7 +108,7 @@ TEST(ObjectDetectionGenerateTests, DetectionsHaveValidScores) {
TEST(ObjectDetectionGenerateTests, DetectionsHaveValidLabels) {
ObjectDetection model(kValidObjectDetectionModelPath, nullptr);
- auto results = model.generate(kValidTestImagePath, 0.3);
+ auto results = model.generateFromString(kValidTestImagePath, 0.3);
for (const auto &detection : results) {
EXPECT_GE(detection.label, 0);
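
Note that the postprocess() hunk earlier removed the threshold range check, yet these tests still expect out-of-range thresholds to throw, so the check presumably moved into the generate* entry points (not shown in this diff). A sketch of such a guard, reusing the error type from the removed hunk; the helper name is assumed:

    // Hypothetical helper mirroring the check removed from postprocess()
    inline void validateDetectionThreshold(double detectionThreshold) {
      if (detectionThreshold <= 0 || detectionThreshold > 1) {
        throw RnExecutorchError(RnExecutorchErrorCode::InvalidConfig,
                                "Detection threshold must be greater than 0 "
                                "and less than or equal to 1.");
      }
    }
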
diff --git a/packages/react-native-executorch/common/rnexecutorch/tests/unit/FileUtilsTest.cpp b/packages/react-native-executorch/common/rnexecutorch/tests/unit/FileUtilsTest.cpp
index ed9d80236..c25f2c197 100644
--- a/packages/react-native-executorch/common/rnexecutorch/tests/unit/FileUtilsTest.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/tests/unit/FileUtilsTest.cpp
@@ -26,7 +26,7 @@ TEST_F(FileIOTest, LoadBytesFromFileSuccessfully) {
}
TEST_F(FileIOTest, LoadBytesFromFileFailOnNonExistentFile) {
- EXPECT_THROW(
- { loadBytesFromFile("non_existent_file.txt"); }, RnExecutorchError);
+ EXPECT_THROW({ loadBytesFromFile("non_existent_file.txt"); },
+ RnExecutorchError);
}
} // namespace rnexecutorch::file_utils
diff --git a/packages/react-native-executorch/common/rnexecutorch/utils/FrameExtractor.cpp b/packages/react-native-executorch/common/rnexecutorch/utils/FrameExtractor.cpp
new file mode 100644
index 000000000..f64855131
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/utils/FrameExtractor.cpp
@@ -0,0 +1,151 @@
+#include "FrameExtractor.h"
+#include "rnexecutorch/Log.h"
+
+#ifdef __APPLE__
+#import <CoreVideo/CoreVideo.h>
+#endif
+
+#ifdef __ANDROID__
+#if __ANDROID_API__ >= 26
+#include <android/hardware_buffer.h>
+#endif
+#endif
+
+namespace rnexecutorch {
+namespace utils {
+
+cv::Mat FrameExtractor::extractFromNativeBuffer(uint64_t bufferPtr) {
+#ifdef __APPLE__
+  return extractFromCVPixelBuffer(reinterpret_cast<void *>(bufferPtr));
+#elif defined(__ANDROID__)
+  return extractFromAHardwareBuffer(reinterpret_cast<void *>(bufferPtr));
+#else
+ throw std::runtime_error("NativeBuffer not supported on this platform");
+#endif
+}
+
+#ifdef __APPLE__
+cv::Mat FrameExtractor::extractFromCVPixelBuffer(void *pixelBuffer) {
+  CVPixelBufferRef buffer = static_cast<CVPixelBufferRef>(pixelBuffer);
+
+ // Get buffer properties
+ size_t width = CVPixelBufferGetWidth(buffer);
+ size_t height = CVPixelBufferGetHeight(buffer);
+ size_t bytesPerRow = CVPixelBufferGetBytesPerRow(buffer);
+ OSType pixelFormat = CVPixelBufferGetPixelFormatType(buffer);
+
+  // Lock the buffer (Vision Camera should have locked it already, but make sure)
+ CVPixelBufferLockBaseAddress(buffer, kCVPixelBufferLock_ReadOnly);
+ void *baseAddress = CVPixelBufferGetBaseAddress(buffer);
+
+ cv::Mat mat;
+
+  // Log pixel format once for debugging
+  static bool loggedPixelFormat = false;
+  const bool shouldLogFormat = !loggedPixelFormat;
+  if (shouldLogFormat) {
+    log(LOG_LEVEL::Debug, "CVPixelBuffer format code: ", pixelFormat);
+    loggedPixelFormat = true;
+  }
+
+  if (pixelFormat == kCVPixelFormatType_32BGRA) {
+    // BGRA format (most common on iOS when using pixelFormat: 'rgb')
+    if (shouldLogFormat) {
+      log(LOG_LEVEL::Debug, "Extracting from CVPixelBuffer: BGRA format, ",
+          width, "x", height, ", stride: ", bytesPerRow);
+    }
+    mat = cv::Mat(static_cast<int>(height), static_cast<int>(width), CV_8UC4,
+                  baseAddress, bytesPerRow);
+  } else if (pixelFormat == kCVPixelFormatType_32RGBA) {
+    // RGBA format
+    if (shouldLogFormat) {
+      log(LOG_LEVEL::Debug, "Extracting from CVPixelBuffer: RGBA format, ",
+          width, "x", height, ", stride: ", bytesPerRow);
+    }
+    mat = cv::Mat(static_cast<int>(height), static_cast<int>(width), CV_8UC4,
+                  baseAddress, bytesPerRow);
+  } else if (pixelFormat == kCVPixelFormatType_24RGB) {
+    // RGB format
+    if (shouldLogFormat) {
+      log(LOG_LEVEL::Debug, "Extracting from CVPixelBuffer: RGB format, ",
+          width, "x", height, ", stride: ", bytesPerRow);
+    }
+    mat = cv::Mat(static_cast<int>(height), static_cast<int>(width), CV_8UC3,
+                  baseAddress, bytesPerRow);
+ } else {
+ CVPixelBufferUnlockBaseAddress(buffer, kCVPixelBufferLock_ReadOnly);
+ throw std::runtime_error("Unsupported CVPixelBuffer format: " +
+ std::to_string(pixelFormat));
+ }
+
+ // Note: We don't unlock here - Vision Camera manages the lifecycle
+ // When frame.dispose() is called, Vision Camera will unlock and release
+
+ return mat;
+}
+#endif
+
+#ifdef __ANDROID__
+cv::Mat FrameExtractor::extractFromAHardwareBuffer(void *hardwareBuffer) {
+#if __ANDROID_API__ >= 26
+  AHardwareBuffer *buffer = static_cast<AHardwareBuffer *>(hardwareBuffer);
+
+ // Get buffer description
+ AHardwareBuffer_Desc desc;
+ AHardwareBuffer_describe(buffer, &desc);
+
+ // Lock the buffer for CPU read access
+ void *data = nullptr;
+ int lockResult = AHardwareBuffer_lock(
+ buffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, -1, nullptr, &data);
+
+ if (lockResult != 0) {
+ throw std::runtime_error("Failed to lock AHardwareBuffer");
+ }
+
+ cv::Mat mat;
+
+  // Log format once for debugging
+  static bool loggedFormat = false;
+  const bool shouldLogFormat = !loggedFormat;
+  if (shouldLogFormat) {
+    log(LOG_LEVEL::Debug, "AHardwareBuffer format code: ", desc.format);
+    loggedFormat = true;
+  }
+
+ if (desc.format == AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM) {
+ // RGBA format (expected when using pixelFormat: 'rgb' on Android)
+    if (shouldLogFormat) {
+ log(LOG_LEVEL::Debug, "Extracting from AHardwareBuffer: RGBA format, ",
+ desc.width, "x", desc.height, ", stride: ", desc.stride * 4);
+ }
+ mat = cv::Mat(desc.height, desc.width, CV_8UC4, data, desc.stride * 4);
+ } else if (desc.format == AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM) {
+ // RGBX format (treated as RGBA)
+    if (shouldLogFormat) {
+ log(LOG_LEVEL::Debug, "Extracting from AHardwareBuffer: RGBX format, ",
+ desc.width, "x", desc.height, ", stride: ", desc.stride * 4);
+ }
+ mat = cv::Mat(desc.height, desc.width, CV_8UC4, data, desc.stride * 4);
+ } else if (desc.format == AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM) {
+ // RGB format (less common)
+    if (shouldLogFormat) {
+ log(LOG_LEVEL::Debug, "Extracting from AHardwareBuffer: RGB format, ",
+ desc.width, "x", desc.height, ", stride: ", desc.stride * 3);
+ }
+ mat = cv::Mat(desc.height, desc.width, CV_8UC3, data, desc.stride * 3);
+ } else {
+ AHardwareBuffer_unlock(buffer, nullptr);
+ throw std::runtime_error("Unsupported AHardwareBuffer format: " +
+ std::to_string(desc.format));
+ }
+
+ // Note: We don't unlock here - Vision Camera manages the lifecycle
+
+ return mat;
+#else
+ throw std::runtime_error("AHardwareBuffer requires Android API 26+");
+#endif // __ANDROID_API__ >= 26
+}
+#endif // __ANDROID__
+
+} // namespace utils
+} // namespace rnexecutorch
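
A caveat worth demonstrating: the cv::Mat objects returned above wrap the camera buffer's memory without copying, so they are only valid while Vision Camera keeps the frame alive. If the pixels must outlive the frame, clone them first:

    #include <opencv2/opencv.hpp>

    // Deep-copy a zero-copy frame into Mat-owned memory
    cv::Mat takeOwnership(const cv::Mat &wrapped) {
      return wrapped.clone();
    }
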
diff --git a/packages/react-native-executorch/common/rnexecutorch/utils/FrameExtractor.h b/packages/react-native-executorch/common/rnexecutorch/utils/FrameExtractor.h
new file mode 100644
index 000000000..a90e6ad23
--- /dev/null
+++ b/packages/react-native-executorch/common/rnexecutorch/utils/FrameExtractor.h
@@ -0,0 +1,60 @@
+#pragma once
+
+#include <cstdint>
+#include <opencv2/opencv.hpp>