Hello everyone,
I'm getting empty data after I press record on iOS, but it works perfectly on Android.
iOS logs:
(NOBRIDGE) LOG data here:
(NOBRIDGE) LOG data here:
(NOBRIDGE) LOG data here:
Android logs:
(NOBRIDGE) LOG data here: gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI....
I do have the permission granted and everything.
Here is my code:
useEffect(() => {
  const main = async () => {
    if (Platform.OS === 'android') {
      await requestAudioPermission();
    } else {
      await requestMicrophonePermission();
    }
    const options = {
      sampleRate: 16000, // default is 44100 but 32000 is adequate for accurate voice recognition
      channels: 2, // 1 or 2, default 1
      bitsPerSample: 8, // 8 or 16, default 16
      audioSource: 6, // android only (see below)
      bufferSize: 2048, // default is 2048
      wavFile: 'test-audio.wav',
    };
    LiveAudioStream.init(options);
  };
  main();
  return () => {
    LiveAudioStream.stop();
  };
}, []);
const startStreamRecording = async () => {
  try {
    let audioBuffer = Buffer.alloc(0); // Initialize an empty buffer
    let sendingInterval; // Variable to hold the interval ID
    // Ensure listener is registered before starting the stream
    LiveAudioStream.on('data', data => {
      console.log('data here: ', data);
      if (data) {
        const chunk = Buffer.from(data, 'base64');
        audioBuffer = Buffer.concat([audioBuffer, chunk]); // Append chunk to audioBuffer
      }
    });
    await LiveAudioStream.start(); // Start streaming after listener is set
    // Set an interval to send the buffer every second
    sendingInterval = setInterval(() => {
      if (audioBuffer.length > 0) {
        const wavData = bufferToWav(audioBuffer); // Convert accumulated buffer to WAV format
        sendMessageWhenReady(wavData); // Send the WAV data
        audioBuffer = Buffer.alloc(0); // Reset the buffer after sending
      }
    }, 1000); // 1000 milliseconds = 1 second
    // Cleanup on component unmount
    return () => {
      clearInterval(sendingInterval); // Clear the interval on cleanup
      LiveAudioStream.stop();
    };
  } catch (error) {
    Alert.alert('Error', 'Could not start the audio stream.');
  }
};

How do I fix this?
I would appreciate any suggestions.
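For what it's worth, a configuration that would actually match the WAV header I build in bufferToWav below (32000 Hz, mono, 16-bit) would look like the sketch here. I haven't confirmed that the stereo/8-bit settings are what breaks iOS, so treat this as a guess rather than a fix:

const options = {
  sampleRate: 32000, // matches the sampleRate written into the WAV header
  channels: 1, // bufferToWav assumes mono
  bitsPerSample: 16, // bufferToWav assumes 2 bytes per sample
  audioSource: 6, // android only
  bufferSize: 2048,
  wavFile: 'test-audio.wav',
};
LiveAudioStream.init(options);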
My npm package version:
"react-native-live-audio-stream": "^1.1.1",
Full Package JSON:
{
  "name": "LiveApp",
  "version": "1.0.0",
  "private": true,
  "scripts": {
    "android": "react-native run-android",
    "ios": "react-native run-ios",
    "lint": "eslint .",
    "start": "react-native start",
    "test": "jest"
  },
  "dependencies": {
    "@expo/vector-icons": "^14.0.4",
    "@react-native-async-storage/async-storage": "^2.1.1",
    "@react-native-camera-roll/camera-roll": "^7.9.0",
    "@react-native-clipboard/clipboard": "^1.16.1",
    "@react-native-community/blur": "^4.4.1",
    "@react-native-community/template": "^0.76.6",
    "@react-navigation/bottom-tabs": "^7.2.0",
    "@react-navigation/elements": "^2.2.5",
    "@react-navigation/native": "^7.0.14",
    "@react-navigation/native-stack": "^7.2.0",
    "@reduxjs/toolkit": "^2.5.0",
    "@shopify/flash-list": "^1.7.3",
    "axios": "^1.7.9",
    "buffer": "^6.0.3",
    "expo": "^52.0.0",
    "expo-av": "^15.0.2",
    "fs": "^0.0.1-security",
    "i18n-js": "^4.5.1",
    "i18next": "^24.2.2",
    "lottie-react-native": "^7.2.2",
    "nativewind": "^4.1.23",
    "path": "^0.12.7",
    "react": "18.3.1",
    "react-i18next": "^15.4.1",
    "react-native": "0.76.6",
    "react-native-code-push": "^9.0.1",
    "react-native-eject": "^1.0.2",
    "react-native-fs": "^2.20.0",
    "react-native-gesture-handler": "^2.22.0",
    "react-native-get-random-values": "^1.11.0",
    "react-native-linear-gradient": "^2.8.3",
    "react-native-live-audio-stream": "^1.1.1",
    "react-native-localize": "^3.4.1",
    "react-native-markdown-display": "^7.0.2",
    "react-native-permissions": "^5.2.5",
    "react-native-reanimated": "^3.16.7",
    "react-native-restart": "^0.0.27",
    "react-native-safe-area-context": "^5.1.0",
    "react-native-screens": "^4.5.0",
    "react-native-splash-screen": "^3.3.0",
    "react-native-swiper": "^1.6.0",
    "react-native-vector-icons": "^10.2.0",
    "react-redux": "^9.2.0",
    "tailwindcss": "^3.4.17",
    "uuid": "^11.0.5"
  },
  "devDependencies": {
    "@babel/core": "^7.25.2",
    "@babel/preset-env": "^7.25.3",
    "@babel/runtime": "^7.25.0",
    "@react-native-community/cli": "15.0.1",
    "@react-native-community/cli-platform-android": "15.0.1",
    "@react-native-community/cli-platform-ios": "15.0.1",
    "@react-native/babel-preset": "0.76.6",
    "@react-native/eslint-config": "0.76.6",
    "@react-native/metro-config": "0.76.6",
    "@react-native/typescript-config": "0.76.6",
    "@types/react": "^18.2.6",
    "@types/react-native-vector-icons": "^6.4.18",
    "@types/react-test-renderer": "^18.0.0",
    "babel-jest": "^29.6.3",
    "eslint": "^8.19.0",
    "jest": "^29.6.3",
    "prettier": "2.8.8",
    "react-test-renderer": "18.3.1",
    "ts-node": "^10.9.2",
    "typescript": "^5.0.4"
  },
  "engines": {
    "node": ">=18"
  }
}
Full Code
/* eslint-disable @typescript-eslint/no-unused-vars */
import {
  View,
  Text,
  Alert,
  Button,
  Platform,
  PermissionsAndroid,
} from 'react-native';
import React, {useEffect, useRef, useState} from 'react';
import {SafeAreaView} from 'react-native-safe-area-context';
import LiveAudioStream, {IAudioRecord} from 'react-native-live-audio-stream';
import {PERMISSIONS, request} from 'react-native-permissions';
import {Buffer} from 'buffer';
import {Audio} from 'expo-av';

const SettingsScreen = () => {
  const ws = useRef(null);
  const [transcription, setTranscription] = useState('');

  const requestMicrophonePermission = async () => {
    try {
      const status = await request(PERMISSIONS.IOS.MICROPHONE);
      console.log('status:', status);
    } catch (error) {
      console.error('Permission request error: ', error);
    }
  };
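  // Note (not in my app code, just for completeness): request() resolves to one
  // of the RESULTS constants from react-native-permissions, so a stricter check
  // would be something like:
  //   import {PERMISSIONS, RESULTS, request} from 'react-native-permissions';
  //   const status = await request(PERMISSIONS.IOS.MICROPHONE);
  //   if (status !== RESULTS.GRANTED) { /* handle denial */ }
  // In my case the logged status is granted.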
  const requestAudioPermission = async () => {
    if (Platform.OS === 'android') {
      try {
        const granted = await PermissionsAndroid.request(
          PermissionsAndroid.PERMISSIONS.RECORD_AUDIO,
          {
            title: 'Audio Permission',
            message: 'App needs access to your microphone to record audio.',
            buttonNeutral: 'Ask Me Later',
            buttonNegative: 'Cancel',
            buttonPositive: 'OK',
          },
        );
        if (granted !== PermissionsAndroid.RESULTS.GRANTED) {
          console.log('Audio permission denied');
        }
      } catch (err) {
        console.warn(err);
      }
    }
  };

  const bufferToWav = buffer => {
    const numChannels = 1; // Mono
    const sampleRate = 32000; // Update sample rate to 32000
    const byteRate = sampleRate * numChannels * 2; // 16 bits = 2 bytes per sample
    const wavHeader = Buffer.alloc(44);
    // RIFF identifier
    wavHeader.write('RIFF', 0);
    // file length
    wavHeader.writeUInt32LE(buffer.length + 36, 4);
    // RIFF type
    wavHeader.write('WAVE', 8);
    // format chunk identifier
    wavHeader.write('fmt ', 12);
    // format chunk length
    wavHeader.writeUInt32LE(16, 16);
    // format type (PCM)
    wavHeader.writeUInt16LE(1, 20);
    // channel count
    wavHeader.writeUInt16LE(numChannels, 22);
    // sample rate
    wavHeader.writeUInt32LE(sampleRate, 24);
    // byte rate (sample rate * block align)
    wavHeader.writeUInt32LE(byteRate, 28);
    // block align (channel count * bytes per sample)
    wavHeader.writeUInt16LE(numChannels * 2, 32);
    // bits per sample
    wavHeader.writeUInt16LE(16, 34);
    // data chunk identifier
    wavHeader.write('data', 36);
    // data chunk length
    wavHeader.writeUInt32LE(buffer.length, 40);
    return Buffer.concat([wavHeader, buffer]);
  };
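  // Quick sanity check for the header above (a sketch, not part of my app code):
  // one second of silence in the format the header advertises (32000 Hz, mono,
  // 16-bit) is 32000 * 2 = 64000 bytes of PCM, so the resulting WAV should be
  // 44 + 64000 = 64044 bytes long.
  //   const oneSecondOfSilence = Buffer.alloc(32000 * 2);
  //   console.log(bufferToWav(oneSecondOfSilence).length); // expected: 64044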
  useEffect(() => {
    const main = async () => {
      if (Platform.OS === 'android') {
        await requestAudioPermission();
      } else {
        await requestMicrophonePermission();
      }
      const options = {
        sampleRate: 16000, // default is 44100 but 32000 is adequate for accurate voice recognition
        channels: 2, // 1 or 2, default 1
        bitsPerSample: 8, // 8 or 16, default 16
        audioSource: 6, // android only (see below)
        bufferSize: 2048, // default is 2048
        wavFile: 'test-audio.wav',
      };
      LiveAudioStream.init(options);
    };
    main();
    return () => {
      LiveAudioStream.stop();
    };
  }, []);

  useEffect(() => {
    const ANDROID_DEVICE = Platform.OS === 'android';
    const IP_ADDRESS = ANDROID_DEVICE ? '192.168.1.2' : 'localhost';
    const socket = new WebSocket(`ws://${IP_ADDRESS}:8085`);
    ws.current = socket;
    if (!socket) {
      return;
    }
    const updateStatus = () => {
      console.log('WebSocket state:', socket.readyState);
    };
    socket.addEventListener('message', event => {
      // const deviceId = await AsyncStorage.getItem('deviceId');
      const {url, senderId, lastSpeak} = JSON.parse(event.data);
      // eslint-disable-next-line react-hooks/exhaustive-deps
      lastSpeaker = lastSpeak;
      // eslint-disable-next-line curly
      // if (deviceId !== senderId) return;
      if (url) {
        audioQueue.push(url);
      }
      if (!isPlaying && audioQueue.length > 0) {
        playNextAudio();
      }
      console.log('getting this shit:', event.data);
      setTranscription(event.data); // Update transcription when received
    });
    socket.addEventListener('open', updateStatus);
    socket.addEventListener('close', updateStatus);
    socket.addEventListener('error', updateStatus);
    return () => {
      socket.removeEventListener('open', updateStatus);
      socket.removeEventListener('close', updateStatus);
      socket.removeEventListener('error', updateStatus);
    };
  }, []);
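  // For readers: the messages parsed above come from my own server and look
  // roughly like this (field names taken from the destructuring; senderId is
  // currently unused because the deviceId check is commented out):
  //   {
  //     "url": "https://...",    // optional audio clip to enqueue for playback
  //     "senderId": "device-id",
  //     "lastSpeak": false       // true when the server treats the turn as finished
  //   }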
  // I talk and it listens; after 2 seconds of silence it sends that it's the last word, runs the AI logic and so on, and if it was the last speech we set it back to record and send the audio again
  let audioQueue: any[] = [];
  let isPlaying = false;
  let lastSpeaker = false;

  const playNextAudio = async () => {
    if (audioQueue.length === 0) {
      console.log('🎵 No more audio to play.');
      if (lastSpeaker) {
        console.log('🎙️ Starting rec 🔥🔥🔥');
        return;
      }
      isPlaying = false;
      return;
    }
    const nextAudio = audioQueue.shift();
    if (!nextAudio) {
      console.warn('⚠️ nextAudio is undefined or invalid.');
      playNextAudio(); // Skip to the next if it's invalid
      return;
    }
    isPlaying = true;
    try {
      const {sound: newSound} = await Audio.Sound.createAsync(
        {uri: nextAudio},
        {shouldPlay: true},
      );
      newSound.setOnPlaybackStatusUpdate(async status => {
        // console.log("We're PLaying sound!!!");
        if (status.didJustFinish) {
          await newSound.unloadAsync();
          isPlaying = false;
          playNextAudio(); // Play the next one
          // console.log("We end PLaying Sound for !!@#$");
        }
      });
    } catch (error) {
      console.error('❌ Error playing audio:', error);
      isPlaying = false;
      playNextAudio(); // Continue to the next one even on error
    }
  };

  const sendMessageWhenReady = message => {
    if (ws.current.readyState === WebSocket.OPEN) {
      ws.current.send(message);
    }
  };
  const startStreamRecording = async () => {
    try {
      let audioBuffer = Buffer.alloc(0); // Initialize an empty buffer
      let sendingInterval; // Variable to hold the interval ID
      // const options = {
      //   sampleRate: 32000, // default is 44100 but 32000 is adequate for accurate voice recognition
      //   channels: 1, // 1 or 2, default 1
      //   bitsPerSample: 16, // 8 or 16, default 16
      //   audioSource: 6, // android only (see below)
      //   bufferSize: 4096, // default is 2048
      // };
      // LiveAudioStream.init(options);
      // Ensure listener is registered before starting the stream
      LiveAudioStream.on('data', data => {
        console.log('data here: ', data);
        if (data) {
          const chunk = Buffer.from(data, 'base64');
          audioBuffer = Buffer.concat([audioBuffer, chunk]); // Append chunk to audioBuffer
        }
      });
      await LiveAudioStream.start(); // Start streaming after listener is set
      // Set an interval to send the buffer every second
      sendingInterval = setInterval(() => {
        if (audioBuffer.length > 0) {
          const wavData = bufferToWav(audioBuffer); // Convert accumulated buffer to WAV format
          sendMessageWhenReady(wavData); // Send the WAV data
          audioBuffer = Buffer.alloc(0); // Reset the buffer after sending
        }
      }, 1000); // 1000 milliseconds = 1 second
      // Cleanup on component unmount
      return () => {
        clearInterval(sendingInterval); // Clear the interval on cleanup
        LiveAudioStream.stop();
      };
    } catch (error) {
      Alert.alert('Error', 'Could not start the audio stream.');
    }
  };
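  // Rough numbers for the 1-second interval above (my own back-of-the-envelope
  // math, not verified on device): with the options passed to init (16000 Hz,
  // 2 channels, 8-bit = 1 byte per sample) one second of raw PCM is
  // 16000 * 2 * 1 = 32000 bytes, while the WAV header written by bufferToWav
  // advertises 32000 Hz, mono, 16-bit = 64000 bytes per second, so the capture
  // format and the header don't line up even on Android where data does arrive.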
  return (
    <SafeAreaView className="bg-primary flex items-center justify-center h-full w-full">
      <Button title="Start Recording" onPress={startStreamRecording} />
      <Text className="text-white">Transcription: {transcription}</Text>
      <Button title="Stop Recording" onPress={stopsStreamRecording} />
    </SafeAreaView>
  );
};

export default SettingsScreen;