Official React Native SDK for the Tuteliq API
AI-powered child safety analysis
API Docs • Dashboard • Discord
npm install @tuteliq/react-native
# or
yarn add @tuteliq/react-native

Requirements:

- React Native 0.70+
- React 17+
Wrap your app with TuteliqProvider:
import { TuteliqProvider } from '@tuteliq/react-native';
export default function App() {
return (
<TuteliqProvider apiKey="your-api-key">
<YourApp />
</TuteliqProvider>
);
}

Then call a hook from any component inside the provider:

import React, { useState } from 'react';
import { View, TextInput, Button, Alert } from 'react-native';
import { useAnalyze, RiskLevel } from '@tuteliq/react-native';
function ChatInput() {
const { execute, loading } = useAnalyze();
const [message, setMessage] = useState('');
const handleSend = async () => {
const result = await execute({ text: message });
if (result.riskLevel !== RiskLevel.Safe) {
Alert.alert('Warning', result.summary);
return;
}
// Send message...
};
return (
<View>
<TextInput value={message} onChangeText={setMessage} />
<Button title="Send" onPress={handleSend} disabled={loading} />
</View>
);
}

Configure the provider with optional settings:

import { TuteliqProvider } from '@tuteliq/react-native';
<TuteliqProvider
apiKey="your-api-key"
config={{
timeout: 30000, // Request timeout in ms
maxRetries: 3, // Retry attempts
retryDelay: 1000, // Initial retry delay in ms
}}
>
{children}
</TuteliqProvider>All hooks return:
data- The result (null until executed)loading- Loading stateerror- Error if anyexecute(input)- Function to execute the operationreset()- Reset state
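For reference, the return value of each hook can be modeled roughly as follows. The type names here are assumptions for illustration; consult the package's TypeScript definitions for the authoritative shapes.

// Illustrative shape of a hook result — names like UseTuteliqResult are
// assumptions, not the SDK's actual exported types.
interface UseTuteliqResult<TInput, TOutput> {
  data: TOutput | null;                      // null until execute() resolves
  loading: boolean;                          // true while a request is in flight
  error: Error | null;                       // set if the last call failed
  execute: (input: TInput) => Promise<TOutput>;
  reset: () => void;                         // clear data and error
}

The individual hooks follow this pattern. For example, bullying detection: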
import { useDetectBullying } from '@tuteliq/react-native';
function MyComponent() {
const { data, loading, error, execute } = useDetectBullying();
const check = async () => {
const result = await execute({ text: 'Message to check' });
if (result.isBullying) {
console.log('Severity:', result.severity);
console.log('Types:', result.bullyingType);
}
};
}

Detect grooming patterns across a conversation:

import { useDetectGrooming, MessageRole } from '@tuteliq/react-native';
const { execute } = useDetectGrooming();
const result = await execute({
messages: [
{ role: MessageRole.Adult, content: 'This is our secret' },
{ role: MessageRole.Child, content: 'Ok I wont tell' },
],
childAge: 12,
});

Detect unsafe content:

import { useDetectUnsafe } from '@tuteliq/react-native';
const { execute } = useDetectUnsafe();
const result = await execute({ text: 'Content to check' });
if (result.unsafe) {
console.log('Categories:', result.categories);
}

Quick analysis combining bullying and unsafe detection:
import { useAnalyze, RiskLevel } from '@tuteliq/react-native';
const { execute } = useAnalyze();
const result = await execute({ text: 'Message to check' });
console.log('Risk Level:', result.riskLevel);
console.log('Risk Score:', result.riskScore);
console.log('Summary:', result.summary);

Analyze the emotions expressed in a text:

import { useAnalyzeEmotions } from '@tuteliq/react-native';
const { execute } = useAnalyzeEmotions();
const result = await execute({ text: 'Im so stressed about everything' });
console.log('Emotions:', result.dominantEmotions);
console.log('Trend:', result.trend);

Generate a step-by-step action plan for a situation:

import { useGetActionPlan, Audience, Severity } from '@tuteliq/react-native';
const { execute } = useGetActionPlan();
const plan = await execute({
situation: 'Someone is spreading rumors about me',
childAge: 12,
audience: Audience.Child,
severity: Severity.Medium,
});
console.log('Steps:', plan.steps);

Generate a report from a conversation:

import { useGenerateReport } from '@tuteliq/react-native';
const { execute } = useGenerateReport();
const report = await execute({
messages: [
{ sender: 'user1', content: 'Threatening message' },
{ sender: 'child', content: 'Please stop' },
],
childAge: 14,
});
console.log('Summary:', report.summary);

For advanced use cases, access the client directly:
import { useTuteliqClient } from '@tuteliq/react-native';
function MyComponent() {
const { client } = useTuteliqClient();
const customAnalysis = async () => {
const result = await client.detectBullying({
text: 'Message',
externalId: 'msg_123',
metadata: { userId: 'user_456' },
});
return result;
};
}

All methods support externalId and metadata for request correlation:
const result = await execute({
text: 'Message to check',
externalId: 'msg_12345',
metadata: { userId: 'usr_abc', sessionId: 'sess_xyz' },
});
// Echoed back in response
console.log(result.externalId); // "msg_12345"
console.log(result.metadata); // { userId: "usr_abc", ... }

import {
useAnalyze,
AuthenticationError,
RateLimitError,
ValidationError,
} from '@tuteliq/react-native';
function MyComponent() {
const { execute, error } = useAnalyze();
const handleCheck = async () => {
try {
const result = await execute({ text: 'test' });
} catch (err) {
if (err instanceof AuthenticationError) {
console.log('Invalid API key');
} else if (err instanceof RateLimitError) {
console.log('Too many requests');
} else if (err instanceof ValidationError) {
console.log('Invalid input:', err.details);
}
}
};
// Or use the error state
if (error) {
return <Text>Error: {error.message}</Text>;
}
}

A complete example putting everything together:

import React, { useState } from 'react';
import { View, TextInput, Button, Text, Alert, StyleSheet } from 'react-native';
import { TuteliqProvider, useAnalyze, RiskLevel } from '@tuteliq/react-native';
function ChatScreen() {
const [message, setMessage] = useState('');
const { execute, loading, data } = useAnalyze();
const handleSend = async () => {
if (!message.trim()) return;
try {
const result = await execute({ text: message });
if (result.riskLevel === RiskLevel.Critical || result.riskLevel === RiskLevel.High) {
Alert.alert(
'Message Blocked',
result.summary,
[{ text: 'OK', onPress: () => setMessage('') }]
);
return;
}
// Safe to send
console.log('Sending message:', message);
setMessage('');
} catch (error) {
Alert.alert('Error', 'Failed to check message');
}
};
return (
<View style={styles.container}>
<TextInput
style={styles.input}
value={message}
onChangeText={setMessage}
placeholder="Type a message..."
editable={!loading}
/>
<Button title={loading ? 'Checking...' : 'Send'} onPress={handleSend} disabled={loading} />
{data && (
<Text style={styles.status}>
Last check: {data.riskLevel} (score: {data.riskScore.toFixed(2)})
</Text>
)}
</View>
);
}
export default function App() {
return (
<TuteliqProvider apiKey={process.env.TUTELIQ_API_KEY!}>
<ChatScreen />
</TuteliqProvider>
);
}
const styles = StyleSheet.create({
container: { flex: 1, padding: 20, justifyContent: 'flex-end' },
input: { borderWidth: 1, borderColor: '#ccc', padding: 10, marginBottom: 10, borderRadius: 8 },
status: { marginTop: 10, color: '#666', fontSize: 12 },
});

The bullying and unsafe content methods analyze a single text field per request. If your app receives messages one at a time, concatenate a sliding window of recent messages into one string before calling the API; a small helper for this is sketched below. Single words or short fragments lack context for accurate detection and can be exploited to bypass safety filters.
// Bad — each message analyzed in isolation, easily evaded
for (const msg of messages) {
await client.detectBullying({ text: msg });
}
// Good — recent messages analyzed together
const window = recentMessages.slice(-10).join(' ');
await client.detectBullying({ text: window });

The grooming method already accepts a messages[] array and analyzes the full conversation in context.
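For apps that receive messages one at a time, a minimal sliding-window helper might look like this. The buffer size, helper name, and Map-based storage are illustrative assumptions, not part of the SDK.

// Illustrative sliding-window buffer — not part of the SDK.
// Keeps the last N messages per conversation and joins them for analysis.
const WINDOW_SIZE = 10; // assumption: tune for your message length and volume

const buffers = new Map<string, string[]>();

function pushMessage(conversationId: string, text: string): string {
  const buffer = buffers.get(conversationId) ?? [];
  buffer.push(text);
  if (buffer.length > WINDOW_SIZE) buffer.shift(); // drop the oldest message
  buffers.set(conversationId, buffer);
  return buffer.join(' ');
}

// Usage with the client from useTuteliqClient():
// const window = pushMessage(chatId, incomingText);
// const result = await client.detectBullying({ text: window, externalId: chatId });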
Enable PII_REDACTION_ENABLED=true on your Tuteliq API to automatically strip emails, phone numbers, URLs, social handles, IPs, and other PII from detection summaries and webhook payloads. The original text is still analyzed in full — only stored outputs are scrubbed.
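For example, if your API service reads its configuration from environment variables (an assumption; the exact mechanism depends on your deployment):

# assumption: environment-variable configuration for the self-hosted Tuteliq API
PII_REDACTION_ENABLED=true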
- API Docs: api.tuteliq.ai/docs
- Discord: discord.gg/7kbTeRYRXD
- Email: support@tuteliq.ai
- Issues: GitHub Issues
MIT License - see LICENSE for details.
Before you decide to contribute or sponsor, read these numbers. They are not projections. They are not estimates from a pitch deck. They are verified statistics from the University of Edinburgh, UNICEF, NCMEC, and Interpol.
- 302 million children are victims of online sexual exploitation and abuse every year. That is 10 children every second. (Childlight / University of Edinburgh, 2024)
- 1 in 8 children globally have been victims of non-consensual sexual imagery in the past year. (Childlight, 2024)
- 370 million girls and women alive today experienced rape or sexual assault in childhood. An estimated 240–310 million boys and men experienced the same. (UNICEF, 2024)
- 29.2 million incidents of suspected child sexual exploitation were reported to NCMEC's CyberTipline in 2024 alone — containing 62.9 million files (images, videos). (NCMEC, 2025)
- 546,000 reports of online enticement (adults grooming children) in 2024 — a 192% increase from the year before. (NCMEC, 2025)
- 1,325% increase in AI-generated child sexual abuse material reports between 2023 and 2024. The technology that should protect children is being weaponized against them. (NCMEC, 2025)
- 100 sextortion reports per day to NCMEC. Since 2021, at least 36 teenage boys have taken their own lives because they were victimized by sextortion. (NCMEC, 2025)
- 84% of reports resolve to locations outside the United States. This is not an American problem. This is a global emergency. (NCMEC, 2025)
End-to-end encryption is making platforms blind. In 2024, platforms reported 7 million fewer incidents than the year before — not because abuse stopped, but because they can no longer see it. The tools that catch known images are failing. The systems that rely on human moderators are overwhelmed. The technology to detect behavior — grooming patterns, escalation, manipulation — in real-time text conversations exists right now. It is running at api.tuteliq.ai.
The question is not whether this technology is possible. The question is whether we build the company to put it everywhere it needs to be.
Every second we wait, another child is harmed.
We have the technology. We need the support.
If this mission matters to you, consider sponsoring our open-source work so we can keep building the tools that protect children — and keep them free and accessible for everyone.
Built with care for child safety by the Tuteliq team
