-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathworker.js
More file actions
186 lines (165 loc) · 6.35 KB
/
worker.js
File metadata and controls
186 lines (165 loc) · 6.35 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/**
* ============================================================
* Cloudflare AI Worker — Universal LLM API Proxy
* Author: Usman Ali | github.com/dotusmanali
* ============================================================
*
* FEATURES:
* - OpenAI-compatible API format (messages array)
* - Bearer token authentication (your own secret key)
* - CORS enabled — works from any frontend / make.com / n8n
* - Supports all Cloudflare Workers AI models
* - System prompt support
* - Streaming + non-streaming responses
*
* DEPLOY:
* 1. Copy this file to your Cloudflare Worker
* 2. Set environment variable: API_KEY = your-secret-key
* 3. Enable Workers AI binding: name = AI
* 4. Deploy → get your Worker URL
* 5. Use URL + API key in make.com / any backend
*
* ============================================================
*/
// ─── Default Configuration ───────────────────────────────────────────────────

// Model used when a request omits `model` or names one not in ALLOWED_MODELS.
const DEFAULT_MODEL = "@cf/meta/llama-3.3-70b-instruct-fp8-fast";

// Allow-list of Cloudflare Workers AI models this proxy will run.
// Frozen so the shared list cannot be mutated at runtime.
const ALLOWED_MODELS = Object.freeze([
  "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
  "@cf/meta/llama-3.1-8b-instruct",
  "@cf/meta/llama-3.1-70b-instruct",
  "@cf/mistral/mistral-7b-instruct-v0.1",
  "@cf/google/gemma-7b-it",
  "@cf/qwen/qwen1.5-7b-chat-awq",
  "@cf/microsoft/phi-2",
]);

// ─── CORS Headers ─────────────────────────────────────────────────────────────
// Attached to every response so the proxy is callable from any origin
// (browser frontends, make.com, n8n, etc.). Frozen: spread-copied, never mutated.
const CORS_HEADERS = Object.freeze({
  "Access-Control-Allow-Origin": "*",
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
  "Access-Control-Max-Age": "86400",
});
// ─── Main Handler ─────────────────────────────────────────────────────────────
/**
 * Worker entry point: an OpenAI-style chat-completion proxy in front of
 * Cloudflare Workers AI (`env.AI`), guarded by a shared-secret Bearer token
 * (`env.API_KEY`).
 *
 * Routes:
 *   OPTIONS *  → 204 CORS preflight
 *   GET     *  → health check + usage documentation
 *   POST    *  → run inference (streaming or JSON completion)
 *   other      → 405
 */
const worker = {
  async fetch(request, env) {
    // Handle CORS preflight
    if (request.method === "OPTIONS") {
      return new Response(null, { status: 204, headers: CORS_HEADERS });
    }

    // Health check — GET / (also documents the expected request shape)
    if (request.method === "GET") {
      return jsonResponse({
        status: "ok",
        name: "Cloudflare AI Worker",
        models: ALLOWED_MODELS,
        usage: {
          endpoint: "POST /",
          headers: {
            "Authorization": "Bearer YOUR_API_KEY",
            "Content-Type": "application/json",
          },
          body: {
            model: DEFAULT_MODEL,
            messages: [
              { role: "system", content: "You are a helpful assistant." },
              { role: "user", content: "Hello!" },
            ],
            max_tokens: 1024,
            stream: false,
          },
        },
      });
    }

    // Only allow POST for inference
    if (request.method !== "POST") {
      return jsonResponse({ error: "Method not allowed" }, 405);
    }

    // ── Authentication ──────────────────────────────────────────────────────
    const authHeader = request.headers.get("Authorization") || "";
    const token = authHeader.startsWith("Bearer ") ? authHeader.slice(7).trim() : "";
    if (!env.API_KEY) {
      // Fail loudly if the deployment forgot the secret, rather than
      // accepting every request with an empty expected key.
      return jsonResponse({ error: "Server misconfigured: API_KEY not set" }, 500);
    }
    if (token !== env.API_KEY) {
      return jsonResponse({ error: "Unauthorized — invalid API key" }, 401);
    }

    // ── Parse Request Body ──────────────────────────────────────────────────
    let body;
    try {
      body = await request.json();
    } catch {
      return jsonResponse({ error: "Invalid JSON body" }, 400);
    }
    const { messages, model, max_tokens, temperature, stream } = body;

    // Validate messages
    if (!Array.isArray(messages) || messages.length === 0) {
      return jsonResponse({ error: "messages array is required" }, 400);
    }

    // Select model (default if not provided or not in allowed list)
    const selectedModel = ALLOWED_MODELS.includes(model) ? model : DEFAULT_MODEL;

    // BUGFIX: use ?? (nullish coalescing) instead of || so an explicit
    // `temperature: 0` — the standard setting for deterministic output —
    // is honored instead of being silently replaced by the 0.7 default.
    const inferenceOptions = {
      messages,
      max_tokens: max_tokens ?? 2048,
      temperature: temperature ?? 0.7,
    };

    // ── Run AI Inference ────────────────────────────────────────────────────
    try {
      if (stream === true) {
        // Streaming: pass the model's stream straight through as SSE.
        const response = await env.AI.run(selectedModel, {
          ...inferenceOptions,
          stream: true,
        });
        return new Response(response, {
          headers: {
            ...CORS_HEADERS,
            "Content-Type": "text/event-stream",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
          },
        });
      }

      // Non-streaming: wrap the result in an OpenAI-compatible envelope.
      const result = await env.AI.run(selectedModel, inferenceOptions);
      return jsonResponse({
        id: `cf-${Date.now()}`,
        object: "chat.completion",
        model: selectedModel,
        choices: [
          {
            index: 0,
            message: {
              role: "assistant",
              content: result.response || "",
            },
            finish_reason: "stop",
          },
        ],
        // Workers AI does not report token counts here; keep the OpenAI
        // shape with nulls so clients can parse the envelope uniformly.
        usage: {
          prompt_tokens: null,
          completion_tokens: null,
          total_tokens: null,
        },
      });
    } catch (err) {
      console.error("AI inference error:", err);
      // err may not be an Error instance (e.g. a thrown string).
      return jsonResponse(
        { error: "AI inference failed", detail: err instanceof Error ? err.message : String(err) },
        500
      );
    }
  },
};

export default worker;
// ─── Helpers ──────────────────────────────────────────────────────────────────
/**
 * Build a pretty-printed JSON Response with CORS headers attached.
 *
 * @param {unknown} data - Value to serialize as the response body.
 * @param {number} [status=200] - HTTP status code.
 * @returns {Response} JSON response carrying CORS + content-type headers.
 */
function jsonResponse(data, status = 200) {
  const payload = JSON.stringify(data, null, 2);
  const headers = { ...CORS_HEADERS, "Content-Type": "application/json" };
  return new Response(payload, { status, headers });
}