-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
260 lines (207 loc) · 7.9 KB
/
.env.example
File metadata and controls
260 lines (207 loc) · 7.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
# ============================================
# AI COMMS — Environment Variables
# Copy this to .env and fill in your keys
# ============================================
# --- AI Provider API Keys (fill in the ones you want to use) ---
# Which provider to use: openai | anthropic | google | mistral | cohere | groq | ollama | deepseek | xai | perplexity | together | fireworks | codex | copilot | claude-code | claude-cowork | nvidia-nim | openclaw
AI_PROVIDER=openai
# OpenAI (GPT-4o, GPT-4, GPT-3.5)
OPENAI_API_KEY=
OPENAI_MODEL=gpt-4o
# Anthropic (Claude 4, Claude 3.5 Sonnet, etc.)
ANTHROPIC_API_KEY=
ANTHROPIC_MODEL=claude-sonnet-4-20250514
# Google Gemini
GOOGLE_API_KEY=
GOOGLE_MODEL=gemini-2.0-flash
# Mistral AI
MISTRAL_API_KEY=
MISTRAL_MODEL=mistral-large-latest
# Cohere (Command R+)
COHERE_API_KEY=
COHERE_MODEL=command-r-plus
# Groq (fast inference — LLaMA, Mixtral)
GROQ_API_KEY=
GROQ_MODEL=llama-3.3-70b-versatile
# Ollama (local LLMs — no API key needed)
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3
# DeepSeek
DEEPSEEK_API_KEY=
DEEPSEEK_MODEL=deepseek-chat
# xAI (Grok)
XAI_API_KEY=
XAI_MODEL=grok-2-latest
# Perplexity
PERPLEXITY_API_KEY=
PERPLEXITY_MODEL=sonar-pro
# Together AI (open-source models hosted)
TOGETHER_API_KEY=
TOGETHER_MODEL=meta-llama/Llama-3-70b-chat-hf
# Fireworks AI
FIREWORKS_API_KEY=
FIREWORKS_MODEL=accounts/fireworks/models/llama-v3p1-70b-instruct
# OpenAI Codex (code-optimized)
CODEX_API_KEY=
CODEX_MODEL=o4-mini
# GitHub Copilot / GitHub Models
# Get a token at https://github.com/settings/tokens (needs copilot scope)
COPILOT_TOKEN=
COPILOT_MODEL=gpt-4o
COPILOT_BASE_URL=https://models.github.ai/inference
# Claude Code (Anthropic's agentic coding model)
CLAUDE_CODE_API_KEY=
CLAUDE_CODE_MODEL=claude-sonnet-4-20250514
CLAUDE_CODE_MAX_TOKENS=16384
CLAUDE_CODE_THINKING_BUDGET=10000
# Claude Cowork (Anthropic's collaborative agent)
CLAUDE_COWORK_API_KEY=
CLAUDE_COWORK_MODEL=claude-sonnet-4-20250514
CLAUDE_COWORK_MAX_TOKENS=8192
CLAUDE_COWORK_THINKING_BUDGET=8000
# NVIDIA NIM / NemoClaw (open source models via NVIDIA inference)
# Get an API key at https://build.nvidia.com
NVIDIA_API_KEY=
NVIDIA_NIM_BASE_URL=https://integrate.api.nvidia.com/v1
NVIDIA_NIM_MODEL=nvidia/nemotron-3-super-120b-a12b
NVIDIA_NIM_MAX_TOKENS=4096
# OpenClaw (personal AI assistant — https://openclaw.ai)
# Connects to a running OpenClaw Gateway instance
OPENCLAW_BASE_URL=http://localhost:18789
OPENCLAW_AUTH_TOKEN=
OPENCLAW_SESSION=main
OPENCLAW_MODEL=default
# --- Security ---
# Allowlist — comma-separated sender IDs that can use the bot
# (WhatsApp phone numbers or Teams IDs; presumably Telegram user IDs as well — verify against the security module)
# Leave empty to allow everyone. Set SECURITY_ENABLE_ALLOWLIST=true to activate.
SECURITY_ENABLE_ALLOWLIST=false
SECURITY_ALLOWLIST=
SECURITY_BLOCKLIST=
# Rate limiting (on by default) — max messages per sender per window
SECURITY_ENABLE_RATE_LIMIT=true
SECURITY_RATE_LIMIT_MAX=20
SECURITY_RATE_LIMIT_WINDOW_MS=60000
# Max message length (chars)
SECURITY_MAX_MESSAGE_LENGTH=10000
# Agent-to-agent shared secret — all agents in your network must use the same secret
# Generate one: node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
SECURITY_AGENT_SECRET=
SECURITY_REQUIRE_AGENT_AUTH=false
# Prompt injection detection (on by default, logging only)
# Set BLOCK to true to reject suspicious messages instead of just logging
SECURITY_ENABLE_INPUT_SANITIZATION=true
SECURITY_BLOCK_PROMPT_INJECTION=false
# Replay protection — reject agent messages older than this (ms), 0 = disabled
SECURITY_MAX_MESSAGE_AGE_MS=300000
# Payload encryption — AES-256-GCM for agent-to-agent message bodies
# All agents in your network must share the same key
# Generate one: node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
SECURITY_ENCRYPTION_KEY=
# TLS for webhook servers (Cloud API + Teams)
# Provide paths to cert/key files, or use a reverse proxy (nginx/Caddy) instead
TLS_CERT_PATH=
TLS_KEY_PATH=
# --- Agent Identity ---
AGENT_NAME=MyAI
AGENT_ID=agent_001
# --- Remote Agent ---
# Allow executing real work (file edits, terminal commands) via messaging.
# ⚠️ DANGEROUS — only enable if you understand the risk.
# Only allowlisted senders can use !do and !task commands.
REMOTE_AGENT_ENABLED=false
REMOTE_AGENT_ALLOWLIST=+1234567890
REMOTE_AGENT_WORKSPACE=.
# --- Messaging Platform ---
# Which platform(s) to connect: whatsapp | teams | telegram | both
# NOTE: "both" predates Telegram support and presumably means WhatsApp + Teams — confirm supported values in the platform loader
PLATFORM=whatsapp
# --- WhatsApp ---
# "baileys" for free local connection, "cloud-api" for official Meta API
WHATSAPP_MODE=baileys
# Only needed if WHATSAPP_MODE=cloud-api
WHATSAPP_PHONE_NUMBER_ID=
WHATSAPP_ACCESS_TOKEN=
WHATSAPP_VERIFY_TOKEN=
WHATSAPP_WEBHOOK_PORT=3000
# Cloud API webhook signature verification (optional but recommended)
# Get this from your Meta App Dashboard > App Secret
WHATSAPP_APP_SECRET=
# --- Microsoft Teams ---
# Register a bot at https://dev.botframework.com or Azure Portal
TEAMS_APP_ID=
TEAMS_APP_PASSWORD=
TEAMS_PORT=3978
# --- Telegram ---
# Create a bot via @BotFather on Telegram → /newbot → copy the token
TELEGRAM_BOT_TOKEN=
# Leave TELEGRAM_WEBHOOK_URL empty for long-polling (local dev)
# Set it for production webhook mode (e.g. https://yourdomain.com/telegram)
TELEGRAM_WEBHOOK_URL=
TELEGRAM_WEBHOOK_PORT=8443
# --- Health & Monitoring ---
HEALTH_PORT=9090
# --- Copilot Bridge (VS Code Extension) ---
# Port where the Copilot Bridge extension listens for requests
COPILOT_BRIDGE_PORT=3120
# Token for authenticating requests to the bridge (shared with the extension)
# COPILOT_BRIDGE_TOKEN=
# Auto-route ALL messages to Copilot Bridge when available (default: false, explicit !copilot prefix required)
# COPILOT_BRIDGE_AUTO_ROUTE=false
# --- Claude Code Bridge ---
# Port where the Claude Code bridge server listens
CLAUDE_CODE_BRIDGE_PORT=3121
# Token for authenticating requests to the Claude Code bridge
# CLAUDE_CODE_BRIDGE_TOKEN=
# --- Codex Bridge ---
# Port where the Codex bridge server listens
CODEX_BRIDGE_PORT=3122
# Token for authenticating requests to the Codex bridge
# CODEX_BRIDGE_TOKEN=
# --- Cursor Bridge ---
# Port where the Cursor bridge server listens
CURSOR_BRIDGE_PORT=3123
# Token for authenticating requests to the Cursor bridge
# CURSOR_BRIDGE_TOKEN=
# --- OpenClaw Bridge ---
# Port where the OpenClaw bridge adapter listens
OPENCLAW_BRIDGE_PORT=3124
# Token for authenticating requests to the OpenClaw bridge
# OPENCLAW_BRIDGE_TOKEN=
# --- OpenClaw Hub Connector ---
# Bridges OpenClaw Gateway ↔ Agent Hub for cross-network agent discovery
# OPENCLAW_GATEWAY=ws://127.0.0.1:18789
# OPENCLAW_HUB_NAME=openclaw
# HUB_URL=ws://127.0.0.1:8090
# HUB_SECRET=your-shared-secret
# --- Multi-Agent Setup ---
# Run multiple VS Code agents that collaborate on tasks.
# Choose ONE option:
# Option A: Simple port list (all on localhost, no auth)
# MULTI_AGENT_PORTS=3120,3121,3122
# Option B: Full registry with names, URLs, API keys, and skills (JSON)
# MULTI_AGENT_REGISTRY=[{"name":"backend","url":"http://127.0.0.1:3120","apiKey":"","skills":["api","backend"]},{"name":"frontend","url":"http://127.0.0.1:3121","apiKey":"","skills":["ui","frontend"]}]
# Option C: Agent Hub (production — agents connect via WebSocket globally)
# AGENT_HUB_URL=http://your-hub-server:8090
# AGENT_HUB_SECRET=your-shared-secret
# --- Agent Hub Server (if hosting) ---
# HUB_SECRET=your-shared-secret
# HUB_PORT=8090
# HUB_MAX_AGENTS=50
# HUB_MAX_PER_IP=5
# HUB_LOG_LEVEL=info
# HUB_ALLOWED_ORIGINS=https://yourdomain.com
# TLS_CERT_PATH=/path/to/cert.pem
# TLS_KEY_PATH=/path/to/key.pem
# --- Media ---
# Max media download size in bytes (default: 50MB)
# MAX_MEDIA_SIZE=52428800
# --- Admin ---
# Comma-separated sender IDs (e.g. WhatsApp phone numbers) that can use !admin commands
ADMIN_LIST=
# --- Provider Failover ---
# Comma-separated fallback providers if the primary fails
# e.g. AI_FALLBACK_PROVIDERS=anthropic,google,groq
AI_FALLBACK_PROVIDERS=
# --- Per-Provider Rate Limits (requests per minute, optional) ---
# RATE_LIMIT_OPENAI_RPM=60
# RATE_LIMIT_ANTHROPIC_RPM=40
# RATE_LIMIT_GOOGLE_RPM=60