# .env.local.example — 58 lines (49 loc) · 3.03 KB
# Copy to .env.local and fill in the values below.
# ── LLM Provider ─────────────────────────────────────────────────────────────
# anthropic | openai | grok | openrouter | huggingface
AI_PROVIDER=grok
# ── ROMA Mode ─────────────────────────────────────────────────────────────────
# blitz  non-reasoning model, no CoT (~15–30s pipeline) ← fastest
# sharp  fast reasoning model (~25–45s pipeline)
# keen   mid model (~40–70s pipeline) ← default
# smart  quality model (~60–120s pipeline)
# (this example overrides the default with blitz for speed)
ROMA_MODE=blitz
# 1 = one decomposition level (recommended); 2+ = deeper reasoning, slower
# (kept on its own line: some dotenv parsers fold trailing text into the value)
ROMA_MAX_DEPTH=1
# Direct provider keys — fill in (uncommenting if needed) the one matching AI_PROVIDER
XAI_API_KEY=
# ANTHROPIC_API_KEY=sk-ant-...
# OPENAI_API_KEY=sk-...
# ── Model overrides (optional — defaults shown) ───────────────────────────────
# Anthropic — blitz/sharp/keen: haiku | smart: sonnet-4-6
# ANTHROPIC_BLITZ_MODEL=claude-haiku-4-5-20251001
# ANTHROPIC_FAST_MODEL=claude-haiku-4-5-20251001
# ANTHROPIC_MID_MODEL=claude-haiku-4-5-20251001
# ANTHROPIC_SMART_MODEL=claude-sonnet-4-6
# OpenAI — blitz/sharp/keen: gpt-4o-mini | smart: gpt-4o
# OPENAI_BLITZ_MODEL=gpt-4o-mini
# OPENAI_FAST_MODEL=gpt-4o-mini
# OPENAI_MID_MODEL=gpt-4o-mini
# OPENAI_SMART_MODEL=gpt-4o
# Grok (xAI) — blitz: grok-4-fast-non-reasoning | sharp: grok-3-mini | keen: grok-3-fast | smart: grok-3
# GROK_BLITZ_MODEL=grok-4-fast-non-reasoning
# GROK_FAST_MODEL=grok-3-mini
# GROK_MID_MODEL=grok-3-fast
# GROK_SMART_MODEL=grok-3
# OpenRouter — set AI_PROVIDER=openrouter and pick any model
# OPENROUTER_API_KEY=sk-or-v1-...
# OPENROUTER_MODEL=x-ai/grok-3 # smart tier (default: anthropic/claude-sonnet-4-6)
# OPENROUTER_MID_MODEL=x-ai/grok-3-fast # keen tier (default: OPENROUTER_MODEL)
# OPENROUTER_FAST_MODEL=x-ai/grok-3-mini # sharp tier (default: OPENROUTER_MODEL)
# HuggingFace — set AI_PROVIDER=huggingface
# Uses the serverless Inference API (OpenAI-compatible). Requires a paid HF account.
# HUGGINGFACE_API_KEY=hf_...
# HF_BASE_URL=https://router.huggingface.co/v1 # default (serverless router)
# HF_BASE_URL=https://<endpoint>.endpoints.huggingface.cloud # dedicated endpoint
# HF_BLITZ_MODEL=Qwen/Qwen2.5-1.5B-Instruct # 1.5B — fastest with reliable JSON output
# HF_FAST_MODEL=meta-llama/Llama-3.2-3B-Instruct # 3B — sharp tier
# HF_MID_MODEL=meta-llama/Llama-3.1-8B-Instruct # 8B — keen tier
# HF_SMART_MODEL=meta-llama/Llama-3.3-70B-Instruct # 70B — smart tier
# Python ROMA microservice (roma-dspy). Leave empty to skip it and use the TypeScript ROMA fallback.
PYTHON_ROMA_URL=http://localhost:8001
CMC_API_KEY=
KALSHI_API_KEY=
KALSHI_PRIVATE_KEY_PATH=./kalshi_private_key.pem