# QuantStack Environment Configuration
# Copy this file to .env and fill in your values
# =============================================================================
# REQUIRED: Data providers — set at least one
# =============================================================================
# Alpaca — paper/live execution broker + OHLCV data fallback
# Paper account is free at alpaca.markets — no credit card required.
# When USE_REAL_TRADING=true, AlpacaBroker is used automatically if these keys are set.
ALPACA_API_KEY=
ALPACA_SECRET_KEY=
# true = paper trading endpoint (safe default)
ALPACA_PAPER=true
# ALPACA_FILL_TIMEOUT=30 # seconds to wait for a market order fill (default 30)
# Polygon.io — recommended fallback (unlimited calls, $29/month Starter)
POLYGON_API_KEY=
# Alpha Vantage — free tier only (5 calls/min — research/backtest fallback)
ALPHA_VANTAGE_API_KEY=
# FinancialDatasets.ai — fundamentals, earnings, insider trades, SEC filings, news
# Primary source for fundamental data; low-priority fallback for OHLCV prices.
# Developer $200/month (1000 req/min), Pro $2000/month (unlimited).
# Get your key at: https://financialdatasets.ai
FINANCIAL_DATASETS_API_KEY=
# FINANCIAL_DATASETS_BASE_URL=https://api.financialdatasets.ai # default
# FINANCIAL_DATASETS_RATE_LIMIT_RPM=1000 # Developer tier
# FRED — free macroeconomic data (register at fred.stlouisfed.org)
FRED_API_KEY=
# SEC EDGAR — free insider/institutional/filing data (no key required, just user agent)
# Format: "CompanyName admin@company.com" (SEC policy)
EDGAR_USER_AGENT=
# Provider priority for data fetching (comma-separated, tried left to right).
# The registry skips providers whose credentials are missing automatically.
DATA_PROVIDER_PRIORITY=alpaca,polygon,financial_datasets,alpha_vantage
# =============================================================================
# OPTIONAL: Interactive Brokers (data + trading — requires IB Gateway running)
# =============================================================================
# IBKR_HOST=127.0.0.1
# IBKR_PORT=4001 # 4001=IB Gateway (lighter), 7497=TWS
# IBKR_CLIENT_ID=1 # Must be unique per connection (0-999)
# =============================================================================
# LLM Configuration
# =============================================================================
# Primary provider. Supported values:
# bedrock | anthropic | openai | vertex_ai | gemini |
# azure | groq | together_ai | fireworks_ai | mistral |
# ollama | custom_openai | bedrock_groq
#
# groq: Qwen3-32B for all operational tiers (medium/light/bulk).
# bedrock_groq: OpenAI gpt-oss-120b for heavy reasoning, Groq Qwen3-32B for all others.
# No AWS Bedrock used — Sonnet is replaced by gpt-oss-120b for heavy tasks.
LLM_PROVIDER=groq
# Fallback chain — comma-separated, tried left to right if primary is unavailable.
LLM_FALLBACK_CHAIN=openai,groq
# =============================================================================
# Provider credentials
# =============================================================================
# --- AWS Bedrock (Tier 1) ---
# Uses standard boto3 credential chain: env vars → ~/.aws/credentials → IAM → SSO.
# Set AWS_PROFILE to target a specific named SSO profile.
# AWS_PROFILE=DataScience.Admin-Analytics
BEDROCK_REGION=us-east-1
# pods + assistant (Sonnet 4.6 cross-region)
BEDROCK_MODEL_ID=us.anthropic.claude-sonnet-4-6
# ICs always use Haiku 4.5 by default — override with LLM_MODEL_IC below if needed.
# --- Anthropic Direct (Tier 1) ---
ANTHROPIC_API_KEY=
ANTHROPIC_MODEL=claude-sonnet-4-20250514
# --- OpenAI (Tier 1) ---
OPENAI_API_KEY=
OPENAI_MODEL=gpt-4o
# --- Google Vertex AI (Tier 1, requires gcloud auth + google-cloud-aiplatform) ---
# VERTEX_PROJECT=my-gcp-project
# VERTEX_LOCATION=us-central1
# VERTEX_MODEL=gemini-2.5-flash
# --- Google AI Studio (Tier 1, free tier available) ---
# GEMINI_API_KEY=
# GEMINI_MODEL=gemini-2.5-flash
# --- Azure OpenAI (Tier 2) ---
# AZURE_API_KEY=
# AZURE_API_BASE=https://your-resource.openai.azure.com/
# AZURE_API_VERSION=2024-02-15-preview
# AZURE_DEPLOYMENT_NAME=gpt-4o
# --- Groq (primary provider) ---
# Used standalone (LLM_PROVIDER=groq) or in hybrid mode (LLM_PROVIDER=bedrock_groq).
# Hybrid mode: heavy tier uses OpenAI gpt-oss-120b, all others use Groq Qwen3-32B.
GROQ_API_KEY=
GROQ_MODEL=qwen/qwen3-32b
# --- Together AI (Tier 2) ---
# TOGETHER_API_KEY=
# TOGETHER_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo
# --- Fireworks AI (Tier 2) ---
# FIREWORKS_API_KEY=
# FIREWORKS_MODEL=accounts/fireworks/models/llama-v3p3-70b-instruct
# --- Mistral AI (Tier 2) ---
# MISTRAL_API_KEY=
# MISTRAL_MODEL=mistral-large-latest
# --- Ollama (Tier 3 — local, no API key required) ---
# To use Ollama as primary provider, set LLM_PROVIDER=ollama and configure:
# OLLAMA_BASE_URL=http://localhost:11434
# OLLAMA_MODEL=qwen3.5:35b-a3b # pods + assistant (MoE, ~20GB)
# OLLAMA_IC_MODEL=qwen3.5:9b # ICs + decoder (dense, ~6GB)
# LLM_MODEL_IC=ollama/qwen3.5:9b
# LLM_MODEL_POD=ollama/qwen3.5:35b-a3b
# LLM_MODEL_ASSISTANT=ollama/qwen3.5:35b-a3b
# LLM_MODEL_DECODER=ollama/qwen3.5:9b
# LLM_MODEL_WORKSHOP=bedrock/us.anthropic.claude-sonnet-4-20250514 # cloud for /workshop
# --- Custom OpenAI-compatible (Tier 3 — vLLM, LM Studio, etc.) ---
# CUSTOM_OPENAI_BASE_URL=http://localhost:8000/v1
# CUSTOM_OPENAI_API_KEY=not-needed
# CUSTOM_OPENAI_MODEL=local-model
# =============================================================================
# Per-agent model overrides (optional)
# Format: any LiteLLM model string — provider/model_id
# These take precedence over LLM_PROVIDER for the given tier.
# =============================================================================
# ICs — narrow focused work, cheaper/faster models are sufficient
# LLM_MODEL_IC=bedrock/us.anthropic.claude-haiku-4-5-20251001-v1:0
# LLM_MODEL_IC=groq/qwen/qwen3-32b # Groq Qwen3-32B
# LLM_MODEL_IC=gemini/gemini-2.5-flash # balanced option
# Pod Managers — synthesis, benefits from stronger models
# LLM_MODEL_POD=bedrock/us.anthropic.claude-sonnet-4-6
# LLM_MODEL_POD=gemini/gemini-2.5-flash # balanced option
# Trading Assistant — final synthesis, use the best available
# LLM_MODEL_ASSISTANT=bedrock/us.anthropic.claude-opus-4-6-v1
# LLM_MODEL_ASSISTANT=anthropic/claude-sonnet-4-20250514
# Decoder ICs — pattern recognition, same as IC tier by default
# LLM_MODEL_DECODER=gemini/gemini-2.5-flash
# Workshop — deep strategy research (cloud model recommended, smarter than local)
# LLM_MODEL_WORKSHOP=bedrock/us.anthropic.claude-sonnet-4-20250514
# =============================================================================
# LangGraph / Docker Compose Stack
# =============================================================================
# Langfuse observability (self-hosted)
LANGFUSE_SECRET_KEY=sk-lf-change-me
LANGFUSE_PUBLIC_KEY=pk-lf-change-me
# Internal Docker Compose URL
LANGFUSE_HOST=http://langfuse:3000
NEXTAUTH_SECRET=change-me-random-secret
SALT=change-me-random-salt
LANGFUSE_DB_PASSWORD=CHANGE_ME_MIN_12_CHARS
LANGFUSE_INIT_USER_PASSWORD=CHANGE_ME_MIN_12_CHARS
# Postgres (Docker Compose)
POSTGRES_PASSWORD=CHANGE_ME_MIN_12_CHARS
# Ollama (embeddings + local inference)
OLLAMA_BASE_URL=http://ollama:11434
EMBEDDINGS_OLLAMA_MODEL_NAME=nomic-embed-text
# LiteLLM router config (load-balancing across providers — optional)
# LITELLM_ROUTER_CONFIG=config/litellm_router.yaml
# Execution gate (set true after 48-hour verification phase)
EXECUTION_ENABLED=false
# =============================================================================
# OPTIONAL: Research pipeline tuning
# =============================================================================
# Self-critique confidence threshold (0.0 to disable loop, 0.7 default, 0.9 strict)
HYPOTHESIS_CONFIDENCE_THRESHOLD=0.7
# Fan-out parallel validation per symbol (default false = sequential pipeline)
RESEARCH_FAN_OUT_ENABLED=false
# SignalEngine brief caching (avoids re-running 22 collectors for same symbol)
SIGNAL_ENGINE_CACHE_ENABLED=true
SIGNAL_ENGINE_CACHE_TTL=3600
# Context load mode: parallel (pre-fetch data concurrently) or sequential (LLM tool calls)
CONTEXT_LOAD_MODE=parallel
# ML training scope: draft (fast 126-day, 3-fold) or full (252-day, 5-fold)
ML_TRAINING_MODE=draft
# Trade quality score filter threshold for WeightLearner (default 0.3)
QUALITY_SCORE_FILTER_THRESHOLD=0.3
# Auto-promote strategies from forward_testing to live when metrics pass (default true)
AUTO_PROMOTE_ENABLED=true
# Scale position sizes for unproven/forward_testing strategies (0.5 = half size)
# float in [0.0, 1.0]
FORWARD_TESTING_SIZE_SCALAR=0.5
# Allow forward_testing strategies to take live entries (default true)
# boolean: true|false
USE_FORWARD_TESTING_FOR_ENTRIES=true
# Number of top universe symbols to refresh options data for (default: 30)
OPTIONS_REFRESH_TOP_N=30
# =============================================================================
# OPTIONAL: Trading Window configuration
# =============================================================================
#
# Enum defined in: src/quantstack/trading_window.py (TradingWindow, InstrumentType, WindowSpec)
# Settings class: src/quantstack/config/settings.py (TradingWindowSettings)
# Risk enforcement: src/quantstack/execution/risk_gate.py (step 1c + DTE bounds)
#
# Controls which instrument types and time horizons are allowed.
# Comma-separated. Case-insensitive. Default: all (everything allowed).
#
# OPTIONS windows (by DTE):
# options_0dte — DTE 0 (same-day expiry)
# options_weekly — DTE 1-7
# options_biweekly — DTE 7-14
# options_monthly — DTE 14-45
# options_quarterly — DTE 45-90
# options_6_month — DTE 90-180
# options_leaps — DTE 180-730
# options_all — any DTE
#
# EQUITY windows (by hold period):
# equity_scalp — intraday scalps (minutes)
# equity_day_trade — same day, close by EOD
# equity_swing — 1-10 days
# equity_position — 10-90 days
# equity_investment — 90+ days
# equity_all — any hold period
#
# COMPOSITE shortcuts:
# all — everything (default)
# options_short_term — 0dte + weekly + biweekly
# options_medium_term — monthly + quarterly
# options_long_term — 6_month + leaps
# equity_short_term — scalp + day_trade + swing
# equity_long_term — position + investment
#
# Examples:
# TRADING_WINDOW=options_short_term
# TRADING_WINDOW=options_weekly,options_monthly,equity_swing
# RESEARCH_WINDOW=all
#
# RESEARCH_WINDOW=all
# TRADING_WINDOW=options_short_term
#
# DEPRECATED — use RESEARCH_WINDOW / TRADING_WINDOW instead:
# RESEARCH_HOLDING_PERIODS=intraday,short_swing,swing,position
# TRADING_HOLDING_PERIODS=short_swing
# =============================================================================
# OPTIONAL: Execution Monitor
# =============================================================================
# EXEC_MONITOR_POLL_INTERVAL=5 # seconds between DB polls for new positions
# EXEC_MONITOR_RECONCILE_INTERVAL=60 # seconds between broker reconciliation
# EXEC_MONITOR_QUOTE_FEED=auto # auto|quotes|bars — feed selection
# EXEC_MONITOR_SHADOW_MODE=true # true=log exits without executing, false=live
# SHADOW_COMPARISON_WINDOW_SECONDS=5 # max time delta to match decisions from both systems
# SHADOW_SUMMARY_INTERVAL_MINUTES=15 # how often to log aggregate shadow mode metrics
# =============================================================================
# OPTIONAL: Risk limits (override defaults)
# =============================================================================
# float in [0.0, 1.0] — 10% of equity per symbol
RISK_MAX_POSITION_PCT=0.10
# Hard $$ cap per position
RISK_MAX_POSITION_NOTIONAL=20000
# -2% triggers daily halt
RISK_DAILY_LOSS_LIMIT_PCT=0.02
# Won't trade symbols < 500k ADV
RISK_MIN_DAILY_VOLUME=500000
# Comma-separated never-trade list
# RISK_RESTRICTED_SYMBOLS=
# =============================================================================
# OPTIONAL: Storage
# =============================================================================
# PostgreSQL DSN — all operational state (positions, signals, strategies, fills)
# Local dev (direct):
# TRADER_PG_URL=postgresql://localhost/quantstack
# Docker Compose (host-to-container bridge):
# TRADER_PG_URL=postgresql://your_username@host.docker.internal:5432/quantstack
# Migration system: set to 'true' to use Alembic instead of legacy migrations.
# Default: false (legacy path). Switch to true after deploying Alembic baseline.
# Remove this flag entirely after 1 week of stable Alembic operation.
# boolean: true|false
USE_ALEMBIC=false
# Connection pool ceiling for pg_storage (default 10; increase under high concurrency)
# PG_POOL_MAX=20
# Internal Docker Compose DSN — used by tools that run inside containers
# BIGTOOL_PG_URL=postgresql://quantstack:quantstack@postgres:5432/quantstack
KILL_SWITCH_SENTINEL=~/.quantstack/KILL_SWITCH_ACTIVE
# =============================================================================
# OPTIONAL: Live / paper execution broker
# =============================================================================
# Master switch — default false (uses internal PaperBroker simulation, no external broker)
# Set true to route all orders to a real broker (Alpaca paper/live or eTrade).
# boolean: true|false
USE_REAL_TRADING=false
# Broker priority when USE_REAL_TRADING=true:
# 1. AlpacaBroker — if ALPACA_API_KEY + ALPACA_SECRET_KEY are set (see above)
# 2. EtradeBroker — if ETRADE_CONSUMER_KEY + ETRADE_CONSUMER_SECRET are set
# 3. PaperBroker — fallback if no broker credentials are found (logs a warning)
# eTrade credentials — only needed if using eTrade instead of Alpaca
# Get keys at: https://developer.etrade.com/getting-started
# ETRADE_CONSUMER_KEY=your_consumer_key
# ETRADE_CONSUMER_SECRET=your_consumer_secret
# ETRADE_SANDBOX=true → uses apisb.etrade.com (API test environment, fake data)
# ETRADE_SANDBOX=false → uses api.etrade.com (real market data; paper or live account)
# Always start with true. Switch to false once paper trading is validated.
# ETRADE_SANDBOX=true
# Which eTrade account to trade in (auto-selects first account if not set)
# ETRADE_ACCOUNT_ID_KEY=
# Seconds to wait for a market order to fill via eTrade (default: 30)
# ETRADE_FILL_TIMEOUT=30
# =============================================================================
# OPTIONAL: Monitoring alerts — Discord Incoming Webhook
# =============================================================================
# Paste the webhook URL from: Server Settings → Integrations → Webhooks → New Webhook
# When set, AlphaMonitor posts degradation alerts here after each trading session.
# GET /skills/degradation triggers the check on demand.
# DISCORD_WEBHOOK_URL=https://discord.com/api/webhooks/...
# =============================================================================
# Discord MCP — watchlist & results channel reader
# =============================================================================
# Your Discord token.
# User token (default) — reads channels you're already a member of.
# Get it: open Discord in browser → F12 → Network tab →
# any /api/ request → copy the Authorization header value (looks like MTExxx...)
#
# Bot token — requires a server admin to add your bot.
# Format: Bot MTExxx... (include the "Bot " prefix)
#
# DISCORD_TOKEN=your_token_here
# "user" (default) or "bot". Controls the Authorization header format.
# DISCORD_TOKEN_TYPE=user
# Optional: override which DuckDB file stores Discord messages.
# Defaults to data/trader.duckdb so messages sit alongside market data.
# DISCORD_DB_PATH=data/trader.duckdb
# =============================================================================
# OPTIONAL: Elliott Wave Forecast — member chart scraper
# Level 3 subscription credentials for member.elliottwave-forecast.com
# =============================================================================
# EWF_USERNAME=your_email@example.com
# EWF_PASSWORD=your_password
# =============================================================================
# OPTIONAL: Langfuse trace retention (stub — actual cleanup not yet implemented)
# =============================================================================
# Set LANGFUSE_RETENTION_ENABLED=true when ready to enable scheduled cleanup.
# LANGFUSE_RETENTION_DAYS controls how many days of traces to keep.
# boolean: true|false (default: false)
LANGFUSE_RETENTION_ENABLED=false
# positive integer (default: 30)
LANGFUSE_RETENTION_DAYS=30
# =============================================================================
# OPTIONAL: Feedback Loop Kill Switches (Phase 7)
# =============================================================================
#
# Each flag independently enables/disables one feedback adjustment.
# Default: false (safe). Enable one at a time after verifying data accumulation.
# Data collection (IC tracking, agent skills, etc.) runs regardless of these flags.
# Section 07: IC-based signal weight adjustment
FEEDBACK_IC_WEIGHT_ADJUSTMENT=false
# Section 08: Signal correlation penalties
FEEDBACK_CORRELATION_PENALTY=false
# Section 10: Multiplicative conviction calibration
FEEDBACK_CONVICTION_MULTIPLICATIVE=false
# Section 12: Live vs. backtest Sharpe demotion
FEEDBACK_SHARPE_DEMOTION=false
# Section 13: Concept drift detection and auto-retrain
FEEDBACK_DRIFT_DETECTION=false
# Section 15: Regime transition probability sizing
FEEDBACK_TRANSITION_SIZING=false