# CyberViser — Hancock Environment Configuration
# Copy this file to .env and fill in your values
# NEVER commit .env to git
# ── Backend Selection ─────────────────────────────────────────
# Options: ollama | nvidia | openai
# Hancock uses Ollama (local) by default; falls back to OpenAI if Ollama fails.
HANCOCK_LLM_BACKEND=ollama
# ── Ollama (Primary — local LLM) ──────────────────────────────
# Install Ollama: https://ollama.com/download
# Then pull a model: ollama pull llama3.1:8b
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3.1:8b
OLLAMA_CODER_MODEL=qwen2.5-coder:7b
# ── NVIDIA NIM (Optional cloud backend) ───────────────────────
# Set HANCOCK_LLM_BACKEND=nvidia and provide your key to use NIM instead.
# Get your free API key at: https://build.nvidia.com
NVIDIA_API_KEY=nvapi-your-key-here
# ── OpenAI (Fallback) ─────────────────────────────────────────
# Get your API key at: https://platform.openai.com/api-keys
OPENAI_API_KEY=sk-your-openai-key-here
OPENAI_ORG_ID=org-your-org-id-here
# ── Model Selection (NIM / OpenAI overrides) ──────────────────
# Only used when HANCOCK_LLM_BACKEND=nvidia or =openai
HANCOCK_MODEL=mistralai/mistral-7b-instruct-v0.3
HANCOCK_CODER_MODEL=qwen/qwen2.5-coder-32b-instruct
# OpenAI fallback models
OPENAI_MODEL=gpt-4o-mini
OPENAI_CODER_MODEL=gpt-4o
# ── Server Config ─────────────────────────────────────────────
HANCOCK_PORT=5000
# ── API Security ──────────────────────────────────────────────
# Bearer token required on all /v1/* endpoints.
# Leave empty to disable auth (dev/local use only).
# Generate with: python -c "import secrets; print(secrets.token_urlsafe(32))"
HANCOCK_API_KEY=
# Max requests per IP per minute (default: 60)
HANCOCK_RATE_LIMIT=60
# Allow the plaintext ip-api.com fallback for OSINT geolocation only if you
# accept sending indicator lookups over unencrypted HTTP. Leave disabled to
# use HTTPS sources only.
HANCOCK_ALLOW_INSECURE_GEOIP=false
# ── Webhook Notifications ─────────────────────────────────────
# HMAC-SHA256 signature secret for /v1/webhook (optional — set to enforce signature verification)
# Sign requests: X-Hancock-Signature: sha256=<hmac_hex>
HANCOCK_WEBHOOK_SECRET=
# Slack incoming webhook URL (optional — alerts posted to channel on /v1/webhook)
HANCOCK_SLACK_WEBHOOK=
# Microsoft Teams incoming webhook URL (optional)
HANCOCK_TEAMS_WEBHOOK=
# ── Google Cloud Storage (optional — model backup) ────────────────────
# GCS_BUCKET=cyberviser-models
# GCS_PREFIX=v3
# GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-key.json
# ── Logging ───────────────────────────────────────────────────────────────────
LOG_LEVEL=INFO
LOG_FORMAT=json
# ── Graceful shutdown ─────────────────────────────────────────────────────────
SHUTDOWN_TIMEOUT=30