-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
72 lines (56 loc) · 2.23 KB
/
.env.example
File metadata and controls
72 lines (56 loc) · 2.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
# Aletheia - Environment Variables
# Copy to .env and fill in your API keys
# =============================================================================
# LLM API Keys (at least one of the providers below is required)
# =============================================================================
# OpenAI API Key (for GPT-4o Vision, embeddings)
OPENAI_API_KEY=your_openai_api_key_here
# Google Gemini API Key (alternative Vision LLM)
GOOGLE_API_KEY=your_google_api_key_here
# Kimi Coding API Key (Vision LLM - recommended)
# Get your API key from: https://platform.kimi.com/
KIMI_API_KEY=your_kimi_api_key_here
KIMI_BASE_URL=https://api.kimi.com/coding/v1
KIMI_MODEL=kimi-coding
# =============================================================================
# Docker Infrastructure (Local Development)
# =============================================================================
# Start services with:
# docker/milvus/start.sh # Vector database
# docker/elasticsearch/start.sh # Keyword search (BM25)
# Milvus (Local Docker)
MILVUS_URI=http://localhost:19530
MILVUS_HOST=localhost
MILVUS_PORT=19530
MILVUS_TOKEN=
MILVUS_COLLECTION=aletheia_documents
# Elasticsearch (Local Docker)
ELASTICSEARCH_URL=http://localhost:9200
ELASTICSEARCH_HOST=localhost
ELASTICSEARCH_PORT=9200
ELASTIC_API_KEY=
# =============================================================================
# Embedding Configuration
# =============================================================================
# Option 1: Ollama (local/self-hosted)
EMBEDDING_PROVIDER=ollama
EMBEDDING_MODEL=mxbai-embed-large:335m
EMBEDDING_DIMENSION=1024
OLLAMA_BASE_URL=http://localhost:11434/v1
# Option 2: OpenAI (cloud-based)
# EMBEDDING_PROVIDER=openai
# EMBEDDING_MODEL=text-embedding-3-small
# EMBEDDING_DIMENSION=1536
# =============================================================================
# Retrieval & Generation Settings
# =============================================================================
# Number of top-ranked chunks to retrieve for the final answer
RETRIEVAL_TOP_K=3
# Number of chunks to summarize in each batch
BATCH_SIZE=5
# Maximum retries for LLM calls
MAX_RETRIES=3
# Timeout for each LLM call (seconds)
TIMEOUT_SECONDS=30
# Optional: Cohere Rerank API key to improve reranking quality
COHERE_API_KEY=