# ============================================================================
# NotebookLLM Backend - Environment Configuration
# ============================================================================
# Copy this file to `.env` and fill in your actual values
# Never commit .env to Git! It's in .gitignore for security.
# ============================================================================
# ----------------------------------------------------------------------------
# Application Settings
# ----------------------------------------------------------------------------
APP_NAME=NotebookLLM
ENVIRONMENT=development # development | staging | production
DEBUG=true # Set to false in production
LOG_LEVEL=info # debug | info | warning | error
# ----------------------------------------------------------------------------
# API Server Configuration
# ----------------------------------------------------------------------------
API__HOST=0.0.0.0
API__PORT=8000
# IMPORTANT: In production, replace ["*"] with your actual frontend domain!
API__CORS_ORIGINS=["http://localhost:3000","http://localhost:5173"]
# ----------------------------------------------------------------------------
# Database (PostgreSQL via Supabase)
# ----------------------------------------------------------------------------
# Format: postgresql+asyncpg://user:password@host:port/database?ssl=require
# NOTE: the asyncpg driver uses "ssl=require", NOT libpq's "sslmode=require" —
# passing sslmode through this URL raises a TypeError at connect time.
DATABASE_URL=postgresql+asyncpg://postgres:your_password@db.xxx.supabase.co:5432/postgres?ssl=require
# ----------------------------------------------------------------------------
# Supabase Configuration
# ----------------------------------------------------------------------------
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_KEY=your_supabase_anon_key_here
SUPABASE_SERVICE_ROLE_KEY=your_supabase_service_role_key_here
SUPABASE_JWT_SECRET=your_jwt_secret_here
# Storage Configuration
STORAGE_URL=https://your-project.supabase.co/storage/v1
STORAGE_PROVIDER=supabase # supabase | local | s3
STORAGE_PUBLIC_BUCKET=notebook-public
STORAGE_PRIVATE_BUCKET=notebook-private
# ----------------------------------------------------------------------------
# Vector Database (Qdrant)
# ----------------------------------------------------------------------------
QDRANT_HOST=https://your-cluster.qdrant.io:6333
QDRANT_API_KEY=your_qdrant_api_key_here
QDRANT_COLLECTION_NAME=Notebookllm
QDRANT_USE_HTTPS=true
# ----------------------------------------------------------------------------
# LLM Providers
# ----------------------------------------------------------------------------
# Default Provider
LLM__PROVIDER=gemini # gemini | openai | groq | litellm
# Gemini (Google)
GEMINI_API_KEY=your_gemini_api_key_here
LLM__GEMINI_MODEL=gemini-2.0-flash-exp
# OpenAI
OPENAI_API_KEY=your_openai_api_key_here
LLM__OPENAI_MODEL=gpt-4o-mini
# Groq
GROQ_API_KEY=your_groq_api_key_here
LLM__GROQ_MODEL=llama-3.3-70b-versatile
# LiteLLM (Optional - for unified API)
LITELLM_API_KEY=your_litellm_key_here
# LLM Generation Settings
LLM__TEMPERATURE=0.2
LLM__MAX_TOKENS=8192
# ----------------------------------------------------------------------------
# Embeddings Configuration
# ----------------------------------------------------------------------------
EMBEDDING__PROVIDER=huggingface # huggingface | openai
EMBEDDING__MODEL=all-MiniLM-L6-v2
EMBEDDING__DIMENSION=384
# ----------------------------------------------------------------------------
# Document Processing Services
# ----------------------------------------------------------------------------
# Web Scraping
FIRECRAWL_API_KEY=your_firecrawl_api_key_here
# Audio Transcription
ASSEMBLYAI_API_KEY=your_assemblyai_api_key_here
# Text-to-Speech (Optional)
ELEVENLABS_API_KEY=your_elevenlabs_key_here
# ----------------------------------------------------------------------------
# Reranking (Optional but Recommended)
# ----------------------------------------------------------------------------
COHERE_API_KEY=your_cohere_api_key_here
RERANK__PROVIDER=cohere # cohere | none
RERANK__MODEL=rerank-english-v3.0
RERANK__TOP_N=5
# ----------------------------------------------------------------------------
# RAG Configuration
# ----------------------------------------------------------------------------
RAG__CHUNK_SIZE=1000
RAG__CHUNK_OVERLAP=200
RAG__TOP_K_RESULTS=20
RAG__ENABLE_STREAMING=true
RAG__ENABLE_HYDE=true
RAG__ENABLE_MMR=false
RAG__SIMILARITY_SCORE_THRESHOLD=0.5
# Hybrid Search (Dense + Sparse)
RAG__HYBRID_ALPHA=0.7 # 0=sparse only, 1=dense only, 0.7=balanced
RAG__SPARSE_TOP_K=20
RAG__DENSE_TOP_K=20
# ----------------------------------------------------------------------------
# Task Queue (Procrastinate)
# ----------------------------------------------------------------------------
USE_TASK_QUEUE=true
# Worker Configuration
QUEUE__WORKERS_CRITICAL=2
QUEUE__WORKERS_HIGH=4
QUEUE__WORKERS_STANDARD=4
QUEUE__RETRY_ATTEMPTS_CRITICAL=5
QUEUE__RETRY_ATTEMPTS_HIGH=3
QUEUE__RETRY_ATTEMPTS_STANDARD=2
QUEUE__ENABLE_DEAD_JOB_RECOVERY=true
QUEUE__DEAD_JOB_THRESHOLD_MINUTES=30
# ----------------------------------------------------------------------------
# Observability (Langfuse)
# ----------------------------------------------------------------------------
LANGFUSE_ENABLED=true
LANGFUSE_PUBLIC_KEY=pk-lf-xxx
LANGFUSE_SECRET_KEY=sk-lf-xxx
LANGFUSE_HOST=https://cloud.langfuse.com
LANGFUSE_FLUSH_AT=10
LANGFUSE_FLUSH_INTERVAL=5
LANGFUSE_DEBUG=false
# Langchain Integration (Optional)
LANGCHAIN_TRACING_V2=true
LANGCHAIN_PROJECT=NotebookLLM
LANGCHAIN_API_KEY=your_langsmith_key_here
# ----------------------------------------------------------------------------
# File Upload Settings
# ----------------------------------------------------------------------------
UPLOAD__MAX_SIZE_MB=100
UPLOAD__ALLOWED_EXTENSIONS=[".pdf",".docx",".txt",".md",".py",".ipynb",".mp3",".mp4",".wav"]
# ----------------------------------------------------------------------------
# Rate Limiting
# ----------------------------------------------------------------------------
RATE_LIMIT__ENABLED=true
RATE_LIMIT__REQUESTS_PER_MINUTE=100
# ----------------------------------------------------------------------------
# Security
# ----------------------------------------------------------------------------
# Anonymous user ID (for shared/public notebooks)
ANONYMOUS_USER_ID=00000000-0000-0000-0000-000000000000
# Encryption key for OAuth tokens at rest (Fernet key)
# Generate with: python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
ENCRYPTION_KEY=
# Domain name for production deployment (used by Caddy reverse proxy)
DOMAIN_NAME=your-domain.com
# CORS (Production - set specific origins!)
# API__CORS_ORIGINS=["https://your-frontend-domain.com"]
# ----------------------------------------------------------------------------
# Development/Testing Only
# ----------------------------------------------------------------------------
# Set to true to enable test users
ENABLE_TEST_USERS=false
# ----------------------------------------------------------------------------
# Google Services (OAuth & Drive)
# ----------------------------------------------------------------------------
GOOGLE_CLIENT_ID=your_google_client_id_here
GOOGLE_CLIENT_SECRET=your_google_client_secret_here
GOOGLE_REDIRECT_URI=http://localhost:3000
# ----------------------------------------------------------------------------
# SMTP Configuration (Resend)
# ----------------------------------------------------------------------------
SMTP_HOST=smtp.resend.com
SMTP_PORT=465
SMTP_USER=resend
SMTP_PASSWORD=re_your_resend_api_key_here
SMTP_FROM_EMAIL=noreply@your-domain.com
SMTP_FROM_NAME="NotebookLLM"
# ----------------------------------------------------------------------------
# Optional Integrations
# ----------------------------------------------------------------------------
# Sentry (Error Tracking)
SENTRY_DSN=your_sentry_dsn_here
SENTRY_ENVIRONMENT=development
# Redis (Caching - Optional)
REDIS_URL=redis://localhost:6379/0
# ----------------------------------------------------------------------------
# Audio Generation (Kokoro TTS)
# ----------------------------------------------------------------------------
AUDIO__TTS_PROVIDER=kokoro # kokoro | elevenlabs | edge
AUDIO__SAMPLE_RATE=24000
AUDIO__OUTPUT_DIR=data/audio_output
# ============================================================================
# Notes:
# ============================================================================
# 1. Replace all "your_*_here" placeholders with actual values
# 2. In production, set ENVIRONMENT=production and DEBUG=false
# 3. Always use HTTPS endpoints in production
# 4. Keep this file secure - never commit to Git
# 5. For Docker, create a separate .env.production file
# ============================================================================
# SENTRY_DSN= # removed duplicate: dotenv later assignments override earlier ones,
#              # so this empty value would clobber the SENTRY_DSN set above.