# OpenTranscribe Environment Configuration
#
# SECURITY NOTICE: This is a template file with placeholder values.
# DO NOT use these placeholder values in production!
#
# For new installations:
# - Linux/Mac: ./setup-opentranscribe.sh will auto-generate secure passwords
# - Windows: The installer will auto-generate secure passwords
# - Manual: Copy this file to .env and replace ALL placeholder values
#
# All secrets are auto-generated during installation for maximum security.
# You can customize values in .env after installation if needed.
#=============================================================================
# DATABASE CONFIGURATION
#=============================================================================
# PostgreSQL Database Settings
POSTGRES_HOST=postgres
POSTGRES_PORT=5176
POSTGRES_USER=postgres
# CRITICAL: Auto-generated during install (32-char random hex)
POSTGRES_PASSWORD=CHANGE_ME_auto_generated_on_install
POSTGRES_DB=opentranscribe
# PostgreSQL TLS: disable/allow/prefer/require/verify-ca/verify-full
POSTGRES_SSLMODE=prefer
# Database initialization is handled by Alembic migrations on backend startup.
# No external SQL init file is needed.
#=============================================================================
# OBJECT STORAGE (MinIO S3-Compatible)
#=============================================================================
# MinIO Storage Settings
MINIO_HOST=minio
MINIO_PORT=5178
MINIO_CONSOLE_PORT=5179
# CRITICAL: Auto-generated during install (32-char random hex)
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=CHANGE_ME_auto_generated_on_install
MEDIA_BUCKET_NAME=opentranscribe
# Use HTTPS for MinIO connections (requires MinIO TLS configuration)
MINIO_SECURE=false
# MinIO Server-Side Encryption at Rest (AES-256-GCM)
# Automatically encrypts all new objects stored in MinIO. Transparent to the application.
# Existing unencrypted objects remain readable after enabling.
# Format: <key-name>:<base64-encoded-32-byte-key>
# Generate with: echo "opentranscribe-key:$(openssl rand -base64 32)"
# Auto-generated during install. Leave empty to disable encryption at rest.
MINIO_KMS_SECRET_KEY=CHANGE_ME_auto_generated_on_install
# Set to 'on' to enable auto-encryption (requires MINIO_KMS_SECRET_KEY)
MINIO_KMS_AUTO_ENCRYPTION=on
# Public URL for presigned URLs (how browsers access MinIO)
# Dev: Leave empty - defaults to http://localhost:5178
# Prod with nginx: Set to your public MinIO URL
# Example: MINIO_PUBLIC_URL=https://yourdomain.com/minio
# Example: MINIO_PUBLIC_URL=https://minio.yourdomain.com
MINIO_PUBLIC_URL=
# Custom Storage Paths (Optional - used with docker-compose.nas.yml overlay)
# Bind-mount data directories to specific disks for optimal performance
# Usage: add -f docker-compose.nas.yml to your docker compose command
#
# MinIO media storage → NAS (high capacity)
# MINIO_NAS_PATH=/mnt/nas/opentranscribe-minio
#
# PostgreSQL database → NVMe (fast random I/O)
# POSTGRES_DATA_PATH=/mnt/nvm/opentranscribe/pg
#
# OpenSearch indices → NVMe (fast indexing and vector search)
# OPENSEARCH_DATA_PATH=/mnt/nvm/opentranscribe/os
#=============================================================================
# REDIS CACHE & MESSAGE BROKER
#=============================================================================
# Redis Configuration
REDIS_HOST=redis
REDIS_PORT=5177
# OPTIONAL: Set a password for Redis (recommended for production)
# Auto-generated during install (32-char random hex)
# Leave empty to disable Redis authentication (not recommended for production)
REDIS_PASSWORD=CHANGE_ME_auto_generated_on_install
# Use rediss:// (TLS) for Redis connections (requires Redis TLS configuration)
REDIS_USE_TLS=false
#=============================================================================
# SEARCH ENGINE (OpenSearch)
#=============================================================================
# OpenSearch Configuration
OPENSEARCH_HOST=opensearch
OPENSEARCH_PORT=5180
OPENSEARCH_ADMIN_PORT=5181
# OpenSearch credentials (security plugin disabled by default in dev)
# Enable for production by setting OPENSEARCH_SECURITY_ENABLED=true
OPENSEARCH_USER=admin
OPENSEARCH_PASSWORD=CHANGE_ME_auto_generated_on_install
# Production security: enable TLS + security plugin for OpenSearch
# Use HTTPS for OpenSearch client connections
OPENSEARCH_USE_TLS=false
# Verify TLS certificates (set true for production with valid certs)
OPENSEARCH_VERIFY_CERTS=false
# Set to "false" to enable the OpenSearch security plugin (maps to DISABLE_SECURITY_PLUGIN)
OPENSEARCH_DISABLE_SECURITY=true
OPENSEARCH_ADMIN_PASSWORD=
# OpenSearch Neural Search (Semantic/Vector Search)
OPENSEARCH_NEURAL_SEARCH_ENABLED=true
OPENSEARCH_NEURAL_MODEL=huggingface/sentence-transformers/all-MiniLM-L6-v2
OPENSEARCH_NEURAL_PIPELINE=transcript-neural-ingest
# Available models (change after setup via Admin UI or by editing this file):
# Fast (384d): all-MiniLM-L6-v2 (default, 80MB, English)
# paraphrase-multilingual-MiniLM-L12-v2 (420MB, 50+ langs)
# Balanced (768d): all-mpnet-base-v2 (420MB, English)
# paraphrase-multilingual-mpnet-base-v2 (1.1GB, 50+ langs)
# Best quality: all-distilroberta-v1 (768d, 290MB, English)
# distiluse-base-multilingual-cased-v1 (512d, 480MB, 15 langs)
# Search Performance: Collapse Optimization
# OpenSearch groups results by file server-side using collapse + inner_hits.
# Max concurrent group searches for collapse inner_hits (default: 20, 0 = sequential)
# SEARCH_COLLAPSE_MAX_CONCURRENT=20
# Search Indexing Performance Tuning
# Bulk batch size: number of chunks per OpenSearch bulk request (default: 100)
# Lower values reduce per-request latency; higher values improve throughput
# SEARCH_BULK_BATCH_SIZE=100
# Neural ingest pipeline batch size: documents batched per embedding call (default: 5)
# SEARCH_NEURAL_BATCH_SIZE=5
# Reindex refresh interval: trigger a Lucene segment flush every N files (default: 100)
# Keeps search results fresh during large reindex operations
# SEARCH_REINDEX_REFRESH_INTERVAL=100
#=============================================================================
# SECURITY & AUTHENTICATION
#=============================================================================
# JWT Token Configuration
# CRITICAL: Used for user session authentication
# Auto-generated during install (64-char random hex for maximum security)
JWT_SECRET_KEY=CHANGE_ME_auto_generated_on_install
JWT_ALGORITHM=HS256
JWT_ACCESS_TOKEN_EXPIRE_MINUTES=1440
# Session Timeout Settings (NIST SP 800-63B compliant)
# Idle timeout: Session expires after this many minutes of inactivity
SESSION_IDLE_TIMEOUT_MINUTES=15
# Absolute timeout: Session expires after this many minutes regardless of activity
SESSION_ABSOLUTE_TIMEOUT_MINUTES=480
# Rate Limiting (OWASP recommended)
# Requests per minute allowed on authentication endpoints
RATE_LIMIT_AUTH_PER_MINUTE=10
# Requests per minute allowed on general API endpoints
RATE_LIMIT_API_PER_MINUTE=100
# Enable/disable rate limiting (set to 'false' to disable)
RATE_LIMIT_ENABLED=true
# Trusted proxy IPs for X-Forwarded-For header (comma-separated IPs or CIDR ranges)
# Only trust X-Forwarded-For from these IPs. Empty = trust direct connection only.
# Example: 127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
RATE_LIMIT_TRUSTED_PROXIES=
# Account Lockout Settings (NIST AC-7 compliant)
# Enable/disable account lockout after failed login attempts
ACCOUNT_LOCKOUT_ENABLED=true
# Number of failed attempts before account is locked
ACCOUNT_LOCKOUT_THRESHOLD=5
# Base lockout duration in minutes
ACCOUNT_LOCKOUT_DURATION_MINUTES=15
# Enable progressive lockout (increases duration with each lockout)
# 1st: 15min, 2nd: 30min, 3rd: 60min, 4th+: max duration
ACCOUNT_LOCKOUT_PROGRESSIVE=true
# Maximum lockout duration in minutes (24 hours)
ACCOUNT_LOCKOUT_MAX_DURATION_MINUTES=1440
# API Key Encryption
# CRITICAL: Used to encrypt LLM API keys stored in database
# Auto-generated during install (random string with prefix to ensure proper backend processing)
# NEVER change this after first use or encrypted data will be lost!
# To manually generate: echo "opentranscribe_$(openssl rand -base64 48)"
ENCRYPTION_KEY=CHANGE_ME_auto_generated_on_install
# Frontend URL (used in password reset emails)
FRONTEND_URL=http://localhost:5173
# SMTP Settings (for password reset emails)
# Leave SMTP_HOST empty to log reset links to console (development mode)
SMTP_HOST=
SMTP_PORT=587
SMTP_USER=
SMTP_PASSWORD=
SMTP_FROM=noreply@yourdomain.com
SMTP_USE_TLS=true
#=============================================================================
# APPLICATION PORTS (External Access)
#=============================================================================
# External port configuration (accessible from host machine)
# Consistent ports across all environments to avoid confusion
# NOTE: inline comments have been moved to their own lines — some dotenv
# parsers would otherwise include trailing "# ..." text in the value.
# The POSTGRES/REDIS/MINIO/OPENSEARCH port keys below restate values defined
# earlier in this file; keep both occurrences in sync (last value wins in
# most dotenv parsers).
# Web UI
FRONTEND_PORT=5173
# API Server
BACKEND_PORT=5174
# Celery Task Monitor
FLOWER_PORT=5175
# Database (for admin tools)
POSTGRES_PORT=5176
# Redis (for debugging)
REDIS_PORT=5177
# MinIO API
MINIO_PORT=5178
# MinIO Web Console
MINIO_CONSOLE_PORT=5179
# OpenSearch API
OPENSEARCH_PORT=5180
# OpenSearch Admin
OPENSEARCH_ADMIN_PORT=5181
# Embedded documentation site (proxied at /docs/ in-app)
DOCS_PORT=5183
# Flower Dashboard Authentication (protects the monitoring UI)
FLOWER_USER=admin
FLOWER_PASSWORD=CHANGE_ME_auto_generated_on_install
# URL prefix Flower serves under (server-side; must match nginx proxy_pass path)
FLOWER_URL_PREFIX=flower
#=============================================================================
# AI MODEL STORAGE & CACHING
#=============================================================================
# Model Cache Directory (on host machine)
# This directory persists AI models between container restarts (~2-6GB total)
MODEL_CACHE_DIR=./models
#=============================================================================
# HARDWARE DETECTION & GPU CONFIGURATION
#=============================================================================
# GPU Device Selection (for multi-GPU systems)
# Selects which GPU to use (0 = first GPU, 1 = second GPU, etc.)
GPU_DEVICE_ID=0
# GPU for speaker clustering (defaults to GPU_DEVICE_ID if not set)
# Use this to run clustering on a different GPU than transcription
# GPU_CLUSTERING_DEVICE=1
# Multi-GPU Worker Scaling (Optional - Advanced Feature)
# Enable this to run multiple parallel GPU workers on a dedicated GPU device
# This significantly increases transcription throughput for multi-GPU systems
# Example: GPU 0 = LLM, GPU 1 = Single transcription worker, GPU 2 = 4 parallel workers
#
# GPU_SCALE_ENABLED: Set to 'true' to enable multi-GPU scaling (default: false)
# GPU_SCALE_DEVICE_ID: Which GPU device to use for scaled workers (default: 2)
# GPU_SCALE_WORKERS: Number of parallel workers on the scaled GPU (default: 4)
#
# Usage: ./opentr.sh start dev --gpu-scale
# Or manually: docker compose -f docker-compose.yml -f docker-compose.gpu-scale.yml up
GPU_SCALE_ENABLED=false
GPU_SCALE_DEVICE_ID=2
GPU_SCALE_WORKERS=4
GPU_SCALE_DEFAULT_WORKER=1
# GPU_SCALE_MAX_TASKS: Worker process restarts after this many tasks (memory safety)
# Default: 500. Set to 0 for never restart (infinite). Lower values reclaim memory
# more often but incur model reload overhead (~10-15s per restart).
# GPU_SCALE_MAX_TASKS=500
# GPU Worker Max Tasks (default single worker)
# Default: 100000 (effectively never restart, keeps models warm).
# Note: Celery does not accept 0; use a high number instead.
# GPU_MAX_TASKS=100000
# GPU Default Batch Size (for default single-GPU worker)
# Auto-detected from GPU VRAM: 32 for 40GB+, 24 for 24GB, 12 for 12GB, 8 for 8GB.
# Only set this if you want to override auto-detection (e.g., multi-GPU with different cards).
# GPU_DEFAULT_BATCH_SIZE=auto
# GPU Concurrent Requests
# Controls how many transcription tasks run simultaneously on the GPU.
# The Whisper model is loaded once at startup and pinned in VRAM, then shared
# by all worker threads — no per-task model reload overhead.
# Default: 1 (sequential, safe starting point). Increase for higher throughput
# on GPUs with enough VRAM. "auto" calculates from VRAM:
# (total - 9GB for models) / 2GB per task, max 4.
# VRAM budget: ~9GB shared models + ~2GB per concurrent task.
# RTX 3080 12GB → 1, RTX 3090 24GB → 3, A6000 48GB → 4
GPU_CONCURRENT_REQUESTS=1
# GPU_WORKER_POOL: Celery pool type for GPU worker.
# Default: "threads" — model stays loaded in the process between tasks,
# keeping weights pinned in GPU VRAM. Even at concurrency=1, threads pool
# avoids the fork-per-task overhead of prefork and keeps the model warm.
# Set to "prefork" only if you need full process isolation per task (legacy).
# GPU_WORKER_POOL=threads
# Migration GPU settings are AUTO-DETECTED from available VRAM.
# These overrides are only needed for advanced tuning — most users should leave them unset.
# MIGRATION_GPU_WORKERS=1 # Override: parallel model instances per batch task
# MIGRATION_MAX_CONCURRENT_TASKS=1 # Override: max batch tasks running simultaneously
# Benchmark & Profiling (enable before running scripts/benchmark_*.py)
# ENABLE_BENCHMARK_TIMING records inter-stage timestamps (dispatch, preprocess, GPU, postprocess)
# to Redis for pipeline gap analysis. ENABLE_VRAM_PROFILING captures per-step GPU VRAM
# snapshots (NVML + PyTorch) and saves them to Redis for the admin GPU profiles endpoint.
# Both are lightweight and safe for production, but add Redis writes per task.
# ENABLE_BENCHMARK_TIMING=false
# ENABLE_VRAM_PROFILING=false
#=============================================================================
# AI MODELS CONFIGURATION
#=============================================================================
# =============================================================================
# WHISPER MODEL CONFIGURATION
# =============================================================================
# Choose based on your needs:
#
# large-v3-turbo (DEFAULT) - 6x faster, ~6GB VRAM, excellent English/multilingual
# - Best for: English content, speed-critical workflows, most use cases
# - Warning: Cannot translate to English, slightly reduced Thai/Cantonese accuracy
#
# large-v3 - Best accuracy across all 100+ languages, ~10GB VRAM
# - Best for: Non-English, low-resource languages, maximum accuracy
# - Required if using "Translate to English" feature
#
# large-v2 - Legacy model, ~10GB VRAM, good balance
# - Best for: Fallback if v3 models have issues
#
# Smaller models (for low-VRAM or CPU): tiny, base, small, medium
#
WHISPER_MODEL=large-v3-turbo
# Lightweight model for fast CPU transcription (default: base)
# Used when users select "Fast Processing" — runs on CPU, no GPU impact
# WHISPER_LIGHTWEIGHT_MODEL=base
# Speaker Diarization Model
DIARIZATION_MODEL=pyannote/speaker-diarization-3.1
MIN_SPEAKERS=1
# Can be increased to 50+ for large conferences (no hard limit)
MAX_SPEAKERS=20
# Diarization batch sizes (auto-detected by default)
# DIARIZATION_BATCH_SIZE=32 # Segmentation batch size (reduce for low VRAM)
# DIARIZATION_EMBEDDING_BATCH_SIZE=32 # Embedding batch size (stock default is 1)
# HuggingFace Token (REQUIRED for speaker diarization)
# Get your token at: https://huggingface.co/settings/tokens
# You must accept PyAnnote model terms at: https://huggingface.co/pyannote/speaker-diarization-3.1
HUGGINGFACE_TOKEN=
# =============================================================================
# TRANSCRIPTION PERFORMANCE OPTIONS
# =============================================================================
# These options can significantly reduce processing time at the cost of some features
# Whisper beam_size: lower = faster but slightly less accurate
# Default: 5 (WhisperX default). Set to 1 for greedy decoding (~25-40% faster transcription)
# Trade-off: beam_size=1 is ~1-2% lower WER for English
# WHISPER_BEAM_SIZE=5
# Whisper compute_type: quantization for faster inference
# Default: auto-detected (float16 on CUDA). Options: float16, int8_float16, int8, float32
# int8_float16 gives ~15-25% speedup with negligible quality loss (-0.1 WER)
# Requires CUDA compute capability >= 7.0
# WHISPER_COMPUTE_TYPE=float16
# VRAM profiling: captures per-step GPU memory usage and timing data
# Enable temporarily to diagnose VRAM bottlenecks or benchmark pipeline changes
# Reports are logged at INFO level when enabled
ENABLE_VRAM_PROFILING=false
#=============================================================================
# LLM PROVIDER CONFIGURATION (Optional - for AI Summarization)
#=============================================================================
# LLM Provider Selection
# IMPORTANT: Users configure LLM providers through the Web UI (Settings → LLM Provider)
# This is a system-wide default fallback when no user settings exist
#
# Options: vllm, openai, ollama, anthropic, openrouter, or leave empty
# Leave empty for transcription-only mode (no AI summarization/speaker identification)
LLM_PROVIDER=
# LLM Context Window Configuration
# Users configure max_tokens (context window) via Web UI: Settings → LLM Provider → Max Tokens
# Set to your model's actual capability for best performance:
# - GPT-4 Turbo: 128000
# - Claude 3 Opus/Sonnet/Haiku: 200000
# - Mistral 7B: 8192
# - Custom vLLM models: Check model card (e.g., gpt-oss-20b: 130000)
# Default: 8192 (conservative fallback)
# Range: 512 - 2,000,000
# ─────────────────────────────────────────────────────────────────────────
# vLLM (Self-Hosted Open Source LLM Server)
# ─────────────────────────────────────────────────────────────────────────
VLLM_BASE_URL=http://localhost:8012/v1
VLLM_MODEL_NAME=mistralai/Mistral-7B-Instruct-v0.2
VLLM_API_KEY=
# ─────────────────────────────────────────────────────────────────────────
# OpenAI (Commercial Cloud API)
# ─────────────────────────────────────────────────────────────────────────
OPENAI_API_KEY=
OPENAI_MODEL_NAME=gpt-4o-mini
OPENAI_BASE_URL=https://api.openai.com/v1
# ─────────────────────────────────────────────────────────────────────────
# Ollama (Self-Hosted Local LLM)
# ─────────────────────────────────────────────────────────────────────────
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL_NAME=llama2:7b-chat
# ─────────────────────────────────────────────────────────────────────────
# Anthropic Claude (Commercial Cloud API)
# ─────────────────────────────────────────────────────────────────────────
ANTHROPIC_API_KEY=
ANTHROPIC_MODEL_NAME=claude-3-haiku-20240307
ANTHROPIC_BASE_URL=https://api.anthropic.com
# ─────────────────────────────────────────────────────────────────────────
# OpenRouter (Multi-Provider API Gateway)
# ─────────────────────────────────────────────────────────────────────────
OPENROUTER_API_KEY=
OPENROUTER_MODEL_NAME=anthropic/claude-3-haiku
OPENROUTER_BASE_URL=https://openrouter.ai/api/v1
#=============================================================================
# ASR (SPEECH RECOGNITION) PROVIDER
#=============================================================================
# Provider: local (default, requires GPU), deepgram, assemblyai, openai,
# google, azure, aws, speechmatics, gladia
ASR_PROVIDER=local
# --- Deepgram (ASR_PROVIDER=deepgram) ---
DEEPGRAM_API_KEY=
DEEPGRAM_MODEL=nova-3
# --- AssemblyAI (ASR_PROVIDER=assemblyai) ---
ASSEMBLYAI_API_KEY=
ASSEMBLYAI_MODEL=universal
# --- OpenAI Whisper / GPT-4o Transcribe (ASR_PROVIDER=openai) ---
# Uses OPENAI_API_KEY defined above
OPENAI_ASR_MODEL=gpt-4o-transcribe
# --- Google Cloud Speech (ASR_PROVIDER=google) ---
# Provide path to service account JSON file
GOOGLE_CLOUD_CREDENTIALS=
GOOGLE_ASR_MODEL=chirp-3
# --- Azure Speech (ASR_PROVIDER=azure) ---
AZURE_SPEECH_KEY=
AZURE_SPEECH_REGION=eastus
AZURE_ASR_MODEL=whisper
# --- Amazon Transcribe (ASR_PROVIDER=aws) ---
# Access credentials — leave empty to use an IAM role / instance profile instead
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
# Region where your Transcribe jobs will run (must match bucket region)
AWS_REGION=us-east-1
AWS_ASR_MODEL=standard
# S3 bucket used by Amazon Transcribe to store intermediate output (must exist)
AWS_TRANSCRIBE_BUCKET=
# --- Speechmatics (ASR_PROVIDER=speechmatics) ---
SPEECHMATICS_API_KEY=
SPEECHMATICS_MODEL=standard
# --- Gladia (ASR_PROVIDER=gladia) ---
GLADIA_API_KEY=
GLADIA_MODEL=standard
# --- Cloud ASR options ---
# Extract speaker embeddings locally (CPU) for cross-file speaker matching
# Set false to skip (faster, but disables cross-file speaker matching)
CLOUD_ASR_EXTRACT_EMBEDDINGS=true
# Concurrency for cloud-asr worker (API-Lite mode)
CLOUD_ASR_WORKER_CONCURRENCY=4
# Lite backend image (API-Lite mode)
BACKEND_LITE_IMAGE=davidamacey/opentranscribe-backend-lite:latest
#=============================================================================
# DEPLOYMENT MODE
#=============================================================================
# full: Local GPU + optional cloud ASR (default)
# lite: Cloud-only ASR, no GPU required (~2 GB image)
DEPLOYMENT_MODE=full
#=============================================================================
# YOUTUBE ANTI-BOT DETECTION & RATE LIMITING
#=============================================================================
# Prevents YouTube from flagging your IP as a bot with progressive delays,
# cookie authentication, and per-user rate limits.
# Cookie-Based Authentication (allows yt-dlp to use browser cookies)
# ─────────────────────────────────────────────────────────────────────────
# Enables downloading sign-in required, age-restricted, or members-only videos
# Requires browser to be installed IN the Docker container
# Supported browsers: firefox, chrome, chromium, edge, safari, opera
# Leave empty to disable cookie authentication
YOUTUBE_COOKIE_BROWSER=
# Alternative: Explicit cookie file path (for headless servers)
# Use this if you export cookies.txt manually from your desktop browser
# Leave empty to use browser extraction instead
YOUTUBE_COOKIE_FILE=
# Playlist Staggering (progressive delays when dispatching playlist videos)
# ─────────────────────────────────────────────────────────────────────────
# Prevents all playlist videos from downloading simultaneously
YOUTUBE_PLAYLIST_STAGGER_ENABLED=true
# Minimum delay between videos (seconds)
YOUTUBE_PLAYLIST_STAGGER_MIN_SECONDS=5
# Maximum delay in seconds (delay caps at this value)
YOUTUBE_PLAYLIST_STAGGER_MAX_SECONDS=30
# Delay increases by this amount (seconds) per video
YOUTUBE_PLAYLIST_STAGGER_INCREMENT=5
# Pre-Download Jitter (random delay before each download starts)
# ─────────────────────────────────────────────────────────────────────────
# Adds unpredictable timing to make traffic look more human
YOUTUBE_PRE_DOWNLOAD_JITTER_ENABLED=true
# Minimum random delay (seconds)
YOUTUBE_PRE_DOWNLOAD_JITTER_MIN_SECONDS=2
# Maximum random delay (seconds)
YOUTUBE_PRE_DOWNLOAD_JITTER_MAX_SECONDS=15
# User Rate Limiting (per-user quotas to prevent abuse)
# ─────────────────────────────────────────────────────────────────────────
# Limits downloads per user with hourly/daily quotas tracked in Redis
YOUTUBE_USER_RATE_LIMIT_ENABLED=true
# Maximum downloads per user per hour
YOUTUBE_USER_RATE_LIMIT_PER_HOUR=50
# Maximum downloads per user per day
YOUTUBE_USER_RATE_LIMIT_PER_DAY=500
# Recovery throttle: max YouTube downloads re-queued per health-check cycle
# (every 10 min). With default of 3, max recovery rate is 18/hour.
# Keep well below YOUTUBE_USER_RATE_LIMIT_PER_HOUR to leave room for
# user-initiated downloads. Transcription retries are NOT throttled by this.
YOUTUBE_RECOVERY_BATCH_SIZE=3
# Master switch for automatic YouTube download retries.
# Set to false to stop ALL automatic retry loops (both Celery task retries and
# the periodic recovery re-queuing). Manual downloads via the UI still work.
# Re-enable by setting to true and restarting the celery-worker container.
YOUTUBE_AUTO_RETRY_ENABLED=true
# Celery task-level rate limit for YouTube downloads.
# Enforced by the download worker regardless of queue depth — tasks queue
# freely but only N execute per time window. Works in concert with the
# recovery batch size above and the per-user rate limits.
# Format: "N/h" (per hour), "N/m" (per minute), "N/s" (per second).
# Set to empty string to disable.
YOUTUBE_DOWNLOAD_RATE_LIMIT=30/h
#=============================================================================
# FRONTEND CONFIGURATION
#=============================================================================
# Node Environment
NODE_ENV=production
# API Connection URLs (used by frontend to connect to backend)
VITE_API_BASE_URL=http://localhost:5174/api
VITE_WS_BASE_URL=ws://localhost:5174/ws
# Flower (Celery Task Monitor) URL prefix for frontend links
# Both dev (Vite proxy) and prod (Nginx proxy) inject auth headers server-side
VITE_FLOWER_URL_PREFIX=flower
#=============================================================================
# ADVANCED CONFIGURATION (Internal Docker Settings)
#=============================================================================
# The following variables are used internally by Docker containers
# and typically don't need to be changed unless you're doing advanced customization
# Internal service hostnames (Docker Compose service names)
# These are DNS names within the Docker network
# POSTGRES_HOST=postgres (already set above)
# MINIO_HOST=minio (already set above)
# REDIS_HOST=redis (already set above)
# OPENSEARCH_HOST=opensearch (already set above)
# Celery Configuration (auto-configured from REDIS settings)
# CELERY_BROKER_URL is automatically constructed from REDIS_HOST, REDIS_PORT, REDIS_PASSWORD
# CELERY_RESULT_BACKEND is automatically constructed from REDIS_HOST, REDIS_PORT, REDIS_PASSWORD
# Celery Worker Concurrency Tuning (Optional)
# Adjust these for bulk processing workloads to balance throughput vs resource usage
#
# Download worker: parallel video/URL downloads
# DOWNLOAD_CONCURRENCY=3 # Number of concurrent downloads (default: 3, increase for bulk imports)
# DOWNLOAD_MAX_TASKS=10 # Restart worker process after N tasks (default: 10)
#
# NLP worker: LLM-powered summarization, speaker ID, topic extraction
# NLP_CONCURRENCY=4 # Number of concurrent NLP tasks (default: 4)
# NLP_MAX_TASKS=50 # Restart worker process after N tasks (default: 50)
# PostgreSQL Performance Tuning (Optional)
# These override the PostgreSQL defaults in docker-compose.yml.
# Defaults are tuned for a 4-8GB RAM server with SSD storage.
# Adjust based on your available RAM and storage type.
#
# PG_SHARED_BUFFERS=256MB # 25% of available RAM (default: 256MB)
# PG_EFFECTIVE_CACHE_SIZE=1GB # ~75% of available RAM for OS cache estimate (default: 1GB)
# PG_WORK_MEM=16MB # Per-sort/hash operation memory (default: 16MB)
# PG_MAINTENANCE_WORK_MEM=128MB # For VACUUM, CREATE INDEX (default: 128MB)
# PG_RANDOM_PAGE_COST=1.1 # SSD=1.1, HDD=4.0 (default: 1.1 for SSD)
# PG_EFFECTIVE_IO_CONCURRENCY=200 # SSD=200, HDD=2 (default: 200 for SSD)
# PG_MAX_CONNECTIONS=200 # Max client connections (default: 200)
# Database URL (auto-constructed from POSTGRES_* variables)
# DATABASE_URL is automatically built by the backend from individual POSTGRES_* settings
#=============================================================================
# NGINX REVERSE PROXY (Optional - for HTTPS/SSL)
#=============================================================================
# NGINX provides HTTPS access, which is REQUIRED for:
# - Browser microphone recording (browsers block mic access over HTTP)
# - Secure access from other devices on your network
# - Production deployments with custom domains
#
# Quick Setup for Homelab:
# 1. Generate certificates: ./scripts/generate-ssl-cert.sh opentranscribe.local --auto-ip
# 2. Uncomment and set NGINX_SERVER_NAME below
# 3. Run: ./opentr.sh start dev (auto-detects NGINX and adds the overlay)
# 4. Trust the certificate on client devices (see docs/NGINX_SETUP.md)
#
# For production with Let's Encrypt, see docs/NGINX_SETUP.md
#
# Primary hostname for NGINX (uncomment to enable NGINX)
# Examples: opentranscribe.local, transcribe.home, myserver.example.com
# NGINX_SERVER_NAME=opentranscribe.local
# Optional: Custom ports (defaults: 80 for HTTP redirect, 443 for HTTPS)
# NGINX_HTTP_PORT=80
# NGINX_HTTPS_PORT=443
# Optional: Custom certificate paths (defaults to ./nginx/ssl/server.crt and .key)
# NGINX_CERT_FILE=./nginx/ssl/server.crt
# NGINX_CERT_KEY=./nginx/ssl/server.key
#=============================================================================
# OFFLINE/AIR-GAPPED DEPLOYMENT
#=============================================================================
# For offline deployments, the installer will set these automatically
# HuggingFace Hub Offline Mode
# HF_HUB_OFFLINE=1
#=============================================================================
# LDAP/ACTIVE DIRECTORY AUTHENTICATION (Optional)
#=============================================================================
# Enable enterprise authentication via LDAP or Active Directory.
# When enabled, users can log in with their corporate credentials.
# Local database users continue to work alongside LDAP users (hybrid mode).
#
# REQUIREMENTS:
# - A read-only service account in your directory (for user lookups)
# - Network access from the backend container to your LDAP server
# - For LDAPS: Valid SSL certificate on your LDAP server
#
# HOW IT WORKS:
# 1. User enters username or email on login page
# 2. System checks if user exists locally with auth_type='local'
# 3. If not local, authenticates against LDAP server
# 4. On first LDAP login, user account is auto-created in database
# 5. Users in LDAP_ADMIN_USERS automatically get admin role
#
# SECURITY NOTES:
# - LDAP users cannot change passwords in OpenTranscribe (must use AD)
# - Password fields are hidden in UI for LDAP users
# - Service account should have minimal read-only permissions
# Enable LDAP authentication (set to 'true' to enable)
LDAP_ENABLED=false
# LDAP Server Connection
# Use ldaps:// for secure LDAP (recommended), ldap:// for unencrypted
# LDAP_USE_SSL enables implicit TLS (LDAPS, typically port 636)
# LDAP_USE_TLS enables StartTLS on a plain ldap:// connection (typically port 389)
LDAP_SERVER=ldaps://ad.yourcompany.com
LDAP_PORT=636
LDAP_USE_SSL=true
LDAP_USE_TLS=false
# Connection timeout in seconds
LDAP_TIMEOUT=10
# Service Account Credentials
# This account is used to search for users in the directory.
# Request a read-only service account from your IT department.
#
# Active Directory Example:
# LDAP_BIND_DN=CN=svc-opentranscribe,OU=Service Accounts,DC=company,DC=com
#
# OpenLDAP Example:
# LDAP_BIND_DN=cn=readonly,dc=company,dc=com
LDAP_BIND_DN="CN=svc-opentranscribe,OU=Service Accounts,DC=yourcompany,DC=com"
LDAP_BIND_PASSWORD=your-service-account-password
# User Search Configuration
# LDAP_SEARCH_BASE: Where to search for users (typically your domain root or OU)
# Example: DC=yourcompany,DC=com
# Example: OU=Users,DC=yourcompany,DC=com
#
# LDAP_USERNAME_ATTR: The attribute containing the username
# Active Directory: sAMAccountName
# OpenLDAP: uid
#
# LDAP_USER_SEARCH_FILTER: Filter to find users by username
# {username} is replaced with the login input
LDAP_SEARCH_BASE=DC=yourcompany,DC=com
LDAP_USERNAME_ATTR=sAMAccountName
LDAP_USER_SEARCH_FILTER=(sAMAccountName={username})
# User Attribute Mapping
# These attributes are read from the directory to populate user profiles
LDAP_EMAIL_ATTR=mail
LDAP_NAME_ATTR=cn
# Admin User Assignment
# Comma-separated list of LDAP usernames that should have admin role.
# These users will be granted admin privileges on first login.
# Use the username (sAMAccountName), not the full DN or email.
#
# Example: LDAP_ADMIN_USERS=john.smith,jane.doe,it.admin
LDAP_ADMIN_USERS=
# Admin Group Assignment (Alternative to LDAP_ADMIN_USERS)
# Comma-separated list of LDAP group DNs that grant admin access.
# Users who are members of any of these groups will be granted admin role.
# Example: CN=OpenTranscribe Admins,OU=Groups,DC=company,DC=com
LDAP_ADMIN_GROUPS=
# Required Groups for Access (Optional)
# Comma-separated list of LDAP group DNs required for application access.
# If set, users must be a member of at least one group to log in.
# Leave empty to allow all authenticated LDAP users.
# Example: CN=OpenTranscribe Users,OU=Groups,DC=company,DC=com
LDAP_USER_GROUPS=
# Recursive Group Membership (Active Directory nested groups)
# Enable to check nested group membership via the Active Directory
# LDAP_MATCHING_RULE_IN_CHAIN matching rule (OID 1.2.840.113556.1.4.1941),
# so users in nested groups are recognized as members of parent groups
LDAP_RECURSIVE_GROUPS=false
# Group Membership Attribute
# The LDAP attribute containing group memberships
# Active Directory: memberOf
# OpenLDAP: isMemberOf (or memberOf with overlay)
LDAP_GROUP_ATTR=memberOf
# -----------------------------------------------------------------------------
# CONFIGURATION EXAMPLES
# -----------------------------------------------------------------------------
#
# === ACTIVE DIRECTORY (Most Common) ===
# LDAP_ENABLED=true
# LDAP_SERVER=ldaps://ad.company.com
# LDAP_PORT=636
# LDAP_USE_SSL=true
# LDAP_USE_TLS=false
# LDAP_BIND_DN=CN=svc-opentranscribe,OU=Service Accounts,DC=company,DC=com
# LDAP_BIND_PASSWORD=YourSecurePassword
# LDAP_SEARCH_BASE=DC=company,DC=com
# LDAP_USERNAME_ATTR=sAMAccountName
# LDAP_USER_SEARCH_FILTER=(sAMAccountName={username})
# LDAP_EMAIL_ATTR=mail
# LDAP_NAME_ATTR=cn
# LDAP_ADMIN_USERS=admin.user,it.manager
#
# === OPENLDAP ===
# LDAP_ENABLED=true
# LDAP_SERVER=ldaps://ldap.company.com
# LDAP_PORT=636
# LDAP_USE_SSL=true
# LDAP_USE_TLS=false
# LDAP_BIND_DN=cn=readonly,dc=company,dc=com
# LDAP_BIND_PASSWORD=YourSecurePassword
# LDAP_SEARCH_BASE=ou=people,dc=company,dc=com
# LDAP_USERNAME_ATTR=uid
# LDAP_USER_SEARCH_FILTER=(uid={username})
# LDAP_EMAIL_ATTR=mail
# LDAP_NAME_ATTR=cn
# LDAP_ADMIN_USERS=admin,sysadmin
#
# === TESTING WITH LLDAP (Docker) ===
# See: docker-compose.ldap-test.yml
# LDAP_ENABLED=true
# LDAP_SERVER=ldap://lldap-test
# LDAP_PORT=3890
# LDAP_USE_SSL=false
# LDAP_USE_TLS=false
# LDAP_BIND_DN=uid=admin,ou=people,dc=example,dc=com
# LDAP_BIND_PASSWORD=admin_password
# LDAP_SEARCH_BASE=dc=example,dc=com
# LDAP_USERNAME_ATTR=uid
# LDAP_USER_SEARCH_FILTER=(uid={username})
# LDAP_EMAIL_ATTR=mail
# LDAP_NAME_ATTR=cn
# LDAP_ADMIN_USERS=admin,testadmin
# -----------------------------------------------------------------------------
#=============================================================================
# KEYCLOAK/OIDC AUTHENTICATION (Optional)
#=============================================================================
# Enable Single Sign-On (SSO) via Keycloak or any OpenID Connect provider.
# This allows users to authenticate using your organization's identity provider.
#
# FEATURES:
# - Single Sign-On (SSO) with corporate identity providers
# - Role-based access control synchronized from Keycloak
# - Support for LDAP/AD federation through Keycloak
# - Social login providers (Google, GitHub, etc.) via Keycloak
#
# REQUIREMENTS:
# - A running Keycloak server (or compatible OIDC provider)
# - A configured realm and client in Keycloak
# - Network access from the backend container to Keycloak
#
# SETUP GUIDE: See docs/KEYCLOAK_SETUP.md for step-by-step instructions
#
# DEV ENVIRONMENT:
# Start Keycloak: docker compose -f docker-compose.yml -f docker-compose.keycloak.yml up -d keycloak
# Admin Console: http://localhost:8180 (default: admin/admin)
# Enable Keycloak/OIDC authentication (set to 'true' to enable)
KEYCLOAK_ENABLED=false
# Keycloak Server URL (without trailing slash)
# IMPORTANT: This URL must be accessible from the user's browser
# Local Development: http://localhost:8180
# LAN Access: http://192.168.x.x:8180 (replace with your server IP)
# Production: https://keycloak.yourdomain.com
KEYCLOAK_SERVER_URL=http://localhost:8180
# Keycloak Internal URL (for backend-to-Keycloak communication)
# Use container name for Docker network communication
KEYCLOAK_INTERNAL_URL=http://transcribe-app-keycloak-1:8080
# Keycloak Realm Name
# This is the realm you created in Keycloak for OpenTranscribe
KEYCLOAK_REALM=opentranscribe
# Keycloak Client Configuration
# Create a confidential client in Keycloak with:
# - Client authentication: ON
# - Valid redirect URIs: http://localhost:5173/login (local dev)
# http://your-server-ip/login (LAN access)
# https://yourdomain.com/login (production)
KEYCLOAK_CLIENT_ID=opentranscribe-app
KEYCLOAK_CLIENT_SECRET=
# Callback URL (where Keycloak redirects after authentication)
# IMPORTANT: This must point to the FRONTEND login page, not the backend API
# Local Development: http://localhost:5173/login
# LAN Access: http://192.168.x.x/login (replace with your server IP)
# Production: https://yourdomain.com/login
KEYCLOAK_CALLBACK_URL=http://localhost:5173/login
# Admin Role Mapping
# Users with this Keycloak realm role will be granted admin access in OpenTranscribe
# Create this role in Keycloak: Realm roles → Create role → "admin"
KEYCLOAK_ADMIN_ROLE=admin
# Connection timeout in seconds
KEYCLOAK_TIMEOUT=30
# PKCE Security (OAuth 2.1 / RFC 7636)
# Enable Proof Key for Code Exchange to prevent authorization code interception
# Recommended: true for all deployments
KEYCLOAK_USE_PKCE=true
# JWT Token Validation
# Verify the audience claim in tokens (recommended: set to 'true' in production)
KEYCLOAK_VERIFY_AUDIENCE=false
# Expected audience value (defaults to client ID if empty)
KEYCLOAK_AUDIENCE=
# Verify the issuer claim in tokens (recommended for production)
# Validates token was issued by your Keycloak realm
KEYCLOAK_VERIFY_ISSUER=true
# Keycloak Dev Server Credentials (only used by docker-compose.keycloak.yml)
# These set the initial admin credentials for the Keycloak dev container
# WARNING: change these defaults before exposing Keycloak beyond localhost
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
KEYCLOAK_PORT=8180
# -----------------------------------------------------------------------------
# KEYCLOAK CONFIGURATION EXAMPLES
# -----------------------------------------------------------------------------
#
# === LOCAL DEVELOPMENT (localhost access only) ===
# KEYCLOAK_ENABLED=true
# KEYCLOAK_SERVER_URL=http://localhost:8180
# KEYCLOAK_REALM=opentranscribe
# KEYCLOAK_CLIENT_ID=opentranscribe-app
# KEYCLOAK_CLIENT_SECRET=your-client-secret-from-keycloak
# KEYCLOAK_CALLBACK_URL=http://localhost:5173/login
# KEYCLOAK_ADMIN_ROLE=admin
#
# === LAN ACCESS (remote clients on local network) ===
# KEYCLOAK_ENABLED=true
# KEYCLOAK_SERVER_URL=http://192.168.30.11:8180 # Replace with your server IP
# KEYCLOAK_REALM=opentranscribe
# KEYCLOAK_CLIENT_ID=opentranscribe-app
# KEYCLOAK_CLIENT_SECRET=your-client-secret-from-keycloak
# KEYCLOAK_CALLBACK_URL=http://192.168.30.11/login # Match your server IP
# KEYCLOAK_ADMIN_ROLE=admin
#
# NOTE: For LAN access, update Keycloak client redirect URIs to include:
# http://your-server-ip/* and https://your-server-ip/*
#
# === PRODUCTION SETUP (public domain) ===
# KEYCLOAK_ENABLED=true
# KEYCLOAK_SERVER_URL=https://keycloak.yourcompany.com
# KEYCLOAK_REALM=opentranscribe
# KEYCLOAK_CLIENT_ID=opentranscribe-app
# KEYCLOAK_CLIENT_SECRET=your-secure-client-secret
# KEYCLOAK_CALLBACK_URL=https://transcribe.yourcompany.com/login
# KEYCLOAK_ADMIN_ROLE=admin
# -----------------------------------------------------------------------------
#=============================================================================
# PKI/X.509 CERTIFICATE AUTHENTICATION (Optional)
#=============================================================================
# Enable certificate-based authentication using X.509 client certificates.
# This is commonly used in high-security environments with smart cards (CAC/PIV).
#
# FEATURES:
# - Passwordless authentication using client certificates
# - Support for smart cards (CAC, PIV, hardware tokens)
# - Mutual TLS (mTLS) for enhanced security
# - Certificate-to-user mapping via Distinguished Name (DN)
#
# REQUIREMENTS:
# - Nginx configured for mutual TLS (ssl_verify_client)
# - CA certificate for validating client certificates
# - Client certificates issued by your CA
#
# HOW IT WORKS:
# 1. Nginx terminates TLS and validates client certificate against CA
# 2. Nginx extracts certificate DN and passes it via headers
# 3. Backend authenticates user based on certificate DN
# 4. User account is auto-created on first certificate login
#
# SETUP GUIDE: See docs/PKI_SETUP.md for detailed instructions
#
# DEV ENVIRONMENT:
# Start Step CA: docker compose -f docker-compose.yml -f docker-compose.keycloak.yml --profile pki up -d
# Enable PKI/X.509 authentication (set to 'true' to enable)
PKI_ENABLED=false
# CA Certificate Path (for certificate validation)
# Path to the Certificate Authority cert that issued client certificates
# This is used by Nginx to validate client certificates
PKI_CA_CERT_PATH=/etc/ssl/certs/ca.crt
# Certificate Revocation Checking
# Enable CRL or OCSP checking for revoked certificates
# Requires additional Nginx configuration (ssl_crl directive)
PKI_VERIFY_REVOCATION=false
# Header Names (must match Nginx proxy_set_header configuration)
# These headers are set by Nginx after validating the client certificate
PKI_CERT_HEADER=X-Client-Cert
PKI_CERT_DN_HEADER=X-Client-Cert-DN
# Admin Certificate DNs
# Comma-separated list of certificate Distinguished Names that grant admin access
# Users with certificates matching these DNs will be assigned the admin role
#
# IMPORTANT: DN must match exactly as it appears in the certificate
# Use: openssl x509 -in cert.crt -subject -noout
#
# Example: CN=John Admin,OU=IT,O=Company,C=US
PKI_ADMIN_DNS=
# Trusted Proxy IPs for PKI Headers
# Only accept certificate headers from these proxy IPs (comma-separated IPs or CIDR)
# CRITICAL: Must be set in production to prevent header injection attacks
# Example: 127.0.0.1,10.0.0.1
PKI_TRUSTED_PROXIES=
# Certificate Revocation Settings (when PKI_VERIFY_REVOCATION=true)
# OCSP request timeout in seconds
PKI_OCSP_TIMEOUT_SECONDS=5
# CRL cache duration in seconds (1 hour default)
PKI_CRL_CACHE_SECONDS=3600
# Soft-fail mode: allow auth if revocation check fails (availability vs security)
# true = allow login if OCSP/CRL check fails (higher availability)
# false = deny login if revocation cannot be verified (higher security)
PKI_REVOCATION_SOFT_FAIL=true
# Maximum cache entries for OCSP responses (LRU eviction)
PKI_OCSP_CACHE_MAX_SIZE=1000
# Maximum cache entries for CRLs (LRU eviction)
PKI_CRL_CACHE_MAX_SIZE=1000
# Step CA Dev Server Port (only used by docker-compose.keycloak.yml --profile pki)
STEP_CA_PORT=9000
# -----------------------------------------------------------------------------
# PKI CONFIGURATION EXAMPLE
# -----------------------------------------------------------------------------
#
# === BASIC SETUP ===
# PKI_ENABLED=true
# PKI_CA_CERT_PATH=/etc/nginx/certs/ca.crt
# PKI_VERIFY_REVOCATION=false
# PKI_CERT_HEADER=X-Client-Cert