# OpenTranscribe Development Overrides
# This file is AUTOMATICALLY loaded in development (no -f flag needed!)
#
# Usage: Just run `docker compose up` - this file merges with docker-compose.yml
#
# This file contains ONLY development-specific settings:
# - Build from local Dockerfiles (instead of pulling images)
# - Mount source code for live reloading
# - Development-specific commands
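#
# A minimal sketch of common invocations (assumes Docker Compose v2):
#   docker compose up -d        # start with this override merged in automatically
#   docker compose config       # print the fully merged configuration to verify
#   docker compose up --build   # rebuild the local images before starting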
services:
  # All backend services share the same image to save disk space.
  # The image is built once and reused by every backend-based service.
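  # Model caches are mounted from ${MODEL_CACHE_DIR:-./models}; this is standard
  # Compose variable interpolation, so the path falls back to ./models whenever
  # MODEL_CACHE_DIR is unset in the shell environment or the .env file.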
  backend:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development
      - ${MODEL_CACHE_DIR:-./models}/sentence-transformers:/home/appuser/.cache/sentence-transformers
    command: uvicorn app.main:app --host 0.0.0.0 --port 8080 --reload  # Enable hot reload

  celery-worker:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development
      - ${MODEL_CACHE_DIR:-./models}/huggingface:/home/appuser/.cache/huggingface
      - ${MODEL_CACHE_DIR:-./models}/torch:/home/appuser/.cache/torch
      - ${MODEL_CACHE_DIR:-./models}/nltk_data:/home/appuser/.cache/nltk_data
      - ${MODEL_CACHE_DIR:-./models}/sentence-transformers:/home/appuser/.cache/sentence-transformers

  celery-download-worker:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development
      - ${MODEL_CACHE_DIR:-./models}/huggingface:/home/appuser/.cache/huggingface
      - ${MODEL_CACHE_DIR:-./models}/torch:/home/appuser/.cache/torch

  celery-cpu-worker:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development

  celery-nlp-worker:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development

  celery-embedding-worker:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development
      - ${MODEL_CACHE_DIR:-./models}/sentence-transformers:/home/appuser/.cache/sentence-transformers

  celery-beat:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development

  frontend:
    image: opentranscribe-frontend:latest
    build:
      context: ./frontend
      dockerfile: Dockerfile.dev  # Use the development Dockerfile
    volumes:
      - ./frontend:/app  # Mount source code for development
      - /app/node_modules  # Exclude node_modules from the host mount
    ports:
      - "${FRONTEND_PORT:-5173}:5173"  # Vite dev server port
    environment:
      - NODE_ENV=development  # Development mode
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:5173"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

  flower:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      # Flower only needs persistent DB storage, not hot-reload source code.
      # Using flower_data:/app (not ./backend:/app) avoids a mount conflict at /app.
      - flower_data:/app
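      # NOTE: flower_data is assumed to be declared under the top-level `volumes:`
      # key of the base docker-compose.yml; Compose errors on undefined named volumes.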

  docs:
    image: opentranscribe-docs:latest
    build:
      context: ./docs-site
      dockerfile: Dockerfile
    ports:
      - "${DOCS_PORT:-5183}:8080"

  # GPU Scaled Worker - Development build (only active with the --gpu-scale flag)
  celery-worker-gpu-scaled:
    image: opentranscribe-backend:latest
    build:
      context: ./backend
      dockerfile: Dockerfile.prod
    volumes:
      - ./backend:/app  # Mount source code for development
      - ${MODEL_CACHE_DIR:-./models}/huggingface:/home/appuser/.cache/huggingface
      - ${MODEL_CACHE_DIR:-./models}/torch:/home/appuser/.cache/torch
      - ${MODEL_CACHE_DIR:-./models}/nltk_data:/home/appuser/.cache/nltk_data
      - ${MODEL_CACHE_DIR:-./models}/sentence-transformers:/home/appuser/.cache/sentence-transformers