# docker-compose.yml: SymChat stack (Ollama backend + frontend)
services:
  # Ollama Service (Local LLM Server)
  ollama:
    image: ollama/ollama:latest
    container_name: symchat-ollama
    restart: unless-stopped
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0
    # Uncomment below for GPU support (NVIDIA)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    networks:
      - symchat-network
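  # A minimal usage sketch once the ollama service is running; "llama3.2" is
  # an example model name, not mandated by this file (any model from the
  # Ollama library works):
  #   docker exec -it symchat-ollama ollama pull llama3.2
  #   curl http://localhost:11434/api/tags    # list installed models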
  # llama.cpp Server
  # llamacpp:
  #   image: ghcr.io/ggerganov/llama.cpp:server
  #   container_name: symchat-llamacpp
  #   restart: unless-stopped
  #   ports:
  #     - "8080:8080"
  #   volumes:
  #     - ./models:/root/.cache/llama.cpp
  #   command: >
  #     --host 0.0.0.0
  #     --port 8080
  #     -hf ggml-org/gemma-3-1b-it-GGUF
  #     -c 4096
  #     -ngl 35
  #   # Uncomment below for GPU support (NVIDIA)
  #   # deploy:
  #   #   resources:
  #   #     reservations:
  #   #       devices:
  #   #         - driver: nvidia
  #   #           count: 1
  #   #           capabilities: [gpu]
  #   networks:
  #     - symchat-network
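  # If enabled, the llama.cpp server speaks an OpenAI-compatible API; a quick
  # smoke-test sketch (endpoint path per the llama.cpp server docs):
  #   curl http://localhost:8080/v1/chat/completions \
  #     -H "Content-Type: application/json" \
  #     -d '{"messages":[{"role":"user","content":"Hello"}]}'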
  # SymChat Frontend
  frontend:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: symchat-frontend
    restart: unless-stopped
    ports:
      - "3000:5173"  # host port 3000 maps to Vite's default dev-server port 5173
    environment:
      # VITE_-prefixed vars are exposed to client-side code, so the browser
      # reaches Ollama through the host-published port
      - VITE_OLLAMA_API_URL=http://localhost:11434
    networks:
      - symchat-network
volumes:
  ollama_data:
    driver: local

networks:
  symchat-network:
    driver: bridge
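# Bringing up the full stack (standard Docker Compose CLI):
#   docker compose up -d --build
#   # Frontend: http://localhost:3000, Ollama API: http://localhost:11434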