Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions .env

This file was deleted.

121 changes: 121 additions & 0 deletions .github/workflows/smoke-tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
name: Smoke Tests

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  smoke-test:
    runs-on: ubuntu-latest

    strategy:
      # Let the remaining matrix legs finish even if one project fails,
      # so a single broken service doesn't hide results for the others.
      fail-fast: false
      matrix:
        project: [go-genai, py-genai, node-genai, rust-genai]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Install Docker Model Plugin
        run: |
          # Add Docker's official GPG key.
          # `-y` is required: non-interactive apt aborts on any prompt.
          sudo apt-get update
          sudo apt-get install -y ca-certificates curl
          sudo install -m 0755 -d /etc/apt/keyrings
          sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
          sudo chmod a+r /etc/apt/keyrings/docker.asc

          # Add the Docker repository to Apt sources
          echo \
            "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
            $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
            sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
          sudo apt-get update

          # Install docker-model-plugin
          sudo apt-get install -y docker-model-plugin

          # Verify installation
          if sudo docker model version; then
            echo "Docker Model Plugin installed successfully"
          else
            echo "Failed to install Docker Model Plugin"
            exit 1
          fi

      - name: Build project
        working-directory: ./${{ matrix.project }}
        run: |
          docker compose build

      - name: Start services and run smoke tests
        working-directory: ./${{ matrix.project }}
        run: |
          # Start services in detached mode
          docker compose up -d

          # Map each project to its published host port.
          case "${{ matrix.project }}" in
            "go-genai")   PORT=8080 ;;
            "py-genai")   PORT=8081 ;;
            "node-genai") PORT=8082 ;;
            "rust-genai") PORT=8083 ;;
            *) echo "Unknown project: ${{ matrix.project }}"; exit 1 ;;
          esac

          # Wait (bounded by `timeout`) for the service health endpoint.
          # Double quotes let $PORT expand without quote-splicing.
          echo "Waiting for service on port $PORT to be healthy..."
          timeout 120s bash -c "until curl -fs http://localhost:$PORT/health > /dev/null; do
            echo 'Service not ready yet, waiting...'
            sleep 5
          done"

          echo "Service is healthy, starting smoke tests..."

          # Test main page — this one IS fatal on failure.
          echo "Testing main page..."
          if ! curl -f http://localhost:$PORT/; then
            echo "Main page failed"
            exit 1
          fi

          # Test chat API with actual model interaction.
          # `|| true` keeps the step alive under bash -e when curl times out:
          # model interaction may be flaky and must not fail the job.
          echo "Testing chat API with model..."
          RESPONSE=$(curl -s -X POST http://localhost:$PORT/api/chat \
            -H "Content-Type: application/json" \
            -d '{"message": "Hello"}' \
            --max-time 30) || true

          if [[ "$RESPONSE" == *"response"* ]]; then
            echo "✅ Chat API test passed"
          else
            echo "❌ Chat API test failed: $RESPONSE"
            # Deliberately non-fatal (see note above).
          fi

          # Test model info endpoint (also best-effort).
          echo "Testing model info..."
          MODEL_INFO=$(curl -s -X POST http://localhost:$PORT/api/chat \
            -H "Content-Type: application/json" \
            -d '{"message": "!modelinfo"}' \
            --max-time 10) || true

          if [[ "$MODEL_INFO" == *"model"* ]]; then
            echo "✅ Model info test passed"
          else
            echo "❌ Model info test failed: $MODEL_INFO"
          fi

          echo "✅ Smoke tests completed for ${{ matrix.project }}"

      - name: Check logs on failure
        if: failure()
        working-directory: ./${{ matrix.project }}
        run: |
          echo "=== Docker Compose Logs ==="
          docker compose logs
          echo "=== Container Status ==="
          docker compose ps

      - name: Tear down services
        # Always clean up containers/volumes, pass or fail.
        if: always()
        working-directory: ./${{ matrix.project }}
        run: docker compose down -v


4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ To change these settings, simply edit the `.env` file in the root directory of t
cd hello-genai
```

2. Run the application using the script:
2. Start the application using Docker Compose:
```bash
./run.sh
docker compose up
```

3. Open your browser and visit the following links:
Expand Down
92 changes: 5 additions & 87 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,88 +1,6 @@
# Root compose file: delegates to each project's own compose file.
# Per-service configuration (ports, env, healthchecks) now lives in
# the included files.
include:
  - go-genai/docker-compose.yml
  - py-genai/docker-compose.yml
  - node-genai/docker-compose.yml
  # NOTE(review): rust-genai uses the `.yaml` extension while the others
  # use `.yml` — confirm this matches the actual filename on disk.
  - rust-genai/docker-compose.yaml
22 changes: 22 additions & 0 deletions go-genai/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
services:
  go-genai:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "8080:8080"
    environment:
      - PORT=8080
    # Attach the model with the standard Compose `models` short syntax so
    # Docker Model Runner injects LLAMA_URL / LLAMA_MODEL into the container
    # (the service reads exactly those variables). The previous
    # `x-genai-models` key is a Compose extension field and is ignored, so
    # nothing was ever wired to the service.
    models:
      - llama
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

models:
  llama:
    model: ai/llama3.2:1B-Q8_0
    context_size: 2048
18 changes: 10 additions & 8 deletions go-genai/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,14 +88,16 @@ func loadConfig() Configuration {
port = "8080"
}

llmBaseURL := os.Getenv("LLM_BASE_URL")
if llmBaseURL == "" {
logger.Println("WARNING: LLM_BASE_URL is not set. API calls will fail.")
// Use Docker Model Runner injected variables
llamaURL := os.Getenv("LLAMA_URL")
llamaModel := os.Getenv("LLAMA_MODEL")

if llamaURL == "" {
logger.Println("WARNING: No LLM endpoint configured. Set LLAMA_URL.")
}

llmModelName := os.Getenv("LLM_MODEL_NAME")
if llmModelName == "" {
logger.Println("WARNING: LLM_MODEL_NAME is not set. Using default model.")
if llamaModel == "" {
logger.Println("WARNING: No LLM model configured. Set LLAMA_MODEL.")
}

logLevel := os.Getenv("LOG_LEVEL")
Expand All @@ -105,8 +107,8 @@ func loadConfig() Configuration {

return Configuration{
Port: port,
LLMBaseURL: llmBaseURL,
LLMModelName: llmModelName,
LLMBaseURL: llamaURL,
LLMModelName: llamaModel,
LogLevel: logLevel,
Version: "1.0.0",
}
Expand Down
36 changes: 14 additions & 22 deletions node-genai/app.js
Original file line number Diff line number Diff line change
@@ -1,23 +1,20 @@
const express = require('express');
const path = require('path');
const axios = require('axios');
const dotenv = require('dotenv');
const fs = require('fs');

// Load environment variables
dotenv.config();

const app = express();
const PORT = process.env.PORT || 8080;

// Helper functions
// Build the chat-completions endpoint from the base URL injected by
// Docker Model Runner (LLAMA_URL). Returns a string URL; if LLAMA_URL is
// unset the result contains "undefined" — callers get a failing request
// rather than a crash here.
function getLLMEndpoint() {
  const llamaUrl = process.env.LLAMA_URL;
  return `${llamaUrl}/chat/completions`;
}

// Model identifier injected by Docker Model Runner (LLAMA_MODEL).
// May be undefined when the variable is not set.
function getModelName() {
  return process.env.LLAMA_MODEL;
}

// Middleware
Expand All @@ -29,6 +26,15 @@ app.get('/', (req, res) => {
res.sendFile(path.join(__dirname, 'views', 'index.html'));
});

// Liveness probe: reports status plus the resolved LLM endpoint and
// model name so operators can see the active configuration.
app.get('/health', (req, res) => {
  const health = {
    status: 'healthy',
    timestamp: new Date().toISOString(),
    llm_endpoint: getLLMEndpoint(),
    model: getModelName()
  };
  res.json(health);
});

app.post('/api/chat', async (req, res) => {
const { message } = req.body;

Expand Down Expand Up @@ -92,17 +98,3 @@ app.listen(PORT, () => {
console.log(`Using model: ${getModelName()}`);
});

// Check and create default .env file if it doesn't exist
function checkEnvFile() {
if (!fs.existsSync('.env')) {
console.log('Creating default .env file...');
const defaultEnv =
`# Configuration for the LLM service
LLM_BASE_URL=http://host.docker.internal:12434/engines/llama.cpp/v1
LLM_MODEL_NAME=ignaciolopezluna020/llama3.2:1b
`;
fs.writeFileSync('.env', defaultEnv);
}
}

checkEnvFile();
22 changes: 22 additions & 0 deletions node-genai/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
services:
  node-genai:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      # Published on host port 8082; the app listens on 8080 internally.
      - "8082:8080"
    environment:
      - PORT=8080
    # Standard Compose `models` short syntax so Docker Model Runner injects
    # LLAMA_URL / LLAMA_MODEL (the variables app.js reads). The previous
    # `x-genai-models` extension key is ignored by Compose.
    models:
      - llama
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

models:
  llama:
    model: ai/llama3.2:1B-Q8_0
    context_size: 2048
1 change: 0 additions & 1 deletion node-genai/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
},
"dependencies": {
"axios": "^1.6.5",
"dotenv": "^16.3.1",
"express": "^4.18.2"
},
"devDependencies": {
Expand Down
Loading