
Commit a3644e4

Merge pull request #3 from Wombat45/mcp-bot
fix llm api call
2 parents: b7b9f10 + 87b0365

3 files changed: 58 additions & 17 deletions


docker-compose.registry.yml

Lines changed: 6 additions & 8 deletions
```diff
@@ -1,8 +1,6 @@
-version: '3.8'
-
 services:
   mcp-server:
-    image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-mcp-server:latest
+    image: ghcr.io/wombat45/sleeper-bot-mcp-server:latest
     container_name: sleeper-bot-mcp-server
     ports:
       - "8000:8000"
@@ -25,7 +23,7 @@ services:
     restart: unless-stopped

   llm-agent:
-    image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-llm-agent:latest
+    image: ghcr.io/wombat45/sleeper-bot-llm-agent:latest
     container_name: sleeper-bot-llm-agent
     ports:
       - "8001:8000"
@@ -35,9 +33,9 @@ services:
       - API_KEY=${API_KEY}
       - MCP_SERVER_URL=http://mcp-server:8000
       - DEFAULT_LEAGUE_ID=${DEFAULT_LEAGUE_ID}
-      - LLM_PROVIDER=${LLM_PROVIDER:-ollama}
-      - LLM_URL=${LLM_URL:-http://host.docker.internal:11434/api/generate}
-      - LLM_API_KEY=${LLM_API_KEY:-}
+      - LLM_PROVIDER=${LLM_PROVIDER:-digitalocean}
+      - LLM_URL=${LLM_URL:-https://td4bxjc626e5xweih26eppev.agents.do-ai.run/api/v1/chat/completions}
+      - LLM_API_KEY=${LLM_API_KEY:-O1AD1a0JeIl3B-T9sJvvBgHEHfwIoPze}
       - LLM_MODEL=${LLM_MODEL:-llama3.2:latest}
     volumes:
       - ./logs:/app/logs
@@ -55,7 +53,7 @@ services:
     restart: unless-stopped

   discord-bot:
-    image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-discord-bot:latest
+    image: ghcr.io/wombat45/sleeper-bot-discord-bot:latest
     container_name: sleeper-bot-discord-bot
     ports:
       - "8002:8000"
```

docker-compose.yml

Lines changed: 4 additions & 1 deletion
```diff
@@ -43,7 +43,10 @@ services:
       - API_KEY=${API_KEY:-your-api-key-here}
       - MCP_SERVER_URL=http://mcp-server:8000
       - DEFAULT_LEAGUE_ID=${DEFAULT_LEAGUE_ID:-}
-      - LLM_URL=http://host.docker.internal:11434/api/generate
+      - LLM_PROVIDER=digitalocean
+      - LLM_URL=https://td4bxjc626e5xweih26eppev.agents.do-ai.run/api/v1/chat/completions
+      - LLM_API_KEY=O1AD1a0JeIl3B-T9sJvvBgHEHfwIoPze
+      - LLM_MODEL=llama3.2:latest
       - HOST=0.0.0.0
       - PORT=8000
     volumes:
```
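
The two compose files differ in intent: the registry file keeps each LLM setting overridable via compose's `${VAR:-default}` interpolation, while this base `docker-compose.yml` pins the DigitalOcean values directly. A minimal sketch of how those settings surface inside the container, assuming the agent reads them with `os.getenv` fallbacks as the `agent.py` hunk below does; the `resolve_llm_config` helper name is hypothetical:

```python
import os

# Hypothetical helper illustrating how the compose values above land in the
# container: compose's ${VAR:-default} plays the same role as os.getenv's
# second argument, so an unset variable falls back instead of erroring.
def resolve_llm_config() -> dict:
    return {
        "provider": os.getenv("LLM_PROVIDER", "digitalocean"),
        "url": os.getenv(
            "LLM_URL",
            "https://td4bxjc626e5xweih26eppev.agents.do-ai.run/api/v1/chat/completions",
        ),
        "api_key": os.getenv("LLM_API_KEY", ""),
        "model": os.getenv("LLM_MODEL", "llama3.2:latest"),
    }
```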

llm-agent/agent.py

Lines changed: 48 additions & 8 deletions
```diff
@@ -323,26 +323,66 @@ async def _generate_llm_response(self, query: str, data: Dict[str, Any]) -> str:
         print(f"🤖 LLM Prompt length: {len(prompt)} characters")
         print(f"🤖 LLM Prompt preview: {prompt[:200]}...")

-        # Call the LLM
+        # Get LLM configuration from environment
+        llm_provider = os.getenv("LLM_PROVIDER", "ollama")
+        llm_api_key = os.getenv("LLM_API_KEY", "")
+        llm_model = os.getenv("LLM_MODEL", "llama3.2:latest")
+
+        # Call the LLM based on provider
         async with httpx.AsyncClient() as client:
-            response = await client.post(
-                LLM_URL,
-                json={
-                    "model": "llama3.2:latest",  # Use the model you have installed
+            if llm_provider == "digitalocean":
+                # DigitalOcean AI Agent format
+                headers = {
+                    "Content-Type": "application/json",
+                    "Authorization": f"Bearer {llm_api_key}"
+                }
+                payload = {
+                    "messages": [
+                        {
+                            "role": "user",
+                            "content": prompt
+                        }
+                    ],
+                    "stream": False,
+                    "include_functions_info": False,
+                    "include_retrieval_info": False,
+                    "include_guardrails_info": False
+                }
+            else:
+                # Ollama format (default)
+                headers = {"Content-Type": "application/json"}
+                payload = {
+                    "model": llm_model,
                     "prompt": prompt,
                     "stream": False,
                     "options": {
                         "temperature": 0.7,
                         "top_p": 0.9,
-                        "max_tokens": 1000  # Increased from 500
+                        "max_tokens": 1000
                     }
-                },
+                }
+
+            response = await client.post(
+                LLM_URL,
+                json=payload,
+                headers=headers,
                 timeout=30
             )

             if response.status_code == 200:
                 result = response.json()
-                llm_response = result.get("response", "").strip()
+
+                # Parse response based on provider
+                if llm_provider == "digitalocean":
+                    # DigitalOcean format: response is in choices[0].message.content
+                    if "choices" in result and len(result["choices"]) > 0:
+                        llm_response = result["choices"][0]["message"]["content"].strip()
+                    else:
+                        llm_response = result.get("response", "").strip()
+                else:
+                    # Ollama format
+                    llm_response = result.get("response", "").strip()
+
                 print(f"🤖 LLM Response length: {len(llm_response)} characters")
                 print(f"🤖 LLM Response preview: {llm_response[:200]}...")
```
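
The new call path can be exercised outside the agent. Below is a minimal standalone sketch of the provider switch, assuming the same environment variables as the compose files and that the DigitalOcean endpoint follows the OpenAI-style chat-completions shape the parsing branch above expects; the `ask_llm` helper is hypothetical, not part of the repo:

```python
import asyncio
import os

import httpx

# Same default as the pre-change compose files; only reachable from inside Docker.
LLM_URL = os.getenv("LLM_URL", "http://host.docker.internal:11434/api/generate")


async def ask_llm(prompt: str) -> str:
    """Hypothetical standalone version of the provider switch in the diff above."""
    provider = os.getenv("LLM_PROVIDER", "ollama")
    api_key = os.getenv("LLM_API_KEY", "")
    model = os.getenv("LLM_MODEL", "llama3.2:latest")

    if provider == "digitalocean":
        # Chat-completions style request with Bearer auth
        headers = {"Authorization": f"Bearer {api_key}"}
        payload = {"messages": [{"role": "user", "content": prompt}], "stream": False}
    else:
        # Ollama /api/generate style request
        headers = {}
        payload = {"model": model, "prompt": prompt, "stream": False}

    async with httpx.AsyncClient() as client:
        response = await client.post(LLM_URL, json=payload, headers=headers, timeout=30)
        response.raise_for_status()
        result = response.json()

    if provider == "digitalocean":
        # DigitalOcean answer lives in choices[0].message.content, as parsed above
        choices = result.get("choices", [])
        if choices:
            return choices[0]["message"]["content"].strip()
        return result.get("response", "").strip()
    # Ollama puts the completion in the top-level "response" field
    return result.get("response", "").strip()


if __name__ == "__main__":
    print(asyncio.run(ask_llm("Who should I start this week?")))
```

Setting `LLM_PROVIDER=digitalocean` with a matching `LLM_URL` and key exercises the Bearer-auth branch; leaving the variables unset falls back to the Ollama path, mirroring the defaults in `agent.py`.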
