Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 6 additions & 8 deletions docker-compose.registry.yml
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
version: '3.8'

services:
mcp-server:
image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-mcp-server:latest
image: ghcr.io/wombat45/sleeper-bot-mcp-server:latest
container_name: sleeper-bot-mcp-server
ports:
- "8000:8000"
Expand All @@ -25,7 +23,7 @@ services:
restart: unless-stopped

llm-agent:
image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-llm-agent:latest
image: ghcr.io/wombat45/sleeper-bot-llm-agent:latest
container_name: sleeper-bot-llm-agent
ports:
- "8001:8000"
Expand All @@ -35,9 +33,9 @@ services:
- API_KEY=${API_KEY}
- MCP_SERVER_URL=http://mcp-server:8000
- DEFAULT_LEAGUE_ID=${DEFAULT_LEAGUE_ID}
- LLM_PROVIDER=${LLM_PROVIDER:-ollama}
- LLM_URL=${LLM_URL:-http://host.docker.internal:11434/api/generate}
- LLM_API_KEY=${LLM_API_KEY:-}
- LLM_PROVIDER=${LLM_PROVIDER:-digitalocean}
- LLM_URL=${LLM_URL:-https://td4bxjc626e5xweih26eppev.agents.do-ai.run/api/v1/chat/completions}
- LLM_API_KEY=${LLM_API_KEY:-}  # never commit a real key as a fallback default; supply via .env or a secret store, and rotate the exposed key
- LLM_MODEL=${LLM_MODEL:-llama3.2:latest}
volumes:
- ./logs:/app/logs
Expand All @@ -55,7 +53,7 @@ services:
restart: unless-stopped

discord-bot:
image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-discord-bot:latest
image: ghcr.io/wombat45/sleeper-bot-discord-bot:latest
container_name: sleeper-bot-discord-bot
ports:
- "8002:8000"
Expand Down
5 changes: 4 additions & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,10 @@ services:
- API_KEY=${API_KEY:-your-api-key-here}
- MCP_SERVER_URL=http://mcp-server:8000
- DEFAULT_LEAGUE_ID=${DEFAULT_LEAGUE_ID:-}
- LLM_URL=http://host.docker.internal:11434/api/generate
- LLM_PROVIDER=digitalocean
- LLM_URL=https://td4bxjc626e5xweih26eppev.agents.do-ai.run/api/v1/chat/completions
- LLM_API_KEY=${LLM_API_KEY:-}  # redacted: a live API key was committed here — rotate it and inject via .env or a secret store
- LLM_MODEL=llama3.2:latest
- HOST=0.0.0.0
- PORT=8000
volumes:
Expand Down
56 changes: 48 additions & 8 deletions llm-agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,26 +323,66 @@ async def _generate_llm_response(self, query: str, data: Dict[str, Any]) -> str:
print(f"🤖 LLM Prompt length: {len(prompt)} characters")
print(f"🤖 LLM Prompt preview: {prompt[:200]}...")

# Call the LLM
# Get LLM configuration from environment
llm_provider = os.getenv("LLM_PROVIDER", "ollama")
llm_api_key = os.getenv("LLM_API_KEY", "")
llm_model = os.getenv("LLM_MODEL", "llama3.2:latest")

# Call the LLM based on provider
async with httpx.AsyncClient() as client:
response = await client.post(
LLM_URL,
json={
"model": "llama3.2:latest", # Use the model you have installed
if llm_provider == "digitalocean":
# DigitalOcean AI Agent format
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {llm_api_key}"
}
payload = {
"messages": [
{
"role": "user",
"content": prompt
}
],
"stream": False,
"include_functions_info": False,
"include_retrieval_info": False,
"include_guardrails_info": False
}
else:
# Ollama format (default)
headers = {"Content-Type": "application/json"}
payload = {
"model": llm_model,
"prompt": prompt,
"stream": False,
"options": {
"temperature": 0.7,
"top_p": 0.9,
"max_tokens": 1000 # Increased from 500
"max_tokens": 1000
}
},
}

response = await client.post(
LLM_URL,
json=payload,
headers=headers,
timeout=30
)

if response.status_code == 200:
result = response.json()
llm_response = result.get("response", "").strip()

# Parse response based on provider
if llm_provider == "digitalocean":
# DigitalOcean format: response is in choices[0].message.content
if "choices" in result and len(result["choices"]) > 0:
llm_response = result["choices"][0]["message"]["content"].strip()
else:
llm_response = result.get("response", "").strip()
else:
# Ollama format
llm_response = result.get("response", "").strip()

print(f"🤖 LLM Response length: {len(llm_response)} characters")
print(f"🤖 LLM Response preview: {llm_response[:200]}...")

Expand Down
Loading