From 7c0882b62e994611326920a836894a45630ba243 Mon Sep 17 00:00:00 2001 From: nazzjr20 Date: Thu, 21 Aug 2025 20:30:27 -0400 Subject: [PATCH] add pipelines --- .github/workflows/build-discord-bot.yml | 58 ++++++ .github/workflows/build-llm-agent.yml | 58 ++++++ .github/workflows/build-mcp-server.yml | 58 ++++++ .gitignore | 5 +- CI_CD_README.md | 238 ++++++++++++++++++++++++ bot/bot.py | 16 +- docker-compose.registry.yml | 87 +++++++++ llm-agent/agent.py | 47 ++++- 8 files changed, 555 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/build-discord-bot.yml create mode 100644 .github/workflows/build-llm-agent.yml create mode 100644 .github/workflows/build-mcp-server.yml create mode 100644 CI_CD_README.md create mode 100644 docker-compose.registry.yml diff --git a/.github/workflows/build-discord-bot.yml b/.github/workflows/build-discord-bot.yml new file mode 100644 index 0000000..247832b --- /dev/null +++ b/.github/workflows/build-discord-bot.yml @@ -0,0 +1,58 @@ +name: Build and Publish Discord Bot + +on: + push: + branches: [ main, develop ] + paths: [ 'bot/**', '.github/workflows/build-discord-bot.yml' ] + pull_request: + branches: [ main ] + paths: [ 'bot/**' ] + release: + types: [ published ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}-discord-bot + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,prefix={{branch}}- + type=raw,value=latest,enable={{is_default_branch}} 
+ type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: ./bot + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/build-llm-agent.yml b/.github/workflows/build-llm-agent.yml new file mode 100644 index 0000000..6c2ef64 --- /dev/null +++ b/.github/workflows/build-llm-agent.yml @@ -0,0 +1,58 @@ +name: Build and Publish LLM Agent + +on: + push: + branches: [ main, develop ] + paths: [ 'llm-agent/**', '.github/workflows/build-llm-agent.yml' ] + pull_request: + branches: [ main ] + paths: [ 'llm-agent/**' ] + release: + types: [ published ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}-llm-agent + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,prefix={{branch}}- + type=raw,value=latest,enable={{is_default_branch}} + type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: ./llm-agent + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/build-mcp-server.yml b/.github/workflows/build-mcp-server.yml new file mode 100644 index 0000000..305737d --- /dev/null +++
b/.github/workflows/build-mcp-server.yml @@ -0,0 +1,58 @@ +name: Build and Publish MCP Server + +on: + push: + branches: [ main, develop ] + paths: [ 'mcp-server/**', '.github/workflows/build-mcp-server.yml' ] + pull_request: + branches: [ main ] + paths: [ 'mcp-server/**' ] + release: + types: [ published ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}-mcp-server + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,prefix={{branch}}- + type=raw,value=latest,enable={{is_default_branch}} + type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: ./mcp-server + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.gitignore b/.gitignore index a1b9cbb..2bfa056 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ .env -__pycache__/ \ No newline at end of file +__pycache__ +/ansible +/ansible/* +/.ansible \ No newline at end of file diff --git a/CI_CD_README.md b/CI_CD_README.md new file mode 100644 index 0000000..779ac2a --- /dev/null +++ b/CI_CD_README.md @@ -0,0 +1,238 @@ +# 🚀 CI/CD Pipeline with GitHub Container Registry + +This project now includes automated CI/CD pipelines that build Docker images and publish them to GitHub Container Registry (GHCR).
+ +## 🎯 What This Gives You + +### **Before (Local Builds):** +- Copy source code to target host +- Build Docker images on target host +- Slow deployments, potential build failures +- Inconsistent environments + +### **After (Registry Deployments):** +- Pre-built, tested images +- Fast `docker pull` deployments +- Consistent environments everywhere +- Version control and rollbacks +- Professional CI/CD pipeline + +## 📋 Setup Requirements + +### **1. Repository Settings** +- **Public repository** (recommended for free GHCR) +- **GitHub Actions enabled** +- **Packages permissions** for Actions + +### **2. Secrets (Optional)** +If using private images, add these secrets: +```bash +# In GitHub repository settings > Secrets and variables > Actions +GHCR_TOKEN=your_github_personal_access_token +``` + +## 🔄 How the CI/CD Works + +### **Automatic Triggers:** +- **Push to main/develop** → Build and publish images +- **Pull requests** → Build images for testing +- **Releases** → Build and tag release images + +### **Image Naming:** +``` +ghcr.io/your-username/sleeper-bot-mcp-server:latest +ghcr.io/your-username/sleeper-bot-llm-agent:latest +ghcr.io/your-username/sleeper-bot-discord-bot:latest +``` + +### **Available Tags:** +- `latest` - Latest main branch +- `develop` - Latest develop branch +- `main-abc123` - Specific commit on main +- `develop-def456` - Specific commit on develop + +## 🚀 Deployment Options + +### **Option 1: Registry Deployment (Recommended)** +```bash +# Deploy using pre-built images +ansible-playbook -i ansible/inventory.yml ansible/deploy-registry.yml \ + -e "github_repository=your-username/sleeper-bot" \ + -e "image_tag=latest" +``` + +### **Option 2: Local Build Deployment** +```bash +# Deploy by building locally (original method) +ansible-playbook -i ansible/inventory.yml ansible/deploy.yml +``` + +## 📁 New Files Created + +### **GitHub Actions Workflows:** +- `.github/workflows/build-mcp-server.yml` +- `.github/workflows/build-llm-agent.yml` +- 
`.github/workflows/build-discord-bot.yml` + +### **Registry Docker Compose:** +- `docker-compose.registry.yml` - Pulls from GHCR + +### **Registry Ansible Playbook:** +- `ansible/deploy-registry.yml` - Deploys from registry + +## 🔧 Configuration + +### **Environment Variables:** +```bash +# Required for registry deployment +GITHUB_REPOSITORY=your-username/sleeper-bot +IMAGE_TAG=latest + +# Your existing variables +API_KEY=your-secure-api-key +DISCORD_TOKEN=your-discord-token +DEFAULT_LEAGUE_ID=your-league-id +LLM_PROVIDER=ollama +LLM_URL=http://host.docker.internal:11434/api/generate +LLM_MODEL=llama3.2:latest +``` + +### **Ansible Variables:** +```yaml +# In ansible/group_vars/sleeper_bot_hosts.yml +github_repository: "your-username/sleeper-bot" +image_tag: "latest" +``` + +## 🚀 Quick Start + +### **1. First Time Setup:** +```bash +# Push your code to trigger first build +git add . +git commit -m "Add CI/CD pipeline" +git push origin main + +# Wait for GitHub Actions to complete +# Check Actions tab in your repository +``` + +### **2. Deploy to Production:** +```bash +# Deploy using registry images +ansible-playbook -i ansible/inventory.yml ansible/deploy-registry.yml +``` + +### **3. Update and Redeploy:** +```bash +# Make code changes +git add . 
+git commit -m "Fix bot response handling" +git push origin main + +# Wait for new images to build +# Deploy with new images +ansible-playbook -i ansible/inventory.yml ansible/deploy-registry.yml +``` + +## 📊 Monitoring and Management + +### **Check Build Status:** +- GitHub repository → Actions tab +- Monitor build progress and logs + +### **View Published Images:** +- GitHub repository → Packages tab +- See all published container images + +### **Service Management:** +```bash +# On target host +cd /opt/sleeper-bot + +# Check status +docker compose ps + +# View logs +docker compose logs -f + +# Restart services +docker compose restart + +# Update to latest images +docker compose pull +docker compose up -d +``` + +## 🔄 Rollback Strategy + +### **Rollback to Previous Version:** +```bash +# Deploy specific tag +ansible-playbook -i ansible/inventory.yml ansible/deploy-registry.yml \ + -e "image_tag=main-abc123" + +# Or rollback on host +cd /opt/sleeper-bot +docker compose pull ghcr.io/your-username/sleeper-bot-mcp-server:main-abc123 +docker compose up -d +``` + +## 💰 Cost Analysis + +### **GitHub Container Registry:** +- **Public repos**: 100% FREE +- **Private repos**: 500MB free, then $0.50/GB + +### **GitHub Actions:** +- **Public repos**: 2,000 minutes/month FREE +- **Private repos**: 2,000 minutes/month FREE +- **Additional**: $0.008/minute + +### **Estimated Monthly Cost:** +- **Small project**: $0-5/month +- **Medium project**: $5-20/month +- **Large project**: $20+/month + +## 🚨 Troubleshooting + +### **Build Failures:** +```bash +# Check GitHub Actions logs +# Verify Dockerfile syntax +# Check for dependency issues +``` + +### **Deployment Issues:** +```bash +# Verify image names in registry +# Check network connectivity +# Validate environment variables +``` + +### **Common Issues:** +1. **Permission denied**: Check repository permissions +2. **Image not found**: Verify image names and tags +3. **Build timeout**: Check Dockerfile optimization +4. 
**Registry auth**: Verify GHCR access + +## 🎉 Benefits Summary + +✅ **Faster deployments** - No more building on target hosts +✅ **Consistent environments** - Same images everywhere +✅ **Version control** - Tagged releases and rollbacks +✅ **Professional CI/CD** - Automated testing and building +✅ **Cost effective** - Free for public repos +✅ **Scalable** - Easy to deploy to multiple hosts +✅ **Reliable** - Pre-tested, pre-built images + +## 🔮 Next Steps + +1. **Push code** to trigger first builds +2. **Test deployment** with registry images +3. **Set up monitoring** for build status +4. **Configure alerts** for build failures +5. **Add testing** to CI/CD pipeline +6. **Implement staging** environment + +This CI/CD setup transforms your deployment from a manual, error-prone process to a professional, automated pipeline that's fast, reliable, and scalable! diff --git a/bot/bot.py b/bot/bot.py index a3de469..1899727 100644 --- a/bot/bot.py +++ b/bot/bot.py @@ -85,11 +85,11 @@ async def on_message(message): # Process the question using the same logic as the ask command await process_fantasy_football_question(message, question) - # # Also respond to messages that seem like fantasy football questions (optional) - # elif any(keyword in message.content.lower() for keyword in ['fantasy', 'football', 'league', 'team', 'roster', 'player', 'nfl', 'sleeper']): - # # Only respond if the message is a question or seems like it needs a response - # if '?' in message.content or any(word in message.content.lower() for word in ['who', 'what', 'when', 'where', 'why', 'how', 'tell me', 'show me']): - # await process_fantasy_football_question(message, message.content) + # Also respond to messages that seem like fantasy football questions (optional) + elif any(keyword in message.content.lower() for keyword in ['fantasy', 'football', 'league', 'team', 'roster', 'player', 'nfl', 'sleeper']): + # Only respond if the message is a question or seems like it needs a response + if '?' 
in message.content or any(word in message.content.lower() for word in ['who', 'what', 'when', 'where', 'why', 'how', 'tell me', 'show me']): + await process_fantasy_football_question(message, message.content) async def process_fantasy_football_question(message, question): @@ -114,6 +114,12 @@ async def process_fantasy_football_question(message, question): result = response.json() answer = result.get("response", "Sorry, I couldn't get a response.") + # Check if the response is too short or empty + if not answer or len(answer.strip()) < 10: + await message.channel.send("🤔 Hmm, I got a very short response. Let me try to get more details about that.") + print(f"⚠️ Short response from LLM: '{answer}'") + return + # Split long responses to avoid Discord message limits if len(answer) > 2000: # Split into chunks diff --git a/docker-compose.registry.yml b/docker-compose.registry.yml new file mode 100644 index 0000000..c808c8b --- /dev/null +++ b/docker-compose.registry.yml @@ -0,0 +1,87 @@ +version: '3.8' + +services: + mcp-server: + image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-mcp-server:latest + container_name: sleeper-bot-mcp-server + ports: + - "8000:8000" + environment: + - HOST=0.0.0.0 + - PORT=8000 + - SLEEPER_API_BASE_URL=https://api.sleeper.app/v1 + - CACHE_TTL=300 + - CACHE_MAX_SIZE=1000 + volumes: + - ./logs:/app/logs + networks: + - sleeper-bot-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + + llm-agent: + image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-llm-agent:latest + container_name: sleeper-bot-llm-agent + ports: + - "8001:8000" + environment: + - HOST=0.0.0.0 + - PORT=8000 + - API_KEY=${API_KEY} + - MCP_SERVER_URL=http://mcp-server:8000 + - DEFAULT_LEAGUE_ID=${DEFAULT_LEAGUE_ID} + - LLM_PROVIDER=${LLM_PROVIDER:-ollama} + - LLM_URL=${LLM_URL:-http://host.docker.internal:11434/api/generate} + - 
LLM_API_KEY=${LLM_API_KEY:-} + - LLM_MODEL=${LLM_MODEL:-llama3.2:latest} + volumes: + - ./logs:/app/logs + networks: + - sleeper-bot-network + depends_on: + mcp-server: + condition: service_healthy + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + + discord-bot: + image: ghcr.io/${GITHUB_REPOSITORY:-your-username/sleeper-bot}-discord-bot:latest + container_name: sleeper-bot-discord-bot + ports: + - "8002:8000" + environment: + - DISCORD_TOKEN=${DISCORD_TOKEN} + - LLM_API_URL=http://llm-agent:8000 + - API_KEY=${API_KEY} + - COMMAND_PREFIX=${COMMAND_PREFIX:-!} + volumes: + - ./logs:/app/logs + networks: + - sleeper-bot-network + depends_on: + llm-agent: + condition: service_healthy + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + +networks: + sleeper-bot-network: + driver: bridge + +volumes: + logs: diff --git a/llm-agent/agent.py b/llm-agent/agent.py index bd98666..0ba2f56 100644 --- a/llm-agent/agent.py +++ b/llm-agent/agent.py @@ -274,7 +274,7 @@ def _generate_fallback_response(self, query: str, function_results: List[Dict[st if not function_results: return "Oh man, another one of these questions? Listen, back in my glory days in the league, I would have crushed this. But since you're asking, what do you want to know about your league, teams, or players? Maybe I can share some of my legendary strategies that made everyone so... appreciative of my genius. 
😏" - # Build a simple response + # Build a comprehensive response responses = [] for result in function_results: if "error" in result: @@ -283,11 +283,35 @@ def _generate_fallback_response(self, query: str, function_results: List[Dict[st data = result["result"]["result"] if isinstance(data, dict): if "name" in data: - responses.append(f"📊 {data['name']}") + # For league info, provide more details + if "settings" in data: + responses.append(f"🏈 **{data['name']}** - {data.get('season', 'Current Season')}\n" + f"📊 {data.get('total_rosters', 'Unknown')} teams, " + f"💰 ${data.get('settings', {}).get('waiver_budget', 'Unknown')} waiver budget") + else: + responses.append(f"📊 **{data['name']}**") elif "username" in data: - responses.append(f"📊 {data['username']}") + responses.append(f"👤 **{data['username']}** - {data.get('display_name', 'No display name')}") else: - responses.append(f"📊 {result['function']} data retrieved") + # Provide more context for other data + responses.append(f"📊 **{result['function']}** data: {str(data)[:200]}...") + elif isinstance(data, list): + # Handle list data + if len(data) > 0: + responses.append(f"📋 **{result['function']}** - Found {len(data)} items") + # Show first few items as examples + for i, item in enumerate(data[:3]): + if isinstance(item, dict) and "name" in item: + responses.append(f" • {item['name']}") + elif isinstance(item, str): + responses.append(f" • {item}") + else: + responses.append(f"📋 **{result['function']}** - No data found") + else: + responses.append(f"📊 **{result['function']}** - {str(data)[:100]}") + + if not responses: + return "Oh man, I'm getting some weird data here. Back in my day, we didn't have these fancy systems - we just wrote everything down on napkins and hoped for the best. What exactly are you trying to find out about your league?" 
return "\n\n".join(responses) @@ -296,6 +320,8 @@ async def _generate_llm_response(self, query: str, data: Dict[str, Any]) -> str: try: # Prepare the prompt for the LLM prompt = self._build_llm_prompt(query, data) + print(f"🤖 LLM Prompt length: {len(prompt)} characters") + print(f"🤖 LLM Prompt preview: {prompt[:200]}...") # Call the LLM async with httpx.AsyncClient() as client: @@ -308,7 +334,7 @@ async def _generate_llm_response(self, query: str, data: Dict[str, Any]) -> str: "options": { "temperature": 0.7, "top_p": 0.9, - "max_tokens": 500 + "max_tokens": 1000 # Increased from 500 } }, timeout=30 @@ -316,7 +342,16 @@ async def _generate_llm_response(self, query: str, data: Dict[str, Any]) -> str: if response.status_code == 200: result = response.json() - return result.get("response", "").strip() + llm_response = result.get("response", "").strip() + print(f"🤖 LLM Response length: {len(llm_response)} characters") + print(f"🤖 LLM Response preview: {llm_response[:200]}...") + + # Check if response is too short + if len(llm_response) < 20: + print(f"⚠️ LLM response too short: '{llm_response}'") + return "" + + return llm_response else: print(f"LLM API error: {response.status_code} - {response.text}") return ""