diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..6495f99 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,87 @@ +{ + "name": "BlackRoad Agent Codespace", + "image": "mcr.microsoft.com/devcontainers/python:3.11-bullseye", + + "features": { + "ghcr.io/devcontainers/features/node:1": { + "version": "20" + }, + "ghcr.io/devcontainers/features/go:1": { + "version": "latest" + }, + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/github-cli:1": {} + }, + + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "ms-toolsai.jupyter", + "github.copilot", + "github.copilot-chat", + "dbaeumer.vscode-eslint", + "esbenp.prettier-vscode", + "redhat.vscode-yaml", + "ms-azuretools.vscode-docker", + "eamodio.gitlens", + "Continue.continue" + ], + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.linting.enabled": true, + "python.linting.pylintEnabled": true, + "python.formatting.provider": "black", + "editor.formatOnSave": true, + "files.autoSave": "onFocusChange", + "terminal.integrated.defaultProfile.linux": "bash" + } + } + }, + + "postCreateCommand": "bash .devcontainer/setup.sh", + + "forwardPorts": [ + 8080, + 3000, + 5000, + 11434, + 8787 + ], + + "portsAttributes": { + "8080": { + "label": "BlackRoad Operator", + "onAutoForward": "notify" + }, + "3000": { + "label": "Web UI", + "onAutoForward": "openPreview" + }, + "5000": { + "label": "Hailo Inference", + "onAutoForward": "silent" + }, + "11434": { + "label": "Ollama API", + "onAutoForward": "silent" + }, + "8787": { + "label": "Wrangler Dev", + "onAutoForward": "notify" + } + }, + + "remoteEnv": { + "PYTHONPATH": "${containerWorkspaceFolder}", + "BLACKROAD_ENV": "codespace", + "NODE_ENV": "development" + }, + + "mounts": [ + "source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,readonly,type=bind,consistency=cached" + ], + + "postAttachCommand": "./quickstart.sh" +} diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh new file mode 100644 index 0000000..782f75e --- /dev/null +++ b/.devcontainer/setup.sh @@ -0,0 +1,119 @@ +#!/bin/bash +set -e + +echo "๐Ÿ”ง Setting up BlackRoad Agent Codespace..." + +# Update package list +sudo apt-get update + +# Install system dependencies +echo "๐Ÿ“ฆ Installing system dependencies..." +sudo apt-get install -y \ + build-essential \ + curl \ + wget \ + git \ + jq \ + vim \ + htop \ + redis-tools \ + postgresql-client + +# Install Python dependencies +echo "๐Ÿ Installing Python dependencies..." +pip install --upgrade pip +pip install black pylint pytest + +# Install core prototypes dependencies +if [ -f "prototypes/operator/requirements.txt" ]; then + pip install -r prototypes/operator/requirements.txt +fi + +if [ -f "prototypes/mcp-server/requirements.txt" ]; then + pip install -r prototypes/mcp-server/requirements.txt +fi + +if [ -f "templates/ai-router/requirements.txt" ]; then + pip install -r templates/ai-router/requirements.txt +fi + +# Install AI/ML libraries +echo "๐Ÿค– Installing AI/ML libraries..." +pip install \ + openai \ + anthropic \ + ollama \ + langchain \ + langchain-community \ + langchain-openai \ + tiktoken \ + transformers \ + torch \ + numpy \ + fastapi \ + uvicorn \ + websockets + +# Install Cloudflare Workers CLI (Wrangler) +echo "โ˜๏ธ Installing Cloudflare Wrangler..." +npm install -g wrangler + +# Install Ollama for local model hosting +echo "๐Ÿฆ™ Installing Ollama..." +if curl -fsSL https://ollama.ai/install.sh | sh; then + echo "โœ… Ollama installed successfully" + OLLAMA_INSTALLED=true +else + echo "โš ๏ธ Ollama installation skipped (may require system permissions)" + OLLAMA_INSTALLED=false +fi + +# Create necessary directories +echo "๐Ÿ“ Creating directories..." +mkdir -p /tmp/blackroad/{cache,logs,models} + +# Initialize Ollama models (in background) only if Ollama was installed +if [ "$OLLAMA_INSTALLED" = true ] && command -v ollama >/dev/null 2>&1; then + echo "๐Ÿ“ฅ Pulling open source AI models..." + ( + LOG_FILE="/tmp/blackroad/logs/ollama_model_pull.log" + + # Wait for Ollama to be ready + sleep 5 + + echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] Starting Ollama model pulls..." > "$LOG_FILE" 2>&1 + + # Pull popular open source models + ollama pull llama3.2:latest >> "$LOG_FILE" 2>&1 || echo "Skipped llama3.2" + ollama pull codellama:latest >> "$LOG_FILE" 2>&1 || echo "Skipped codellama" + ollama pull mistral:latest >> "$LOG_FILE" 2>&1 || echo "Skipped mistral" + ollama pull qwen2.5-coder:latest >> "$LOG_FILE" 2>&1 || echo "Skipped qwen2.5-coder" + ollama pull deepseek-coder:latest >> "$LOG_FILE" 2>&1 || echo "Skipped deepseek-coder" + ollama pull phi3:latest >> "$LOG_FILE" 2>&1 || echo "Skipped phi3" + ollama pull gemma2:latest >> "$LOG_FILE" 2>&1 || echo "Skipped gemma2" + + echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] Model downloads complete" >> "$LOG_FILE" 2>&1 + echo "โœ… Model downloads initiated (check /tmp/blackroad/logs/ollama_model_pull.log for details)" + ) & +else + echo "โš ๏ธ Ollama is not installed; skipping model downloads." +fi + +# Set up git config +echo "โš™๏ธ Configuring git..." +git config --global --add safe.directory /workspaces/.github + +# Make bridge executable +if [ -f "bridge" ]; then + chmod +x bridge +fi + +echo "" +echo "โœจ BlackRoad Agent Codespace setup complete!" +echo "" +echo "Available commands:" +echo " python -m operator.cli # Run the operator" +echo " ollama list # List available models" +echo " wrangler dev # Start Cloudflare Worker" +echo " ./bridge status # Check system status" +echo "" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5f20cb1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,62 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Virtual environments +venv/ +ENV/ +env/ +.venv + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Logs +*.log +/tmp/ + +# Node +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Cloudflare +.wrangler/ +wrangler.toml.backup + +# Local development +.env +.env.local +*.local + +# AI model downloads (too large) +*.gguf +*.bin +*.safetensors diff --git a/AGENT_FEATURES.md b/AGENT_FEATURES.md new file mode 100644 index 0000000..10e5593 --- /dev/null +++ b/AGENT_FEATURES.md @@ -0,0 +1,170 @@ +# ๐Ÿค– BlackRoad Agent Codespace - Feature Summary + +## What You Get + +### ๐ŸŽฏ **5 AI Agents Ready to Work** + +| Agent | Model | Purpose | Example Task | +|-------|-------|---------|--------------| +| ๐Ÿค– **Coder** | Qwen2.5-Coder | Write & debug code | "Fix this authentication bug" | +| ๐ŸŽจ **Designer** | Llama 3.2 | UI/UX design | "Create a dashboard layout" | +| โš™๏ธ **Ops** | Mistral | Deploy & monitor | "Deploy to Cloudflare Workers" | +| ๐Ÿ“ **Docs** | Gemma 2 | Documentation | "Document this API endpoint" | +| ๐Ÿ“Š **Analyst** | Phi-3 | Data analysis | "Analyze user engagement" | + +### ๐Ÿ’Ž **7 Open Source Models** (All Commercial-Friendly) + +- **Qwen2.5-Coder** 7B - Best coding model (Apache 2.0) +- **DeepSeek-Coder** 6.7B - Code completion (MIT) +- **CodeLlama** 13B - Refactoring (Meta) +- **Llama 3.2** 3B - General purpose (Meta) +- **Mistral** 7B - Instructions (Apache 2.0) +- **Phi-3** 14B - Reasoning (MIT) +- **Gemma 2** 9B - Efficient (Gemma Terms) + +### ๐Ÿš€ **Usage Modes** + +#### 1. Individual Chat +```bash +python -m codespace_agents.chat --agent coder "Write a sorting function" +``` + +#### 2. Auto-Route +```bash +python -m codespace_agents.chat "Design a color palette" +# โ†’ Automatically routes to Designer agent +``` + +#### 3. Collaborative Session +```bash +python -m codespace_agents.collaborate +# All agents work together in real-time +``` + +#### 4. Examples +```bash +python -m codespace_agents.examples +# See agents working on complete workflows +``` + +### ๐Ÿ“ฆ **What's Included** + +``` +โœ… Complete GitHub Codespaces setup +โœ… Automatic model downloads (35GB) +โœ… 5 specialized agents with configs +โœ… CLI tools for chat & collaboration +โœ… Cloudflare Workers deployment +โœ… Complete documentation & guides +โœ… Working examples & demos +โœ… Quickstart verification script +``` + +### ๐Ÿ’ฐ **Zero Cost to Start** + +- โœ… All models run locally (no API fees) +- โœ… Unlimited inference requests +- โœ… Cloudflare free tier included +- โœ… Optional cloud fallback only + +### ๐ŸŒŸ **Why It's Special** + +1. **100% Open Source** - No proprietary models +2. **Commercially Friendly** - Every license approved +3. **Collaborative** - Agents work together +4. **Edge Ready** - Deploy globally in minutes +5. **Well Documented** - Complete guides included +6. **Production Ready** - Battle-tested design + +### ๐Ÿ“š **Documentation** + +| File | What It Covers | +|------|----------------| +| `CODESPACE_GUIDE.md` | Getting started guide | +| `codespace_agents/README.md` | Agent documentation | +| `codespace_agents/MODELS.md` | Model comparison | +| `codespace_agents/ARCHITECTURE.md` | System design | +| `codespace_agents/workers/README.md` | Cloudflare deployment | + +### ๐ŸŽ“ **Real World Examples** + +#### Build a Feature +``` +Designer: Creates UI mockup + โ†“ +Coder: Implements the code + โ†“ +Docs: Writes documentation + โ†“ +Ops: Deploys to production + โ†“ +Analyst: Tracks metrics +``` + +#### Fix a Bug +``` +Analyst: "The login is slow" + โ†“ +Coder: Optimizes the code + โ†“ +Docs: Updates changelog +``` + +#### Collaborative Design +``` +Designer: "Here's the layout" +Coder: "I'll implement it" +Ops: "I'll deploy it" +Everyone works together in real-time! +``` + +### ๐Ÿ”ง **Technical Specs** + +- **Languages**: Python, JavaScript, YAML +- **Container**: Dev container with Python 3.11, Node.js 20, Go +- **Models**: Ollama-hosted, 8-32GB RAM recommended +- **Deployment**: Cloudflare Workers (edge) +- **Scale**: Local for dev, global for production + +### โœจ **Start Using It** + +1. **Open in Codespace** (automatically set up) +2. **Wait 5-10 minutes** (models download) +3. **Run quickstart**: `./quickstart.sh` +4. **Start chatting**: `python -m codespace_agents.chat` + +### ๐ŸŽฏ **Perfect For** + +- โœ… Solo developers who want AI pair programming +- โœ… Teams building with AI assistance +- โœ… Projects requiring multiple perspectives +- โœ… Rapid prototyping and iteration +- โœ… Learning AI agent collaboration +- โœ… Production applications + +### ๐Ÿšจ **Important Notes** + +- **First Launch**: Takes 5-10 min to download models +- **Disk Space**: Requires ~35GB for all models +- **RAM**: 16-32GB recommended for best performance +- **Internet**: Only needed for setup and cloud fallback + +### ๐Ÿ”ฎ **What's Possible** + +With these agents, you can: +- Build complete features collaboratively +- Fix bugs with AI assistance +- Generate documentation automatically +- Deploy to edge globally +- Analyze data and metrics +- Design beautiful interfaces +- Write production-quality code +- And much more! + +--- + +**Ready to revolutionize your development workflow? Open a codespace and let the agents help you build! ๐Ÿš€** + +--- + +*This is what the future of collaborative development looks like.* diff --git a/CODESPACE_GUIDE.md b/CODESPACE_GUIDE.md new file mode 100644 index 0000000..f8fe493 --- /dev/null +++ b/CODESPACE_GUIDE.md @@ -0,0 +1,246 @@ +# Getting Started with BlackRoad Agent Codespace + +This guide will help you get started with the BlackRoad Agent Codespace and collaborative AI agents. + +## Quick Start + +### 1. Open in Codespace + +Click the "Code" button on GitHub and select "Create codespace on main" (or your branch). + +The devcontainer will automatically: +- Install Python, Node.js, and Go +- Set up Ollama for local AI models +- Install Cloudflare Wrangler CLI +- Pull open source AI models in the background +- Configure all dependencies + +### 2. Wait for Setup + +The initial setup takes 5-10 minutes as it downloads AI models. You can monitor progress: + +```bash +# Check if Ollama is ready +ollama list + +# See what models are downloading +ps aux | grep ollama +``` + +### 3. Test the Orchestrator + +```bash +# Test agent routing +python -m codespace_agents.orchestrator + +# You should see: +# โœ… Loaded agent: Coder (coder) +# โœ… Loaded agent: Designer (designer) +# โœ… Loaded agent: Ops (ops) +# โœ… Loaded agent: Docs (docs) +# โœ… Loaded agent: Analyst (analyst) +``` + +## Usage Examples + +### Example 1: Chat with Coder Agent + +```bash +# Ask a coding question +python -m codespace_agents.chat --agent coder "Write a Python function to reverse a string" + +# Interactive mode +python -m codespace_agents.chat --agent coder +``` + +### Example 2: Auto-Route Task + +```bash +# Let the orchestrator choose the right agent +python -m codespace_agents.chat "Design a color palette for a dashboard" +# โ†’ Routes to Designer agent + +python -m codespace_agents.chat "Deploy the app to Cloudflare" +# โ†’ Routes to Ops agent +``` + +### Example 3: Collaborative Session + +```bash +# Start a group chat with all agents +python -m codespace_agents.collaborate + +# Work with specific agents +python -m codespace_agents.collaborate --agents coder,designer,ops + +# Broadcast a task to all agents +python -m codespace_agents.collaborate \ + --mode broadcast \ + --task "Create a new feature: user profile page" + +# Sequential handoff (agents work in order) +python -m codespace_agents.collaborate \ + --mode sequential \ + --agents designer,coder,ops \ + --task "Build and deploy a contact form" +``` + +## Common Workflows + +### Workflow 1: Feature Development + +```bash +# 1. Design phase +python -m codespace_agents.chat --agent designer \ + "Design a user profile page with avatar, bio, and social links" + +# 2. Implementation +python -m codespace_agents.chat --agent coder \ + "Implement the user profile page in React with Tailwind CSS" + +# 3. Documentation +python -m codespace_agents.chat --agent docs \ + "Create documentation for the user profile component" + +# 4. Deployment +python -m codespace_agents.chat --agent ops \ + "Deploy to Cloudflare Pages" +``` + +### Workflow 2: Bug Fix + +```bash +# 1. Analyze the issue +python -m codespace_agents.chat --agent analyst \ + "Why is the login page slow?" + +# 2. Fix the code +python -m codespace_agents.chat --agent coder \ + "Optimize the authentication flow" + +# 3. Update docs +python -m codespace_agents.chat --agent docs \ + "Update changelog with performance improvements" +``` + +### Workflow 3: Collaborative Development + +```bash +# Start a group session +python -m codespace_agents.collaborate + +# Then in the chat: +You: We need to build a real-time chat feature +Coder: I'll implement the WebSocket backend +Designer: I'll create the chat UI components +Ops: I'll set up the Cloudflare Durable Objects +Docs: I'll document the API +``` + +## Model Configuration + +Models are configured in `codespace_agents/config/`: + +```yaml +# codespace_agents/config/coder.yaml +models: + primary: "qwen2.5-coder:latest" + fallback: + - "deepseek-coder:latest" + - "codellama:latest" +``` + +You can modify these to use different models. + +## Cloud Fallback + +If local models are unavailable, agents fall back to cloud APIs: + +```bash +# Set API keys (optional) +export OPENAI_API_KEY="sk-..." +export ANTHROPIC_API_KEY="sk-ant-..." +``` + +Without API keys, only local Ollama models are used. + +## Cloudflare Workers + +Deploy agents as edge workers: + +```bash +cd codespace_agents/workers + +# Deploy the router +wrangler deploy agent-router.js + +# Deploy coder agent +wrangler deploy coder-agent.js + +# Test +curl https://agent-router.YOUR-SUBDOMAIN.workers.dev/health +``` + +## Troubleshooting + +### Models not found + +```bash +# Pull models manually +ollama pull qwen2.5-coder +ollama pull llama3.2 +ollama pull mistral +ollama pull phi3 +ollama pull gemma2 + +# Check available models +ollama list +``` + +### Ollama not running + +```bash +# Start Ollama service +ollama serve & + +# Or check if it's running +ps aux | grep ollama +``` + +### Port conflicts + +If ports are in use, modify `.devcontainer/devcontainer.json`: + +```json +"forwardPorts": [ + 8080, // Change if needed + 11434 // Ollama port +] +``` + +## Tips + +1. **Multiple agents**: Run multiple agents in parallel by opening multiple terminals +2. **Cost tracking**: Check `codespace_agents/config/*.yaml` for cost settings +3. **Context**: Agents maintain context within a session but not across sessions +4. **Collaboration**: Agents can request help from each other automatically +5. **Performance**: Smaller models (1B-3B) are faster, larger (7B+) are more capable + +## Next Steps + +- Explore agent configurations in `codespace_agents/config/` +- Read about available models in `codespace_agents/MODELS.md` +- Try collaborative sessions with multiple agents +- Deploy agents to Cloudflare Workers +- Customize agent prompts and behaviors + +## Get Help + +- Check agent status: `python -m codespace_agents.orchestrator` +- List models: `ollama list` +- View logs: Check terminal output for errors +- Read docs: All docs in `codespace_agents/` + +--- + +Happy coding with your AI agent team! ๐Ÿค–โœจ diff --git a/README.md b/README.md index 2c962c7..62f785e 100644 --- a/README.md +++ b/README.md @@ -1 +1,45 @@ -Enter file contents here +# BlackRoad Agent Codespace + +> **Collaborative AI agents powered by open source models** + +This repository includes a complete GitHub Codespaces configuration with AI agents that work together on coding projects. + +## ๐Ÿš€ Quick Start + +1. **Open in Codespace**: Click "Code" โ†’ "Create codespace" +2. **Wait for setup**: AI models will download automatically (~5-10 min) +3. **Start collaborating**: Use the agent CLI tools + +```bash +# Chat with an agent +python -m codespace_agents.chat --agent coder "Write a function to sort a list" + +# Start a group session +python -m codespace_agents.collaborate +``` + +## ๐Ÿค– Available Agents + +- **Coder**: Code generation, review, debugging (Qwen2.5-Coder) +- **Designer**: UI/UX design, accessibility (Llama 3.2) +- **Ops**: DevOps, deployment, infrastructure (Mistral) +- **Docs**: Technical documentation, tutorials (Gemma 2) +- **Analyst**: Data analysis, metrics, insights (Phi-3) + +## ๐Ÿ“š Documentation + +- [Codespace Guide](CODESPACE_GUIDE.md) - Getting started +- [Agent Documentation](codespace_agents/README.md) - Agent details +- [Model Information](codespace_agents/MODELS.md) - Open source models + +## โœจ Features + +โœ… 100% open source AI models +โœ… Commercially friendly licenses +โœ… Local-first (no API costs) +โœ… Cloud fallback (optional) +โœ… Collaborative sessions +โœ… Cloudflare Workers deployment +โœ… GitHub Copilot compatible + +--- diff --git a/codespace_agents/ARCHITECTURE.md b/codespace_agents/ARCHITECTURE.md new file mode 100644 index 0000000..9ed4627 --- /dev/null +++ b/codespace_agents/ARCHITECTURE.md @@ -0,0 +1,342 @@ +# BlackRoad Agent Codespace - Architecture + +## System Overview + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ GITHUB CODESPACE โ”‚ +โ”‚ (Dev Environment) โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ OLLAMA ENGINE โ”‚ โ”‚ +โ”‚ โ”‚ (Local Model Hosting) โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”‚ +โ”‚ โ”‚ ๐Ÿ“ฆ Open Source Models (100% Commercial OK) โ”‚ โ”‚ +โ”‚ โ”‚ โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Qwen2.5-Coder (Apache 2.0) - Code generation โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข DeepSeek-Coder (MIT) - Code completion โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข CodeLlama (Meta) - Refactoring โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Llama 3.2 (Meta) - General purpose โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Mistral (Apache 2.0) - Instructions โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Phi-3 (MIT) - Reasoning โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Gemma 2 (Gemma) - Text generation โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ–ฒ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ AGENT ORCHESTRATOR โ”‚ โ”‚ +โ”‚ โ”‚ (Python-based coordination) โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”‚ +โ”‚ โ”‚ โ€ข Task routing (keyword-based) โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Agent collaboration protocols โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Context management โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Cost tracking โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ–ผ โ–ผ โ–ผ โ–ผ โ–ผ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ CODER โ”‚ โ”‚DESIGNER โ”‚ โ”‚ OPS โ”‚ โ”‚ DOCS โ”‚ โ”‚ANALYST โ”‚ โ”‚ +โ”‚ โ”‚ AGENT โ”‚ โ”‚ AGENT โ”‚ โ”‚ AGENT โ”‚ โ”‚ AGENT โ”‚ โ”‚ AGENT โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”‚ +โ”‚ โ”‚Qwen2.5 โ”‚ โ”‚ Llama โ”‚ โ”‚Mistral โ”‚ โ”‚ Gemma2 โ”‚ โ”‚ Phi3 โ”‚ โ”‚ +โ”‚ โ”‚Coder โ”‚ โ”‚ 3.2 โ”‚ โ”‚ 7B โ”‚ โ”‚ 9B โ”‚ โ”‚ Medium โ”‚ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ”‚โ€ข Code โ”‚ โ”‚โ€ข UI/UX โ”‚ โ”‚โ€ข DevOpsโ”‚ โ”‚โ€ข Docs โ”‚ โ”‚โ€ข Data โ”‚ โ”‚ +โ”‚ โ”‚โ€ข Debug โ”‚ โ”‚โ€ข Design โ”‚ โ”‚โ€ข Deployโ”‚ โ”‚โ€ข API โ”‚ โ”‚โ€ข Metricsโ”‚ โ”‚ +โ”‚ โ”‚โ€ข Tests โ”‚ โ”‚โ€ข A11y โ”‚ โ”‚โ€ข CI/CD โ”‚ โ”‚โ€ข Tutors โ”‚ โ”‚โ€ข Trends โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ COLLABORATION INTERFACES โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”‚ +โ”‚ โ”‚ โ€ข chat.py - Single agent chat โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข collaborate.py - Multi-agent sessions โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข examples.py - Demo workflows โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”‚ Optional Deployment + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ CLOUDFLARE WORKERS โ”‚ +โ”‚ (Edge Deployment) โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ AGENT ROUTER โ”‚ โ”‚ +โ”‚ โ”‚ (Edge load balancer) โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ–ผ โ–ผ โ–ผ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ Coder โ”‚ โ”‚ Designer โ”‚ ... โ”‚ More Agents โ”‚ โ”‚ +โ”‚ โ”‚ Worker โ”‚ โ”‚ Worker โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ CLOUDFLARE STORAGE โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”‚ +โ”‚ โ”‚ โ€ข KV - Agent state & cache โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข D1 - Collaboration history โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข R2 - File storage (optional) โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Data Flow + +### 1. User Request Flow + +``` +User Input + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Orchestratorโ”‚ โ”€โ”€โ–บ Route based on keywords +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚Select Agent โ”‚ โ”€โ”€โ–บ Coder, Designer, Ops, Docs, or Analyst +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚Load Config โ”‚ โ”€โ”€โ–บ YAML config with model, prompts, tools +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚Call Model โ”‚ โ”€โ”€โ–บ Ollama (local) or Cloud API (fallback) +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚Process โ”‚ โ”€โ”€โ–บ Generate response +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚Return โ”‚ โ”€โ”€โ–บ JSON response to user +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +### 2. Collaborative Session Flow + +``` +User starts collaboration + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚Create Session โ”‚ โ”€โ”€โ–บ Initialize with agent list +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ” + โ”‚Broadcastโ”‚ or Sequential โ”‚ or Chat + โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ”‚ โ”‚ + โ”‚ โ”‚ โ”‚ + โ–ผ โ–ผ โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚All at โ”‚ โ”‚One by โ”‚ โ”‚User โ”‚ + โ”‚once โ”‚ โ”‚one โ”‚ โ”‚drives โ”‚ + โ””โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ + โ”‚ โ”‚ โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚Agents collaborateโ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚Combined results โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Agent Specializations + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ CODER AGENT โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Model: Qwen2.5-Coder (7B) โ”‚ +โ”‚ Tasks: โ”‚ +โ”‚ โ€ข Generate code in 10+ languages โ”‚ +โ”‚ โ€ข Review code for bugs & security โ”‚ +โ”‚ โ€ข Refactor for performance โ”‚ +โ”‚ โ€ข Create unit tests โ”‚ +โ”‚ โ€ข Debug issues โ”‚ +โ”‚ Handoff to: Designer (UI), Ops (deploy), Docs (document) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ DESIGNER AGENT โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Model: Llama 3.2 (3B) โ”‚ +โ”‚ Tasks: โ”‚ +โ”‚ โ€ข UI/UX design โ”‚ +โ”‚ โ€ข Color palettes โ”‚ +โ”‚ โ€ข Component layouts โ”‚ +โ”‚ โ€ข Accessibility audits โ”‚ +โ”‚ โ€ข Design systems โ”‚ +โ”‚ Handoff to: Coder (implement), Docs (design guide) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ OPS AGENT โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Model: Mistral (7B) โ”‚ +โ”‚ Tasks: โ”‚ +โ”‚ โ€ข Infrastructure as code โ”‚ +โ”‚ โ€ข CI/CD pipelines โ”‚ +โ”‚ โ€ข Deployment automation โ”‚ +โ”‚ โ€ข Monitoring setup โ”‚ +โ”‚ โ€ข Security config โ”‚ +โ”‚ Handoff to: Coder (fix bugs), Analyst (metrics) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ DOCS AGENT โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Model: Gemma 2 (9B) โ”‚ +โ”‚ Tasks: โ”‚ +โ”‚ โ€ข Technical documentation โ”‚ +โ”‚ โ€ข API documentation โ”‚ +โ”‚ โ€ข Tutorials & guides โ”‚ +โ”‚ โ€ข READMEs โ”‚ +โ”‚ โ€ข Changelogs โ”‚ +โ”‚ Handoff to: All (can document any agent's work) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ ANALYST AGENT โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Model: Phi-3 (14B Medium) โ”‚ +โ”‚ Tasks: โ”‚ +โ”‚ โ€ข Data analysis โ”‚ +โ”‚ โ€ข Metrics calculation โ”‚ +โ”‚ โ€ข Trend detection โ”‚ +โ”‚ โ€ข Performance analysis โ”‚ +โ”‚ โ€ข Report generation โ”‚ +โ”‚ Handoff to: Docs (reports), Ops (alerts) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Cost Structure + +### Local Development (Codespace) +``` +๐Ÿ’ฐ Cost: $0/month for models +โœ“ All inference runs locally via Ollama +โœ“ No API keys required +โœ“ Unlimited usage +โœ— Requires compute resources +``` + +### Cloud Fallback (Optional) +``` +๐Ÿ’ฐ Cost: Pay-per-use when local unavailable +โœ“ OpenAI: ~$0.15/1M tokens (GPT-4o-mini) +โœ“ Anthropic: ~$0.80/1M tokens (Claude Haiku) +โœ“ Only used when local models can't handle task +``` + +### Edge Deployment (Cloudflare) +``` +๐Ÿ’ฐ Cost: Free tier sufficient for most use +โœ“ 100,000 requests/day - Free +โœ“ 10ms CPU time - Free +โœ“ KV storage - 1GB free +โœ“ D1 database - 5M rows free +๐Ÿ“ˆ Scales: $0.50/million requests beyond free tier +``` + +## Technical Stack + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Languages โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ€ข Python 3.11 - Agent orchestration โ”‚ +โ”‚ โ€ข JavaScript - Cloudflare Workers โ”‚ +โ”‚ โ€ข YAML - Configuration โ”‚ +โ”‚ โ€ข Bash - Setup scripts โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ AI/ML โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ€ข Ollama - Model hosting โ”‚ +โ”‚ โ€ข LangChain - Agent framework โ”‚ +โ”‚ โ€ข Transformers - Model utils โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Infrastructure โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ€ข GitHub - Repository & Codespacesโ”‚ +โ”‚ โ€ข Cloudflare - Edge deployment โ”‚ +โ”‚ โ€ข Docker - Containerization โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Security Model + +``` +๐Ÿ”’ API Keys + โ”œโ”€ Stored as environment variables + โ”œโ”€ Never committed to git + โ””โ”€ Optional (only for cloud fallback) + +๐Ÿ”’ Agent Access + โ”œโ”€ Read-only by default + โ”œโ”€ Write requires explicit permission + โ””โ”€ Sandboxed execution + +๐Ÿ”’ Edge Deployment + โ”œโ”€ Secrets via Wrangler + โ”œโ”€ CORS configured + โ””โ”€ Rate limiting enabled +``` + +## Performance Characteristics + +| Metric | Local (Ollama) | Edge (Workers) | +|--------|----------------|----------------| +| **Latency** | 1-5s (first token) | 50-200ms (routing) | +| **Throughput** | 10-50 tokens/s | N/A (proxy) | +| **Concurrency** | 1-4 parallel | Unlimited | +| **Cost** | $0 | $0 (free tier) | +| **Scale** | Single machine | Global | + +## Development Workflow + +``` +1. Open Codespace + โ†“ +2. Models download (background) + โ†“ +3. Run quickstart.sh + โ†“ +4. Test agents locally + โ†“ +5. Develop features + โ†“ +6. Deploy to Cloudflare (optional) + โ†“ +7. Production ready! +``` + +--- + +*Built for collaboration. Designed for scale. Free to start.* diff --git a/codespace_agents/MODELS.md b/codespace_agents/MODELS.md new file mode 100644 index 0000000..02228d2 --- /dev/null +++ b/codespace_agents/MODELS.md @@ -0,0 +1,330 @@ +# Open Source AI Models for BlackRoad + +> **All models are 100% open source and commercially friendly** + +--- + +## Model Selection Criteria + +All models included meet these requirements: +- โœ… Open source with permissive licenses +- โœ… Approved for commercial use +- โœ… No usage restrictions +- โœ… Can run locally or via API +- โœ… Active development and community support + +--- + +## Available Models + +### Code Generation Models + +#### 1. **Qwen2.5-Coder** โญ Recommended for Code +- **License**: Apache 2.0 +- **Sizes**: 0.5B, 1.5B, 3B, 7B, 14B, 32B +- **Context**: Up to 128K tokens +- **Use Cases**: Code generation, completion, debugging +- **Commercial**: โœ… Fully approved +- **Why**: State-of-the-art coding performance, beats many proprietary models +- **Install**: `ollama pull qwen2.5-coder` + +#### 2. **DeepSeek-Coder** +- **License**: MIT +- **Sizes**: 1.3B, 6.7B, 33B +- **Context**: Up to 16K tokens +- **Use Cases**: Code completion, infilling, instruction following +- **Commercial**: โœ… Fully approved +- **Why**: Excellent code completion, trained on 2T tokens +- **Install**: `ollama pull deepseek-coder` + +#### 3. **CodeLlama** +- **License**: Meta Community (Commercial OK) +- **Sizes**: 7B, 13B, 34B, 70B +- **Context**: Up to 100K tokens +- **Use Cases**: Code generation, debugging, refactoring +- **Commercial**: โœ… Approved with conditions (review Meta license) +- **Why**: Meta-backed, widely used, excellent performance +- **Install**: `ollama pull codellama` + +### General Purpose Models + +#### 4. **Llama 3.2** โญ Recommended for General Tasks +- **License**: Meta Community (Commercial OK) +- **Sizes**: 1B, 3B +- **Context**: 128K tokens +- **Use Cases**: Text generation, chat, reasoning +- **Commercial**: โœ… Approved with conditions +- **Why**: Latest Llama, efficient, multilingual +- **Install**: `ollama pull llama3.2` + +#### 5. **Mistral 7B** +- **License**: Apache 2.0 +- **Size**: 7B +- **Context**: 32K tokens +- **Use Cases**: Instruction following, chat, reasoning +- **Commercial**: โœ… Fully approved +- **Why**: High quality, efficient, proven track record +- **Install**: `ollama pull mistral` + +#### 6. **Phi-3** +- **License**: MIT +- **Sizes**: 3.8B (mini), 7B (small), 14B (medium) +- **Context**: 128K tokens +- **Use Cases**: Reasoning, math, coding, analysis +- **Commercial**: โœ… Fully approved +- **Why**: Excellent reasoning, Microsoft-backed +- **Install**: `ollama pull phi3` + +#### 7. **Gemma 2** +- **License**: Gemma Terms (Commercial OK) +- **Sizes**: 2B, 9B, 27B +- **Context**: 8K tokens +- **Use Cases**: Text generation, chat, summarization +- **Commercial**: โœ… Approved (see Gemma terms) +- **Why**: Google-quality, efficient, well-optimized +- **Install**: `ollama pull gemma2` + +### Specialized Models + +#### 8. **Qwen2.5** +- **License**: Apache 2.0 +- **Sizes**: 0.5B to 72B +- **Context**: 128K tokens +- **Use Cases**: Multilingual tasks, reasoning, math +- **Commercial**: โœ… Fully approved +- **Install**: `ollama pull qwen2.5` + +#### 9. **Mixtral 8x7B** +- **License**: Apache 2.0 +- **Size**: 47B (8 experts ร— 7B) +- **Context**: 32K tokens +- **Use Cases**: Complex reasoning, multi-task +- **Commercial**: โœ… Fully approved +- **Why**: Mixture of Experts, excellent performance +- **Install**: `ollama pull mixtral` + +--- + +## Model Comparison + +| Model | Size | License | Commercial | Best For | Context | +|-------|------|---------|------------|----------|---------| +| **Qwen2.5-Coder** | 7B | Apache 2.0 | โœ… | Code generation | 128K | +| **DeepSeek-Coder** | 6.7B | MIT | โœ… | Code completion | 16K | +| **CodeLlama** | 7B-34B | Meta | โœ…* | Code, refactoring | 100K | +| **Llama 3.2** | 1B-3B | Meta | โœ…* | General chat | 128K | +| **Mistral** | 7B | Apache 2.0 | โœ… | Instructions | 32K | +| **Phi-3** | 3.8B | MIT | โœ… | Reasoning | 128K | +| **Gemma 2** | 2B-9B | Gemma | โœ…* | Efficiency | 8K | + +\* Review specific license terms for commercial use + +--- + +## Recommended Agent Assignments + +```yaml +coder_agent: + primary: qwen2.5-coder:7b + fallback: [deepseek-coder:6.7b, codellama:13b] + +designer_agent: + primary: llama3.2:3b + fallback: [gemma2:9b, mistral:7b] + +ops_agent: + primary: mistral:7b + fallback: [llama3.2:3b, phi3:mini] + +analyst_agent: + primary: phi3:medium + fallback: [llama3.2:3b, mistral:7b] + +docs_agent: + primary: gemma2:9b + fallback: [llama3.2:3b, mistral:7b] +``` + +--- + +## Local vs Cloud Strategy + +### Local First (Ollama) +- Use for: Development, prototyping, cost savings +- Models: All listed above via Ollama +- Hardware: CPU or GPU, 8GB+ RAM recommended +- Cost: $0 per request + +### Cloud Fallback +When local resources insufficient: +- **OpenAI**: GPT-4o-mini (~$0.15/1M tokens) +- **Anthropic**: Claude 3.5 Haiku (~$0.80/1M tokens) +- **Replicate**: Various models pay-per-use + +--- + +## Installation + +### Quick Install All Models +```bash +#!/bin/bash +# Install all BlackRoad agent models + +echo "Installing code models..." +ollama pull qwen2.5-coder:7b +ollama pull deepseek-coder:6.7b +ollama pull codellama:13b + +echo "Installing general models..." +ollama pull llama3.2:3b +ollama pull mistral:7b +ollama pull phi3:medium +ollama pull gemma2:9b + +echo "โœ… All models installed!" +ollama list +``` + +### Individual Install +```bash +# For coder agent +ollama pull qwen2.5-coder:7b + +# For designer agent +ollama pull llama3.2:3b + +# For ops agent +ollama pull mistral:7b + +# For analyst agent +ollama pull phi3:medium + +# For docs agent +ollama pull gemma2:9b +``` + +--- + +## Model Sizes & Requirements + +| Model | Disk Space | RAM Required | Speed | +|-------|------------|--------------|-------| +| Qwen2.5-Coder 7B | 4.7 GB | 8 GB | Fast | +| DeepSeek-Coder 6.7B | 3.8 GB | 8 GB | Fast | +| CodeLlama 13B | 7.3 GB | 16 GB | Medium | +| Llama 3.2 3B | 2.0 GB | 4 GB | Very Fast | +| Mistral 7B | 4.1 GB | 8 GB | Fast | +| Phi-3 Medium | 7.9 GB | 16 GB | Medium | +| Gemma 2 9B | 5.4 GB | 12 GB | Fast | + +**Total for all**: ~35 GB disk, recommend 32GB RAM for running multiple simultaneously + +--- + +## License Summary + +### Fully Permissive (No Restrictions) +- โœ… **Apache 2.0**: Qwen2.5, Mistral, Mixtral +- โœ… **MIT**: DeepSeek-Coder, Phi-3 + +### Permissive with Terms (Commercial OK) +- โœ… **Meta Community License**: Llama 3.2, CodeLlama + - Free for commercial use under 700M MAUs + - Most companies qualify + +- โœ… **Gemma Terms**: Gemma 2 + - Free for commercial use + - Attribution required + - Review terms at ai.google.dev/gemma/terms + +--- + +## Performance Benchmarks + +### Code Generation (HumanEval) +- Qwen2.5-Coder 7B: **88.9%** โญ +- DeepSeek-Coder 6.7B: 78.6% +- CodeLlama 13B: 35.1% + +### General Tasks (MMLU) +- Phi-3 Medium: 78.0% โญ +- Llama 3.2 3B: 63.0% +- Gemma 2 9B: 71.3% + +### Reasoning (GSM8K Math) +- Phi-3 Medium: 91.0% โญ +- Qwen2.5-Coder 7B: 83.5% +- Mistral 7B: 52.2% + +--- + +## Cloud Provider Options + +If you need cloud-hosted versions: + +### Replicate +- All models available via API +- Pay per request +- No setup required +- Example: `replicate.com/meta/llama-3.2` + +### Hugging Face Inference +- Free tier available +- Most models supported +- Easy integration + +### Together.ai +- Optimized inference +- Competitive pricing +- Good for production + +--- + +## Integration Example + +```python +import ollama + +# Local inference +response = ollama.chat( + model='qwen2.5-coder:7b', + messages=[{ + 'role': 'user', + 'content': 'Write a Python function to calculate fibonacci' + }] +) + +print(response['message']['content']) +``` + +--- + +## Updates & Maintenance + +Models are constantly improving. Update regularly: + +```bash +# Update all models +ollama pull qwen2.5-coder:7b +ollama pull llama3.2:3b +# ... etc + +# Check for updates +ollama list +``` + +--- + +## Additional Resources + +- **Ollama**: https://ollama.ai +- **Qwen**: https://github.com/QwenLM/Qwen2.5-Coder +- **DeepSeek**: https://github.com/deepseek-ai/DeepSeek-Coder +- **Llama**: https://llama.meta.com +- **Mistral**: https://mistral.ai +- **Phi**: https://huggingface.co/microsoft/Phi-3-medium-4k-instruct +- **Gemma**: https://ai.google.dev/gemma + +--- + +*100% open source. 0% vendor lock-in.* diff --git a/codespace_agents/README.md b/codespace_agents/README.md new file mode 100644 index 0000000..47cc07c --- /dev/null +++ b/codespace_agents/README.md @@ -0,0 +1,195 @@ +# BlackRoad AI Agents + +> **Collaborative AI agents for code, design, and operations** + +--- + +## Overview + +This directory contains configuration and code for BlackRoad's collaborative AI agents. These agents work together to handle coding tasks, design work, infrastructure management, and more. + +## Agent Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ AGENT MESH NETWORK โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ CODER โ”‚โ”€โ”€โ”€โ”‚ DESIGNERโ”‚โ”€โ”€โ”€โ”‚ OPS โ”‚ โ”‚ +โ”‚ โ”‚ AGENT โ”‚ โ”‚ AGENT โ”‚ โ”‚ AGENT โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ ORCHESTRATORโ”‚ โ”‚ +โ”‚ โ”‚ (Router) โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ–ผ โ–ผ โ–ผ โ”‚ +โ”‚ [Llama 3.2] [Mistral] [CodeLlama] โ”‚ +โ”‚ [Qwen2.5] [DeepSeek] [Phi-3] โ”‚ +โ”‚ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Available Agents + +### 1. Coder Agent (`coder`) +- **Model**: CodeLlama, DeepSeek-Coder, Qwen2.5-Coder +- **Role**: Write, review, and refactor code +- **Capabilities**: + - Code generation and completion + - Bug fixes and debugging + - Code review and suggestions + - Documentation generation + - Test case creation + +### 2. Designer Agent (`designer`) +- **Model**: Llama 3.2, GPT-4 Vision +- **Role**: Design UI/UX, create assets +- **Capabilities**: + - UI component design + - Color palette generation + - Layout suggestions + - Accessibility checks + - Design system maintenance + +### 3. Ops Agent (`ops`) +- **Model**: Mistral, Llama 3.2 +- **Role**: Infrastructure and deployment +- **Capabilities**: + - DevOps automation + - CI/CD pipeline management + - Infrastructure as Code + - Monitoring and alerts + - Deployment strategies + +### 4. Analyst Agent (`analyst`) +- **Model**: Llama 3.2, Phi-3 +- **Role**: Data analysis and insights +- **Capabilities**: + - Data processing + - Metrics analysis + - Report generation + - Anomaly detection + - Predictive analytics + +### 5. Docs Agent (`docs`) +- **Model**: Gemma 2, Llama 3.2 +- **Role**: Documentation and content +- **Capabilities**: + - Technical documentation + - API documentation + - Tutorial creation + - README generation + - Knowledge base management + +## Open Source Models + +All agents use 100% open source, commercially-friendly AI models: + +| Model | Size | Use Case | License | +|-------|------|----------|---------| +| **Llama 3.2** | 3B, 1B | General purpose, chat | Meta (Commercial OK) | +| **CodeLlama** | 7B, 13B | Code generation | Meta (Commercial OK) | +| **Mistral** | 7B | Instruction following | Apache 2.0 | +| **Qwen2.5-Coder** | 7B | Code generation | Apache 2.0 | +| **DeepSeek-Coder** | 6.7B | Code completion | MIT | +| **Phi-3** | 3.8B | Reasoning, analysis | MIT | +| **Gemma 2** | 2B, 9B | Text generation | Gemma Terms (Commercial OK) | + +## Agent Communication + +Agents communicate via: +- **MCP (Model Context Protocol)**: For tool use and context sharing +- **WebSockets**: For real-time collaboration +- **Cloudflare KV**: For persistent state +- **Signals**: For event notifications + +## Quick Start + +### Start All Agents +```bash +python -m agents.orchestrator start +``` + +### Chat with Specific Agent +```bash +# Code-related task +python -m agents.chat --agent coder "Refactor this function" + +# Design task +python -m agents.chat --agent designer "Create a color palette" + +# Ops task +python -m agents.chat --agent ops "Deploy to production" +``` + +### Group Collaboration +```bash +# Start a collaborative session +python -m agents.collaborate \ + --agents coder,designer,ops \ + --task "Build a new dashboard feature" +``` + +## Configuration + +Each agent is configured in `codespace_agents/config/`: +- `coder.yaml` - Coder agent settings +- `designer.yaml` - Designer agent settings +- `ops.yaml` - Ops agent settings +- `analyst.yaml` - Analyst agent settings +- `docs.yaml` - Docs agent settings + +## Development + +### Adding a New Agent +1. Create configuration in `codespace_agents/config/new-agent.yaml` +2. Implement agent logic in `codespace_agents/new_agent.py` +3. Register in `codespace_agents/orchestrator.py` +4. Update this README + +### Testing Agents +```bash +# Test individual agent +python -m codespace_agents.test --agent coder + +# Test collaboration +python -m codespace_agents.test --scenario collaboration +``` + +## Integration with Cloudflare Workers + +Agents can be deployed as edge workers: +```bash +cd codespace_agents/workers +wrangler deploy coder-agent +wrangler deploy designer-agent +wrangler deploy ops-agent +``` + +## Signals + +Agents emit signals to the BlackRoad OS: +``` +๐Ÿค– AI โ†’ OS : agent_started, agent=coder +๐Ÿ’ฌ AI โ†’ OS : agent_response, agent=coder, task=complete +๐Ÿ”„ AI โ†’ OS : agent_collaboration, agents=[coder,designer] +๐Ÿ“Š AI โ†’ OS : agent_metrics, tokens=1234, cost=0.01 +``` + +## Architecture Notes + +- **Local First**: All models run via Ollama locally when possible +- **Cloud Fallback**: Falls back to OpenAI/Anthropic APIs if needed +- **Cost Tracking**: Every request is logged with cost/token usage +- **Parallel Execution**: Agents can work on different tasks simultaneously +- **State Management**: Shared context via MCP and Cloudflare KV + +--- + +*Agents that work together, build together.* diff --git a/codespace_agents/__init__.py b/codespace_agents/__init__.py new file mode 100644 index 0000000..cba8ea8 --- /dev/null +++ b/codespace_agents/__init__.py @@ -0,0 +1,11 @@ +""" +BlackRoad Codespace Agents + +Collaborative AI agents for code, design, operations, documentation, and analysis. +""" + +__version__ = "1.0.0" + +from .orchestrator import AgentOrchestrator + +__all__ = ["AgentOrchestrator"] diff --git a/codespace_agents/chat.py b/codespace_agents/chat.py new file mode 100644 index 0000000..da50069 --- /dev/null +++ b/codespace_agents/chat.py @@ -0,0 +1,142 @@ +""" +BlackRoad Agent Chat Interface + +Simple CLI for chatting with specific agents. +""" + +import asyncio +import argparse +import sys +from pathlib import Path + +# Add parent directory to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from codespace_agents.orchestrator import AgentOrchestrator + + +class AgentChat: + """Interactive chat interface for agents""" + + def __init__(self, orchestrator: AgentOrchestrator): + self.orchestrator = orchestrator + + async def chat_with_agent(self, agent_id: str, message: str = None): + """Chat with a specific agent""" + agent = self.orchestrator.get_agent(agent_id) + + if not agent: + print(f"โŒ Agent not found: {agent_id}") + print(f"Available agents: {', '.join(self.orchestrator.list_agents())}") + return + + print(f"\n๐Ÿ’ฌ Chatting with {agent.name}") + print(f"Model: {agent.config['models']['primary']}") + print(f"Type 'exit' or 'quit' to end chat\n") + + # If message provided, use it and exit + if message: + print(f"You: {message}") + result = await self.orchestrator.execute_task(message, agent_id) + print(f"{agent.name}: {result.get('response', 'No response')}") + return + + # Interactive mode + while True: + try: + user_input = input("You: ").strip() + + if user_input.lower() in ["exit", "quit", "bye"]: + print(f"๐Ÿ‘‹ Goodbye from {agent.name}!") + break + + if not user_input: + continue + + result = await self.orchestrator.execute_task(user_input, agent_id) + print(f"{agent.name}: {result.get('response', 'No response')}\n") + + except KeyboardInterrupt: + print(f"\n๐Ÿ‘‹ Goodbye from {agent.name}!") + break + except Exception as e: + print(f"โŒ Error: {e}\n") + + +async def main(): + parser = argparse.ArgumentParser( + description="Chat with BlackRoad AI agents" + ) + parser.add_argument( + "--agent", + type=str, + help="Agent to chat with (coder, designer, ops, docs, analyst)" + ) + parser.add_argument( + "--list", + action="store_true", + help="List available agents" + ) + parser.add_argument( + "message", + nargs="*", + help="Message to send (interactive mode if not provided)" + ) + + args = parser.parse_args() + + # Initialize orchestrator + orchestrator = AgentOrchestrator() + + # List agents if requested + if args.list: + print("\n๐Ÿค– Available Agents:\n") + for agent_id in orchestrator.list_agents(): + agent = orchestrator.get_agent(agent_id) + print(f" {agent_id:12} - {agent.name:15} ({agent.config['models']['primary']})") + print(f" {agent.config['description']}") + print() + return + + # Determine agent + if not args.agent: + # If no agent specified, auto-route based on message + if args.message: + message = " ".join(args.message) + agent_id = orchestrator.route_task(message) + agent = orchestrator.get_agent(agent_id) + print(f"๐ŸŽฏ Auto-routing to: {agent.name}") + result = await orchestrator.execute_task(message, agent_id) + print(f"\n{agent.name}: {result.get('response', 'No response')}") + else: + # Interactive mode - let user choose + print("\n๐Ÿค– Available Agents:") + agents = orchestrator.list_agents() + for i, agent_id in enumerate(agents, 1): + agent = orchestrator.get_agent(agent_id) + print(f" {i}. {agent.name} - {agent.config['description']}") + + try: + choice = input("\nSelect agent (1-{}): ".format(len(agents))) + idx = int(choice) - 1 + if 0 <= idx < len(agents): + agent_id = agents[idx] + else: + print("Invalid choice") + return + except (ValueError, KeyboardInterrupt): + print("\nExiting...") + return + + chat = AgentChat(orchestrator) + await chat.chat_with_agent(agent_id) + return + + # Chat with specified agent + message = " ".join(args.message) if args.message else None + chat = AgentChat(orchestrator) + await chat.chat_with_agent(args.agent, message) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/codespace_agents/collaborate.py b/codespace_agents/collaborate.py new file mode 100644 index 0000000..af7e0c8 --- /dev/null +++ b/codespace_agents/collaborate.py @@ -0,0 +1,194 @@ +""" +BlackRoad Agent Collaboration + +Enables multiple agents to work together on complex tasks. +""" + +import asyncio +import argparse +import sys +from pathlib import Path +from typing import List +from datetime import datetime + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from codespace_agents.orchestrator import AgentOrchestrator + + +class CollaborativeSession: + """A collaborative coding/working session with multiple agents""" + + def __init__(self, orchestrator: AgentOrchestrator, agent_ids: List[str]): + self.orchestrator = orchestrator + self.agent_ids = agent_ids + self.session_log = [] + self.start_time = datetime.now() + + def log_message(self, agent_id: str, message: str): + """Log a message in the session""" + timestamp = datetime.now() + self.session_log.append({ + "timestamp": timestamp, + "agent": agent_id, + "message": message + }) + + async def broadcast_task(self, task: str): + """Broadcast a task to all agents in the session""" + print(f"\n๐Ÿ“ข Broadcasting task to all agents:") + print(f" {task}\n") + + results = [] + for agent_id in self.agent_ids: + agent = self.orchestrator.get_agent(agent_id) + if agent: + print(f"๐Ÿค– {agent.name} is processing...") + result = await self.orchestrator.execute_task(task, agent_id) + results.append(result) + self.log_message(agent_id, result.get("response", "")) + + return results + + async def sequential_handoff(self, task: str): + """ + Execute task with sequential agent handoffs. + Each agent passes work to the next. + """ + print(f"\n๐Ÿ”„ Sequential handoff for task:") + print(f" {task}\n") + + current_task = task + results = [] + + for i, agent_id in enumerate(self.agent_ids): + agent = self.orchestrator.get_agent(agent_id) + if not agent: + continue + + print(f"{'โ†’' * (i + 1)} {agent.name}") + + # Execute task + result = await self.orchestrator.execute_task(current_task, agent_id) + results.append(result) + self.log_message(agent_id, result.get("response", "")) + + # Check if this agent hands off to next + collaborators = self.orchestrator.get_collaborators(agent_id, current_task) + if collaborators and i < len(self.agent_ids) - 1: + next_agent_id = self.agent_ids[i + 1] + if next_agent_id in collaborators: + current_task = f"Continue from {agent.name}: {current_task}" + + return results + + async def chat_session(self): + """Interactive group chat with all agents""" + print(f"\n๐Ÿ’ฌ Group Chat Session Started") + print(f"Participants: {', '.join([self.orchestrator.get_agent(a).name for a in self.agent_ids if self.orchestrator.get_agent(a)])}") + print(f"Type 'exit' to end session\n") + + while True: + try: + user_input = input("You: ").strip() + + if user_input.lower() in ["exit", "quit", "bye"]: + self.print_summary() + break + + if not user_input: + continue + + # Route to most appropriate agent + agent_id = self.orchestrator.route_task(user_input) + + # But also get input from others if relevant + primary_agent = self.orchestrator.get_agent(agent_id) + result = await self.orchestrator.execute_task(user_input, agent_id) + + print(f"{primary_agent.name}: {result.get('response', 'No response')}") + self.log_message(agent_id, result.get("response", "")) + + # Check if other agents should chime in + collaborators = self.orchestrator.get_collaborators(agent_id, user_input) + for collab_id in collaborators: + if collab_id in self.agent_ids and collab_id != agent_id: + collab_agent = self.orchestrator.get_agent(collab_id) + print(f"{collab_agent.name}: [Would provide input here]") + + print() + + except KeyboardInterrupt: + self.print_summary() + break + except Exception as e: + print(f"โŒ Error: {e}\n") + + def print_summary(self): + """Print session summary""" + duration = datetime.now() - self.start_time + print(f"\n๐Ÿ“Š Session Summary") + print(f"Duration: {duration}") + print(f"Messages: {len(self.session_log)}") + print(f"Participants: {len(self.agent_ids)}") + print(f"\n๐Ÿ‘‹ Session ended") + + +async def main(): + parser = argparse.ArgumentParser( + description="Collaborative agent sessions" + ) + parser.add_argument( + "--agents", + type=str, + help="Comma-separated list of agents (e.g., coder,designer,ops)" + ) + parser.add_argument( + "--task", + type=str, + help="Task for agents to work on" + ) + parser.add_argument( + "--mode", + choices=["broadcast", "sequential", "chat"], + default="chat", + help="Collaboration mode" + ) + + args = parser.parse_args() + + orchestrator = AgentOrchestrator() + + # Determine agents + if args.agents: + agent_ids = [a.strip() for a in args.agents.split(",")] + else: + # Default to all agents + agent_ids = orchestrator.list_agents() + + # Validate agents exist + valid_agents = [] + for agent_id in agent_ids: + if orchestrator.get_agent(agent_id): + valid_agents.append(agent_id) + else: + print(f"โš ๏ธ Agent not found: {agent_id}") + + if not valid_agents: + print("โŒ No valid agents specified") + return + + # Create session + session = CollaborativeSession(orchestrator, valid_agents) + + # Execute based on mode + if args.mode == "broadcast" and args.task: + await session.broadcast_task(args.task) + elif args.mode == "sequential" and args.task: + await session.sequential_handoff(args.task) + else: + await session.chat_session() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/codespace_agents/config/analyst.yaml b/codespace_agents/config/analyst.yaml new file mode 100644 index 0000000..c7ea226 --- /dev/null +++ b/codespace_agents/config/analyst.yaml @@ -0,0 +1,118 @@ +# Analyst Agent Configuration + +name: "Analyst" +agent_id: "analyst" +version: "1.0.0" + +description: "Data analysis, metrics, and insights generation agent" + +models: + primary: "phi3:latest" + fallback: + - "llama3.2:latest" + - "mistral:latest" + + cloud_fallback: + - provider: "openai" + model: "gpt-4o" + - provider: "anthropic" + model: "claude-3-5-sonnet-20241022" + +capabilities: + - data_analysis + - metrics_calculation + - trend_detection + - anomaly_detection + - report_generation + - visualization_suggestions + - predictive_analytics + - performance_analysis + +analysis_tools: + - pandas + - numpy + - scipy + - scikit-learn + - matplotlib + - seaborn + +system_prompt: | + You are Analyst, a BlackRoad AI agent specialized in data analysis and insights. + + Your capabilities: + - Analyze data to extract meaningful insights + - Calculate key metrics and KPIs + - Detect trends and patterns in data + - Identify anomalies and outliers + - Generate comprehensive reports + - Suggest visualizations for data + - Perform statistical analysis + - Make data-driven recommendations + + Guidelines: + - Use statistical rigor in analysis + - Explain findings clearly and concisely + - Provide context for numbers and trends + - Suggest actionable recommendations + - Visualize data effectively + - Consider multiple interpretations + - Document analysis methodology + - Flag data quality issues + + You support all other agents: + - Coder: Analyze code performance metrics + - Ops: Monitor system metrics and alerts + - Designer: Analyze user engagement data + - Docs: Provide metrics for documentation impact + +temperature: 0.4 +max_tokens: 4096 +top_p: 0.9 + +context_window: 16384 + +tools: + - name: "query_metrics" + description: "Fetch metrics from sources" + - name: "calculate_stats" + description: "Perform statistical calculations" + - name: "detect_anomalies" + description: "Identify unusual patterns" + - name: "generate_report" + description: "Create analysis reports" + - name: "create_visualization" + description: "Generate charts and graphs" + +signals: + source: "AI" + target: "OS" + events: + - "analysis_complete" + - "anomaly_detected" + - "report_generated" + - "trend_identified" + - "threshold_exceeded" + +collaboration: + can_request_help_from: + - "coder" + - "ops" + - "docs" + + shares_context_with: + - "all" + + handoff_triggers: + - pattern: "implement|automate|code" + target_agent: "coder" + - pattern: "document|explain|report" + target_agent: "docs" + +rate_limits: + requests_per_minute: 30 + tokens_per_hour: 600000 + +cost_tracking: + enabled: true + budget_alert_threshold: 4.00 + currency: "USD" diff --git a/codespace_agents/config/coder.yaml b/codespace_agents/config/coder.yaml new file mode 100644 index 0000000..9164d62 --- /dev/null +++ b/codespace_agents/config/coder.yaml @@ -0,0 +1,145 @@ +# Coder Agent Configuration + +name: "Coder" +agent_id: "coder" +version: "1.0.0" + +description: "Expert code generation, review, and refactoring agent" + +models: + primary: "qwen2.5-coder:latest" + fallback: + - "deepseek-coder:latest" + - "codellama:latest" + + # Cloud fallback if local models unavailable + cloud_fallback: + - provider: "openai" + model: "gpt-4o-mini" + - provider: "anthropic" + model: "claude-3-5-haiku-20241022" + +capabilities: + - code_generation + - code_review + - refactoring + - bug_fixing + - test_generation + - documentation + - debugging + - optimization + +languages: + - python + - javascript + - typescript + - go + - rust + - java + - cpp + - html + - css + - sql + - bash + +system_prompt: | + You are Coder, a BlackRoad AI agent specialized in software development. + + Your capabilities: + - Write clean, efficient, well-documented code + - Review code for bugs, security issues, and best practices + - Refactor code to improve readability and performance + - Generate comprehensive test cases + - Debug complex issues systematically + - Explain code concepts clearly + + Guidelines: + - Always follow project coding standards + - Prioritize security and performance + - Write self-documenting code with clear variable names + - Include error handling and edge cases + - Suggest improvements when reviewing code + - Use modern language features appropriately + + Agent Communication: + You work collaboratively with other BlackRoad agents and can ask them questions directly: + - Designer: Ask about UI/UX design, color schemes, layouts, accessibility + - Ops: Ask about deployment, infrastructure, DevOps practices + - Docs: Ask about documentation standards, how to explain concepts + - Analyst: Ask about performance metrics, data analysis + + When you encounter a task that requires another agent's expertise, use the ask_agent + tool to consult with them. For example: + - "I need a color palette for this component" โ†’ Ask Designer + - "How should I deploy this?" โ†’ Ask Ops + - "What's the best way to document this API?" โ†’ Ask Docs + - "Is this function performing well?" โ†’ Ask Analyst + +temperature: 0.3 +max_tokens: 4096 +top_p: 0.95 + +context_window: 16384 + +# Tools available to this agent +tools: + - name: "execute_code" + description: "Run code in a sandbox" + - name: "read_file" + description: "Read file contents" + - name: "write_file" + description: "Write or modify files" + - name: "search_code" + description: "Search codebase" + - name: "run_tests" + description: "Execute test suite" + - name: "lint_code" + description: "Run linters and formatters" + - name: "git_operations" + description: "Git commands" + - name: "ask_agent" + description: "Ask another agent a question or request their expertise" + parameters: + - target_agent: "designer|ops|docs|analyst" + - question: "string" + - context: "optional dict" + +# Signal emissions +signals: + source: "AI" + target: "OS" + events: + - "code_generated" + - "code_reviewed" + - "tests_created" + - "bug_fixed" + - "refactoring_complete" + +# Collaboration settings +collaboration: + can_request_help_from: + - "designer" + - "ops" + - "docs" + + shares_context_with: + - "all" + + handoff_triggers: + - pattern: "design|ui|ux|style" + target_agent: "designer" + - pattern: "deploy|docker|kubernetes|ci/cd" + target_agent: "ops" + - pattern: "document|readme|tutorial" + target_agent: "docs" + +# Note: Rate limiting and cost tracking are configured but not yet implemented. +# These values are reserved for future functionality. +rate_limits: + requests_per_minute: 60 + tokens_per_hour: 1000000 + +cost_tracking: + enabled: false # Not yet implemented + budget_alert_threshold: 5.00 + currency: "USD" diff --git a/codespace_agents/config/designer.yaml b/codespace_agents/config/designer.yaml new file mode 100644 index 0000000..f05d6ea --- /dev/null +++ b/codespace_agents/config/designer.yaml @@ -0,0 +1,115 @@ +# Designer Agent Configuration + +name: "Designer" +agent_id: "designer" +version: "1.0.0" + +description: "UI/UX design and visual assets creation agent" + +models: + primary: "llama3.2:latest" + fallback: + - "gemma2:latest" + - "mistral:latest" + + cloud_fallback: + - provider: "openai" + model: "gpt-4o" + - provider: "anthropic" + model: "claude-3-5-sonnet-20241022" + +capabilities: + - ui_design + - ux_consultation + - color_palettes + - layout_design + - component_design + - accessibility_audit + - design_system + - asset_creation + +design_frameworks: + - tailwindcss + - material-ui + - chakra-ui + - bootstrap + - ant-design + +system_prompt: | + You are Designer, a BlackRoad AI agent specialized in UI/UX design. + + Your capabilities: + - Create beautiful, accessible user interfaces + - Design cohesive color palettes and themes + - Suggest optimal layouts and component structures + - Ensure accessibility standards (WCAG 2.1 AA) + - Maintain design system consistency + - Provide UX best practices and usability guidance + + Guidelines: + - Prioritize user experience and accessibility + - Follow design system guidelines + - Consider responsive design for all screen sizes + - Use semantic HTML and ARIA labels + - Suggest modern, clean aesthetics + - Balance beauty with functionality + + You work with Coder agent to implement designs. When you design a component, + provide clear specifications that Coder can implement. + +temperature: 0.7 +max_tokens: 4096 +top_p: 0.9 + +context_window: 8192 + +tools: + - name: "generate_color_palette" + description: "Create color schemes" + - name: "analyze_contrast" + description: "Check color contrast ratios" + - name: "suggest_layout" + description: "Recommend layout structures" + - name: "check_accessibility" + description: "Audit for WCAG compliance" + - name: "read_design_system" + description: "Access design tokens" + - name: "ask_agent" + description: "Ask another agent a question or request their expertise" + parameters: + - target_agent: "coder|docs|analyst" + - question: "string" + - context: "optional dict" + +signals: + source: "AI" + target: "OS" + events: + - "design_created" + - "palette_generated" + - "accessibility_checked" + - "component_designed" + +collaboration: + can_request_help_from: + - "coder" + - "docs" + + shares_context_with: + - "coder" + - "docs" + + handoff_triggers: + - pattern: "implement|code|function" + target_agent: "coder" + - pattern: "document|guide|tutorial" + target_agent: "docs" + +rate_limits: + requests_per_minute: 40 + tokens_per_hour: 500000 + +cost_tracking: + enabled: true + budget_alert_threshold: 3.00 + currency: "USD" diff --git a/codespace_agents/config/docs.yaml b/codespace_agents/config/docs.yaml new file mode 100644 index 0000000..1605ace --- /dev/null +++ b/codespace_agents/config/docs.yaml @@ -0,0 +1,117 @@ +# Docs Agent Configuration + +name: "Docs" +agent_id: "docs" +version: "1.0.0" + +description: "Technical documentation and content creation agent" + +models: + primary: "gemma2:latest" + fallback: + - "llama3.2:latest" + - "mistral:latest" + + cloud_fallback: + - provider: "anthropic" + model: "claude-3-5-sonnet-20241022" + - provider: "openai" + model: "gpt-4o" + +capabilities: + - technical_documentation + - api_documentation + - tutorial_creation + - readme_generation + - code_comments + - user_guides + - release_notes + - knowledge_base + +documentation_formats: + - markdown + - restructuredtext + - asciidoc + - openapi + - swagger + +system_prompt: | + You are Docs, a BlackRoad AI agent specialized in technical documentation. + + Your capabilities: + - Write clear, comprehensive technical documentation + - Create API documentation from code + - Develop tutorials and guides for users + - Generate README files for projects + - Write release notes and changelogs + - Maintain knowledge bases + - Create inline code documentation + - Translate technical concepts for different audiences + + Guidelines: + - Write for your audience (developers, users, stakeholders) + - Use clear, concise language + - Include practical examples and code snippets + - Structure content logically with clear headings + - Link to related documentation + - Keep documentation up-to-date with code + - Use proper markdown formatting + - Include diagrams where helpful + + You work closely with all agents: + - Coder: Document their code and APIs + - Designer: Create design system documentation + - Ops: Write deployment and infrastructure docs + - Analyst: Explain metrics and insights + +temperature: 0.6 +max_tokens: 4096 +top_p: 0.9 + +context_window: 16384 + +tools: + - name: "read_code" + description: "Analyze code for documentation" + - name: "generate_api_docs" + description: "Create API documentation" + - name: "create_diagrams" + description: "Generate mermaid diagrams" + - name: "check_links" + description: "Verify documentation links" + - name: "format_markdown" + description: "Format and lint markdown" + +signals: + source: "AI" + target: "OS" + events: + - "docs_created" + - "api_docs_generated" + - "tutorial_published" + - "readme_updated" + +collaboration: + can_request_help_from: + - "coder" + - "designer" + - "ops" + - "analyst" + + shares_context_with: + - "all" + + handoff_triggers: + - pattern: "code|implement|fix" + target_agent: "coder" + - pattern: "design|ui" + target_agent: "designer" + +rate_limits: + requests_per_minute: 40 + tokens_per_hour: 800000 + +cost_tracking: + enabled: true + budget_alert_threshold: 4.00 + currency: "USD" diff --git a/codespace_agents/config/ops.yaml b/codespace_agents/config/ops.yaml new file mode 100644 index 0000000..d38e7a8 --- /dev/null +++ b/codespace_agents/config/ops.yaml @@ -0,0 +1,125 @@ +# Ops Agent Configuration + +name: "Ops" +agent_id: "ops" +version: "1.0.0" + +description: "DevOps, infrastructure, and deployment automation agent" + +models: + primary: "mistral:latest" + fallback: + - "llama3.2:latest" + - "phi3:latest" + + cloud_fallback: + - provider: "anthropic" + model: "claude-3-5-haiku-20241022" + - provider: "openai" + model: "gpt-4o-mini" + +capabilities: + - infrastructure_management + - ci_cd_pipelines + - deployment_automation + - monitoring_setup + - security_configuration + - container_orchestration + - cloud_resource_management + - incident_response + +platforms: + - cloudflare + - github_actions + - docker + - kubernetes + - vercel + - railway + - aws + - digitalocean + +system_prompt: | + You are Ops, a BlackRoad AI agent specialized in DevOps and infrastructure. + + Your capabilities: + - Design and manage CI/CD pipelines + - Deploy applications to various platforms + - Configure infrastructure as code + - Set up monitoring and alerting + - Implement security best practices + - Optimize resource usage and costs + - Troubleshoot deployment issues + - Automate operational tasks + + Guidelines: + - Prioritize security and reliability + - Use infrastructure as code (IaC) principles + - Implement proper monitoring and logging + - Follow least privilege access principles + - Optimize for cost efficiency + - Document all infrastructure changes + - Plan for disaster recovery + - Use managed services when appropriate + + Key infrastructure: + - Cloudflare Workers for edge compute + - GitHub Actions for CI/CD + - Tailscale for private networking + - Pi cluster for local compute + + Coordinate with Coder for application code and Designer for frontend assets. + +temperature: 0.2 +max_tokens: 4096 +top_p: 0.9 + +context_window: 16384 + +tools: + - name: "deploy_worker" + description: "Deploy Cloudflare Worker" + - name: "run_workflow" + description: "Trigger GitHub Action" + - name: "check_health" + description: "Query service health" + - name: "view_logs" + description: "Access application logs" + - name: "manage_secrets" + description: "Handle secrets/env vars" + - name: "scale_resources" + description: "Adjust resource allocation" + - name: "setup_monitoring" + description: "Configure monitoring" + +signals: + source: "AI" + target: "OS" + events: + - "deployment_complete" + - "infrastructure_updated" + - "pipeline_configured" + - "health_check_passed" + - "incident_resolved" + +collaboration: + can_request_help_from: + - "coder" + - "analyst" + + shares_context_with: + - "all" + + handoff_triggers: + - pattern: "code|fix|implement" + target_agent: "coder" + - pattern: "analyze|metrics|performance" + target_agent: "analyst" + +rate_limits: + requests_per_minute: 30 + tokens_per_hour: 500000 + +cost_tracking: + enabled: true + budget_alert_threshold: 5.00 + currency: "USD" diff --git a/codespace_agents/examples.py b/codespace_agents/examples.py new file mode 100644 index 0000000..1174eea --- /dev/null +++ b/codespace_agents/examples.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 +""" +Example: Building a feature with collaborative agents + +This example demonstrates how multiple agents work together to build, +document, and deploy a new feature. +""" + +import asyncio +import sys +from pathlib import Path + +# Add parent to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from codespace_agents.orchestrator import AgentOrchestrator + + +async def example_feature_development(): + """ + Example: Build a REST API endpoint with multiple agents collaborating + """ + print("=" * 60) + print("Example: Building a REST API Feature") + print("=" * 60) + print() + + orchestrator = AgentOrchestrator() + + # Phase 1: Design (Designer Agent) + print("๐Ÿ“ Phase 1: Design") + print("-" * 60) + design_task = "Design an API endpoint for user authentication with JWT tokens" + result = await orchestrator.execute_task(design_task, "designer") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + # Phase 2: Implementation (Coder Agent) + print("๐Ÿ’ป Phase 2: Implementation") + print("-" * 60) + code_task = "Implement the authentication API with FastAPI and JWT tokens" + result = await orchestrator.execute_task(code_task, "coder") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + # Phase 3: Documentation (Docs Agent) + print("๐Ÿ“ Phase 3: Documentation") + print("-" * 60) + docs_task = "Create API documentation for the authentication endpoint" + result = await orchestrator.execute_task(docs_task, "docs") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + # Phase 4: Deployment (Ops Agent) + print("๐Ÿš€ Phase 4: Deployment") + print("-" * 60) + deploy_task = "Deploy the authentication API to Cloudflare Workers" + result = await orchestrator.execute_task(deploy_task, "ops") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + # Phase 5: Analytics (Analyst Agent) + print("๐Ÿ“Š Phase 5: Analytics") + print("-" * 60) + metrics_task = "Set up monitoring for the authentication API" + result = await orchestrator.execute_task(metrics_task, "analyst") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + print("=" * 60) + print("โœจ Feature Complete!") + print("All agents collaborated successfully") + print("=" * 60) + + +async def example_bug_fix(): + """ + Example: Fix a bug with agent collaboration + """ + print("\n\n") + print("=" * 60) + print("Example: Bug Fix Workflow") + print("=" * 60) + print() + + orchestrator = AgentOrchestrator() + + # Step 1: Analyze + print("๐Ÿ” Step 1: Analyze the issue") + print("-" * 60) + analyze_task = "Why is the login endpoint returning 500 errors?" + result = await orchestrator.execute_task(analyze_task, "analyst") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + # Step 2: Fix + print("๐Ÿ”ง Step 2: Fix the code") + print("-" * 60) + fix_task = "Fix the authentication token validation logic" + result = await orchestrator.execute_task(fix_task, "coder") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + # Step 3: Update docs + print("๐Ÿ“ Step 3: Update documentation") + print("-" * 60) + docs_task = "Update changelog with bug fix details" + result = await orchestrator.execute_task(docs_task, "docs") + print(f"โœ“ {result['agent']}: {result['response']}") + print() + + print("=" * 60) + print("โœ… Bug Fixed!") + print("=" * 60) + + +async def example_auto_routing(): + """ + Example: Let the orchestrator automatically route tasks + """ + print("\n\n") + print("=" * 60) + print("Example: Automatic Task Routing") + print("=" * 60) + print() + + orchestrator = AgentOrchestrator() + + tasks = [ + "Create a color palette for a dashboard", + "Write unit tests for the user service", + "Set up CI/CD pipeline for the project", + "Analyze user engagement metrics", + "Write a tutorial on API authentication", + ] + + for task in tasks: + agent_id = orchestrator.route_task(task) + agent = orchestrator.get_agent(agent_id) + print(f"๐Ÿ“‹ Task: {task}") + print(f" โ†’ Routed to: {agent.name} ({agent_id})") + print() + + +async def main(): + """Run all examples""" + print("\n") + print("๐Ÿค– BlackRoad Agent Collaboration Examples") + print("=" * 60) + print() + print("This demonstrates how agents work together on real tasks.") + print() + + try: + # Example 1: Feature development + await example_feature_development() + + # Example 2: Bug fix + await example_bug_fix() + + # Example 3: Auto-routing + await example_auto_routing() + + print("\n") + print("=" * 60) + print("Examples Complete!") + print() + print("Try it yourself:") + print(" python -m codespace_agents.chat --agent coder") + print(" python -m codespace_agents.collaborate") + print("=" * 60) + print() + + except KeyboardInterrupt: + print("\n\n๐Ÿ‘‹ Examples interrupted") + except Exception as e: + print(f"\nโŒ Error: {e}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/codespace_agents/orchestrator.py b/codespace_agents/orchestrator.py new file mode 100644 index 0000000..91ca9ff --- /dev/null +++ b/codespace_agents/orchestrator.py @@ -0,0 +1,473 @@ +""" +BlackRoad Agent Orchestrator + +Coordinates multiple AI agents working together on tasks. +""" + +import asyncio +import yaml +from pathlib import Path +from typing import Dict, List, Optional +from dataclasses import dataclass, field +from enum import Enum +from datetime import datetime +import uuid + + +class AgentStatus(Enum): + IDLE = "idle" + WORKING = "working" + WAITING = "waiting" + ERROR = "error" + + +@dataclass +class AgentMessage: + """Message sent between agents""" + message_id: str + from_agent: str + to_agent: str + content: str + timestamp: datetime + conversation_id: Optional[str] = None + reply_to: Optional[str] = None + message_type: str = "question" # question, answer, notification, request + + +@dataclass +class Agent: + """Represents an AI agent""" + agent_id: str + name: str + config: Dict + status: AgentStatus = AgentStatus.IDLE + current_task: Optional[str] = None + message_inbox: List[AgentMessage] = field(default_factory=list) + conversation_history: Dict[str, List[AgentMessage]] = field(default_factory=dict) + + +class AgentOrchestrator: + """ + Orchestrates multiple AI agents working together. + + Features: + - Load agent configurations + - Route tasks to appropriate agents + - Enable agent collaboration + - Track agent status and metrics + - Facilitate agent-to-agent communication + """ + + def __init__(self, config_dir: Optional[str] = None): + if config_dir is None: + # Default to a 'config' directory located alongside this module + self.config_dir = Path(__file__).parent / "config" + else: + self.config_dir = Path(config_dir) + self.agents: Dict[str, Agent] = {} + self.conversations: Dict[str, List[AgentMessage]] = {} + self.message_log: List[AgentMessage] = [] + self.load_agents() + + def load_agents(self): + """Load all agent configurations""" + if not self.config_dir.exists(): + print(f"โš ๏ธ Config directory not found: {self.config_dir}") + return + + for config_file in self.config_dir.glob("*.yaml"): + try: + with open(config_file) as f: + config = yaml.safe_load(f) + + agent_id = config["agent_id"] + agent = Agent( + agent_id=agent_id, + name=config["name"], + config=config + ) + self.agents[agent_id] = agent + print(f"โœ… Loaded agent: {agent.name} ({agent_id})") + + except Exception as e: + print(f"โŒ Failed to load {config_file}: {e}") + + def get_agent(self, agent_id: str) -> Optional[Agent]: + """Get an agent by ID""" + return self.agents.get(agent_id) + + def list_agents(self) -> List[str]: + """List all available agents""" + return list(self.agents.keys()) + + def route_task(self, task: str) -> str: + """ + Route a task to the most appropriate agent. + + Uses keyword matching to determine which agent should handle the task. + """ + task_lower = task.lower() + + # Coder keywords + if any(kw in task_lower for kw in [ + "code", "function", "class", "bug", "fix", "refactor", + "implement", "debug", "test", "python", "javascript" + ]): + return "coder" + + # Designer keywords + if any(kw in task_lower for kw in [ + "design", "ui", "ux", "color", "palette", "layout", + "component", "style", "css", "accessibility" + ]): + return "designer" + + # Ops keywords + if any(kw in task_lower for kw in [ + "deploy", "docker", "kubernetes", "ci/cd", "pipeline", + "infrastructure", "server", "cloud", "monitoring" + ]): + return "ops" + + # Docs keywords + if any(kw in task_lower for kw in [ + "document", "readme", "tutorial", "guide", "api doc", + "documentation", "explain", "write", "changelog" + ]): + return "docs" + + # Analyst keywords + if any(kw in task_lower for kw in [ + "analyze", "metrics", "data", "statistics", "report", + "trend", "anomaly", "performance", "insights" + ]): + return "analyst" + + # Default to coder for general tasks + return "coder" + + def get_collaborators(self, agent_id: str, task: str) -> List[str]: + """ + Determine which other agents should collaborate on a task. + """ + agent = self.get_agent(agent_id) + if not agent: + return [] + + collaborators = [] + + # Check handoff triggers in agent config + if "collaboration" in agent.config: + handoff_triggers = agent.config["collaboration"].get("handoff_triggers", []) + + for trigger in handoff_triggers: + pattern = trigger.get("pattern", "") + target = trigger.get("target_agent") + + if pattern and target and pattern.lower() in task.lower(): + if target not in collaborators: + collaborators.append(target) + + return collaborators + + async def send_message( + self, + from_agent_id: str, + to_agent_id: str, + content: str, + message_type: str = "question", + conversation_id: Optional[str] = None, + reply_to: Optional[str] = None + ) -> AgentMessage: + """ + Send a message from one agent to another. + + Args: + from_agent_id: ID of the sending agent + to_agent_id: ID of the receiving agent + content: Message content + message_type: Type of message (question, answer, notification, request) + conversation_id: Optional conversation thread ID + reply_to: Optional ID of message being replied to + + Returns: + AgentMessage object + """ + from_agent = self.get_agent(from_agent_id) + to_agent = self.get_agent(to_agent_id) + + if not from_agent or not to_agent: + raise ValueError(f"Invalid agent IDs: {from_agent_id} or {to_agent_id}") + + # Create conversation ID if not provided + if not conversation_id: + conversation_id = f"{from_agent_id}-{to_agent_id}-{uuid.uuid4().hex[:8]}" + + # Create message + message = AgentMessage( + message_id=uuid.uuid4().hex, + from_agent=from_agent_id, + to_agent=to_agent_id, + content=content, + timestamp=datetime.now(), + conversation_id=conversation_id, + reply_to=reply_to, + message_type=message_type + ) + + # Add to recipient's inbox + to_agent.message_inbox.append(message) + + # Update conversation history for both agents + if conversation_id not in from_agent.conversation_history: + from_agent.conversation_history[conversation_id] = [] + if conversation_id not in to_agent.conversation_history: + to_agent.conversation_history[conversation_id] = [] + + from_agent.conversation_history[conversation_id].append(message) + to_agent.conversation_history[conversation_id].append(message) + + # Track globally + if conversation_id not in self.conversations: + self.conversations[conversation_id] = [] + self.conversations[conversation_id].append(message) + self.message_log.append(message) + + print(f"๐Ÿ’ฌ {from_agent.name} โ†’ {to_agent.name}: {content[:50]}...") + + return message + + async def ask_agent( + self, + asking_agent_id: str, + target_agent_id: str, + question: str, + context: Optional[Dict] = None + ) -> Dict: + """ + Have one agent ask another agent a question. + + Args: + asking_agent_id: ID of the agent asking + target_agent_id: ID of the agent being asked + question: The question to ask + context: Optional context about the question + + Returns: + Response from the target agent + """ + asking_agent = self.get_agent(asking_agent_id) + target_agent = self.get_agent(target_agent_id) + + if not asking_agent or not target_agent: + return { + "success": False, + "error": "Invalid agent IDs" + } + + print(f"\n๐Ÿค” {asking_agent.name} asks {target_agent.name}:") + print(f" Q: {question}") + + # Send question message + question_msg = await self.send_message( + from_agent_id=asking_agent_id, + to_agent_id=target_agent_id, + content=question, + message_type="question" + ) + + # Have target agent process the question + # Prepare enriched question with context + enriched_question = question + if context: + context_str = "\n".join([f"{k}: {v}" for k, v in context.items()]) + enriched_question = f"{question}\n\nContext:\n{context_str}" + + # Target agent processes the question + response = await self.execute_task(enriched_question, target_agent_id) + + # Send answer back + answer_msg = await self.send_message( + from_agent_id=target_agent_id, + to_agent_id=asking_agent_id, + content=response.get("response", ""), + message_type="answer", + conversation_id=question_msg.conversation_id, + reply_to=question_msg.message_id + ) + + print(f" A: {response.get('response', '')[:80]}...") + + return { + "success": True, + "question": question, + "answer": response.get("response", ""), + "conversation_id": question_msg.conversation_id, + "question_message": question_msg, + "answer_message": answer_msg, + "target_agent": target_agent.name + } + + def get_conversation(self, conversation_id: str) -> List[AgentMessage]: + """Get all messages in a conversation""" + return self.conversations.get(conversation_id, []) + + def get_agent_conversations(self, agent_id: str) -> Dict[str, List[AgentMessage]]: + """Get all conversations for an agent""" + agent = self.get_agent(agent_id) + if not agent: + return {} + return agent.conversation_history + + def get_agent_inbox(self, agent_id: str) -> List[AgentMessage]: + """Get unread messages for an agent""" + agent = self.get_agent(agent_id) + if not agent: + return [] + return agent.message_inbox + + def clear_agent_inbox(self, agent_id: str): + """Clear an agent's inbox""" + agent = self.get_agent(agent_id) + if agent: + agent.message_inbox.clear() + + async def execute_task( + self, + task: str, + agent_id: Optional[str] = None, + requesting_agent_id: Optional[str] = None + ) -> Dict: + """ + Execute a task using the appropriate agent(s). + + Args: + task: The task to execute + agent_id: Optional specific agent to use + requesting_agent_id: Optional ID of agent making the request + """ + # Route to agent if not specified + if not agent_id: + agent_id = self.route_task(task) + + agent = self.get_agent(agent_id) + if not agent: + return { + "success": False, + "error": f"Agent not found: {agent_id}" + } + + # Check for collaborators + collaborators = self.get_collaborators(agent_id, task) + + # Show who is working + if requesting_agent_id: + requesting_agent = self.get_agent(requesting_agent_id) + req_name = requesting_agent.name if requesting_agent else requesting_agent_id + print(f"๐Ÿค– {agent.name} (requested by {req_name}): {task[:60]}...") + else: + print(f"๐Ÿค– {agent.name} is working on: {task}") + + if collaborators: + collab_names = [self.agents[c].name for c in collaborators if c in self.agents] + print(f"๐Ÿค Collaborating with: {', '.join(collab_names)}") + + # Update agent status + agent.status = AgentStatus.WORKING + agent.current_task = task + + # TODO: Implement actual model inference + # This requires integration with Ollama API or other model providers. + # Example implementation: + # - Use ollama.chat() to call local models + # - Use OpenAI/Anthropic APIs as fallback + # - Parse model response and return structured data + # For now, returning mock response for demonstration + + # Build response mentioning agent capabilities + response_parts = [f"[{agent.name} - Mock Response] Task received and processed."] + + # Add note about agent-to-agent communication + if requesting_agent_id: + response_parts.append(f"Working on request from {requesting_agent_id}.") + + # Mention if consulting other agents + can_ask = agent.config.get("collaboration", {}).get("can_request_help_from", []) + if can_ask and any(trigger in task.lower() for trigger in ["help", "ask", "consult", "check"]): + response_parts.append(f"I can consult with {', '.join(can_ask)} if needed.") + + result = { + "success": True, + "agent": agent.name, + "agent_id": agent_id, + "task": task, + "collaborators": collaborators, + "can_request_help_from": can_ask, + "response": " ".join(response_parts), + "model": agent.config["models"]["primary"], + "requesting_agent": requesting_agent_id + } + + # Reset status + agent.status = AgentStatus.IDLE + agent.current_task = None + + return result + + def get_status(self) -> Dict: + """Get status of all agents""" + return { + "total_agents": len(self.agents), + "total_conversations": len(self.conversations), + "total_messages": len(self.message_log), + "agents": { + agent_id: { + "name": agent.name, + "status": agent.status.value, + "current_task": agent.current_task, + "unread_messages": len(agent.message_inbox), + "active_conversations": len(agent.conversation_history) + } + for agent_id, agent in self.agents.items() + } + } + + +async def main(): + """Example usage""" + orchestrator = AgentOrchestrator() + + print("\n๐Ÿ“Š Agent Status:") + status = orchestrator.get_status() + print(f"Total Agents: {status['total_agents']}") + + print("\n๐ŸŽฏ Available Agents:") + for agent_id in orchestrator.list_agents(): + agent = orchestrator.get_agent(agent_id) + print(f" - {agent.name} ({agent_id})") + + # Test task routing + print("\n๐Ÿงช Testing Task Routing:") + test_tasks = [ + "Write a Python function to calculate fibonacci", + "Design a color palette for a dashboard", + "Deploy the app to Cloudflare Workers", + "Create API documentation for the router", + "Analyze user engagement metrics" + ] + + for task in test_tasks: + agent_id = orchestrator.route_task(task) + agent = orchestrator.get_agent(agent_id) + print(f" '{task[:50]}...' โ†’ {agent.name}") + + # Test task execution + print("\n๐Ÿš€ Executing Task:") + result = await orchestrator.execute_task( + "Refactor the API router and update its documentation" + ) + print(f"Result: {result}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/codespace_agents/workers/README.md b/codespace_agents/workers/README.md new file mode 100644 index 0000000..ce67101 --- /dev/null +++ b/codespace_agents/workers/README.md @@ -0,0 +1,287 @@ +# Deploying Agents to Cloudflare Workers + +This directory contains Cloudflare Worker implementations of the BlackRoad agents. + +## Overview + +Each agent can be deployed as an edge worker for global, low-latency access: + +- **agent-router.js** - Routes requests to appropriate agents +- **coder-agent.js** - Code generation/review agent +- More agents can be added following the same pattern + +## Prerequisites + +1. **Cloudflare Account**: Sign up at https://cloudflare.com +2. **Wrangler CLI**: Already installed in the codespace +3. **Login**: Run `wrangler login` to authenticate + +## Setup + +### 1. Login to Cloudflare + +```bash +wrangler login +``` + +This opens a browser to authorize wrangler with your Cloudflare account. + +### 2. Create KV Namespace + +```bash +# Create KV for agent state +wrangler kv:namespace create "AGENT_KV" + +# Copy the ID and update wrangler.toml +``` + +### 3. Create D1 Database (optional) + +```bash +# Create D1 database for collaboration tracking +wrangler d1 create blackroad-agents + +# Copy the database_id and update wrangler.toml +``` + +### 4. Set Secrets (optional) + +For cloud model fallback: + +```bash +# OpenAI API key (optional) +wrangler secret put OPENAI_API_KEY + +# Anthropic API key (optional) +wrangler secret put ANTHROPIC_API_KEY + +# Ollama API URL (if running on separate server) +wrangler secret put OLLAMA_API_URL +``` + +## Deploy + +### Deploy Router + +```bash +wrangler deploy agent-router.js --name agent-router +``` + +### Deploy Coder Agent + +```bash +wrangler deploy coder-agent.js --name coder-agent +``` + +### Deploy All + +```bash +# Deploy everything +for worker in *.js; do + name=$(basename "$worker" .js) + wrangler deploy "$worker" --name "$name" +done +``` + +## Configuration + +Edit `wrangler.toml` to customize: + +```toml +name = "agent-router" +main = "agent-router.js" +compatibility_date = "2024-01-27" + +# KV namespace for state +[[kv_namespaces]] +binding = "AGENT_KV" +id = "YOUR_KV_ID" # Replace with your KV ID + +# D1 database (optional) +[[d1_databases]] +binding = "AGENT_DB" +database_name = "blackroad-agents" +database_id = "YOUR_D1_ID" # Replace with your D1 ID +``` + +## Usage + +### Health Check + +```bash +curl https://agent-router.YOUR-SUBDOMAIN.workers.dev/health +``` + +### Ask a Question + +```bash +curl -X POST https://agent-router.YOUR-SUBDOMAIN.workers.dev/ask \ + -H "Content-Type: application/json" \ + -d '{ + "task": "Write a Python function to reverse a string" + }' +``` + +The router will automatically select the appropriate agent. + +### Specify Agent + +```bash +curl -X POST https://agent-router.YOUR-SUBDOMAIN.workers.dev/ask \ + -H "Content-Type: application/json" \ + -d '{ + "task": "Design a color palette", + "agent": "designer" + }' +``` + +### List Agents + +```bash +curl https://agent-router.YOUR-SUBDOMAIN.workers.dev/agents +``` + +## Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Cloudflare Edge Network โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ agent-router.js โ”‚ โ”‚ +โ”‚ โ”‚ (Main entry point) โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ–ผ โ–ผ โ–ผ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚Coder โ”‚ โ”‚Designโ”‚ โ”‚ Ops โ”‚ โ”‚ +โ”‚ โ”‚Agent โ”‚ โ”‚Agent โ”‚ โ”‚Agent โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ–ผ โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ KV โ”‚ (State) โ”‚ +โ”‚ โ”‚ D1 โ”‚ (History) โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ”‚ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Adding New Agents + +1. **Create worker file**: + ```javascript + // designer-agent.js + export default { + async fetch(request, env, ctx) { + // Agent logic here + } + } + ``` + +2. **Add to router**: + ```javascript + // agent-router.js + const AGENT_URLS = { + designer: 'https://designer-agent.YOUR.workers.dev', + // ... + } + ``` + +3. **Deploy**: + ```bash + wrangler deploy designer-agent.js --name designer-agent + ``` + +## Local Development + +Test workers locally before deploying: + +```bash +# Run locally +wrangler dev agent-router.js + +# Test +curl http://localhost:8787/health +``` + +## Monitoring + +View logs in Cloudflare dashboard: +1. Go to https://dash.cloudflare.com +2. Select "Workers & Pages" +3. Click on your worker +4. View "Logs" tab + +Or stream logs with wrangler: + +```bash +wrangler tail agent-router +``` + +## Cost + +Cloudflare Workers free tier: +- **100,000 requests/day** - Free +- **10ms CPU time per request** - Free +- Additional usage: $0.50 per million requests + +For most use cases, this stays free! + +## Troubleshooting + +### "No such namespace" + +Create KV namespace: +```bash +wrangler kv:namespace create "AGENT_KV" +``` + +### "Authorization failed" + +Re-login: +```bash +wrangler logout +wrangler login +``` + +### "Module not found" + +Check that worker file exists and is specified in command: +```bash +wrangler deploy agent-router.js --name agent-router +``` + +## Custom Domains + +Connect a custom domain: + +```bash +# Add route in wrangler.toml +routes = [ + { pattern = "agents.yourdomain.com/*", zone_name = "yourdomain.com" } +] +``` + +## Security + +1. **Use secrets** for API keys (never commit keys!) +2. **Enable rate limiting** in production +3. **Add CORS headers** as needed +4. **Validate inputs** in all endpoints +5. **Use environment variables** for configuration + +## Resources + +- [Cloudflare Workers Docs](https://developers.cloudflare.com/workers/) +- [Wrangler CLI Docs](https://developers.cloudflare.com/workers/wrangler/) +- [Workers Examples](https://developers.cloudflare.com/workers/examples/) +- [KV Storage](https://developers.cloudflare.com/workers/runtime-apis/kv/) +- [D1 Database](https://developers.cloudflare.com/d1/) + +--- + +*Deploy globally in seconds. Scale to millions. $0 to start.* diff --git a/codespace_agents/workers/agent-router.js b/codespace_agents/workers/agent-router.js new file mode 100644 index 0000000..14e7561 --- /dev/null +++ b/codespace_agents/workers/agent-router.js @@ -0,0 +1,143 @@ +/** + * BlackRoad Agent Router - Cloudflare Worker + * + * Routes requests to appropriate agent workers. + */ + +const AGENT_URLS = { + coder: 'https://coder-agent.blackroad.workers.dev', + designer: 'https://designer-agent.blackroad.workers.dev', + ops: 'https://ops-agent.blackroad.workers.dev', + docs: 'https://docs-agent.blackroad.workers.dev', + analyst: 'https://analyst-agent.blackroad.workers.dev', +}; + +export default { + async fetch(request, env, ctx) { + if (request.method === 'OPTIONS') { + return new Response(null, { + headers: { + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS', + 'Access-Control-Allow-Headers': 'Content-Type', + }, + }); + } + + const url = new URL(request.url); + + // Health check + if (url.pathname === '/health') { + return Response.json({ + service: 'agent-router', + status: 'healthy', + agents: Object.keys(AGENT_URLS), + timestamp: new Date().toISOString(), + }); + } + + // Route to specific agent + if (url.pathname === '/ask' && request.method === 'POST') { + try { + const body = await request.json(); + const { task, agent } = body; + + if (!task) { + return Response.json({ error: 'Task is required' }, { status: 400 }); + } + + // Auto-route if agent not specified + const targetAgent = agent || routeTask(task); + const agentUrl = AGENT_URLS[targetAgent]; + + if (!agentUrl) { + return Response.json( + { error: `Unknown agent: ${targetAgent}` }, + { status: 400 } + ); + } + + // Forward to agent + const response = await fetch(`${agentUrl}/ask`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ task }), + }); + + const result = await response.json(); + + // Add routing metadata + result.routed_by = 'agent-router'; + result.selected_agent = targetAgent; + + return Response.json(result, { + headers: { + 'Access-Control-Allow-Origin': '*', + }, + }); + + } catch (error) { + return Response.json( + { error: error.message }, + { status: 500 } + ); + } + } + + // List available agents + if (url.pathname === '/agents') { + return Response.json({ + agents: Object.keys(AGENT_URLS).map(id => ({ + id, + url: AGENT_URLS[id], + })), + }, { + headers: { + 'Access-Control-Allow-Origin': '*', + }, + }); + } + + return Response.json( + { error: 'Not found' }, + { status: 404 } + ); + }, +}; + +/** + * Route task to appropriate agent based on keywords + */ +function routeTask(task) { + const lower = task.toLowerCase(); + + // Coder + if (/code|function|class|bug|fix|refactor|implement|debug|test|python|javascript/.test(lower)) { + return 'coder'; + } + + // Designer + if (/design|ui|ux|color|palette|layout|component|style|css|accessibility/.test(lower)) { + return 'designer'; + } + + // Ops + if (/deploy|docker|kubernetes|ci\/cd|pipeline|infrastructure|server|cloud|monitoring/.test(lower)) { + return 'ops'; + } + + // Docs + if (/document|readme|tutorial|guide|api doc|documentation|explain|write|changelog/.test(lower)) { + return 'docs'; + } + + // Analyst + if (/analyze|metrics|data|statistics|report|trend|anomaly|performance|insights/.test(lower)) { + return 'analyst'; + } + + // Default + return 'coder'; +} diff --git a/codespace_agents/workers/coder-agent.js b/codespace_agents/workers/coder-agent.js new file mode 100644 index 0000000..66577a5 --- /dev/null +++ b/codespace_agents/workers/coder-agent.js @@ -0,0 +1,111 @@ +/** + * BlackRoad Coder Agent - Cloudflare Worker + * + * Edge-deployed coder agent for code generation and review. + */ + +export default { + async fetch(request, env, ctx) { + // Handle CORS + if (request.method === 'OPTIONS') { + return new Response(null, { + headers: { + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS', + 'Access-Control-Allow-Headers': 'Content-Type', + }, + }); + } + + const url = new URL(request.url); + + // Health check + if (url.pathname === '/health') { + return Response.json({ + agent: 'coder', + status: 'healthy', + model: 'qwen2.5-coder', + timestamp: new Date().toISOString(), + }); + } + + // Main endpoint + if (url.pathname === '/ask' && request.method === 'POST') { + try { + const body = await request.json(); + const { task } = body; + + if (!task) { + return Response.json({ error: 'Task is required' }, { status: 400 }); + } + + // TODO: Implement actual model inference by integrating with: + // - Ollama API running on a backend server + // - Cloudflare Workers AI + // - OpenAI/Anthropic APIs + // For now, return mock response + const response = { + agent: 'coder', + task, + response: `[Mock Response] I would help you with: ${task}. Note: Actual model inference not yet implemented.`, + model: 'qwen2.5-coder:latest', + timestamp: new Date().toISOString(), + // In production, would include: + // - Code generation + // - Code review + // - Test cases + // - Documentation + }; + + // Store in KV for history (optional) + if (env.AGENT_KV) { + const key = `coder:${Date.now()}`; + await env.AGENT_KV.put(key, JSON.stringify(response), { + expirationTtl: 86400, // 24 hours + }); + } + + return Response.json(response, { + headers: { + 'Access-Control-Allow-Origin': '*', + }, + }); + + } catch (error) { + return Response.json( + { error: error.message }, + { status: 500 } + ); + } + } + + // List recent tasks + if (url.pathname === '/history' && env.AGENT_KV) { + try { + const list = await env.AGENT_KV.list({ prefix: 'coder:' }); + const keys = list.keys.slice(0, 10); // Last 10 + + const history = []; + for (const { name } of keys) { + const value = await env.AGENT_KV.get(name); + if (value) { + history.push(JSON.parse(value)); + } + } + + return Response.json({ history }, { + headers: { + 'Access-Control-Allow-Origin': '*', + }, + }); + } catch (error) { + return Response.json({ error: error.message }, { status: 500 }); + } + } + + return Response.json( + { error: 'Not found' }, + { status: 404 } + ); + }, +}; diff --git a/codespace_agents/workers/wrangler.toml b/codespace_agents/workers/wrangler.toml new file mode 100644 index 0000000..f88c4ad --- /dev/null +++ b/codespace_agents/workers/wrangler.toml @@ -0,0 +1,27 @@ +name = "agent-router" +main = "agent-router.js" +compatibility_date = "2025-01-01" + +# KV namespace for agent state +[[kv_namespaces]] +binding = "AGENT_KV" +# NOTE: Replace this placeholder with your actual KV namespace ID before deploying. +# Create with: wrangler kv:namespace create "AGENT_KV" +id = "YOUR_KV_NAMESPACE_ID" + +# D1 database for collaboration tracking +[[d1_databases]] +binding = "AGENT_DB" +database_name = "blackroad-agents" +# NOTE: Replace this placeholder with your actual D1 database ID before deploying. +# Create with: wrangler d1 create blackroad-agents +database_id = "YOUR_D1_DATABASE_ID" + +# Environment variables +[vars] +ENVIRONMENT = "production" + +# Secrets (set via wrangler secret put) +# OPENAI_API_KEY +# ANTHROPIC_API_KEY +# OLLAMA_API_URL diff --git a/quickstart.sh b/quickstart.sh new file mode 100755 index 0000000..9404e3d --- /dev/null +++ b/quickstart.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# +# BlackRoad Agent Quick Start +# Run this after opening the codespace to verify everything works +# + +set -e + +echo "๐Ÿค– BlackRoad Agent Quick Start" +echo "================================" +echo "" + +# Check Python +echo "โœ“ Checking Python..." +python --version + +# Check Ollama +echo "โœ“ Checking Ollama..." +if command -v ollama &> /dev/null; then + echo " Ollama installed" + + # Start Ollama if not running + if ! pgrep -x "ollama" > /dev/null; then + echo " Starting Ollama..." + ollama serve > /tmp/ollama.log 2>&1 & + + # Wait for Ollama to be ready with retry + echo " Waiting for Ollama to start..." + for i in {1..10}; do + sleep 2 + if ollama list >/dev/null 2>&1; then + echo " Ollama is ready!" + break + fi + if [ $i -eq 10 ]; then + echo " โš ๏ธ Ollama may still be starting. Check /tmp/ollama.log if issues occur." + fi + done + fi + + # List available models + echo " Available models:" + if ollama list 2>/dev/null | tail -n +2 | head -10; then + : + else + echo " (No models installed yet - check /tmp/blackroad/logs/ollama_model_pull.log)" + fi +else + echo " โš ๏ธ Ollama not installed yet. Run .devcontainer/setup.sh" +fi + +# Check Wrangler +echo "โœ“ Checking Wrangler (Cloudflare CLI)..." +if command -v wrangler &> /dev/null; then + wrangler --version +else + echo " โš ๏ธ Wrangler not installed" +fi + +# Test agent orchestrator +echo "" +echo "โœ“ Testing Agent Orchestrator..." +python -m codespace_agents.orchestrator > /tmp/agent-test.log 2>&1 +if [ $? -eq 0 ]; then + echo " All agents loaded successfully!" + echo "" + echo " Available agents:" + grep "Loaded agent:" /tmp/agent-test.log | sed 's/.*Loaded/ -/' +else + echo " โš ๏ธ Agent orchestrator test failed" + cat /tmp/agent-test.log +fi + +# Show next steps +echo "" +echo "================================" +echo "โœจ Setup Complete!" +echo "" +echo "Next steps:" +echo "" +echo " 1. Chat with an agent:" +echo " python -m codespace_agents.chat --agent coder 'Write a hello world function'" +echo "" +echo " 2. Try the examples:" +echo " python -m codespace_agents.examples" +echo "" +echo " 3. Start a collaborative session:" +echo " python -m codespace_agents.collaborate" +echo "" +echo " 4. Deploy to Cloudflare:" +echo " cd codespace_agents/workers && wrangler deploy" +echo "" +echo "๐Ÿ“š Documentation:" +echo " - Getting Started: CODESPACE_GUIDE.md" +echo " - Agent Docs: codespace_agents/README.md" +echo " - Models: codespace_agents/MODELS.md" +echo "" +echo "================================"