From 48bc81e831a5fbef87c01412d01f54c80d8b90ea Mon Sep 17 00:00:00 2001 From: SentienceDEV Date: Wed, 11 Mar 2026 20:00:49 -0700 Subject: [PATCH] real examples with v1/execute to read files --- examples/file-processor-demo/.env.example | 41 + examples/file-processor-demo/.gitignore | 33 + examples/file-processor-demo/Dockerfile | 34 + .../file-processor-demo/Dockerfile.sidecar | 38 + examples/file-processor-demo/README.md | 358 +++++++ .../file-processor-demo/docker-compose.yml | 79 ++ examples/file-processor-demo/package.json | 24 + examples/file-processor-demo/policy.yaml | 260 +++++ examples/file-processor-demo/run-demo.sh | 173 ++++ .../src/file-processor-agent.ts | 891 ++++++++++++++++++ examples/file-processor-demo/tsconfig.json | 18 + .../workspace/archive/.gitkeep | 1 + .../workspace/input/sales_north.json | 7 + .../workspace/input/sales_south.json | 6 + .../workspace/input/sales_west.json | 8 + .../workspace/output/.gitkeep | 1 + 16 files changed, 1972 insertions(+) create mode 100644 examples/file-processor-demo/.env.example create mode 100644 examples/file-processor-demo/.gitignore create mode 100644 examples/file-processor-demo/Dockerfile create mode 100644 examples/file-processor-demo/Dockerfile.sidecar create mode 100644 examples/file-processor-demo/README.md create mode 100644 examples/file-processor-demo/docker-compose.yml create mode 100644 examples/file-processor-demo/package.json create mode 100644 examples/file-processor-demo/policy.yaml create mode 100755 examples/file-processor-demo/run-demo.sh create mode 100644 examples/file-processor-demo/src/file-processor-agent.ts create mode 100644 examples/file-processor-demo/tsconfig.json create mode 100644 examples/file-processor-demo/workspace/archive/.gitkeep create mode 100644 examples/file-processor-demo/workspace/input/sales_north.json create mode 100644 examples/file-processor-demo/workspace/input/sales_south.json create mode 100644 examples/file-processor-demo/workspace/input/sales_west.json 
create mode 100644 examples/file-processor-demo/workspace/output/.gitkeep diff --git a/examples/file-processor-demo/.env.example b/examples/file-processor-demo/.env.example new file mode 100644 index 0000000..6b8ea46 --- /dev/null +++ b/examples/file-processor-demo/.env.example @@ -0,0 +1,41 @@ +# File Processor Demo - Environment Configuration +# Copy this file to .env and fill in your values + +# ============================================================================== +# LLM Provider Configuration (choose ONE) +# ============================================================================== + +# Option 1: Anthropic Claude (recommended) +ANTHROPIC_API_KEY=your-anthropic-api-key-here +ANTHROPIC_MODEL=claude-sonnet-4-20250514 + +# Option 2: OpenAI +# OPENAI_API_KEY=your-openai-api-key-here +# OPENAI_MODEL=gpt-4o + +# Option 3: Local LLM (Ollama or LM Studio) +# LOCAL_LLM_BASE_URL=http://host.docker.internal:11434/v1 +# LOCAL_LLM_MODEL=llama3.2 + +# Force specific provider (optional - auto-detects based on keys if not set) +# LLM_PROVIDER=anthropic # or: openai, local, ollama, lmstudio + +# ============================================================================== +# Sidecar Configuration +# ============================================================================== + +# Sidecar URL (defaults to docker service name) +PREDICATE_SIDECAR_URL=http://predicate-sidecar:8787 + +# Agent principal identity +SECURECLAW_PRINCIPAL=agent:file-processor + +# ============================================================================== +# Optional Settings +# ============================================================================== + +# Enable verbose logging +SECURECLAW_VERBOSE=true + +# Cloud tracing (optional) +# PREDICATE_API_KEY=your-predicate-api-key-here diff --git a/examples/file-processor-demo/.gitignore b/examples/file-processor-demo/.gitignore new file mode 100644 index 0000000..e9ad50c --- /dev/null +++ b/examples/file-processor-demo/.gitignore @@ 
-0,0 +1,33 @@ +# Environment files (contain secrets) +.env +.env.local +.env.*.local + +# Dependencies +node_modules/ + +# Build output +dist/ + +# TypeScript cache +*.tsbuildinfo + +# Logs +*.log +npm-debug.log* + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# OS files +.DS_Store +Thumbs.db + +# Workspace output (generated files) +workspace/output/* +workspace/archive/* +!workspace/output/.gitkeep +!workspace/archive/.gitkeep diff --git a/examples/file-processor-demo/Dockerfile b/examples/file-processor-demo/Dockerfile new file mode 100644 index 0000000..5d2050c --- /dev/null +++ b/examples/file-processor-demo/Dockerfile @@ -0,0 +1,34 @@ +# ============================================================================ +# File Processor Agent - Dockerfile +# ============================================================================ +# +# Builds the file processor agent with zero filesystem privileges. +# All file operations go through the sidecar's /v1/execute endpoint. +# +# ============================================================================ + +FROM node:20-slim + +WORKDIR /app + +# Install dependencies +RUN apt-get update && apt-get install -y \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Copy package files +COPY package.json tsconfig.json ./ +COPY src ./src + +# Install npm dependencies +RUN npm install + +# Build TypeScript +RUN npm run build + +# Create non-root user (agent runs with minimal privileges) +RUN useradd -m -s /bin/bash agent +USER agent + +# Entry point +CMD ["node", "dist/file-processor-agent.js"] diff --git a/examples/file-processor-demo/Dockerfile.sidecar b/examples/file-processor-demo/Dockerfile.sidecar new file mode 100644 index 0000000..72c5243 --- /dev/null +++ b/examples/file-processor-demo/Dockerfile.sidecar @@ -0,0 +1,38 @@ +# Predicate Authority Sidecar +# +# Uses Ubuntu 24.04 LTS which has GLIBC 2.39 (required by the sidecar binary). +# Downloads the binary from GitHub releases - cached in Docker layers. 
+ +FROM ubuntu:24.04 + +# Install curl for downloading binary and health checks +RUN apt-get update && apt-get install -y curl ca-certificates && rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +# Detect architecture and download appropriate binary +# This layer is cached after first build +ARG TARGETARCH +RUN ARCH=$(echo ${TARGETARCH:-$(uname -m)} | sed 's/amd64/x64/' | sed 's/x86_64/x64/' | sed 's/aarch64/arm64/') && \ + echo "Detected architecture: $ARCH" && \ + curl -fsSL -o /tmp/sidecar.tar.gz \ + "https://github.com/PredicateSystems/predicate-authority-sidecar/releases/download/v0.6.7/predicate-authorityd-linux-${ARCH}.tar.gz" && \ + tar -xzf /tmp/sidecar.tar.gz -C /usr/local/bin && \ + chmod +x /usr/local/bin/predicate-authorityd && \ + rm /tmp/sidecar.tar.gz + +# Copy policy file (at end for better caching) +COPY policy.yaml /app/policy.yaml + +EXPOSE 8787 + +# Run sidecar with delegation enabled for /v1/execute support +# The --enable-delegation flag enables mandate issuance AND mandate store +CMD ["predicate-authorityd", \ + "--host", "0.0.0.0", \ + "--port", "8787", \ + "--mode", "local_only", \ + "--policy-file", "/app/policy.yaml", \ + "--log-level", "info", \ + "--enable-delegation", \ + "run"] diff --git a/examples/file-processor-demo/README.md b/examples/file-processor-demo/README.md new file mode 100644 index 0000000..ab5a103 --- /dev/null +++ b/examples/file-processor-demo/README.md @@ -0,0 +1,358 @@ +# Zero-Trust File Processor Agent + +A demonstration of **true zero-trust AI agent execution** using the sidecar's `/v1/execute` endpoint. Unlike authorize-only mode, the agent has **zero ambient filesystem privileges** - ALL file operations are executed by the sidecar. 
+ +## Architecture: Execute Mode vs Authorize-Only Mode + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ EXECUTE MODE (This Demo) │ +│ │ +│ ┌───────────────┐ ┌─────────────────────────────────────────────┐ │ +│ │ Agent │───▶│ Sidecar /v1/execute │ │ +│ │ (No FS │ │ │ │ +│ │ Privileges) │ │ 1. Validate mandate │ │ +│ │ │◀───│ 2. Check resource matches authorized │ │ +│ │ Only sends │ │ 3. EXECUTE the operation │ │ +│ │ intents │ │ 4. Return result with evidence hash │ │ +│ └───────────────┘ └─────────────────────────────────────────────┘ │ +│ │ +│ TRUST BOUNDARY: Agent cannot access filesystem directly │ +│ GUARANTEE: Sidecar ensures authorized resource = executed resource │ +└─────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────────────────┐ +│ AUTHORIZE-ONLY MODE (market-research-agent) │ +│ │ +│ ┌───────────────┐ ┌─────────────────┐ ┌───────────────────────┐ │ +│ │ Agent │───▶│ /authorize │ │ Local Execution │ │ +│ │ (Has FS │ │ (policy check) │ │ fs.writeFileSync() │ │ +│ │ Privileges) │ └────────┬────────┘ └───────────┬───────────┘ │ +│ │ │ │ │ │ +│ │ │ ALLOW/DENY Agent executes │ +│ │ │ │ (must be trusted) │ +│ └───────────────┴─────────────┴─────────────────────────┘ │ +│ │ +│ TRUST BOUNDARY: Agent must be trusted to respect policy decisions │ +│ GAP: No proof agent executed only what was authorized │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +## What This Demo Does + +The File Processor Agent demonstrates a realistic data pipeline: + +1. **List input directory** - Find files to process +2. **Read each file** - Load JSON data +3. **Transform data** - Aggregate, filter, enrich (using LLM) +4. **Write output** - Save processed results +5. **Archive originals** - Move processed files to archive +6. **Generate report** - Create summary with shell command +7. 
**Attempt unauthorized access** - Demonstrate policy denial + +### Demo Scenario: Sales Data Aggregator + +``` +/workspace/input/ → Read JSON sales records + │ + ▼ + [LLM Processing] → Aggregate by region, compute totals + │ + ▼ +/workspace/output/ → Write aggregated results + │ + ▼ +/workspace/archive/ → Archive processed files + │ + ▼ +/workspace/output/report.txt → Shell: Generate summary report +``` + +## Quick Start + +```bash +# 1. Set environment variables (choose ONE LLM provider) + +# Option A: Anthropic Claude (recommended) +export ANTHROPIC_API_KEY="sk-ant-..." + +# Option B: OpenAI +export OPENAI_API_KEY="sk-..." + +# Option C: Local LLM (Ollama or LM Studio) +export LOCAL_LLM_BASE_URL="http://localhost:11434/v1" +export LOCAL_LLM_MODEL="llama3.2" + +# Optional: Cloud tracing +export PREDICATE_API_KEY="sk_pro_..." + +# 2. Run the demo +./run-demo.sh +``` + +## LLM Provider Support + +The agent supports multiple LLM providers with automatic detection: + +| Provider | Required Environment Variables | Notes | +|----------|-------------------------------|-------| +| **Anthropic** | `ANTHROPIC_API_KEY` | Default: claude-sonnet-4-20250514 | +| **OpenAI** | `OPENAI_API_KEY` | Default: gpt-4o | +| **Local (Ollama)** | `LOCAL_LLM_BASE_URL` | Default: http://localhost:11434/v1 | +| **Local (LM Studio)** | `LOCAL_LLM_BASE_URL` | Set to http://localhost:1234/v1 | + +### Explicit Provider Selection + +You can force a specific provider using `LLM_PROVIDER`: + +```bash +# Force Anthropic even if OpenAI key is also set +export LLM_PROVIDER=anthropic + +# Force local LLM +export LLM_PROVIDER=local +# or +export LLM_PROVIDER=ollama +``` + +### Running Without LLM + +The agent works without an LLM - it will skip the AI-enhanced analysis and use simple aggregation only: + +```bash +# No LLM keys set - agent still processes files +./run-demo.sh +``` + +## Running with Local LLM (Ollama) + +For fully offline operation, you can run the demo with a local LLM using Ollama: + 
+### Step 1: Install and Start Ollama + +```bash +# macOS +brew install ollama + +# Linux +curl -fsSL https://ollama.com/install.sh | sh + +# Start Ollama service +ollama serve +``` + +### Step 2: Pull a Model + +```bash +# Recommended: Llama 3.2 (smaller, faster) +ollama pull llama3.2 + +# Alternative: Mistral (good for analysis tasks) +ollama pull mistral + +# Alternative: Qwen 2.5 (multilingual) +ollama pull qwen2.5 +``` + +### Step 3: Run the Demo + +```bash +# Set local LLM configuration +export LOCAL_LLM_BASE_URL="http://host.docker.internal:11434/v1" +export LOCAL_LLM_MODEL="llama3.2" +export LLM_PROVIDER="local" + +# Run the demo +./run-demo.sh +``` + +> **Note**: Use `host.docker.internal` instead of `localhost` because the agent runs inside Docker and needs to reach Ollama on the host machine. + +### Using LM Studio Instead + +If you prefer LM Studio: + +```bash +# 1. Download and install LM Studio from https://lmstudio.ai +# 2. Download a model (e.g., Llama 3.2, Mistral) +# 3. 
Start the local server in LM Studio (default port 1234) + +export LOCAL_LLM_BASE_URL="http://host.docker.internal:1234/v1" +export LOCAL_LLM_MODEL="local-model" # LM Studio uses generic model name +export LLM_PROVIDER="local" + +./run-demo.sh +``` + +### Troubleshooting Local LLM + +| Issue | Solution | +|-------|----------| +| Connection refused | Ensure Ollama is running: `ollama serve` | +| Model not found | Pull the model first: `ollama pull llama3.2` | +| Slow responses | Use a smaller model or increase resources | +| Docker can't reach host | Use `host.docker.internal` (macOS/Windows) or `172.17.0.1` (Linux) | + +## Sample Output + +``` +══════════════════════════════════════════════════════════════════════ +║ FILE PROCESSOR AGENT - Zero-Trust Execute Mode Demo +══════════════════════════════════════════════════════════════════════ + +[Step 1] Listing input directory +┌──────────────────────────────────────────────────────────────┐ +│ EXECUTE: fs.list │ +│ Resource: /workspace/input │ +│ Mode: /v1/execute (sidecar executes) │ +└──────────────────────────────────────────────────────────────┘ + ✓ Found 3 files: sales_north.json, sales_south.json, sales_west.json + +[Step 2] Reading input files +┌──────────────────────────────────────────────────────────────┐ +│ EXECUTE: fs.read │ +│ Resource: /workspace/input/sales_north.json │ +│ Mode: /v1/execute (sidecar executes) │ +└──────────────────────────────────────────────────────────────┘ + ✓ Read 1,234 bytes (hash: a1b2c3...) + +[Step 3] Processing with LLM + → Aggregating sales by region... + ✓ Computed: North=$45,230, South=$38,100, West=$52,890 + +[Step 4] Writing output +┌──────────────────────────────────────────────────────────────┐ +│ EXECUTE: fs.write │ +│ Resource: /workspace/output/aggregated_sales.json │ +│ Mode: /v1/execute (sidecar executes) │ +└──────────────────────────────────────────────────────────────┘ + ✓ Wrote 456 bytes (hash: d4e5f6...) 
+ +[Step 5] Archiving processed files +┌──────────────────────────────────────────────────────────────┐ +│ EXECUTE: fs.write │ +│ Resource: /workspace/archive/sales_north.json │ +│ Mode: /v1/execute (sidecar executes) │ +└──────────────────────────────────────────────────────────────┘ + ✓ Archived 3 files + +[Step 6] Generating report +┌──────────────────────────────────────────────────────────────┐ +│ EXECUTE: cli.exec │ +│ Resource: wc -l /workspace/output/*.json │ +│ Mode: /v1/execute (sidecar executes) │ +└──────────────────────────────────────────────────────────────┘ + ✓ Report generated: 42 lines total + +[Step 7] Attempting unauthorized access +┌──────────────────────────────────────────────────────────────┐ +│ EXECUTE: fs.read │ +│ Resource: /etc/passwd │ +│ Mode: /v1/execute (sidecar executes) │ +│ │ +│ ✗ DENIED: resource_mismatch │ +│ Mandate authorized: /workspace/input/* │ +│ Requested resource: /etc/passwd │ +└──────────────────────────────────────────────────────────────┘ + ✓ BLOCKED: Zero-trust enforcement working correctly + +══════════════════════════════════════════════════════════════════════ +║ AGENT COMPLETED - All operations executed via sidecar +══════════════════════════════════════════════════════════════════════ +``` + +## Key Differences from Authorize-Only Demo + +| Aspect | This Demo (Execute) | market-research (Authorize-Only) | +|--------|---------------------|----------------------------------| +| Endpoint | `/v1/execute` | `/authorize` | +| Who executes | Sidecar | Agent (local) | +| Agent FS access | None | Full | +| Resource verification | Cryptographic | Trust-based | +| Evidence hash | ✓ Returned by sidecar | Must compute locally | +| Confused deputy attack | Prevented | Possible | + +## Policy Configuration + +```yaml +rules: + # Allow reading from input directory + - name: allow-input-read + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.read", "fs.list"] + resources: ["/workspace/input/*"] + + # Allow 
writing to output directory + - name: allow-output-write + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.write"] + resources: ["/workspace/output/*"] + + # Allow archiving (write to archive) + - name: allow-archive-write + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.write"] + resources: ["/workspace/archive/*"] + + # Allow safe shell commands + - name: allow-safe-shell + effect: allow + principals: ["agent:file-processor"] + actions: ["cli.exec"] + resources: + - "wc *" + - "ls *" + - "date" + - "cat /workspace/output/*" + + # Default deny + - name: default-deny + effect: deny + principals: ["*"] + actions: ["*"] + resources: ["*"] +``` + +## File Structure + +``` +file-processor-demo/ +├── README.md # This file +├── run-demo.sh # Entry point +├── docker-compose.yml # Container orchestration +├── Dockerfile # Agent container +├── policy.yaml # Authorization rules +├── src/ +│ └── file-processor-agent.ts # Main agent (uses /v1/execute) +└── workspace/ + ├── input/ # Source files + │ ├── sales_north.json + │ ├── sales_south.json + │ └── sales_west.json + ├── output/ # Processed results + └── archive/ # Archived originals +``` + +## Security Properties + +1. **No Ambient Authority**: Agent process cannot access filesystem directly +2. **Resource Binding**: Sidecar verifies requested resource matches mandate +3. **Cryptographic Evidence**: Every operation returns content hash +4. **Audit Trail**: All executions logged with mandate ID +5. 
**Fail Closed**: If sidecar unavailable, operations fail (not bypass) + +## Requirements + +- Docker and Docker Compose +- LLM API Key (one of the following - all optional, agent works without LLM): + - `ANTHROPIC_API_KEY` - Claude API key + - `OPENAI_API_KEY` - OpenAI API key + - `LOCAL_LLM_BASE_URL` - Ollama/LM Studio endpoint +- `PREDICATE_API_KEY` - Cloud tracing key (optional) + +--- + +*Built with OpenClaw + Predicate Authority for Zero-Trust AI Agent execution.* diff --git a/examples/file-processor-demo/docker-compose.yml b/examples/file-processor-demo/docker-compose.yml new file mode 100644 index 0000000..b867878 --- /dev/null +++ b/examples/file-processor-demo/docker-compose.yml @@ -0,0 +1,79 @@ +# ============================================================================ +# File Processor Demo - Docker Compose +# ============================================================================ +# +# Demonstrates zero-trust file processing with /v1/execute endpoint. +# +# Components: +# 1. predicate-sidecar: Rust sidecar that executes operations +# 2. file-processor-agent: TypeScript agent with zero FS privileges +# +# The agent container is configured WITHOUT filesystem mounts to its +# workspace - all file operations go through the sidecar. +# +# ============================================================================ + +version: "3.8" + +services: + # ========================================================================== + # Predicate Authority Sidecar + # Downloads binary from GitHub releases via Dockerfile.sidecar + # ========================================================================== + predicate-sidecar: + build: + context: . 
+ dockerfile: Dockerfile.sidecar + container_name: predicate-sidecar + ports: + - "8787:8787" + volumes: + # Workspace - sidecar has access, agent doesn't + - ./workspace:/workspace + environment: + - RUST_LOG=info + # Signing key for mandate issuance (required for /v1/execute) + - LOCAL_IDP_SIGNING_KEY=demo-secret-key-replace-in-production-minimum-32-chars + healthcheck: + test: ["CMD-SHELL", "curl -sf http://localhost:8787/health || exit 1"] + interval: 2s + timeout: 5s + retries: 15 + start_period: 5s + networks: + - predicate-net + + # ========================================================================== + # File Processor Agent + # ========================================================================== + file-processor-agent: + build: + context: . + dockerfile: Dockerfile + container_name: file-processor-agent + depends_on: + predicate-sidecar: + condition: service_healthy + environment: + - PREDICATE_SIDECAR_URL=http://predicate-sidecar:8787 + - SECURECLAW_PRINCIPAL=agent:file-processor + # LLM Provider selection (auto-detects if not set) + - LLM_PROVIDER=${LLM_PROVIDER:-} + # Anthropic Claude + - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-} + - ANTHROPIC_MODEL=${ANTHROPIC_MODEL:-claude-sonnet-4-20250514} + # OpenAI + - OPENAI_API_KEY=${OPENAI_API_KEY:-} + - OPENAI_MODEL=${OPENAI_MODEL:-gpt-4o} + # Local LLM (Ollama/LM Studio) + - LOCAL_LLM_BASE_URL=${LOCAL_LLM_BASE_URL:-} + - LOCAL_LLM_MODEL=${LOCAL_LLM_MODEL:-llama3.2} + - NODE_ENV=production + # NOTE: No volume mounts for /workspace - agent cannot access filesystem + # All operations go through sidecar's /v1/execute endpoint + networks: + - predicate-net + +networks: + predicate-net: + driver: bridge diff --git a/examples/file-processor-demo/package.json b/examples/file-processor-demo/package.json new file mode 100644 index 0000000..e0339f2 --- /dev/null +++ b/examples/file-processor-demo/package.json @@ -0,0 +1,24 @@ +{ + "name": "file-processor-demo", + "version": "1.0.0", + "description": 
"Zero-trust file processing agent using /v1/execute endpoint", + "type": "module", + "main": "dist/file-processor-agent.js", + "scripts": { + "build": "tsc", + "start": "node dist/file-processor-agent.js", + "dev": "tsx src/file-processor-agent.ts", + "typecheck": "tsc --noEmit" + }, + "dependencies": { + "@anthropic-ai/sdk": "^0.39.0" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "tsx": "^4.0.0", + "typescript": "^5.0.0" + }, + "engines": { + "node": ">=18.0.0" + } +} diff --git a/examples/file-processor-demo/policy.yaml b/examples/file-processor-demo/policy.yaml new file mode 100644 index 0000000..a6ac506 --- /dev/null +++ b/examples/file-processor-demo/policy.yaml @@ -0,0 +1,260 @@ +# ============================================================================ +# Predicate Authority Policy - File Processor Agent +# ============================================================================ +# +# SCENARIO: Zero-Trust File Processing Pipeline +# +# This policy demonstrates the /v1/execute endpoint pattern where the sidecar +# EXECUTES operations on behalf of the agent. The agent has zero ambient +# filesystem privileges - all operations go through the sidecar. +# +# ARCHITECTURE: +# ┌─────────────┐ Execute Request ┌─────────────────┐ +# │ Agent │ ─────────────────────────▶│ Sidecar │ +# │ (No FS │ POST /v1/execute │ /v1/execute │ +# │ Access) │ ◀─────────────────────────│ │ +# └─────────────┘ Result + Evidence │ 1. Validate │ +# │ 2. Execute │ +# │ 3. 
Return hash │ +# └────────┬────────┘ +# │ +# ▼ +# ┌─────────────────┐ +# │ Filesystem │ +# └─────────────────┘ +# +# ACTIONS SUPPORTED BY /v1/execute: +# fs.read - Read file contents +# fs.write - Write file contents +# fs.list - List directory entries +# fs.delete - Delete files/directories +# cli.exec - Execute shell commands +# http.fetch - HTTP requests +# env.read - Read environment variables +# +# ============================================================================ + +version: "1.0" + +metadata: + scenario: "file-processor" + author: "security-team" + last_updated: "2024-03-10" + compliance: ["SOC2", "GDPR"] + default_posture: "deny" + execution_mode: "execute" # Uses /v1/execute, not /authorize + +# ============================================================================ +# DENY RULES - Evaluated first (highest priority) +# ============================================================================ + +rules: + + # -------------------------------------------------------------------------- + # FILESYSTEM DENY RULES + # -------------------------------------------------------------------------- + + # Block access to system files + - name: deny-system-files + description: "Block access to sensitive system files" + effect: deny + principals: ["agent:*"] + actions: ["fs.read", "fs.write", "fs.list", "fs.delete"] + resources: + - "/etc/**" + - "/sys/**" + - "/proc/**" + - "/root/**" + - "/var/log/**" + + # Block hidden files (dotfiles, secrets) + - name: deny-hidden-files + description: "Block access to hidden files which may contain secrets" + effect: deny + principals: ["agent:*"] + actions: ["fs.read", "fs.write", "fs.list", "fs.delete"] + resources: + - "**/.*" + - "**/.env" + - "**/.env.*" + - "**/.git/**" + - "**/.ssh/**" + - "**/.aws/**" + + # Block credential files + - name: deny-credentials + description: "Block files commonly containing credentials" + effect: deny + principals: ["agent:*"] + actions: ["fs.read"] + resources: + - 
"**/credentials*" + - "**/secrets*" + - "**/password*" + - "**/*_rsa" + - "**/*_ed25519" + + # -------------------------------------------------------------------------- + # SHELL DENY RULES + # -------------------------------------------------------------------------- + + # Block dangerous shell commands + - name: deny-dangerous-commands + description: "Block shell commands that could compromise the system" + effect: deny + principals: ["agent:*"] + actions: ["cli.exec"] + resources: + - "*sudo*" + - "*rm -rf*" + - "*rm -r /*" + - "*chmod 777*" + - "*curl*|*bash*" + - "*wget*|*sh*" + - "*nc -e*" + - "*python*-c*import*socket*" + +# ============================================================================ +# ALLOW RULES - File Processor Agent Permissions +# ============================================================================ + + # -------------------------------------------------------------------------- + # INPUT DIRECTORY - Read Only + # -------------------------------------------------------------------------- + + - name: allow-input-read + description: "Allow reading and listing input directory" + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.read", "fs.list"] + resources: + - "/workspace/input" + - "/workspace/input/*" + - "/workspace/input/**" + + # -------------------------------------------------------------------------- + # OUTPUT DIRECTORY - Write Only + # -------------------------------------------------------------------------- + + - name: allow-output-write + description: "Allow writing to output directory" + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.write", "fs.list"] + resources: + - "/workspace/output" + - "/workspace/output/*" + - "/workspace/output/**" + + # -------------------------------------------------------------------------- + # ARCHIVE DIRECTORY - Write Only + # -------------------------------------------------------------------------- + + - name: allow-archive-write + 
description: "Allow archiving processed files" + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.write", "fs.list"] + resources: + - "/workspace/archive" + - "/workspace/archive/*" + - "/workspace/archive/**" + + # -------------------------------------------------------------------------- + # DELETE - Only from input after archiving + # -------------------------------------------------------------------------- + + - name: allow-input-delete + description: "Allow deleting from input after archiving" + effect: allow + principals: ["agent:file-processor"] + actions: ["fs.delete"] + resources: + - "/workspace/input/*" + + # -------------------------------------------------------------------------- + # SHELL COMMANDS - Safe read-only operations + # -------------------------------------------------------------------------- + + - name: allow-safe-shell + description: "Allow minimal safe shell commands for reporting" + effect: allow + principals: ["agent:file-processor"] + actions: ["cli.exec"] + resources: + # File counting and stats + - "wc *" + - "wc -l *" + - "wc -w *" + # Directory listing + - "ls *" + - "ls -la *" + # Date/time + - "date" + - "date *" + # Reading output files + - "cat /workspace/output/*" + - "head /workspace/output/*" + - "tail /workspace/output/*" + # JSON processing (read-only) + - "jq * /workspace/output/*" + + # -------------------------------------------------------------------------- + # HTTP - Allowed endpoints for data enrichment + # -------------------------------------------------------------------------- + + - name: allow-http-apis + description: "Allow HTTP requests to approved APIs" + effect: allow + principals: ["agent:file-processor"] + actions: ["http.fetch"] + resources: + # Example: Allow currency conversion API + - "https://api.exchangerate.host/**" + # Example: Allow geocoding + - "https://nominatim.openstreetmap.org/**" + # Webhook for notifications + - "https://hooks.slack.com/services/**" + + # 
-------------------------------------------------------------------------- + # ENVIRONMENT - Safe variables only + # -------------------------------------------------------------------------- + + - name: allow-env-read + description: "Allow reading non-sensitive environment variables" + effect: allow + principals: ["agent:file-processor"] + actions: ["env.read"] + resources: + - "TZ" + - "LANG" + - "LC_*" + - "HOME" + - "USER" + +# ============================================================================ +# DEFAULT DENY - Catch-all (must be last) +# ============================================================================ + + - name: default-deny-all + description: "DEFAULT DENY: Block any action not explicitly allowed" + effect: deny + principals: ["*"] + actions: ["*"] + resources: ["*"] + +# ============================================================================ +# AUDIT CONFIGURATION +# ============================================================================ + +audit: + log_level: "info" + log_denials: true + log_allows: true + log_executions: true # Log actual execution results + include_evidence_hash: true + redact_patterns: + - "*password*" + - "*secret*" + - "*token*" + - "*api_key*" diff --git a/examples/file-processor-demo/run-demo.sh b/examples/file-processor-demo/run-demo.sh new file mode 100755 index 0000000..8e03080 --- /dev/null +++ b/examples/file-processor-demo/run-demo.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# ============================================================================ +# File Processor Demo - Run Script +# ============================================================================ +# +# Demonstrates zero-trust file processing with /v1/execute endpoint. 
+ +# Usage: +#   ./run-demo.sh +# +# Environment variables: +#   ANTHROPIC_API_KEY  - Required for LLM processing (optional) +#   PREDICATE_API_KEY  - Cloud tracing key (optional) +# +# ============================================================================ + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +echo -e "${CYAN}" +echo "══════════════════════════════════════════════════════════════════════" +echo "║  FILE PROCESSOR DEMO - Zero-Trust Execute Mode" +echo "══════════════════════════════════════════════════════════════════════" +echo -e "${NC}" + +# Check for Docker +if ! command -v docker &> /dev/null; then +    echo -e "${RED}Error: Docker is not installed${NC}" +    exit 1 +fi + +if ! docker compose version &> /dev/null && ! command -v docker-compose &> /dev/null; then +    echo -e "${RED}Error: Docker Compose is not installed${NC}" +    exit 1 +fi + +# Determine docker compose command +if docker compose version &> /dev/null; then +    COMPOSE_CMD="docker compose" +else +    COMPOSE_CMD="docker-compose" +fi + +# Check environment +echo -e "${CYAN}Checking environment...${NC}" + +if [ -z "$ANTHROPIC_API_KEY" ]; then +    echo -e "${YELLOW}Note: ANTHROPIC_API_KEY not set - LLM processing will be disabled${NC}" +else +    echo -e "${GREEN}✓ ANTHROPIC_API_KEY is set${NC}" +fi + +# Ensure sample files exist +echo -e "${CYAN}Setting up sample data...${NC}" + +mkdir -p workspace/input workspace/output workspace/archive + +# Create sample input files if they don't exist +if [ ! -f workspace/input/sales_north.json ]; then +    echo -e "  Creating sample sales data..." 
+ + cat > workspace/input/sales_north.json << 'EOF' +[ + {"id": "N001", "region": "North", "product": "Widget A", "amount": 15200, "date": "2024-01-15", "customer": "Acme Corp"}, + {"id": "N002", "region": "North", "product": "Widget B", "amount": 8900, "date": "2024-01-18", "customer": "TechStart Inc"}, + {"id": "N003", "region": "North", "product": "Widget A", "amount": 12500, "date": "2024-01-22", "customer": "Global Systems"}, + {"id": "N004", "region": "North", "product": "Widget C", "amount": 6300, "date": "2024-01-25", "customer": "DataFlow Ltd"}, + {"id": "N005", "region": "North", "product": "Widget B", "amount": 9800, "date": "2024-01-28", "customer": "CloudNine"} +] +EOF + + cat > workspace/input/sales_south.json << 'EOF' +[ + {"id": "S001", "region": "South", "product": "Widget B", "amount": 11200, "date": "2024-01-12", "customer": "SunTech"}, + {"id": "S002", "region": "South", "product": "Widget A", "amount": 7800, "date": "2024-01-16", "customer": "Coastal Systems"}, + {"id": "S003", "region": "South", "product": "Widget C", "amount": 14500, "date": "2024-01-20", "customer": "Harbor Inc"}, + {"id": "S004", "region": "South", "product": "Widget A", "amount": 5600, "date": "2024-01-24", "customer": "Bay Networks"} +] +EOF + + cat > workspace/input/sales_west.json << 'EOF' +[ + {"id": "W001", "region": "West", "product": "Widget C", "amount": 18900, "date": "2024-01-10", "customer": "Pacific Tech"}, + {"id": "W002", "region": "West", "product": "Widget A", "amount": 13200, "date": "2024-01-14", "customer": "Valley Ventures"}, + {"id": "W003", "region": "West", "product": "Widget B", "amount": 9500, "date": "2024-01-19", "customer": "Mountain Systems"}, + {"id": "W004", "region": "West", "product": "Widget C", "amount": 16800, "date": "2024-01-23", "customer": "Redwood Inc"}, + {"id": "W005", "region": "West", "product": "Widget A", "amount": 7200, "date": "2024-01-27", "customer": "Sequoia Labs"}, + {"id": "W006", "region": "West", "product": "Widget 
B", "amount": 11100, "date": "2024-01-30", "customer": "Sierra Solutions"} +] +EOF + + echo -e "${GREEN}✓ Created 3 sample input files${NC}" +else + echo -e "${GREEN}✓ Sample data already exists${NC}" +fi + +# Clean up any previous runs +echo -e "${CYAN}Cleaning up previous runs...${NC}" +rm -rf workspace/output/* workspace/archive/* +echo -e "${GREEN}✓ Cleared output and archive directories${NC}" + +# Build and run +echo "" +echo -e "${CYAN}Building containers...${NC}" +$COMPOSE_CMD build + +echo "" +echo -e "${CYAN}Starting sidecar...${NC}" +$COMPOSE_CMD up -d predicate-sidecar + +# Wait for sidecar to be healthy +echo -e " Waiting for sidecar to be ready..." +for i in {1..30}; do + if curl -sf http://localhost:8787/health > /dev/null 2>&1; then + echo -e "${GREEN}✓ Sidecar is ready${NC}" + break + fi + if [ $i -eq 30 ]; then + echo -e "${RED}Error: Sidecar failed to start${NC}" + $COMPOSE_CMD logs predicate-sidecar + exit 1 + fi + sleep 1 +done + +echo "" +echo -e "${CYAN}Running file processor agent...${NC}" +echo "" + +# Run the agent +$COMPOSE_CMD run --rm file-processor-agent + +echo "" +echo -e "${CYAN}Results:${NC}" +echo "" + +# Show output files +if [ -f workspace/output/aggregated_sales.json ]; then + echo -e "${GREEN}Output file created:${NC}" + echo " workspace/output/aggregated_sales.json" + echo "" + echo "Contents:" + cat workspace/output/aggregated_sales.json | head -30 + echo "" +fi + +# Show archived files +if [ "$(ls -A workspace/archive 2>/dev/null)" ]; then + echo -e "${GREEN}Archived files:${NC}" + ls -la workspace/archive/ +fi + +echo "" +echo -e "${CYAN}Cleaning up...${NC}" +$COMPOSE_CMD down + +echo "" +echo -e "${GREEN}Demo completed successfully!${NC}" +echo "" +echo -e "${CYAN}Key takeaways:${NC}" +echo " 1. Agent had zero direct filesystem access" +echo " 2. All file operations went through sidecar /v1/execute" +echo " 3. Unauthorized access (e.g., /etc/passwd) was blocked" +echo " 4. 
Every operation returned cryptographic evidence hash" diff --git a/examples/file-processor-demo/src/file-processor-agent.ts b/examples/file-processor-demo/src/file-processor-agent.ts new file mode 100644 index 0000000..3e94ed4 --- /dev/null +++ b/examples/file-processor-demo/src/file-processor-agent.ts @@ -0,0 +1,891 @@ +/** + * ============================================================================ + * File Processor Agent - Zero-Trust Execution Mode Demo + * ============================================================================ + * + * This agent demonstrates the zero-trust execution pattern where: + * 1. Agent requests authorization from sidecar (/v1/authorize) + * 2. Sidecar evaluates policy and returns mandate_id if allowed + * 3. Agent calls /v1/execute with mandate_id - sidecar executes the operation + * 4. Agent receives result with cryptographic evidence + * + * ARCHITECTURE: + * Agent → /v1/authorize → Sidecar (policy check, returns mandate_id) + * ↓ + * Agent → /v1/execute → Sidecar (executes operation, returns result) + * + * SECURITY: The agent has NO filesystem access. All operations are executed + * by the sidecar, preventing confused deputy attacks. 
+ * + * LLM PROVIDERS: + * Supports multiple LLM providers via environment variables: + * - Anthropic Claude: Set ANTHROPIC_API_KEY + * - OpenAI: Set OPENAI_API_KEY + * - Local (Ollama/LM Studio): Set LOCAL_LLM_BASE_URL + */ + +import Anthropic from "@anthropic-ai/sdk"; + +// ============================================================================ +// LLM Provider Abstraction +// ============================================================================ + +interface LLMResponse { + text: string; + usage?: { + inputTokens: number; + outputTokens: number; + }; +} + +abstract class LLMProvider { + abstract get name(): string; + abstract generate(systemPrompt: string, userPrompt: string): Promise; +} + +/** + * Anthropic Claude Provider + */ +class AnthropicProvider extends LLMProvider { + private client: Anthropic; + private model: string; + + constructor(options: { model?: string } = {}) { + super(); + this.client = new Anthropic(); + this.model = options.model ?? process.env.ANTHROPIC_MODEL ?? "claude-sonnet-4-20250514"; + } + + get name(): string { + return `Anthropic (${this.model})`; + } + + async generate(systemPrompt: string, userPrompt: string): Promise { + const response = await this.client.messages.create({ + model: this.model, + max_tokens: 1024, + system: systemPrompt, + messages: [{ role: "user", content: userPrompt }], + }); + + const textContent = response.content.find(c => c.type === "text"); + return { + text: textContent?.text ?? "", + usage: { + inputTokens: response.usage.input_tokens, + outputTokens: response.usage.output_tokens, + }, + }; + } +} + +/** + * OpenAI Provider + */ +class OpenAIProvider extends LLMProvider { + private apiKey: string; + private model: string; + private baseUrl: string; + + constructor(options: { model?: string; baseUrl?: string } = {}) { + super(); + this.apiKey = process.env.OPENAI_API_KEY ?? ""; + this.model = options.model ?? process.env.OPENAI_MODEL ?? "gpt-4o"; + this.baseUrl = options.baseUrl ?? 
"https://api.openai.com/v1"; + } + + get name(): string { + return `OpenAI (${this.model})`; + } + + async generate(systemPrompt: string, userPrompt: string): Promise { + const response = await fetch(`${this.baseUrl}/chat/completions`, { + method: "POST", + headers: { + "Content-Type": "application/json", + "Authorization": `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + model: this.model, + messages: [ + { role: "system", content: systemPrompt }, + { role: "user", content: userPrompt }, + ], + max_tokens: 1024, + }), + }); + + if (!response.ok) { + throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json() as any; + return { + text: data.choices?.[0]?.message?.content ?? "", + usage: { + inputTokens: data.usage?.prompt_tokens ?? 0, + outputTokens: data.usage?.completion_tokens ?? 0, + }, + }; + } +} + +/** + * Local LLM Provider (Ollama, LM Studio, or any OpenAI-compatible API) + */ +class LocalLLMProvider extends LLMProvider { + private model: string; + private baseUrl: string; + + constructor(options: { model?: string; baseUrl?: string } = {}) { + super(); + this.model = options.model ?? process.env.LOCAL_LLM_MODEL ?? "llama3.2"; + this.baseUrl = options.baseUrl ?? process.env.LOCAL_LLM_BASE_URL ?? 
"http://localhost:11434/v1"; + } + + get name(): string { + return `Local (${this.model} @ ${this.baseUrl})`; + } + + async generate(systemPrompt: string, userPrompt: string): Promise { + const response = await fetch(`${this.baseUrl}/chat/completions`, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + model: this.model, + messages: [ + { role: "system", content: systemPrompt }, + { role: "user", content: userPrompt }, + ], + max_tokens: 1024, + stream: false, + }), + }); + + if (!response.ok) { + throw new Error(`Local LLM API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json() as any; + return { + text: data.choices?.[0]?.message?.content ?? "", + usage: { + inputTokens: data.usage?.prompt_tokens ?? 0, + outputTokens: data.usage?.completion_tokens ?? 0, + }, + }; + } +} + +/** + * Create LLM provider based on environment variables + */ +function createLLMProvider(): LLMProvider | null { + const explicitProvider = process.env.LLM_PROVIDER?.toLowerCase(); + + if (explicitProvider) { + switch (explicitProvider) { + case "anthropic": + case "claude": + if (!process.env.ANTHROPIC_API_KEY) { + console.warn("LLM_PROVIDER=anthropic but ANTHROPIC_API_KEY not set"); + return null; + } + return new AnthropicProvider(); + + case "openai": + case "gpt": + if (!process.env.OPENAI_API_KEY) { + console.warn("LLM_PROVIDER=openai but OPENAI_API_KEY not set"); + return null; + } + return new OpenAIProvider(); + + case "local": + case "ollama": + case "lmstudio": + return new LocalLLMProvider(); + + default: + console.warn(`Unknown LLM_PROVIDER: ${explicitProvider}`); + return null; + } + } + + if (process.env.ANTHROPIC_API_KEY) { + return new AnthropicProvider(); + } + + if (process.env.OPENAI_API_KEY) { + return new OpenAIProvider(); + } + + if (process.env.LOCAL_LLM_BASE_URL) { + return new LocalLLMProvider(); + } + + return null; +} + +// 
============================================================================ +// Sidecar API Types +// ============================================================================ + +interface AuthorizeRequest { + principal: string; + action: string; + resource: string; +} + +interface AuthorizeResponse { + allowed: boolean; + reason?: string; + mandate_id?: string; + mandate_token?: string; + violated_rule?: string; + matched_rule?: string; +} + +interface ExecuteRequest { + mandate_id: string; + action: string; + resource: string; + payload?: ExecutePayload; +} + +type ExecutePayload = + | { type: "file_write"; content: string; create?: boolean; append?: boolean } + | { type: "file_delete"; recursive?: boolean } + | { type: "cli_exec"; command: string; args?: string[]; cwd?: string; timeout_ms?: number } + | { type: "http_fetch"; method: string; headers?: Record; body?: string } + | { type: "env_read"; keys: string[] }; + +interface ExecuteResponse { + success: boolean; + result?: ExecuteResult; + error?: string; + audit_id: string; + evidence_hash?: string; +} + +type ExecuteResult = + | { type: "file_read"; content: string; size: number; content_hash: string } + | { type: "file_write"; bytes_written: number; content_hash: string } + | { type: "file_list"; entries: DirectoryEntry[]; total_entries: number } + | { type: "file_delete"; paths_removed: number } + | { type: "cli_exec"; exit_code: number; stdout: string; stderr: string; duration_ms: number } + | { type: "http_fetch"; status_code: number; headers: Record; body: string; body_hash: string } + | { type: "env_read"; values: Record }; + +interface DirectoryEntry { + name: string; + entry_type: string; + size: number; + modified?: number; +} + +// ============================================================================ +// Configuration +// ============================================================================ + +const CONFIG = { + sidecarUrl: process.env.PREDICATE_SIDECAR_URL || 
"http://predicate-sidecar:8787", + principal: process.env.SECURECLAW_PRINCIPAL || "agent:file-processor", + inputDir: "/workspace/input", + outputDir: "/workspace/output", + archiveDir: "/workspace/archive", +}; + +// Terminal colors +const colors = { + reset: "\x1b[0m", + bright: "\x1b[1m", + dim: "\x1b[2m", + green: "\x1b[32m", + red: "\x1b[31m", + yellow: "\x1b[33m", + blue: "\x1b[34m", + cyan: "\x1b[36m", + white: "\x1b[37m", + bgGreen: "\x1b[42m", + bgRed: "\x1b[41m", + bgBlue: "\x1b[44m", + bgYellow: "\x1b[43m", +}; + +// ============================================================================ +// Types +// ============================================================================ + +interface SalesRecord { + id: string; + region: string; + product: string; + amount: number; + date: string; + customer?: string; +} + +interface AggregatedSales { + region: string; + totalAmount: number; + recordCount: number; + products: string[]; + dateRange: { start: string; end: string }; +} + +interface ProcessingResult { + inputFiles: string[]; + recordsProcessed: number; + aggregations: AggregatedSales[]; + outputFile: string; + archivedFiles: string[]; +} + +// ============================================================================ +// Logging Utilities +// ============================================================================ + +function logHeader(title: string): void { + const width = 70; + console.log(""); + console.log(`${colors.cyan}${"═".repeat(width)}${colors.reset}`); + console.log(`${colors.cyan}║${colors.reset} ${colors.bright}${title}${colors.reset}`); + console.log(`${colors.cyan}${"═".repeat(width)}${colors.reset}`); + console.log(""); +} + +function logStep(step: number, description: string): void { + console.log(`${colors.bright}[Step ${step}]${colors.reset} ${description}`); +} + +function logAuthorize(action: string, resource: string): void { + console.log(`┌${"─".repeat(62)}┐`); + console.log(`│ ${colors.bgBlue}${colors.white} AUTHORIZE 
${colors.reset} ${action.padEnd(49)} │`); + console.log(`│ Resource: ${resource.slice(0, 50).padEnd(50)} │`); + console.log(`└${"─".repeat(62)}┘`); +} + +function logExecute(action: string, resource: string, mandateId: string): void { + console.log(`┌${"─".repeat(62)}┐`); + console.log(`│ ${colors.bgYellow}${colors.white} EXECUTE ${colors.reset} ${action.padEnd(49)} │`); + console.log(`│ Resource: ${resource.slice(0, 50).padEnd(50)} │`); + console.log(`│ Mandate: ${mandateId.slice(0, 50).padEnd(50)} │`); + console.log(`└${"─".repeat(62)}┘`); +} + +function logAllowed(mandateId: string): void { + console.log(` ${colors.green}✓ ALLOWED${colors.reset} mandate_id: ${mandateId}`); +} + +function logDenied(reason: string): void { + console.log(` ${colors.red}✗ DENIED: ${reason}${colors.reset}`); +} + +function logExecuted(evidenceHash?: string): void { + console.log(` ${colors.green}✓ EXECUTED${colors.reset} ${evidenceHash ? `evidence: ${evidenceHash.slice(0, 20)}...` : ""}`); +} + +function logSuccess(message: string): void { + console.log(` ${colors.green}✓${colors.reset} ${message}`); +} + +function logInfo(message: string): void { + console.log(` ${colors.dim}→${colors.reset} ${message}`); +} + +// ============================================================================ +// Sidecar Client (Zero-Trust Execute Mode) +// ============================================================================ + +class SidecarClient { + private readonly baseUrl: string; + private readonly timeoutMs: number; + + constructor(options: { baseUrl: string; timeoutMs?: number }) { + this.baseUrl = options.baseUrl; + this.timeoutMs = options.timeoutMs ?? 
10000; + } + + /** + * Request authorization from sidecar - returns mandate_id if allowed + */ + async authorize(request: AuthorizeRequest): Promise { + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), this.timeoutMs); + + try { + const response = await fetch(`${this.baseUrl}/v1/authorize`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(request), + signal: controller.signal, + }); + + if (!response.ok) { + if (response.status === 403) { + const errorJson = await response.json() as any; + return { + allowed: false, + reason: errorJson.reason || "policy_denied", + violated_rule: errorJson.violated_rule, + }; + } + throw new Error(`Sidecar returned ${response.status}: ${await response.text()}`); + } + + const result = await response.json() as any; + return { + allowed: result.allowed, + reason: result.reason, + mandate_id: result.mandate_id, + mandate_token: result.mandate_token, + matched_rule: result.scopes_authorized?.[0]?.matched_rule, + }; + } finally { + clearTimeout(timeout); + } + } + + /** + * Execute operation through sidecar using mandate_id + */ + async execute(request: ExecuteRequest): Promise { + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), this.timeoutMs); + + try { + const response = await fetch(`${this.baseUrl}/v1/execute`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(request), + signal: controller.signal, + }); + + const result = await response.json() as ExecuteResponse; + + if (!response.ok) { + return { + success: false, + error: result.error || `Execute failed with status ${response.status}`, + audit_id: result.audit_id || "", + }; + } + + return result; + } finally { + clearTimeout(timeout); + } + } + + /** + * Authorize and execute in one flow + */ + async authorizeAndExecute( + principal: string, + action: string, + resource: string, + payload?: 
ExecutePayload + ): Promise<{ authorized: boolean; result?: ExecuteResponse; reason?: string }> { + // Step 1: Authorize + logAuthorize(action, resource); + + const authResponse = await this.authorize({ principal, action, resource }); + + if (!authResponse.allowed) { + logDenied(authResponse.reason || "policy_denied"); + return { authorized: false, reason: authResponse.reason }; + } + + if (!authResponse.mandate_id) { + logDenied("No mandate_id returned (delegation not enabled?)"); + return { authorized: false, reason: "no_mandate_id" }; + } + + logAllowed(authResponse.mandate_id); + + // Step 2: Execute + logExecute(action, resource, authResponse.mandate_id); + + const execResponse = await this.execute({ + mandate_id: authResponse.mandate_id, + action, + resource, + payload, + }); + + if (execResponse.success) { + logExecuted(execResponse.evidence_hash); + } else { + console.log(` ${colors.red}✗ EXECUTE FAILED: ${execResponse.error}${colors.reset}`); + } + + return { authorized: true, result: execResponse }; + } +} + +// ============================================================================ +// File Processor Agent +// ============================================================================ + +class FileProcessorAgent { + private sidecarClient: SidecarClient; + private llmProvider: LLMProvider | null; + + constructor() { + this.sidecarClient = new SidecarClient({ + baseUrl: CONFIG.sidecarUrl, + timeoutMs: 10000, + }); + + this.llmProvider = createLLMProvider(); + } + + /** + * Main entry point - run the file processing pipeline + */ + async run(): Promise { + logHeader("FILE PROCESSOR AGENT - Zero-Trust Execute Mode Demo"); + + console.log(`${colors.cyan}Configuration:${colors.reset}`); + console.log(` Sidecar URL: ${CONFIG.sidecarUrl}`); + console.log(` Principal: ${CONFIG.principal}`); + console.log(` Input Dir: ${CONFIG.inputDir}`); + console.log(` Output Dir: ${CONFIG.outputDir}`); + console.log(` LLM: ${this.llmProvider ? 
this.llmProvider.name : "disabled (no provider configured)"}`); + console.log(` Mode: ${colors.bright}Zero-Trust Execute${colors.reset} (sidecar executes all operations)`); + console.log(""); + + try { + // Step 1: List input files + logStep(1, "Listing input directory"); + const inputFiles = await this.listDirectory(CONFIG.inputDir); + + if (inputFiles.length === 0) { + logInfo("No files found in input directory"); + return; + } + + logSuccess(`Found ${inputFiles.length} files: ${inputFiles.join(", ")}`); + console.log(""); + + // Step 2: Read and parse all input files + logStep(2, "Reading input files"); + const allRecords: SalesRecord[] = []; + + for (const file of inputFiles) { + if (!file.endsWith(".json")) continue; + + const filePath = `${CONFIG.inputDir}/${file}`; + const content = await this.readFile(filePath); + + if (content) { + try { + const records = JSON.parse(content) as SalesRecord[]; + allRecords.push(...records); + logSuccess(`Read ${records.length} records from ${file}`); + } catch (e) { + logInfo(`Skipping ${file}: invalid JSON`); + } + } + } + console.log(""); + + // Step 3: Process with LLM (or simple aggregation) + logStep(3, "Processing data"); + const aggregations = await this.aggregateData(allRecords); + logSuccess(`Aggregated ${allRecords.length} records into ${aggregations.length} regions`); + + for (const agg of aggregations) { + logInfo(`${agg.region}: $${agg.totalAmount.toLocaleString()} (${agg.recordCount} records)`); + } + console.log(""); + + // Step 4: Write output + logStep(4, "Writing output"); + const outputFile = `${CONFIG.outputDir}/aggregated_sales.json`; + const outputContent = JSON.stringify({ + generatedAt: new Date().toISOString(), + totalRecords: allRecords.length, + aggregations, + }, null, 2); + + await this.writeFile(outputFile, outputContent); + logSuccess(`Wrote aggregated results to ${outputFile}`); + console.log(""); + + // Step 5: Archive processed files + logStep(5, "Archiving processed files"); + const 
archivedFiles: string[] = []; + + for (const file of inputFiles) { + if (!file.endsWith(".json")) continue; + + const sourcePath = `${CONFIG.inputDir}/${file}`; + const archivePath = `${CONFIG.archiveDir}/${file}`; + + const content = await this.readFile(sourcePath); + if (content) { + await this.writeFile(archivePath, content); + archivedFiles.push(file); + } + } + logSuccess(`Archived ${archivedFiles.length} files to ${CONFIG.archiveDir}`); + console.log(""); + + // Step 6: Generate report using shell command + logStep(6, "Generating report"); + const reportResult = await this.executeCommand("wc", ["-l", `${CONFIG.outputDir}/aggregated_sales.json`]); + if (reportResult) { + logSuccess(`Report: ${reportResult.trim()}`); + } + console.log(""); + + // Step 7: Demonstrate policy denial + logStep(7, "Attempting unauthorized access (demo)"); + await this.demonstrateDenial(); + console.log(""); + + // Summary + logHeader("AGENT COMPLETED - All operations executed by sidecar"); + + const result: ProcessingResult = { + inputFiles, + recordsProcessed: allRecords.length, + aggregations, + outputFile, + archivedFiles, + }; + + console.log(`${colors.cyan}Summary:${colors.reset}`); + console.log(` Files processed: ${result.inputFiles.length}`); + console.log(` Records aggregated: ${result.recordsProcessed}`); + console.log(` Regions: ${result.aggregations.length}`); + console.log(` Output file: ${result.outputFile}`); + console.log(` Archived files: ${result.archivedFiles.length}`); + console.log(""); + console.log(`${colors.green}${colors.bright}All operations executed via /v1/execute endpoint.${colors.reset}`); + console.log(`${colors.dim}Agent had ZERO filesystem access - sidecar executed all operations.${colors.reset}`); + + } catch (error) { + console.error(`${colors.red}Error: ${(error as Error).message}${colors.reset}`); + throw error; + } + } + + // ========================================================================== + // File Operations (authorize + execute 
through sidecar) + // ========================================================================== + + /** + * List directory contents via sidecar + */ + private async listDirectory(dirPath: string): Promise { + const { authorized, result, reason } = await this.sidecarClient.authorizeAndExecute( + CONFIG.principal, + "fs.list", + dirPath + ); + + if (!authorized || !result?.success) { + logInfo(`Could not list directory: ${reason || result?.error}`); + return []; + } + + const listResult = result.result as { type: "file_list"; entries: DirectoryEntry[]; total_entries: number }; + return listResult.entries + .filter(e => e.entry_type === "file") + .map(e => e.name); + } + + /** + * Read file contents via sidecar + */ + private async readFile(filePath: string): Promise { + const { authorized, result, reason } = await this.sidecarClient.authorizeAndExecute( + CONFIG.principal, + "fs.read", + filePath + ); + + if (!authorized || !result?.success) { + logInfo(`Could not read file: ${reason || result?.error}`); + return null; + } + + const readResult = result.result as { type: "file_read"; content: string; size: number; content_hash: string }; + logSuccess(`Read ${readResult.size} bytes (hash: ${readResult.content_hash.slice(0, 20)}...)`); + return readResult.content; + } + + /** + * Write file contents via sidecar + */ + private async writeFile(filePath: string, content: string): Promise { + const { authorized, result, reason } = await this.sidecarClient.authorizeAndExecute( + CONFIG.principal, + "fs.write", + filePath, + { type: "file_write", content, create: true, append: false } + ); + + if (!authorized || !result?.success) { + logInfo(`Could not write file: ${reason || result?.error}`); + return false; + } + + const writeResult = result.result as { type: "file_write"; bytes_written: number; content_hash: string }; + logSuccess(`Wrote ${writeResult.bytes_written} bytes (hash: ${writeResult.content_hash.slice(0, 20)}...)`); + return true; + } + + /** + * Execute shell 
command via sidecar + */ + private async executeCommand(command: string, args: string[]): Promise { + const fullCommand = `${command} ${args.join(" ")}`; + + const { authorized, result, reason } = await this.sidecarClient.authorizeAndExecute( + CONFIG.principal, + "cli.exec", + fullCommand, + { type: "cli_exec", command, args } + ); + + if (!authorized || !result?.success) { + logInfo(`Could not execute command: ${reason || result?.error}`); + return null; + } + + const execResult = result.result as { type: "cli_exec"; exit_code: number; stdout: string; stderr: string; duration_ms: number }; + return execResult.stdout; + } + + // ========================================================================== + // Data Processing + // ========================================================================== + + private async aggregateData(records: SalesRecord[]): Promise { + const byRegion = new Map(); + + for (const record of records) { + const region = record.region || "Unknown"; + if (!byRegion.has(region)) { + byRegion.set(region, []); + } + byRegion.get(region)!.push(record); + } + + // LLM analysis if available + if (this.llmProvider) { + logInfo(`Using LLM for enhanced analysis (${this.llmProvider.name})...`); + try { + const analysis = await this.llmAnalyze(records); + if (analysis) { + logInfo("LLM analysis complete"); + logInfo(`LLM insight: ${analysis.slice(0, 100)}...`); + } + } catch (e) { + logInfo("LLM analysis skipped: " + (e as Error).message); + } + } + + const aggregations: AggregatedSales[] = []; + + for (const [region, regionRecords] of byRegion) { + const totalAmount = regionRecords.reduce((sum, r) => sum + (r.amount || 0), 0); + const products = [...new Set(regionRecords.map(r => r.product).filter(Boolean))]; + const dates = regionRecords.map(r => r.date).filter(Boolean).sort(); + + aggregations.push({ + region, + totalAmount, + recordCount: regionRecords.length, + products, + dateRange: { + start: dates[0] || "unknown", + end: dates[dates.length 
- 1] || "unknown", + }, + }); + } + + return aggregations.sort((a, b) => b.totalAmount - a.totalAmount); + } + + private async llmAnalyze(records: SalesRecord[]): Promise { + if (!this.llmProvider) return null; + + const systemPrompt = "You are a sales data analyst. Provide brief, actionable insights."; + const userPrompt = `Analyze these sales records and provide a brief summary: + +${JSON.stringify(records.slice(0, 10), null, 2)} +${records.length > 10 ? `... and ${records.length - 10} more records` : ""} + +Provide: +1. Top performing region +2. Most popular product +3. Any notable trends + +Keep your response under 200 words.`; + + const response = await this.llmProvider.generate(systemPrompt, userPrompt); + return response.text; + } + + // ========================================================================== + // Security Demonstration + // ========================================================================== + + private async demonstrateDenial(): Promise { + logInfo("Attempting to read /etc/passwd (should be denied)..."); + + const authResponse = await this.sidecarClient.authorize({ + principal: CONFIG.principal, + action: "fs.read", + resource: "/etc/passwd", + }); + + logAuthorize("fs.read", "/etc/passwd"); + + if (!authResponse.allowed) { + logDenied(authResponse.reason || "resource not in authorized scope"); + logSuccess("BLOCKED: Policy enforcement working correctly"); + } else { + console.log(` ${colors.yellow}⚠ Unexpected: access was allowed${colors.reset}`); + } + + // Try writing to unauthorized location + logInfo("Attempting to write to /tmp/hack.txt (should be denied)..."); + + const writeAuth = await this.sidecarClient.authorize({ + principal: CONFIG.principal, + action: "fs.write", + resource: "/tmp/hack.txt", + }); + + logAuthorize("fs.write", "/tmp/hack.txt"); + + if (!writeAuth.allowed) { + logDenied(writeAuth.reason || "resource not in authorized scope"); + logSuccess("BLOCKED: Write to unauthorized path prevented"); + } else { + 
console.log(` ${colors.yellow}⚠ Unexpected: access was allowed${colors.reset}`); + } + } +} + +// ============================================================================ +// Entry Point +// ============================================================================ + +async function main(): Promise { + const agent = new FileProcessorAgent(); + await agent.run(); +} + +main().catch((error) => { + console.error(`${colors.red}Fatal error: ${error.message}${colors.reset}`); + process.exit(1); +}); diff --git a/examples/file-processor-demo/tsconfig.json b/examples/file-processor-demo/tsconfig.json new file mode 100644 index 0000000..1388f4c --- /dev/null +++ b/examples/file-processor-demo/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/examples/file-processor-demo/workspace/archive/.gitkeep b/examples/file-processor-demo/workspace/archive/.gitkeep new file mode 100644 index 0000000..8dcdea3 --- /dev/null +++ b/examples/file-processor-demo/workspace/archive/.gitkeep @@ -0,0 +1 @@ +# Archive directory - processed files moved here diff --git a/examples/file-processor-demo/workspace/input/sales_north.json b/examples/file-processor-demo/workspace/input/sales_north.json new file mode 100644 index 0000000..77ddec3 --- /dev/null +++ b/examples/file-processor-demo/workspace/input/sales_north.json @@ -0,0 +1,7 @@ +[ + {"id": "N001", "region": "North", "product": "Widget A", "amount": 15200, "date": "2024-01-15", "customer": "Acme Corp"}, + {"id": "N002", "region": "North", "product": "Widget B", "amount": 8900, "date": "2024-01-18", "customer": "TechStart Inc"}, + {"id": "N003", 
"region": "North", "product": "Widget A", "amount": 12500, "date": "2024-01-22", "customer": "Global Systems"}, + {"id": "N004", "region": "North", "product": "Widget C", "amount": 6300, "date": "2024-01-25", "customer": "DataFlow Ltd"}, + {"id": "N005", "region": "North", "product": "Widget B", "amount": 9800, "date": "2024-01-28", "customer": "CloudNine"} +] diff --git a/examples/file-processor-demo/workspace/input/sales_south.json b/examples/file-processor-demo/workspace/input/sales_south.json new file mode 100644 index 0000000..635aeab --- /dev/null +++ b/examples/file-processor-demo/workspace/input/sales_south.json @@ -0,0 +1,6 @@ +[ + {"id": "S001", "region": "South", "product": "Widget B", "amount": 11200, "date": "2024-01-12", "customer": "SunTech"}, + {"id": "S002", "region": "South", "product": "Widget A", "amount": 7800, "date": "2024-01-16", "customer": "Coastal Systems"}, + {"id": "S003", "region": "South", "product": "Widget C", "amount": 14500, "date": "2024-01-20", "customer": "Harbor Inc"}, + {"id": "S004", "region": "South", "product": "Widget A", "amount": 5600, "date": "2024-01-24", "customer": "Bay Networks"} +] diff --git a/examples/file-processor-demo/workspace/input/sales_west.json b/examples/file-processor-demo/workspace/input/sales_west.json new file mode 100644 index 0000000..592ae33 --- /dev/null +++ b/examples/file-processor-demo/workspace/input/sales_west.json @@ -0,0 +1,8 @@ +[ + {"id": "W001", "region": "West", "product": "Widget C", "amount": 18900, "date": "2024-01-10", "customer": "Pacific Tech"}, + {"id": "W002", "region": "West", "product": "Widget A", "amount": 13200, "date": "2024-01-14", "customer": "Valley Ventures"}, + {"id": "W003", "region": "West", "product": "Widget B", "amount": 9500, "date": "2024-01-19", "customer": "Mountain Systems"}, + {"id": "W004", "region": "West", "product": "Widget C", "amount": 16800, "date": "2024-01-23", "customer": "Redwood Inc"}, + {"id": "W005", "region": "West", "product": "Widget A", 
"amount": 7200, "date": "2024-01-27", "customer": "Sequoia Labs"}, + {"id": "W006", "region": "West", "product": "Widget B", "amount": 11100, "date": "2024-01-30", "customer": "Sierra Solutions"} +] diff --git a/examples/file-processor-demo/workspace/output/.gitkeep b/examples/file-processor-demo/workspace/output/.gitkeep new file mode 100644 index 0000000..fc57a66 --- /dev/null +++ b/examples/file-processor-demo/workspace/output/.gitkeep @@ -0,0 +1 @@ +# Output directory - files generated by agent