diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index 1ba3e26..c5db1d7 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -19,7 +19,7 @@ We do not tolerate: ## Reporting -If you experience or witness unacceptable behaviour, contact the project owner at conduct@maschina.io. All reports are handled confidentially. +If you experience or witness unacceptable behaviour, contact the project owner at conduct@maschina.ai. All reports are handled confidentially. --- diff --git a/.github/SECURITY.md b/.github/SECURITY.md index 9489a5e..2fc77ab 100644 --- a/.github/SECURITY.md +++ b/.github/SECURITY.md @@ -18,7 +18,7 @@ We operate a rolling release model. Only the current production version receives Report vulnerabilities privately via one of: - **GitHub private vulnerability reporting**: [Security tab → Report a vulnerability](https://github.com/RustMunkey/maschina/security/advisories/new) -- **Email**: security@maschina.io *(monitored, response within 48 hours)* +- **Email**: security@maschina.ai *(monitored, response within 48 hours)* ### What to include diff --git a/README.md b/README.md index 7cac251..2ca7c9c 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Maschina -Infrastructure for autonomous digital labor. A modular framework for deploying networks of AI agents that continuously discover, evaluate, and execute digital opportunities. +Infrastructure for autonomous digital labor. A modular platform for deploying networks of AI agents that continuously discover, evaluate, and execute digital opportunities. -> **Status:** Pre-development — active scaffolding. MVP targeted Q2 2026. +> **Status:** Active development. v1.0.0 published. --- @@ -10,27 +10,29 @@ Infrastructure for autonomous digital labor. 
A modular framework for deploying n | Layer | Technology | |-------|-----------| -| Frontend / tooling | TypeScript, React, Tauri | +| Frontend / tooling | TypeScript, React 19, Vite, TanStack Router | | Backend API | TypeScript (Hono) | -| Agent runtime | Python (FastAPI, Celery) | -| ML / RL | Python (PyTorch, Stable-Baselines3, Gymnasium) | -| Daemon / CLI / Desktop / Mobile | Rust, Tauri 2 | -| Database | PostgreSQL (prod) / SQLite (local) via Drizzle | -| Queue | Redis + Celery | +| Agent runtime | Python (FastAPI) | +| ML / RL | Python (PyTorch, NumPy) | +| Daemon / Gateway / Realtime / CLI / Desktop | Rust (Tokio, Axum, Clap, Tauri 2) | +| Mobile | iOS: Swift + SwiftUI · Android: Kotlin + Jetpack Compose | +| Database | PostgreSQL (Neon prod) / SQLite (local dev) via Drizzle | +| Job queue | NATS JetStream | +| Cache | Redis (quota counters) | | Monorepo | pnpm + Turborepo | -| Formatting / Linting | Biome (TS), Clippy (Rust) | +| Formatting / Linting | Biome (TS), Clippy (Rust), Ruff (Python) | | Testing | Vitest (TS), cargo test (Rust), pytest (Python) | --- ## Prerequisites -- [Node.js](https://nodejs.org) >= 20 +- [Node.js](https://nodejs.org) >= 22 - [pnpm](https://pnpm.io) >= 10 — `npm install -g pnpm` -- [Rust](https://rustup.rs) stable toolchain +- [Rust](https://rustup.rs) 1.88+ toolchain - [Python](https://python.org) >= 3.12 + [uv](https://github.com/astral-sh/uv) -- [Docker](https://docker.com) (for Postgres + Redis) -- [Tauri prerequisites](https://tauri.app/start/prerequisites/) (for desktop/mobile) +- [Docker](https://docker.com) (for Postgres, Redis, NATS, and other infrastructure) +- [Tauri prerequisites](https://tauri.app/start/prerequisites/) (for desktop app) --- @@ -44,21 +46,26 @@ cd maschina # 2. Install JS dependencies pnpm install -# 3. Install git hooks (local CI runs before every push) +# 3. Install git hooks (pre-commit lint + commit-msg format check) pnpm hooks:install # 4. 
Install Python dependencies -uv pip install -e packages/runtime -e packages/agents -e packages/ml \ - -e packages/risk -e packages/sdk/python -e services/worker - -# 5. Copy env and configure -cp docker/.env.example .env - -# 6. Start infrastructure (Postgres + Redis) +uv pip install -e packages/runtime -e packages/agents \ + -e packages/risk -e packages/sdk/python \ + -e services/runtime -e services/worker + +# 5. Copy env files and configure +cp services/api/.env.example services/api/.env +cp services/gateway/.env.example services/gateway/.env +cp services/daemon/.env.example services/daemon/.env +cp services/realtime/.env.example services/realtime/.env +cp services/runtime/.env.example services/runtime/.env + +# 6. Start infrastructure (Postgres, Redis, NATS, Meilisearch, Qdrant, Temporal, Grafana...) pnpm docker:dev -# 7. Push DB schema -pnpm db:push +# 7. Run DB migrations +pnpm db:migrate ``` --- @@ -73,7 +80,7 @@ Native tools (`cargo`, `pytest`, etc.) still work as normal — pnpm commands ar | Command | Description | |---------|-------------| | `pnpm build` | Build everything | -| `pnpm dev` | Dev all apps and services in parallel | +| `pnpm dev` | Dev all services in parallel | | `pnpm test` | Run all tests (TS + Rust + Python) | | `pnpm check` | Biome lint + format check (TypeScript / JSON) | | `pnpm format` | Biome format with auto-fix | @@ -83,15 +90,16 @@ Native tools (`cargo`, `pytest`, etc.) 
still work as normal — pnpm commands ar | Command | Description | |---------|-------------| -| `pnpm app` | Operator dashboard | +| `pnpm app` | Main product dashboard | | `pnpm web` | Marketing site | | `pnpm doc` | Documentation site | -| `pnpm console` | Terminal UI | +| `pnpm console` | Internal admin console | | `pnpm desktop` | Tauri desktop app | -| `pnpm mobile` | Tauri mobile app | -| `pnpm api` | Backend API | -| `pnpm daemon` | Rust orchestrator daemon | -| `pnpm worker` | Python Celery worker | +| `pnpm api` | Backend API (Hono) | +| `pnpm daemon` | Rust agent orchestrator | +| `pnpm gateway` | Rust API gateway | +| `pnpm realtime` | Rust WebSocket/SSE hub | +| `pnpm worker` | Python NATS worker | | `pnpm cli` | Maschina CLI (dev mode) | | `pnpm code` | Maschina Code tool (dev mode) | @@ -99,105 +107,37 @@ Native tools (`cargo`, `pytest`, etc.) still work as normal — pnpm commands ar | Command | Description | |---------|-------------| -| `pnpm build:app` | Build operator dashboard | -| `pnpm build:web` | Build marketing site | -| `pnpm build:doc` | Build documentation site | -| `pnpm build:console` | Build terminal UI | -| `pnpm build:desktop` | Build Tauri desktop app | -| `pnpm build:mobile` | Build Tauri mobile app | | `pnpm build:api` | Build backend API | | `pnpm build:daemon` | Build Rust daemon | -| `pnpm build:worker` | Build Python worker | | `pnpm build:cli` | Build Maschina CLI | -| `pnpm build:code` | Build Maschina Code tool | -| `pnpm build:types` | Build shared TS types | -| `pnpm build:core` | Build core pipeline primitives | -| `pnpm build:ui` | Build shared component library | -| `pnpm build:sdk` | Build TypeScript agent SDK | -| `pnpm build:config` | Build shared config schemas | -| `pnpm build:content` | Build brand/copy content package | -| `pnpm build:telemetry` | Build telemetry/audit package | -| `pnpm build:db` | Build database package | -| `pnpm build:risk` | Build Python risk engine | -| `pnpm build:runtime` | Build Python 
pipeline runtime | -| `pnpm build:agents` | Build Python agent implementations | -| `pnpm build:ml` | Build Python ML/RL package | -| `pnpm build:packages` | Build all packages only | +| `pnpm build:packages` | Build all TS packages | | `pnpm build:rust` | Build all Rust crates (release) | ### Test | Command | Description | |---------|-------------| -| `pnpm test:app` | Test operator dashboard | -| `pnpm test:web` | Test marketing site | -| `pnpm test:console` | Test terminal UI | | `pnpm test:api` | Test backend API | | `pnpm test:daemon` | Test Rust daemon | -| `pnpm test:worker` | Test Python worker | -| `pnpm test:cli` | Test Maschina CLI | -| `pnpm test:code` | Test Maschina Code tool | -| `pnpm test:types` | Test shared TS types | -| `pnpm test:core` | Test core pipeline package | -| `pnpm test:ui` | Test UI component library | | `pnpm test:sdk` | Test TypeScript SDK | | `pnpm test:sdk-py` | Test Python SDK | -| `pnpm test:config` | Test config package | -| `pnpm test:content` | Test content package | -| `pnpm test:telemetry` | Test telemetry package | -| `pnpm test:db` | Test database package | -| `pnpm test:risk` | Test Python risk engine | -| `pnpm test:runtime` | Test Python runtime | -| `pnpm test:agents` | Test Python agents | -| `pnpm test:ml` | Test Python ML/RL package | -| `pnpm test:rust` | Run all Rust tests (`cargo test --workspace`) | -| `pnpm tests` | Run all tests in the root `tests/` folder | -| `pnpm test:e2e` | Run end-to-end tests | -| `pnpm test:integration` | Run integration tests | -| `pnpm test:load` | Run load tests | - -### Rust (pnpm wrappers — `cargo` still works natively) - -| Command | Description | -|---------|-------------| -| `pnpm cargo:build` | `cargo build --workspace` | -| `pnpm cargo:build:release` | `cargo build --workspace --release` | -| `pnpm cargo:test` | `cargo test --workspace` | -| `pnpm cargo:check` | `cargo check --workspace` | -| `pnpm cargo:clippy` | `cargo clippy --workspace` | -| `pnpm cargo:fmt` | `cargo 
fmt --all` | -| `pnpm cargo:clean` | `cargo clean` | -| `pnpm cargo:run:daemon` | Run the daemon binary | -| `pnpm cargo:run:cli` | Run the CLI binary | -| `pnpm cargo:run:code` | Run the Code tool binary | -| `pnpm check:rust` | Alias for `cargo clippy --workspace` | - -### Python (pnpm wrappers — `pytest` still works natively) - -| Command | Description | -|---------|-------------| -| `pnpm pytest` | Run all Python tests across all packages | -| `pnpm pytest:runtime` | Test Python pipeline runtime | -| `pnpm pytest:agents` | Test Python agent implementations | -| `pnpm pytest:ml` | Test ML/RL package | -| `pnpm pytest:risk` | Test risk engine | -| `pnpm pytest:sdk` | Test Python SDK | -| `pnpm pytest:worker` | Test Celery worker service | +| `pnpm test:rust` | Run all Rust tests | ### Database | Command | Description | |---------|-------------| | `pnpm db` | Open Drizzle Studio (visual DB browser) | -| `pnpm db:push` | Push schema changes directly (dev) | -| `pnpm db:migrate` | Run migrations (production) | +| `pnpm db:push` | Push schema changes directly (dev only) | +| `pnpm db:migrate` | Run migrations | | `pnpm db:generate` | Generate migration files from schema | +| `pnpm db:seed` | Seed dev fixtures | ### Docker | Command | Description | |---------|-------------| -| `pnpm docker:dev` | Start Postgres + Redis for local dev | +| `pnpm docker:dev` | Start full local infrastructure stack | | `pnpm docker:up` | Start full stack | | `pnpm docker:down` | Stop all containers | | `pnpm docker:logs` | Tail container logs | @@ -212,8 +152,6 @@ Native tools (`cargo`, `pytest`, etc.) still work as normal — pnpm commands ar | `pnpm ci:ts` | TypeScript only — check + build + test | | `pnpm ci:rust` | Rust only — clippy + build + test | | `pnpm ci:python` | Python only — pytest across all packages | -| `pnpm ci:e2e` | End-to-end test suite | -| `pnpm ci:integration` | Integration test suite | ### Setup @@ -228,55 +166,86 @@ Native tools (`cargo`, `pytest`, etc.) 
still work as normal — pnpm commands ar ``` maschina/ ├── apps/ # User-facing applications -│ ├── app/ # Operator dashboard -│ ├── console/ # Terminal UI -│ ├── desktop/ # Tauri desktop (Rust + web) -│ ├── docs/ # Documentation site -│ ├── mobile/ # Tauri mobile (Rust + web) +│ ├── app/ # Main product dashboard (React + Vite + TanStack) +│ ├── auth/ # Standalone auth app +│ ├── admin/ # Internal admin app +│ ├── console/ # Internal admin console +│ ├── desktop/ # Tauri 2 desktop (macOS, Windows, Linux) +│ ├── developers/ # Developer portal +│ ├── docs/ # Documentation site (Mintlify) +│ ├── mobile/ +│ │ ├── android/ # Native Android (Kotlin + Jetpack Compose + Material 3) +│ │ │ └── wear/ # Wear OS module (standalone=false) +│ │ └── ios/ # Native iOS (Swift + SwiftUI) +│ │ └── MaschinaWatch/ # watchOS extension │ └── web/ # Marketing site │ -├── services/ # Infrastructure services -│ ├── api/ # Backend API (TypeScript) -│ ├── daemon/ # Orchestrator daemon (Rust) -│ └── worker/ # Agent executor (Python / Celery) +├── services/ # Backend microservices +│ ├── api/ # TypeScript / Hono — business logic API (port 3000) +│ ├── analytics/ # Analytics service +│ ├── daemon/ # Rust / Tokio — agent job orchestrator (port 9090 health) +│ ├── email/ # Email service +│ ├── gateway/ # Rust / Axum — public edge, auth, rate limiting, proxy (port 8080) +│ ├── realtime/ # Rust / Axum — WebSocket + SSE hub (port 4000) +│ ├── runtime/ # Python / FastAPI — agent execution sandbox (port 8001) +│ └── worker/ # Python — NATS job consumer (email, webhooks, analytics) │ -├── packages/ # Shared packages -│ ├── agents/ # Agent implementations (Python) -│ ├── assets/ # Brand assets -│ │ └── docs/ # Whitepaper, one-pager -│ ├── cli/ # Maschina CLI + setup wizard (Rust) -│ ├── code/ # Maschina Code dev tool (Rust) -│ ├── config/ # Shared config schemas (TypeScript) -│ ├── content/ # Brand copy, product text (TypeScript) -│ ├── core/ # Agent pipeline primitives (TypeScript) -│ ├── db/ # Drizzle 
schema + migrations (TypeScript) -│ ├── ml/ # ML / RL training, agent evolution (Python) -│ ├── risk/ # Risk engine (Python) -│ ├── runtime/ # Pipeline engine + FastAPI (Python) +├── packages/ # Shared libraries +│ ├── auth/ # JWT, argon2id, sessions, API keys, RBAC, OAuth +│ ├── billing/ # Stripe Checkout, webhooks, credits +│ ├── cache/ # ioredis wrapper +│ ├── chain/ # Solana program interactions +│ ├── cli/ # Rust CLI (maschina binary) +│ ├── code/ # Rust TUI scaffold tool (maschina-code binary) +│ ├── compliance/ # Audit log export, GDPR deletion (stub) +│ ├── config/ # Shared app configuration +│ ├── connectors/ # 3rd party connector integrations (stub) +│ ├── content/ # Brand copy, product text +│ ├── core/ # Agent pipeline primitives +│ ├── crypto/ # Encryption utilities +│ ├── db/ # Drizzle schemas (pg + sqlite dual-dialect), migrations +│ ├── email/ # Resend client, React Email templates (5 templates) +│ ├── errors/ # Shared error types +│ ├── events/ # Typed NATS event definitions + subject registry +│ ├── flags/ # Feature flags (LaunchDarkly + PostHog) +│ ├── jobs/ # Job type definitions + NATS dispatch helpers +│ ├── keys/ # API key management utilities +│ ├── marketplace/ # Agent marketplace (stub) +│ ├── model/ # Model catalog, tier access gates, billing multipliers +│ ├── nats/ # NATS/JetStream client, streams, publish, consume +│ ├── notifications/ # In-app + NATS fan-out notification dispatch +│ ├── payments/ # Payment primitives +│ ├── plans/ # All 7 tiers, gates, quota limits (single source of truth) +│ ├── query/ # TanStack Query hook suite +│ ├── ratelimit/ # Rate limiting primitives +│ ├── reputation/ # On-chain reputation scoring (stub) +│ ├── risk/ # Python — input/output safety checks, PII scan +│ ├── runtime/ # Python — AgentRunner, tool calling, multi-turn loop +│ ├── agents/ # Python — Agent base class, 5 agent types │ ├── sdk/ -│ │ ├── ts/ # TypeScript agent SDK (@maschina/sdk) -│ │ ├── python/ # Python agent SDK (maschina-sdk) -│ │ 
└── rust/ # Rust traits + types (@maschina/rust) -│ ├── telemetry/ # Audit logging (TypeScript) +│ │ ├── ts/ # TypeScript SDK (@maschina/sdk) +│ │ ├── python/ # Python SDK (maschina-sdk) +│ │ └── rust/ # Rust SDK (maschina-sdk-rs) +│ ├── search/ # Meilisearch client, index definitions +│ ├── storage/ # Object storage (S3/CloudFront) +│ ├── telemetry/ # OpenTelemetry SDK init + tracer helpers +│ ├── treasury/ # Solana treasury management (stub) │ ├── tsconfig/ # Shared TypeScript configs │ ├── types/ # Shared TypeScript types -│ ├── ui/ # Shared component library -│ └── vitest-config/ # Shared Vitest config +│ ├── ui/ # shadcn/ui + HeroUI components (55 components) +│ ├── usage/ # Redis quota enforcement + PG audit log +│ ├── validation/ # Zod schemas, sanitization, projection helpers +│ ├── vector/ # Qdrant + pgvector client wrappers +│ └── webhooks/ # Outbound webhook signing, retry, delivery log │ -├── tests/ # Root test suites -│ ├── e2e/ -│ ├── integration/ -│ ├── load/ -│ └── scripts/ -│ -├── docker/ # Docker compose + service config -│ ├── postgres/ -│ └── redis/ +├── docker/ # Docker Compose for local dev +│ └── docker-compose.yml # Postgres, Redis, NATS, Meilisearch, Qdrant, Temporal, Grafana... 
│ ├── install/ # Curl install script (operators) ├── .github/ │ ├── hooks/ # Local CI git hooks -│ └── workflows/ # GitHub Actions CI +│ └── workflows/ # GitHub Actions: ci, deploy, release, semantic-release, +│ # codeql, secrets-scan, stale, dependabot-auto-merge │ ├── Cargo.toml # Rust workspace ├── biome.json # Formatter + linter (TypeScript) @@ -289,7 +258,7 @@ maschina/ Two backends supported, switched via `DATABASE_URL` in `.env`: -**PostgreSQL** (recommended — Docker handles this): +**PostgreSQL** (recommended): ```sh DATABASE_URL="postgresql://maschina:maschina@localhost:5432/maschina" pnpm docker:dev @@ -298,9 +267,10 @@ pnpm docker:dev **SQLite** (zero-config, no Docker needed): ```sh DATABASE_URL="file:./dev.db" -# also set dialect to "sqlite" in packages/db/drizzle.config.ts ``` +Client auto-detects dialect from the URL prefix. + --- ## Local CI diff --git a/apps/docs/api-reference/agents.mdx b/apps/docs/api-reference/agents.mdx new file mode 100644 index 0000000..6ca5efc --- /dev/null +++ b/apps/docs/api-reference/agents.mdx @@ -0,0 +1,231 @@ +--- +title: Agents +description: Create, manage, and run autonomous agents. +--- + +import { Robot, Plus, List, Pencil, Trash, Play, ArrowsClockwise } from "@phosphor-icons/react"; + +## Agent Object + +Every agent has a stable ID, a type, a config block, and a lifecycle status. + +```json +{ + "id": "agt_01abc...", + "name": "Research Agent", + "description": "Produces structured summaries from any topic.", + "type": "analysis", + "status": "idle", + "config": { + "systemPrompt": "You are a research analyst. 
Return concise, sourced summaries.", + "model": "claude-sonnet-4-6" + }, + "createdAt": "2026-03-13T00:00:00.000Z", + "updatedAt": "2026-03-13T00:00:00.000Z" +} +``` + +### Agent Types + +| Type | Description | +|---|---| +| `execution` | General-purpose task execution | +| `analysis` | Data analysis and structured output | +| `signal` | Event detection and alerting | +| `optimization` | Iterative refinement tasks | +| `reporting` | Scheduled report generation | + +### Status Values + +| Status | Description | +|---|---| +| `idle` | Ready to accept runs | +| `running` | Currently executing a run | +| `paused` | Temporarily suspended — no new runs dispatched | +| `error` | Last run failed; agent is still active | + +--- + +## Create an Agent + +```bash +POST /agents +``` + +| Parameter | Type | Required | Description | +|---|---|---|---| +| `name` | string | Yes | Display name for the agent (max 100 characters). | +| `type` | string | Yes | Agent type: `execution`, `analysis`, `signal`, `optimization`, or `reporting`. | +| `description` | string | No | Optional human-readable description shown in search results. | +| `config` | object | No | Runtime configuration for the agent. | +| `config.systemPrompt` | string | No | System prompt prepended to every run. Defaults to a generic assistant prompt. | +| `config.model` | string | No | Default model for this agent. Can be overridden per run. See [Model Selection](/guides/models). | + +**Request:** +```json +{ + "name": "Research Agent", + "type": "analysis", + "description": "Produces structured summaries from any topic.", + "config": { + "systemPrompt": "You are a research analyst. Return concise, sourced summaries.", + "model": "claude-sonnet-4-6" + } +} +``` + +**Response:** `201 Created` +```json +{ + "id": "agt_01abc...", + "name": "Research Agent", + "description": "Produces structured summaries from any topic.", + "type": "analysis", + "status": "idle", + "config": { + "systemPrompt": "You are a research analyst. 
Return concise, sourced summaries.", + "model": "claude-sonnet-4-6" + }, + "createdAt": "2026-03-13T00:00:00.000Z", + "updatedAt": "2026-03-13T00:00:00.000Z" +} +``` + +--- + +## List Agents + +```bash +GET /agents +``` + +| Parameter | Type | Default | Description | +|---|---|---|---| +| `limit` | number | `20` | Max results per page (max: 100). | +| `offset` | number | `0` | Pagination offset. | +| `type` | string | — | Filter by agent type. | +| `status` | string | — | Filter by status: `idle`, `running`, `paused`, `error`. | +| `q` | string | — | Full-text search across name and description (backed by Meilisearch). | + +**Response:** `200 OK` +```json +{ + "data": [ + { + "id": "agt_01abc...", + "name": "Research Agent", + "type": "analysis", + "status": "idle", + "createdAt": "2026-03-13T00:00:00.000Z" + } + ], + "total": 1, + "limit": 20, + "offset": 0 +} +``` + +--- + +## Get an Agent + +```bash +GET /agents/:id +``` + +Returns the full agent object including config. Returns `404` if the agent does not exist or belongs to a different user. + +--- + +## Update an Agent + +```bash +PATCH /agents/:id +``` + +All fields are optional. Only provided fields are updated. + +```json +{ + "name": "Updated Name", + "description": "New description.", + "config": { + "systemPrompt": "New system prompt.", + "model": "claude-haiku-4-5" + } +} +``` + +**Response:** `200 OK` — returns the updated agent object. + +--- + +## Delete an Agent + +```bash +DELETE /agents/:id +``` + +Soft-deletes the agent. Running runs complete normally. No new runs can be dispatched after deletion. + +**Response:** `200 OK` +```json +{ "success": true } +``` + +--- + +## Run an Agent + + Dispatches an agent run. Returns immediately with a `runId` — the run executes asynchronously. + +```bash +POST /agents/:id/run +``` + +| Parameter | Type | Required | Description | +|---|---|---|---| +| `input` | object | Yes | Arbitrary JSON passed to the agent runtime as the user message. 
Must include at least a `message` string. | +| `model` | string | No | Override the agent's default model for this run only. | +| `timeout` | number | No | Timeout in milliseconds. Default: `300000`. Max: `600000` (10 minutes). | + +**Request:** +```json +{ + "input": { "message": "Summarize recent AI research on multi-agent systems." }, + "model": "claude-sonnet-4-6", + "timeout": 120000 +} +``` + +**Response:** `202 Accepted` +```json +{ + "success": true, + "runId": "run_01xyz...", + "agentId": "agt_01abc...", + "status": "queued", + "message": "Agent run queued. Connect to /realtime for live status updates." +} +``` + + +Use [webhooks](/guides/webhooks) or the [realtime WebSocket](/guides/realtime) to receive results. Polling `GET /agents/:agentId/runs/:runId` also works. + + +--- + +## Errors + +| Code | Meaning | +|---|---| +| `400` | Missing required field or invalid config | +| `403` | Model requires a higher plan tier | +| `404` | Agent not found | +| `429` | Monthly quota exhausted | + +```json +{ + "message": "Model claude-opus-4-6 requires the m10 plan or higher." +} +``` diff --git a/apps/docs/api-reference/authentication.mdx b/apps/docs/api-reference/authentication.mdx new file mode 100644 index 0000000..7dcf28c --- /dev/null +++ b/apps/docs/api-reference/authentication.mdx @@ -0,0 +1,202 @@ +--- +title: Authentication +description: API keys, sessions, OAuth, and token management. +--- + +import { Key, Lock, ShieldCheck, User, ArrowsClockwise } from "@phosphor-icons/react"; + +Maschina supports two authentication methods: API keys (for programmatic access) and session tokens (for user-facing apps and the CLI). + +```mermaid +flowchart LR + A([Your App]) -->|"Bearer msk_live_..."| B[Gateway] + C([Dashboard / CLI]) -->|"POST /auth/login"| D[API] + D -->|"accessToken JWT"| C + C -->|"Bearer eyJ..."| B + B -->|validate| E[API] +``` + +--- + +## API Keys + + The recommended authentication method for programmatic access. 
Pass the key in the `Authorization` header on every request. + +```bash +Authorization: Bearer msk_live_... +``` + +Keys are prefixed `msk_live_` for production and `msk_test_` for test mode. The full key is shown exactly once at creation — it is never retrievable again. + +### Create a Key + +```bash +POST /keys +Content-Type: application/json + +{ "name": "production" } +``` + +**Response:** `201 Created` +```json +{ + "id": "key_01abc...", + "name": "production", + "key": "msk_live_xxxxxxxxxxxxxxxxxxxx", + "prefix": "msk_live_abcdef123456", + "createdAt": "2026-03-13T00:00:00.000Z" +} +``` + + +Save the `key` immediately. It is shown once and is not stored by Maschina. + + +### List Keys + +```bash +GET /keys +``` + +Returns all active keys for the authenticated user. The `key` field is never returned on list — only the `prefix` (first 20 characters) for identification. + +```json +[ + { + "id": "key_01abc...", + "name": "production", + "prefix": "msk_live_abcdef123456", + "createdAt": "2026-03-13T00:00:00.000Z" + } +] +``` + +### Revoke a Key + +```bash +DELETE /keys/:id +``` + +Revocation takes effect within seconds. Any in-flight requests using the key will fail immediately after revocation. + +**Response:** `200 OK` +```json +{ "success": true } +``` + +--- + +## Session Authentication + +Used by the dashboard and the `maschina` CLI. Returns a short-lived JWT. For programmatic use, prefer API keys. 
+ +### Register + +```bash +POST /auth/register +Content-Type: application/json + +{ + "name": "Asher Wilson", + "email": "you@example.com", + "password": "your-secure-password" +} +``` + +**Response:** `201 Created` +```json +{ + "accessToken": "eyJ...", + "user": { + "id": "usr_01...", + "name": "Asher Wilson", + "email": "you@example.com", + "plan": "access" + } +} +``` + +### Login + +```bash +POST /auth/login +Content-Type: application/json + +{ + "email": "you@example.com", + "password": "your-secure-password" +} +``` + +**Response:** `200 OK` +```json +{ + "accessToken": "eyJ...", + "user": { ... } +} +``` + +Use the `accessToken` as a bearer token in subsequent requests: + +```bash +Authorization: Bearer eyJ... +``` + +### Logout + +```bash +POST /auth/logout +Authorization: Bearer eyJ... +``` + +Invalidates the current session token. Returns `200 { "success": true }`. + +### Refresh + +```bash +POST /auth/refresh +``` + +Exchanges a still-valid session token for a fresh one. Call this before the token expires to maintain a session without re-login. + +--- + +## Current User + +```bash +GET /users/me +Authorization: Bearer SESSION_TOKEN_OR_API_KEY +``` + +Works with both API keys and session tokens. 
+ +```json +{ + "id": "usr_01...", + "name": "Asher Wilson", + "email": "you@example.com", + "plan": "m5", + "createdAt": "2026-03-01T00:00:00.000Z" +} +``` + +--- + +## Security Best Practices + + Follow these to keep your account secure: + +- **Never hardcode keys** — always read from environment variables (`process.env.MASCHINA_API_KEY`, `os.environ["MASCHINA_API_KEY"]`, `std::env::var("MASCHINA_API_KEY")`) +- **One key per environment** — use separate keys for production, staging, and local dev so you can revoke one without disrupting others +- **Rotate keys periodically** — create a new key, update your deployment, then revoke the old one +- **Set key names that describe the consumer** — `"production-api"`, `"ci-pipeline"`, `"dashboard-app"` makes it easy to know what's affected when revoking + +--- + +## Error Responses + +| Code | Condition | +|---|---| +| `401 Unauthorized` | Missing or invalid `Authorization` header | +| `401 Unauthorized` | Expired session token — refresh or re-login | +| `403 Forbidden` | Valid key but insufficient plan for the requested resource | diff --git a/apps/docs/api-reference/compliance.mdx b/apps/docs/api-reference/compliance.mdx new file mode 100644 index 0000000..593e8be --- /dev/null +++ b/apps/docs/api-reference/compliance.mdx @@ -0,0 +1,141 @@ +--- +title: Compliance +description: Audit log, GDPR deletion, and data retention. M10 and Enterprise only. +--- + + +Compliance endpoints require the M10 or Enterprise plan. + + +## Audit log + +Every action on your account — agent creates, runs, key operations, billing events, settings changes — is recorded in the audit log. 
+ +### Query audit log + +```bash +GET /compliance/audit-log +``` + +**Query parameters:** + +| Parameter | Type | Description | +|---|---|---| +| `limit` | number | Max results (default: 50, max: 1000) | +| `offset` | number | Pagination offset | +| `from` | ISO date | Start of date range | +| `to` | ISO date | End of date range | +| `action` | string | Filter by action type | +| `resource` | string | Filter by resource type (`agent`, `run`, `key`, `webhook`) | +| `format` | string | `json` (default) or `csv` | + +**Response:** + +```json +{ + "logs": [ + { + "id": "log_01abc...", + "action": "agent.create", + "resource": "agent", + "resourceId": "agt_01abc...", + "actorId": "usr_01...", + "actorIp": "1.2.3.4", + "metadata": { "name": "Research Agent" }, + "createdAt": "2026-03-13T12:00:00.000Z" + } + ], + "total": 1, + "limit": 50, + "offset": 0 +} +``` + +### Export as CSV + +```bash +GET /compliance/audit-log?format=csv&from=2026-03-01&to=2026-03-31 +``` + +Returns a `text/csv` response downloadable for compliance reporting. + +### Audit log actions + +| Action | Description | +|---|---| +| `agent.create` | Agent created | +| `agent.update` | Agent config updated | +| `agent.delete` | Agent deleted | +| `run.start` | Run submitted | +| `run.complete` | Run completed | +| `run.fail` | Run failed | +| `key.create` | API key created | +| `key.revoke` | API key revoked | +| `webhook.create` | Webhook created | +| `webhook.delete` | Webhook deleted | +| `billing.subscribe` | Plan subscription created or changed | +| `billing.cancel` | Subscription cancelled | +| `user.login` | User authenticated | +| `user.logout` | User session ended | +| `gdpr.delete` | GDPR erasure request processed | + +--- + +## GDPR deletion + +Permanently anonymize your account and all associated data in compliance with GDPR Article 17 (Right to Erasure). + +```bash +POST /compliance/gdpr/delete +``` + +**No request body required.** The requesting user's data is erased. 
+ +**What gets anonymized:** + +- All agents are soft-deleted and disassociated from your identity +- All run input and output payloads are zeroed +- Your user record is anonymized: email becomes `deleted+{id}@maschina.internal`, name is cleared +- A final audit log entry is written recording the erasure + +**Response:** + +```json +{ + "message": "User data anonymized successfully", + "userId": "usr_01..." +} +``` + + +This action is irreversible. Your account will be effectively unusable after this operation. All active sessions and API keys are invalidated immediately. + + +--- + +## Data retention + +Run payload data is retained for a configurable window based on your plan tier. + +| Plan | Retention | +|---|---| +| Access | 7 days | +| M1 | 30 days | +| M5 | 90 days | +| M10 | 365 days | +| Enterprise | Custom (unlimited available) | + +After the retention window, run input and output payloads are automatically purged. Run metadata (ID, status, token counts, timestamps) is retained indefinitely for billing purposes. + +### Configure retention (self-hosted) + +When self-hosting, set the retention window per tier in your environment: + +```bash +RETENTION_ACCESS_DAYS=7 +RETENTION_M1_DAYS=30 +RETENTION_M5_DAYS=90 +RETENTION_M10_DAYS=365 +``` + +See the [environment variables reference](/self-hosting/environment) for all retention configuration options. diff --git a/apps/docs/api-reference/keys.mdx b/apps/docs/api-reference/keys.mdx new file mode 100644 index 0000000..7ea6586 --- /dev/null +++ b/apps/docs/api-reference/keys.mdx @@ -0,0 +1,106 @@ +--- +title: API Keys +description: Create, list, and revoke API keys for machine-to-machine authentication. +--- + +import { Key, Plus, List, Trash, ShieldCheck } from "@phosphor-icons/react"; + +API keys authenticate requests from your application to the Maschina API. Keys are prefixed with `msk_live_` and displayed only once on creation. + +```bash +Authorization: Bearer msk_live_... 
+``` + +--- + +## Create a Key + +```bash +POST /keys +Content-Type: application/json + +{ "name": "production" } +``` + +**Response:** `201 Created` + +```json +{ + "id": "key_01...", + "name": "production", + "key": "msk_live_...", + "prefix": "msk_live_abcdefghij1234567890", + "createdAt": "2026-03-13T00:00:00.000Z" +} +``` + +The full key is shown **once** on creation and cannot be retrieved again. Store it in your secrets manager immediately. + +You can also create keys from the CLI: + +```bash +maschina keys create "production" +``` + +--- + +## List Keys + +```bash +GET /keys +``` + +Returns all active keys for the authenticated user. The full key value is never returned — only the `prefix` (first 20 characters). + +**Response:** +```json +{ + "keys": [ + { + "id": "key_01...", + "name": "production", + "prefix": "msk_live_abcdefghij12", + "lastUsedAt": "2026-03-13T11:45:00.000Z", + "createdAt": "2026-03-01T00:00:00.000Z" + } + ] +} +``` + +--- + +## Get a Key + +```bash +GET /keys/:id +``` + +Returns metadata for a single key. The full key is never returned. + +--- + +## Revoke a Key + +```bash +DELETE /keys/:id +``` + +Returns `204 No Content`. Revoked keys are **immediately** invalidated — any in-flight requests using that key will fail. + +From the CLI: + +```bash +maschina keys revoke key_01... +``` + +--- + +## Best Practices + +**One key per environment.** Create separate keys for development, staging, and production. Revoke them individually without disrupting other environments. + +**Store securely.** Use a secrets manager (Doppler, Vault, AWS Secrets Manager) or environment variables. Never commit keys to source control. + +**Rotate regularly.** Create a new key, update your application, then revoke the old key. There is no downtime during rotation. + +**Name clearly.** Use descriptive names (`ci-pipeline`, `production-app`, `local-dev`) so you know exactly what's using each key. 
diff --git a/apps/docs/api-reference/realtime.mdx b/apps/docs/api-reference/realtime.mdx new file mode 100644 index 0000000..605cecb --- /dev/null +++ b/apps/docs/api-reference/realtime.mdx @@ -0,0 +1,117 @@ +--- +title: Realtime +description: WebSocket and SSE endpoints for live run status streaming. +--- + +## WebSocket + +``` +GET wss://api.maschina.ai/realtime +``` + +**Query parameters:** + +| Parameter | Required | Description | +|---|---|---| +| `token` | Yes | JWT token or API key (`msk_live_...`) | + +### Client → server messages + +#### Subscribe to a run + +```json +{ "type": "subscribe", "runId": "run_01xyz..." } +``` + +#### Subscribe to all user runs + +```json +{ "type": "subscribe_all" } +``` + +#### Unsubscribe + +```json +{ "type": "unsubscribe", "runId": "run_01xyz..." } +``` + +#### Ping + +```json +{ "type": "ping" } +``` + +### Server → client messages + +#### run.status + +```json +{ + "type": "run.status", + "runId": "run_01xyz...", + "agentId": "agt_01abc...", + "status": "running", + "timestamp": "2026-03-13T12:00:01.000Z" +} +``` + +#### run.output + +```json +{ + "type": "run.output", + "runId": "run_01xyz...", + "agentId": "agt_01abc...", + "output": {}, + "model": "claude-sonnet-4-6", + "inputTokens": 312, + "outputTokens": 847, + "durationMs": 2341 +} +``` + +#### run.error + +```json +{ + "type": "run.error", + "runId": "run_01xyz...", + "errorCode": "model_unavailable", + "errorMessage": "All fallback models exhausted" +} +``` + +#### pong + +```json +{ "type": "pong" } +``` + +### Close codes + +| Code | Reason | +|---|---| +| `4001` | Unauthorized — invalid or expired token | +| `4029` | Too many connections for your plan | +| `1001` | Server going away — reconnect | + +--- + +## SSE + +``` +GET https://api.maschina.ai/realtime/sse +``` + +**Query parameters:** + +| Parameter | Required | Description | +|---|---|---| +| `token` | Yes | JWT or API key | +| `runId` | No | Subscribe to a specific run | + +**Events:** `run.status`, 
`run.output`, `run.error` + +Each event data field is a JSON string matching the WebSocket message formats above. + +EventSource automatically reconnects on disconnect. The server sends a `Last-Event-ID` header to resume from the last received event. diff --git a/apps/docs/api-reference/runs.mdx b/apps/docs/api-reference/runs.mdx new file mode 100644 index 0000000..5b229e8 --- /dev/null +++ b/apps/docs/api-reference/runs.mdx @@ -0,0 +1,158 @@ +--- +title: Runs +description: Inspect agent run history, results, and live status. +--- + +import { ArrowsClockwise, Play, List, CheckCircle, XCircle, Clock } from "@phosphor-icons/react"; + +## Run Object + +A run represents a single execution of an agent against a given input. + +```json +{ + "id": "run_01xyz...", + "agentId": "agt_01abc...", + "userId": "usr_01...", + "status": "completed", + "inputPayload": { + "message": "Summarize the benefits of async programming in three bullet points." + }, + "outputPayload": { + "text": "Here is the analysis..." 
+ }, + "model": "claude-sonnet-4-6", + "inputTokens": 312, + "outputTokens": 847, + "totalTokens": 1159, + "durationMs": 2341, + "turns": 1, + "createdAt": "2026-03-13T00:00:00.000Z", + "completedAt": "2026-03-13T00:00:02.341Z" +} +``` + +### Status Values + +| Status | Description | +|---|---| +| `queued` | Accepted by the API, waiting for the daemon to pick up | +| `running` | Daemon has dispatched to the runtime — actively executing | +| `completed` | Finished successfully; `outputPayload` is populated | +| `failed` | Failed after all retry attempts; `errorCode` and `errorMessage` are set | +| `timeout` | Exceeded the configured timeout | + +```mermaid +stateDiagram-v2 + [*] --> queued: POST /agents/:id/run + queued --> running: Daemon picks up + running --> completed: Runtime returns result + running --> failed: Error after retries + running --> timeout: Exceeds timeout limit + completed --> [*] + failed --> [*] + timeout --> [*] +``` + +--- + +## Trigger a Run + +See [POST /agents/:id/run](/api-reference/agents#run-an-agent). + +--- + +## Get a Run + +```bash +GET /agents/:agentId/runs/:runId +``` + +Returns the full run object. Use this to poll for completion if you are not using webhooks or the realtime WebSocket. + +**Response:** `200 OK` — full run object as shown above. + +--- + +## List Runs for an Agent + +```bash +GET /agents/:agentId/runs +``` + +| Parameter | Type | Default | Description | +|---|---|---|---| +| `limit` | number | `20` | Max results per page (max: 100). | +| `offset` | number | `0` | Pagination offset. | +| `status` | string | — | Filter by status: `queued`, `running`, `completed`, `failed`, `timeout`. | +| `from` | string | — | ISO 8601 datetime — return runs created at or after this time. | +| `to` | string | — | ISO 8601 datetime — return runs created at or before this time. 
| + +**Response:** `200 OK` +```json +{ + "data": [ + { + "id": "run_01xyz...", + "status": "completed", + "model": "claude-sonnet-4-6", + "inputTokens": 312, + "outputTokens": 847, + "durationMs": 2341, + "createdAt": "2026-03-13T00:00:00.000Z", + "completedAt": "2026-03-13T00:00:02.341Z" + } + ], + "total": 47, + "limit": 20, + "offset": 0 +} +``` + +--- + +## Failed Run Details + +When a run has `status: "failed"`, the run object includes error information: + +```json +{ + "id": "run_01xyz...", + "status": "failed", + "errorCode": "runtime_error", + "errorMessage": "Runtime returned non-200 after 3 attempts", + "inputTokens": 312, + "outputTokens": 0, + "durationMs": 9120 +} +``` + +### Error Codes + +| Code | Cause | +|---|---| +| `runtime_error` | Runtime returned an error response | +| `timeout` | Run exceeded the configured timeout | +| `quota_exceeded` | Monthly token quota hit during execution | +| `model_unavailable` | Model provider returned an error; cascade fallback exhausted | +| `risk_blocked` | Input or output failed risk checks | + +--- + +## Live Status + + For real-time run status without polling, use [webhooks](/guides/webhooks) or the [realtime WebSocket](/guides/realtime). + +```typescript +// Option 1: webhook +// Register agent.run.completed / agent.run.failed on your endpoint + +// Option 2: poll +async function waitForRun(agentId: string, runId: string) { + while (true) { + const run = await maschina.agents.getRun(agentId, runId); + if (run.status === "completed" || run.status === "failed") return run; + await new Promise(r => setTimeout(r, 2000)); + } +} +``` diff --git a/apps/docs/api-reference/search.mdx b/apps/docs/api-reference/search.mdx new file mode 100644 index 0000000..77a0ded --- /dev/null +++ b/apps/docs/api-reference/search.mdx @@ -0,0 +1,112 @@ +--- +title: Search +description: Full-text search across your agents, powered by Meilisearch. 
+--- + +import { MagnifyingGlass, Lightning } from "@phosphor-icons/react"; + + Search is powered by Meilisearch. Results are instant — typically under 5ms. All results are scoped to the authenticated user. + +--- + +## Search Agents + +```bash +GET /search?q=research&type=agents&limit=20&offset=0 +``` + +### Parameters + +| Parameter | Type | Default | Description | +|---|---|---|---| +| `q` | string | `""` | Search query. Matches against agent name, description, and type. Empty string returns all agents. | +| `type` | string | `agents` | Index to search. Currently `agents` is the only supported index. | +| `limit` | number | `20` | Max results to return. Maximum: 100. | +| `offset` | number | `0` | Pagination offset. | + +### Response + +```json +{ + "hits": [ + { + "id": "agt_01abc...", + "name": "Research Agent", + "description": "Produces structured summaries from any topic.", + "type": "analysis", + "status": "idle", + "model": "claude-sonnet-4-6", + "createdAt": "2026-03-01T00:00:00.000Z" + }, + { + "id": "agt_02def...", + "name": "Market Research Bot", + "description": "Analyzes market trends and competitor positioning.", + "type": "analysis", + "status": "idle", + "model": "claude-haiku-4-5", + "createdAt": "2026-03-05T00:00:00.000Z" + } + ], + "total": 2, + "query": "research", + "processingTimeMs": 3 +} +``` + +--- + +## Indexed Fields + +The search index includes the following fields from each agent: + +| Field | Searchable | Filterable | +|---|---|---| +| `name` | Yes | No | +| `description` | Yes | No | +| `type` | Yes | Yes | +| `status` | No | Yes | +| `model` | No | Yes | +| `createdAt` | No | Yes (sort) | + +--- + +## Sync Behavior + +Agents are automatically synced to the search index on: +- `POST /agents` — indexed on creation +- `PATCH /agents/:id` — index updated on any change +- `DELETE /agents/:id` — removed from index on deletion + +There is no manual sync endpoint. Sync is near-instant. 
If an agent appears missing from search results immediately after creation, wait one second and retry. + +--- + +## SDK Usage + + + +```typescript TypeScript +const results = await maschina.search("research", { type: "agents" }); + +results.hits.forEach((agent) => { + console.log(agent.name, agent.description); +}); +``` + +```python Python +results = maschina.search("research", type="agents") + +for agent in results.hits: + print(agent.name, agent.description) +``` + + + +--- + +## Search Degradation + +If Meilisearch is unreachable, the `/search` endpoint returns an empty result set with `processingTimeMs: 0`. It does not fall back to a database scan. The API itself remains fully operational — only search is affected. + +For list-based retrieval with no search requirement, use `GET /agents` instead. diff --git a/apps/docs/api-reference/usage.mdx b/apps/docs/api-reference/usage.mdx new file mode 100644 index 0000000..9245fd1 --- /dev/null +++ b/apps/docs/api-reference/usage.mdx @@ -0,0 +1,104 @@ +--- +title: Usage +description: Query token usage, quota status, and billing period information. +--- + +import { ChartLine, Warning, Bell } from "@phosphor-icons/react"; + +## Get Usage Summary + +```bash +GET /usage +``` + +**Response:** +```json +{ + "tier": "m5", + "tokensUsed": 1234567, + "tokensLimit": 5000000, + "percentUsed": 24.7, + "periodStart": "2026-03-01T00:00:00.000Z", + "periodEnd": "2026-04-01T00:00:00.000Z", + "apiCallsToday": 142 +} +``` + +Quota resets at the start of each billing period. Unused tokens do not roll over. 
+ +--- + +## Usage by Model + +```bash +GET /usage/models +``` + +**Response:** +```json +{ + "models": [ + { + "model": "claude-sonnet-4-6", + "multiplier": 3, + "rawTokens": 150000, + "billedTokens": 450000, + "runCount": 47 + }, + { + "model": "claude-haiku-4-5", + "multiplier": 1, + "rawTokens": 200000, + "billedTokens": 200000, + "runCount": 213 + } + ], + "totalBilledTokens": 1234567 +} +``` + +`billedTokens` is what counts against your quota. `rawTokens` is what the model actually consumed. + +--- + +## Usage History + +```bash +GET /usage/history?from=2026-03-01&to=2026-03-31 +``` + +**Query parameters:** + +| Parameter | Type | Description | +|---|---|---| +| `from` | ISO date | Start of range | +| `to` | ISO date | End of range | +| `granularity` | string | `day` (default) or `hour` | + +**Response:** +```json +{ + "data": [ + { + "date": "2026-03-13", + "tokensUsed": 45231, + "runCount": 18 + } + ] +} +``` + +--- + +## Quota Warnings + +Set up a `usage.quota_warning` webhook to get notified when you reach 80% of your monthly limit before runs start failing. + +Maschina emits two quota events: + +| Event | Threshold | Behavior | +|---|---|---| +| `usage.quota_warning` | 80% consumed | Notification only — runs continue | +| `usage.quota_exceeded` | 100% consumed | All new runs are blocked with `402` | + +To unblock runs after quota is exceeded: upgrade your plan or wait for the next billing period. diff --git a/apps/docs/api-reference/webhooks.mdx b/apps/docs/api-reference/webhooks.mdx new file mode 100644 index 0000000..debfa0b --- /dev/null +++ b/apps/docs/api-reference/webhooks.mdx @@ -0,0 +1,191 @@ +--- +title: Webhooks +description: Manage outbound webhook endpoints and delivery logs. 
+--- + +import { GitBranch, ShieldCheck, ArrowsClockwise, Bell } from "@phosphor-icons/react"; + +## Webhook Object + +```json +{ + "id": "wh_01abc...", + "url": "https://your-app.com/webhooks/maschina", + "events": ["agent.run.completed", "agent.run.failed"], + "status": "active", + "createdAt": "2026-03-13T00:00:00.000Z", + "updatedAt": "2026-03-13T00:00:00.000Z" +} +``` + +### Webhook Status Values + +| Status | Description | +|---|---| +| `active` | Receiving deliveries normally | +| `failing` | Last delivery failed; check the delivery log | +| `disabled` | Manually disabled via `PATCH /webhooks/:id` | + +--- + +## Create a Webhook + +```bash +POST /webhooks +``` + +| Parameter | Type | Required | Description | +|---|---|---|---| +| `url` | string | Yes | HTTPS endpoint that receives POST deliveries. | +| `events` | string[] | Yes | List of events to subscribe to. See supported events below. | + +**Request:** +```json +{ + "url": "https://your-app.com/webhooks/maschina", + "events": ["agent.run.completed", "agent.run.failed"] +} +``` + +**Response:** `201 Created` +```json +{ + "id": "wh_01abc...", + "url": "https://your-app.com/webhooks/maschina", + "events": ["agent.run.completed", "agent.run.failed"], + "status": "active", + "secret": "whsec_...", + "createdAt": "2026-03-13T00:00:00.000Z" +} +``` + + +The `secret` is shown exactly once at creation. Save it immediately — it is never retrievable again. Use it to verify `X-Maschina-Signature` on every delivery. See [signature verification](/guides/webhooks#verifying-signatures). + + +--- + +## List Webhooks + +```bash +GET /webhooks +``` + +Returns all webhook endpoints for the authenticated user. + +--- + +## Get a Webhook + +```bash +GET /webhooks/:id +``` + +Returns the full webhook object excluding the secret. + +--- + +## Update a Webhook + +```bash +PATCH /webhooks/:id +``` + +All fields are optional. 
+ +```json +{ + "url": "https://new-url.com/webhooks", + "events": ["agent.run.completed"], + "active": true +} +``` + +**Response:** `200 OK` — returns the updated webhook object. + +--- + +## Delete a Webhook + +```bash +DELETE /webhooks/:id +``` + +**Response:** `200 OK` +```json +{ "success": true } +``` + +--- + +## Send a Test Delivery + + Sends a sample `agent.run.completed` payload to your endpoint immediately. Useful for verifying your signature verification logic before going live. + +```bash +POST /webhooks/:id/test +``` + +**Response:** `200 OK` +```json +{ + "deliveryId": "del_01abc...", + "status": "delivered", + "responseCode": 200 +} +``` + +--- + +## View Delivery Log + +```bash +GET /webhooks/:id/deliveries +``` + +Returns the last 50 delivery attempts with status, HTTP response code, and timestamp. + +**Response:** +```json +{ + "data": [ + { + "id": "del_01abc...", + "webhookId": "wh_01abc...", + "event": "agent.run.completed", + "status": "delivered", + "responseCode": 200, + "durationMs": 142, + "attemptCount": 1, + "createdAt": "2026-03-13T00:00:00.000Z" + }, + { + "id": "del_02abc...", + "webhookId": "wh_01abc...", + "event": "agent.run.failed", + "status": "failed", + "responseCode": 503, + "durationMs": 5000, + "attemptCount": 5, + "createdAt": "2026-03-13T01:00:00.000Z" + } + ] +} +``` + +--- + +## Supported Events + + Subscribe to only the events your application needs. + +| Event | Trigger | +|---|---| +| `agent.run.started` | Run picked up by the daemon and is executing | +| `agent.run.completed` | Run finished successfully — output is available | +| `agent.run.failed` | Run failed after all retry attempts | +| `subscription.updated` | Plan or billing status changed | +| `usage.quota_warning` | 80% of monthly token quota consumed | +| `usage.quota_exceeded` | Monthly token quota exhausted — runs will be rejected | + +For payload schemas and signature verification, see the [Webhooks guide](/guides/webhooks). 
diff --git a/apps/docs/brand/favicon.svg b/apps/docs/brand/favicon.svg new file mode 100644 index 0000000..7b7fbcb --- /dev/null +++ b/apps/docs/brand/favicon.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/apps/docs/brand/fonts/TestSohne-Buch-BF663d89cd32e6a.otf b/apps/docs/brand/fonts/TestSohne-Buch-BF663d89cd32e6a.otf new file mode 100755 index 0000000..71bae93 Binary files /dev/null and b/apps/docs/brand/fonts/TestSohne-Buch-BF663d89cd32e6a.otf differ diff --git a/apps/docs/brand/fonts/TestSohneBreit-Buch-BF663d89ca2ff42.otf b/apps/docs/brand/fonts/TestSohneBreit-Buch-BF663d89ca2ff42.otf new file mode 100755 index 0000000..0ad8dd6 Binary files /dev/null and b/apps/docs/brand/fonts/TestSohneBreit-Buch-BF663d89ca2ff42.otf differ diff --git a/apps/docs/brand/fonts/TestSohneMono-Buch-BF663d89cbcec64.otf b/apps/docs/brand/fonts/TestSohneMono-Buch-BF663d89cbcec64.otf new file mode 100755 index 0000000..d06c8d7 Binary files /dev/null and b/apps/docs/brand/fonts/TestSohneMono-Buch-BF663d89cbcec64.otf differ diff --git a/apps/docs/brand/maschina.svg b/apps/docs/brand/maschina.svg new file mode 100644 index 0000000..6f81fa5 --- /dev/null +++ b/apps/docs/brand/maschina.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/apps/docs/brand/wordmark.svg b/apps/docs/brand/wordmark.svg new file mode 100644 index 0000000..2c6d492 --- /dev/null +++ b/apps/docs/brand/wordmark.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/apps/docs/changelog.mdx b/apps/docs/changelog.mdx new file mode 100644 index 0000000..e0bd897 --- /dev/null +++ b/apps/docs/changelog.mdx @@ -0,0 +1,77 @@ +--- +title: Changelog +description: Release history for Maschina — platform, SDKs, CLI, and self-hosted distribution. +--- + +import { Tag, GitBranch, ArrowsClockwise, Rocket } from "@phosphor-icons/react"; + +## v1.0.0 — 2026-03-08 + +Initial public release of the Maschina platform. 
+ +### Platform + +- Agent runtime — multi-turn LLM execution, tool calling, risk checks (Python / FastAPI) +- Model routing — Anthropic Claude (Haiku 4.5, Sonnet 4.6, Opus 4.6), Ollama fallback +- Cascade model fallback — automatic failover when a model is unavailable +- NATS JetStream job queue — durable async execution, pull consumer, retries +- SCAN → EVALUATE → EXECUTE → ANALYZE daemon pipeline (Rust) +- Gateway — JWT validation, per-IP/per-user rate limiting, HTTP + WebSocket proxy (Rust / Axum) +- Realtime — WebSocket + SSE live run status streaming, per-user fan-out (Rust / Axum) + +### API + +- Full CRUD for agents, runs, API keys, webhooks, usage, search, compliance +- Auth — JWT sessions, argon2id password hashing, scoped API keys, RBAC, OAuth +- Billing — Stripe Checkout, prepaid credits, per-model billing multipliers, quota enforcement +- Webhooks — HMAC-SHA256 signed outbound delivery, exponential backoff retry (5 attempts) +- Search — Meilisearch full-text indexing, synced on agent create / update / delete +- Compliance — audit log, GDPR Article 17 deletion, configurable data retention (M10+) + +### Plans + +| Plan | Price | Tokens | +|---|---|---| +| Access | Free | Limited | +| M1 | $20/mo | 1M | +| M5 | $60/mo | 5M | +| M10 | $100/mo | 15M | +| Mach Team | $30/seat/mo | 5M/seat | +| Enterprise | Custom | Custom | + +### SDKs + +- TypeScript SDK (`@maschina/sdk`) — agents, runs, keys, webhooks, usage, error handling +- Python SDK (`maschina-sdk`) — sync + async clients, full endpoint coverage +- Rust SDK (`maschina`) — async tokio-based client, full endpoint coverage + +### CLI + +- `maschina` binary (Rust / Clap) +- Commands: `login`, `logout`, `status`, `register` +- `agent list`, `agent deploy`, `agent inspect`, `agent run`, `agent stop`, `agent delete` +- `keys list`, `keys create`, `keys revoke` +- `usage` — quota and token usage summary +- `config get`, `config set` — manage local config at `~/.config/maschina/config.toml` +- `--profile` flag 
for multi-environment support +- `service start`, `service status`, `service logs` — self-hosted service management +- `setup` — interactive self-hosted setup wizard + +### Self-hosting + +- Docker Compose — all services + dependencies (PostgreSQL, Redis, NATS, Meilisearch) +- Fly.io deployment guide with managed dependencies +- Environment variable reference for all services + +### Observability + +- OpenTelemetry SDK — traces for all services +- Prometheus metrics endpoints +- Sentry error tracking integration +- Grafana-compatible metrics + +--- + +## Upcoming + +See the [Roadmap](/platform/roadmap) for what's being built next. diff --git a/apps/docs/concepts.mdx b/apps/docs/concepts.mdx new file mode 100644 index 0000000..ca073ef --- /dev/null +++ b/apps/docs/concepts.mdx @@ -0,0 +1,279 @@ +--- +title: Concepts +description: The core primitives of Maschina — agents, runs, models, plans, keys, and webhooks. +--- + +import { Robot, ArrowsClockwise, Cpu, CreditCard, Key, GitBranch, WifiHigh, ShieldCheck } from "@phosphor-icons/react"; + +## Agents + + An agent is a persistent, configurable AI worker. It has a name, a type, a system prompt, and a model preference. Agents are templates — they define behavior. Runs are the actual executions. + +```json +{ + "id": "agt_01abc...", + "name": "Research Agent", + "type": "analysis", + "status": "idle", + "config": { + "systemPrompt": "You are a research analyst. Produce structured, cited summaries.", + "model": "claude-sonnet-4-6" + }, + "createdAt": "2026-03-01T00:00:00.000Z" +} +``` + +Agents are not stateful between runs. Each run starts fresh with the system prompt and the input you provide. If you need memory across runs, pass prior context in the `input` payload. 
+ +### Agent Types + +| Type | Best for | +|---|---| +| `execution` | General task execution, automation, integrations | +| `analysis` | Data analysis, summarization, research | +| `signal` | Event detection, monitoring, alerting | +| `optimization` | Iterative improvement, A/B evaluation, tuning | +| `reporting` | Structured report generation, scheduled digests | + +The type is metadata — it affects how the agent is categorized and searched but does not change execution behavior. + +### Agent Lifecycle + +```mermaid +stateDiagram-v2 + [*] --> Created: POST /agents + Created --> Idle: registered + Idle --> Running: run dispatched + Running --> Idle: run completed + Running --> Idle: run failed + Idle --> Deleted: DELETE /agents/:id + Deleted --> [*] +``` + +--- + +## Runs + + A run is a single execution of an agent. You provide an input, the Daemon picks it up from the queue, and the Runtime executes it. Runs are always async. + +```json +{ + "id": "run_01xyz...", + "agentId": "agt_01abc...", + "status": "completed", + "model": "claude-sonnet-4-6", + "inputPayload": { "message": "Analyze Q1 revenue trends." }, + "outputPayload": { "text": "Q1 revenue grew 14% YoY driven by..." 
}, + "inputTokens": 312, + "outputTokens": 847, + "durationMs": 2341, + "turns": 1, + "createdAt": "2026-03-13T12:00:00.000Z", + "completedAt": "2026-03-13T12:00:02.341Z" +} +``` + +### Run Lifecycle + +```mermaid +flowchart LR + Q([queued]) --> R([running]) + R --> C([completed]) + R --> F([failed]) + R --> X([cancelled]) + + style Q fill:#1a1a2e,stroke:#4a4a8a,color:#aaa + style R fill:#1a2e1a,stroke:#4a8a4a,color:#aaa + style C fill:#1a3d1a,stroke:#2d6a2d,color:#aaa + style F fill:#3d1a1a,stroke:#8a2d2d,color:#aaa + style X fill:#2e2e1a,stroke:#8a8a2d,color:#aaa +``` + +- **queued** — accepted by the API and waiting in NATS JetStream +- **running** — Daemon has picked it up and Runtime is executing +- **completed** — output payload is ready +- **failed** — all retry attempts exhausted; error details attached +- **cancelled** — manually cancelled before execution + +### Getting Results + +Three options — choose based on your use case: + +| Method | When to use | +|---|---| +| **Poll** `GET /agents/:id/runs/:runId` | Simple one-off runs | +| **Webhook** | Production workloads, async pipelines | +| **WebSocket / SSE** | Live UI that shows run progress in realtime | + +--- + +## Models + + Maschina routes each run to a model based on your agent config or the per-run override. Each model has a minimum plan requirement and a billing multiplier. + +### Billing Multiplier + +Multipliers are applied to raw token counts. A run using 1,000 tokens on `claude-sonnet-4-6` (3x multiplier) deducts 3,000 tokens from your quota. 
+ +``` +tokens_deducted = (input_tokens + output_tokens) × model_multiplier +``` + +### Supported Models + +#### Anthropic + +| Model | Context | Min Plan | Multiplier | +|---|---|---|---| +| `claude-haiku-4-5` | 200k | M1 | 1x | +| `claude-sonnet-4-5` | 1M | M5 | 3x | +| `claude-sonnet-4-6` | 1M | M5 | 3x | +| `claude-opus-4-5` | 200k | M10 | 15x | +| `claude-opus-4-6` | 1M | M10 | 15x | + +#### OpenAI + +| Model | Context | Min Plan | Multiplier | +|---|---|---|---| +| `gpt-5-nano` | 128k | M1 | 1x | +| `gpt-5-mini` | 400k | M1 | 1x | +| `o4-mini` | 200k | M1 | 2x | +| `gpt-5` | 1M+ | M5 | 8x | +| `gpt-5.4` | 1M+ | M5 | 10x | +| `gpt-5.4-pro` | 1M+ | M10 | 25x | +| `o3` | 200k | M10 | 20x | + +#### Local (Ollama) + +Local models run on your own hardware or a self-hosted Ollama instance. No tokens are deducted. + +| Model | Multiplier | +|---|---| +| `ollama/llama3.2` | 0x | +| `ollama/llama3.1` | 0x | +| `ollama/mistral` | 0x | + +Any model in your Ollama instance works with the `ollama/` prefix. + +### Cascade Fallback + +If a model is unavailable, Maschina automatically falls back to the next available model in your tier rather than failing the run. + +``` +claude-opus-4-6 → claude-sonnet-4-6 → claude-haiku-4-5 +gpt-5.4-pro → gpt-5.4 → gpt-5 +``` + +### Passthrough Routing + +Unrecognized model IDs are routed by prefix. M1 or higher required. A flat 2x multiplier applies. + +``` +claude-* → Anthropic API +gpt-* / o* → OpenAI API +``` + +--- + +## Plans + + Plans control which models you can use, how many tokens you get per month, and which features are available. 
+ +| Plan | Price | Monthly Tokens | Default Model | +|---|---|---|---| +| Access | Free | — | `ollama/llama3.2` | +| M1 | $20/mo or $204/yr | 1M | `claude-haiku-4-5` | +| M5 | $60/mo or $600/yr | 5M | `claude-sonnet-4-6` | +| M10 | $100/mo or $995/yr | 10M | `claude-opus-4-6` | +| Mach Team | $30/seat/mo | 5M per seat | `claude-sonnet-4-6` | +| Enterprise | Custom | Unlimited | Custom | + +- Tokens reset at the start of each billing period — unused tokens do not roll over +- Mach Team seats 10–24 pay $27/seat/mo; 25+ seats contact for Enterprise pricing +- Annual plans save roughly 15–17% vs monthly + +### Feature Gates + +| Feature | Access | M1 | M5 | M10 | Team | +|---|---|---|---|---|---| +| Ollama models | ✓ | ✓ | ✓ | ✓ | ✓ | +| Claude Haiku | — | ✓ | ✓ | ✓ | ✓ | +| Claude Sonnet | — | — | ✓ | ✓ | ✓ | +| Claude Opus | — | — | — | ✓ | — | +| GPT models | — | ✓ | ✓ | ✓ | ✓ | +| Webhooks | — | ✓ | ✓ | ✓ | ✓ | +| Search | — | ✓ | ✓ | ✓ | ✓ | +| Compliance / Audit Log | — | — | — | ✓ | — | + +--- + +## API Keys + + API keys are the primary authentication method for machine-to-machine access. Keys are prefixed with `msk_` and displayed once on creation. + +``` +Authorization: Bearer msk_live_... +``` + +Keys can be scoped and named. Revoke them individually without affecting other keys. Manage keys from the [dashboard](https://app.maschina.ai/keys) or the CLI. + + +API keys are shown once. Store them immediately in your secrets manager or environment variables. + + +--- + +## Webhooks + + Webhooks deliver signed HTTP POST payloads to your endpoint when events occur. Every delivery includes an `X-Maschina-Signature` header (HMAC-SHA256) that you must verify before processing. 
+ +### Events + +| Event | Trigger | +|---|---| +| `agent.run.started` | Run picked up by Daemon and executing | +| `agent.run.completed` | Run finished with output | +| `agent.run.failed` | Run exhausted all retries | +| `subscription.updated` | Plan or billing status changed | +| `usage.quota_warning` | 80% of monthly token quota consumed | +| `usage.quota_exceeded` | Monthly quota exhausted — runs will be blocked | + +See the [webhooks guide](/guides/webhooks) for setup, signature verification, and retry behavior. + +--- + +## Realtime + + The Realtime service streams run status updates to connected clients over WebSocket or SSE. It subscribes to NATS events and fans them out per user — no polling, no missed updates. + +``` +wss://api.maschina.ai/realtime?token=YOUR_JWT +``` + +Subscribe to a specific run: + +```json +{ "type": "subscribe", "runId": "run_01xyz..." } +``` + +You'll receive `run.status` events as the run progresses through `queued → running → completed`. + +--- + +## Compliance + + Available on M10 and Enterprise plans. + +- **Audit Log** — every API action (agent create/update/delete, run start, key creation, billing event) is recorded with actor, timestamp, and IP +- **GDPR Deletion** — `POST /compliance/gdpr/delete` anonymizes your account and all associated data in accordance with Article 17 +- **Data Retention** — run payloads are retained for a configurable window based on plan tier + +| Plan | Retention | +|---|---| +| Access | 7 days | +| M1 | 30 days | +| M5 | 90 days | +| M10 | 365 days | +| Enterprise | Custom | + +See the [environment variables reference](/self-hosting/environment) for retention configuration when self-hosting. diff --git a/apps/docs/contributing.mdx b/apps/docs/contributing.mdx new file mode 100644 index 0000000..8460d64 --- /dev/null +++ b/apps/docs/contributing.mdx @@ -0,0 +1,103 @@ +--- +title: Contributing +description: How to contribute to Maschina docs and the self-hosted distribution. 
+--- + +import { GitBranch, Code, BookOpen, Bug, Star } from "@phosphor-icons/react"; + +Maschina's documentation and self-hosted distribution are open source. Contributions are welcome — whether that's fixing a typo, adding a missing example, or documenting a new self-hosting deployment target. + +## What's Open Source + +| Repository | License | Contents | +|---|---|---| +| [maschina-labs/docs](https://github.com/maschina-labs/docs) | Apache 2.0 | This documentation site | +| [maschina-labs/self-hosted](https://github.com/maschina-labs/self-hosted) | Apache 2.0 | Docker Compose, Fly.io configs, migration files | +| [maschina-labs/sdk-typescript](https://github.com/maschina-labs/sdk-typescript) | Apache 2.0 | TypeScript SDK | +| [maschina-labs/sdk-python](https://github.com/maschina-labs/sdk-python) | Apache 2.0 | Python SDK | +| [maschina-labs/sdk-rust](https://github.com/maschina-labs/sdk-rust) | Apache 2.0 | Rust SDK | + +The core platform (API, Daemon, Runtime, Gateway, Realtime) is proprietary. + +--- + +## Contributing to Docs + +### Fix a typo or improve wording + +Every page has an "Edit on GitHub" link at the bottom. Click it, make your change, and open a PR directly from GitHub. + +### Add a new example or clarification + +1. Fork [maschina-labs/docs](https://github.com/maschina-labs/docs) +2. Clone your fork and install dependencies: + +```bash +git clone https://github.com/YOUR_USERNAME/docs +cd docs +pnpm install +pnpm doc # starts the local dev server +``` + +3. Make your changes in `apps/docs/` +4. Open a pull request with a clear description of what you changed and why + +### Add a new page + +Pages are MDX files in `apps/docs/`. After creating the file, add it to the navigation in `apps/docs/mint.json`. + +### Report a documentation issue + +[Open an issue](https://github.com/maschina-labs/docs/issues/new) on the docs repository. 
Include: +- The page URL +- What's incorrect or missing +- What the correct information should be + +--- + +## Contributing to the Self-Hosted Distribution + +### Report a deployment issue + +[Open an issue](https://github.com/maschina-labs/self-hosted/issues/new) on the self-hosted repository. Include: +- Your deployment target (Docker Compose, Fly.io, Kubernetes, etc.) +- The service and error log +- Steps to reproduce + +### Add a deployment guide + +If you've successfully deployed Maschina to a platform not yet documented (Kubernetes, Railway, Render, DigitalOcean, etc.), we'd welcome a PR with a guide. Use the existing [Docker](/self-hosting/docker) and [Fly.io](/self-hosting/fly) guides as a template. + +### Improve Docker Compose or config files + +PRs improving the self-hosted distribution are welcome. Please include: +- What problem the change solves +- Whether it's tested +- Any migration notes for existing deployments + +--- + +## Contributing to the SDKs + +Each SDK is in its own repository. Follow the standard GitHub contribution flow: + +1. Fork the SDK repository +2. Create a branch: `git checkout -b fix/issue-description` +3. Make changes and write tests +4. Open a PR + +SDK PRs that add new endpoint coverage, fix response type inconsistencies, or improve error handling are especially welcome. + +--- + +## Code of Conduct + +Be direct, respectful, and constructive. Reviews are about the code, not the person. + +--- + +## License + +All open-source repositories use the Apache 2.0 license. By contributing, you agree that your contributions are licensed under the same terms. You cannot relicense contributions under a different license. + +The Maschina name, logo, and brand are proprietary and may not be used in derivative works or distributions without explicit written permission. 
diff --git a/apps/docs/custom.css b/apps/docs/custom.css new file mode 100644 index 0000000..ad235b7 --- /dev/null +++ b/apps/docs/custom.css @@ -0,0 +1,193 @@ +/* ============================================================ + Font Awesome 6 Free + ============================================================ */ +@import url("https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.1/css/all.min.css"); + +/* ============================================================ + Test Sohne — sans-serif UI + body + ============================================================ */ +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Buch-BF663d89cd32e6a.otf") format("opentype"); + font-weight: 400; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Buch-BF663d89cd3e887.otf") format("opentype"); + font-weight: 400; + font-style: italic; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Buch-BF663d89cd37e26.otf") format("opentype"); + font-weight: 500; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Buch-BF663d89cd2bd4b.otf") format("opentype"); + font-weight: 500; + font-style: italic; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Halbfett-BF663d89cd2d67b.otf") format("opentype"); + font-weight: 600; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-HalbfettKursiv-BF663d89cd41624.otf") format("opentype"); + font-weight: 600; + font-style: italic; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Dreiviertelfett-BF663d89ccc5f66.otf") format("opentype"); + font-weight: 700; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: 
url("/brand/fonts/TestSohne-DreiviertelfettKursiv-BF663d89cd2f687.otf") format("opentype"); + font-weight: 700; + font-style: italic; + font-display: swap; +} +@font-face { + font-family: "TestSohne"; + src: url("/brand/fonts/TestSohne-Fett-BF663d89cca89ff.otf") format("opentype"); + font-weight: 800; + font-style: normal; + font-display: swap; +} + +/* ============================================================ + Test Sohne Breit — wide display / logo + ============================================================ */ +@font-face { + font-family: "TestSohneBreit"; + src: url("/brand/fonts/TestSohneBreit-Buch-BF663d89ca2ff42.otf") format("opentype"); + font-weight: 400; + font-style: normal; + font-display: swap; +} + +/* ============================================================ + Test Sohne Mono — monospace code + ============================================================ */ +@font-face { + font-family: "TestSohneMono"; + src: url("/brand/fonts/TestSohneMono-Buch-BF663d89cbcec64.otf") format("opentype"); + font-weight: 400; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohneMono"; + src: url("/brand/fonts/TestSohneMono-BuchKursiv-BF663d89cc8d4c3.otf") format("opentype"); + font-weight: 400; + font-style: italic; + font-display: swap; +} +@font-face { + font-family: "TestSohneMono"; + src: url("/brand/fonts/TestSohneMono-Kraftig-BF663d89cd2bd2d.otf") format("opentype"); + font-weight: 500; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohneMono"; + src: url("/brand/fonts/TestSohneMono-Halbfett-BF663d89cc69f9d.otf") format("opentype"); + font-weight: 600; + font-style: normal; + font-display: swap; +} +@font-face { + font-family: "TestSohneMono"; + src: url("/brand/fonts/TestSohneMono-Dreiviertelfett-BF663d89cc62070.otf") format("opentype"); + font-weight: 700; + font-style: normal; + font-display: swap; +} + +/* ============================================================ + Apply fonts + 
============================================================ */ + +/* Everything — Test Sohne */ +* { + font-family: "TestSohne", ui-sans-serif, -apple-system, BlinkMacSystemFont, sans-serif !important; +} + +/* Code only — Test Sohne Mono Buch */ +code, pre, kbd, samp, +code *, pre * { + font-family: "TestSohneMono", ui-monospace, "Cascadia Code", monospace !important; + font-weight: 400 !important; +} + +/* Topbar social icon links */ +header a[href*="github.com"], +header a[href*="x.com"], +header a[href*="discord.gg"] { + font-size: 0 !important; + width: 28px !important; + height: 28px !important; + display: inline-flex !important; + align-items: center !important; + justify-content: center !important; + opacity: 0.7; + transition: opacity 0.15s; +} +header a[href*="github.com"]:hover, +header a[href*="x.com"]:hover, +header a[href*="discord.gg"]:hover { + opacity: 1; +} +header a[href*="github.com"]::before { + font-family: "Font Awesome 6 Brands" !important; + content: "\f09b" !important; + font-size: 16px !important; + color: currentColor; +} +header a[href*="x.com"]::before { + font-family: "Font Awesome 6 Brands" !important; + content: "\e61b" !important; + font-size: 15px !important; + color: currentColor; +} +header a[href*="discord.gg"]::before { + font-family: "Font Awesome 6 Brands" !important; + content: "\f392" !important; + font-size: 16px !important; + color: currentColor; +} + +/* Full-width topbar border */ +header { + border-bottom: 1px solid rgba(255, 255, 255, 0.1) !important; + box-shadow: none !important; +} + + +/* Logo wordmark — Test Sohne Breit Buch, all caps, small */ +header a[href="/"], +header a[href="/"] span, +header a[href="/"] p, +header a[href="/"] div { + font-family: "TestSohneBreit", ui-sans-serif, sans-serif !important; + font-weight: 400 !important; + font-size: 12px !important; + text-transform: uppercase !important; + letter-spacing: 0.12em !important; +} diff --git a/apps/docs/guides/cli.mdx b/apps/docs/guides/cli.mdx 
new file mode 100644 index 0000000..d5506e3 --- /dev/null +++ b/apps/docs/guides/cli.mdx @@ -0,0 +1,191 @@ +--- +title: CLI +description: Manage agents, keys, runs, and local services from the terminal. +--- + +import { Terminal, Robot, Key, Lightning, Wrench, User } from "@phosphor-icons/react"; + +## Installation + +```bash +curl -fsSL https://maschina.ai/install.sh | sh +``` + +Or download a binary directly from [GitHub Releases](https://github.com/maschina-labs/self-hosted/releases). + +The CLI binary is named `maschina`. Verify the installation: + +```bash +maschina --version +``` + +--- + +## Authentication + +### Login + +```bash +maschina login +# prompts for email and password +# credentials stored at ~/.config/maschina/config.toml +``` + +### Check status + +```bash +maschina status +``` + +``` +Account: ash@maschina.ai +Plan: M5 +Tokens: 1,204,322 / 5,000,000 used +API: https://api.maschina.ai ✓ reachable +``` + +### Logout + +```bash +maschina logout +``` + +--- + +## Agents + +```bash +# List all agents +maschina agent list + +# Deploy a new agent from a local config file +maschina agent deploy ./agent.json + +# Inspect agent config and run stats +maschina agent inspect AGENT_ID + +# Run an agent with inline input +maschina agent run AGENT_ID --input '{"message": "Summarize Q1 performance."}' + +# View recent run history for an agent +maschina agent runs AGENT_ID + +# Stop a queued run +maschina agent stop RUN_ID + +# Delete an agent +maschina agent delete AGENT_ID +``` + +### Agent config file + +`maschina agent deploy` reads a JSON config file: + +```json +{ + "name": "Research Agent", + "type": "analysis", + "config": { + "systemPrompt": "You are a research analyst. 
Produce structured summaries.", + "model": "claude-sonnet-4-6" + } +} +``` + +--- + +## API Keys + +```bash +# List all keys +maschina keys list + +# Create a new key +maschina keys create "production" +# → msk_live_xxxxxxxxxxxxxxxxxxxx (shown once) + +# Revoke a key +maschina keys revoke KEY_ID +``` + +--- + +## Usage + +```bash +maschina usage +``` + +``` +Plan: M5 +Period: 2026-03-01 → 2026-04-01 +Tokens used: 1,204,322 +Tokens limit: 5,000,000 +Quota: 24.1% consumed +Runs this month: 847 +``` + +--- + +## Configuration + +Config is stored at `~/.config/maschina/config.toml`. + +```bash +# View a config value +maschina config get api_url + +# Set a config value +maschina config set api_url https://api.maschina.ai +``` + +### Multiple profiles + +Use profiles to switch between environments (production, staging, local): + +```bash +# Login to a specific profile +maschina --profile staging login + +# Use a profile for any command +maschina --profile staging agent list +maschina --profile local agent run AGENT_ID --input '{}' +``` + +--- + +## Self-hosted setup + +If you're running Maschina locally with Docker: + +```bash +# Interactive setup wizard — sets API URL, credentials, and local config +maschina setup +``` + +```bash +# Start all local services +maschina service start + +# Check which services are running +maschina service status + +# Tail logs for a specific service +maschina service logs api --follow +maschina service logs runtime --follow +maschina service logs daemon --follow +``` + +--- + +## Scaffold with maschina-code + +The `maschina-code` TUI helps you scaffold agent configs, routes, and connectors interactively: + +```bash +# Interactive scaffold +maschina-code + +# Scaffold a new agent config directly +maschina-code agent research-agent +``` diff --git a/apps/docs/guides/faq.mdx b/apps/docs/guides/faq.mdx new file mode 100644 index 0000000..149c9f8 --- /dev/null +++ b/apps/docs/guides/faq.mdx @@ -0,0 +1,165 @@ +--- +title: FAQ +description: 
Common questions about agents, runs, billing, models, and self-hosting.
+---
+
+import { Question, Robot, CreditCard, Cpu, HardDrive, ShieldCheck, ArrowsClockwise } from "@phosphor-icons/react";
+
+## Agents
+
+**What's the difference between agent types?**
+
+Agent types (`execution`, `analysis`, `signal`, `optimization`, `reporting`) are semantic labels that help categorize what an agent does. They affect how the agent appears in search and the marketplace, but all types use the same execution pipeline. The behavior difference comes entirely from the system prompt.
+
+**Can I change an agent's type after creation?**
+
+Yes. Use `PATCH /agents/:id` with `{ "type": "analysis" }`. The change takes effect immediately.
+
+**Are agents stateful between runs?**
+
+No. Each run is independent. If your agent needs memory or context from previous runs, pass it in the `input.message` field. Multi-turn context within a single run is handled automatically by the runtime.
+
+**How many agents can I create?**
+
+| Plan | Agent limit |
+|---|---|
+| Access | 3 |
+| M1 | 25 |
+| M5 | 100 |
+| M10 | Unlimited |
+| Enterprise | Unlimited |
+
+**Can I run multiple agents in parallel?**
+
+Yes. Each `POST /agents/:id/run` call is independent. All run concurrently up to your plan's concurrency limit.
+
+---
+
+## Runs
+
+**Why is my run stuck in `queued`?**
+
+The Daemon picks up queued jobs from NATS JetStream. If a run stays queued longer than a few seconds, check:
+- Daemon service is running (`docker compose ps` or `maschina service status`)
+- NATS connection is healthy
+- No quota exhaustion (`maschina usage`)
+
+**What happens when a run fails?**
+
+The Daemon retries up to 3 times with exponential backoff. If all retries fail, the run is marked `failed` and an `agent.run.failed` webhook event is dispatched (if configured). The error code and message are available on the run object.
+ +**Can I cancel a run that's already queued?** + +Queued runs can be cancelled via `maschina agent stop RUN_ID`. Runs that are already in the `running` state cannot be interrupted — they execute to completion or timeout. + +**What's the maximum timeout for a run?** + +600,000 ms (10 minutes). The default is 300,000 ms (5 minutes). Set `timeout` in the run request body. + +**Where does the output go?** + +Run output is stored in `outputPayload` on the run object and is available via `GET /agents/:agentId/runs/:runId`. It's also delivered via webhook and realtime WebSocket if you've configured either. + +--- + +## Models + +**Which models are available?** + +| Model | Plan required | Billing multiplier | +|---|---|---| +| `claude-haiku-4-5` | Access+ | 1× | +| `claude-sonnet-4-6` | M1+ | 2× | +| `claude-opus-4-6` | M10 | 8× | +| `ollama/*` | Self-hosted only | 1× | + +**What is the billing multiplier?** + +The multiplier adjusts the cost of premium models relative to your plan's token quota. A 1,000-token Sonnet run costs 2,000 tokens from your monthly quota. A 1,000-token Opus run costs 8,000 tokens. + +**What happens if my preferred model is unavailable?** + +Cascade fallback kicks in automatically. If `claude-opus-4-6` is unreachable, the runtime falls back to `claude-sonnet-4-6`, then `claude-haiku-4-5`. If all fail, the run is marked `failed` with error code `model_unavailable`. No action required on your end. + +**Can I use Ollama for local development?** + +Yes. When no `ANTHROPIC_API_KEY` is set, the runtime routes all requests to a local Ollama instance. Set `OLLAMA_BASE_URL` in your environment. Use `model: "ollama/llama3"` (or any Ollama-hosted model) in your run request. + +--- + +## Billing + +**What does "token quota" mean?** + +Your plan includes a monthly token allowance. Each run consumes tokens based on input + output tokens × the model's billing multiplier. Quota resets on your billing cycle date. 
+ +**What happens when I hit my quota?** + +New run requests return `429 Quota Exceeded`. In-progress runs are not interrupted. You'll receive a `usage.quota_warning` webhook at 80% and `usage.quota_exceeded` at 100%. + +**Is there a free tier?** + +Yes. The Access plan is free and includes 3 agents and a limited monthly token quota with access to `claude-haiku-4-5`. + +**Can I upgrade mid-cycle?** + +Yes. Upgrading takes effect immediately. Your token quota resets to the new plan's limit, prorated for the remaining days in the cycle. + +**Do unused tokens roll over?** + +No. Token quotas reset at the start of each billing cycle. Unused tokens do not carry over. + +--- + +## Authentication and Security + +**How should I store my API key?** + +Store it in an environment variable — never in source code or version control. Use `MASCHINA_API_KEY` and access it with `process.env.MASCHINA_API_KEY` or your language's equivalent. + +**Can I scope API keys?** + +Currently all API keys have full access to your account's resources. Scoped keys are planned. + +**Are webhook payloads signed?** + +Yes. Every delivery includes an `X-Maschina-Signature` header with a SHA256 HMAC of the payload body signed with your webhook secret. Always verify this before processing. See [Verifying Signatures](/guides/webhooks#verifying-signatures). + +**What if my API key is compromised?** + +Revoke it immediately: `maschina keys revoke KEY_ID` or `DELETE /keys/:id`. Create a new one. Revocation takes effect within seconds. + +--- + +## Self-Hosting + +**Do I need the source code to self-host?** + +No. The `maschina-labs/self-hosted` repository contains pre-built Docker images from `ghcr.io/maschina-labs/`. You pull images, configure `.env`, and run `docker compose up`. No source code required. + +**Can I use managed dependencies instead of running everything locally?** + +Yes. 
For production, use: +- **PostgreSQL** → Neon, Supabase, or RDS +- **Redis** → Upstash or ElastiCache +- **NATS** → NGS (NATS Global Service) +- **Meilisearch** → Meilisearch Cloud + +**What's the minimum hardware for self-hosting?** + +- Docker Compose (dev): 4 GB RAM, 2 vCPUs +- Production (single node): 8 GB RAM, 4 vCPUs +- Production (split services): Individual services are small; Runtime benefits most from more RAM and CPU + +**Is self-hosting free?** + +The self-hosted distribution is Apache 2.0 licensed — free to run, modify, and use commercially. You're responsible for your own infrastructure costs and LLM API keys. + +**How do I stay up to date with self-hosted?** + +```bash +git pull origin main +docker compose pull +docker compose up -d +docker compose exec api pnpm db:migrate +``` diff --git a/apps/docs/guides/first-agent.mdx b/apps/docs/guides/first-agent.mdx new file mode 100644 index 0000000..da13569 --- /dev/null +++ b/apps/docs/guides/first-agent.mdx @@ -0,0 +1,246 @@ +--- +title: Building Your First Agent +description: Create, configure, and run an agent end to end. +--- + +## What you'll build + +A research agent that takes a topic as input and returns a structured summary. You'll create the agent, run it, and receive the result via webhook and realtime stream. + +## Prerequisites + +- A Maschina account — [sign up free](https://app.maschina.ai/register) +- An API key — create one from the [dashboard](https://app.maschina.ai/keys) or via `maschina keys create` +- SDK installed — `pnpm add @maschina/sdk` or `pip install maschina-sdk` + +--- + +## Step 1: Create the agent + +Every agent needs a name, a type, and a system prompt. The system prompt defines how the agent behaves on every run. 
+ + + +```typescript TypeScript +import { MaschinaClient } from "@maschina/sdk"; + +const maschina = new MaschinaClient({ + apiKey: process.env.MASCHINA_API_KEY, +}); + +const agent = await maschina.agents.create({ + name: "Research Agent", + type: "analysis", + config: { + systemPrompt: `You are a research analyst. When given a topic, produce: +1. A one-paragraph overview +2. Three key insights +3. Two open questions worth investigating + +Be concise. Use plain language. Cite sources when possible.`, + model: "claude-sonnet-4-6", + }, +}); + +console.log("Agent created:", agent.id); +// → Agent created: agt_01abc... +``` + +```python Python +from maschina import MaschinaClient + +maschina = MaschinaClient(api_key="your-api-key") + +agent = maschina.agents.create( + name="Research Agent", + type="analysis", + config={ + "systemPrompt": """You are a research analyst. When given a topic, produce: +1. A one-paragraph overview +2. Three key insights +3. Two open questions worth investigating""", + "model": "claude-sonnet-4-6", + } +) + +print("Agent created:", agent.id) +``` + +```bash cURL +curl -X POST https://api.maschina.ai/agents \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Research Agent", + "type": "analysis", + "config": { + "systemPrompt": "You are a research analyst...", + "model": "claude-sonnet-4-6" + } + }' +``` + + + +--- + +## Step 2: Register a webhook + +Register a webhook to receive run results automatically. Maschina will POST to your URL when a run completes or fails. + +```typescript +const webhook = await maschina.webhooks.create({ + url: "https://your-app.com/webhooks/maschina", + events: ["agent.run.completed", "agent.run.failed"], +}); + +// Save webhook.secret — shown once, used to verify all future deliveries +console.log("Webhook secret:", webhook.secret); +``` + + +Store the webhook secret immediately. It is displayed once and cannot be retrieved after creation. 
+ + +--- + +## Step 3: Run the agent + +```typescript +const run = await maschina.agents.run(agent.id, { + input: { + message: "Summarize the current state of large language model inference optimization.", + }, +}); + +console.log("Run queued:", run.runId); +// Your webhook receives the result automatically when the run completes +``` + +The run is now in the NATS job queue. The Daemon will pick it up, run risk checks, route to the model, execute, and fire your webhook. + +--- + +## Step 4: Handle the webhook + +Verify the signature before processing. Never skip this. + + + +```typescript TypeScript +import crypto from "node:crypto"; + +function verifyWebhook(payload: string, secret: string, header: string): boolean { + const expected = "sha256=" + crypto + .createHmac("sha256", secret) + .update(payload) + .digest("hex"); + return crypto.timingSafeEqual( + Buffer.from(header), + Buffer.from(expected) + ); +} + +// Express handler +app.post("/webhooks/maschina", express.raw({ type: "application/json" }), (req, res) => { + const sig = req.headers["x-maschina-signature"] as string; + + if (!verifyWebhook(req.body.toString(), process.env.WEBHOOK_SECRET!, sig)) { + return res.status(401).send("Invalid signature"); + } + + const event = JSON.parse(req.body.toString()); + + if (event.type === "agent.run.completed") { + const { run_id, output } = event.data; + console.log("Run complete:", run_id); + console.log("Output:", output); + } + + if (event.type === "agent.run.failed") { + const { run_id, error_code, error_message } = event.data; + console.error("Run failed:", run_id, error_code, error_message); + } + + res.status(200).send("ok"); +}); +``` + +```python Python +import hashlib +import hmac +import json +from flask import Flask, request, abort + +app = Flask(__name__) + +def verify_webhook(payload: bytes, secret: str, header: str) -> bool: + expected = "sha256=" + hmac.new( + secret.encode(), + payload, + hashlib.sha256 + ).hexdigest() + return 
hmac.compare_digest(header, expected) + +@app.route("/webhooks/maschina", methods=["POST"]) +def handle_webhook(): + sig = request.headers.get("X-Maschina-Signature", "") + + if not verify_webhook(request.data, WEBHOOK_SECRET, sig): + abort(401) + + event = request.json + + if event["type"] == "agent.run.completed": + print("Run complete:", event["data"]["run_id"]) + + return "ok", 200 +``` + + + +--- + +## Step 5: Poll for results (alternative) + +If you prefer polling over webhooks: + +```typescript +async function waitForRun(agentId: string, runId: string) { + while (true) { + const run = await maschina.agents.getRun(agentId, runId); + + if (run.status === "completed") { + return run.outputPayload; + } + + if (run.status === "failed") { + throw new Error(run.errorMessage); + } + + await new Promise(resolve => setTimeout(resolve, 1000)); + } +} + +const output = await waitForRun(agent.id, run.runId); +console.log(output); +``` + +--- + +## Next steps + + + + Choose the right model for your use case and budget. + + + Full webhook setup and all supported events. + + + Deploy and manage agents from the terminal. + + + Full agent API reference. + + diff --git a/apps/docs/guides/models.mdx b/apps/docs/guides/models.mdx new file mode 100644 index 0000000..f3a3542 --- /dev/null +++ b/apps/docs/guides/models.mdx @@ -0,0 +1,111 @@ +--- +title: Model Selection +description: Choose the right model for your agents. +--- + +import { Cpu, ArrowsClockwise, Robot, Lightning } from "@phosphor-icons/react"; + +## Overview + +Maschina supports models from Anthropic, OpenAI, and local Ollama instances. Each model has a minimum plan requirement and a billing multiplier applied to raw token counts. + +You can specify a model when creating an agent (default for all runs) or override it per run. 
+ +## Choosing a Model + +| Use case | Recommended model | +|---|---| +| High volume, low cost | `claude-haiku-4-5` or `gpt-5-mini` | +| Everyday tasks | `claude-sonnet-4-6` or `gpt-5` | +| Complex reasoning | `claude-opus-4-6` or `o3` | +| Local / offline / free | `ollama/llama3.2` | +| Code generation | `claude-sonnet-4-6` or `gpt-5` | +| Long documents | `claude-sonnet-4-6` (1M context) | + +## Cascade Fallback + +If a run fails due to model unavailability, Maschina automatically falls back to the next best model for your tier. You never get a hard failure from a transient model outage. + +```mermaid +flowchart LR + O[claude-opus-4-6] -->|unavailable| S[claude-sonnet-4-6] + S -->|unavailable| H[claude-haiku-4-5] + H -->|unavailable| E([run failed]) + + O2[gpt-5.4-pro] -->|unavailable| S2[gpt-5.4] + S2 -->|unavailable| H2[gpt-5] + + style E fill:#3d1a1a,stroke:#8a2d2d,color:#fff +``` + +## Setting a Model + +### On the agent (default for all runs) + +```typescript +const agent = await maschina.agents.create({ + name: "My Agent", + type: "execution", + config: { + model: "claude-sonnet-4-6", + systemPrompt: "...", + }, +}); +``` + +### Per run (override) + +```typescript +const run = await maschina.agents.run(agent.id, { + input: { message: "..." 
}, + model: "claude-opus-4-6", // overrides agent default for this run +}); +``` + +## Model Reference + +### Anthropic + +| Model | Context | Min plan | Multiplier | +|---|---|---|---| +| `claude-haiku-4-5` | 200k | M1 | 1x | +| `claude-sonnet-4-5` | 1M | M5 | 3x | +| `claude-sonnet-4-6` | 1M | M5 | 3x | +| `claude-opus-4-5` | 200k | M10 | 15x | +| `claude-opus-4-6` | 1M | M10 | 15x | + +### OpenAI + +| Model | Context | Min plan | Multiplier | +|---|---|---|---| +| `gpt-5-nano` | TBD | M1 | 1x | +| `gpt-5-mini` | 400k | M1 | 1x | +| `o4-mini` | 200k | M1 | 2x | +| `gpt-5` | 1M+ | M5 | 8x | +| `gpt-5.4` | 1M+ | M5 | 10x | +| `o3` | 200k | M10 | 20x | +| `gpt-5.4-pro` | 1M+ | M10 | 25x | + +### Local (Ollama) + +Local models run on your own hardware. No tokens are deducted from your quota. + +| Model | Min plan | Multiplier | +|---|---|---| +| `ollama/llama3.2` | Access (free) | 0x | +| `ollama/llama3.1` | Access (free) | 0x | +| `ollama/mistral` | Access (free) | 0x | + +Any model available in your Ollama instance can be used with the `ollama/` prefix. + +## Passthrough Models + +If you specify a model ID not in the catalog, Maschina routes it by prefix as long as you have M1 or higher. A flat 2x billing multiplier applies. + +```typescript +// Routes to Anthropic — billed at 2x +model: "claude-future-model-x" + +// Routes to OpenAI — billed at 2x +model: "gpt-6" +``` diff --git a/apps/docs/guides/realtime.mdx b/apps/docs/guides/realtime.mdx new file mode 100644 index 0000000..518bb59 --- /dev/null +++ b/apps/docs/guides/realtime.mdx @@ -0,0 +1,195 @@ +--- +title: Realtime +description: Stream live run status updates over WebSocket or SSE. +--- + +import { WifiHigh, Lightning, ArrowsClockwise, Bell } from "@phosphor-icons/react"; + +The Realtime service pushes run status events to connected clients as they happen. No polling required — connect once and receive updates the moment a run transitions state. 
+ +## How It Works + +The Realtime service subscribes to NATS events from the Daemon and fans them out per user. When your run moves from `queued` to `running` to `completed`, your client receives each transition in real time. + +```mermaid +sequenceDiagram + participant App as Your App + participant GW as Gateway + participant RT as Realtime Service + participant NATS + participant Daemon + + App->>GW: WSS /realtime?token=... + GW->>RT: authenticated connection + App->>RT: subscribe { runId } + Daemon->>NATS: run.status (running) + Daemon->>NATS: run.status (completed) + Daemon->>NATS: run.output + NATS->>RT: fan-out events + RT->>App: run.status (running) + RT->>App: run.status (completed) + RT->>App: run.output +``` + +## WebSocket + +Connect with your JWT token: + +``` +wss://api.maschina.ai/realtime?token=YOUR_JWT +``` + +Or use an API key: + +``` +wss://api.maschina.ai/realtime?token=msk_live_... +``` + +### Subscribe to a run + +```javascript +const ws = new WebSocket(`wss://api.maschina.ai/realtime?token=${token}`); + +ws.onopen = () => { + ws.send(JSON.stringify({ + type: "subscribe", + runId: "run_01xyz...", + })); +}; + +ws.onmessage = (event) => { + const msg = JSON.parse(event.data); + + switch (msg.type) { + case "run.status": + console.log("Status:", msg.status); // queued | running | completed | failed + break; + case "run.output": + console.log("Output:", msg.output); + break; + case "run.error": + console.error("Error:", msg.error); + break; + } +}; +``` + +### Subscribe to all runs for a user + +Omit `runId` to receive events for all of your runs: + +```javascript +ws.send(JSON.stringify({ type: "subscribe_all" })); +``` + +### Unsubscribe + +```javascript +ws.send(JSON.stringify({ + type: "unsubscribe", + runId: "run_01xyz...", +})); +``` + +## SSE (Server-Sent Events) + +For environments where WebSocket isn't practical (some proxies, serverless edge): + +```javascript +const source = new EventSource( + 
`https://api.maschina.ai/realtime/sse?runId=run_01xyz...&token=${token}` +); + +source.addEventListener("run.status", (e) => { + const data = JSON.parse(e.data); + console.log("Status:", data.status); +}); + +source.addEventListener("run.output", (e) => { + const data = JSON.parse(e.data); + console.log("Output:", data.output); +}); + +source.onerror = () => { + console.error("SSE connection lost — reconnecting..."); + // EventSource reconnects automatically +}; +``` + +## Event Reference + +### run.status + +Emitted on every run state transition. + +```json +{ + "type": "run.status", + "runId": "run_01xyz...", + "agentId": "agt_01abc...", + "status": "running", + "timestamp": "2026-03-13T12:00:01.000Z" +} +``` + +### run.output + +Emitted when the run completes with output. + +```json +{ + "type": "run.output", + "runId": "run_01xyz...", + "agentId": "agt_01abc...", + "output": { "text": "Q1 revenue grew 14% YoY..." }, + "model": "claude-sonnet-4-6", + "inputTokens": 312, + "outputTokens": 847, + "durationMs": 2341 +} +``` + +### run.error + +Emitted when a run fails. + +```json +{ + "type": "run.error", + "runId": "run_01xyz...", + "agentId": "agt_01abc...", + "errorCode": "model_unavailable", + "errorMessage": "All fallback models exhausted", + "timestamp": "2026-03-13T12:00:05.000Z" +} +``` + +## TypeScript SDK Usage + +The SDK wraps the WebSocket connection: + +```typescript +const stream = await maschina.agents.stream(agentId, runId); + +for await (const event of stream) { + if (event.type === "run.status") { + console.log("Status:", event.status); + } + if (event.type === "run.output") { + console.log("Output:", event.output); + break; + } +} +``` + +## Connection Limits + +| Plan | Concurrent connections | +|---|---| +| Access | 1 | +| M1 | 5 | +| M5 | 20 | +| M10 | 100 | +| Enterprise | Unlimited | + +Connections exceeding the limit receive a `4029 Too Many Connections` close code. 
diff --git a/apps/docs/guides/search.mdx b/apps/docs/guides/search.mdx new file mode 100644 index 0000000..6c9d596 --- /dev/null +++ b/apps/docs/guides/search.mdx @@ -0,0 +1,169 @@ +--- +title: Search +description: Full-text search across your agents — instant results powered by Meilisearch. +--- + +import { MagnifyingGlass, Lightning, ArrowsClockwise } from "@phosphor-icons/react"; + + Maschina indexes your agents automatically using [Meilisearch](https://meilisearch.com). No configuration required — agents are searchable the moment you create them. + +## How It Works + +```mermaid +sequenceDiagram + participant API + participant PG as PostgreSQL + participant Meili as Meilisearch + participant You as Your App + + You->>API: POST /agents + API->>PG: insert agent + API->>Meili: index agent document + API-->>You: 201 Created + + You->>API: GET /search?q=research + API->>Meili: search(q="research", user_id=...) + Meili-->>API: hits [] + API-->>You: { hits, total, processingTimeMs } +``` + +When you create or update an agent, the API writes to PostgreSQL and immediately syncs the document to Meilisearch. Search results are always scoped to your user — you never see another user's agents. 
+ +--- + +## Searching via SDK + + + +```typescript TypeScript +import { MaschinaClient } from "@maschina/sdk"; + +const maschina = new MaschinaClient({ + apiKey: process.env.MASCHINA_API_KEY, +}); + +// Basic search +const results = await maschina.search("research"); + +results.hits.forEach((agent) => { + console.log(agent.name, agent.description); +}); + +// With options +const filtered = await maschina.search("market analysis", { + type: "agents", + limit: 10, + offset: 0, +}); + +console.log(`Found ${filtered.total} agents in ${filtered.processingTimeMs}ms`); +``` + +```python Python +from maschina import MaschinaClient + +maschina = MaschinaClient() + +# Basic search +results = maschina.search("research") + +for agent in results.hits: + print(agent.name, agent.description) + +# With options +results = maschina.search("market analysis", type="agents", limit=10) +print(f"Found {results.total} agents in {results.processing_time_ms}ms") +``` + + + +--- + +## Searching via REST + +```bash +curl "https://api.maschina.ai/search?q=research&type=agents&limit=20" \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +```bash +# URL-encode multi-word queries +curl "https://api.maschina.ai/search?q=market%20analysis&type=agents" \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +--- + +## Response Structure + +```json +{ + "hits": [ + { + "id": "agt_01abc...", + "name": "Research Agent", + "description": "Produces structured summaries from any topic.", + "type": "analysis", + "status": "idle", + "model": "claude-sonnet-4-6", + "createdAt": "2026-03-01T00:00:00.000Z" + } + ], + "total": 1, + "query": "research", + "processingTimeMs": 2 +} +``` + +--- + +## What Gets Indexed + +Search matches against: +- **Name** — exact and fuzzy matching +- **Description** — full-text, including partial matches +- **Type** — `execution`, `analysis`, `signal`, `optimization`, `reporting` + +Search does **not** match against: +- System prompts (these may contain sensitive data) +- Run history or
run outputs +- API keys + +--- + +## Pagination + +```typescript +async function searchAll(query: string) { + const limit = 100; + let offset = 0; + const all = []; + + while (true) { + const results = await maschina.search(query, { limit, offset }); + all.push(...results.hits); + if (all.length >= results.total) break; + offset += limit; + } + + return all; +} +``` + +--- + +## Search Availability + +Search is available on all plan tiers. If Meilisearch is unavailable (e.g. during a self-hosted outage), the search endpoint returns an empty result set — the API itself continues to function normally. + +Use `GET /agents` for list-based retrieval that doesn't require full-text search. + +--- + +## CLI Search + +```bash +maschina agent list --search "research" +``` + +The CLI passes your query to the search API and displays matching agents in a table. diff --git a/apps/docs/guides/troubleshooting.mdx b/apps/docs/guides/troubleshooting.mdx new file mode 100644 index 0000000..0557dab --- /dev/null +++ b/apps/docs/guides/troubleshooting.mdx @@ -0,0 +1,302 @@ +--- +title: Troubleshooting +description: Diagnose and fix common issues with runs, webhooks, auth, and self-hosting. +--- + +import { Warning, ArrowsClockwise, ShieldCheck, WifiHigh, HardDrive, Key } from "@phosphor-icons/react"; + +## Runs + +### Run is stuck in `queued` + +The Daemon hasn't picked up the job from NATS yet. + +**Check 1 — Is the Daemon running?** +```bash +# Self-hosted +docker compose ps daemon +maschina service status + +# Managed — check the status page +``` + +**Check 2 — Is NATS reachable?** +```bash +docker compose logs daemon | grep -i nats +``` + +Look for connection errors. If NATS is unreachable, the Daemon can't consume from the job queue. + +**Check 3 — Is quota exhausted?** +```bash +maschina usage +``` + +If you're at 100%, new runs are rejected before reaching the queue. + +--- + +### Run fails immediately with `quota_exceeded` + +Your monthly token quota is exhausted. 
Check usage: + +```bash +curl https://api.maschina.ai/usage \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +Wait for the next billing cycle, or upgrade your plan. Quota resets on your cycle date. + +--- + +### Run fails with `model_unavailable` + +All models in the cascade fallback chain returned errors. + +1. Verify your `ANTHROPIC_API_KEY` is valid and has remaining credits +2. Check the Anthropic status page for outages +3. If using Ollama locally, verify it's running: `curl http://localhost:11434/api/tags` + +The cascade fallback tries: `claude-opus-4-6` → `claude-sonnet-4-6` → `claude-haiku-4-5`. If all fail, the run is marked `failed`. + +--- + +### Run fails with `risk_blocked` + +Input or output failed the risk check. The runtime scans for blocked patterns and PII before and after execution. + +- Review your system prompt and input for patterns that may trigger the risk scanner +- Blocked patterns are configured by the operator — if self-hosting, check the Runtime configuration + +--- + +### Run times out + +The Runtime returned no response within the configured timeout. + +- Default timeout: 300,000 ms (5 minutes) +- Max timeout: 600,000 ms (10 minutes) +- Increase the timeout in your run request: `{ "timeout": 600000 }` +- If runs consistently time out, check Runtime logs: `docker compose logs runtime` + +--- + +## Webhooks + +### Webhook not receiving deliveries + +**Check 1 — Is the webhook active?** +```bash +curl https://api.maschina.ai/webhooks/WEBHOOK_ID \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +If `status` is `failing` or `disabled`, re-enable it: +```bash +curl -X PATCH https://api.maschina.ai/webhooks/WEBHOOK_ID \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{"active": true}' +``` + +**Check 2 — Is your endpoint publicly reachable?** + +Maschina must reach your endpoint from the internet.
Use [ngrok](https://ngrok.com) or a similar tunneling tool for local development: + +```bash +ngrok http 3000 +# → Forwarding https://abc123.ngrok.io → localhost:3000 +``` + +Use the ngrok URL as your webhook URL. + +**Check 3 — Check the delivery log** +```bash +curl https://api.maschina.ai/webhooks/WEBHOOK_ID/deliveries \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +Look at the `responseCode` and `status` fields. A `responseCode` of `0` means the connection was refused — your endpoint was unreachable. + +--- + +### Signature verification failing + +Double-check your implementation: + +1. **Use the raw request body** — do not parse it as JSON before computing the HMAC. Keep it as a string, sign the string. +2. **Use `timingSafeEqual`** — never use `===` for comparing signatures. Timing attacks are real. +3. **Use the correct secret** — the webhook secret from creation, not your API key. + +```typescript +// Correct — raw body +app.post("/webhooks", express.raw({ type: "application/json" }), (req, res) => { + const raw = req.body.toString(); // keep as string + const sig = req.headers["x-maschina-signature"] as string; + // verify against raw +}); + +// Wrong — parsed body will serialize differently +app.post("/webhooks", express.json(), (req, res) => { + const raw = JSON.stringify(req.body); // do NOT do this +}); +``` + +--- + +### Deliveries failing with 5xx from my endpoint + +Maschina retries 5 times with exponential backoff (immediate, 10s, 30s, 90s, 5min). After 5 failures the webhook is marked `failing`. + +Fix your endpoint, then re-enable the webhook and trigger a test delivery: + +```bash +curl -X POST https://api.maschina.ai/webhooks/WEBHOOK_ID/test \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +--- + +## Authentication + +### `401 Unauthorized` on every request + +- Verify the key is valid — it should start with `msk_live_` +- Verify the header format is exactly `Authorization: Bearer msk_live_...` +- The key may have been revoked.
List your keys: `maschina keys list` +- If using a JWT session token, it may have expired. Re-login: `maschina login` + +--- + +### API key shown as invalid after creation + +API keys are shown exactly once at creation. If you didn't save it, revoke the key and create a new one: + +```bash +maschina keys revoke KEY_ID +maschina keys create "new-key" +``` + +--- + +### `403 Forbidden` on a specific endpoint + +- The endpoint requires a higher plan tier. Check which plan each feature requires in the [Concepts](/concepts) reference. +- The compliance endpoints (`/compliance/*`) require M10 or Enterprise. +- The `claude-opus-4-6` model requires M10. + +--- + +## Self-Hosting + +### Services fail to start + +```bash +docker compose logs +``` + +Most common causes: +- Missing required environment variable (`JWT_SECRET`, `DATABASE_URL`, `NATS_URL`) +- Port already in use — another process is on that port +- Insufficient disk space or RAM + +--- + +### Database connection refused + +PostgreSQL takes 10–15 seconds to initialize on first start. Wait, then retry migrations: + +```bash +docker compose exec api pnpm db:migrate +``` + +If it persists, check: +```bash +docker compose logs postgres +``` + +--- + +### Runtime can't connect to LLM provider + +```bash +docker compose logs runtime | grep -i error +``` + +Common causes: +- `ANTHROPIC_API_KEY` not set or invalid — runtime falls back to Ollama, which may not be running +- Outbound internet blocked for the runtime container — check your Docker network configuration + +--- + +### Meilisearch not indexing agents + +The API syncs agents to Meilisearch on create and update. Existing agents created before Meilisearch was connected won't appear in search. + +Restart the API to trigger a re-sync: +```bash +docker compose restart api +``` + +If Meilisearch is unreachable, search degrades gracefully — the API stays up, search just returns empty results. 
+ +--- + +### Gateway returning 502 + +The Gateway can't reach the API or Realtime service. Check: + +```bash +docker compose ps +docker compose logs gateway +``` + +If any downstream service is unhealthy, the Gateway returns 502. Bring the failed service back up: + +```bash +docker compose restart api +``` + +--- + +## CLI + +### `maschina: command not found` + +The binary isn't in your PATH. Add the install directory to your shell profile: + +```bash +# If installed to ~/.local/bin +echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.zshrc +source ~/.zshrc +``` + +Or re-run the installer: +```bash +curl -fsSL https://maschina.ai/install.sh | sh +``` + +--- + +### CLI can't connect to API + +```bash +maschina status +``` + +If the API is unreachable: + +1. Check your internet connection +2. Verify the configured API URL: `maschina config get api_url` +3. For self-hosted: verify the Gateway is running on the expected port + +--- + +### Config file corrupted + +The CLI config lives at `~/.config/maschina/config.toml`. If it's corrupted: + +```bash +rm ~/.config/maschina/config.toml +maschina login +``` diff --git a/apps/docs/guides/webhooks.mdx b/apps/docs/guides/webhooks.mdx new file mode 100644 index 0000000..c6e3d66 --- /dev/null +++ b/apps/docs/guides/webhooks.mdx @@ -0,0 +1,162 @@ +--- +title: Webhooks +description: Receive real-time notifications when agent runs complete or quotas are hit. +--- + +import { GitBranch, ShieldCheck, ArrowsClockwise, Bell, Warning } from "@phosphor-icons/react"; + +## Overview + +Webhooks let you receive HTTP POST notifications from Maschina when events occur. Every delivery is signed with HMAC-SHA256 so you can verify it came from Maschina. 
+ +```mermaid +sequenceDiagram + participant Daemon + participant NATS + participant Worker as Webhook Worker + participant Your as Your Endpoint + + Daemon->>NATS: agent.run.completed + NATS->>Worker: consume event + Worker->>Worker: sign with HMAC-SHA256 + Worker->>Your: POST /webhooks/maschina + Your->>Your: verify X-Maschina-Signature + Your->>Worker: 200 OK + Worker->>Worker: log delivery (success) +``` + +If your endpoint returns a non-2xx response, the Worker retries with exponential backoff up to 5 attempts. + +## Creating a Webhook + +```typescript +const webhook = await maschina.webhooks.create({ + url: "https://your-app.com/webhooks/maschina", + events: ["agent.run.completed", "agent.run.failed"], +}); + +// Save this — shown once, never retrievable again +console.log(webhook.secret); +``` + +## Supported Events + +| Event | Description | +|---|---| +| `agent.run.started` | A run has been picked up and is executing | +| `agent.run.completed` | A run finished successfully | +| `agent.run.failed` | A run failed after all retry attempts | +| `subscription.updated` | Plan or billing status changed | +| `usage.quota_warning` | 80% of monthly quota consumed | +| `usage.quota_exceeded` | Monthly quota exhausted | + +## Payload Structure + +Every delivery shares the same envelope structure: + +```json +{ + "id": "del_01abc...", + "type": "agent.run.completed", + "created_at": "2026-03-13T12:00:00.000Z", + "api_version": "2026-03-13", + "data": { ... 
} +} +``` + +### agent.run.completed + +```json +{ + "data": { + "run_id": "run_01xyz...", + "agent_id": "agt_01abc...", + "user_id": "usr_01...", + "model": "claude-sonnet-4-6", + "input_tokens": 312, + "output_tokens": 847, + "duration_ms": 2341, + "turns": 1 + } +} +``` + +### agent.run.failed + +```json +{ + "data": { + "run_id": "run_01xyz...", + "agent_id": "agt_01abc...", + "user_id": "usr_01...", + "error_code": "timeout", + "error_message": "Run timed out after 300s" + } +} +``` + +## Verifying Signatures + + Always verify the `X-Maschina-Signature` header before processing a delivery. + + + +```typescript TypeScript +import crypto from "node:crypto"; + +function verify(payload: string, secret: string, header: string): boolean { + const expected = "sha256=" + crypto + .createHmac("sha256", secret) + .update(payload) + .digest("hex"); + return crypto.timingSafeEqual( + Buffer.from(header), + Buffer.from(expected) + ); +} +``` + +```python Python +import hashlib +import hmac + +def verify(payload: str, secret: str, header: str) -> bool: + expected = "sha256=" + hmac.new( + secret.encode(), + payload.encode(), + hashlib.sha256 + ).hexdigest() + return hmac.compare_digest(header, expected) +``` + + + +## Retry Behavior + + If your endpoint returns a non-2xx response, Maschina retries with exponential backoff: + +| Attempt | Delay | +|---|---| +| 1 | Immediate | +| 2 | 10 seconds | +| 3 | 30 seconds | +| 4 | 90 seconds | +| 5 | 5 minutes | + +After 5 failures, the webhook is marked `failing`. Re-enable it from the dashboard or via `PATCH /webhooks/:id`. 
+ +## Testing Deliveries + +Send a test delivery from the dashboard or API: + +```bash +curl -X POST https://api.maschina.ai/webhooks/WEBHOOK_ID/test \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +## Viewing Delivery Logs + +```bash +curl https://api.maschina.ai/webhooks/WEBHOOK_ID/deliveries \ + -H "Authorization: Bearer YOUR_API_KEY" +``` diff --git a/apps/docs/install.mdx b/apps/docs/install.mdx new file mode 100644 index 0000000..32e2297 --- /dev/null +++ b/apps/docs/install.mdx @@ -0,0 +1,161 @@ +--- +title: Install +description: Install the Maschina SDK, CLI, or run the full stack with Docker. +--- + +## SDK + +Pick your language and install the official client library. + + + +```bash npm +npm install @maschina/sdk +``` + +```bash pnpm +pnpm add @maschina/sdk +``` + +```bash yarn +yarn add @maschina/sdk +``` + +```bash bun +bun add @maschina/sdk +``` + +```bash pip +pip install maschina-sdk +``` + +```toml Cargo.toml +[dependencies] +maschina = "0.1" +tokio = { version = "1", features = ["full"] } +``` + + + +--- + +## CLI + +The `maschina` CLI lets you manage agents, keys, and services from the terminal. + +```bash +curl -fsSL https://maschina.ai/install.sh | sh +``` + +Verify: + +```bash +maschina --version +``` + +Or download a binary from [GitHub Releases](https://github.com/maschina-labs/self-hosted/releases). + +### First login + +```bash +maschina login +# prompts for email and password +# stores credentials at ~/.config/maschina/config.toml +``` + +--- + +## Self-Hosted (Docker) + +Run the full Maschina stack locally with Docker Compose. 
+ +```bash +git clone https://github.com/maschina-labs/self-hosted +cd self-hosted +cp .env.example .env +# edit .env with your API keys and secrets +docker compose up -d +docker compose exec api pnpm db:migrate +``` + +Services: + +| Service | Port | +|---|---| +| Gateway | 8080 | +| API | 3000 | +| Realtime | 8081 | +| Runtime | 8001 | + +See the [Docker guide](/self-hosting/docker) for the full walkthrough, or [Fly.io](/self-hosting/fly) for cloud deployment. + +--- + +## Quick verification + +Once installed, create and run your first agent: + + + +```typescript TypeScript +import { MaschinaClient } from "@maschina/sdk"; + +const maschina = new MaschinaClient({ + apiKey: process.env.MASCHINA_API_KEY, +}); + +const agent = await maschina.agents.create({ + name: "Test Agent", + type: "execution", + config: { systemPrompt: "You are a helpful assistant." }, +}); + +const run = await maschina.agents.run(agent.id, { + input: { message: "Say hello." }, +}); + +console.log(run.runId); +``` + +```python Python +from maschina import MaschinaClient + +maschina = MaschinaClient() + +agent = maschina.agents.create( + name="Test Agent", + type="execution", + config={"systemPrompt": "You are a helpful assistant."} +) + +run = maschina.agents.run(agent.id, input={"message": "Say hello."}) +print(run.run_id) +``` + +```bash cURL +curl -X POST https://api.maschina.ai/agents \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{"name":"Test Agent","type":"execution","config":{"systemPrompt":"You are helpful."}}' +``` + + + +--- + +## Next steps + + + + Full walkthrough — agent, run, result. + + + All CLI commands with examples. + + + Run on your own infrastructure. + + + Full endpoint documentation. 
+ + diff --git a/apps/docs/llms.txt b/apps/docs/llms.txt new file mode 100644 index 0000000..b4b93be --- /dev/null +++ b/apps/docs/llms.txt @@ -0,0 +1,72 @@ +# Maschina Documentation + +Maschina is the backend infrastructure layer for autonomous AI agents. It handles runtime execution, model routing, authentication, billing, job queuing, webhooks, search, and observability. + +## Getting Started + +- [Introduction](https://docs.maschina.ai/introduction): What Maschina is, who it's for, full stack architecture overview. +- [Quickstart](https://docs.maschina.ai/quickstart): Create and run your first agent in under 5 minutes. +- [Concepts](https://docs.maschina.ai/concepts): Agents, runs, models, plans, realtime, and how they fit together. + +## Guides + +- [Building Your First Agent](https://docs.maschina.ai/guides/first-agent): End-to-end walkthrough: create an agent, register a webhook, run it, handle the result. +- [Model Selection](https://docs.maschina.ai/guides/models): Supported models, plan gates, billing multipliers, and cascade fallback behavior. +- [Webhooks](https://docs.maschina.ai/guides/webhooks): HMAC-SHA256-signed HTTP deliveries, retry behavior, all supported events, signature verification. +- [Realtime](https://docs.maschina.ai/guides/realtime): WebSocket and SSE endpoints for live run status streaming. +- [Search](https://docs.maschina.ai/guides/search): Full-text search across agents via Meilisearch. +- [CLI](https://docs.maschina.ai/guides/cli): maschina binary — auth, agents, keys, usage, service management, scaffolding. +- [FAQ](https://docs.maschina.ai/guides/faq): Common questions about agents, billing, models, and self-hosting. +- [Troubleshooting](https://docs.maschina.ai/guides/troubleshooting): Diagnosing common issues with runs, webhooks, auth, and self-hosting. + +## Platform + +- [Platform Overview](https://docs.maschina.ai/platform/overview): The 7-layer platform architecture and current vs planned capabilities. 
+- [The Maschina Network](https://docs.maschina.ai/platform/network): Distributed compute routing, node selection, fault tolerance. +- [Node Operators](https://docs.maschina.ai/platform/nodes): How to contribute compute, earn rewards, reputation, and staking. +- [Agent Marketplace](https://docs.maschina.ai/platform/marketplace): Skills, discovery, developer monetization, multi-agent workflows. +- [Economics](https://docs.maschina.ai/platform/economics): Billing model, token economics, Proof of Compute, revenue distribution. +- [Roadmap](https://docs.maschina.ai/platform/roadmap): Phase 1–5 development roadmap with current status. + +## SDKs + +- [TypeScript SDK](https://docs.maschina.ai/sdks/typescript): npm install @maschina/sdk — full HTTP client for TypeScript and JavaScript. +- [Python SDK](https://docs.maschina.ai/sdks/python): pip install maschina-sdk — sync and async clients for Python. +- [Rust SDK](https://docs.maschina.ai/sdks/rust): maschina = "0.1" in Cargo.toml — async Rust client. +- [REST API](https://docs.maschina.ai/sdks/rest): Base URL, auth, errors, rate limits, pagination, idempotency. + +## API Reference + +- [Authentication](https://docs.maschina.ai/api-reference/authentication): API keys, sessions, OAuth, token management. +- [Agents](https://docs.maschina.ai/api-reference/agents): CRUD operations and run dispatch. POST /agents, GET /agents, PATCH /agents/:id, POST /agents/:id/run. +- [Runs](https://docs.maschina.ai/api-reference/runs): Run object schema, status values, list filtering, error codes. +- [API Keys](https://docs.maschina.ai/api-reference/keys): Key management. POST /keys, GET /keys, DELETE /keys/:id. +- [Usage](https://docs.maschina.ai/api-reference/usage): Token usage, quota status, usage by model, usage history. +- [Webhooks](https://docs.maschina.ai/api-reference/webhooks): Webhook CRUD, test delivery, delivery logs, all supported events. 
+- [Search](https://docs.maschina.ai/api-reference/search): GET /search — full-text search across agents. +- [Realtime](https://docs.maschina.ai/api-reference/realtime): WebSocket and SSE protocol reference, message types, close codes. +- [Compliance](https://docs.maschina.ai/api-reference/compliance): Audit log, GDPR deletion, data retention. M10 and Enterprise only. + +## Self-Hosting + +- [Self-Hosting Overview](https://docs.maschina.ai/self-hosting/overview): Docker images, dependencies, quick start. +- [Docker](https://docs.maschina.ai/self-hosting/docker): Docker Compose setup, health checks, volumes, production config, troubleshooting. +- [Fly.io](https://docs.maschina.ai/self-hosting/fly): Deploying Maschina services to Fly.io with managed dependencies. +- [Environment Variables](https://docs.maschina.ai/self-hosting/environment): Full reference for all configuration variables across all services. +- [Architecture](https://docs.maschina.ai/self-hosting/architecture): Service architecture, request flow, data layer, daemon pipeline. + +## Contributing + +- [Contributing](https://docs.maschina.ai/contributing): How to contribute to the Maschina docs and self-hosted distribution. + +## Key Concepts + +- **Agent**: The core primitive. Has a type (execution, analysis, signal, optimization, reporting), a system prompt, and a default model. Stateless between runs. +- **Run**: A single execution of an agent. Asynchronous. Transitions: queued → running → completed/failed/timeout. +- **Daemon**: Rust service. NATS JetStream pull consumer. Four-stage pipeline: SCAN → EVALUATE → EXECUTE → ANALYZE. +- **Runtime**: Python/FastAPI service. Executes agent runs. Routes by model prefix (ollama/* vs Anthropic). Applies billing multiplier. +- **Gateway**: Rust/Axum. JWT validation, per-IP and per-user rate limiting, HTTP + WebSocket proxy. +- **NATS JetStream**: Durable job queue and event bus. Subjects follow a hierarchical `maschina.*` naming pattern.
+- **Cascade fallback**: When a model is unavailable, automatically falls back to next best model for the tier (claude-opus-4-6 → claude-sonnet-4-6 → claude-haiku-4-5). +- **Plans**: Access (free), M1 ($20/mo), M5 ($60/mo), M10 ($100/mo), Mach Team ($30/seat/mo), Enterprise (custom), Internal (bypasses all limits). +- **Proof of Compute**: Cryptographic verification of executed work — planned Phase 3 feature for distributed nodes. diff --git a/apps/docs/platform/economics.mdx b/apps/docs/platform/economics.mdx new file mode 100644 index 0000000..13e0541 --- /dev/null +++ b/apps/docs/platform/economics.mdx @@ -0,0 +1,165 @@ +--- +title: Economics +description: Token utility, network incentives, compute markets, and the Maschina economic model. +--- + +import { CreditCard, Coins, HardDrive, Users, ChartLine, ShieldCheck, Scales } from "@phosphor-icons/react"; + + +The token and on-chain economics layer is on the roadmap. The current billing system uses Stripe-based prepaid credits. This page covers both the current model and the full planned economic architecture. + + +## Current Billing Model + +Today, Maschina uses a prepaid credit system: + +- Credits are purchased in USD via Stripe Checkout +- Each plan tier includes a monthly token allocation +- Token consumption = (input tokens + output tokens) × model multiplier +- Usage is tracked in real time against your quota +- Overage is blocked by default; contact support for burst allowances + +```mermaid +flowchart LR + User([User]) -->|USD| Stripe[Stripe\nCheckout] + Stripe -->|credits| Quota[Token Quota\nRedis] + Quota -->|deduct on run| Runtime[Runtime] + Runtime -->|tokens consumed| Audit[Audit Log\nPostgreSQL] +``` + +This model is simple, auditable, and familiar. It runs entirely on Stripe + PostgreSQL. + +--- + +## Full Economic Architecture + +As the network scales, Maschina transitions from a centralized credit system to a distributed token economy that aligns incentives across all participants. 
+ +### Participants + +| Participant | Role | Economic Relationship | +|---|---|---| +| **Users** | Submit agent runs | Pay for compute consumed | +| **Developers** | Build and publish agents | Earn margins on skill invocations | +| **Node Operators** | Contribute compute | Earn rewards for verified work | +| **Stakers** | Provide economic security | Earn yield on staked capital | +| **Maschina** | Infrastructure and protocol | Collects network fees | + +### The Maschina Token + +The native token serves as the settlement currency for all network transactions. + +**Utility:** +- Pay for compute resources on the network +- Stake to participate as a node operator +- Earn rewards for contributing verified compute +- Governance participation (protocol upgrades, parameter changes) +- Access to premium features and priority routing + +**Token flows:** + +```mermaid +flowchart TD + User([User]) -->|pays tokens| Protocol[Protocol Layer] + Protocol -->|network fee| Treasury[Protocol Treasury] + Protocol -->|compute cost| Node[Node Operator] + Protocol -->|developer margin| Dev[Agent Developer] + Treasury -->|burn| Burn[🔥 Token Burn] + Treasury -->|grants| Grants[Developer Grants] + + style User fill:#B83232,stroke:#8C1F1F,color:#fff + style Burn fill:#3d1a1a,stroke:#8a2d2d,color:#fff +``` + +### Token Emission + +The network incentivizes early participation through a structured emission schedule: + +- Tokens are emitted as rewards for verified compute contribution +- Emission rate decreases over time (deflationary pressure) +- A portion of network fees is burned, reducing circulating supply +- Early node operators receive higher emission rates as network bootstrap incentives + +### Staking Mechanics + +Node operators stake tokens to participate in job routing. 
Staking: + +- **Signals commitment** — operators with skin in the game are prioritized +- **Provides security** — staked capital is at risk for misbehavior (slashing) +- **Earns yield** — stakers earn a share of network fees proportional to their stake +- **Enables governance** — stake-weighted voting on protocol parameters + +Minimum stake requirements vary by node tier. Enterprise nodes require higher minimum stakes but receive proportionally higher job volume and fee share. + +### Proof of Compute + +Before a node receives payment for a completed job, it must provide a Proof of Compute — a cryptographic verification that the work was performed correctly. + +```mermaid +sequenceDiagram + participant Node + participant Verifiers + participant Protocol + + Node->>Node: execute job + Node->>Node: generate proof (hash of inputs + model + outputs + env) + Node->>Verifiers: submit proof + Verifiers->>Verifiers: independent verification (subset of nodes) + Verifiers->>Protocol: consensus reached + Protocol->>Node: release payment +``` + +If verification fails or a dispute is raised, the job is re-executed and the dispute evidence is evaluated. Nodes providing fraudulent results face stake slashing. + +### Compute Marketplace Dynamics + +As more nodes join, the compute marketplace becomes competitive: + +- Node operators compete on price, latency, and reliability +- Users benefit from lower compute costs as supply grows +- Specialized nodes (high-end GPUs, specific hardware) command premiums +- Geographic arbitrage — nodes in underserved regions can offer lower latency at competitive prices + +Supply and demand dynamics are reflected in real-time routing decisions. When a particular hardware class is in high demand, the router pays premium rates. When supply is abundant, costs fall. 
+ +--- + +## Solana Integration + +Maschina's on-chain layer runs on Solana, chosen for: + +- **Transaction throughput** — high TPS required for per-execution micropayments +- **Low fees** — sub-cent transaction costs make micropayments viable +- **Ecosystem** — access to Solana DeFi for liquidity, staking derivatives, and composability +- **Helius** — used as the data layer for on-chain reputation, staking, and settlement events + +Smart contracts govern: +- Node registration and stake +- Slashing conditions and execution +- Reward distribution +- Protocol governance votes + +Off-chain systems (the Maschina API, Daemon, Runtime) handle the actual workload execution. On-chain settlement happens after job completion verification. + +--- + +## Revenue Distribution + +Every network fee is split across participants: + +```mermaid +pie title Network Fee Distribution + "Compute Rewards (Node Operators)" : 40 + "Developer Rewards (Agent Publishers)" : 30 + "Protocol Treasury" : 20 + "Token Burn" : 10 +``` + +| Recipient | Share | Purpose | +|---|---|---| +| Compute rewards pool | 40% | Distributed to node operators per verified job | +| Developer rewards | 30% | Distributed to agent publishers per invocation | +| Protocol treasury | 20% | Funds development, grants, and infrastructure | +| Token burn | 10% | Deflationary pressure, reduces circulating supply | + +These ratios are initial parameters subject to governance adjustment as the network matures. diff --git a/apps/docs/platform/marketplace.mdx b/apps/docs/platform/marketplace.mdx new file mode 100644 index 0000000..52d542d --- /dev/null +++ b/apps/docs/platform/marketplace.mdx @@ -0,0 +1,147 @@ +--- +title: Agent Marketplace +description: Discover, deploy, and monetize agents across the Maschina ecosystem. +--- + +import { Storefront, Code, Users, Robot, MagnifyingGlass, ShieldCheck, CurrencyDollar, GitBranch } from "@phosphor-icons/react"; + + +The marketplace is on the roadmap. 
This page describes the planned design. See the [roadmap](/platform/roadmap) for availability. + + +The Maschina marketplace is the economic layer where autonomous agents become discoverable services. Developers publish specialized agents, users deploy them, and the network handles discovery, routing, execution, and settlement. + +## What the Marketplace Enables + +Today, if you build a useful AI agent, your distribution options are limited: ship it as a product, wrap it in an API, or keep it internal. The marketplace creates a third option — publish it as a composable service that other agents and users can discover and invoke directly. + +This creates a new economic model for AI development: + +- **Developers** publish agents and earn per execution +- **Users** access a catalog of specialized agents without building them +- **Enterprises** deploy vetted agents with SLAs and compliance requirements +- **Agents** invoke other agents as sub-tasks in larger workflows + +## Skills + +Skills are the unit of capability in the marketplace. Every agent publishes a set of skills — structured interfaces that describe what the agent can do, what inputs it accepts, and what outputs it produces. + +```json +{ + "agentId": "agt_01abc...", + "skills": [ + { + "id": "skill_summarize", + "name": "Summarize document", + "description": "Produces a structured summary with key insights and open questions.", + "input": { + "document": "string", + "format": "bullets | prose | structured" + }, + "output": { + "summary": "string", + "keyInsights": "string[]", + "openQuestions": "string[]" + } + } + ] +} +``` + +Skills are the interface contract. A calling agent or user doesn't need to know how a skill is implemented — just what goes in and what comes out. 
+ +## Skill Discovery + +Agents and users find skills through the skill registry: + +```typescript +// Search for agents with summarization capability +const results = await maschina.marketplace.skills.search("document summarization"); + +// Get all skills for a specific agent +const skills = await maschina.marketplace.agents.getSkills("agt_01abc..."); + +// Invoke a skill directly +const result = await maschina.marketplace.skills.invoke("skill_summarize", { + document: "...", + format: "structured", +}); +``` + +## Agent Reputation + +Marketplace agents build reputation through verified execution history: + +| Signal | Weight | +|---|---| +| Successful completions | High | +| Output quality ratings | High | +| Latency consistency | Medium | +| Error rate | Medium | +| Dispute outcomes | Very high | + +Reputation is on-chain, public, and non-transferable. An agent with 10,000 successful executions and a 4.9 star average is verifiably more trustworthy than one with 10. + +## Pricing + +Marketplace tasks are priced dynamically based on: + +- **Compute cost** — tokens consumed × model multiplier +- **Developer margin** — set by the agent publisher +- **Network fee** — a small platform fee on each execution +- **Priority surcharge** — premium for time-sensitive jobs + +Developers set their margin as a multiplier on base compute cost. A 1.5x margin on a 1,000-token Haiku run means the buyer pays for 1,500 token-equivalents — 1,000 go to compute, 500 go to the developer. + +## Multi-Agent Workflows + +The marketplace enables complex multi-agent workflows where agents delegate sub-tasks to other agents: + +``` +Orchestrator Agent + ├── → Data Collection Agent (fetches raw data) + ├── → Analysis Agent (runs statistical analysis) + ├── → Reporting Agent (generates formatted report) + └── → Notification Agent (sends digest to subscribers) +``` + +Each agent in the workflow is independently deployed, independently priced, and independently accountable. 
The orchestrator handles task decomposition, delegation, result aggregation, and error handling. + +This is the foundation for autonomous software organizations — networks of agents that collaborate on complex tasks without continuous human intervention. + +## Security Controls + +Marketplace agents operate within strict security boundaries: + +- **Sandboxed execution** — all agents run in isolated containers, even marketplace agents +- **Permission declarations** — agents must declare what external services they access +- **Spend limits** — calling agents can cap how much a marketplace invocation can cost +- **Rate limits** — per-agent and per-user rate limits prevent abuse +- **Audit trail** — every marketplace invocation is logged with actor, inputs, outputs, cost, and timestamp + +## Developer Participation + +Publishing to the marketplace requires: + +1. A verified Maschina developer account +2. Agent passing automated quality and safety checks +3. Defined skill interfaces with input/output schemas +4. Pricing configuration +5. Optional: SLA tier declaration (response time guarantees) + +Developers retain ownership of their agents. Maschina provides the infrastructure, discovery, billing, and settlement layer. + +## Monetization Model + +``` +Buyer pays → Network fee deducted → Compute cost deducted → Developer receives remainder +``` + +Developers receive their margin in Maschina credits that can be: +- Used to run their own agents (consuming credits) +- Withdrawn as the network matures and token liquidity exists +- Reinvested into compute stake + +--- + +The marketplace will launch progressively — first as an internal tool for teams managing multiple agents, then as a public directory for third-party developers. 
diff --git a/apps/docs/platform/network.mdx b/apps/docs/platform/network.mdx new file mode 100644 index 0000000..21d8420 --- /dev/null +++ b/apps/docs/platform/network.mdx @@ -0,0 +1,137 @@ +--- +title: The Maschina Network +description: How distributed compute and node orchestration work across the Maschina Network. +--- + +import { Network, GitFork, HardDrive, ArrowsOut, ShieldCheck, Globe } from "@phosphor-icons/react"; + +The Maschina Network is a distributed compute environment where agent workloads are executed across a heterogeneous pool of infrastructure providers. Rather than relying on a single cloud provider, Maschina aggregates compute from multiple sources and routes jobs to the node best suited for each workload. + +## Why a Network + +Centralized compute has three fundamental problems for a platform like Maschina: + +1. **Capacity bottlenecks** — a single provider becomes the limit on how many agents can run simultaneously +2. **Cost monopoly** — no competitive pressure on pricing +3. **Geographic constraints** — latency and data residency issues for global deployments + +A distributed network solves all three by introducing competition, geographic distribution, and redundancy across providers. + +## How It Works + +When a run is submitted, the orchestration layer routes it through a multi-step process: + +```mermaid +flowchart LR + A([Job Submitted]) --> B[Task Ingestion\nvalidate · normalize · assign metadata] + B --> C[Distributed Queue\ndurable · prioritized · retry policies] + C --> D[Compute Router\nselect optimal node] + D --> E[Execution\non selected node] + E --> F[Result Validation\n& Delivery] + + style A fill:#B83232,stroke:#B83232,color:#fff + style F fill:#1a3d1a,stroke:#2d6a2d,color:#fff +``` + +### Task Metadata + +Every job carries metadata that the router uses to make scheduling decisions: + +- **Hardware requirements** — CPU-only vs. GPU-accelerated +- **Memory requirements** — standard vs. 
large-context workloads +- **Priority tier** — urgent vs. background +- **Cost constraints** — max acceptable compute cost +- **Latency sensitivity** — real-time vs. batch + +### Compute Routing Factors + +The router evaluates candidate nodes against: + +| Factor | Description | +|---|---| +| Hardware capability | Does the node have the GPU/CPU/memory required? | +| Current availability | Is the node accepting new jobs? | +| Reputation score | Historical uptime and task completion rate | +| Geographic proximity | Network latency to the requesting user | +| Cost | Pricing for the required compute class | + +Nodes with strong reputation are prioritized. Nodes with poor reliability are deprioritized and eventually removed from the active pool. + +## Infrastructure Sources + +```mermaid +graph TD + Orch[Orchestration Layer\nDaemon] + Cloud[Managed Cloud\nFly.io · AWS · GCP] + Decentral[Decentralized Networks\nAkash · Render · IO.NET] + Nodes[Community Nodes\nNode operators] + + Orch -->|primary| Cloud + Orch -->|elastic capacity| Decentral + Orch -->|community compute| Nodes + + style Orch fill:#B83232,stroke:#8C1F1F,color:#fff + style Cloud fill:#1a1a1a,stroke:#555,color:#ccc + style Decentral fill:#1a1a1a,stroke:#555,color:#ccc + style Nodes fill:#1a1a1a,stroke:#555,color:#ccc +``` + +### Managed Cloud (Current) + +The initial deployment runs entirely on managed infrastructure — Fly.io for services, Neon for PostgreSQL, Upstash for Redis. This provides the predictability needed for early-stage reliability. 
+ +### Decentralized Compute Networks (Planned) + +Integration with open compute marketplaces allows Maschina to source GPU and CPU resources on demand without owning hardware: + +- **Akash Network** — decentralized cloud, Kubernetes-compatible +- **Render Network** — GPU-focused, designed for AI workloads +- **IO.NET** — aggregated GPU capacity from data centers and consumer hardware + +These integrations allow Maschina to scale elastically and source compute at market rates rather than fixed provider pricing. + +### Community Nodes (Planned) + +Individuals and organizations can contribute compute directly to the Maschina Network by running the Maschina node client. Community nodes: + +- Register with the orchestration layer and advertise available resources +- Accept jobs that match their hardware capabilities +- Earn network incentives based on tasks completed and compute contributed + +Community nodes transform idle hardware into productive network capacity. + +## Fault Tolerance + +The distributed queue is designed for fault tolerance: + +- Jobs are durably stored in NATS JetStream — no job is lost if a node fails +- Tasks carry configurable retry policies +- If a node fails during execution, the job is automatically returned to the queue and rescheduled +- Nodes that become unreachable are removed from the active pool until they recover and re-register + +```mermaid +sequenceDiagram + participant Queue as NATS Queue + participant Daemon + participant Node as Compute Node + + Queue->>Daemon: dispatch job + Daemon->>Node: execute + Note over Node: Node fails mid-execution + Daemon->>Queue: return job to queue (retry) + Queue->>Daemon: re-dispatch job + Daemon->>Node: execute (second attempt) + Node->>Daemon: result + Daemon->>Queue: ack +``` + +## Hybrid Model + +Maschina does not commit exclusively to any infrastructure class. The orchestration layer treats all sources — managed cloud, decentralized networks, community nodes — as a unified compute pool. 
Jobs are routed to wherever offers the best balance of performance, reliability, and cost for that specific workload. + +This hybrid model provides: + +- **Resilience** — no single provider outage takes down the network +- **Cost efficiency** — competitive routing between providers +- **Scale** — compute capacity grows as more nodes join, not as contracts are signed +- **Geographic reach** — route to nodes near your users without manual configuration diff --git a/apps/docs/platform/nodes.mdx b/apps/docs/platform/nodes.mdx new file mode 100644 index 0000000..fc1bda5 --- /dev/null +++ b/apps/docs/platform/nodes.mdx @@ -0,0 +1,125 @@ +--- +title: Node Operators +description: Contribute compute to the Maschina Network and earn network incentives. +--- + +import { HardDrive, Trophy, Coins, ShieldCheck, Lightning, Wrench } from "@phosphor-icons/react"; + + +Node participation is on the roadmap. This page describes the planned design. Check the [roadmap](/platform/roadmap) for availability timing. + + +Node operators contribute compute resources — CPU, GPU, memory, storage — to the Maschina network. In return, they earn network incentives based on the compute they provide and the tasks they successfully execute. + +## Why Run a Node + +- **Monetize idle hardware** — turn unused machines into productive network participants +- **Earn network rewards** — incentives are distributed based on verified compute contribution +- **Stake for priority** — nodes with higher stake receive more favorable job routing +- **Build reputation** — consistent, reliable nodes rise in reputation and receive better assignments + +## Node Types + +| Type | Description | Hardware profile | +|---|---|---| +| **Light** | CPU-only, handles lightweight tasks | Consumer hardware, cloud VMs | +| **Standard** | CPU + moderate GPU, handles most agent workloads | Mid-tier GPU (e.g. 
RTX 3090, A10) |
+| **Pro** | High-end GPU, handles large model inference | Data center GPU (A100, H100, H200) |
+| **Enterprise** | Dedicated infrastructure, SLA-backed | Colocation or private cloud |
+
+The orchestration system matches job requirements to node capabilities. A Pro node will receive preference for GPU-intensive workloads. A Light node handles simple CPU tasks.
+
+## How Nodes Work
+
+### Registration
+
+Install the Maschina node client and register with the network:
+
+```bash
+maschina node init
+maschina node register --type standard --stake 1000
+```
+
+During registration, the node:
+1. Publishes its available hardware resources to the infrastructure registry
+2. Submits a stake to participate in the job routing pool
+3. Begins receiving health check requests from the orchestration layer
+
+### Accepting jobs
+
+The orchestration system dispatches jobs to registered nodes based on capability matching, availability, reputation, and stake tier. The node client runs the job in an isolated container and returns the result.
+
+Node operators can configure:
+- Maximum concurrent jobs
+- Workload types to accept or reject
+- Available hours (schedule-based participation)
+- Minimum job price
+
+### Execution environment
+
+All jobs run in sandboxed containers. Node operators do not have access to job inputs or outputs — execution is isolated. TEE (Trusted Execution Environment) attestation is planned to provide verifiable proof that jobs were executed correctly without exposing content.
+
+## Reputation System
+
+Every node builds a reputation score based on:
+
+| Signal | Effect |
+|---|---|
+| Successful task completions | Increases score |
+| Failed or timed-out tasks | Decreases score |
+| Uptime consistency | Increases score |
+| Response latency | Affects job routing preference |
+| Dispute outcomes | Major impact, positive or negative |
+
+Nodes with high reputation are prioritized in job routing. 
Nodes with persistent low performance are removed from the active pool. Reputation is public and on-chain. + +## Staking + +Staking is the mechanism by which node operators signal commitment to the network. + +- Nodes must stake a minimum amount to participate +- Higher stake → higher job routing priority +- Stake is at risk if a node misbehaves (see slashing) +- Staking rewards are distributed from the network's emission schedule + +### Slashing conditions + +| Condition | Penalty | +|---|---| +| Returning incorrect results | Stake slash + temporary suspension | +| Failing to execute accepted jobs | Minor stake reduction | +| Persistent downtime | Graduated stake reduction | +| Confirmed malicious behavior | Full slash + permanent ban | + +Slashing is governed by the network's consensus mechanism. Disputes can be raised and are resolved through multi-node verification. + +## TEE Attestation (Planned) + +Trusted Execution Environments provide cryptographic proof that: +- A job was executed on the claimed hardware +- The execution environment was not tampered with +- The output was produced by the correct model + +TEE attestation is the planned mechanism for allowing nodes to prove work without exposing the inputs or outputs to the node operator. This is critical for privacy-sensitive enterprise workloads. + +## Developer Integration + +Node operators can register their nodes to serve specific agent types or skill categories. This allows agents to be routed specifically to nodes that have been verified for a particular workload class. + +```bash +# Register capability tags +maschina node capabilities add --tag gpu-inference --tag large-context --tag code-execution + +# View current node status +maschina node status + +# View earnings summary +maschina node earnings +``` + +## Getting Started + +Node participation will launch with the distributed compute phase. 
To be notified: + +- Follow [@MaschinaAI](https://x.com/MaschinaAI) for release announcements +- Watch the [GitHub](https://github.com/maschina-labs) for the node client release diff --git a/apps/docs/platform/overview.mdx b/apps/docs/platform/overview.mdx new file mode 100644 index 0000000..d210caf --- /dev/null +++ b/apps/docs/platform/overview.mdx @@ -0,0 +1,119 @@ +--- +title: Platform Overview +description: How Maschina is designed — the layers, the vision, and where it's going. +--- + +import { Stack, ComputerTower, Rows, Robot, Broadcast, Storefront, AppWindow, Network, ShieldCheck, Infinity, ArrowsOut } from "@phosphor-icons/react"; + +Maschina is not just an API wrapper around a language model. It is a distributed infrastructure platform designed to be the foundational layer for autonomous digital labor at scale. + +The platform is built in layers. Each layer operates independently while remaining compatible with the rest of the stack. This design allows individual components to evolve without breaking the system. + +## The Seven Layers + +```mermaid +graph TD + A["Application Layer
Dashboard · CLI · SDKs · Third-party apps"] + B["Marketplace Layer
Agent discovery · Skill registry · Pricing"] + C["Communication Layer
NATS JetStream · WebSocket · SSE · Events"] + D["Agent Runtime Layer
FastAPI executor · Tool calling · Memory"] + E["Orchestration Layer
Daemon · Job scheduling · Node routing"] + F["Compute Layer
Containerized workloads · GPU routing"] + G["Infrastructure Layer
Managed cloud · Decentralized compute nodes"] + + A --> B --> C --> D --> E --> F --> G + + style A fill:#1a1a1a,stroke:#B83232,color:#fff + style B fill:#1a1a1a,stroke:#B83232,color:#fff + style C fill:#1a1a1a,stroke:#B83232,color:#fff + style D fill:#1a1a1a,stroke:#B83232,color:#fff + style E fill:#1a1a1a,stroke:#B83232,color:#fff + style F fill:#1a1a1a,stroke:#B83232,color:#fff + style G fill:#1a1a1a,stroke:#B83232,color:#fff +``` + +### Infrastructure Layer + +The foundation. Provides the physical and virtual hardware on which all workloads execute. This layer is intentionally hybrid — Maschina aggregates resources from: + +- **Managed cloud** (Fly.io, AWS) — the initial deployment target; predictable, reliable +- **Decentralized networks** (Akash, Render, IO.NET) — elastic capacity sourced from open compute markets +- **Community nodes** — hardware contributed by node operators in exchange for network incentives + +No single provider owns the infrastructure. This is by design. + +### Compute Layer + +Manages the execution environments. All agent workloads run in containers for isolation and reproducibility. The compute layer handles: + +- Container lifecycle and resource limits +- GPU scheduling for inference-heavy workloads +- Execution environment isolation between tenants + +### Orchestration Layer + +The control plane. The Daemon (Rust) is the central orchestrator — it consumes jobs from NATS JetStream, evaluates them, routes them to the appropriate compute node, dispatches to the Runtime, and records results. + +When a run is submitted, the orchestration layer decides: +- Which node is best suited (capability, availability, reputation, cost) +- How to retry on failure +- When to escalate or dead-letter + +### Agent Runtime Layer + +Where agents actually execute. 
The Runtime (Python/FastAPI) receives dispatched jobs, runs input risk checks, routes to the correct model provider, executes multi-turn conversations with tool calling, runs output risk checks, and returns the result. It is intentionally isolated from the rest of the stack — it receives a job, executes it, and returns a structured result. + +### Communication Layer + +All internal and external events flow through NATS JetStream. This provides: + +- Durable job queues that survive restarts +- Fan-out event streams for realtime updates +- Ordered delivery guarantees +- Dead-letter handling for failed jobs + +External clients receive events via WebSocket or SSE through the Realtime service. + +### Marketplace Layer + +Coming. The marketplace layer will allow developers to publish agents as discoverable services with defined skills, pricing, and SLA expectations. Other agents and users can discover and invoke marketplace agents without knowing implementation details. + +### Application Layer + +The SDKs, CLI, dashboard, and any third-party apps built on top. This is where developers spend most of their time. + +--- + +## Design Principles + +**Infrastructure first.** Maschina provides the layer beneath the intelligence. Developers build what agents do — Maschina handles how they run. + +**Agent-first architecture.** Agents are the central building block, not requests or sessions. Everything — billing, auth, observability, routing — is modeled around the agent primitive. + +**Composability.** Agents can call other agents. Skills are defined through standardized interfaces. The system is designed for multi-agent workflows to emerge naturally. + +**Autonomy.** Agents should be able to operate without continuous human supervision. The platform provides the scaffolding for truly autonomous workloads. 
+ +**Decentralization.** As the network matures, compute ownership will shift from centralized providers to a distributed network of node operators, reducing single points of failure and enabling competitive compute pricing. + +**Verifiable execution.** Tasks executed across distributed providers must produce results that can be trusted. Multi-node verification, consensus validation, and TEE attestation ensure the system remains reliable across heterogeneous infrastructure. + +--- + +## Current State vs. Full Vision + +| Area | Current State | Full Vision | +|---|---|---| +| Compute | Managed cloud (Fly.io) | Hybrid: cloud + decentralized nodes + community hardware | +| Models | Anthropic, OpenAI, Ollama | Any provider + fine-tuned models on network nodes | +| Agent execution | Single-agent runs | Multi-agent workflows, agent-to-agent calling | +| Marketplace | — | Agent discovery, skill registry, usage-based pricing | +| Economics | Stripe credits | Maschina token + on-chain settlement (Solana) | +| Identity | JWT + API keys | Platform identity + node identity + on-chain reputation | +| Nodes | — | Open node network with staking, reputation, and TEE attestation | +| Memory | Per-run context | Short-term, long-term, and shared network memory across agents | +| Governance | Centralized | Stake-weighted on-chain governance for protocol parameters | + +The platform is being built in phases. The current release covers the core infrastructure stack. Distributed compute, the marketplace, and on-chain economics are on the active roadmap. + +See the [roadmap](/platform/roadmap) for the full phased plan. diff --git a/apps/docs/platform/roadmap.mdx b/apps/docs/platform/roadmap.mdx new file mode 100644 index 0000000..11bc195 --- /dev/null +++ b/apps/docs/platform/roadmap.mdx @@ -0,0 +1,114 @@ +--- +title: Roadmap +description: What's built, what's in progress, and what's coming. +--- + +Maschina is being built in phases. 
Each phase delivers a complete, usable platform while laying the foundation for the next. + +## Phase 1 — Core infrastructure (current) + +The complete backend infrastructure stack for building and running agents. + +**Completed:** + +- [x] Agent runtime (Python/FastAPI) — multi-turn, tool calling, risk checks +- [x] Model routing — Anthropic (Claude 4.x), OpenAI (GPT-5 series), Ollama, passthrough +- [x] Cascade model fallback — automatic fallback on provider unavailability +- [x] Job queue — NATS JetStream pull consumer with retries and backoff +- [x] Authentication — JWT, sessions, API keys, OAuth, RBAC +- [x] Billing — Stripe Checkout, prepaid credits, per-model multipliers, quota enforcement +- [x] Webhooks — outbound delivery, HMAC-SHA256 signing, retry with exponential backoff +- [x] Realtime — WebSocket and SSE run status streaming +- [x] Search — Meilisearch full-text search across agents +- [x] Compliance — audit log, GDPR Article 17 deletion, retention policies (M10+) +- [x] Gateway — Rust/Axum JWT validation, rate limiting, reverse proxy +- [x] CLI — `maschina` binary: auth, agent management, key management, usage, setup +- [x] SDKs — TypeScript, Python, Rust +- [x] Observability — OpenTelemetry, Prometheus, Sentry, Grafana +- [x] Docker deployment — all services containerized with Compose + +**In progress:** + +- [ ] apps/auth — authentication web app +- [ ] apps/app — main user dashboard +- [ ] apps/web — public marketing site +- [ ] apps/docs — this docs site (publishing soon) +- [ ] maschina-labs/self-hosted — public Apache-2.0 self-hosting repo +- [ ] SDK publishing — npm, PyPI, crates.io + +--- + +## Phase 2 — Multi-agent and marketplace + +Enabling agents to call other agents and participate in a structured marketplace. 
+
+- [ ] Agent-to-agent calling — agents invoke other agents as sub-tasks
+- [ ] Multi-agent workflow orchestration — sequential and parallel agent pipelines
+- [ ] Skill framework — structured input/output interfaces for marketplace agents
+- [ ] Agent marketplace — public directory with search, discovery, and invocation
+- [ ] Developer accounts — publish agents, set pricing, view earnings
+- [ ] Marketplace billing — per-invocation pricing, developer margin configuration
+- [ ] Agent versioning — deploy new versions without breaking running workflows
+
+---
+
+## Phase 3 — Distributed compute
+
+Opening the infrastructure to external compute providers and community nodes.
+
+- [ ] Node client — `maschina node` binary for running a compute node
+- [ ] Node registration — hardware capability advertising and identity
+- [ ] Compute router — multi-factor node selection (capability, reputation, cost, latency)
+- [ ] Node reputation system — on-chain score based on verified task history
+- [ ] Akash Network integration — dynamic compute sourcing from decentralized cloud
+- [ ] Render Network integration — GPU workloads on decentralized GPU market
+- [ ] IO.NET integration — aggregated GPU capacity
+- [ ] Proof of Compute — cryptographic verification of executed work
+- [ ] TEE attestation — trusted execution environment support for verified nodes
+- [ ] Node dashboard — earnings, reputation, job history
+
+---
+
+## Phase 4 — Token and on-chain economics
+
+Full economic infrastructure for a self-sustaining network. 
+ +- [ ] Maschina token — native settlement token on Solana +- [ ] Staking contracts — node staking, delegation, yield distribution +- [ ] Slashing contracts — automated penalty execution for misbehavior +- [ ] Token emission schedule — compute rewards, ecosystem grants, team vesting +- [ ] On-chain settlement — per-execution micropayments to node operators +- [ ] Governance contracts — stake-weighted voting on protocol parameters +- [ ] Token burn mechanism — deflationary pressure from network fees +- [ ] Liquidity program — initial DEX liquidity on Solana + +--- + +## Phase 5 — Scale and ecosystem + +Enterprise features, edge compute, and autonomous organizations. + +- [ ] Enterprise tier — dedicated infrastructure, SLAs, custom contracts +- [ ] Edge compute — IoT and edge device participation in the node network +- [ ] Hardware acceleration — specialized routing for TPU, custom ASICs +- [ ] Cross-network compute federation — interoperability with other compute networks +- [ ] Autonomous agent organizations — groups of agents with shared resources and governance +- [ ] Global compute grid — geographically distributed routing for latency-sensitive workloads +- [ ] Advanced analytics — network-wide performance, economic, and usage dashboards + +--- + +## Guiding principles + +Every phase decision is guided by: + +1. **Working software first** — each phase ships something useful, not just design +2. **Backwards compatibility** — Phase 1 integrations work in Phase 5 +3. **Progressive decentralization** — centralized components are replaced incrementally, not all at once +4. **Developer experience** — the API and SDK must remain simple as the underlying system grows complex + +--- + + +This roadmap represents current intentions. Priorities may shift based on user feedback, ecosystem developments, and engineering realities. Follow [@MaschinaAI](https://x.com/MaschinaAI) on X for updates. 
+ diff --git a/apps/docs/quickstart.mdx b/apps/docs/quickstart.mdx new file mode 100644 index 0000000..0c5a779 --- /dev/null +++ b/apps/docs/quickstart.mdx @@ -0,0 +1,146 @@ +--- +title: Quickstart +description: Create and run your first agent in under 5 minutes. +--- + +import { Key, Package, Robot, Play, DownloadSimple, ArrowRight } from "@phosphor-icons/react"; + +## 1. Get an API key + +Sign up at [app.maschina.ai](https://app.maschina.ai/register) and create an API key from the dashboard. + +## 2. Install the SDK + + + +```bash npm +npm install @maschina/sdk +``` + +```bash pnpm +pnpm add @maschina/sdk +``` + +```bash yarn +yarn add @maschina/sdk +``` + +```bash bun +bun add @maschina/sdk +``` + +```bash pip +pip install maschina-sdk +``` + + + +## 3. Create an agent + + + +```typescript TypeScript +import { MaschinaClient } from "@maschina/sdk"; + +const maschina = new MaschinaClient({ + apiKey: process.env.MASCHINA_API_KEY, +}); + +const agent = await maschina.agents.create({ + name: "My First Agent", + type: "execution", + config: { + systemPrompt: "You are a helpful assistant. Complete tasks concisely.", + }, +}); + +console.log(agent.id); +``` + +```python Python +from maschina import MaschinaClient + +maschina = MaschinaClient(api_key="your-api-key") + +agent = maschina.agents.create( + name="My First Agent", + type="execution", + config={ + "systemPrompt": "You are a helpful assistant. Complete tasks concisely." + } +) + +print(agent.id) +``` + +```bash cURL +curl -X POST https://api.maschina.ai/agents \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "My First Agent", + "type": "execution", + "config": { + "systemPrompt": "You are a helpful assistant. Complete tasks concisely." + } + }' +``` + + + +## 4. 
Run the agent + + + +```typescript TypeScript +const run = await maschina.agents.run(agent.id, { + input: { + message: "Summarize the benefits of async programming in three bullet points.", + }, +}); + +console.log(run.runId); // poll or use webhooks for result +``` + +```python Python +run = maschina.agents.run(agent.id, input={ + "message": "Summarize the benefits of async programming in three bullet points." +}) + +print(run.run_id) +``` + +```bash cURL +curl -X POST https://api.maschina.ai/agents/AGENT_ID/run \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{"input": {"message": "Summarize the benefits of async programming in three bullet points."}}' +``` + + + +## 5. Get the result + +Runs are async. Poll the run status or set up a [webhook](/guides/webhooks) to receive results automatically. + +```bash +curl https://api.maschina.ai/agents/AGENT_ID/runs/RUN_ID \ + -H "Authorization: Bearer YOUR_API_KEY" +``` + +## Next steps + + + + Understand agents, runs, models, and plans. + + + Choose the right model for your use case. + + + Get notified when runs complete. + + + Manage agents from the terminal. + + diff --git a/apps/docs/sdks/python.mdx b/apps/docs/sdks/python.mdx new file mode 100644 index 0000000..6cb39b1 --- /dev/null +++ b/apps/docs/sdks/python.mdx @@ -0,0 +1,246 @@ +--- +title: Python SDK +description: The official Maschina SDK for Python — sync and async. 
+--- + +import { Code, Robot, ArrowsClockwise, Lightning, Key, GitBranch, ChartLine } from "@phosphor-icons/react"; + +## Installation + +```bash +pip install maschina-sdk +``` + +## Setup + +```python +from maschina import MaschinaClient + +# From explicit key +maschina = MaschinaClient(api_key="msk_live_...") + +# From environment variable MASCHINA_API_KEY +maschina = MaschinaClient() +``` + +--- + +## Agents + +```python +# Create +agent = maschina.agents.create( + name="Research Agent", + type="analysis", + config={ + "systemPrompt": "You are a research analyst. Return structured summaries.", + "model": "claude-sonnet-4-6", + } +) +print(agent.id) # agt_01abc... + +# List +agents = maschina.agents.list(limit=20, offset=0) +for a in agents.data: + print(a.name, a.status) + +# Get +agent = maschina.agents.get("agt_01abc...") + +# Update +maschina.agents.update("agt_01abc...", name="Updated Name") + +# Delete +maschina.agents.delete("agt_01abc...") +``` + +--- + +## Runs + +```python +# Run an agent +run = maschina.agents.run("agt_01abc...", input={ + "message": "Summarize recent developments in multi-agent systems." +}) +print(run.run_id) # run_01xyz... 
+ +# Get run status +status = maschina.agents.get_run("agt_01abc...", run.run_id) +print(status.status) # queued | running | completed | failed + +# List runs for an agent +runs = maschina.agents.list_runs("agt_01abc...", limit=20) +for r in runs.data: + print(r.id, r.status, r.duration_ms) + +# Poll for completion +import time + +def wait_for_run(agent_id: str, run_id: str): + while True: + run = maschina.agents.get_run(agent_id, run_id) + if run.status == "completed": + return run.output_payload + if run.status == "failed": + raise RuntimeError(f"{run.error_code}: {run.error_message}") + time.sleep(1) + +output = wait_for_run(agent.id, run.run_id) +print(output) +``` + +--- + +## API Keys + +```python +# List keys +keys = maschina.keys.list() +for k in keys: + print(k.id, k.name, k.prefix) + +# Create key +key = maschina.keys.create(name="production") +print(key.key) # shown once — save it + +# Revoke key +maschina.keys.revoke("key_01abc...") +``` + +--- + +## Webhooks + +```python +# Create +webhook = maschina.webhooks.create( + url="https://your-app.com/webhooks/maschina", + events=["agent.run.completed", "agent.run.failed"], +) +print(webhook.secret) # shown once — save it + +# List +webhooks = maschina.webhooks.list() + +# Update +maschina.webhooks.update("wh_01abc...", active=False) + +# Delete +maschina.webhooks.delete("wh_01abc...") + +# Test delivery +maschina.webhooks.test("wh_01abc...") +``` + +--- + +## Usage + +```python +usage = maschina.usage.get() +print(usage.tokens_used, "/", usage.tokens_limit) +print(f"Quota: {usage.tokens_used / usage.tokens_limit * 100:.1f}%") + +# Usage by model +by_model = maschina.usage.by_model() +for entry in by_model: + print(entry.model, entry.input_tokens, entry.output_tokens) +``` + +--- + +## Async Support + + All methods are available on the async client with `await`: + +```python +import asyncio +from maschina import AsyncMaschinaClient + +async def main(): + maschina = AsyncMaschinaClient() + + agent = await 
maschina.agents.create( + name="Async Research Agent", + type="analysis", + config={"systemPrompt": "You are a research analyst."}, + ) + + run = await maschina.agents.run(agent.id, input={ + "message": "Summarize transformer architecture." + }) + + # Poll + while True: + status = await maschina.agents.get_run(agent.id, run.run_id) + if status.status in ("completed", "failed"): + print(status.output_payload) + break + await asyncio.sleep(1) + +asyncio.run(main()) +``` + +--- + +## Verifying Webhooks + +```python +import hashlib +import hmac + +def verify_webhook(payload: bytes, secret: str, header: str) -> bool: + expected = "sha256=" + hmac.new( + secret.encode(), + payload, + hashlib.sha256 + ).hexdigest() + return hmac.compare_digest(header, expected) + +# Flask example +from flask import Flask, request, abort + +app = Flask(__name__) + +@app.route("/webhooks/maschina", methods=["POST"]) +def handle_webhook(): + sig = request.headers.get("X-Maschina-Signature", "") + + if not verify_webhook(request.data, WEBHOOK_SECRET, sig): + abort(401) + + event = request.json + + if event["type"] == "agent.run.completed": + run_id = event["data"]["run_id"] + print("Completed:", run_id) + + if event["type"] == "agent.run.failed": + print("Failed:", event["data"]["error_code"]) + + return "ok", 200 +``` + +--- + +## Error Handling + +```python +from maschina.exceptions import MaschinaError, QuotaExceededError, NotFoundError + +try: + run = maschina.agents.run("agt_01abc...", input={"message": "hello"}) +except QuotaExceededError: + print("Monthly quota exhausted. Upgrade your plan.") +except NotFoundError: + print("Agent not found.") +except MaschinaError as e: + print(f"Error {e.status}: {e.message}") +``` + +--- + +## Source + +The Python SDK is open source. View it on [GitHub](https://github.com/maschina-labs/sdk-python). 
diff --git a/apps/docs/sdks/rest.mdx b/apps/docs/sdks/rest.mdx new file mode 100644 index 0000000..23aaa54 --- /dev/null +++ b/apps/docs/sdks/rest.mdx @@ -0,0 +1,147 @@ +--- +title: REST API +description: Direct HTTP integration with the Maschina API. +--- + +## Base URL + +``` +https://api.maschina.ai +``` + +All endpoints are versioned implicitly. Breaking changes are communicated via changelog and webhook `api_version` field. + +--- + +## Authentication + +All requests require a bearer API key: + +```bash +Authorization: Bearer msk_live_... +``` + +Get your API key from the [dashboard](https://app.maschina.ai/keys) or via the CLI: + +```bash +maschina keys create "my-key" +``` + +API keys are prefixed with `msk_live_` for production and `msk_test_` for test mode. Never commit keys to source control — use environment variables. + +--- + +## Request Format + +Set `Content-Type: application/json` on all POST and PATCH requests: + +```bash +curl -X POST https://api.maschina.ai/agents \ + -H "Authorization: Bearer msk_live_..." \ + -H "Content-Type: application/json" \ + -d '{"name": "My Agent", "type": "execution", "config": {"systemPrompt": "You are helpful."}}' +``` + +--- + +## Response Format + +All responses are JSON. Successful responses return the resource object directly. List responses wrap results in a `data` array with pagination metadata. + +```json +{ + "data": [...], + "total": 47, + "limit": 20, + "offset": 0 +} +``` + +--- + +## Errors + +Maschina uses standard HTTP status codes. All error responses include a `message` field. 
+ + +| Status | Meaning | +|---|---| +| `200` | Success | +| `201` | Created | +| `202` | Accepted (async — run was queued) | +| `400` | Bad request — check the `message` field | +| `401` | Invalid or missing API key | +| `403` | Insufficient plan or permissions | +| `404` | Resource not found | +| `429` | Rate limited or quota exceeded | +| `500` | Server error | + +```json +{ + "message": "Model claude-opus-4-6 requires the M10 plan or higher." +} +``` + +--- + +## Rate Limits + +Rate limits apply per API key and per IP at the Gateway layer. + +| Tier | Requests / minute | +|---|---| +| Access (free) | 60 | +| M1 | 120 | +| M5 | 300 | +| M10 | 600 | +| Mach Team | 600 per seat | +| Enterprise | Custom | + +When rate limited, the API returns `429` with a `Retry-After` header indicating seconds to wait. + +```bash +HTTP/1.1 429 Too Many Requests +Retry-After: 14 +``` + +--- + +## Pagination + +List endpoints support `limit` and `offset` query parameters. + +```bash +GET /agents?limit=20&offset=40 +``` + +Iterate through pages: + +```typescript +let offset = 0; +const limit = 100; +let all = []; + +while (true) { + const res = await maschina.agents.list({ limit, offset }); + all = all.concat(res.data); + if (all.length >= res.total) break; + offset += limit; +} +``` + +--- + +## Idempotency + +POST requests to `/agents` and `/agents/:id/run` are not idempotent by default. To avoid duplicate agents or runs on network retries, check for existing resources before creating. + +--- + +## SDKs + +The SDKs handle auth, retries, and error handling automatically. Use them when possible. + +- [TypeScript SDK](/sdks/typescript) +- [Python SDK](/sdks/python) +- [Rust SDK](/sdks/rust) + +See the full [API reference](/api-reference/authentication) for all endpoints.
diff --git a/apps/docs/sdks/rust.mdx b/apps/docs/sdks/rust.mdx new file mode 100644 index 0000000..686b9cd --- /dev/null +++ b/apps/docs/sdks/rust.mdx @@ -0,0 +1,212 @@ +--- +title: Rust SDK +description: The official Maschina SDK for Rust — async, tokio-based. +--- + +import { Code, Robot, ArrowsClockwise, Key, GitBranch } from "@phosphor-icons/react"; + +## Installation + +Add to your `Cargo.toml`: + +```toml +[dependencies] +maschina = "0.1" +tokio = { version = "1", features = ["full"] } +serde_json = "1" +anyhow = "1" +``` + +--- + +## Setup + +```rust +use maschina::MaschinaClient; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // From environment variable MASCHINA_API_KEY + let client = MaschinaClient::from_env()?; + + // Or explicitly + let client = MaschinaClient::new(std::env::var("MASCHINA_API_KEY")?); + + Ok(()) +} +``` + +--- + +## Agents + +```rust +use maschina::agents::{CreateAgentRequest, AgentConfig, UpdateAgentRequest}; + +// Create +let agent = client.agents().create(CreateAgentRequest { + name: "Research Agent".into(), + agent_type: "analysis".into(), + description: Some("Produces structured research summaries.".into()), + config: Some(AgentConfig { + system_prompt: Some("You are a research analyst.".into()), + model: Some("claude-sonnet-4-6".into()), + }), +}).await?; + +println!("Created: {}", agent.id); + +// List +let agents = client.agents().list(None, None).await?; +for a in &agents.data { + println!("{} — {}", a.name, a.status); +} + +// Get +let agent = client.agents().get(&agent.id).await?; + +// Update +client.agents().update(&agent.id, UpdateAgentRequest { + name: Some("Updated Name".into()), + ..Default::default() +}).await?; + +// Delete +client.agents().delete(&agent.id).await?; +``` + +--- + +## Runs + +```rust +use maschina::runs::RunInput; +use serde_json::json; + +// Run an agent +let run = client.agents().run(&agent.id, RunInput { + input: json!({ "message": "Summarize transformer architecture." 
}), + model: None, + timeout: Some(120_000), +}).await?; + +println!("Run queued: {}", run.run_id); + +// Get run status +let status = client.agents().get_run(&agent.id, &run.run_id).await?; +println!("Status: {}", status.status); + +// Poll for completion +loop { + let r = client.agents().get_run(&agent.id, &run.run_id).await?; + match r.status.as_str() { + "completed" => { + println!("Output: {:?}", r.output_payload); + break; + } + "failed" => { + eprintln!("Failed: {} — {}", r.error_code.unwrap_or_default(), r.error_message.unwrap_or_default()); + break; + } + _ => tokio::time::sleep(std::time::Duration::from_secs(1)).await, + } +} + +// List runs +let runs = client.agents().list_runs(&agent.id, None, None).await?; +for r in &runs.data { + println!("{} — {} ({}ms)", r.id, r.status, r.duration_ms.unwrap_or(0)); +} +``` + +--- + +## API Keys + +```rust +use maschina::keys::CreateKeyRequest; + +// List +let keys = client.keys().list().await?; +for k in &keys { + println!("{} — {}", k.name, k.prefix); +} + +// Create +let key = client.keys().create(CreateKeyRequest { + name: "production".into(), +}).await?; +println!("Key: {}", key.key); // shown once + +// Revoke +client.keys().revoke(&key.id).await?; +``` + +--- + +## Webhooks + +```rust +use maschina::webhooks::{CreateWebhookRequest, UpdateWebhookRequest}; + +// Create +let webhook = client.webhooks().create(CreateWebhookRequest { + url: "https://your-app.com/webhooks/maschina".into(), + events: vec![ + "agent.run.completed".into(), + "agent.run.failed".into(), + ], +}).await?; +println!("Secret: {}", webhook.secret); // shown once + +// List +let webhooks = client.webhooks().list().await?; + +// Update +client.webhooks().update(&webhook.id, UpdateWebhookRequest { + active: Some(false), + ..Default::default() +}).await?; + +// Test +client.webhooks().test(&webhook.id).await?; + +// Delete +client.webhooks().delete(&webhook.id).await?; +``` + +--- + +## Usage + +```rust +let usage = client.usage().get().await?; 
+println!( + "Tokens: {} / {} ({:.1}%)", + usage.tokens_used, + usage.tokens_limit, + usage.tokens_used as f64 / usage.tokens_limit as f64 * 100.0, +); +``` + +--- + +## Error Handling + +```rust +use maschina::error::MaschinaError; + +match client.agents().run(&agent_id, input).await { + Ok(run) => println!("Queued: {}", run.run_id), + Err(MaschinaError::QuotaExceeded) => eprintln!("Quota exhausted"), + Err(MaschinaError::Forbidden(msg)) => eprintln!("Plan restriction: {}", msg), + Err(MaschinaError::NotFound) => eprintln!("Agent not found"), + Err(e) => eprintln!("Error: {}", e), +} +``` + +--- + +## Source + +The Rust SDK is open source. View it on [GitHub](https://github.com/maschina-labs/sdk-rust). diff --git a/apps/docs/sdks/typescript.mdx b/apps/docs/sdks/typescript.mdx new file mode 100644 index 0000000..bea9eff --- /dev/null +++ b/apps/docs/sdks/typescript.mdx @@ -0,0 +1,137 @@ +--- +title: TypeScript SDK +description: The official Maschina SDK for TypeScript and JavaScript. +--- + +import { Code, Robot, ArrowsClockwise, Key, GitBranch, ChartLine, Warning } from "@phosphor-icons/react"; + +## Installation + + + +```bash npm +npm install @maschina/sdk +``` + +```bash pnpm +pnpm add @maschina/sdk +``` + +```bash yarn +yarn add @maschina/sdk +``` + +```bash bun +bun add @maschina/sdk +``` + + + +## Setup + +```typescript +import { MaschinaClient } from "@maschina/sdk"; + +const maschina = new MaschinaClient({ + apiKey: process.env.MASCHINA_API_KEY, + // baseUrl: "https://api.maschina.ai" // default +}); +``` + +## Agents + +```typescript +// Create +const agent = await maschina.agents.create({ + name: "My Agent", + type: "execution", + config: { systemPrompt: "You are a helpful assistant." 
}, +}); + +// List +const agents = await maschina.agents.list(); + +// Get +const agent = await maschina.agents.get("agt_01..."); + +// Update +await maschina.agents.update("agt_01...", { name: "Updated Name" }); + +// Delete +await maschina.agents.delete("agt_01..."); +``` + +## Runs + +```typescript +// Run an agent +const run = await maschina.agents.run("agt_01...", { + input: { message: "Do something." }, + model: "claude-sonnet-4-6", // optional override +}); + +// Get run status +const status = await maschina.agents.getRun("agt_01...", run.runId); +``` + +## API Keys + +```typescript +// List keys +const keys = await maschina.keys.list(); + +// Create key +const key = await maschina.keys.create({ name: "production" }); +console.log(key.key); // shown once + +// Revoke key +await maschina.keys.revoke("key_01..."); +``` + +## Webhooks + +```typescript +// Create +const webhook = await maschina.webhooks.create({ + url: "https://your-app.com/webhooks", + events: ["agent.run.completed", "agent.run.failed"], +}); +console.log(webhook.secret); // shown once + +// List +const webhooks = await maschina.webhooks.list(); + +// Update +await maschina.webhooks.update("wh_01...", { active: false }); + +// Delete +await maschina.webhooks.delete("wh_01..."); + +// Test +await maschina.webhooks.test("wh_01..."); +``` + +## Usage + +```typescript +const usage = await maschina.usage.get(); +console.log(usage.tokensUsed, usage.tokensLimit); +``` + +## Error Handling + +```typescript +import { MaschinaError } from "@maschina/sdk"; + +try { + await maschina.agents.run("agt_01...", { input: {} }); +} catch (err) { + if (err instanceof MaschinaError) { + console.log(err.status, err.message); + } +} +``` + +## Source + +The TypeScript SDK is open source. View it on [GitHub](https://github.com/maschina-labs/sdk-typescript). 
diff --git a/apps/docs/self-hosting/architecture.mdx b/apps/docs/self-hosting/architecture.mdx new file mode 100644 index 0000000..4094778 --- /dev/null +++ b/apps/docs/self-hosting/architecture.mdx @@ -0,0 +1,115 @@ +--- +title: Architecture +description: How Maschina's services fit together — request flow, data layer, and distributed runtime. +--- + +import { Stack, Network, Database, Broadcast, ArrowsClockwise, ShieldCheck, Robot } from "@phosphor-icons/react"; + +Maschina is a layered infrastructure platform. Each service has a single responsibility and communicates through well-defined interfaces. No service is a monolith — every layer can evolve independently. + +## Request Flow + +Every client request flows through a strict pipeline: + +```mermaid +flowchart TD + Client([Client]) + Gateway[Gateway\nRust / Axum] + API[API\nTypeScript / Hono] + NATS[NATS JetStream] + Daemon[Daemon\nRust] + Runtime[Runtime\nPython / FastAPI] + LLM([LLM Provider]) + Realtime[Realtime\nRust / Axum] + + Client -->|HTTPS / WSS| Gateway + Gateway -->|JWT validate\nRate limit\nProxy| API + API -->|Auth / RBAC\nBilling quota\nAgent CRUD| NATS + NATS -->|Pull consumer| Daemon + Daemon -->|HTTP dispatch| Runtime + Runtime -->|Anthropic / OpenAI / Ollama| LLM + Runtime -->|Result| Daemon + Daemon -->|Completion event| NATS + NATS -->|Fan-out| Realtime + Realtime -->|WebSocket / SSE| Client +``` + +## Services + +| Service | Language | Role | +|---|---|---| +| **Gateway** | Rust / Axum | JWT validation, per-IP and per-user rate limiting, HTTP + WebSocket proxy | +| **API** | TypeScript / Hono | Auth, RBAC, billing, agent CRUD, webhook management, search, compliance | +| **Daemon** | Rust | NATS pull consumer, job orchestration, node routing, quota accounting | +| **Runtime** | Python / FastAPI | Agent execution, multi-turn LLM calls, tool calling, risk checks | +| **Realtime** | Rust / Axum | WebSocket + SSE, per-user event fan-out from NATS | + +## Data Layer + +```mermaid +graph LR + 
API --> PG[(PostgreSQL\nNeon)] + API --> Redis[(Redis\nUpstash)] + API --> Meili[(Meilisearch)] + Daemon --> PG + Daemon --> NATS[(NATS\nJetStream)] + Runtime --> PG + Realtime --> NATS +``` + +| Store | Purpose | +|---|---| +| **PostgreSQL** | Users, agents, runs, keys, webhooks, billing, audit logs — source of truth | +| **Redis** | Quota counters, rate limit state — low-latency reads | +| **NATS JetStream** | Durable job queue, event bus, webhook dispatch, realtime fan-out | +| **Meilisearch** | Full-text search index for agents — synced on create / update / delete | + +## Daemon Pipeline + + The Daemon (Rust) is the orchestration core. It runs a four-stage pipeline for every agent run: + +``` +SCAN → EVALUATE → EXECUTE → ANALYZE +``` + +1. **SCAN** — pulls jobs from the `MASCHINA_JOBS` NATS stream via pull consumer +2. **EVALUATE** — validates the job, checks quota, resolves model and target node +3. **EXECUTE** — dispatches to the Runtime via HTTP POST `/run`, awaits result +4. **ANALYZE** — records result in PostgreSQL, deducts tokens from Redis quota, publishes completion event to NATS + +If the Runtime returns an error or times out, the Daemon retries up to the configured limit, then marks the run `failed` and publishes an `agent.run.failed` event. + +## Realtime Event Flow + +```mermaid +sequenceDiagram + participant Daemon + participant NATS + participant Realtime + participant Client + + Daemon->>NATS: publish run.status (running) + Daemon->>NATS: publish run.status (completed) + NATS->>Realtime: fan-out to subscribers + Realtime->>Client: WebSocket run.status event + Realtime->>Client: WebSocket run.output event +``` + +The Realtime service maintains a per-user subscription registry (dashmap). When a user connects, they can subscribe to specific run IDs or all their runs. NATS events are routed to matching connections with zero polling.
+ +## Security Boundaries + +- **Gateway** terminates TLS, validates JWTs, and enforces rate limits before any request reaches the API +- **API** enforces RBAC — every route checks the authenticated user's plan tier and permissions +- **Runtime** runs inside Docker containers — no direct database access, no network access except to LLM providers +- **Risk checks** run on both input and output — blocked patterns, PII scanning, quota enforcement +- **Webhooks** are signed with HMAC-SHA256 — receivers must verify before processing + +## Deployment + +| Deployment | Infrastructure | +|---|---| +| Managed (`app.maschina.ai`) | Fly.io (services), Neon (PostgreSQL), Upstash Redis, NGS (NATS) | +| Self-hosted | Docker Compose — all services + dependencies, or point at managed dependencies | + +See the [Docker guide](/self-hosting/docker) for setup instructions. diff --git a/apps/docs/self-hosting/docker.mdx b/apps/docs/self-hosting/docker.mdx new file mode 100644 index 0000000..e63608a --- /dev/null +++ b/apps/docs/self-hosting/docker.mdx @@ -0,0 +1,210 @@ +--- +title: Docker +description: Run Maschina with Docker Compose — all services, all dependencies. +--- + +import { HardDrive, Play, Wrench, Database, ArrowsClockwise, Warning } from "@phosphor-icons/react"; + +## Prerequisites + +- Docker 24+ +- Docker Compose v2 (`docker compose` — not `docker-compose`) +- 4 GB RAM minimum (8 GB recommended for full stack) + +--- + +## Setup + +Clone the repository and copy the example environment file: + +```bash +git clone https://github.com/maschina-labs/maschina +cd maschina +cp .env.example .env +``` + +Edit `.env` with your credentials: + +```bash +# Required +JWT_SECRET=your-secret-minimum-32-chars +ANTHROPIC_API_KEY=sk-ant-... + +# Optional — enables OpenAI models +OPENAI_API_KEY=sk-... + +# Optional — enables Stripe billing +STRIPE_SECRET_KEY=sk_live_... +STRIPE_WEBHOOK_SECRET=whsec_... 
+``` + + +If `ANTHROPIC_API_KEY` is absent, the runtime falls back to a locally running Ollama instance. If `DATABASE_URL` is absent, the API uses SQLite for local development. + + +--- + +## Start + +```bash +docker compose up -d +``` + +All services and dependencies start together. Pulling dependencies on the first run may take a few minutes. + +### Service Ports + +| Service | Port | Description | +|---|---|---| +| Gateway | `8080` | Primary entry point for all client traffic | +| API | `3000` | REST API (internal, proxied by Gateway) | +| Realtime | `8081` | WebSocket + SSE (internal, proxied by Gateway) | +| Runtime | `8001` | Agent execution (internal only) | +| NATS | `4222` | Message broker (internal) | +| Meilisearch | `7700` | Search (internal) | +| PostgreSQL | `5432` | Database (internal) | +| Redis | `6379` | Cache (internal) | + + +Only the Gateway (`8080`) and Realtime (`8081`) should be exposed publicly. All other ports should remain internal. + + +--- + +## Run Migrations + +After first start, run database migrations: + +```bash +docker compose exec api pnpm db:migrate +``` + +Seed the database with default plans and internal admin (optional): + +```bash +docker compose exec api pnpm db:seed +``` + +--- + +## Health Checks + +Verify all services are healthy: + +```bash +docker compose ps +``` + +Check individual service health: + +```bash +# Gateway +curl http://localhost:8080/health + +# API +curl http://localhost:3000/health + +# Realtime +curl http://localhost:8081/health + +# Runtime +curl http://localhost:8001/health +``` + +All services return `{ "status": "ok" }` when healthy.
+ + +--- + +## Logs + +```bash +# Follow all services +docker compose logs -f + +# Follow a specific service +docker compose logs -f api +docker compose logs -f daemon +docker compose logs -f runtime +docker compose logs -f gateway +``` + +--- + +## Stop and Restart + +```bash +# Stop all services (data preserved in volumes) +docker compose down + +# Stop and remove all data volumes (destructive) +docker compose down -v + +# Restart a single service +docker compose restart api +``` + +--- + +## Updating + +```bash +git pull origin main +docker compose pull +docker compose up -d +docker compose exec api pnpm db:migrate +``` + +--- + +## Data Volumes + +Maschina persists data in named Docker volumes: + +| Volume | Contents | +|---|---| +| `maschina_postgres` | PostgreSQL data | +| `maschina_redis` | Redis snapshots | +| `maschina_meilisearch` | Search indexes | + +--- + +## Production Configuration + +For production deployments, set these additional environment variables: + +```bash +# Postgres (replace SQLite default) +DATABASE_URL=postgresql://user:pass@host:5432/maschina + +# Redis (replace local default) +REDIS_URL=redis://user:pass@host:6379 + +# NATS (replace local default) +NATS_URL=nats://host:4222 +NATS_CREDS=/run/secrets/nats.creds + +# TLS termination +GATEWAY_TLS_CERT=/run/secrets/cert.pem +GATEWAY_TLS_KEY=/run/secrets/key.pem +``` + +For managed dependencies (Neon PostgreSQL, Upstash Redis, NGS NATS), see the [Environment Variables](/self-hosting/environment) reference. + +--- + +## Troubleshooting + +**Services fail to start** +Run `docker compose logs <service>` to inspect the error. The most common cause is a missing or malformed environment variable. + +**Database connection refused** +Wait 10–15 seconds after `docker compose up` before running migrations. PostgreSQL takes a moment to initialize. + +**Meilisearch not indexing agents** +The API syncs agents to Meilisearch on create/update.
If existing agents are missing from search, restart the API: +```bash +docker compose restart api +``` + +**Runtime can't reach LLM provider** +Verify `ANTHROPIC_API_KEY` or `OPENAI_API_KEY` in `.env` and that the runtime container has outbound internet access. diff --git a/apps/docs/self-hosting/environment.mdx b/apps/docs/self-hosting/environment.mdx new file mode 100644 index 0000000..93ceac5 --- /dev/null +++ b/apps/docs/self-hosting/environment.mdx @@ -0,0 +1,184 @@ +--- +title: Environment Variables +description: Full reference for all configuration environment variables across every service. +--- + +import { Lock, Cpu, CreditCard, Database, Bell, Eye, Cloud } from "@phosphor-icons/react"; + +## Core — Required + +These must be set for any deployment to function. + +| Variable | Service(s) | Description | +|---|---|---| +| `JWT_SECRET` | API, Gateway | Shared secret for JWT signing and verification. Minimum 32 characters. | +| `DATABASE_URL` | API, Daemon, Runtime | PostgreSQL connection string. If absent, API uses SQLite for local dev. | +| `REDIS_URL` | API | Redis connection string. Used for quota counters and rate limit state. | +| `NATS_URL` | API, Daemon, Realtime | NATS server URL. e.g. `nats://localhost:4222` or `tls://connect.ngs.global` | + +--- + +## AI Providers + +At least one LLM provider key should be set for cloud execution. Without one, the runtime falls back to Ollama. + +| Variable | Service | Description | +|---|---|---| +| `ANTHROPIC_API_KEY` | Runtime | Enables Claude models (Haiku, Sonnet, Opus). | +| `OPENAI_API_KEY` | Runtime | Enables GPT-4o and o-series models. | +| `OLLAMA_BASE_URL` | Runtime | Ollama instance URL. Default: `http://localhost:11434`. Used when no cloud key is set. 
| + +### Model fallback order + +``` +ANTHROPIC_API_KEY set → use Anthropic models +OPENAI_API_KEY set → use OpenAI models +Neither set → fall back to Ollama +``` + +--- + +## NATS + +For NGS (NATS Global Service) or authenticated NATS deployments: + +| Variable | Service(s) | Description | +|---|---|---| +| `NATS_URL` | API, Daemon, Realtime | NATS connection URL. | +| `NATS_CREDS` | API, Daemon, Realtime | Path to `.creds` file for NGS authentication. | +| `NATS_JWT` | API, Daemon, Realtime | NATS JWT string (alternative to creds file). | +| `NATS_NKEY` | API, Daemon, Realtime | NATS NKey seed (alternative to creds file). | + +--- + +## Billing — Stripe + +Required to enable paid plan subscriptions. Optional if not using billing. + +| Variable | Service | Description | +|---|---|---| +| `STRIPE_SECRET_KEY` | API | Stripe secret key (`sk_live_...` or `sk_test_...`). | +| `STRIPE_WEBHOOK_SECRET` | API | Stripe webhook signing secret (`whsec_...`). | +| `STRIPE_PRICE_M1` | API | Stripe price ID for M1 plan. | +| `STRIPE_PRICE_M5` | API | Stripe price ID for M5 plan. | +| `STRIPE_PRICE_M10` | API | Stripe price ID for M10 plan. | +| `STRIPE_PRICE_TEAM` | API | Stripe price ID for Mach Team plan (per seat). | + +--- + +## Email — Resend + +Optional. Enables transactional email (verification, password reset, billing receipts). + +| Variable | Service | Description | +|---|---|---| +| `RESEND_API_KEY` | API | Resend API key. Email is completely disabled if absent. | +| `EMAIL_FROM` | API | Sender address. Default: `no-reply@maschina.ai`. | + +--- + +## Search — Meilisearch + +Optional. Search degrades gracefully if Meilisearch is unreachable. + +| Variable | Service | Description | +|---|---|---| +| `MEILISEARCH_URL` | API | Meilisearch host. Default: `http://localhost:7700`. | +| `MEILISEARCH_MASTER_KEY` | API | Meilisearch master key. | + +--- + +## Observability + +All optional. Services remain fully functional without them. 
+ +| Variable | Service(s) | Description | +|---|---|---| +| `OTEL_EXPORTER_OTLP_ENDPOINT` | All | OpenTelemetry collector endpoint (e.g. Grafana Tempo). | +| `OTEL_SERVICE_NAME` | All | Override the service name in traces. Defaults to the service binary name. | +| `SENTRY_DSN` | All | Sentry error tracking DSN. | +| `PROMETHEUS_PORT` | All | Port to expose Prometheus `/metrics` endpoint. Default: `9090`. | + +--- + +## Gateway + +| Variable | Service | Description | +|---|---|---| +| `GATEWAY_PORT` | Gateway | Port to listen on. Default: `8080`. | +| `GATEWAY_API_URL` | Gateway | Internal URL for the API service. | +| `GATEWAY_REALTIME_URL` | Gateway | Internal URL for the Realtime service. | +| `RATE_LIMIT_PER_IP` | Gateway | Max requests per minute per IP. Default: `120`. | +| `RATE_LIMIT_PER_USER` | Gateway | Max requests per minute per authenticated user. | + +--- + +## Realtime + +| Variable | Service | Description | +|---|---|---| +| `REALTIME_PORT` | Realtime | Port to listen on. Default: `8081`. | +| `REALTIME_MAX_CONNECTIONS` | Realtime | Max concurrent WebSocket connections per user. Per-plan limits still apply. | + +--- + +## Runtime + +| Variable | Service | Description | +|---|---|---| +| `RUNTIME_PORT` | Runtime | Port to listen on. Default: `8001`. | +| `RUNTIME_MAX_TURNS` | Runtime | Max multi-turn iterations per run. Default: `10`. | +| `RUNTIME_RISK_ENABLED` | Runtime | Enable input/output risk checks. Default: `true`. | +| `RUNTIME_RISK_BLOCK_PATTERNS` | Runtime | Comma-separated regex patterns to block in inputs/outputs. | + +--- + +## Data Retention + +Configure how long run payload data is stored before automatic purge. + +| Variable | Service | Description | +|---|---|---| +| `RETENTION_ACCESS_DAYS` | API | Retention for Access tier. Default: `7`. | +| `RETENTION_M1_DAYS` | API | Retention for M1 tier. Default: `30`. | +| `RETENTION_M5_DAYS` | API | Retention for M5 tier. Default: `90`. 
| +| `RETENTION_M10_DAYS` | API | Retention for M10 tier. Default: `365`. | +| `RETENTION_ENTERPRISE_DAYS` | API | Retention for Enterprise. Default: `0` (unlimited). | + +Run metadata (ID, status, timestamps, token counts) is retained indefinitely for billing purposes regardless of these settings. + +--- + +## Daemon + +| Variable | Service | Description | +|---|---|---| +| `DAEMON_RUNTIME_URL` | Daemon | Internal URL for the Runtime service. | +| `DAEMON_MAX_CONCURRENT_JOBS` | Daemon | Max jobs the Daemon will process in parallel. Default: `10`. | +| `DAEMON_RETRY_LIMIT` | Daemon | Max retries per failed job before marking it `failed`. Default: `3`. | +| `DAEMON_JOB_TIMEOUT_MS` | Daemon | How long to wait for the Runtime to respond before timing out. Default: `300000`. | + +--- + +## Example `.env` for Docker Compose + +```bash +# Core +JWT_SECRET=change-me-minimum-32-chars-long-please +DATABASE_URL=postgresql://maschina:maschina@postgres:5432/maschina +REDIS_URL=redis://redis:6379 +NATS_URL=nats://nats:4222 + +# AI +ANTHROPIC_API_KEY=sk-ant-... +OPENAI_API_KEY=sk-... + +# Search +MEILISEARCH_URL=http://meilisearch:7700 +MEILISEARCH_MASTER_KEY=masterKey + +# Gateway routing +GATEWAY_API_URL=http://api:3000 +GATEWAY_REALTIME_URL=http://realtime:8081 +DAEMON_RUNTIME_URL=http://runtime:8001 +``` diff --git a/apps/docs/self-hosting/fly.mdx b/apps/docs/self-hosting/fly.mdx new file mode 100644 index 0000000..f37facf --- /dev/null +++ b/apps/docs/self-hosting/fly.mdx @@ -0,0 +1,251 @@ +--- +title: Fly.io +description: Deploy Maschina services to Fly.io with managed dependencies. +--- + +import { Cloud, HardDrive, Database, ArrowsClockwise, Rocket } from "@phosphor-icons/react"; + +Fly.io is the recommended cloud deployment target for Maschina. The managed (`app.maschina.ai`) platform runs on Fly.io. This guide walks through deploying your own instance. 
+ +```mermaid +graph TD + Client([Client]) + FlyGW[Gateway\nFly.io — iad region] + FlyAPI[API\nFly.io — iad region] + FlyDaemon[Daemon\nFly.io — iad region] + FlyRT[Runtime\nFly.io — iad region] + FlyRealtime[Realtime\nFly.io — iad region] + Neon[(Neon\nPostgreSQL)] + Upstash[(Upstash\nRedis)] + NGS[(NGS\nNATS)] + Meili[(Meilisearch\nCloud)] + + Client --> FlyGW + FlyGW --> FlyAPI + FlyGW --> FlyRealtime + FlyAPI --> Neon + FlyAPI --> Upstash + FlyAPI --> NGS + FlyAPI --> Meili + FlyDaemon --> NGS + FlyDaemon --> FlyRT + FlyRT --> FlyDaemon +``` + +--- + +## Prerequisites + +- [Fly.io account](https://fly.io) and `flyctl` installed +- [Neon](https://neon.tech) — managed PostgreSQL +- [Upstash](https://upstash.com) — managed Redis +- [NGS](https://synadia.com/ngs) — managed NATS (or self-hosted NATS) +- Anthropic API key (and optionally OpenAI) +- Stripe keys (if enabling billing) + +--- + +## Install flyctl + +```bash +curl -L https://fly.io/install.sh | sh +fly auth login +``` + +--- + +## Clone and Configure + +```bash +git clone https://github.com/maschina-labs/self-hosted +cd self-hosted +cp .env.example .env.fly +``` + +Edit `.env.fly`: + +```bash +# Core +JWT_SECRET=your-secret-minimum-32-chars +DATABASE_URL=postgresql://user:pass@ep-xxx.neon.tech/maschina?sslmode=require + +# Redis (Upstash) +REDIS_URL=rediss://default:...@your-upstash-endpoint.upstash.io:6380 + +# NATS (NGS) +NATS_URL=tls://connect.ngs.global +NATS_CREDS=/run/secrets/ngs.creds + +# AI providers +ANTHROPIC_API_KEY=sk-ant-... +OPENAI_API_KEY=sk-... # optional + +# Search (Meilisearch Cloud) +MEILISEARCH_URL=https://your-instance.meilisearch.io +MEILISEARCH_MASTER_KEY=your-master-key + +# Stripe (optional) +STRIPE_SECRET_KEY=sk_live_... +STRIPE_WEBHOOK_SECRET=whsec_... + +# Observability (optional) +SENTRY_DSN=https://...@sentry.io/... +``` + +--- + +## Deploy Services + +Each service deploys as an independent Fly.io app. Deploy them in order. 
+ +### API + +```bash +cd services/api +fly launch --name maschina-api --region iad --no-deploy +fly secrets import < ../../.env.fly +fly deploy +``` + +### Daemon + +```bash +cd services/daemon +fly launch --name maschina-daemon --region iad --no-deploy +fly secrets import < ../../.env.fly +fly deploy +``` + +### Runtime + +```bash +cd services/runtime +fly launch --name maschina-runtime --region iad --no-deploy +fly secrets import < ../../.env.fly +fly deploy +``` + +### Gateway + +```bash +cd services/gateway +fly launch --name maschina-gateway --region iad --no-deploy +fly secrets import < ../../.env.fly +fly secrets set API_URL=https://maschina-api.fly.dev +fly deploy +``` + +### Realtime + +```bash +cd services/realtime +fly launch --name maschina-realtime --region iad --no-deploy +fly secrets import < ../../.env.fly +fly secrets set NATS_URL=tls://connect.ngs.global +fly deploy +``` + +--- + +## Run Migrations + +After the API is deployed and healthy: + +```bash +fly ssh console --app maschina-api -C "pnpm db:migrate" +fly ssh console --app maschina-api -C "pnpm db:seed" +``` + +--- + +## Check Health + +```bash +curl https://maschina-gateway.fly.dev/health +curl https://maschina-api.fly.dev/health +curl https://maschina-realtime.fly.dev/health +``` + +All healthy services return `{ "status": "ok" }`. + +--- + +## Configure a Custom Domain + +```bash +fly certs add api.yourdomain.com --app maschina-gateway +``` + +Then add a CNAME record in your DNS: +``` +api.yourdomain.com CNAME maschina-gateway.fly.dev +``` + +--- + +## Scaling + +Fly.io makes it easy to scale individual services: + +```bash +# Scale API to 2 instances +fly scale count 2 --app maschina-api + +# Scale Runtime to 3 instances (most compute-intensive) +fly scale count 3 --app maschina-runtime + +# Adjust VM size +fly scale vm performance-2x --app maschina-runtime +``` + +The Daemon is stateful (NATS consumer) — run exactly one instance unless you understand JetStream consumer group semantics. 
+ +--- + +## Regions + +Deploy services close to your users and your managed dependencies. Neon, Upstash, and NGS all have regional endpoints. + +```bash +# Add a secondary region +fly regions add lhr --app maschina-api # London +fly regions add nrt --app maschina-runtime # Tokyo +``` + +--- + +## Monitoring Logs + +```bash +fly logs --app maschina-api +fly logs --app maschina-daemon +fly logs --app maschina-runtime +fly logs --app maschina-gateway +``` + +--- + +## Updating + +```bash +cd services/api && fly deploy +cd ../daemon && fly deploy +cd ../runtime && fly deploy +cd ../gateway && fly deploy +cd ../realtime && fly deploy +fly ssh console --app maschina-api -C "pnpm db:migrate" +``` + +--- + +## Service URLs + +| Service | App name | Internal URL | +|---|---|---| +| Gateway | `maschina-gateway` | `https://maschina-gateway.fly.dev` | +| API | `maschina-api` | `https://maschina-api.fly.dev` | +| Realtime | `maschina-realtime` | `https://maschina-realtime.fly.dev` | +| Runtime | `maschina-runtime` | Internal to Fly private network | +| Daemon | `maschina-daemon` | Internal to Fly private network | + +The Runtime and Daemon do not need public internet exposure. Use Fly's private network (`fly.internal`) for internal service communication. diff --git a/apps/docs/self-hosting/overview.mdx b/apps/docs/self-hosting/overview.mdx new file mode 100644 index 0000000..b2c23b7 --- /dev/null +++ b/apps/docs/self-hosting/overview.mdx @@ -0,0 +1,61 @@ +--- +title: Self-Hosting +description: Run Maschina on your own infrastructure. +--- + +import { HardDrive, Stack, Database, ShieldCheck, Code } from "@phosphor-icons/react"; + +Maschina can be self-hosted. All services are packaged as Docker images and configured through environment variables. You never need access to the source code. 
+ +## How self-hosting works + +The public `maschina-labs/self-hosted` repository contains everything you need to run Maschina: + +- A `docker-compose.yml` that pulls pre-built images from `ghcr.io/maschina-labs/` +- An `.env.example` with all configuration options +- Database migration files +- A README with step-by-step setup instructions + +Your source code stays private. You pull the same images that run on `app.maschina.ai`. + +## Services + +| Service | Image | Description | +|---|---|---| +| `api` | `ghcr.io/maschina-labs/api` | REST API, auth, billing, webhooks | +| `gateway` | `ghcr.io/maschina-labs/gateway` | JWT validation, rate limiting, reverse proxy | +| `realtime` | `ghcr.io/maschina-labs/realtime` | WebSocket and SSE for live run updates | +| `daemon` | `ghcr.io/maschina-labs/daemon` | NATS consumer, job dispatcher | +| `runtime` | `ghcr.io/maschina-labs/runtime` | Agent execution, model routing, risk checks | + +## Dependencies + +| Service | Purpose | Managed option | +|---|---|---| +| PostgreSQL | Primary database | Neon, Supabase, RDS | +| Redis | Quota counters, session cache | Upstash, ElastiCache | +| NATS JetStream | Job queue and event bus | NGS (NATS Global Service) | +| Meilisearch | Full-text search | Meilisearch Cloud | + +All dependencies can be run via Docker Compose for local development or pointed at managed services for production. + +## Quick start + +```bash +git clone https://github.com/maschina-labs/self-hosted +cd self-hosted +cp .env.example .env +# edit .env with your credentials +docker compose up -d +docker compose exec api pnpm db:migrate +``` + +See the [Docker guide](/self-hosting/docker) for the full walkthrough. + +## License + +The self-hosted distribution is released under the Apache 2.0 license. You can run it, modify it, and use it commercially. You cannot use the Maschina name or brand for your own distribution. + +## Managed hosting + +Prefer not to self-host? 
[app.maschina.ai](https://app.maschina.ai) is fully managed, always up to date, and handles all infrastructure for you. diff --git a/apps/mobile/ios/README.md b/apps/mobile/ios/README.md index 66e0221..355feeb 100644 --- a/apps/mobile/ios/README.md +++ b/apps/mobile/ios/README.md @@ -9,7 +9,7 @@ The Xcode project must be created manually (Xcode can't generate `.xcodeproj` fr 1. Open Xcode 2. File → New → Project → iOS → App 3. Product Name: `Maschina` -4. Bundle Identifier: `io.maschina.ios` +4. Bundle Identifier: `ai.maschina.ios` 5. Interface: SwiftUI 6. Language: Swift 7. Save to `apps/mobile/ios/` @@ -21,7 +21,7 @@ Then add the existing source files: `APIClient.swift` points to: - Debug: `http://localhost:8080` (gateway running locally) -- Release: `https://api.maschina.io` +- Release: `https://api.maschina.ai` Token is stored in `UserDefaults` under `maschina_token`. diff --git a/docs/architecture/api.md b/docs/architecture/api.md index 079d5e9..a0b6560 100644 --- a/docs/architecture/api.md +++ b/docs/architecture/api.md @@ -133,7 +133,7 @@ Client → POST /agents/:id/run → services/api ### WebSocket (real-time events) -Clients connect to `wss://api.maschina.io/ws?token=`. The gateway upgrades the connection and bridges it to `services/realtime`. The realtime service fans out events from NATS core subjects to the client's live connection. +Clients connect to `wss://api.maschina.ai/ws?token=`. The gateway upgrades the connection and bridges it to `services/realtime`. The realtime service fans out events from NATS core subjects to the client's live connection. 
### SSE (server-sent events) diff --git a/docs/architecture/network.md b/docs/architecture/network.md index 51eb50d..feccb2d 100644 --- a/docs/architecture/network.md +++ b/docs/architecture/network.md @@ -98,8 +98,8 @@ Upstream services trust these headers unconditionally — they are only reachabl - DDoS protection at the edge - WAF for basic request filtering - Edge TLS termination (Cloudflare → Fly.io is TLS-encrypted) -- `api.maschina.io` → Fly.io gateway -- `maschina.io`, `app.maschina.io` → Fly.io or CDN for web apps +- `api.maschina.ai` → Fly.io gateway +- `maschina.ai`, `app.maschina.ai` → Fly.io or CDN for web apps --- diff --git a/docs/operations/deployment.md b/docs/operations/deployment.md index cc9dedf..b7330b7 100644 --- a/docs/operations/deployment.md +++ b/docs/operations/deployment.md @@ -121,7 +121,7 @@ PRs automatically get a Neon database branch via the Neon GitHub integration. Pr 2. CI runs full test suite on `main` 3. On green CI, tag the release: `git tag v1.2.3` 4. CI deploys tagged commit to production -5. Run smoke tests against production (`k6 run k6/smoke.js --env BASE_URL=https://api.maschina.io`) +5. Run smoke tests against production (`k6 run k6/smoke.js --env BASE_URL=https://api.maschina.ai`) 6. Monitor Grafana + Sentry for 15 minutes post-deploy ### Database migration on deploy diff --git a/docs/security/access.md b/docs/security/access.md index fa5c7b4..b76400d 100644 --- a/docs/security/access.md +++ b/docs/security/access.md @@ -126,7 +126,7 @@ No service accepts requests from the public internet except the gateway. ## Admin Console Access -`apps/console` (internal admin console) is accessible only to users with the `Internal` plan tier. It runs on a separate subdomain (`console.maschina.io`) and requires: +`apps/console` (internal admin console) is accessible only to users with the `Internal` plan tier. It runs on a separate subdomain (`console.maschina.ai`) and requires: 1. Valid JWT with `plan: "internal"` claim 2. 
Additional admin password (second factor, implemented in console app) diff --git a/docs/security/api.md b/docs/security/api.md index 15cb680..181e5fb 100644 --- a/docs/security/api.md +++ b/docs/security/api.md @@ -134,7 +134,7 @@ Email addresses are normalized to `email.toLowerCase()` before any comparison or CORS is enforced by the gateway. Allowed origins are configured via `CORS_ORIGIN` environment variable. ``` -CORS_ORIGIN=https://app.maschina.io,https://maschina.io +CORS_ORIGIN=https://app.maschina.ai,https://maschina.ai ``` In local development: `CORS_ORIGIN=http://localhost:5173`. diff --git a/install.sh b/install.sh deleted file mode 100755 index ff1c556..0000000 --- a/install.sh +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env sh -# Maschina CLI installer -# Usage: curl -fsSL https://install.maschina.dev | sh -set -eu - -BOLD="\033[1m" -DIM="\033[2m" -CYAN="\033[36m" -GREEN="\033[32m" -YELLOW="\033[33m" -RED="\033[31m" -RESET="\033[0m" - -step() { printf " ${CYAN}→${RESET} %s\n" "$1"; } -ok() { printf " ${GREEN}✓${RESET} %s\n" "$1"; } -warn() { printf " ${YELLOW}!${RESET} %s\n" "$1"; } -die() { printf " ${RED}✗${RESET} %s\n" "$1"; exit 1; } -hr() { printf " ${DIM}──────────────────────────────────────────────${RESET}\n"; } - -# ── Banner ──────────────────────────────────────────────────────────────────── - -printf "\n" -printf "${BOLD} Maschina${RESET}\n" -printf "${DIM} Autonomous digital labor, at your command.${RESET}\n" -printf "\n" -hr -printf "\n" - -# ── Detect OS and architecture ──────────────────────────────────────────────── - -step "Checking platform..." - -OS="$(uname -s)" -ARCH="$(uname -m)" - -case "$OS" in - Darwin) OS_NAME="darwin" ;; - Linux) OS_NAME="linux" ;; - MINGW*|MSYS*|CYGWIN*) die "Windows is not yet supported via this script. 
Download the binary from https://github.com/RustMunkey/maschina/releases" ;; - *) die "Unsupported OS: $OS" ;; -esac - -case "$ARCH" in - x86_64|amd64) ARCH_NAME="x86_64" ;; - arm64|aarch64) ARCH_NAME="aarch64" ;; - *) die "Unsupported architecture: $ARCH" ;; -esac - -ok "Platform: ${OS_NAME}-${ARCH_NAME}" - -# ── Check dependencies ──────────────────────────────────────────────────────── - -step "Checking dependencies..." - -MISSING="" -for dep in curl tar; do - if ! command -v "$dep" >/dev/null 2>&1; then - MISSING="$MISSING $dep" - fi -done - -if [ -n "$MISSING" ]; then - die "Missing required tools:$MISSING — install them and re-run." -fi -ok "Dependencies satisfied" - -# ── Determine install directory ─────────────────────────────────────────────── - -INSTALL_DIR="${MASCHINA_INSTALL_DIR:-}" - -if [ -z "$INSTALL_DIR" ]; then - if [ -w "/usr/local/bin" ]; then - INSTALL_DIR="/usr/local/bin" - else - INSTALL_DIR="$HOME/.local/bin" - mkdir -p "$INSTALL_DIR" - fi -fi - -step "Installing to ${INSTALL_DIR}" - -# ── Download binary ─────────────────────────────────────────────────────────── - -GITHUB_REPO="RustMunkey/maschina" -VERSION="${MASCHINA_VERSION:-latest}" - -if [ "$VERSION" = "latest" ]; then - step "Fetching latest release..." - VERSION="$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" \ - | grep '"tag_name"' | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')" - if [ -z "$VERSION" ]; then - die "Could not fetch latest version. Check your internet connection or set MASCHINA_VERSION manually." - fi -fi - -TARBALL="maschina-${OS_NAME}-${ARCH_NAME}.tar.gz" -DOWNLOAD_URL="https://github.com/${GITHUB_REPO}/releases/download/${VERSION}/${TARBALL}" - -step "Downloading maschina ${VERSION}..." - -TMP_DIR="$(mktemp -d)" -trap 'rm -rf "$TMP_DIR"' EXIT - -curl -fsSL --progress-bar "$DOWNLOAD_URL" -o "${TMP_DIR}/${TARBALL}" \ - || die "Download failed. Visit https://github.com/${GITHUB_REPO}/releases to download manually." 
- -# ── Extract and install ─────────────────────────────────────────────────────── - -step "Installing binary..." - -tar -xzf "${TMP_DIR}/${TARBALL}" -C "$TMP_DIR" \ - || die "Failed to extract archive" - -BINARY="${TMP_DIR}/maschina" -[ -f "$BINARY" ] || die "Binary not found in archive" - -chmod +x "$BINARY" -mv "$BINARY" "${INSTALL_DIR}/maschina" \ - || die "Failed to install to ${INSTALL_DIR} — try: sudo MASCHINA_INSTALL_DIR=/usr/local/bin sh install.sh" - -ok "Installed maschina $(${INSTALL_DIR}/maschina --version 2>/dev/null || echo "${VERSION}")" - -# ── Download service binaries ───────────────────────────────────────────────── - -SVC_DIR="${HOME}/.local/share/maschina/bin" -mkdir -p "$SVC_DIR" - -SERVICES="maschina-api maschina-gateway maschina-realtime maschina-runtime maschina-daemon" -SVC_TARBALL="maschina-services-${OS_NAME}-${ARCH_NAME}.tar.gz" -SVC_URL="https://github.com/${GITHUB_REPO}/releases/download/${VERSION}/${SVC_TARBALL}" - -step "Downloading service binaries..." 
- -# Check if service tarball exists (releases may not include services separately) -SVC_HTTP_CODE="$(curl -fsSL -o /dev/null -w "%{http_code}" --head "$SVC_URL" 2>/dev/null || echo "000")" - -if [ "$SVC_HTTP_CODE" = "302" ] || [ "$SVC_HTTP_CODE" = "200" ]; then - curl -fsSL "$SVC_URL" -o "${TMP_DIR}/${SVC_TARBALL}" 2>/dev/null \ - && tar -xzf "${TMP_DIR}/${SVC_TARBALL}" -C "$TMP_DIR" 2>/dev/null \ - || true - - INSTALLED_SVCS="" - for svc in $SERVICES; do - if [ -f "${TMP_DIR}/${svc}" ]; then - chmod +x "${TMP_DIR}/${svc}" - mv "${TMP_DIR}/${svc}" "${SVC_DIR}/${svc}" - INSTALLED_SVCS="${INSTALLED_SVCS} ${svc}" - fi - done - - if [ -n "$INSTALLED_SVCS" ]; then - ok "Service binaries installed to ${SVC_DIR}" - else - warn "No service binaries found in release archive" - fi -else - warn "Service binaries not yet available for this release" - printf " ${DIM}Run: maschina service start (uses dev mode fallback)${RESET}\n" -fi - -# ── Update PATH ─────────────────────────────────────────────────────────────── - -PATH_LINE="export PATH=\"${INSTALL_DIR}:\$PATH\"" -ADDED_TO="" - -# Check if already on PATH -case ":${PATH}:" in - *":${INSTALL_DIR}:"*) ;; - *) - # Try to detect shell and add to rc file - SHELL_NAME="$(basename "${SHELL:-/bin/sh}")" - RC_FILE="" - - case "$SHELL_NAME" in - zsh) RC_FILE="$HOME/.zshrc" ;; - bash) RC_FILE="$HOME/.bashrc" ;; - fish) - FISH_DIR="$HOME/.config/fish" - mkdir -p "$FISH_DIR" - RC_FILE="$FISH_DIR/config.fish" - PATH_LINE="set -gx PATH \"${INSTALL_DIR}\" \$PATH" - ;; - *) RC_FILE="$HOME/.profile" ;; - esac - - if [ -n "$RC_FILE" ]; then - if ! 
grep -qF "$INSTALL_DIR" "$RC_FILE" 2>/dev/null; then - printf "\n# Added by Maschina installer\n%s\n" "$PATH_LINE" >> "$RC_FILE" - ADDED_TO="$RC_FILE" - fi - fi - ;; -esac - -if [ -n "$ADDED_TO" ]; then - ok "Added ${INSTALL_DIR} to PATH in ${ADDED_TO}" - warn "Restart your terminal or run: source ${ADDED_TO}" -else - ok "PATH already includes ${INSTALL_DIR}" -fi - -# ── Done ────────────────────────────────────────────────────────────────────── - -printf "\n" -hr -printf "\n" -printf " ${GREEN}${BOLD}Installation complete!${RESET}\n" -printf "\n" -printf " Run ${BOLD}maschina setup${RESET} to authenticate and configure your workspace.\n" -printf " Run ${BOLD}maschina service start${RESET} to launch all background services.\n" -printf "\n" -printf " ${DIM}maschina --help${RESET}${DIM} — all commands${RESET}\n" -printf " ${DIM}maschina doctor${RESET}${DIM} — diagnose your installation${RESET}\n" -printf "\n" -printf " ${DIM}Documentation: https://docs.maschina.dev/cli${RESET}\n" -printf " ${DIM}Issues: https://github.com/${GITHUB_REPO}/issues${RESET}\n" -printf "\n" - -# ── Run setup if interactive ────────────────────────────────────────────────── - -if [ -t 0 ] && [ -t 1 ]; then - printf " Run setup now? 
[Y/n] " - read -r REPLY - case "$REPLY" in - ""|y|Y|yes|Yes) exec "${INSTALL_DIR}/maschina" setup ;; - esac -fi diff --git a/install/install.sh b/install/install.sh old mode 100644 new mode 100755 index 3cde6c8..ff1c556 --- a/install/install.sh +++ b/install/install.sh @@ -1,42 +1,227 @@ -#!/bin/sh -# Maschina Installer -# Usage: curl -fsSL https://install.maschina.ai | sh +#!/usr/bin/env sh +# Maschina CLI installer +# Usage: curl -fsSL https://install.maschina.dev | sh +set -eu -set -e +BOLD="\033[1m" +DIM="\033[2m" +CYAN="\033[36m" +GREEN="\033[32m" +YELLOW="\033[33m" +RED="\033[31m" +RESET="\033[0m" -REPO="maschina-ai/maschina" -BIN_NAME="maschina-daemon" -INSTALL_DIR="${MASCHINA_INSTALL_DIR:-/usr/local/bin}" +step() { printf " ${CYAN}→${RESET} %s\n" "$1"; } +ok() { printf " ${GREEN}✓${RESET} %s\n" "$1"; } +warn() { printf " ${YELLOW}!${RESET} %s\n" "$1"; } +die() { printf " ${RED}✗${RESET} %s\n" "$1"; exit 1; } +hr() { printf " ${DIM}──────────────────────────────────────────────${RESET}\n"; } + +# ── Banner ──────────────────────────────────────────────────────────────────── + +printf "\n" +printf "${BOLD} Maschina${RESET}\n" +printf "${DIM} Autonomous digital labor, at your command.${RESET}\n" +printf "\n" +hr +printf "\n" + +# ── Detect OS and architecture ──────────────────────────────────────────────── + +step "Checking platform..." -# Detect OS and architecture OS="$(uname -s)" ARCH="$(uname -m)" case "$OS" in - Linux) TARGET_OS="linux" ;; - Darwin) TARGET_OS="macos" ;; - *) - echo "Unsupported OS: $OS" - exit 1 - ;; + Darwin) OS_NAME="darwin" ;; + Linux) OS_NAME="linux" ;; + MINGW*|MSYS*|CYGWIN*) die "Windows is not yet supported via this script. 
Download the binary from https://github.com/RustMunkey/maschina/releases" ;; + *) die "Unsupported OS: $OS" ;; esac case "$ARCH" in - x86_64) TARGET_ARCH="x86_64" ;; - arm64|aarch64) TARGET_ARCH="aarch64" ;; + x86_64|amd64) ARCH_NAME="x86_64" ;; + arm64|aarch64) ARCH_NAME="aarch64" ;; + *) die "Unsupported architecture: $ARCH" ;; +esac + +ok "Platform: ${OS_NAME}-${ARCH_NAME}" + +# ── Check dependencies ──────────────────────────────────────────────────────── + +step "Checking dependencies..." + +MISSING="" +for dep in curl tar; do + if ! command -v "$dep" >/dev/null 2>&1; then + MISSING="$MISSING $dep" + fi +done + +if [ -n "$MISSING" ]; then + die "Missing required tools:$MISSING — install them and re-run." +fi +ok "Dependencies satisfied" + +# ── Determine install directory ─────────────────────────────────────────────── + +INSTALL_DIR="${MASCHINA_INSTALL_DIR:-}" + +if [ -z "$INSTALL_DIR" ]; then + if [ -w "/usr/local/bin" ]; then + INSTALL_DIR="/usr/local/bin" + else + INSTALL_DIR="$HOME/.local/bin" + mkdir -p "$INSTALL_DIR" + fi +fi + +step "Installing to ${INSTALL_DIR}" + +# ── Download binary ─────────────────────────────────────────────────────────── + +GITHUB_REPO="RustMunkey/maschina" +VERSION="${MASCHINA_VERSION:-latest}" + +if [ "$VERSION" = "latest" ]; then + step "Fetching latest release..." + VERSION="$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" \ + | grep '"tag_name"' | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')" + if [ -z "$VERSION" ]; then + die "Could not fetch latest version. Check your internet connection or set MASCHINA_VERSION manually." + fi +fi + +TARBALL="maschina-${OS_NAME}-${ARCH_NAME}.tar.gz" +DOWNLOAD_URL="https://github.com/${GITHUB_REPO}/releases/download/${VERSION}/${TARBALL}" + +step "Downloading maschina ${VERSION}..." + +TMP_DIR="$(mktemp -d)" +trap 'rm -rf "$TMP_DIR"' EXIT + +curl -fsSL --progress-bar "$DOWNLOAD_URL" -o "${TMP_DIR}/${TARBALL}" \ + || die "Download failed. 
Visit https://github.com/${GITHUB_REPO}/releases to download manually." + +# ── Extract and install ─────────────────────────────────────────────────────── + +step "Installing binary..." + +tar -xzf "${TMP_DIR}/${TARBALL}" -C "$TMP_DIR" \ + || die "Failed to extract archive" + +BINARY="${TMP_DIR}/maschina" +[ -f "$BINARY" ] || die "Binary not found in archive" + +chmod +x "$BINARY" +mv "$BINARY" "${INSTALL_DIR}/maschina" \ + || die "Failed to install to ${INSTALL_DIR} — try: sudo MASCHINA_INSTALL_DIR=/usr/local/bin sh install.sh" + +ok "Installed maschina $(${INSTALL_DIR}/maschina --version 2>/dev/null || echo "${VERSION}")" + +# ── Download service binaries ───────────────────────────────────────────────── + +SVC_DIR="${HOME}/.local/share/maschina/bin" +mkdir -p "$SVC_DIR" + +SERVICES="maschina-api maschina-gateway maschina-realtime maschina-runtime maschina-daemon" +SVC_TARBALL="maschina-services-${OS_NAME}-${ARCH_NAME}.tar.gz" +SVC_URL="https://github.com/${GITHUB_REPO}/releases/download/${VERSION}/${SVC_TARBALL}" + +step "Downloading service binaries..." 
+ +# Check if service tarball exists (releases may not include services separately) +SVC_HTTP_CODE="$(curl -fsSL -o /dev/null -w "%{http_code}" --head "$SVC_URL" 2>/dev/null || echo "000")" + +if [ "$SVC_HTTP_CODE" = "302" ] || [ "$SVC_HTTP_CODE" = "200" ]; then + curl -fsSL "$SVC_URL" -o "${TMP_DIR}/${SVC_TARBALL}" 2>/dev/null \ + && tar -xzf "${TMP_DIR}/${SVC_TARBALL}" -C "$TMP_DIR" 2>/dev/null \ + || true + + INSTALLED_SVCS="" + for svc in $SERVICES; do + if [ -f "${TMP_DIR}/${svc}" ]; then + chmod +x "${TMP_DIR}/${svc}" + mv "${TMP_DIR}/${svc}" "${SVC_DIR}/${svc}" + INSTALLED_SVCS="${INSTALLED_SVCS} ${svc}" + fi + done + + if [ -n "$INSTALLED_SVCS" ]; then + ok "Service binaries installed to ${SVC_DIR}" + else + warn "No service binaries found in release archive" + fi +else + warn "Service binaries not yet available for this release" + printf " ${DIM}Run: maschina service start (uses dev mode fallback)${RESET}\n" +fi + +# ── Update PATH ─────────────────────────────────────────────────────────────── + +PATH_LINE="export PATH=\"${INSTALL_DIR}:\$PATH\"" +ADDED_TO="" + +# Check if already on PATH +case ":${PATH}:" in + *":${INSTALL_DIR}:"*) ;; *) - echo "Unsupported architecture: $ARCH" - exit 1 + # Try to detect shell and add to rc file + SHELL_NAME="$(basename "${SHELL:-/bin/sh}")" + RC_FILE="" + + case "$SHELL_NAME" in + zsh) RC_FILE="$HOME/.zshrc" ;; + bash) RC_FILE="$HOME/.bashrc" ;; + fish) + FISH_DIR="$HOME/.config/fish" + mkdir -p "$FISH_DIR" + RC_FILE="$FISH_DIR/config.fish" + PATH_LINE="set -gx PATH \"${INSTALL_DIR}\" \$PATH" + ;; + *) RC_FILE="$HOME/.profile" ;; + esac + + if [ -n "$RC_FILE" ]; then + if ! 
grep -qF "$INSTALL_DIR" "$RC_FILE" 2>/dev/null; then + printf "\n# Added by Maschina installer\n%s\n" "$PATH_LINE" >> "$RC_FILE" + ADDED_TO="$RC_FILE" + fi + fi ;; esac -TARGET="${TARGET_ARCH}-${TARGET_OS}" +if [ -n "$ADDED_TO" ]; then + ok "Added ${INSTALL_DIR} to PATH in ${ADDED_TO}" + warn "Restart your terminal or run: source ${ADDED_TO}" +else + ok "PATH already includes ${INSTALL_DIR}" +fi + +# ── Done ────────────────────────────────────────────────────────────────────── -echo "Installing Maschina daemon for ${TARGET}..." +printf "\n" +hr +printf "\n" +printf " ${GREEN}${BOLD}Installation complete!${RESET}\n" +printf "\n" +printf " Run ${BOLD}maschina setup${RESET} to authenticate and configure your workspace.\n" +printf " Run ${BOLD}maschina service start${RESET} to launch all background services.\n" +printf "\n" +printf " ${DIM}maschina --help${RESET}${DIM} — all commands${RESET}\n" +printf " ${DIM}maschina doctor${RESET}${DIM} — diagnose your installation${RESET}\n" +printf "\n" +printf " ${DIM}Documentation: https://docs.maschina.dev/cli${RESET}\n" +printf " ${DIM}Issues: https://github.com/${GITHUB_REPO}/issues${RESET}\n" +printf "\n" -# TODO: replace with actual release URL once CI publishes binaries -# DOWNLOAD_URL="https://github.com/${REPO}/releases/latest/download/${BIN_NAME}-${TARGET}.tar.gz" +# ── Run setup if interactive ────────────────────────────────────────────────── -echo "" -echo "Maschina is not yet released. Check https://maschina.ai for updates." -echo "" +if [ -t 0 ] && [ -t 1 ]; then + printf " Run setup now? 
[Y/n] " + read -r REPLY + case "$REPLY" in + ""|y|Y|yes|Yes) exec "${INSTALL_DIR}/maschina" setup ;; + esac +fi diff --git a/packages/api-client/LICENSE b/packages/api-client/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/packages/api-client/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/packages/cli/LICENSE b/packages/cli/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/packages/cli/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/packages/compliance/package.json b/packages/compliance/package.json
index 488a497..7314002 100644
--- a/packages/compliance/package.json
+++ b/packages/compliance/package.json
@@ -4,13 +4,8 @@
   "private": true,
   "type": "module",
   "exports": {
-    ".": {
-      "import": "./dist/index.js",
-      "types": "./dist/index.d.ts"
-    }
+    ".": "./src/index.ts"
   },
-  "main": "./dist/index.js",
-  "types": "./dist/index.d.ts",
   "scripts": {
     "build": "tsc",
     "dev": "tsc --watch",
@@ -18,8 +13,8 @@
     "clean": "rm -rf dist"
   },
   "dependencies": {
-    "@maschina/types": "workspace:*",
-    "@maschina/db": "workspace:*"
+    "@maschina/db": "workspace:*",
+    "@maschina/plans": "workspace:*"
   },
   "devDependencies": {
     "@maschina/tsconfig": "workspace:*",
diff --git a/packages/compliance/src/audit.ts b/packages/compliance/src/audit.ts
new file mode 100644
index 0000000..e3f364c
--- /dev/null
+++ b/packages/compliance/src/audit.ts
@@ -0,0 +1,104 @@
+import { auditLogs, db } from "@maschina/db";
+import { and, asc, desc, gte, lte, sql } from "@maschina/db";
+
+export interface AuditLogQuery {
+  userId: string;
+  from?: Date;
+  to?: Date;
+  action?: string;
+  resource?: string;
+  limit?: number;
+  offset?: number;
+}
+
+export interface AuditLogRow {
+  id: string;
+  userId: string | null;
+  action: string;
+  resource: string;
+  resourceId: string | null;
+  metadata: Record<string, unknown> | null;
+  createdAt: string;
+}
+
+/**
+ * Query audit logs for a user with optional date range and filters.
+ */
+export async function queryAuditLogs(opts: AuditLogQuery): Promise<{
+  rows: AuditLogRow[];
+  total: number;
+}> {
+  const limit = Math.min(opts.limit ?? 100, 1000);
+  const offset = opts.offset ?? 0;
+
+  const conditions = [sql`${auditLogs.userId} = ${opts.userId}::uuid`];
+  if (opts.from) conditions.push(gte(auditLogs.createdAt, opts.from));
+  if (opts.to) conditions.push(lte(auditLogs.createdAt, opts.to));
+  if (opts.action) conditions.push(sql`${auditLogs.action} = ${opts.action}`);
+  if (opts.resource) conditions.push(sql`${auditLogs.resource} = ${opts.resource}`);
+
+  const where = and(...conditions);
+
+  const [rows, [{ total }]] = await Promise.all([
+    db
+      .select({
+        id: auditLogs.id,
+        userId: auditLogs.userId,
+        action: auditLogs.action,
+        resource: auditLogs.resource,
+        resourceId: auditLogs.resourceId,
+        metadata: auditLogs.metadata,
+        createdAt: auditLogs.createdAt,
+      })
+      .from(auditLogs)
+      .where(where)
+      .orderBy(desc(auditLogs.createdAt))
+      .limit(limit)
+      .offset(offset),
+    db.select({ total: sql<number>`count(*)::int` }).from(auditLogs).where(where),
+  ]);
+
+  return {
+    rows: rows.map((r: (typeof rows)[number]) => ({
+      ...r,
+      metadata: r.metadata as Record<string, unknown> | null,
+      createdAt: r.createdAt.toISOString(),
+    })),
+    total,
+  };
+}
+
+/**
+ * Convert audit log rows to CSV string.
+ */
+export function toCSV(rows: AuditLogRow[]): string {
+  const headers = ["id", "user_id", "action", "resource", "resource_id", "created_at"];
+  const lines = [
+    headers.join(","),
+    ...rows.map((r) =>
+      [r.id, r.userId ?? "", r.action, r.resource, r.resourceId ?? "", r.createdAt]
+        .map((v) => `"${String(v).replace(/"/g, '""')}"`)
+        .join(","),
+    ),
+  ];
+  return lines.join("\n");
+}
+
+/**
+ * Append a single audit log entry. Fire-and-forget safe.
+ */
+export async function appendAuditLog(entry: {
+  userId: string | null;
+  action: string;
+  resource: string;
+  resourceId?: string;
+  metadata?: Record<string, unknown>;
+}): Promise<void> {
+  await db.insert(auditLogs).values({
+    userId: entry.userId,
+    action: entry.action,
+    resource: entry.resource,
+    resourceId: entry.resourceId,
+    metadata: entry.metadata,
+  });
+}
diff --git a/packages/compliance/src/gdpr.ts b/packages/compliance/src/gdpr.ts
new file mode 100644
index 0000000..1189013
--- /dev/null
+++ b/packages/compliance/src/gdpr.ts
@@ -0,0 +1,61 @@
+import { agentRuns, agents, db, users } from "@maschina/db";
+import { and, eq, isNull, sql } from "@maschina/db";
+import { appendAuditLog } from "./audit.js";
+
+/**
+ * GDPR Article 17 — Right to erasure.
+ *
+ * Anonymizes the user record and purges run payloads. Does NOT hard-delete
+ * the user row (we need the tombstone for billing reconciliation and to prevent
+ * re-registration with the same email index).
+ */
+export async function deleteUserData(userId: string): Promise<{
+  agentsDeleted: number;
+  runsAnonymized: number;
+}> {
+  // 1. Soft-delete all agents
+  const deletedAgents = await db
+    .update(agents)
+    .set({ deletedAt: new Date(), status: "stopped" })
+    .where(and(eq(agents.userId, userId), isNull(agents.deletedAt)))
+    .returning({ id: agents.id });
+
+  // 2. Anonymize run payloads (zero out PII-bearing fields)
+  const anonymizedRuns = await db
+    .update(agentRuns)
+    .set({
+      inputPayload: {},
+      outputPayload: {},
+    })
+    .where(eq(agentRuns.userId, userId))
+    .returning({ id: agentRuns.id });
+
+  // 3. Anonymize the user row — preserve ID + tier for billing tombstone
+  await db
+    .update(users)
+    .set({
+      email: `deleted+${userId}@maschina.internal`,
+      emailIndex: `deleted:${userId}`,
+      name: "Deleted User",
+      passwordHash: "",
+      updatedAt: new Date(),
+    })
+    .where(eq(users.id, userId));
+
+  // 4.
Audit log — immutable record that the deletion happened
+  await appendAuditLog({
+    userId: null, // user is now anonymized
+    action: "gdpr.delete",
+    resource: "user",
+    resourceId: userId,
+    metadata: {
+      agentsDeleted: deletedAgents.length,
+      runsAnonymized: anonymizedRuns.length,
+    },
+  });
+
+  return {
+    agentsDeleted: deletedAgents.length,
+    runsAnonymized: anonymizedRuns.length,
+  };
+}
diff --git a/packages/compliance/src/index.ts b/packages/compliance/src/index.ts
index 922b571..ed68850 100644
--- a/packages/compliance/src/index.ts
+++ b/packages/compliance/src/index.ts
@@ -1,2 +1,4 @@
-// @maschina/compliance — KYC, AML, audit trail
-export {};
+export { appendAuditLog, queryAuditLogs, toCSV } from "./audit.js";
+export type { AuditLogQuery, AuditLogRow } from "./audit.js";
+export { deleteUserData } from "./gdpr.js";
+export { getRetentionCutoff, getRetentionDays } from "./retention.js";
diff --git a/packages/compliance/src/retention.ts b/packages/compliance/src/retention.ts
new file mode 100644
index 0000000..cd26da7
--- /dev/null
+++ b/packages/compliance/src/retention.ts
@@ -0,0 +1,31 @@
+import type { PlanTier } from "@maschina/plans";
+
+// Retention windows per tier in days. -1 = unlimited.
+const RETENTION_DAYS: Record<PlanTier, number> = {
+  access: 30,
+  m1: 90,
+  m5: 365,
+  m10: -1,
+  teams: -1,
+  enterprise: -1,
+  internal: -1,
+};
+
+/**
+ * Returns the audit log / run data retention window for a tier in days.
+ * Returns -1 for unlimited retention.
+ */
+export function getRetentionDays(tier: PlanTier): number {
+  return RETENTION_DAYS[tier] ?? 30;
+}
+
+/**
+ * Returns the cutoff Date before which records may be purged, or null if unlimited.
+ */ +export function getRetentionCutoff(tier: PlanTier): Date | null { + const days = getRetentionDays(tier); + if (days === -1) return null; + const cutoff = new Date(); + cutoff.setDate(cutoff.getDate() - days); + return cutoff; +} diff --git a/packages/sdk/python/LICENSE b/packages/sdk/python/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/packages/sdk/python/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/packages/sdk/rust/LICENSE b/packages/sdk/rust/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/packages/sdk/rust/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/packages/sdk/ts/LICENSE b/packages/sdk/ts/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/packages/sdk/ts/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5b0390b..ca17e80 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -53,7 +53,7 @@ importers: version: 24.2.9(typescript@5.9.3) turbo: specifier: latest - version: 2.8.14 + version: 2.8.17 typescript: specifier: ^5 version: 5.9.3 @@ -602,9 +602,9 @@ importers: '@maschina/db': specifier: workspace:* version: link:../db - '@maschina/types': + '@maschina/plans': specifier: workspace:* - version: link:../types + version: link:../plans devDependencies: '@maschina/tsconfig': specifier: workspace:* @@ -1459,6 +1459,9 @@ importers: '@maschina/cache': specifier: workspace:* version: link:../../packages/cache + '@maschina/compliance': + specifier: workspace:* + version: link:../../packages/compliance '@maschina/db': specifier: workspace:* version: link:../../packages/db @@ -11415,38 +11418,38 @@ packages: tunnel-agent@0.6.0: resolution: {integrity: sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==} - turbo-darwin-64@2.8.14: - resolution: {integrity: sha512-9sFi7n2lLfEsGWi5OEoA/eTtQU2BPKtzSYKqufMtDeRmqMT9vKjbv9gJCRkllSVE9BOXA0qXC3diyX8V8rKIKw==} + turbo-darwin-64@2.8.17: + resolution: {integrity: sha512-ZFkv2hv7zHpAPEXBF6ouRRXshllOavYc+jjcrYyVHvxVTTwJWsBZwJ/gpPzmOKGvkSjsEyDO5V6aqqtZzwVF+Q==} cpu: [x64] os: [darwin] - turbo-darwin-arm64@2.8.14: - resolution: {integrity: 
sha512-aS4yJuy6A1PCLws+PJpZP0qCURG8Y5iVx13z/WAbKyeDTY6W6PiGgcEllSaeLGxyn++382ztN/EZH85n2zZ6VQ==} + turbo-darwin-arm64@2.8.17: + resolution: {integrity: sha512-5DXqhQUt24ycEryXDfMNKEkW5TBHs+QmU23a2qxXwwFDaJsWcPo2obEhBxxdEPOv7qmotjad+09RGeWCcJ9JDw==} cpu: [arm64] os: [darwin] - turbo-linux-64@2.8.14: - resolution: {integrity: sha512-XC6wPUDJkakjhNLaS0NrHDMiujRVjH+naEAwvKLArgqRaFkNxjmyNDRM4eu3soMMFmjym6NTxYaF74rvET+Orw==} + turbo-linux-64@2.8.17: + resolution: {integrity: sha512-KLUbz6w7F73D/Ihh51hVagrKR0/CTsPEbRkvXLXvoND014XJ4BCrQUqSxlQ4/hu+nqp1v5WlM85/h3ldeyujuA==} cpu: [x64] os: [linux] - turbo-linux-arm64@2.8.14: - resolution: {integrity: sha512-ChfE7isyVNjZrVSPDwcfqcHLG/FuIBbOFxnt1FM8vSuBGzHAs8AlTdwFNIxlEMJfZ8Ad9mdMxdmsCUPIWiQ6cg==} + turbo-linux-arm64@2.8.17: + resolution: {integrity: sha512-pJK67XcNJH40lTAjFu7s/rUlobgVXyB3A3lDoq+/JccB3hf+SysmkpR4Itlc93s8LEaFAI4mamhFuTV17Z6wOg==} cpu: [arm64] os: [linux] - turbo-windows-64@2.8.14: - resolution: {integrity: sha512-FTbIeQL1ycLFW2t9uQNMy+bRSzi3Xhwun/e7ZhFBdM+U0VZxxrtfYEBM9CHOejlfqomk6Jh7aRz0sJoqYn39Hg==} + turbo-windows-64@2.8.17: + resolution: {integrity: sha512-EijeQ6zszDMmGZLP2vT2RXTs/GVi9rM0zv2/G4rNu2SSRSGFapgZdxgW4b5zUYLVaSkzmkpWlGfPfj76SW9yUg==} cpu: [x64] os: [win32] - turbo-windows-arm64@2.8.14: - resolution: {integrity: sha512-KgZX12cTyhY030qS7ieT8zRkhZZE2VWJasDFVUSVVn17nR7IShpv68/7j5UqJNeRLIGF1XPK0phsP5V5yw3how==} + turbo-windows-arm64@2.8.17: + resolution: {integrity: sha512-crpfeMPkfECd4V1PQ/hMoiyVcOy04+bWedu/if89S15WhOalHZ2BYUi6DOJhZrszY+mTT99OwpOsj4wNfb/GHQ==} cpu: [arm64] os: [win32] - turbo@2.8.14: - resolution: {integrity: sha512-UCTxeMNYT1cKaHiIFdLCQ7ulI+jw5i5uOnJOrRXsgUD7G3+OjlUjwVd7JfeVt2McWSVGjYA3EVW/v1FSsJ5DtA==} + turbo@2.8.17: + resolution: {integrity: sha512-YwPsNSqU2f/RXU/+Kcb7cPkPZARxom4+me7LKEdN5jsvy2tpfze3zDZ4EiGrJnvOm9Avu9rK0aaYsP7qZ3iz7A==} hasBin: true tw-animate-css@1.4.0: @@ -18852,14 +18855,14 @@ snapshots: msw: 2.12.10(@types/node@22.19.15)(typescript@5.9.3) vite: 
7.3.1(@types/node@22.19.15)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/mocker@4.0.18(msw@2.12.10(@types/node@24.12.0)(typescript@5.9.3))(vite@7.3.1(@types/node@22.19.15)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(msw@2.12.10(@types/node@24.12.0)(typescript@5.9.3))(vite@7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.18 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: msw: 2.12.10(@types/node@24.12.0)(typescript@5.9.3) - vite: 7.3.1(@types/node@22.19.15)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2) + vite: 7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2) '@vitest/pretty-format@4.0.18': dependencies: @@ -24804,32 +24807,32 @@ snapshots: dependencies: safe-buffer: 5.2.1 - turbo-darwin-64@2.8.14: + turbo-darwin-64@2.8.17: optional: true - turbo-darwin-arm64@2.8.14: + turbo-darwin-arm64@2.8.17: optional: true - turbo-linux-64@2.8.14: + turbo-linux-64@2.8.17: optional: true - turbo-linux-arm64@2.8.14: + turbo-linux-arm64@2.8.17: optional: true - turbo-windows-64@2.8.14: + turbo-windows-64@2.8.17: optional: true - turbo-windows-arm64@2.8.14: + turbo-windows-arm64@2.8.17: optional: true - turbo@2.8.14: + turbo@2.8.17: optionalDependencies: - turbo-darwin-64: 2.8.14 - turbo-darwin-arm64: 2.8.14 - turbo-linux-64: 2.8.14 - turbo-linux-arm64: 2.8.14 - turbo-windows-64: 2.8.14 - turbo-windows-arm64: 2.8.14 + turbo-darwin-64: 2.8.17 + turbo-darwin-arm64: 2.8.17 + turbo-linux-64: 2.8.17 + turbo-linux-arm64: 2.8.17 + turbo-windows-64: 2.8.17 + turbo-windows-arm64: 2.8.17 tw-animate-css@1.4.0: {} @@ -25261,7 +25264,7 @@ snapshots: 
vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.31.1)(msw@2.12.10(@types/node@24.12.0)(typescript@5.9.3))(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2): dependencies: '@vitest/expect': 4.0.18 - '@vitest/mocker': 4.0.18(msw@2.12.10(@types/node@24.12.0)(typescript@5.9.3))(vite@7.3.1(@types/node@22.19.15)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/mocker': 4.0.18(msw@2.12.10(@types/node@24.12.0)(typescript@5.9.3))(vite@7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.31.1)(terser@5.46.0)(tsx@4.21.0)(yaml@2.8.2)) '@vitest/pretty-format': 4.0.18 '@vitest/runner': 4.0.18 '@vitest/snapshot': 4.0.18 diff --git a/services/api/package.json b/services/api/package.json index e66e946..4951e9e 100644 --- a/services/api/package.json +++ b/services/api/package.json @@ -25,8 +25,9 @@ "@maschina/plans": "workspace:*", "@maschina/telemetry": "workspace:*", "@maschina/usage": "workspace:*", - "@maschina/validation": "workspace:*", + "@maschina/compliance": "workspace:*", "@maschina/search": "workspace:*", + "@maschina/validation": "workspace:*", "@maschina/webhooks": "workspace:*", "dotenv": "^17.3.1", "drizzle-orm": "^0.39.3", diff --git a/services/api/src/app.ts b/services/api/src/app.ts index 1450088..efc5ae3 100644 --- a/services/api/src/app.ts +++ b/services/api/src/app.ts @@ -7,6 +7,7 @@ import { errorHandler, notFound } from "./middleware/error.js"; import agentRoutes from "./routes/agents.js"; import authRoutes from "./routes/auth.js"; import billingRoutes from "./routes/billing.js"; +import complianceRoutes from "./routes/compliance.js"; import healthRoutes from "./routes/health.js"; import keyRoutes from "./routes/keys.js"; import searchRoutes from "./routes/search.js"; @@ -32,6 +33,7 @@ export function createApp() { app.route("/billing", billingRoutes); app.route("/webhooks", webhookRoutes); app.route("/search", searchRoutes); + app.route("/compliance", complianceRoutes); // ─── Error handling 
─────────────────────────────────────────────────────── app.onError(errorHandler); diff --git a/services/api/src/routes/compliance.ts b/services/api/src/routes/compliance.ts new file mode 100644 index 0000000..60fa9e9 --- /dev/null +++ b/services/api/src/routes/compliance.ts @@ -0,0 +1,82 @@ +import { deleteUserData, getRetentionDays, queryAuditLogs, toCSV } from "@maschina/compliance"; +import { can } from "@maschina/plans"; +import { Hono } from "hono"; +import { HTTPException } from "hono/http-exception"; +import type { Variables } from "../context.js"; +import { requireAuth } from "../middleware/auth.js"; + +const app = new Hono<{ Variables: Variables }>(); + +app.use("*", requireAuth); + +// Gate all compliance routes to M10+ +app.use("*", async (c, next) => { + const user = c.get("user"); + if (!can.useCompliance(user.tier)) { + throw new HTTPException(403, { + message: "Compliance tools require the M10 plan or above.", + }); + } + return next(); +}); + +// GET /compliance/audit-log?from=&to=&action=&resource=&format=json|csv&limit=&offset= +app.get("/audit-log", async (c) => { + const { id: userId, tier } = c.get("user"); + + const fromStr = c.req.query("from"); + const toStr = c.req.query("to"); + const from = fromStr ? new Date(fromStr) : undefined; + const to = toStr ? new Date(toStr) : undefined; + const format = c.req.query("format") === "csv" ? "csv" : "json"; + const limit = Number(c.req.query("limit") ?? 100); + const offset = Number(c.req.query("offset") ?? 
0); + + if (from && Number.isNaN(from.getTime())) { + throw new HTTPException(400, { message: "Invalid 'from' date" }); + } + if (to && Number.isNaN(to.getTime())) { + throw new HTTPException(400, { message: "Invalid 'to' date" }); + } + + const { rows, total } = await queryAuditLogs({ + userId, + from, + to, + action: c.req.query("action"), + resource: c.req.query("resource"), + limit, + offset, + }); + + if (format === "csv") { + return c.body(toCSV(rows), 200, { + "Content-Type": "text/csv", + "Content-Disposition": `attachment; filename="audit-log-${new Date().toISOString().slice(0, 10)}.csv"`, + }); + } + + return c.json({ + data: rows, + total, + limit, + offset, + retentionDays: getRetentionDays(tier), + }); +}); + +// POST /compliance/gdpr/delete — anonymize and purge the requesting user's data +app.post("/gdpr/delete", async (c) => { + const { id: userId } = c.get("user"); + + const result = await deleteUserData(userId); + + return c.json({ + success: true, + agentsDeleted: result.agentsDeleted, + runsAnonymized: result.runsAnonymized, + message: "Your data has been anonymized. Account access will stop working within 60 seconds.", + }); +}); + +export default app;