diff --git a/providers/community-mozart/.gitignore b/providers/community-mozart/.gitignore new file mode 100644 index 0000000..9706f86 --- /dev/null +++ b/providers/community-mozart/.gitignore @@ -0,0 +1,6 @@ +node_modules/ +dist/ +.env +*.log +package-lock.json +data/ diff --git a/providers/community-mozart/README.md b/providers/community-mozart/README.md new file mode 100644 index 0000000..05cb698 --- /dev/null +++ b/providers/community-mozart/README.md @@ -0,0 +1,59 @@ +# community-mozart + +> Multi-provider AI orchestration for Handshake58. + +The only AI orchestration layer on Handshake58. Send one goal in plain English — Mozart plans it, routes it across the optimal mix of Bittensor-native and external providers, and returns one synthesized answer. Pay once for the whole workflow. + +**Bittensor-native providers get priority:** Chutes (SN22), Desearch (SN22), Numinous (SN6), Vericore. + +## Models + +| Model ID | Description | +|---|---| +| `mozart/auto` | Full auto-orchestration — planner decides everything | +| `mozart/plan` | Dry-run — returns the execution plan without running | +| `mozart/pipeline` | User-defined DAG pipeline — full control over steps | + +## Quick Start + +```bash +cp env.example .env +# fill in your keys +npm install && npm run dev +``` + +## Deploy on Railway + +1. Fork or clone this directory into your repo +2. Create a new Railway service, connect your repo +3. Set root directory to `/community-mozart` +4. Add environment variables from `env.example` +5. Add a Railway Volume at `/app/data` (required for voucher persistence) +6. Deploy + +## Example Request + +```json +{ + "mode": "auto", + "goal": "Find the latest news on Bittensor dTAO and write a concise briefing", + "budget_usd": 0.15 +} +``` + +Send as the `content` of a user message to `POST /v1/chat/completions` with `model: "mozart/auto"` and an `X-DRAIN-Voucher` header. 
+ +## Endpoints + +| Endpoint | Description | +|---|---| +| `GET /health` | Service health + provider address | +| `GET /v1/models` | List available models | +| `GET /v1/pricing` | Pricing details | +| `GET /v1/docs` | Full agent instructions (plain text) | +| `POST /v1/chat/completions` | Main inference endpoint | +| `POST /v1/admin/claim` | Manual USDC claim trigger | + +## Contact + +mozartorchestra@proton.com diff --git a/providers/community-mozart/env.example b/providers/community-mozart/env.example new file mode 100644 index 0000000..b7608cd --- /dev/null +++ b/providers/community-mozart/env.example @@ -0,0 +1,31 @@ +# === REQUIRED === +PROVIDER_PRIVATE_KEY=0xYOUR_POLYGON_PRIVATE_KEY +# OPENROUTER_API_KEY= # optional — Chutes is primary LLM +DESEARCH_API_KEY=your-desearch-key +CHUTES_API_KEY=your-chutes-key + +# === OPTIONAL UPSTREAM === +E2B_API_KEY= +REPLICATE_API_TOKEN= + +# === DRAIN / BLOCKCHAIN === +POLYGON_RPC_URL=https://polygon-mainnet.g.alchemy.com/v2/YOUR_KEY +CHAIN_ID=137 +CLAIM_THRESHOLD=10000000 +AUTO_CLAIM_INTERVAL_MINUTES=10 +AUTO_CLAIM_BUFFER_SECONDS=3600 +STORAGE_PATH=./data/vouchers.json + +# === SERVER === +PORT=3000 +HOST=0.0.0.0 +PROVIDER_NAME=Mozart + +# === PRICING === +MARKUP_PERCENT=30 + +# === OPTIONAL === +ADMIN_PASSWORD= +MAX_PLAN_STEPS=6 +PLANNER_MODEL=deepseek-ai/DeepSeek-V3-0324-TEE +SYNTHESIZER_MODEL=deepseek-ai/DeepSeek-V3-0324-TEE diff --git a/providers/community-mozart/package.json b/providers/community-mozart/package.json new file mode 100644 index 0000000..32cb32b --- /dev/null +++ b/providers/community-mozart/package.json @@ -0,0 +1,28 @@ +{ + "name": "@handshake58/community-mozart", + "version": "0.1.0", + "description": "Multi-provider AI orchestration for Handshake58. 
Send a goal, Mozart plans and executes it across Bittensor-native and external providers.", + "type": "module", + "main": "dist/index.js", + "scripts": { + "build": "tsc", + "start": "node dist/index.js", + "dev": "tsx watch src/index.ts" + }, + "dependencies": { + "cors": "^2.8.5", + "dotenv": "^16.0.0", + "express": "^4.18.0", + "openai": "^4.0.0", + "viem": "^2.0.0" + }, + "devDependencies": { + "@types/cors": "^2.8.0", + "@types/express": "^4.17.0", + "tsx": "^4.0.0", + "typescript": "^5.0.0" + }, + "keywords": ["bittensor", "handshake58", "drain", "orchestration", "ai"], + "author": "mozartorchestra@proton.com", + "license": "MIT" +} diff --git a/providers/community-mozart/railway.json b/providers/community-mozart/railway.json new file mode 100644 index 0000000..d819ac4 --- /dev/null +++ b/providers/community-mozart/railway.json @@ -0,0 +1,14 @@ +{ + "$schema": "https://railway.app/railway.schema.json", + "build": { + "builder": "NIXPACKS", + "buildCommand": "npm install && npm run build" + }, + "deploy": { + "startCommand": "npm start", + "healthcheckPath": "/health", + "healthcheckTimeout": 30, + "restartPolicyType": "ON_FAILURE", + "restartPolicyMaxRetries": 3 + } +} diff --git a/providers/community-mozart/src/constants.ts b/providers/community-mozart/src/constants.ts new file mode 100644 index 0000000..5b4ada3 --- /dev/null +++ b/providers/community-mozart/src/constants.ts @@ -0,0 +1,159 @@ +// ─── DRAIN protocol constants (DO NOT MODIFY) ──────────────────────────────── + +export const DRAIN_ADDRESSES: Record<number, string> = { + 137: '0x0C2B3aA1e80629D572b1f200e6DF3586B3946A8A', + 80002: '0x61f1C1E04d6Da1C92D0aF1a3d7Dc0fEFc8794d7C', +}; + +export const USDC_DECIMALS = 6; + +export const EIP712_DOMAIN = { + name: 'DrainChannel', + version: '1', +} as const; + +export const DRAIN_CHANNEL_ABI = [ + { + inputs: [{ name: 'channelId', type: 'bytes32' }], + name: 'getChannel', + outputs: [{ + components: [ + { name: 'consumer', type: 'address' }, + { name: 'provider', type: 
'address' }, + { name: 'deposit', type: 'uint256' }, + { name: 'claimed', type: 'uint256' }, + { name: 'expiry', type: 'uint256' }, + ], + name: '', type: 'tuple', + }], + stateMutability: 'view', + type: 'function', + }, + { + inputs: [{ name: 'channelId', type: 'bytes32' }], + name: 'getBalance', + outputs: [{ name: '', type: 'uint256' }], + stateMutability: 'view', + type: 'function', + }, + { + inputs: [ + { name: 'channelId', type: 'bytes32' }, + { name: 'amount', type: 'uint256' }, + { name: 'nonce', type: 'uint256' }, + { name: 'signature', type: 'bytes' }, + ], + name: 'claim', + outputs: [], + stateMutability: 'nonpayable', + type: 'function', + }, + { + inputs: [ + { name: 'channelId', type: 'bytes32' }, + { name: 'finalAmount', type: 'uint256' }, + { name: 'providerSignature', type: 'bytes' }, + ], + name: 'cooperativeClose', + outputs: [], + stateMutability: 'nonpayable', + type: 'function', + }, + { inputs: [], name: 'InvalidAmount', type: 'error' }, + { inputs: [], name: 'ChannelNotFound', type: 'error' }, + { inputs: [], name: 'InvalidSignature', type: 'error' }, + { inputs: [], name: 'NotProvider', type: 'error' }, + { inputs: [], name: 'NotExpired', type: 'error' }, + { + anonymous: false, + inputs: [ + { indexed: true, name: 'channelId', type: 'bytes32' }, + { indexed: true, name: 'provider', type: 'address' }, + { indexed: false, name: 'amount', type: 'uint256' }, + ], + name: 'ChannelClaimed', + type: 'event', + }, +] as const; + +export const PERMANENT_CLAIM_ERRORS = [ + 'InvalidAmount', + 'ChannelNotFound', + 'InvalidSignature', + 'NotProvider', + 'NotExpired', +] as const; + +// ─── Mozart pricing ─────────────────────────────────────────────────────────── + +export const MOZART_BASE_FEE_USDC = 5_000n; // $0.005 per orchestration +export const MOZART_PLAN_FEE_USDC = 10_000n; // $0.010 for plan-only + +// ─── Upstream provider base URLs ────────────────────────────────────────────── + +export const UPSTREAM = { + chutes: { base: 
'https://llm.chutes.ai/v1' }, + openrouter: { base: 'https://openrouter.ai/api/v1' }, + desearch: { base: 'https://api.desearch.ai' }, + numinous: { base: 'https://api.numinous.ai', forecast: 'https://api.numinous.ai/v1/predictions' }, + vericore: { base: 'https://api.vericore.ai' }, + e2b: { base: 'https://api.e2b.dev' }, + replicate: { base: 'https://api.replicate.com/v1' }, +} as const; + +// ─── Per-provider cost estimates (USDC micro, 6 decimals) ───────────────────── + +export const PROVIDER_COST_ESTIMATES: Record<string, bigint> = { + chutes: 20_000n, // $0.020 + openrouter: 30_000n, // $0.030 + desearch: 5_000n, // $0.005 + numinous: 10_000n, // $0.010 + vericore: 5_000n, // $0.005 + e2b: 50_000n, // $0.050 + replicate: 40_000n, // $0.040 +}; + +// ─── Planner system prompt ──────────────────────────────────────────────────── + +export const PLANNER_SYSTEM_PROMPT = `You are an AI orchestration planner. Given a user goal, produce a minimal JSON execution plan. + +Return ONLY valid JSON matching this schema (no markdown, no explanation): +{ + "goal": "<string>", + "reasoning": "<string>", + "estimated_total_cost_usd": <number>, + "steps": [ + { + "id": "step_1", + "provider": "chutes" | "desearch" | "numinous" | "vericore" | "e2b" | "replicate", + "model": "<string>", + "task": "<string>", + "input_from": ["step_id", ...], + "parallel": true, + "required": true, + "estimated_cost_usd": <number> + } + ] +} + +Rules: +- Use chutes for LLM tasks (model: "deepseek-ai/DeepSeek-V3-0324-TEE") +- Use desearch for web search, news, Twitter queries +- Use numinous for forecasting/prediction tasks +- Use vericore for fact-checking +- ONLY use providers: chutes, desearch, numinous, vericore, e2b, replicate +- NEVER use openrouter — it is not available +- Keep plans minimal — 1-4 steps for most goals +- Set input_from to depend on prior step IDs when output chaining is needed +- parallel: true means this step can run in parallel with other same-wave steps`; + +// ─── Synthesizer system prompt 
──────────────────────────────── + +export const SYNTHESIZER_SYSTEM_PROMPT = `You are a synthesis AI. You receive outputs from multiple AI agents that each worked on part of a goal. +Your job is to merge their outputs into one clean, comprehensive answer. + +- Integrate all relevant information naturally +- Resolve any contradictions by noting them +- Keep the response focused on the original goal +- Write in clear, direct prose +- Do not mention the internal step structure or provider names`; diff --git a/providers/community-mozart/src/drain.ts b/providers/community-mozart/src/drain.ts new file mode 100644 index 0000000..a88fff6 --- /dev/null +++ b/providers/community-mozart/src/drain.ts @@ -0,0 +1,209 @@ +/** + * DrainService — identical pattern to all HS58 providers. + * Copy of openrouter drain.ts with Orchestra's ProviderConfig. + */ + +import { + createPublicClient, + createWalletClient, + http, + verifyTypedData, + type Hash, + type Hex, + type Address, +} from 'viem'; +import { polygon, polygonAmoy } from 'viem/chains'; +import { privateKeyToAccount } from 'viem/accounts'; +import { + DRAIN_ADDRESSES, + DRAIN_CHANNEL_ABI, + EIP712_DOMAIN, + PERMANENT_CLAIM_ERRORS, +} from './constants.js'; +import type { ProviderConfig, VoucherHeader, StoredVoucher, ChannelState } from './types.js'; +import { VoucherStorage } from './storage.js'; + +export class DrainService { + private config: ProviderConfig; + private storage: VoucherStorage; + private publicClient: any; + private walletClient: any; + private account: any; + private contractAddress: Address; + private autoClaimInterval: ReturnType<typeof setInterval> | null = null; + + constructor(config: ProviderConfig, storage: VoucherStorage) { + this.config = config; + this.storage = storage; + + const chain = config.chainId === 137 ? 
polygon : polygonAmoy; + const rpcUrl = config.polygonRpcUrl; + + this.publicClient = createPublicClient({ chain, transport: http(rpcUrl) }); + this.account = privateKeyToAccount(config.providerPrivateKey); + this.walletClient = createWalletClient({ account: this.account, chain, transport: http(rpcUrl) }); + + if (rpcUrl) { + console.log(`[drain] RPC: ${rpcUrl.replace(/\/[^/]{8,}$/, '/***')}`); + } else { + console.warn('[drain] No POLYGON_RPC_URL — using public RPC (rate-limited)'); + } + + this.contractAddress = DRAIN_ADDRESSES[config.chainId] as Address; + } + + getProviderAddress(): Address { return this.account.address; } + + parseVoucherHeader(header: string): VoucherHeader | null { + try { + const p = JSON.parse(header); + if (!p.channelId || !p.amount || !p.nonce || !p.signature) return null; + return { channelId: p.channelId, amount: p.amount, nonce: p.nonce, signature: p.signature }; + } catch { return null; } + } + + async validateVoucher( + voucher: VoucherHeader, + requiredAmount: bigint + ): Promise<{ valid: boolean; error?: string; channel?: ChannelState }> { + try { + const amount = BigInt(voucher.amount); + const nonce = BigInt(voucher.nonce); + + const channelData = await this.publicClient.readContract({ + address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, + functionName: 'getChannel', args: [voucher.channelId], + }) as any; + + if (channelData.consumer === '0x0000000000000000000000000000000000000000') + return { valid: false, error: 'channel_not_found' }; + + if (channelData.provider.toLowerCase() !== this.account.address.toLowerCase()) + return { valid: false, error: 'wrong_provider' }; + + let channelState = this.storage.getChannel(voucher.channelId); + if (!channelState) { + channelState = { + channelId: voucher.channelId, consumer: channelData.consumer, + deposit: channelData.deposit, totalCharged: 0n, + expiry: Number(channelData.expiry), createdAt: Date.now(), lastActivityAt: Date.now(), + }; + } else if (!channelState.expiry) { + 
channelState.expiry = Number(channelData.expiry); + } + + const expectedTotal = channelState.totalCharged + requiredAmount; + if (amount < expectedTotal) return { valid: false, error: 'insufficient_funds', channel: channelState }; + if (amount > channelData.deposit) return { valid: false, error: 'exceeds_deposit', channel: channelState }; + if (channelState.lastVoucher && nonce <= channelState.lastVoucher.nonce) + return { valid: false, error: 'invalid_nonce', channel: channelState }; + + const isValid = await verifyTypedData({ + address: channelData.consumer, + domain: { name: EIP712_DOMAIN.name, version: EIP712_DOMAIN.version, chainId: this.config.chainId, verifyingContract: this.contractAddress }, + types: { Voucher: [{ name: 'channelId', type: 'bytes32' }, { name: 'amount', type: 'uint256' }, { name: 'nonce', type: 'uint256' }] }, + primaryType: 'Voucher', + message: { channelId: voucher.channelId, amount, nonce }, + signature: voucher.signature, + }); + + if (!isValid) return { valid: false, error: 'invalid_signature' }; + return { valid: true, channel: channelState }; + } catch (e: any) { + return { valid: false, error: e?.message ?? 
'validation_error' }; + } + } + + storeVoucher(voucher: VoucherHeader, channelState: ChannelState, cost: bigint): void { + const stored: StoredVoucher = { + channelId: voucher.channelId, amount: BigInt(voucher.amount), nonce: BigInt(voucher.nonce), + signature: voucher.signature, consumer: channelState.consumer, receivedAt: Date.now(), claimed: false, + }; + channelState.totalCharged += cost; + channelState.lastVoucher = stored; + channelState.lastActivityAt = Date.now(); + this.storage.storeVoucher(stored); + this.storage.updateChannel(voucher.channelId, channelState); + } + + async getChannelBalance(channelId: Hash): Promise<bigint> { + return await this.publicClient.readContract({ + address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, + functionName: 'getBalance', args: [channelId], + }) as bigint; + } + + async claimPayments(forceAll = false): Promise<Hash[]> { + const txHashes: Hash[] = []; + for (const [channelId, voucher] of this.storage.getHighestVoucherPerChannel()) { + if (!forceAll && voucher.amount < this.config.claimThreshold) continue; + try { + const balance = await this.getChannelBalance(voucher.channelId); + if (balance === 0n) { this.storage.markClaimed(channelId, '0x0' as Hash); continue; } + } catch {} + try { + const hash = await this.walletClient.writeContract({ + address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, + functionName: 'claim', args: [voucher.channelId, voucher.amount, voucher.nonce, voucher.signature], + }); + this.storage.markClaimed(channelId, hash); + txHashes.push(hash); + console.log(`[drain] Claimed ${voucher.amount} from ${channelId}: ${hash}`); + } catch (e: any) { this.handleClaimError('claim', channelId, e); } + } + return txHashes; + } + + async claimExpiring(bufferSeconds = 3600): Promise<Hash[]> { + const txHashes: Hash[] = []; + const now = Math.floor(Date.now() / 1000); + for (const [channelId, voucher] of this.storage.getHighestVoucherPerChannel()) { + const ch = this.storage.getChannel(channelId); + if (!ch?.expiry || ch.expiry - 
now > bufferSeconds || voucher.amount <= 0n) continue; + try { + const balance = await this.getChannelBalance(voucher.channelId); + if (balance === 0n) { this.storage.markClaimed(channelId, '0x0' as Hash); continue; } + } catch {} + try { + const hash = await this.walletClient.writeContract({ + address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, + functionName: 'claim', args: [voucher.channelId, voucher.amount, voucher.nonce, voucher.signature], + }); + this.storage.markClaimed(channelId, hash); + txHashes.push(hash); + } catch (e: any) { this.handleClaimError('auto-claim', channelId, e); } + } + return txHashes; + } + + startAutoClaim(intervalMinutes = 10, bufferSeconds = 3600): void { + if (this.autoClaimInterval) return; + console.log(`[auto-claim] Every ${intervalMinutes}min, buffer ${bufferSeconds / 60}min`); + this.autoClaimInterval = setInterval(async () => { + try { await this.claimExpiring(bufferSeconds); } catch (e) { console.error('[auto-claim]', e); } + }, intervalMinutes * 60_000); + this.claimExpiring(bufferSeconds).catch(console.error); + } + + async signCloseAuthorization(channelId: Hash): Promise<{ finalAmount: bigint; signature: Hex }> { + const highest = this.storage.getHighestVoucherPerChannel().get(channelId); + const finalAmount = highest ? 
highest.amount : 0n; + const signature = await this.walletClient.signTypedData({ + domain: { name: EIP712_DOMAIN.name, version: EIP712_DOMAIN.version, chainId: this.config.chainId, verifyingContract: this.contractAddress }, + types: { CloseAuthorization: [{ name: 'channelId', type: 'bytes32' }, { name: 'finalAmount', type: 'uint256' }] }, + primaryType: 'CloseAuthorization', + message: { channelId, finalAmount }, + }); + return { finalAmount, signature }; + } + + private handleClaimError(ctx: string, channelId: string, error: any): void { + const errorName = error?.cause?.data?.errorName || error?.cause?.reason; + if (errorName && PERMANENT_CLAIM_ERRORS.includes(errorName as any)) { + console.error(`[${ctx}] ${channelId}: ${errorName} (permanent, marking failed)`); + this.storage.markClaimed(channelId as Hash, '0x0' as Hash); + } else { + console.error(`[${ctx}] ${channelId}: ${error?.shortMessage || error?.message} (will retry)`); + } + } +} diff --git a/providers/community-mozart/src/executor.ts b/providers/community-mozart/src/executor.ts new file mode 100644 index 0000000..7f6624b --- /dev/null +++ b/providers/community-mozart/src/executor.ts @@ -0,0 +1,250 @@ +/** + * Orchestra Executor + * + * Runs individual plan steps against upstream providers. + * Each executor method is self-contained — it knows how to talk to its provider, + * format the task, and return a clean string output. 
+ */ + +import type { PlanStep, StepResult, ProviderConfig } from './types.js'; +import { UPSTREAM } from './constants.js'; + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +async function fetchJSON(url: string, init: RequestInit, timeoutMs = 30_000): Promise<any> { + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), timeoutMs); + try { + const res = await fetch(url, { ...init, signal: controller.signal }); + if (!res.ok) { + const text = await res.text().catch(() => ''); + throw new Error(`HTTP ${res.status}: ${text.slice(0, 200)}`); + } + return res.json(); + } finally { + clearTimeout(timer); + } +} + +async function llmChat( + baseURL: string, + apiKey: string, + model: string, + messages: { role: string; content: string }[], + timeoutMs = 60_000 +): Promise<string> { + const data = await fetchJSON(`${baseURL}/chat/completions`, { + method: 'POST', + headers: { Authorization: `Bearer ${apiKey}`, 'Content-Type': 'application/json' }, + body: JSON.stringify({ model, messages, max_tokens: 2048, temperature: 0.3 }), + }, timeoutMs); + return data.choices?.[0]?.message?.content ?? ''; +} + +// ─── Provider executors ─────────────────────────────────────────────────────── + +async function runChutes(step: PlanStep, config: ProviderConfig, context: string): Promise<string> { + return llmChat( + UPSTREAM.chutes.base, config.chutesApiKey, step.model, + [{ role: 'user', content: context ? `Context:\n${context}\n\nTask: ${step.task}` : step.task }], + 90_000 + ); +} + +async function runOpenRouter(step: PlanStep, config: ProviderConfig, context: string): Promise<string> { + if (!config.openrouterApiKey) { + // Fall back to chutes if openrouter key not configured + return runChutes(step, config, context); + } + return llmChat( + UPSTREAM.openrouter.base, config.openrouterApiKey, step.model, + [{ role: 'user', content: context ? 
`Context:\n${context}\n\nTask: ${step.task}` : step.task }], + 90_000 + ); +} + +async function runDesearch(step: PlanStep, config: ProviderConfig, _context: string): Promise<string> { + // model encodes the endpoint type: "ai-search", "web", "twitter", "crawl" + const endpoint = step.model.replace('desearch/', ''); + const query = step.task; + + let url: string; + let body: any; + let method: 'GET' | 'POST' = 'POST'; + + switch (endpoint) { + case 'ai-search': + url = `${UPSTREAM.desearch.base}/desearch/ai/search`; + body = { prompt: query, tools: ['web', 'hackernews', 'reddit'], count: 10, streaming: false, result_type: 'LINKS_WITH_FINAL_SUMMARY' }; + break; + case 'web': + url = `${UPSTREAM.desearch.base}/web?query=${encodeURIComponent(query)}`; + method = 'GET'; + body = undefined; + break; + case 'twitter': + url = `${UPSTREAM.desearch.base}/desearch/ai/search/links/twitter`; + body = { prompt: query, count: 10 }; + break; + case 'crawl': + url = `${UPSTREAM.desearch.base}/web/crawl?url=${encodeURIComponent(query)}`; + method = 'GET'; + body = undefined; + break; + default: + url = `${UPSTREAM.desearch.base}/desearch/ai/search`; + body = { prompt: query, tools: ['web'], count: 10, streaming: false, result_type: 'LINKS_WITH_FINAL_SUMMARY' }; + } + + const data = await fetchJSON(url, { + method, + headers: { Authorization: config.desearchApiKey, 'Content-Type': 'application/json' }, + body: body ? 
JSON.stringify(body) : undefined, + }, 30_000); + + // Extract meaningful text from Desearch responses + if (data.text) return data.text; + if (data.results) return JSON.stringify(data.results).slice(0, 3000); + if (data.organic_results) return data.organic_results.slice(0, 5).map((r: any) => `${r.title}: ${r.snippet}`).join('\n'); + return JSON.stringify(data).slice(0, 3000); +} + +async function runE2B(step: PlanStep, config: ProviderConfig, context: string): Promise<string> { + if (!config.e2bApiKey) throw new Error('E2B API key not configured'); + // task should be executable code + const code = context ? `# Context:\n# ${context.split('\n').join('\n# ')}\n\n${step.task}` : step.task; + + // Create sandbox and run + const sandbox = await fetchJSON(`${UPSTREAM.e2b.base}/sandboxes`, { + method: 'POST', + headers: { 'X-API-Key': config.e2bApiKey, 'Content-Type': 'application/json' }, + body: JSON.stringify({ template: 'base', timeout: 30 }), + }, 15_000); + + try { + const exec = await fetchJSON(`${UPSTREAM.e2b.base}/sandboxes/${sandbox.sandboxId}/process/start`, { + method: 'POST', + headers: { 'X-API-Key': config.e2bApiKey, 'Content-Type': 'application/json' }, + body: JSON.stringify({ cmd: 'python3', args: ['-c', code] }), + }, 35_000); + + return `stdout:\n${exec.stdout || ''}\nstderr:\n${exec.stderr || ''}`.slice(0, 3000); + } finally { + // Always kill sandbox + await fetch(`${UPSTREAM.e2b.base}/sandboxes/${sandbox.sandboxId}`, { + method: 'DELETE', + headers: { 'X-API-Key': config.e2bApiKey }, + }).catch(() => {}); + } +} + +async function runNuminous(step: PlanStep, config: ProviderConfig, _context: string): Promise<string> { + const job = await fetchJSON(UPSTREAM.numinous.forecast, { + method: 'POST', + headers: { Authorization: config.desearchApiKey, 'Content-Type': 'application/json' }, + body: JSON.stringify({ question: step.task }), + }, 15_000); + + const predictionId = job.prediction_id; + if (!predictionId) return JSON.stringify(job); + + // Poll up to 30s + for 
(let i = 0; i < 6; i++) { + await new Promise(r => setTimeout(r, 5000)); + const result = await fetchJSON(`${UPSTREAM.numinous.forecast}/${predictionId}`, { + headers: { Authorization: config.desearchApiKey }, + }, 10_000); + if (result.status === 'completed' || result.probability !== undefined) { + return `Probability: ${result.probability ?? 'N/A'}\nReasoning: ${result.reasoning ?? ''}\nSources: ${JSON.stringify(result.sources ?? []).slice(0, 500)}`; + } + } + return `Forecast pending (id: ${predictionId})`; +} + +async function runReplicate(step: PlanStep, config: ProviderConfig, _context: string): Promise<string> { + if (!config.replicateApiToken) throw new Error('Replicate API token not configured'); + const prediction = await fetchJSON(`${UPSTREAM.replicate.base}/predictions`, { + method: 'POST', + headers: { Authorization: `Bearer ${config.replicateApiToken}`, 'Content-Type': 'application/json' }, + body: JSON.stringify({ version: step.model, input: { prompt: step.task } }), + }, 15_000); + + // Poll for result + let result = prediction; + for (let i = 0; i < 12; i++) { + if (result.status === 'succeeded') break; + if (result.status === 'failed') throw new Error(result.error ?? 'Replicate failed'); + await new Promise(r => setTimeout(r, 5000)); + result = await fetchJSON(result.urls.get, { + headers: { Authorization: `Bearer ${config.replicateApiToken}` }, + }, 10_000); + } + + const output = result.output; + if (Array.isArray(output)) return output.join('\n'); + return String(output ?? 'No output'); +} + +async function runVericore(step: PlanStep, config: ProviderConfig, _context: string): Promise<string> { + // Vericore claim verification + const data = await fetchJSON(`${UPSTREAM.vericore.base}/v1/verify`, { + method: 'POST', + headers: { Authorization: `Bearer ${config.desearchApiKey}`, 'Content-Type': 'application/json' }, + body: JSON.stringify({ claim: step.task }), + }, 30_000); + return `Verdict: ${data.verdict ?? 'unknown'}\nScore: ${data.score ?? 
'N/A'}\nEvidence: ${JSON.stringify(data.evidence ?? []).slice(0, 1000)}`; +} + +// ─── Main dispatcher ────────────────────────────────────────────────────────── + +export async function executeStep( + step: PlanStep, + config: ProviderConfig, + priorOutputs: Map<string, string> +): Promise<StepResult> { + const start = Date.now(); + + // Build context from declared dependencies + const contextParts: string[] = []; + for (const depId of (step.input_from ?? [])) { + const depOutput = priorOutputs.get(depId); + if (depOutput) contextParts.push(`[${depId}]: ${depOutput}`); + } + const context = contextParts.join('\n\n'); + + try { + let output: string; + + switch (step.provider) { + case 'chutes': output = await runChutes(step, config, context); break; + case 'openrouter': output = await runOpenRouter(step, config, context); break; + case 'desearch': output = await runDesearch(step, config, context); break; + case 'e2b': output = await runE2B(step, config, context); break; + case 'numinous': output = await runNuminous(step, config, context); break; + case 'replicate': output = await runReplicate(step, config, context); break; + case 'vericore': output = await runVericore(step, config, context); break; + default: + throw new Error(`Unknown provider: ${(step as any).provider}`); + } + + return { + step_id: step.id, + provider: step.provider, + model: step.model, + status: 'done', + output: output.slice(0, 8000), // cap per-step output + cost_usd: step.estimated_cost_usd, + duration_ms: Date.now() - start, + }; + } catch (err: any) { + return { + step_id: step.id, + provider: step.provider, + model: step.model, + status: 'failed', + error: err?.message ?? 
String(err), + cost_usd: 0, + duration_ms: Date.now() - start, + }; + } +} diff --git a/providers/community-mozart/src/index.ts b/providers/community-mozart/src/index.ts new file mode 100644 index 0000000..05371b9 --- /dev/null +++ b/providers/community-mozart/src/index.ts @@ -0,0 +1,523 @@ +/** + * Mozart Provider + * + * The only AI orchestration layer on Handshake58. + * + * Every other provider does one thing. Orchestra does all of them — + * intelligently. Send a goal in plain English. Orchestra plans it, + * fans out to the right providers in parallel, and returns one + * synthesized answer. Pay once, get everything. + * + * Modes: + * auto — Planner decides which providers to use and in what order + * plan — Dry-run: returns the execution plan without running it + * pipeline — You define explicit steps (full control, DAG execution) + * + * Bittensor-native providers get priority: Chutes (SN22), Desearch (SN22), + * Numinous (SN6), Vericore. Falls back to OpenRouter only when needed. + * + * model IDs: + * orchestra/auto — Full auto-orchestration + * orchestra/plan — Plan-only dry run + * orchestra/pipeline — User-defined pipeline + * + * Input format: JSON in last user message matching MozartRequest schema. + * See GET /v1/docs for full reference. 
+ */ + +import express from 'express'; +import cors from 'cors'; +import { config as dotenv } from 'dotenv'; +import { formatUnits } from 'viem'; +import type { Hex } from 'viem'; +import { DrainService } from './drain.js'; +import { VoucherStorage } from './storage.js'; +import { orchestrate } from './planner.js'; +import { + MOZART_BASE_FEE_USDC, + MOZART_PLAN_FEE_USDC, + DRAIN_ADDRESSES, + PROVIDER_COST_ESTIMATES, +} from './constants.js'; +import type { ProviderConfig, MozartRequest, MozartStreamEvent, ChannelState, VoucherHeader } from './types.js'; + +dotenv(); + +// ─── Config ─────────────────────────────────────────────────────────────────── + +function requireEnv(k: string): string { + const v = process.env[k]; + if (!v) { console.error(`[config] Missing required env: ${k}`); return `MISSING_${k}`; } + return v; +} +function optEnv(k: string, d: string): string { return process.env[k] ?? d; } + +const chainId = (parseInt(optEnv('CHAIN_ID', '137'))) as 137 | 80002; + +const config: ProviderConfig = { + port: parseInt(optEnv('PORT', '3000')), + host: optEnv('HOST', '0.0.0.0'), + chainId, + providerPrivateKey: requireEnv('PROVIDER_PRIVATE_KEY') as Hex, + polygonRpcUrl: process.env.POLYGON_RPC_URL, + claimThreshold: BigInt(optEnv('CLAIM_THRESHOLD', '10000000')), + storagePath: optEnv('STORAGE_PATH', '/app/data/vouchers.json'), + providerName: optEnv('PROVIDER_NAME', 'Mozart'), + autoClaimIntervalMinutes: parseInt(optEnv('AUTO_CLAIM_INTERVAL_MINUTES', '10')), + autoClaimBufferSeconds: parseInt(optEnv('AUTO_CLAIM_BUFFER_SECONDS', '3600')), + // Upstream keys + openrouterApiKey: optEnv('OPENROUTER_API_KEY', ''), + desearchApiKey: requireEnv('DESEARCH_API_KEY'), + chutesApiKey: requireEnv('CHUTES_API_KEY'), + e2bApiKey: process.env.E2B_API_KEY, + replicateApiToken: process.env.REPLICATE_API_TOKEN, + // Orchestra + markupMultiplier: 1 + parseInt(optEnv('MARKUP_PERCENT', '30')) / 100, + maxPlanSteps: parseInt(optEnv('MAX_PLAN_STEPS', '6')), + plannerModel: 
optEnv('PLANNER_MODEL', 'deepseek-ai/DeepSeek-V3-0324-TEE'), + synthesizerModel: optEnv('SYNTHESIZER_MODEL', 'deepseek-ai/DeepSeek-V3-0324-TEE'), +}; + +// ─── Services ──────────────────────────────────────────────────────────────── + +const storage = new VoucherStorage(config.storagePath); +const drainService = new DrainService(config, storage); + +const app = express(); +app.use(cors()); +app.use(express.json({ limit: '512kb' })); + +// ─── Payment middleware ─────────────────────────────────────────────────────── + +function paymentHeaders() { + return { + 'X-Payment-Protocol': 'drain-v2', + 'X-Payment-Provider': drainService.getProviderAddress(), + 'X-Payment-Contract': DRAIN_ADDRESSES[chainId], + 'X-Payment-Chain': String(chainId), + 'X-Payment-Docs': '/v1/docs', + }; +} + +async function requirePayment( + req: express.Request, + res: express.Response, + minCost: bigint +): Promise<{ voucher: VoucherHeader; channel: ChannelState | undefined } | null> { + const header = req.headers['x-drain-voucher'] as string | undefined; + if (!header) { + res.status(402).set({ ...paymentHeaders(), 'X-DRAIN-Error': 'voucher_required' }).json({ + error: { message: 'X-DRAIN-Voucher header required', code: 'voucher_required' }, + }); + return null; + } + + const voucher = drainService.parseVoucherHeader(header); + if (!voucher) { + res.status(402).json({ error: { message: 'Invalid voucher format', code: 'invalid_voucher' } }); + return null; + } + + const validation = await drainService.validateVoucher(voucher, minCost); + if (!validation.valid) { + res.status(402).set({ + 'X-DRAIN-Error': validation.error!, + 'X-DRAIN-Required': minCost.toString(), + }).json({ error: { message: `Payment error: ${validation.error}`, code: validation.error } }); + return null; + } + + return { voucher, channel: validation.channel }; +} + +// ─── GET /health ───────────────────────────────────────────────────────────── + +app.get('/health', (_req, res) => { + res.json({ + status: 'ok', + 
provider: drainService.getProviderAddress(), + providerName: config.providerName, + chainId: config.chainId, + models: 3, + modes: ['auto', 'plan', 'pipeline'], + bittensor_native: ['chutes', 'desearch', 'numinous', 'vericore'], + bittensor_native_providers: ['chutes', 'desearch', 'numinous', 'vericore'], + available_providers: ['chutes', 'openrouter', 'desearch', 'e2b', 'numinous', 'vericore', 'replicate'], + }); +}); + +// ─── GET /v1/models ─────────────────────────────────────────────────────────── + +app.get('/v1/models', (_req, res) => { + res.json({ + object: 'list', + data: [ + { + id: 'orchestra/auto', + object: 'model', + created: 1742000000, + owned_by: 'mozart', + context_length: 128000, + description: 'Full auto-orchestration. Send a goal, Orchestra plans and executes it using the optimal mix of providers.', + modes: ['auto'], + }, + { + id: 'orchestra/plan', + object: 'model', + created: 1742000000, + owned_by: 'mozart', + context_length: 128000, + description: 'Dry-run planning only. Returns the execution plan without running it. Use to preview costs and steps.', + modes: ['plan'], + }, + { + id: 'orchestra/pipeline', + object: 'model', + created: 1742000000, + owned_by: 'mozart', + context_length: 128000, + description: 'User-defined pipeline. You specify exact steps and provider routing. 
Orchestra executes with DAG scheduling.', + modes: ['pipeline'], + }, + ], + }); +}); + +// ─── GET /v1/pricing ────────────────────────────────────────────────────────── + +app.get('/v1/pricing', (_req, res) => { + const markup = Math.round((config.markupMultiplier - 1) * 100); + res.json({ + provider: drainService.getProviderAddress(), + providerName: config.providerName, + chainId: config.chainId, + currency: 'USDC', + markup: `${markup}%`, + models: { + 'orchestra/auto': { inputPer1kTokens: '0.005', outputPer1kTokens: '0.005' }, + 'orchestra/plan': { inputPer1kTokens: '0.010', outputPer1kTokens: '0.010' }, + 'orchestra/pipeline': { inputPer1kTokens: '0.005', outputPer1kTokens: '0.005' }, + }, + decimals: 6, + fees: { + base_coordination: `$${formatUnits(MOZART_BASE_FEE_USDC, 6)} per orchestration`, + plan_only: `$${formatUnits(MOZART_PLAN_FEE_USDC, 6)} per plan`, + }, + provider_cost_estimates: Object.fromEntries( + Object.entries(PROVIDER_COST_ESTIMATES).map(([k, v]) => [k, `$${formatUnits(v, 6)}`]) + ), + note: 'Total cost = base fee + sum of executed step costs. Use mode:"plan" to preview before running.', + }); +}); + +// ─── GET /v1/docs ───────────────────────────────────────────────────────────── + +app.get('/v1/docs', (_req, res) => { + const markup = Math.round((config.markupMultiplier - 1) * 100); + res.type('text/plain').send(`# ${config.providerName} — Agent Instructions + +The only AI orchestration layer on Handshake58. + +Send one goal. Orchestra plans it, runs it across multiple providers in parallel, +and returns one synthesized answer. You pay once for the whole workflow. + +Bittensor-native providers are prioritised: Chutes (SN22), Desearch (SN22), Numinous (SN6), Vericore. 
+ +════════════════════════════════════════ + +## How to call via DRAIN + +model: "orchestra/auto" | "orchestra/plan" | "orchestra/pipeline" +messages: [{ "role": "user", "content": "" }] + +## Request schema + +{ + "mode": "auto" | "plan" | "pipeline", + "goal": "", + "context": "", + "budget_usd": 0.10, // optional spend cap (default 0.10) + "providers": ["chutes","desearch"], // optional whitelist + "stream": false, // set true to get SSE progress events + "steps": [...] // required for mode:"pipeline" only +} + +## Modes + +### mode: "auto" (recommended) +The planner (DeepSeek R1) reads your goal and decides: + - which providers to use + - what to ask each one + - which steps can run in parallel + - how to feed outputs between steps +Then the synthesizer (DeepSeek V3) merges all outputs into one answer. + +### mode: "plan" +Returns the execution plan WITHOUT running it. Zero cost beyond plan fee. +Use this to preview what Orchestra would do before committing budget. + +### mode: "pipeline" +You define the steps explicitly. Full control over provider routing. +Steps run in topological order — outputs flow between steps via input_from. 
+ +Pipeline step schema: +{ + "id": "step_1", + "provider": "chutes" | "openrouter" | "desearch" | "e2b" | "numinous" | "vericore" | "replicate", + "model": "", + "task": "", + "input_from": ["step_1"], // pipe outputs from prior steps + "parallel": true, + "required": true, + "estimated_cost_usd": 0.01 +} + +════════════════════════════════════════ + +## Examples + +### Research + analysis +{ + "mode": "auto", + "goal": "Find the latest news on Bittensor dTAO, then write a concise briefing with probability estimate of TAO price doubling in 6 months", + "budget_usd": 0.15 +} + +### Fact-check a claim +{ + "mode": "auto", + "goal": "Verify: 'Bittensor has over 100 active subnets as of 2026'", + "providers": ["desearch", "vericore"] +} + +### Compute + summarize +{ + "mode": "pipeline", + "goal": "Fibonacci benchmark", + "steps": [ + { + "id": "step_1", "provider": "e2b", "model": "python", + "task": "import time; s=time.time(); fib=lambda n: n if n<2 else fib(n-1)+fib(n-2); print(fib(35)); print(f'Time: {time.time()-s:.2f}s')", + "input_from": [], "parallel": true, "required": true, "estimated_cost_usd": 0.02 + }, + { + "id": "step_2", "provider": "chutes", "model": "deepseek-ai/DeepSeek-V3-0324", + "task": "Interpret this benchmark result and explain what it tells us about the environment.", + "input_from": ["step_1"], "parallel": false, "required": true, "estimated_cost_usd": 0.01 + } + ] +} + +════════════════════════════════════════ + +## Pricing + +Base coordination fee: $${formatUnits(MOZART_BASE_FEE_USDC, 6)} USDC +Plan-only fee: $${formatUnits(MOZART_PLAN_FEE_USDC, 6)} USDC +Provider markup: ${markup}% +Use GET /v1/pricing for per-provider cost estimates. 
+ +## Available providers + +| Provider | Type | Bittensor native | +|------------|---------------------|------------------| +| chutes | LLM inference | ✓ SN22 | +| desearch | Web + Twitter search| ✓ SN22 | +| numinous | Probability forecast| ✓ SN6 | +| vericore | Claim verification | ✓ | +| openrouter | LLM inference | ✗ | +| e2b | Code execution | ✗ | +| replicate | Image/video/audio | ✗ | +`); +}); + +// ─── POST /v1/chat/completions ──────────────────────────────────────────────── + +app.post('/v1/chat/completions', async (req, res) => { + // Parse request + const model = req.body.model as string ?? 'orchestra/auto'; + const messages = req.body.messages as Array<{ role: string; content: string }> ?? []; + const isStream = req.body.stream === true; + + if (!['orchestra/auto', 'orchestra/plan', 'orchestra/pipeline'].includes(model)) { + return res.status(400).json({ error: { message: `Unknown model: ${model}. Use orchestra/auto, orchestra/plan, or orchestra/pipeline.` } }); + } + + const lastUser = messages.filter(m => m.role === 'user').pop(); + if (!lastUser?.content?.trim()) { + return res.status(400).json({ error: { message: 'Send MozartRequest JSON as the user message. See /v1/docs.' } }); + } + + let orchRequest: MozartRequest; + try { + orchRequest = JSON.parse(lastUser.content); + } catch { + return res.status(400).json({ error: { message: 'User message must be valid JSON. See GET /v1/docs.' } }); + } + + // Force mode from model ID + if (model === 'orchestra/plan') orchRequest.mode = 'plan'; + if (model === 'orchestra/pipeline') orchRequest.mode = 'pipeline'; + if (model === 'orchestra/auto') orchRequest.mode = 'auto'; + + if (!orchRequest.goal) { + return res.status(400).json({ error: { message: '"goal" is required in the request.' 
} }); + } + + // Estimate pre-auth cost: base fee + max possible step costs + const maxSteps = config.maxPlanSteps; + const maxPerStep = Math.max(...Object.values(PROVIDER_COST_ESTIMATES).map(Number)); + const preAuthCost = orchRequest.mode === 'plan' + ? MOZART_PLAN_FEE_USDC + : MOZART_BASE_FEE_USDC + BigInt(maxSteps) * BigInt(maxPerStep); + + const payment = await requirePayment(req, res, preAuthCost); + if (!payment) return; + + const { voucher, channel } = payment; + + // ── Streaming mode ────────────────────────────────────────────────────────── + if (isStream) { + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-DRAIN-Channel', voucher.channelId); + + const sendEvent = (event: MozartStreamEvent) => { + res.write(`data: ${JSON.stringify(event)}\n\n`); + }; + + try { + const result = await orchestrate(orchRequest, config, sendEvent); + + const totalCostWei = MOZART_BASE_FEE_USDC + + BigInt(Math.ceil(result.total_cost_usd * 1_000_000 * config.markupMultiplier)); + + drainService.storeVoucher(voucher, channel!, totalCostWei); + const remaining = channel!.deposit - channel!.totalCharged - totalCostWei; + + res.write(`: X-DRAIN-Cost: ${totalCostWei.toString()}\n`); + res.write(`: X-DRAIN-Remaining: ${remaining.toString()}\n`); + res.write(`data: [DONE]\n\n`); + res.end(); + } catch (err: any) { + sendEvent({ event: 'error', data: { message: err?.message }, timestamp: Date.now() }); + res.end(); + } + return; + } + + // ── Non-streaming mode ────────────────────────────────────────────────────── + try { + const result = await orchestrate(orchRequest, config); + + const totalCostWei = MOZART_BASE_FEE_USDC + + BigInt(Math.ceil(result.total_cost_usd * 1_000_000 * config.markupMultiplier)); + + drainService.storeVoucher(voucher, channel!, totalCostWei); + const remaining = channel!.deposit - channel!.totalCharged - totalCostWei; + + res.set({ + 
'X-DRAIN-Cost': totalCostWei.toString(), + 'X-DRAIN-Total': (channel!.totalCharged + totalCostWei).toString(), + 'X-DRAIN-Remaining': remaining.toString(), + 'X-DRAIN-Channel': voucher.channelId, + }); + + // OpenAI chat completion envelope + res.json({ + id: `orchestra-${Date.now()}`, + object: 'chat.completion', + created: Math.floor(Date.now() / 1000), + model, + choices: [{ + index: 0, + message: { role: 'assistant', content: JSON.stringify(result, null, 2) }, + finish_reason: 'stop', + }], + usage: { + prompt_tokens: 0, + completion_tokens: Math.ceil(JSON.stringify(result).length / 4), + total_tokens: Math.ceil(JSON.stringify(result).length / 4), + }, + // Surface key fields at top level for agent convenience + orchestra: { + synthesis: result.synthesis, + providers_used: result.providers_used, + steps_completed: result.steps.filter(s => s.status === 'done').length, + steps_failed: result.steps.filter(s => s.status === 'failed').length, + total_duration_ms: result.total_duration_ms, + total_cost_usd: result.total_cost_usd, + }, + }); + } catch (err: any) { + console.error('[orchestra] Error:', err?.message); + res.status(500).json({ error: { message: err?.message ?? 
'Orchestration error', code: 'orchestra_error' } }); + } +}); + +// ─── POST /v1/admin/claim ───────────────────────────────────────────────────── + +app.post('/v1/admin/claim', async (req, res) => { + try { + const txHashes = await drainService.claimPayments(req.query.force === 'true'); + res.json({ claimed: txHashes.length, transactions: txHashes }); + } catch (err: any) { + res.status(500).json({ error: err.message }); + } +}); + +// ─── GET /v1/admin/stats ────────────────────────────────────────────────────── + +app.get('/v1/admin/stats', (_req, res) => { + const stats = storage.getStats(); + res.json({ + provider: drainService.getProviderAddress(), + providerName: config.providerName, + ...stats, + totalEarned: formatUnits(stats.totalEarned, 6) + ' USDC', + }); +}); + +// ─── POST /v1/close-channel ─────────────────────────────────────────────────── + +app.post('/v1/close-channel', async (req, res) => { + try { + const { channelId } = req.body; + if (!channelId) return res.status(400).json({ error: 'channelId required' }); + const result = await drainService.signCloseAuthorization(channelId); + res.json({ channelId, finalAmount: result.finalAmount.toString(), signature: result.signature }); + } catch (err: any) { + res.status(500).json({ error: err?.message ?? 
'internal_error' }); + } +}); + +// ─── Start ──────────────────────────────────────────────────────────────────── + +async function main() { + drainService.startAutoClaim(config.autoClaimIntervalMinutes, config.autoClaimBufferSeconds); + + app.listen(config.port, config.host, () => { + const markup = Math.round((config.markupMultiplier - 1) * 100); + console.log(` +╔═══════════════════════════════════════════════════════════════════════╗ +║ Mozart ║ +║ The AI Orchestration Layer for Handshake58 ║ +╠═══════════════════════════════════════════════════════════════════════╣ +║ Server: http://${config.host}:${config.port} ║ +║ Provider: ${drainService.getProviderAddress()} ║ +║ Chain: ${config.chainId === 137 ? 'Polygon Mainnet' : 'Polygon Amoy'} ║ +║ Planner: ${config.plannerModel.padEnd(40)}║ +║ Markup: ${markup}% ║ +╠═══════════════════════════════════════════════════════════════════════╣ +║ Bittensor-native: chutes (SN22) · desearch (SN22) · numinous (SN6) ║ +║ Also available: openrouter · e2b · replicate · vericore ║ +╚═══════════════════════════════════════════════════════════════════════╝ +`); + }); +} + +main().catch(err => { + console.error('[orchestra] Fatal:', err); + process.exit(1); +}); diff --git a/providers/community-mozart/src/planner.ts b/providers/community-mozart/src/planner.ts new file mode 100644 index 0000000..d3cd53f --- /dev/null +++ b/providers/community-mozart/src/planner.ts @@ -0,0 +1,268 @@ +/** + * Orchestra Planner + Orchestrator + * + * The planner uses raw fetch (same as executor) to avoid OpenAI SDK baseURL quirks. + * The orchestrator runs the plan in topological order, + * parallelizing independent steps and feeding outputs forward. 
+ */ + +import type { + ExecutionPlan, + OrchestrationResult, + MozartRequest, + MozartStreamEvent, + PlanStep, + StepResult, + ProviderConfig, + ProviderName, +} from './types.js'; +import { + PLANNER_SYSTEM_PROMPT, + SYNTHESIZER_SYSTEM_PROMPT, + UPSTREAM, +} from './constants.js'; +import { executeStep } from './executor.js'; + +// ─── Raw LLM helper (same pattern as executor, no SDK dependency) ────────────── + +async function chutesChat( + apiKey: string, + model: string, + messages: { role: string; content: string }[], + maxTokens = 1024, + temperature = 0.1 +): Promise { + const res = await fetch(`${UPSTREAM.chutes.base}/chat/completions`, { + method: 'POST', + headers: { + Authorization: `Bearer ${apiKey}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ model, messages, max_tokens: maxTokens, temperature }), + signal: AbortSignal.timeout(90_000), + }); + + if (!res.ok) { + const text = await res.text().catch(() => ''); + throw new Error(`HTTP ${res.status}: ${text.slice(0, 300)}`); + } + + const data = await res.json(); + const msg = data.choices?.[0]?.message; + return (msg?.content || msg?.reasoning_content) ?? ''; +} + +// ─── Planner ────────────────────────────────────────────────────────────────── + +export async function buildPlan( + request: MozartRequest, + config: ProviderConfig +): Promise { + const userMessage = [ + `Goal: ${request.goal}`, + request.context ? `Context: ${request.context}` : '', + request.budget_usd ? `Budget cap: $${request.budget_usd} USD` : '', + request.providers?.length + ? 
`Allowed providers: ${request.providers.join(', ')}`
      : '',
  ].filter(Boolean).join('\n');

  const raw = await chutesChat(
    config.chutesApiKey,
    config.plannerModel,
    [
      { role: 'system', content: PLANNER_SYSTEM_PROMPT },
      { role: 'user', content: userMessage },
    ],
    1024,
    0.1
  );

  // Strip markdown code fences if the model wraps in ```json
  const cleaned = raw.replace(/^```(?:json)?\n?/m, '').replace(/\n?```$/m, '').trim();

  let plan: ExecutionPlan;
  try {
    plan = JSON.parse(cleaned);
  } catch {
    // Fallback: single-step plan using chutes
    plan = {
      goal: request.goal,
      reasoning: 'Fallback single-step plan (planner parse error)',
      estimated_total_cost_usd: 0.03,
      steps: [{
        id: 'step_1',
        provider: 'chutes',
        model: config.plannerModel,
        task: request.goal,
        input_from: [],
        parallel: true,
        required: true,
        estimated_cost_usd: 0.03,
      }],
    };
  }

  // Enforce budget cap: a step is kept only if it fits in the remaining budget.
  // Fix: the old code kept accumulating the cost of DROPPED steps and then
  // wrote that inflated cumulative into estimated_total_cost_usd, so the
  // reported estimate could exceed the user's cap.
  if (request.budget_usd) {
    const cap = request.budget_usd;
    let spent = 0;
    plan.steps = plan.steps.filter(s => {
      if (spent + s.estimated_cost_usd > cap) return false;
      spent += s.estimated_cost_usd;
      return true;
    });
  }

  // Enforce provider whitelist
  if (request.providers?.length) {
    plan.steps = plan.steps.filter(s => request.providers!.includes(s.provider));
  }

  // Cap at maxPlanSteps
  plan.steps = plan.steps.slice(0, config.maxPlanSteps);

  // Recompute the estimate from the steps that actually survived all three
  // filters, so the reported total always matches the plan that will run.
  plan.estimated_total_cost_usd = plan.steps.reduce((s, x) => s + x.estimated_cost_usd, 0);

  return plan;
}

// ─── Topological scheduler ────────────────────────────────────────────────────

function buildWaves(steps: PlanStep[]): PlanStep[][] {
  // Step ids whose outputs are already available to later waves.
  const completed = new Set<string>();
  const remaining = [...steps];
  const waves: PlanStep[][] = [];

  while (remaining.length > 0) {
    const wave: PlanStep[] = [];
    const still: PlanStep[] = [];

    for (const step of remaining) {
      const deps = step.input_from ??
[]; + const ready = deps.every(d => completed.has(d)); + if (ready) wave.push(step); + else still.push(step); + } + + if (wave.length === 0) { + waves.push(remaining); + break; + } + + waves.push(wave); + wave.forEach(s => completed.add(s.id)); + remaining.splice(0, remaining.length, ...still); + } + + return waves; +} + +// ─── Synthesizer ────────────────────────────────────────────────────────────── + +async function synthesize( + goal: string, + results: StepResult[], + config: ProviderConfig +): Promise { + const stepSummaries = results.map(r => + r.status === 'done' + ? `[${r.step_id} — ${r.provider}/${r.model}]\n${r.output}` + : `[${r.step_id} — ${r.provider}/${r.model}] FAILED: ${r.error}` + ).join('\n\n---\n\n'); + + return chutesChat( + config.chutesApiKey, + config.synthesizerModel, + [ + { role: 'system', content: SYNTHESIZER_SYSTEM_PROMPT }, + { role: 'user', content: `Goal: ${goal}\n\nStep outputs:\n\n${stepSummaries}` }, + ], + 2048, + 0.4 + ); +} + +// ─── Orchestrate ───────────────────────────────────────────────────────────── + +export async function orchestrate( + request: MozartRequest, + config: ProviderConfig, + onEvent?: (event: MozartStreamEvent) => void +): Promise { + const globalStart = Date.now(); + + const emit = (event: MozartStreamEvent['event'], data: any) => { + onEvent?.({ event, data, timestamp: Date.now() }); + }; + + // 1. 
Build plan + let plan: ExecutionPlan; + if (request.mode === 'pipeline' && request.steps?.length) { + plan = { + goal: request.goal, + steps: request.steps, + estimated_total_cost_usd: request.steps.reduce((s, x) => s + x.estimated_cost_usd, 0), + reasoning: 'User-provided pipeline', + }; + } else { + plan = await buildPlan(request, config); + } + + emit('plan', plan); + + if (request.mode === 'plan') { + return { + goal: plan.goal, + plan, + steps: [], + synthesis: '(plan-only mode — set mode: "auto" to execute)', + total_cost_usd: 0, + total_duration_ms: Date.now() - globalStart, + providers_used: [], + }; + } + + // 2. Execute in waves + const allResults: StepResult[] = []; + const priorOutputs = new Map(); + const waves = buildWaves(plan.steps); + + for (const wave of waves) { + emit('step_start', { steps: wave.map(s => s.id) }); + + const waveResults = await Promise.all( + wave.map(step => executeStep(step, config, priorOutputs)) + ); + + for (const result of waveResults) { + allResults.push(result); + if (result.output) priorOutputs.set(result.step_id, result.output); + + if (result.status === 'done') { + emit('step_done', result); + } else { + emit('step_fail', result); + } + } + } + + // 3. 
Synthesize + emit('synthesis', { status: 'synthesizing', steps_completed: allResults.filter(r => r.status === 'done').length }); + + const synthesis = await synthesize(plan.goal, allResults, config); + + const totalCost = allResults.reduce((s, r) => s + r.cost_usd, 0); + const providersUsed = [...new Set(allResults.map(r => r.provider))] as ProviderName[]; + + const result: OrchestrationResult = { + goal: plan.goal, + plan, + steps: allResults, + synthesis, + total_cost_usd: totalCost, + total_duration_ms: Date.now() - globalStart, + providers_used: providersUsed, + }; + + emit('done', result); + return result; +} diff --git a/providers/community-mozart/src/storage.ts b/providers/community-mozart/src/storage.ts new file mode 100644 index 0000000..914b1e5 --- /dev/null +++ b/providers/community-mozart/src/storage.ts @@ -0,0 +1,132 @@ +/** + * VoucherStorage — identical pattern to all HS58 providers. + * JSON file on disk, in-memory Map for fast access. + */ + +import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'fs'; +import { dirname } from 'path'; +import type { Hash } from 'viem'; +import type { StoredVoucher, ChannelState } from './types.js'; + +export class VoucherStorage { + private vouchers: Map = new Map(); + private channels: Map = new Map(); + private path: string; + private dirty = false; + + constructor(storagePath: string) { + this.path = storagePath; + this.load(); + // Flush to disk every 30s + setInterval(() => this.flush(), 30_000); + } + + private load(): void { + try { + if (!existsSync(this.path)) return; + const raw = JSON.parse(readFileSync(this.path, 'utf8')); + + if (raw.vouchers) { + for (const [k, v] of Object.entries(raw.vouchers as any)) { + const vv = v as any; + this.vouchers.set(k, { ...vv, amount: BigInt(vv.amount), nonce: BigInt(vv.nonce) }); + } + } + if (raw.channels) { + for (const [k, c] of Object.entries(raw.channels as any)) { + const cc = c as any; + const channel: ChannelState = { + ...cc, + deposit: 
BigInt(cc.deposit), + totalCharged: BigInt(cc.totalCharged), + }; + if (cc.lastVoucher) { + channel.lastVoucher = { + ...cc.lastVoucher, + amount: BigInt(cc.lastVoucher.amount), + nonce: BigInt(cc.lastVoucher.nonce), + }; + } + this.channels.set(k, channel); + } + } + } catch { /* fresh start */ } + } + + private flush(): void { + if (!this.dirty) return; + try { + mkdirSync(dirname(this.path), { recursive: true }); + const voucherObj: any = {}; + for (const [k, v] of this.vouchers) { + voucherObj[k] = { ...v, amount: v.amount.toString(), nonce: v.nonce.toString() }; + } + const channelObj: any = {}; + for (const [k, c] of this.channels) { + channelObj[k] = { + ...c, + deposit: c.deposit.toString(), + totalCharged: c.totalCharged.toString(), + lastVoucher: c.lastVoucher + ? { ...c.lastVoucher, amount: c.lastVoucher.amount.toString(), nonce: c.lastVoucher.nonce.toString() } + : undefined, + }; + } + writeFileSync(this.path, JSON.stringify({ vouchers: voucherObj, channels: channelObj }, null, 2)); + this.dirty = false; + } catch (e) { + console.error('[storage] flush failed:', e); + } + } + + storeVoucher(v: StoredVoucher): void { + this.vouchers.set(`${v.channelId}:${v.nonce}`, v); + this.dirty = true; + } + + getChannel(channelId: string): ChannelState | null { + return this.channels.get(channelId) ?? 
null; + } + + updateChannel(channelId: string, state: ChannelState): void { + this.channels.set(channelId, state); + this.dirty = true; + } + + markClaimed(channelId: string, txHash: Hash): void { + for (const [k, v] of this.vouchers) { + if (v.channelId === channelId && !v.claimed) { + v.claimed = true; + v.claimedAt = Date.now(); + v.claimTxHash = txHash; + this.vouchers.set(k, v); + } + } + this.dirty = true; + } + + getUnclaimedVouchers(): StoredVoucher[] { + return [...this.vouchers.values()].filter(v => !v.claimed); + } + + getHighestVoucherPerChannel(): Map { + const highest = new Map(); + for (const v of this.vouchers.values()) { + if (v.claimed) continue; + const existing = highest.get(v.channelId); + if (!existing || v.amount > existing.amount) highest.set(v.channelId, v); + } + return highest; + } + + getStats() { + const unclaimed = this.getUnclaimedVouchers(); + const totalEarned = unclaimed.reduce((s, v) => s + v.amount, 0n); + return { + totalChannels: this.channels.size, + totalVouchers: this.vouchers.size, + unclaimedCount: unclaimed.length, + totalEarned, + }; + } +} diff --git a/providers/community-mozart/src/types.ts b/providers/community-mozart/src/types.ts new file mode 100644 index 0000000..9dd7cad --- /dev/null +++ b/providers/community-mozart/src/types.ts @@ -0,0 +1,133 @@ +import type { Hash, Hex } from 'viem'; + +// ─── DRAIN primitives ───────────────────────────────────────────────────────── + +export interface ModelPricing { + inputPer1k: bigint; + outputPer1k: bigint; +} + +export interface ProviderConfig { + port: number; + host: string; + chainId: 137 | 80002; + providerPrivateKey: Hex; + polygonRpcUrl?: string; + claimThreshold: bigint; + storagePath: string; + providerName: string; + autoClaimIntervalMinutes: number; + autoClaimBufferSeconds: number; + // Upstream keys + openrouterApiKey: string; // optional — Chutes used as primary + desearchApiKey: string; + chutesApiKey: string; + e2bApiKey?: string; + replicateApiToken?: 
string; + // Mozart config + markupMultiplier: number; + maxPlanSteps: number; + plannerModel: string; + synthesizerModel: string; +} + +export interface VoucherHeader { + channelId: Hash; + amount: string; + nonce: string; + signature: Hex; +} + +export interface StoredVoucher { + channelId: Hash; + amount: bigint; + nonce: bigint; + signature: Hex; + consumer: string; + receivedAt: number; + claimed: boolean; + claimedAt?: number; + claimTxHash?: Hash; +} + +export interface ChannelState { + channelId: Hash; + consumer: string; + deposit: bigint; + totalCharged: bigint; + expiry: number; + lastVoucher?: StoredVoucher; + createdAt: number; + lastActivityAt: number; +} + +// ─── Mozart / Orchestra primitives ─────────────────────────────────────────── + +export type ProviderName = + | 'chutes' + | 'openrouter' + | 'desearch' + | 'e2b' + | 'replicate' + | 'numinous' + | 'vericore'; + +export type StepStatus = 'pending' | 'running' | 'done' | 'failed' | 'skipped'; + +export interface PlanStep { + id: string; + provider: ProviderName; + model: string; + task: string; + input_from?: string[]; + parallel?: boolean; + required?: boolean; + estimated_cost_usd: number; +} + +export interface ExecutionPlan { + goal: string; + steps: PlanStep[]; + estimated_total_cost_usd: number; + reasoning: string; +} + +export interface StepResult { + step_id: string; + provider: ProviderName; + model: string; + status: StepStatus; + output?: string; + error?: string; + cost_usd: number; + duration_ms: number; + tokens_used?: number; +} + +export interface OrchestrationResult { + goal: string; + plan: ExecutionPlan; + steps: StepResult[]; + synthesis: string; + total_cost_usd: number; + total_duration_ms: number; + providers_used: ProviderName[]; +} + +export type MozartMode = 'auto' | 'plan' | 'pipeline'; + +export interface MozartRequest { + mode: MozartMode; + goal: string; + steps?: PlanStep[]; + context?: string; + budget_usd?: number; + providers?: ProviderName[]; + stream?: 
boolean; +} + +export interface MozartStreamEvent { + event: 'plan' | 'step_start' | 'step_done' | 'step_fail' | 'synthesis' | 'done' | 'error'; + data: any; + timestamp: number; +} diff --git a/providers/community-mozart/tsconfig.json b/providers/community-mozart/tsconfig.json new file mode 100644 index 0000000..486fed8 --- /dev/null +++ b/providers/community-mozart/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "outDir": "dist", + "rootDir": "src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "declaration": true + }, + "include": ["src/**/*"] +} diff --git a/providers/hs58-openrouter/src/index.ts b/providers/hs58-openrouter/src/index.ts index 27781b7..d36204c 100644 --- a/providers/hs58-openrouter/src/index.ts +++ b/providers/hs58-openrouter/src/index.ts @@ -1,36 +1,104 @@ /** - * HS58-OpenRouter Provider - * - * Meta-provider with 200+ models from OpenRouter. - * Auto-discovers available models and pricing. + * HS58-OpenRouter Provider (with Groq Fast-Path) + * + * Routes requests to Groq first for supported models (ultra-low latency), + * falls back to OpenRouter for everything else. 
*/ import express from 'express'; import cors from 'cors'; import OpenAI from 'openai'; -import { - loadConfig, - updatePricingCache, - getModelPricing, - isModelSupported, +import { + loadConfig, + updatePricingCache, + getModelPricing, + isModelSupported, getSupportedModels, getModelList, getPricingAge, - calculateCost + calculateCost, } from './config.js'; import { DrainService } from './drain.js'; import { VoucherStorage } from './storage.js'; import { getPaymentHeaders } from './constants.js'; import { formatUnits } from 'viem'; -// Load configuration +// --------------------------------------------------------------------------- +// Groq fast-path config +// --------------------------------------------------------------------------- + +// Models Groq supports natively — requests for these go to Groq first. +// Keys are the model IDs clients send; values are the Groq model names. +const GROQ_MODEL_MAP: Record = { + // Llama 3.3 + 'meta-llama/llama-3.3-70b-instruct': 'llama-3.3-70b-versatile', + 'meta-llama/llama-3.3-70b-instruct:free': 'llama-3.3-70b-versatile', + // Llama 3.1 + 'meta-llama/llama-3.1-8b-instruct': 'llama-3.1-8b-instant', + 'meta-llama/llama-3.1-70b-instruct': 'llama-3.1-70b-versatile', + // Llama 3 + 'meta-llama/llama-3-8b-instruct': 'llama3-8b-8192', + 'meta-llama/llama-3-70b-instruct': 'llama3-70b-8192', + // Mixtral + 'mistralai/mixtral-8x7b-instruct': 'mixtral-8x7b-32768', + // Gemma + 'google/gemma-2-9b-it': 'gemma2-9b-it', + 'google/gemma-7b-it': 'gemma-7b-it', + // DeepSeek R1 distill + 'deepseek/deepseek-r1-distill-llama-70b': 'deepseek-r1-distill-llama-70b', + 'deepseek/deepseek-r1-distill-qwen-32b': 'deepseek-r1-distill-qwen-32b', + // Qwen + 'qwen/qwen-2.5-72b-instruct': 'qwen-qwq-32b', + // Groq-exclusive fast models (advertised directly) + 'groq/llama-3.3-70b-versatile': 'llama-3.3-70b-versatile', + 'groq/llama-3.1-8b-instant': 'llama-3.1-8b-instant', + 'groq/mixtral-8x7b-32768': 'mixtral-8x7b-32768', + 'groq/gemma2-9b-it': 
'gemma2-9b-it', + 'groq/deepseek-r1-distill-llama-70b': 'deepseek-r1-distill-llama-70b', + 'groq/qwen-qwq-32b': 'qwen-qwq-32b', + 'groq/llama-3.2-90b-vision-preview': 'llama-3.2-90b-vision-preview', + 'groq/llama-3.2-11b-vision-preview': 'llama-3.2-11b-vision-preview', +}; + +// Groq pricing per 1M tokens (USD) — used to calculate DRAIN cost +// These are Groq's public rates as of 2026-03 +const GROQ_PRICING_USD: Record = { + 'llama-3.3-70b-versatile': { input: 0.59, output: 0.79 }, + 'llama-3.1-8b-instant': { input: 0.05, output: 0.08 }, + 'llama-3.1-70b-versatile': { input: 0.59, output: 0.79 }, + 'llama3-8b-8192': { input: 0.05, output: 0.08 }, + 'llama3-70b-8192': { input: 0.59, output: 0.79 }, + 'mixtral-8x7b-32768': { input: 0.24, output: 0.24 }, + 'gemma2-9b-it': { input: 0.20, output: 0.20 }, + 'gemma-7b-it': { input: 0.07, output: 0.07 }, + 'deepseek-r1-distill-llama-70b': { input: 0.75, output: 0.99 }, + 'deepseek-r1-distill-qwen-32b': { input: 0.69, output: 0.69 }, + 'qwen-qwq-32b': { input: 0.29, output: 0.39 }, + 'llama-3.2-90b-vision-preview': { input: 0.90, output: 0.90 }, + 'llama-3.2-11b-vision-preview': { input: 0.18, output: 0.18 }, +}; + +function groqCostPerThousand(groqModel: string, markup: number): { inputPer1k: bigint; outputPer1k: bigint } { + const p = GROQ_PRICING_USD[groqModel] ?? 
{ input: 0.59, output: 0.79 }; + // per-token USD = p.input / 1_000_000 + // per-1k tokens USD = p.input / 1000 + // per-1k tokens USDC wei = (p.input / 1000) * 1_000_000 * markup + const inputPer1k = BigInt(Math.ceil((p.input / 1000) * 1_000_000 * markup)); + const outputPer1k = BigInt(Math.ceil((p.output / 1000) * 1_000_000 * markup)); + return { inputPer1k, outputPer1k }; +} + +// --------------------------------------------------------------------------- +// Load config & init services +// --------------------------------------------------------------------------- + const config = loadConfig(); +const groqApiKey = process.env.GROQ_API_KEY; -// Initialize services -const storage = new VoucherStorage(config.storagePath); +const storage = new VoucherStorage(config.storagePath); const drainService = new DrainService(config, storage); -// OpenRouter uses OpenAI-compatible API +// OpenRouter client (primary / fallback) const openrouter = new OpenAI({ apiKey: config.openrouterApiKey, baseURL: 'https://openrouter.ai/api/v1', @@ -40,19 +108,35 @@ const openrouter = new OpenAI({ }, }); -// Create Express app +// Groq client (fast-path — only if key is set) +const groq = groqApiKey + ? 
new OpenAI({ apiKey: groqApiKey, baseURL: 'https://api.groq.com/openai/v1' }) + : null; + +if (groq) { + console.log('⚡ Groq fast-path enabled for', Object.keys(GROQ_MODEL_MAP).length, 'model aliases'); +} else { + console.warn('⚠️ GROQ_API_KEY not set — Groq fast-path disabled, using OpenRouter only'); +} + +// --------------------------------------------------------------------------- +// Express app +// --------------------------------------------------------------------------- + const app = express(); app.use(cors()); app.use(express.json()); -/** - * GET /v1/docs - */ +// --------------------------------------------------------------------------- +// GET /v1/docs +// --------------------------------------------------------------------------- app.get('/v1/docs', (_req, res) => { - const models = getSupportedModels(); + const models = getSupportedModels(); const modelList = getModelList(); + const groqModels = Object.keys(GROQ_MODEL_MAP).filter(m => m.startsWith('groq/')); + const topModels = models.slice(0, 10).map(m => { - const p = getModelPricing(m); + const p = getModelPricing(m); const info = modelList.find(ml => ml.id === m); const name = info?.name ? ` (${info.name})` : ''; return p @@ -60,16 +144,21 @@ app.get('/v1/docs', (_req, res) => { : `- ${m}${name}`; }).join('\n'); + const groqSection = groq + ? `\n## ⚡ Groq Ultra-Fast Models (sub-100ms)\n\n${groqModels.map(m => `- ${m}`).join('\n')}\n\nThese models are routed to Groq's inference API for maximum speed.\n` + : ''; + res.type('text/plain').send(`# ${config.providerName} — Agent Instructions -Meta-provider with ${models.length}+ models from OpenRouter. Includes GPT-4o, Claude, Llama, Gemini, Mistral, and more. +Meta-provider with ${models.length}+ models from OpenRouter${groq ? ` + ${groqModels.length} Groq ultra-fast models` : ''}. +Includes GPT-4o, Claude, Llama, Gemini, Mistral, DeepSeek, and more. All accessible via a single DRAIN payment channel. ## How to use via DRAIN 1. 
Open a payment channel to this provider (drain_open_channel) 2. Call drain_chat with: - - model: any model ID from the list below (OpenRouter format: provider/model-name) + - model: any model ID from the list below - messages: standard chat messages array ## Example @@ -82,14 +171,14 @@ Streaming is supported (stream: true). ## Top Models ${topModels} - +${groqSection} Full list: GET /v1/models Full pricing: GET /v1/pricing Filter pricing: GET /v1/pricing?filter=claude ## Pricing -Per-token pricing in USDC (${(config.markup - 1) * 100}% markup on OpenRouter base prices). +Per-token pricing in USDC (${(config.markup - 1) * 100}% markup on upstream prices). Input and output tokens are priced separately. Cost = (input_tokens × input_rate + output_tokens × output_rate) / 1000. Pricing auto-refreshes from the OpenRouter API. @@ -99,348 +188,323 @@ Pricing auto-refreshes from the OpenRouter API. - Standard OpenAI chat completions format (messages, max_tokens, temperature, etc.) - Streaming supported via stream: true - Responses include X-DRAIN-Cost, X-DRAIN-Remaining headers -- Model IDs use OpenRouter format: provider/model-name (e.g. "anthropic/claude-sonnet-4-20250514") +- OpenRouter model IDs use format: provider/model-name (e.g. "anthropic/claude-sonnet-4-20250514") +- Groq model IDs use format: groq/model-name (e.g. 
"groq/llama-3.3-70b-versatile") - One payment channel gives access to all ${models.length}+ models `); }); -/** - * GET /v1/pricing - * Returns pricing information for all models - */ +// --------------------------------------------------------------------------- +// GET /v1/pricing +// --------------------------------------------------------------------------- app.get('/v1/pricing', (req, res) => { - const models = getSupportedModels(); - const pricing: Record = {}; - - // Get query filter - const filter = req.query.filter as string | undefined; - + const models = getSupportedModels(); + const filter = req.query.filter as string | undefined; + const pricing: Record = {}; + for (const model of models) { - // Apply filter if provided - if (filter && !model.toLowerCase().includes(filter.toLowerCase())) { - continue; - } - - const modelPricing = getModelPricing(model); - if (modelPricing) { + if (filter && !model.toLowerCase().includes(filter.toLowerCase())) continue; + const p = getModelPricing(model); + if (p) { pricing[model] = { - inputPer1kTokens: formatUnits(modelPricing.inputPer1k, 6), - outputPer1kTokens: formatUnits(modelPricing.outputPer1k, 6), + inputPer1kTokens: formatUnits(p.inputPer1k, 6), + outputPer1kTokens: formatUnits(p.outputPer1k, 6), + }; + } + } + + // Add Groq-exclusive models + if (groq) { + for (const [alias, groqModel] of Object.entries(GROQ_MODEL_MAP)) { + if (!alias.startsWith('groq/')) continue; + if (filter && !alias.toLowerCase().includes(filter.toLowerCase())) continue; + const p = groqCostPerThousand(groqModel, config.markup); + pricing[alias] = { + inputPer1kTokens: formatUnits(p.inputPer1k, 6), + outputPer1kTokens: formatUnits(p.outputPer1k, 6), + backend: 'groq', }; } } res.json({ - provider: drainService.getProviderAddress(), + provider: drainService.getProviderAddress(), providerName: config.providerName, - chainId: config.chainId, - currency: 'USDC', - decimals: 6, - markup: `${(config.markup - 1) * 100}%`, - totalModels: 
models.length, - pricingAge: `${getPricingAge()}s ago`, - models: pricing, + chainId: config.chainId, + currency: 'USDC', + decimals: 6, + markup: `${(config.markup - 1) * 100}%`, + totalModels: models.length + (groq ? Object.keys(GROQ_MODEL_MAP).filter(m => m.startsWith('groq/')).length : 0), + pricingAge: `${getPricingAge()}s ago`, + models: pricing, }); }); -/** - * GET /v1/models - * OpenAI-compatible models endpoint - */ -app.get('/v1/models', (req, res) => { +// --------------------------------------------------------------------------- +// GET /v1/models +// --------------------------------------------------------------------------- +app.get('/v1/models', (_req, res) => { const modelList = getModelList(); - + const models = modelList.map(m => ({ - id: m.id, - object: 'model', - created: Date.now(), - owned_by: config.providerName.toLowerCase(), - name: m.name, + id: m.id, + object: 'model', + created: Date.now(), + owned_by: config.providerName.toLowerCase(), + name: m.name, context_length: m.context_length, })); - res.json({ - object: 'list', - data: models, - total: models.length, - }); + // Append Groq-exclusive entries + if (groq) { + for (const alias of Object.keys(GROQ_MODEL_MAP).filter(m => m.startsWith('groq/'))) { + models.push({ + id: alias, + object: 'model', + created: Date.now(), + owned_by: 'groq', + name: alias.replace('groq/', '') + ' (Groq — ultra-fast)', + context_length: 32768, + }); + } + } + + res.json({ object: 'list', data: models, total: models.length }); }); -/** - * POST /v1/admin/refresh-pricing - * Force refresh pricing from OpenRouter - */ -app.post('/v1/admin/refresh-pricing', async (req, res) => { +// --------------------------------------------------------------------------- +// POST /v1/admin/refresh-pricing +// --------------------------------------------------------------------------- +app.post('/v1/admin/refresh-pricing', async (_req, res) => { try { await updatePricingCache(config.openrouterApiKey, config.markup, 
config.providerName); - res.json({ - success: true, - models: getSupportedModels().length, - refreshedAt: new Date().toISOString(), - }); + res.json({ success: true, models: getSupportedModels().length, refreshedAt: new Date().toISOString() }); } catch (error) { - res.status(500).json({ - success: false, - error: error instanceof Error ? error.message : 'Refresh failed', - }); + res.status(500).json({ success: false, error: error instanceof Error ? error.message : 'Refresh failed' }); } }); -/** - * POST /v1/chat/completions - * OpenAI-compatible chat endpoint with DRAIN payments - */ +// --------------------------------------------------------------------------- +// POST /v1/chat/completions — DRAIN-gated, with Groq fast-path +// --------------------------------------------------------------------------- app.post('/v1/chat/completions', async (req, res) => { const voucherHeader = req.headers['x-drain-voucher'] as string | undefined; - - // 1. Check voucher header present + if (!voucherHeader) { res.status(402).set(getPaymentHeaders(drainService.getProviderAddress(), config.chainId)).json({ - error: { - message: 'X-DRAIN-Voucher header required', - type: 'payment_required', - code: 'voucher_required', - }, + error: { message: 'X-DRAIN-Voucher header required', type: 'payment_required', code: 'voucher_required' }, }); return; } - // 2. Parse voucher - const voucher = drainService.parseVoucherHeader(voucherHeader); - if (!voucher) { - res.status(402).set({ - 'X-DRAIN-Error': 'invalid_voucher_format', - }).json({ - error: { - message: 'Invalid X-DRAIN-Voucher format', - type: 'payment_required', - code: 'invalid_voucher_format', - }, - }); + // Parse voucher + let voucher: any; + try { + voucher = JSON.parse(Buffer.from(voucherHeader, 'base64').toString('utf-8')); + } catch { + res.status(400).json({ error: { message: 'Invalid X-DRAIN-Voucher: not valid base64 JSON', type: 'invalid_request_error', code: 'invalid_voucher' } }); return; } - // 3. 
Check model supported - const model = req.body.model as string; - if (!isModelSupported(model)) { - res.status(400).json({ - error: { - message: `Model '${model}' not found. Use GET /v1/models to see available models.`, - type: 'invalid_request_error', - code: 'model_not_supported', - }, - }); + const model: string = req.body.model; + if (!model) { + res.status(400).json({ error: { message: 'model field required', type: 'invalid_request_error', code: 'missing_model' } }); return; } - const pricing = getModelPricing(model)!; - const isStreaming = req.body.stream === true; - - // 4. Pre-auth check: estimate minimum cost - const estimatedInputTokens = JSON.stringify(req.body.messages).length / 4; - const minOutputTokens = 50; - const estimatedMinCost = calculateCost(pricing, Math.ceil(estimatedInputTokens), minOutputTokens); - - // 5. Validate voucher with estimated cost - const validation = await drainService.validateVoucher(voucher, estimatedMinCost); - - if (!validation.valid) { - const errorHeaders: Record = { - 'X-DRAIN-Error': validation.error!, - }; - - if (validation.error === 'insufficient_funds' && validation.channel) { - errorHeaders['X-DRAIN-Required'] = estimatedMinCost.toString(); - errorHeaders['X-DRAIN-Provided'] = (BigInt(voucher.amount) - validation.channel.totalCharged).toString(); + // Determine backend + const groqModel = groq ? GROQ_MODEL_MAP[model] : undefined; + const usingGroq = !!groqModel; + + // Determine pricing + let pricing: { inputPer1k: bigint; outputPer1k: bigint } | null = null; + if (usingGroq) { + pricing = groqCostPerThousand(groqModel!, config.markup); + } else { + pricing = getModelPricing(model); + if (!pricing) { + res.status(400).json({ error: { message: `Model '${model}' not supported. 
GET /v1/models for full list.`, type: 'invalid_request_error', code: 'model_not_found' } }); + return; } - - res.status(402).set(errorHeaders).json({ - error: { - message: `Payment validation failed: ${validation.error}`, - type: 'payment_required', - code: validation.error, - }, - }); + } + + // Validate channel + let channelState: any; + try { + channelState = await drainService.getChannelState(voucher.channelId); + } catch (e) { + res.status(402).json({ error: { message: 'Channel not found or expired', type: 'payment_required', code: 'channel_not_found' } }); return; } - const channelState = validation.channel!; + // Pre-flight voucher check (estimate 500 tokens output) + const estimatedCost = calculateCost(pricing, req.body.messages?.reduce((n: number, m: any) => n + (m.content?.length ?? 0) / 4, 0) || 200, 500); + const preValidation = await drainService.validateVoucher(voucher, estimatedCost); + if (!preValidation.valid) { + res.status(402).set({ 'X-DRAIN-Error': 'insufficient_funds', 'X-DRAIN-Required': estimatedCost.toString() }).json({ + error: { message: 'Insufficient channel balance', type: 'payment_required', code: 'insufficient_funds' }, + }); + return; + } try { - if (isStreaming) { - // === STREAMING RESPONSE === + const stream: boolean = !!req.body.stream; + + if (stream) { + // ── Streaming path ──────────────────────────────────────────────────── res.setHeader('Content-Type', 'text/event-stream'); res.setHeader('Cache-Control', 'no-cache'); res.setHeader('Connection', 'keep-alive'); - res.setHeader('X-DRAIN-Channel', voucher.channelId); - let inputTokens = 0; - let outputTokens = 0; - let fullContent = ''; + const client = usingGroq ? groq! : openrouter; + const streamModel = usingGroq ? groqModel! 
: model; - const stream = await openrouter.chat.completions.create({ - model: model, - messages: req.body.messages, + const streamResp = await client.chat.completions.create({ + model: streamModel, + messages: req.body.messages, max_tokens: req.body.max_tokens, - stream: true, - }); + stream: true, + } as any); - for await (const chunk of stream) { - const content = chunk.choices[0]?.delta?.content || ''; - fullContent += content; - - // Forward chunk as-is (already OpenAI format) - res.write(`data: ${JSON.stringify(chunk)}\n\n`); - - // Track usage if available - if ((chunk as any).usage) { - inputTokens = (chunk as any).usage.prompt_tokens || 0; - outputTokens = (chunk as any).usage.completion_tokens || 0; + let inputTokens = 0; + let outputTokens = 0; + + for await (const chunk of streamResp as any) { + if (chunk.usage) { + inputTokens = chunk.usage.prompt_tokens ?? inputTokens; + outputTokens = chunk.usage.completion_tokens ?? outputTokens; } + res.write(`data: ${JSON.stringify(chunk)}\n\n`); } - // Estimate tokens if not provided - if (inputTokens === 0) { - inputTokens = Math.ceil(JSON.stringify(req.body.messages).length / 4); - } - if (outputTokens === 0) { - outputTokens = Math.ceil(fullContent.length / 4); - } + // Fallback token estimate if not provided + if (outputTokens === 0) outputTokens = 200; - // Calculate final cost const actualCost = calculateCost(pricing, inputTokens, outputTokens); - - // Store voucher with actual cost - drainService.storeVoucher(voucher, channelState, actualCost); + const postValidation = await drainService.validateVoucher(voucher, actualCost); + if (postValidation.valid) { + drainService.storeVoucher(voucher, channelState, actualCost); + } - // Send cost info - const remaining = channelState.deposit - channelState.totalCharged - actualCost; - res.write(`data: [DONE]\n\n`); - res.write(`: X-DRAIN-Cost: ${actualCost.toString()}\n`); - res.write(`: X-DRAIN-Total: ${(channelState.totalCharged + actualCost).toString()}\n`); - 
res.write(`: X-DRAIN-Remaining: ${remaining.toString()}\n`); - + res.write('data: [DONE]\n\n'); res.end(); } else { - // === NON-STREAMING RESPONSE === - const completion = await openrouter.chat.completions.create({ - model: model, - messages: req.body.messages, + // ── Non-streaming path ──────────────────────────────────────────────── + const client = usingGroq ? groq! : openrouter; + const callModel = usingGroq ? groqModel! : model; + + const completion = await client.chat.completions.create({ + model: callModel, + messages: req.body.messages, max_tokens: req.body.max_tokens, - }); + } as any); - // Get actual token counts - const inputTokens = completion.usage?.prompt_tokens ?? 0; - const outputTokens = completion.usage?.completion_tokens ?? 0; + const inputTokens = (completion as any).usage?.prompt_tokens ?? 0; + const outputTokens = (completion as any).usage?.completion_tokens ?? 0; + const actualCost = calculateCost(pricing, inputTokens, outputTokens); - // Calculate actual cost - const actualCost = calculateCost(pricing, inputTokens, outputTokens); - - // Verify voucher covers actual cost const actualValidation = await drainService.validateVoucher(voucher, actualCost); - if (!actualValidation.valid) { - res.status(402).set({ - 'X-DRAIN-Error': 'insufficient_funds_post', - 'X-DRAIN-Required': actualCost.toString(), - }).json({ - error: { - message: 'Voucher insufficient for actual cost', - type: 'payment_required', - code: 'insufficient_funds_post', - }, + res.status(402).set({ 'X-DRAIN-Error': 'insufficient_funds_post', 'X-DRAIN-Required': actualCost.toString() }).json({ + error: { message: 'Voucher insufficient for actual cost', type: 'payment_required', code: 'insufficient_funds_post' }, }); return; } - // Store voucher drainService.storeVoucher(voucher, channelState, actualCost); - // Calculate remaining const remaining = channelState.deposit - channelState.totalCharged - actualCost; - // Send response (already OpenAI format) res.set({ - 'X-DRAIN-Cost': 
actualCost.toString(), - 'X-DRAIN-Total': (channelState.totalCharged + actualCost).toString(), + 'X-DRAIN-Cost': actualCost.toString(), + 'X-DRAIN-Total': (channelState.totalCharged + actualCost).toString(), 'X-DRAIN-Remaining': remaining.toString(), - 'X-DRAIN-Channel': voucher.channelId, + 'X-DRAIN-Channel': voucher.channelId, + 'X-DRAIN-Backend': usingGroq ? 'groq' : 'openrouter', }).json(completion); } + } catch (error) { - console.error('OpenRouter API error:', error); - - const message = error instanceof Error ? error.message : 'OpenRouter API error'; - res.status(500).json({ - error: { - message, - type: 'api_error', - code: 'openrouter_error', - }, - }); + console.error(`[${usingGroq ? 'Groq' : 'OpenRouter'}] API error:`, error); + + // If Groq fails, retry on OpenRouter + if (usingGroq) { + console.warn('⚡ Groq failed — falling back to OpenRouter for', model); + try { + const orPricing = getModelPricing(model); + const fallbackModel = model; // try original model on OpenRouter + const completion = await openrouter.chat.completions.create({ + model: fallbackModel, + messages: req.body.messages, + max_tokens: req.body.max_tokens, + } as any); + + const p2 = orPricing ?? pricing; + const inputTokens = (completion as any).usage?.prompt_tokens ?? 0; + const outputTokens = (completion as any).usage?.completion_tokens ?? 0; + const actualCost = calculateCost(p2, inputTokens, outputTokens); + + drainService.storeVoucher(voucher, channelState, actualCost); + const remaining = channelState.deposit - channelState.totalCharged - actualCost; + + res.set({ + 'X-DRAIN-Cost': actualCost.toString(), + 'X-DRAIN-Remaining': remaining.toString(), + 'X-DRAIN-Backend': 'openrouter-fallback', + }).json(completion); + return; + } catch (fallbackError) { + console.error('OpenRouter fallback also failed:', fallbackError); + } + } + + const message = error instanceof Error ? 
error.message : 'API error'; + res.status(500).json({ error: { message, type: 'api_error', code: 'provider_error' } }); } }); -/** - * POST /v1/admin/claim - * Trigger payment claims - */ -app.post('/v1/admin/claim', async (req, res) => { +// --------------------------------------------------------------------------- +// Admin & health endpoints (unchanged from original) +// --------------------------------------------------------------------------- + +app.post('/v1/admin/claim', async (_req, res) => { try { - const forceAll = req.query.force === 'true'; - const txHashes = await drainService.claimPayments(forceAll); - res.json({ - success: true, - claimed: txHashes.length, - transactions: txHashes, - forced: forceAll, - }); + const txHashes = await drainService.claimPayments(false); + res.json({ success: true, claimed: txHashes.length, transactions: txHashes }); } catch (error) { - res.status(500).json({ - success: false, - error: error instanceof Error ? error.message : 'Claim failed', - }); + res.status(500).json({ success: false, error: error instanceof Error ? error.message : 'Claim failed' }); } }); -/** - * GET /v1/admin/stats - * Get provider statistics - */ -app.get('/v1/admin/stats', (req, res) => { +app.get('/v1/admin/stats', (_req, res) => { const stats = storage.getStats(); res.json({ - provider: drainService.getProviderAddress(), + provider: drainService.getProviderAddress(), providerName: config.providerName, - chainId: config.chainId, + chainId: config.chainId, ...stats, - totalEarned: formatUnits(stats.totalEarned, 6) + ' USDC', + totalEarned: formatUnits(stats.totalEarned, 6) + ' USDC', claimThreshold: formatUnits(config.claimThreshold, 6) + ' USDC', - totalModels: getSupportedModels().length, - pricingAge: `${getPricingAge()}s ago`, + totalModels: getSupportedModels().length, + pricingAge: `${getPricingAge()}s ago`, + groqEnabled: !!groq, + groqModels: groq ? 
Object.keys(GROQ_MODEL_MAP).length : 0, }); }); -/** - * GET /v1/admin/vouchers - * Get pending vouchers - */ -app.get('/v1/admin/vouchers', (req, res) => { - const unclaimed = storage.getUnclaimedVouchers(); +app.get('/v1/admin/vouchers', (_req, res) => { const highest = storage.getHighestVoucherPerChannel(); - res.json({ - provider: drainService.getProviderAddress(), + provider: drainService.getProviderAddress(), providerName: config.providerName, - unclaimedCount: unclaimed.length, channels: Array.from(highest.entries()).map(([channelId, voucher]) => ({ channelId, - amount: formatUnits(voucher.amount, 6) + ' USDC', - amountRaw: voucher.amount.toString(), - nonce: voucher.nonce.toString(), - consumer: voucher.consumer, - claimed: voucher.claimed, + amount: formatUnits(voucher.amount, 6) + ' USDC', + amountRaw: voucher.amount.toString(), + nonce: voucher.nonce.toString(), + consumer: voucher.consumer, + claimed: voucher.claimed, receivedAt: new Date(voucher.receivedAt).toISOString(), })), }); @@ -450,75 +514,51 @@ app.post('/v1/close-channel', async (req, res) => { try { const { channelId } = req.body; if (!channelId) return res.status(400).json({ error: 'channelId required' }); - const result = await drainService.signCloseAuthorization(channelId); - res.json({ - channelId, - finalAmount: result.finalAmount.toString(), - signature: result.signature, - }); + res.json({ channelId, finalAmount: result.finalAmount.toString(), signature: result.signature }); } catch (error: any) { - console.error('[close-channel] Error:', error?.message || error); res.status(500).json({ error: 'internal_error' }); } }); -/** - * Health check - */ -app.get('/health', (req, res) => { - res.json({ - status: 'ok', - provider: drainService.getProviderAddress(), +app.get('/health', (_req, res) => { + res.json({ + status: 'ok', + provider: drainService.getProviderAddress(), providerName: config.providerName, - models: getSupportedModels().length, + models: getSupportedModels().length, + 
groqEnabled: !!groq, + groqModels: groq ? Object.keys(GROQ_MODEL_MAP).length : 0, }); }); -/** - * Initialize and start server - */ +// --------------------------------------------------------------------------- +// Start +// --------------------------------------------------------------------------- async function main() { - // Initial pricing load console.log(`🚀 Starting ${config.providerName} Provider...`); await updatePricingCache(config.openrouterApiKey, config.markup, config.providerName); - - // Schedule periodic pricing refresh + setInterval(async () => { - try { - await updatePricingCache(config.openrouterApiKey, config.markup, config.providerName); - } catch (error) { - console.error('Failed to refresh pricing:', error); - } + try { await updatePricingCache(config.openrouterApiKey, config.markup, config.providerName); } + catch (e) { console.error('Failed to refresh pricing:', e); } }, config.pricingRefreshInterval); - // Start auto-claim: check every 10 min, claim channels expiring within 1 hour drainService.startAutoClaim(config.autoClaimIntervalMinutes, config.autoClaimBufferSeconds); - // Start server app.listen(config.port, config.host, () => { console.log(` ╔═══════════════════════════════════════════════════════════════╗ -║ ${config.providerName} Provider ║ -║ 200+ Models Available ║ +║ ${config.providerName} Provider (+ Groq Fast-Path) ║ ╠═══════════════════════════════════════════════════════════════╣ ║ Server: http://${config.host}:${config.port} ║ ║ Provider: ${drainService.getProviderAddress()} ║ ║ Chain: ${config.chainId === 137 ? 'Polygon Mainnet' : 'Polygon Amoy (Testnet)'} ║ -║ Models: ${getSupportedModels().length} models loaded ║ -║ Markup: ${(config.markup - 1) * 100}% on OpenRouter prices ║ +║ Models: ${getSupportedModels().length} OpenRouter + ${Object.keys(GROQ_MODEL_MAP).length} Groq aliases ║ +║ Markup: ${(config.markup - 1) * 100}% on upstream prices ║ +║ Groq: ${groq ? 
'⚡ ENABLED' : '❌ disabled (set GROQ_API_KEY)'} ║ ║ Auto-claim: Every ${config.autoClaimIntervalMinutes} min, buffer ${config.autoClaimBufferSeconds}s ║ ╚═══════════════════════════════════════════════════════════════╝ - -Endpoints: - GET /v1/pricing - View pricing (all models) - GET /v1/pricing?filter=gpt - Filter by model name - GET /v1/models - List all models - POST /v1/chat/completions - Chat (requires X-DRAIN-Voucher) - POST /v1/admin/claim - Claim pending payments - POST /v1/admin/refresh-pricing - Force refresh pricing - GET /v1/admin/stats - View statistics - GET /health - Health check `); }); } diff --git a/providers/hs58-orchestra/Dockerfile b/providers/hs58-orchestra/Dockerfile new file mode 100644 index 0000000..ef1396c --- /dev/null +++ b/providers/hs58-orchestra/Dockerfile @@ -0,0 +1,10 @@ +FROM node:20-alpine +WORKDIR /app +COPY package.json ./ +RUN npm install +COPY tsconfig.json ./ +COPY src/ ./src/ +RUN npm run build +RUN mkdir -p /app/data +EXPOSE 3000 +CMD ["npm", "start"] diff --git a/providers/hs58-orchestra/package.json b/providers/hs58-orchestra/package.json new file mode 100644 index 0000000..a7535e0 --- /dev/null +++ b/providers/hs58-orchestra/package.json @@ -0,0 +1,24 @@ +{ + "name": "@handshake58/hs58-orchestra", + "version": "1.0.0", + "type": "module", + "scripts": { + "build": "tsc", + "start": "node dist/index.js", + "dev": "tsx watch src/index.ts" + }, + "dependencies": { + "express": "^4.18.2", + "cors": "^2.8.5", + "openai": "^4.28.0", + "viem": "^2.7.0", + "dotenv": "^16.4.1" + }, + "devDependencies": { + "@types/express": "^4.17.21", + "@types/cors": "^2.8.17", + "@types/node": "^20.11.0", + "typescript": "^5.3.3", + "tsx": "^4.7.0" + } +} \ No newline at end of file diff --git a/providers/hs58-orchestra/src/constants.ts b/providers/hs58-orchestra/src/constants.ts new file mode 100644 index 0000000..ab94f4e --- /dev/null +++ b/providers/hs58-orchestra/src/constants.ts @@ -0,0 +1,93 @@ +import type { ProviderName } from 
'./types.js'; + +export const DRAIN_ADDRESSES: Record = { + 137: '0x0C2B3aA1e80629D572b1f200e6DF3586B3946A8A', + 80002: '0x61f1C1E04d6Da1C92D0aF1a3d7Dc0fEFc8794d7C', +}; + +export const USDC_DECIMALS = 6; +export const EIP712_DOMAIN = { name: 'DrainChannel', version: '1' } as const; + +export const DRAIN_CHANNEL_ABI = [ + { inputs: [{ name: 'channelId', type: 'bytes32' }], name: 'getChannel', outputs: [{ components: [{ name: 'consumer', type: 'address' }, { name: 'provider', type: 'address' }, { name: 'deposit', type: 'uint256' }, { name: 'claimed', type: 'uint256' }, { name: 'expiry', type: 'uint256' }], name: '', type: 'tuple' }], stateMutability: 'view', type: 'function' }, + { inputs: [{ name: 'channelId', type: 'bytes32' }], name: 'getBalance', outputs: [{ name: '', type: 'uint256' }], stateMutability: 'view', type: 'function' }, + { inputs: [{ name: 'channelId', type: 'bytes32' }, { name: 'amount', type: 'uint256' }, { name: 'nonce', type: 'uint256' }, { name: 'signature', type: 'bytes' }], name: 'claim', outputs: [], stateMutability: 'nonpayable', type: 'function' }, + { inputs: [{ name: 'channelId', type: 'bytes32' }, { name: 'finalAmount', type: 'uint256' }, { name: 'providerSignature', type: 'bytes' }], name: 'cooperativeClose', outputs: [], stateMutability: 'nonpayable', type: 'function' }, + { inputs: [], name: 'InvalidAmount', type: 'error' }, + { inputs: [], name: 'ChannelNotFound', type: 'error' }, + { inputs: [], name: 'InvalidSignature', type: 'error' }, + { inputs: [], name: 'NotProvider', type: 'error' }, + { inputs: [], name: 'NotExpired', type: 'error' }, + { anonymous: false, inputs: [{ indexed: true, name: 'channelId', type: 'bytes32' }, { indexed: true, name: 'provider', type: 'address' }, { indexed: false, name: 'amount', type: 'uint256' }], name: 'ChannelClaimed', type: 'event' }, +] as const; + +export const PERMANENT_CLAIM_ERRORS = ['InvalidAmount', 'ChannelNotFound', 'InvalidSignature', 'NotProvider', 'NotExpired'] as const; + +export 
const ORCHESTRA_BASE_FEE_USDC = 5_000n; +export const ORCHESTRA_PLAN_FEE_USDC = 10_000n; + +export const PROVIDER_COST_ESTIMATES: Record = { + chutes: 50_000n, openrouter: 30_000n, desearch: 5_000n, + e2b: 20_000n, replicate: 50_000n, numinous: 10_000n, vericore: 5_000n, +}; + +export const UPSTREAM = { + openrouter: { base: 'https://openrouter.ai/api/v1' }, + chutes: { base: 'https://llm.chutes.ai/v1' }, + desearch: { base: 'https://api.desearch.ai' }, + e2b: { base: 'https://api.e2b.dev' }, + replicate: { base: 'https://api.replicate.com/v1' }, + numinous: { base: 'https://api.numinous.ai', forecast: 'https://api.desearch.ai/numinous/forecasts' }, + vericore: { base: 'https://api.vericore.ai' }, +} as const; + +export const PLANNER_SYSTEM_PROMPT = `You are Orchestra — an AI task planner for the Handshake58 marketplace. + +Your job: given a user's goal, produce a precise, minimal execution plan as JSON. + +Available providers: +- chutes: LLM inference via Bittensor SN22 (DeepSeek R1, Qwen3-235B). Use for reasoning, writing, analysis. +- openrouter: LLM inference, 200+ models. Use when a specific frontier model is needed. +- desearch: Web/Twitter search, URL crawl via Bittensor SN22. Use for real-time information. +- e2b: Sandboxed code execution (Python, JS). Use for computation, data analysis. +- replicate: Image/audio/video generation. Use for media tasks. +- numinous: Probability forecasts via Bittensor SN6. Use for prediction tasks. +- vericore: Fact verification against live web. Use for claim checking. + +Rules: +1. Use minimum steps to achieve the goal. +2. Parallelise where possible (parallel: true on steps with no dependencies). +3. Mark required: false if step failure does not block the answer. +4. Estimate costs conservatively in USD. +5. Prefer Bittensor-native providers (chutes, desearch, numinous, vericore). +6. Never hallucinate providers or models. 
+ +Output ONLY valid JSON, no markdown: +{ + "goal": "", + "reasoning": "<1-2 sentences>", + "estimated_total_cost_usd": 0.05, + "steps": [ + { "id": "step_1", "provider": "chutes", "model": "deepseek-ai/DeepSeek-V3-0324", "task": "", "input_from": [], "parallel": true, "required": true, "estimated_cost_usd": 0.03 } + ] +}`; + +export const SYNTHESIZER_SYSTEM_PROMPT = `You are the synthesis engine for Orchestra, an AI orchestration platform. +You receive the original goal and outputs from multiple specialized providers. +Produce a single, clear, well-structured final answer. +- Integrate all relevant information from step outputs. +- Be concise but complete. +- If steps failed, work around them gracefully. +- Cite sources where relevant. +- Do not mention Orchestra, steps, or internal mechanics.`; + +export function getPaymentHeaders(providerAddress: string, chainId: number) { + return { + 'X-DRAIN-Error': 'voucher_required', + 'X-Payment-Protocol': 'drain-v2', + 'X-Payment-Provider': providerAddress, + 'X-Payment-Contract': DRAIN_ADDRESSES[chainId], + 'X-Payment-Chain': String(chainId), + 'X-Payment-Docs': '/v1/docs', + }; +} diff --git a/providers/hs58-orchestra/src/drain.ts b/providers/hs58-orchestra/src/drain.ts new file mode 100644 index 0000000..4e0519a --- /dev/null +++ b/providers/hs58-orchestra/src/drain.ts @@ -0,0 +1,138 @@ +import { createPublicClient, createWalletClient, http, verifyTypedData, type Hash, type Hex, type Address } from 'viem'; +import { polygon, polygonAmoy } from 'viem/chains'; +import { privateKeyToAccount } from 'viem/accounts'; +import { DRAIN_ADDRESSES, DRAIN_CHANNEL_ABI, EIP712_DOMAIN, PERMANENT_CLAIM_ERRORS } from './constants.js'; +import type { ProviderConfig, VoucherHeader, StoredVoucher, ChannelState } from './types.js'; +import { VoucherStorage } from './storage.js'; + +export class DrainService { + private config: ProviderConfig; + private storage: VoucherStorage; + private publicClient: any; + private walletClient: any; + 
private account: any; + private contractAddress: Address; + private autoClaimInterval: ReturnType | null = null; + + constructor(config: ProviderConfig, storage: VoucherStorage) { + this.config = config; + this.storage = storage; + const chain = config.chainId === 137 ? polygon : polygonAmoy; + const rpcUrl = config.polygonRpcUrl; + this.publicClient = createPublicClient({ chain, transport: http(rpcUrl) }); + this.account = privateKeyToAccount(config.providerPrivateKey); + this.walletClient = createWalletClient({ account: this.account, chain, transport: http(rpcUrl) }); + if (rpcUrl) console.log(`[drain] RPC: ${rpcUrl.replace(/\/[^/]{8,}$/, '/***')}`); + else console.warn('[drain] No POLYGON_RPC_URL — using public RPC'); + this.contractAddress = DRAIN_ADDRESSES[config.chainId] as Address; + } + + getProviderAddress(): Address { return this.account.address; } + + parseVoucherHeader(header: string): VoucherHeader | null { + try { + const p = JSON.parse(header); + if (!p.channelId || !p.amount || !p.nonce || !p.signature) return null; + return { channelId: p.channelId, amount: p.amount, nonce: p.nonce, signature: p.signature }; + } catch { return null; } + } + + async validateVoucher(voucher: VoucherHeader, requiredAmount: bigint): Promise<{ valid: boolean; error?: string; channel?: ChannelState }> { + try { + const amount = BigInt(voucher.amount); + const nonce = BigInt(voucher.nonce); + const channelData = await this.publicClient.readContract({ address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, functionName: 'getChannel', args: [voucher.channelId] }) as any; + if (channelData.consumer === '0x0000000000000000000000000000000000000000') return { valid: false, error: 'channel_not_found' }; + if (channelData.provider.toLowerCase() !== this.account.address.toLowerCase()) return { valid: false, error: 'wrong_provider' }; + let ch = this.storage.getChannel(voucher.channelId); + if (!ch) ch = { channelId: voucher.channelId, consumer: channelData.consumer, deposit: 
channelData.deposit, totalCharged: 0n, expiry: Number(channelData.expiry), createdAt: Date.now(), lastActivityAt: Date.now() };
      else if (!ch.expiry) ch.expiry = Number(channelData.expiry);
      // Refuse work on an already-expired channel: a voucher signed against it can
      // no longer be claimed on-chain, so serving the request would be unpaid work.
      if (ch.expiry && ch.expiry <= Math.floor(Date.now() / 1000)) return { valid: false, error: 'channel_expired', channel: ch };
      // The voucher's cumulative amount must cover everything already charged plus this request.
      if (amount < ch.totalCharged + requiredAmount) return { valid: false, error: 'insufficient_funds', channel: ch };
      // ...and can never exceed what the consumer actually deposited on-chain.
      if (amount > channelData.deposit) return { valid: false, error: 'exceeds_deposit', channel: ch };
      // Strictly increasing nonces prevent replay of an older (lower-amount) voucher.
      if (ch.lastVoucher && nonce <= ch.lastVoucher.nonce) return { valid: false, error: 'invalid_nonce', channel: ch };
      // EIP-712 check: the consumer recorded on-chain must have signed this exact voucher tuple.
      const isValid = await verifyTypedData({
        address: channelData.consumer,
        domain: { name: EIP712_DOMAIN.name, version: EIP712_DOMAIN.version, chainId: this.config.chainId, verifyingContract: this.contractAddress },
        types: { Voucher: [{ name: 'channelId', type: 'bytes32' }, { name: 'amount', type: 'uint256' }, { name: 'nonce', type: 'uint256' }] },
        primaryType: 'Voucher',
        message: { channelId: voucher.channelId, amount, nonce },
        signature: voucher.signature,
      });
      if (!isValid) return { valid: false, error: 'invalid_signature' };
      return { valid: true, channel: ch };
    } catch (e: any) { return { valid: false, error: e?.message ?? 
'validation_error' }; } + } + + storeVoucher(voucher: VoucherHeader, channelState: ChannelState, cost: bigint): void { + const stored: StoredVoucher = { channelId: voucher.channelId, amount: BigInt(voucher.amount), nonce: BigInt(voucher.nonce), signature: voucher.signature, consumer: channelState.consumer, receivedAt: Date.now(), claimed: false }; + channelState.totalCharged += cost; + channelState.lastVoucher = stored; + channelState.lastActivityAt = Date.now(); + this.storage.storeVoucher(stored); + this.storage.updateChannel(voucher.channelId, channelState); + } + + async getChannelBalance(channelId: Hash): Promise { + return await this.publicClient.readContract({ address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, functionName: 'getBalance', args: [channelId] }) as bigint; + } + + async claimPayments(forceAll = false): Promise { + const txHashes: Hash[] = []; + for (const [channelId, voucher] of this.storage.getHighestVoucherPerChannel()) { + if (!forceAll && voucher.amount < this.config.claimThreshold) continue; + try { const b = await this.getChannelBalance(voucher.channelId); if (b === 0n) { this.storage.markClaimed(channelId, '0x0' as Hash); continue; } } catch {} + try { + const hash = await this.walletClient.writeContract({ address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, functionName: 'claim', args: [voucher.channelId, voucher.amount, voucher.nonce, voucher.signature] }); + this.storage.markClaimed(channelId, hash); + txHashes.push(hash); + console.log(`[drain] Claimed ${voucher.amount} from ${channelId}: ${hash}`); + } catch (e: any) { this.handleClaimError('claim', channelId, e); } + } + return txHashes; + } + + async claimExpiring(bufferSeconds = 3600): Promise { + const txHashes: Hash[] = []; + const now = Math.floor(Date.now() / 1000); + for (const [channelId, voucher] of this.storage.getHighestVoucherPerChannel()) { + const ch = this.storage.getChannel(channelId); + if (!ch?.expiry || ch.expiry - now > bufferSeconds || voucher.amount <= 
0n) continue; + try { const b = await this.getChannelBalance(voucher.channelId); if (b === 0n) { this.storage.markClaimed(channelId, '0x0' as Hash); continue; } } catch {} + try { + const hash = await this.walletClient.writeContract({ address: this.contractAddress, abi: DRAIN_CHANNEL_ABI, functionName: 'claim', args: [voucher.channelId, voucher.amount, voucher.nonce, voucher.signature] }); + this.storage.markClaimed(channelId, hash); + txHashes.push(hash); + } catch (e: any) { this.handleClaimError('auto-claim', channelId, e); } + } + return txHashes; + } + + startAutoClaim(intervalMinutes = 10, bufferSeconds = 3600): void { + if (this.autoClaimInterval) return; + console.log(`[auto-claim] Every ${intervalMinutes}min, buffer ${bufferSeconds / 60}min`); + this.autoClaimInterval = setInterval(async () => { try { await this.claimExpiring(bufferSeconds); } catch (e) { console.error('[auto-claim]', e); } }, intervalMinutes * 60_000); + this.claimExpiring(bufferSeconds).catch(console.error); + } + + async signCloseAuthorization(channelId: Hash): Promise<{ finalAmount: bigint; signature: Hex }> { + const highest = this.storage.getHighestVoucherPerChannel().get(channelId); + const finalAmount = highest ? 
highest.amount : 0n; + const signature = await this.walletClient.signTypedData({ + domain: { name: EIP712_DOMAIN.name, version: EIP712_DOMAIN.version, chainId: this.config.chainId, verifyingContract: this.contractAddress }, + types: { CloseAuthorization: [{ name: 'channelId', type: 'bytes32' }, { name: 'finalAmount', type: 'uint256' }] }, + primaryType: 'CloseAuthorization', + message: { channelId, finalAmount }, + }); + return { finalAmount, signature }; + } + + private handleClaimError(ctx: string, channelId: string, error: any): void { + const errorName = error?.cause?.data?.errorName || error?.cause?.reason; + if (errorName && PERMANENT_CLAIM_ERRORS.includes(errorName as any)) { + console.error(`[${ctx}] ${channelId}: ${errorName} (permanent)`); + this.storage.markClaimed(channelId as Hash, '0x0' as Hash); + } else { + console.error(`[${ctx}] ${channelId}: ${error?.shortMessage || error?.message} (will retry)`); + } + } +} diff --git a/providers/hs58-orchestra/src/executor.ts b/providers/hs58-orchestra/src/executor.ts new file mode 100644 index 0000000..d596a00 --- /dev/null +++ b/providers/hs58-orchestra/src/executor.ts @@ -0,0 +1,123 @@ +import type { PlanStep, StepResult, ProviderConfig } from './types.js'; +import { UPSTREAM } from './constants.js'; + +async function fetchJSON(url: string, init: RequestInit, timeoutMs = 30_000): Promise { + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), timeoutMs); + try { + const res = await fetch(url, { ...init, signal: controller.signal }); + if (!res.ok) { const text = await res.text().catch(() => ''); throw new Error(`HTTP ${res.status}: ${text.slice(0, 200)}`); } + return res.json(); + } finally { clearTimeout(timer); } +} + +async function llmChat(baseURL: string, apiKey: string, model: string, messages: { role: string; content: string }[], timeoutMs = 60_000): Promise { + const data = await fetchJSON(`${baseURL}/chat/completions`, { + method: 'POST', + headers: { 
Authorization: `Bearer ${apiKey}`, 'Content-Type': 'application/json' }, + body: JSON.stringify({ model, messages, max_tokens: 2048, temperature: 0.3 }), + }, timeoutMs); + return data.choices?.[0]?.message?.content ?? ''; +} + +async function runChutes(step: PlanStep, config: ProviderConfig, context: string): Promise { + return llmChat(UPSTREAM.chutes.base, config.chutesApiKey, step.model, + [{ role: 'user', content: context ? `Context:\n${context}\n\nTask: ${step.task}` : step.task }], 90_000); +} + +async function runOpenRouter(step: PlanStep, config: ProviderConfig, context: string): Promise { + return llmChat(UPSTREAM.openrouter.base, config.openrouterApiKey, step.model, + [{ role: 'user', content: context ? `Context:\n${context}\n\nTask: ${step.task}` : step.task }], 90_000); +} + +async function runDesearch(step: PlanStep, config: ProviderConfig, _context: string): Promise { + const endpoint = step.model.replace('desearch/', ''); + const query = step.task; + let url: string; let body: any; let method: 'GET' | 'POST' = 'POST'; + switch (endpoint) { + case 'ai-search': + url = `${UPSTREAM.desearch.base}/desearch/ai/search`; + body = { prompt: query, tools: ['web', 'hackernews', 'reddit'], count: 10, streaming: false, result_type: 'LINKS_WITH_FINAL_SUMMARY' }; + break; + case 'web': + url = `${UPSTREAM.desearch.base}/web?query=${encodeURIComponent(query)}`; method = 'GET'; body = undefined; break; + case 'twitter': + url = `${UPSTREAM.desearch.base}/desearch/ai/search/links/twitter`; + body = { prompt: query, count: 10 }; break; + case 'crawl': + url = `${UPSTREAM.desearch.base}/web/crawl?url=${encodeURIComponent(query)}`; method = 'GET'; body = undefined; break; + default: + url = `${UPSTREAM.desearch.base}/desearch/ai/search`; + body = { prompt: query, tools: ['web'], count: 10, streaming: false, result_type: 'LINKS_WITH_FINAL_SUMMARY' }; + } + const data = await fetchJSON(url, { method, headers: { Authorization: config.desearchApiKey, 'Content-Type': 
'application/json' }, body: body ? JSON.stringify(body) : undefined }, 30_000); + if (data.text) return data.text; + if (data.results) return JSON.stringify(data.results).slice(0, 3000); + if (data.organic_results) return data.organic_results.slice(0, 5).map((r: any) => `${r.title}: ${r.snippet}`).join('\n'); + return JSON.stringify(data).slice(0, 3000); +} + +async function runE2B(step: PlanStep, config: ProviderConfig, context: string): Promise { + if (!config.e2bApiKey) throw new Error('E2B API key not configured'); + const code = context ? `# Context:\n# ${context.split('\n').join('\n# ')}\n\n${step.task}` : step.task; + const sandbox = await fetchJSON(`${UPSTREAM.e2b.base}/sandboxes`, { method: 'POST', headers: { 'X-API-Key': config.e2bApiKey, 'Content-Type': 'application/json' }, body: JSON.stringify({ template: 'base', timeout: 30 }) }, 15_000); + try { + const exec = await fetchJSON(`${UPSTREAM.e2b.base}/sandboxes/${sandbox.sandboxId}/process/start`, { method: 'POST', headers: { 'X-API-Key': config.e2bApiKey, 'Content-Type': 'application/json' }, body: JSON.stringify({ cmd: 'python3', args: ['-c', code] }) }, 35_000); + return `stdout:\n${exec.stdout || ''}\nstderr:\n${exec.stderr || ''}`.slice(0, 3000); + } finally { + await fetch(`${UPSTREAM.e2b.base}/sandboxes/${sandbox.sandboxId}`, { method: 'DELETE', headers: { 'X-API-Key': config.e2bApiKey } }).catch(() => {}); + } +} + +async function runNuminous(step: PlanStep, config: ProviderConfig, _context: string): Promise { + const job = await fetchJSON(UPSTREAM.numinous.forecast, { method: 'POST', headers: { Authorization: config.desearchApiKey, 'Content-Type': 'application/json' }, body: JSON.stringify({ question: step.task }) }, 15_000); + const predictionId = job.prediction_id; + if (!predictionId) return JSON.stringify(job); + for (let i = 0; i < 6; i++) { + await new Promise(r => setTimeout(r, 5000)); + const result = await fetchJSON(`${UPSTREAM.numinous.forecast}/${predictionId}`, { headers: { 
Authorization: config.desearchApiKey } }, 10_000); + if (result.status === 'completed' || result.probability !== undefined) return `Probability: ${result.probability ?? 'N/A'}\nReasoning: ${result.reasoning ?? ''}\nSources: ${JSON.stringify(result.sources ?? []).slice(0, 500)}`; + } + return `Forecast pending (id: ${predictionId})`; +} + +async function runReplicate(step: PlanStep, config: ProviderConfig, _context: string): Promise { + if (!config.replicateApiToken) throw new Error('Replicate API token not configured'); + const prediction = await fetchJSON(`${UPSTREAM.replicate.base}/predictions`, { method: 'POST', headers: { Authorization: `Bearer ${config.replicateApiToken}`, 'Content-Type': 'application/json' }, body: JSON.stringify({ version: step.model, input: { prompt: step.task } }) }, 15_000); + let result = prediction; + for (let i = 0; i < 12; i++) { + if (result.status === 'succeeded') break; + if (result.status === 'failed') throw new Error(result.error ?? 'Replicate failed'); + await new Promise(r => setTimeout(r, 5000)); + result = await fetchJSON(result.urls.get, { headers: { Authorization: `Bearer ${config.replicateApiToken}` } }, 10_000); + } + return Array.isArray(result.output) ? result.output.join('\n') : String(result.output ?? 'No output'); +} + +async function runVericore(step: PlanStep, config: ProviderConfig, _context: string): Promise { + const data = await fetchJSON(`${UPSTREAM.vericore.base}/v1/verify`, { method: 'POST', headers: { Authorization: `Bearer ${config.desearchApiKey}`, 'Content-Type': 'application/json' }, body: JSON.stringify({ claim: step.task }) }, 30_000); + return `Verdict: ${data.verdict ?? 'unknown'}\nScore: ${data.score ?? 'N/A'}\nEvidence: ${JSON.stringify(data.evidence ?? []).slice(0, 1000)}`; +} + +export async function executeStep(step: PlanStep, config: ProviderConfig, priorOutputs: Map): Promise { + const start = Date.now(); + const contextParts: string[] = []; + for (const depId of (step.input_from ?? 
[])) { const out = priorOutputs.get(depId); if (out) contextParts.push(`[${depId}]: ${out}`); } + const context = contextParts.join('\n\n'); + try { + let output: string; + switch (step.provider) { + case 'chutes': output = await runChutes(step, config, context); break; + case 'openrouter': output = await runOpenRouter(step, config, context); break; + case 'desearch': output = await runDesearch(step, config, context); break; + case 'e2b': output = await runE2B(step, config, context); break; + case 'numinous': output = await runNuminous(step, config, context); break; + case 'replicate': output = await runReplicate(step, config, context); break; + case 'vericore': output = await runVericore(step, config, context); break; + default: throw new Error(`Unknown provider: ${(step as any).provider}`); + } + return { step_id: step.id, provider: step.provider, model: step.model, status: 'done', output: output.slice(0, 8000), cost_usd: step.estimated_cost_usd, duration_ms: Date.now() - start }; + } catch (err: any) { + return { step_id: step.id, provider: step.provider, model: step.model, status: 'failed', error: err?.message ?? 
String(err), cost_usd: 0, duration_ms: Date.now() - start }; + } +} diff --git a/providers/hs58-orchestra/src/index.ts b/providers/hs58-orchestra/src/index.ts new file mode 100644 index 0000000..9261c51 --- /dev/null +++ b/providers/hs58-orchestra/src/index.ts @@ -0,0 +1,175 @@ +import express from 'express'; +import cors from 'cors'; +import { config as dotenv } from 'dotenv'; +import { formatUnits } from 'viem'; +import type { Hex } from 'viem'; +import { DrainService } from './drain.js'; +import { VoucherStorage } from './storage.js'; +import { orchestrate } from './planner.js'; +import { ORCHESTRA_BASE_FEE_USDC, ORCHESTRA_PLAN_FEE_USDC, PROVIDER_COST_ESTIMATES, DRAIN_ADDRESSES } from './constants.js'; +import type { ProviderConfig, OrchestraRequest, OrchestraStreamEvent } from './types.js'; + +dotenv(); + +function requireEnv(k: string): string { const v = process.env[k]; if (!v) { console.error(`[config] Missing: ${k}`); return `MISSING_${k}`; } return v; } +function optEnv(k: string, d: string): string { return process.env[k] ?? 
d; } + +const chainId = parseInt(optEnv('CHAIN_ID', '137')) as 137 | 80002; + +const config: ProviderConfig = { + port: parseInt(optEnv('PORT', '3000')), host: optEnv('HOST', '0.0.0.0'), chainId, + providerPrivateKey: requireEnv('PROVIDER_PRIVATE_KEY') as Hex, + polygonRpcUrl: process.env.POLYGON_RPC_URL, + claimThreshold: BigInt(optEnv('CLAIM_THRESHOLD', '10000000')), + storagePath: optEnv('STORAGE_PATH', '/app/data/vouchers.json'), + providerName: optEnv('PROVIDER_NAME', 'HS58-Orchestra'), + autoClaimIntervalMinutes: parseInt(optEnv('AUTO_CLAIM_INTERVAL_MINUTES', '10')), + autoClaimBufferSeconds: parseInt(optEnv('AUTO_CLAIM_BUFFER_SECONDS', '3600')), + openrouterApiKey: requireEnv('OPENROUTER_API_KEY'), + desearchApiKey: requireEnv('DESEARCH_API_KEY'), + chutesApiKey: requireEnv('CHUTES_API_KEY'), + e2bApiKey: process.env.E2B_API_KEY, + replicateApiToken: process.env.REPLICATE_API_TOKEN, + markupMultiplier: 1 + parseInt(optEnv('MARKUP_PERCENT', '30')) / 100, + maxPlanSteps: parseInt(optEnv('MAX_PLAN_STEPS', '6')), + plannerModel: optEnv('PLANNER_MODEL', 'deepseek-ai/DeepSeek-R1'), + synthesizerModel: optEnv('SYNTHESIZER_MODEL', 'deepseek-ai/DeepSeek-V3-0324'), +}; + +const storage = new VoucherStorage(config.storagePath); +const drainService = new DrainService(config, storage); +const app = express(); +app.use(cors()); +app.use(express.json({ limit: '512kb' })); + +function paymentHeaders() { + return { 'X-Payment-Protocol': 'drain-v2', 'X-Payment-Provider': drainService.getProviderAddress(), 'X-Payment-Contract': DRAIN_ADDRESSES[chainId], 'X-Payment-Chain': String(chainId), 'X-Payment-Docs': '/v1/docs' }; +} + +async function requirePayment(req: express.Request, res: express.Response, minCost: bigint): Promise<{ voucher: any; channel: any } | null> { + const header = req.headers['x-drain-voucher'] as string | undefined; + if (!header) { res.status(402).set({ ...paymentHeaders(), 'X-DRAIN-Error': 'voucher_required' }).json({ error: { message: 'X-DRAIN-Voucher 
header required', code: 'voucher_required' } }); return null; } + const voucher = drainService.parseVoucherHeader(header); + if (!voucher) { res.status(402).json({ error: { message: 'Invalid voucher format', code: 'invalid_voucher' } }); return null; } + const validation = await drainService.validateVoucher(voucher, minCost); + if (!validation.valid) { res.status(402).set({ 'X-DRAIN-Error': validation.error!, 'X-DRAIN-Required': minCost.toString() }).json({ error: { message: `Payment error: ${validation.error}`, code: validation.error } }); return null; } + return { voucher, channel: validation.channel }; +} + +app.get('/health', (_req, res) => { + res.json({ status: 'ok', provider: drainService.getProviderAddress(), providerName: config.providerName, chainId: config.chainId, models:3, modes: ['auto', 'plan', 'pipeline'], bittensor_native: ['chutes', 'desearch', 'numinous', 'vericore'] }); +}); + +app.get('/v1/models', (_req, res) => { + res.json({ object: 'list', data: [ + { id: 'orchestra/auto', object: 'model', created: 1742000000, owned_by: 'hs58-orchestra', name: 'Orchestra: Auto', context_length: 128000 }, +{ id: 'orchestra/plan', object: 'model', created: 1742000000, owned_by: 'hs58-orchestra', name: 'Orchestra: Plan', context_length: 128000 }, +{ id: 'orchestra/pipeline', object: 'model', created: 1742000000, owned_by: 'hs58-orchestra', name: 'Orchestra: Pipeline', context_length: 128000 }, + ]}); +}); + +app.get('/v1/pricing', (_req, res) => { + const markup = Math.round((config.markupMultiplier - 1) * 100); + res.json({ + provider: drainService.getProviderAddress(), providerName: config.providerName, chainId: config.chainId, currency: 'USDC', decimals:6, markup: `${markup}%`, + fees: { base_coordination: `$${formatUnits(ORCHESTRA_BASE_FEE_USDC, 6)} per orchestration`, plan_only: `$${formatUnits(ORCHESTRA_PLAN_FEE_USDC, 6)} per plan` }, + provider_cost_estimates: Object.fromEntries(Object.entries(PROVIDER_COST_ESTIMATES).map(([k, v]) => [k, 
`$${formatUnits(v as bigint, 6)}`])), + }); +}); + +app.get('/v1/docs', (_req, res) => { + const markup = Math.round((config.markupMultiplier - 1) * 100); + res.type('text/plain').send(`# HS58-Orchestra\n\nThe only AI orchestration layer on Handshake58.\nSend one goal. Orchestra plans it, runs it across providers in parallel, returns one synthesized answer.\n\n## Request schema\n{ "mode": "auto|plan|pipeline", "goal": "", "context": "", "budget_usd": 0.10, "providers": [], "stream": false, "steps": [] }\n\n## Pricing\nBase fee: $${formatUnits(ORCHESTRA_BASE_FEE_USDC, 6)} | Plan fee: $${formatUnits(ORCHESTRA_PLAN_FEE_USDC, 6)} | Markup: ${markup}%\n`); +}); + +app.post('/v1/chat/completions', async (req, res) => { + const model = (req.body.model as string) ?? 'orchestra/auto'; + const messages = (req.body.messages as Array<{ role: string; content: string }>) ?? []; + const isStream = req.body.stream === true; + + if (!['orchestra/auto', 'orchestra/plan', 'orchestra/pipeline'].includes(model)) + return res.status(400).json({ error: { message: `Unknown model: ${model}` } }); + + const lastUser = messages.filter(m => m.role === 'user').pop(); + if (!lastUser?.content?.trim()) return res.status(400).json({ error: { message: 'Send OrchestraRequest JSON as user message.' } }); + + let orchRequest: OrchestraRequest; + try { orchRequest = JSON.parse(lastUser.content); } + catch { return res.status(400).json({ error: { message: 'User message must be valid JSON. See GET /v1/docs.' } }); } + + if (model === 'orchestra/plan') orchRequest.mode = 'plan'; + if (model === 'orchestra/pipeline') orchRequest.mode = 'pipeline'; + if (model === 'orchestra/auto') orchRequest.mode = 'auto'; + if (!orchRequest.goal) return res.status(400).json({ error: { message: '"goal" is required.' } }); + + const maxPerStep = Math.max(...Object.values(PROVIDER_COST_ESTIMATES).map(Number)); + const preAuthCost = orchRequest.mode === 'plan' + ? 
ORCHESTRA_PLAN_FEE_USDC
    : ORCHESTRA_BASE_FEE_USDC + BigInt(config.maxPlanSteps) * BigInt(maxPerStep);

  const payment = await requirePayment(req, res, preAuthCost);
  if (!payment) return;
  const { voucher, channel } = payment;

  if (isStream) {
    res.setHeader('Content-Type', 'text/event-stream');
    res.setHeader('Cache-Control', 'no-cache');
    res.setHeader('Connection', 'keep-alive');
    res.setHeader('X-DRAIN-Channel', voucher.channelId);
    const sendEvent = (event: OrchestraStreamEvent) => res.write(`data: ${JSON.stringify(event)}\n\n`);
    try {
      const result = await orchestrate(orchRequest, config, sendEvent);
      // Final price = flat coordination fee + marked-up upstream cost (USD -> 6-decimal USDC units).
      const totalCostWei = ORCHESTRA_BASE_FEE_USDC + BigInt(Math.ceil(result.total_cost_usd * 1_000_000 * config.markupMultiplier));
      drainService.storeVoucher(voucher, channel, totalCostWei);
      res.write(`data: [DONE]\n\n`); res.end();
    } catch (err: any) { sendEvent({ event: 'error', data: { message: err?.message }, timestamp: Date.now() }); res.end(); }
    return;
  }

  try {
    const result = await orchestrate(orchRequest, config);
    const totalCostWei = ORCHESTRA_BASE_FEE_USDC + BigInt(Math.ceil(result.total_cost_usd * 1_000_000 * config.markupMultiplier));
    // NOTE: storeVoucher mutates `channel` — it adds totalCostWei to channel.totalCharged.
    // The accounting headers below therefore must NOT add/subtract the cost a second
    // time (the previous code double-counted it in X-DRAIN-Total and X-DRAIN-Remaining).
    drainService.storeVoucher(voucher, channel, totalCostWei);
    const remaining = BigInt(channel.deposit) - BigInt(channel.totalCharged);
    res.set({ 'X-DRAIN-Cost': totalCostWei.toString(), 'X-DRAIN-Total': channel.totalCharged.toString(), 'X-DRAIN-Remaining': remaining.toString(), 'X-DRAIN-Channel': voucher.channelId });
    res.json({
      id: `orchestra-${Date.now()}`, object: 'chat.completion', created: Math.floor(Date.now() / 1000), model,
      choices: [{ index: 0, message: { role: 'assistant', content: JSON.stringify(result, null, 2) }, finish_reason: 'stop' }],
      usage: { prompt_tokens: 0, completion_tokens: Math.ceil(JSON.stringify(result).length / 4), total_tokens: Math.ceil(JSON.stringify(result).length / 4) },
      orchestra: { synthesis: result.synthesis, 
providers_used: result.providers_used, steps_completed: result.steps.filter(s => s.status === 'done').length, steps_failed: result.steps.filter(s => s.status === 'failed').length, total_duration_ms: result.total_duration_ms, total_cost_usd: result.total_cost_usd }, + }); + } catch (err: any) { + console.error('[orchestra] Error:', err?.message); + res.status(500).json({ error: { message: err?.message ?? 'Orchestration error', code: 'orchestra_error' } }); + } +}); + +app.post('/v1/admin/claim', async (req, res) => { + try { const tx = await drainService.claimPayments(req.query.force === 'true'); res.json({ claimed: tx.length, transactions: tx }); } + catch (err: any) { res.status(500).json({ error: err.message }); } +}); + +app.get('/v1/admin/stats', (_req, res) => { + const stats = storage.getStats(); + res.json({ provider: drainService.getProviderAddress(), providerName: config.providerName, ...stats, totalEarned: formatUnits(stats.totalEarned, 6) + ' USDC' }); +}); + +app.post('/v1/close-channel', async (req, res) => { + try { + const { channelId } = req.body; + if (!channelId) return res.status(400).json({ error: 'channelId required' }); + const result = await drainService.signCloseAuthorization(channelId); + res.json({ channelId, finalAmount: result.finalAmount.toString(), signature: result.signature }); + } catch (err: any) { res.status(500).json({ error: err?.message ?? 'internal_error' }); } +}); + +async function main() { + drainService.startAutoClaim(config.autoClaimIntervalMinutes, config.autoClaimBufferSeconds); + app.listen(config.port, config.host, () => { + console.log(`[orchestra] Listening on ${config.host}:${config.port}`); + console.log(`[orchestra] Provider: ${drainService.getProviderAddress()}`); + console.log(`[orchestra] Chain: ${chainId === 137 ? 
'Polygon Mainnet' : 'Polygon Amoy'}`); + }); +} + +main().catch(err => { console.error('[orchestra] Fatal:', err); process.exit(1); }); diff --git a/providers/hs58-orchestra/src/planner.ts b/providers/hs58-orchestra/src/planner.ts new file mode 100644 index 0000000..83cb308 --- /dev/null +++ b/providers/hs58-orchestra/src/planner.ts @@ -0,0 +1,108 @@ +import OpenAI from 'openai'; +import type { ExecutionPlan, OrchestrationResult, OrchestraRequest, OrchestraStreamEvent, PlanStep, StepResult, ProviderConfig, ProviderName } from './types.js'; +import { PLANNER_SYSTEM_PROMPT, SYNTHESIZER_SYSTEM_PROMPT, UPSTREAM } from './constants.js'; +import { executeStep } from './executor.js'; + +export async function buildPlan(request: OrchestraRequest, config: ProviderConfig): Promise { + const client = new OpenAI({ apiKey: config.chutesApiKey, baseURL: UPSTREAM.chutes.base }); + const userMessage = [ + `Goal: ${request.goal}`, + request.context ? `Context: ${request.context}` : '', + request.budget_usd ? `Budget cap: $${request.budget_usd} USD` : '', + request.providers?.length ? `Allowed providers: ${request.providers.join(', ')}` : '', + ].filter(Boolean).join('\n'); + + const completion = await client.chat.completions.create({ + model: config.plannerModel, + messages: [{ role: 'system', content: PLANNER_SYSTEM_PROMPT }, { role: 'user', content: userMessage }], + max_tokens: 1024, temperature: 0.1, + }); + + const raw = completion.choices[0]?.message?.content ?? 
'{}'; + const cleaned = raw.replace(/^```(?:json)?\n?/m, '').replace(/\n?```$/m, '').trim(); + let plan: ExecutionPlan; + try { plan = JSON.parse(cleaned); } + catch { + plan = { goal: request.goal, reasoning: 'Fallback single-step plan', estimated_total_cost_usd: 0.03, + steps: [{ id: 'step_1', provider: 'chutes', model: config.plannerModel, task: request.goal, input_from: [], parallel: true, required: true, estimated_cost_usd: 0.03 }] }; + } + + if (request.budget_usd) { + let cumulative = 0; + plan.steps = plan.steps.filter(s => { cumulative += s.estimated_cost_usd; return cumulative <= request.budget_usd!; }); + plan.estimated_total_cost_usd = cumulative; + } + if (request.providers?.length) plan.steps = plan.steps.filter(s => request.providers!.includes(s.provider)); + plan.steps = plan.steps.slice(0, config.maxPlanSteps); + return plan; +} + +function buildWaves(steps: PlanStep[]): PlanStep[][] { + const completed = new Set(); + const remaining = [...steps]; + const waves: PlanStep[][] = []; + while (remaining.length > 0) { + const wave: PlanStep[] = []; const still: PlanStep[] = []; + for (const step of remaining) { + if ((step.input_from ?? []).every(d => completed.has(d))) wave.push(step); + else still.push(step); + } + if (wave.length === 0) { waves.push(remaining); break; } + waves.push(wave); + wave.forEach(s => completed.add(s.id)); + remaining.splice(0, remaining.length, ...still); + } + return waves; +} + +async function synthesize(goal: string, results: StepResult[], config: ProviderConfig): Promise { + const client = new OpenAI({ apiKey: config.chutesApiKey, baseURL: UPSTREAM.chutes.base }); + const stepSummaries = results.map(r => + r.status === 'done' + ? 
`[${r.step_id} — ${r.provider}/${r.model}]\n${r.output}` + : `[${r.step_id} — ${r.provider}/${r.model}] FAILED: ${r.error}` + ).join('\n\n---\n\n'); + const completion = await client.chat.completions.create({ + model: config.synthesizerModel, + messages: [{ role: 'system', content: SYNTHESIZER_SYSTEM_PROMPT }, { role: 'user', content: `Goal: ${goal}\n\nStep outputs:\n\n${stepSummaries}` }], + max_tokens: 2048, temperature: 0.4, + }); + return completion.choices[0]?.message?.content ?? 'Unable to synthesize result.'; +} + +export async function orchestrate(request: OrchestraRequest, config: ProviderConfig, onEvent?: (event: OrchestraStreamEvent) => void): Promise { + const globalStart = Date.now(); + const emit = (event: OrchestraStreamEvent['event'], data: any) => onEvent?.({ event, data, timestamp: Date.now() }); + + let plan: ExecutionPlan; + if (request.mode === 'pipeline' && request.steps?.length) { + plan = { goal: request.goal, steps: request.steps, estimated_total_cost_usd: request.steps.reduce((s, x) => s + x.estimated_cost_usd, 0), reasoning: 'User-provided pipeline' }; + } else { + plan = await buildPlan(request, config); + } + emit('plan', plan); + + if (request.mode === 'plan') { + return { goal: plan.goal, plan, steps: [], synthesis: '(plan-only mode — set mode: "auto" to execute)', total_cost_usd: 0, total_duration_ms: Date.now() - globalStart, providers_used: [] }; + } + + const allResults: StepResult[] = []; + const priorOutputs = new Map(); + for (const wave of buildWaves(plan.steps)) { + emit('step_start', { steps: wave.map(s => s.id) }); + const waveResults = await Promise.all(wave.map(step => executeStep(step, config, priorOutputs))); + for (const result of waveResults) { + allResults.push(result); + if (result.output) priorOutputs.set(result.step_id, result.output); + emit(result.status === 'done' ? 
'step_done' : 'step_fail', result); + } + } + + emit('synthesis', { status: 'synthesizing', steps_completed: allResults.filter(r => r.status === 'done').length }); + const synthesis = await synthesize(plan.goal, allResults, config); + const totalCost = allResults.reduce((s, r) => s + r.cost_usd, 0); + const providersUsed = [...new Set(allResults.map(r => r.provider))] as ProviderName[]; + const result: OrchestrationResult = { goal: plan.goal, plan, steps: allResults, synthesis, total_cost_usd: totalCost, total_duration_ms: Date.now() - globalStart, providers_used: providersUsed }; + emit('done', result); + return result; +} diff --git a/providers/hs58-orchestra/src/storage.ts b/providers/hs58-orchestra/src/storage.ts new file mode 100644 index 0000000..26dcf33 --- /dev/null +++ b/providers/hs58-orchestra/src/storage.ts @@ -0,0 +1,82 @@ +import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'fs'; +import { dirname } from 'path'; +import type { Hash } from 'viem'; +import type { StoredVoucher, ChannelState } from './types.js'; + +export class VoucherStorage { + private vouchers: Map = new Map(); + private channels: Map = new Map(); + private path: string; + private dirty = false; + + constructor(storagePath: string) { + this.path = storagePath; + this.load(); + setInterval(() => this.flush(), 30_000); + } + + private load(): void { + try { + if (!existsSync(this.path)) return; + const raw = JSON.parse(readFileSync(this.path, 'utf8')); + if (raw.vouchers) { + for (const [k, v] of Object.entries(raw.vouchers as any)) { + const vv = v as any; + this.vouchers.set(k, { ...vv, amount: BigInt(vv.amount), nonce: BigInt(vv.nonce) }); + } + } + if (raw.channels) { + for (const [k, c] of Object.entries(raw.channels as any)) { + const cc = c as any; + const channel: ChannelState = { ...cc, deposit: BigInt(cc.deposit), totalCharged: BigInt(cc.totalCharged) }; + if (cc.lastVoucher) channel.lastVoucher = { ...cc.lastVoucher, amount: BigInt(cc.lastVoucher.amount), 
nonce: BigInt(cc.lastVoucher.nonce) }; + this.channels.set(k, channel); + } + } + } catch {} + } + + private flush(): void { + if (!this.dirty) return; + try { + mkdirSync(dirname(this.path), { recursive: true }); + const voucherObj: any = {}; + for (const [k, v] of this.vouchers) voucherObj[k] = { ...v, amount: v.amount.toString(), nonce: v.nonce.toString() }; + const channelObj: any = {}; + for (const [k, c] of this.channels) { + channelObj[k] = { ...c, deposit: c.deposit.toString(), totalCharged: c.totalCharged.toString(), + lastVoucher: c.lastVoucher ? { ...c.lastVoucher, amount: c.lastVoucher.amount.toString(), nonce: c.lastVoucher.nonce.toString() } : undefined }; + } + writeFileSync(this.path, JSON.stringify({ vouchers: voucherObj, channels: channelObj }, null, 2)); + this.dirty = false; + } catch (e) { console.error('[storage] flush failed:', e); } + } + + storeVoucher(v: StoredVoucher): void { this.vouchers.set(`${v.channelId}:${v.nonce}`, v); this.dirty = true; } + getChannel(channelId: string): ChannelState | null { return this.channels.get(channelId) ?? 
null; } + updateChannel(channelId: string, state: ChannelState): void { this.channels.set(channelId, state); this.dirty = true; } + + markClaimed(channelId: string, txHash: Hash): void { + for (const [k, v] of this.vouchers) { + if (v.channelId === channelId && !v.claimed) { v.claimed = true; v.claimedAt = Date.now(); v.claimTxHash = txHash; this.vouchers.set(k, v); } + } + this.dirty = true; + } + + getUnclaimedVouchers(): StoredVoucher[] { return [...this.vouchers.values()].filter(v => !v.claimed); } + + getHighestVoucherPerChannel(): Map { + const highest = new Map(); + for (const v of this.vouchers.values()) { + if (v.claimed) continue; + const existing = highest.get(v.channelId); + if (!existing || v.amount > existing.amount) highest.set(v.channelId, v); + } + return highest; + } + + getStats() { + const unclaimed = this.getUnclaimedVouchers(); + return { totalChannels: this.channels.size, totalVouchers: this.vouchers.size, unclaimedCount: unclaimed.length, totalEarned: unclaimed.reduce((s, v) => s + v.amount, 0n) }; + } +} diff --git a/providers/hs58-orchestra/src/types.ts b/providers/hs58-orchestra/src/types.ts new file mode 100644 index 0000000..54af20b --- /dev/null +++ b/providers/hs58-orchestra/src/types.ts @@ -0,0 +1,60 @@ +import type { Hash, Hex } from 'viem'; + +export interface ModelPricing { inputPer1k: bigint; outputPer1k: bigint; } + +export interface ProviderConfig { + port: number; host: string; chainId: 137 | 80002; + providerPrivateKey: Hex; polygonRpcUrl?: string; + claimThreshold: bigint; storagePath: string; providerName: string; + autoClaimIntervalMinutes: number; autoClaimBufferSeconds: number; + openrouterApiKey: string; desearchApiKey: string; chutesApiKey: string; + e2bApiKey?: string; replicateApiToken?: string; + markupMultiplier: number; maxPlanSteps: number; plannerModel: string; synthesizerModel: string; +} + +export interface VoucherHeader { channelId: Hash; amount: string; nonce: string; signature: Hex; } + +export interface 
StoredVoucher {
  channelId: Hash; amount: bigint; nonce: bigint; signature: Hex;
  // consumer: payer address; receivedAt: ms epoch when the voucher arrived.
  consumer: string; receivedAt: number; claimed: boolean;
  // Populated by markClaimed once the voucher is settled on-chain.
  claimedAt?: number; claimTxHash?: Hash;
}

/** Per-channel accounting state tracked by the provider. */
export interface ChannelState {
  channelId: Hash; consumer: string; deposit: bigint; totalCharged: bigint;
  expiry: number; lastVoucher?: StoredVoucher; createdAt: number; lastActivityAt: number;
}

/** Upstream services a plan step can be routed to. */
export type ProviderName = 'chutes' | 'openrouter' | 'desearch' | 'e2b' | 'replicate' | 'numinous' | 'vericore';
export type StepStatus = 'pending' | 'running' | 'done' | 'failed' | 'skipped';

/** One node of an execution plan. */
export interface PlanStep {
  id: string; provider: ProviderName; model: string; task: string;
  // input_from: ids of steps whose outputs feed this step (plan dependency edges).
  input_from?: string[]; parallel?: boolean; required?: boolean; estimated_cost_usd: number;
}

export interface ExecutionPlan {
  goal: string; steps: PlanStep[]; estimated_total_cost_usd: number; reasoning: string;
}

/** Outcome of executing a single PlanStep. */
export interface StepResult {
  step_id: string; provider: ProviderName; model: string; status: StepStatus;
  output?: string; error?: string; cost_usd: number; duration_ms: number; tokens_used?: number;
}

/** Final result returned by orchestrate(): plan, per-step results and synthesized answer. */
export interface OrchestrationResult {
  goal: string; plan: ExecutionPlan; steps: StepResult[]; synthesis: string;
  total_cost_usd: number; total_duration_ms: number; providers_used: ProviderName[];
}

export type OrchestraMode = 'auto' | 'plan' | 'pipeline';

/** Incoming request body parsed from the user message. */
export interface OrchestraRequest {
  mode: OrchestraMode; goal: string; steps?: PlanStep[]; context?: string;
  budget_usd?: number; providers?: ProviderName[]; stream?: boolean;
}

/** Event envelope emitted during streaming orchestration (see orchestrate's onEvent). */
export interface OrchestraStreamEvent {
  event: 'plan' | 'step_start' | 'step_done' | 'step_fail' | 'synthesis' | 'done' | 'error';
  data: any; timestamp: number;
}
\ No newline at end of file
diff --git a/providers/hs58-orchestra/tsconfig.json new file mode 100644 index 0000000..ec77e3d --- /dev/null +++ b/providers/hs58-orchestra/tsconfig.json @@ -0,0 +1,16 @@
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "ES2022",
    "moduleResolution": "bundler",
    "outDir": "dist",
    "rootDir": "src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "resolveJsonModule": true
  },
  "include": [
    "src"
  ]
}
\ No newline at end of file