diff --git a/README.md b/README.md index 6ebacdc..de8a058 100644 --- a/README.md +++ b/README.md @@ -294,6 +294,90 @@ Analyzes code structure, dependencies, and relationships across a repository. Us - Cleans up temporary files automatically - Cross-platform compatible +### Individual Graph Tools + +For targeted analysis, use these specialized tools instead of the comprehensive `explore_codebase`: + +#### `get_call_graph` + +Generate a function-level call graph showing caller/callee relationships. + +**Use this to:** +- Find all functions that call a specific function +- Find all functions called by a specific function +- Trace call chains through the codebase +- Understand function dependencies + +**Parameters:** + +| Argument | Type | Required | Description | +|----------|------|----------|-------------| +| `directory` | string | Yes | Path to repository directory | +| `jq_filter` | string | No | jq filter for custom data extraction | + +#### `get_dependency_graph` + +Generate a module-level dependency graph showing import relationships. + +**Use this to:** +- Understand module dependencies +- Find circular dependencies +- Identify tightly coupled modules +- Plan module extraction or refactoring + +**Parameters:** + +| Argument | Type | Required | Description | +|----------|------|----------|-------------| +| `directory` | string | Yes | Path to repository directory | +| `jq_filter` | string | No | jq filter for custom data extraction | + +#### `get_domain_graph` + +Generate a high-level domain classification graph. + +**Use this to:** +- Understand the architectural domains in a codebase +- See how code is organized into logical areas +- Get a bird's-eye view of system structure +- Identify domain boundaries + +**Parameters:** + +| Argument | Type | Required | Description | +|----------|------|----------|-------------| +| `directory` | string | Yes | Path to repository directory | +| `jq_filter` | string | No | jq filter for custom data extraction | + +#### `get_parse_graph` + +Generate an AST-level parse graph with fine-grained code structure. + +**Use this to:** +- Analyze detailed code structure +- Find specific syntax patterns +- Understand class/function definitions at AST level +- Support precise refactoring operations + +**Parameters:** + +| Argument | Type | Required | Description | +|----------|------|----------|-------------| +| `directory` | string | Yes | Path to repository directory | +| `jq_filter` | string | No | jq filter for custom data extraction | + +### Choosing the Right Tool + +| Tool | Best For | Output Size | +|------|----------|-------------| +| `explore_codebase` | Comprehensive analysis with built-in queries | Largest - all graph types | +| `get_call_graph` | Function call tracing, debugging | Medium - functions only | +| `get_dependency_graph` | Module refactoring, circular deps | Small - modules only | +| `get_domain_graph` | Architecture overview | Smallest - domains only | +| `get_parse_graph` | AST analysis, precise refactoring | Large - full AST | + +**Tip:** Start with `get_domain_graph` for a quick architecture overview, then drill down with `get_call_graph` or `get_dependency_graph` for specific areas. + ## Tool Performance & Timeout Requirements The `explore_codebase` tool analyzes your entire repository to build a comprehensive code graph. Analysis time scales with repository size and complexity. 
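One practical way to keep analysis time and output size down is to point a graph tool at a subdirectory and trim the response with `jq_filter`. The sketch below is illustrative only: it assumes the MCP TypeScript SDK client (`Client` / `StdioClientTransport`) and a locally built server at `dist/index.js`; the directory path and filter are placeholders, and the `.graph.nodeCount` filter is one of the examples given in the tool descriptions.

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Connect to the locally built server over stdio (paths are illustrative).
const transport = new StdioClientTransport({ command: 'node', args: ['dist/index.js'] });
const client = new Client({ name: 'example-client', version: '1.0.0' });
await client.connect(transport);

// Analyze only one subsystem rather than the whole repository, and extract
// a small summary value instead of returning the full graph.
const result = await client.callTool({
  name: 'get_dependency_graph',
  arguments: {
    directory: '/repo/src/core',     // subdirectory, not the full repo
    jq_filter: '.graph.nodeCount',   // optional jq filter applied to the response
  },
});
console.log(result.content);
```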
@@ -502,6 +586,69 @@ To enable verbose logging, set the `DEBUG` environment variable: Benchmark this MCP server using [mcpbr](https://github.com/caspianmoon/mcpbr-benchmark-caching) with the provided [`mcpbr-config.yaml`](./mcpbr-config.yaml) configuration. +## Local Development & Testing + +### Building from Source + +```bash +git clone https://github.com/supermodeltools/mcp.git +cd mcp +npm install +npm run build +``` + +### Running Locally + +```bash +# Start the MCP server +node dist/index.js + +# Or with a default working directory +node dist/index.js /path/to/repo +``` + +### Testing Tools Locally + +Run the integration tests to verify the server and tools: + +```bash +# Run all tests including integration tests +npm test + +# Run only integration tests +npm test -- src/server.integration.test.ts +``` + +### Using MCP Inspector + +For interactive testing, use the [MCP Inspector](https://github.com/modelcontextprotocol/inspector): + +```bash +# Install the inspector +npm install -g @modelcontextprotocol/inspector + +# Run with your server +npx @modelcontextprotocol/inspector node dist/index.js +``` + +This opens a web UI where you can: +- See all available tools +- Call tools with custom arguments +- View responses in real-time + +### Running Tests + +```bash +# Run all tests +npm test + +# Run with coverage +npm run test:coverage + +# Type checking +npm run typecheck +``` + ## Links - [API Documentation](https://docs.supermodeltools.com) diff --git a/src/filtering.ts b/src/filtering.ts index 6b441b5..8dab749 100644 --- a/src/filtering.ts +++ b/src/filtering.ts @@ -1,6 +1,17 @@ +/** + * jq filtering utilities for JSON response transformation. + * Provides optional jq filter application to API responses. + * @module filtering + */ // @ts-nocheck import initJq from 'jq-web'; +/** + * Optionally applies a jq filter to a response object. + * @param jqFilter - The jq filter string, or undefined to skip filtering + * @param response - The JSON response to filter + * @returns The filtered response, or the original response if no filter provided + */ export async function maybeFilter(jqFilter: unknown | undefined, response: any): Promise { if (jqFilter && typeof jqFilter === 'string') { return await jq(response, jqFilter); @@ -9,10 +20,21 @@ export async function maybeFilter(jqFilter: unknown | undefined, response: any): } } +/** + * Applies a jq filter to JSON data. + * @param json - The JSON data to filter + * @param jqFilter - The jq filter expression + * @returns The filtered result + */ async function jq(json: any, jqFilter: string) { return (await initJq).json(json, jqFilter); } +/** + * Type guard to check if an error is a jq parsing error. + * @param error - The error to check + * @returns True if the error is a jq-related error with stderr output + */ export function isJqError(error: any): error is Error { return error instanceof Error && 'stderr' in error; } diff --git a/src/index.ts b/src/index.ts index 2266586..81f9833 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,7 +1,16 @@ #!/usr/bin/env node +/** + * Entry point for the Supermodel MCP Server. + * Starts the MCP server with optional default working directory. + * @module index + */ import { Server } from './server'; import * as logger from './utils/logger'; +/** + * Main entry point that initializes and starts the MCP server. + * Accepts an optional workdir argument from the command line. 
+ */ async function main() { // Parse command-line arguments to get optional default workdir // Usage: node dist/index.js [workdir] diff --git a/src/server.integration.test.ts b/src/server.integration.test.ts new file mode 100644 index 0000000..e0279fb --- /dev/null +++ b/src/server.integration.test.ts @@ -0,0 +1,261 @@ +/** + * Integration tests for the MCP server. + * Tests the JSON-RPC protocol, tool listing, and basic operations. + */ + +import { describe, it, expect, beforeAll, afterAll } from '@jest/globals'; +import { spawn, ChildProcess } from 'child_process'; +import { existsSync } from 'fs'; +import * as readline from 'readline'; +import * as path from 'path'; + +/** Maximum time to wait for server startup */ +const SERVER_STARTUP_TIMEOUT_MS = 5000; +/** Polling interval for server readiness check */ +const STARTUP_POLL_INTERVAL_MS = 100; + +describe('MCP Server Integration', () => { + let server: ChildProcess; + let requestId = 0; + let responseQueue: Map void; reject: (err: Error) => void }> = new Map(); + let rl: readline.Interface; + + /** + * Sends a JSON-RPC request to the server and waits for response. + * @param method - The JSON-RPC method name + * @param params - Optional parameters for the request + * @returns Promise resolving to the response result + */ + function sendRequest(method: string, params: Record = {}): Promise { + return new Promise((resolve, reject) => { + const id = ++requestId; + const request = { + jsonrpc: '2.0', + id, + method, + params + }; + responseQueue.set(id, { resolve, reject }); + server.stdin!.write(JSON.stringify(request) + '\n'); + + // Timeout after 5 seconds + setTimeout(() => { + if (responseQueue.has(id)) { + responseQueue.delete(id); + reject(new Error(`Request ${method} timed out`)); + } + }, 5000); + }); + } + + beforeAll(async () => { + // Verify dist/index.js exists before attempting to start server + const distPath = path.join(__dirname, '..', 'dist', 'index.js'); + if (!existsSync(distPath)) { + throw new Error( + `Server build not found at ${distPath}. 
Run 'npm run build' first.` + ); + } + + // Start the MCP server + server = spawn('node', [distPath], { + stdio: ['pipe', 'pipe', 'pipe'], + env: { ...process.env } + }); + + // Handle server spawn errors + server.on('error', (err) => { + throw new Error(`Failed to start MCP server: ${err.message}`); + }); + + // Parse JSON-RPC responses + rl = readline.createInterface({ + input: server.stdout!, + crlfDelay: Infinity + }); + + rl.on('line', (line) => { + try { + const response = JSON.parse(line); + if (response.id && responseQueue.has(response.id)) { + const { resolve, reject } = responseQueue.get(response.id)!; + responseQueue.delete(response.id); + if (response.error) { + reject(new Error(JSON.stringify(response.error))); + } else { + resolve(response.result); + } + } + } catch { + // Not JSON, ignore + } + }); + + // Wait for server to be ready with retry loop + const startTime = Date.now(); + while (Date.now() - startTime < SERVER_STARTUP_TIMEOUT_MS) { + // Check if server has exited unexpectedly + if (server.exitCode !== null) { + throw new Error(`Server exited unexpectedly with code ${server.exitCode}`); + } + // Small delay between checks + await new Promise(r => setTimeout(r, STARTUP_POLL_INTERVAL_MS)); + // Server is ready when stdin is writable + if (server.stdin?.writable) { + break; + } + } + }); + + afterAll(async () => { + // Clear any pending response handlers + responseQueue.clear(); + rl?.close(); + if (server && !server.killed) { + server.stdin?.end(); + server.stdout?.destroy(); + server.stderr?.destroy(); + server.kill('SIGKILL'); + } + // Give time for cleanup + await new Promise(r => setTimeout(r, 100)); + }); + + describe('protocol initialization', () => { + it('should initialize successfully', async () => { + const result = await sendRequest('initialize', { + protocolVersion: '2024-11-05', + capabilities: {}, + clientInfo: { name: 'jest-test', version: '1.0.0' } + }); + + expect(result.protocolVersion).toBe('2024-11-05'); + expect(result.capabilities).toBeDefined(); + expect(result.serverInfo).toBeDefined(); + expect(result.serverInfo.name).toBe('supermodel_api'); + }); + + it('should include server instructions', async () => { + const result = await sendRequest('initialize', { + protocolVersion: '2024-11-05', + capabilities: {}, + clientInfo: { name: 'jest-test', version: '1.0.0' } + }); + + expect(result.instructions).toBeDefined(); + expect(result.instructions).toContain('Supermodel Codebase Explorer'); + }); + }); + + describe('tools/list', () => { + it('should list all available tools', async () => { + const result = await sendRequest('tools/list', {}); + + expect(result.tools).toBeDefined(); + expect(Array.isArray(result.tools)).toBe(true); + expect(result.tools.length).toBeGreaterThanOrEqual(5); + }); + + it('should include explore_codebase tool', async () => { + const result = await sendRequest('tools/list', {}); + const exploreTool = result.tools.find((t: any) => t.name === 'explore_codebase'); + + expect(exploreTool).toBeDefined(); + expect(exploreTool.description).toContain('codebase analysis'); + expect(exploreTool.inputSchema.properties.directory).toBeDefined(); + expect(exploreTool.inputSchema.properties.query).toBeDefined(); + }); + + it('should include individual graph tools', async () => { + const result = await sendRequest('tools/list', {}); + const toolNames = result.tools.map((t: any) => t.name); + + expect(toolNames).toContain('get_call_graph'); + expect(toolNames).toContain('get_dependency_graph'); + 
expect(toolNames).toContain('get_domain_graph'); + expect(toolNames).toContain('get_parse_graph'); + }); + + it('should have consistent schema for graph tools', async () => { + const result = await sendRequest('tools/list', {}); + const graphTools = result.tools.filter((t: any) => + ['get_call_graph', 'get_dependency_graph', 'get_domain_graph', 'get_parse_graph'].includes(t.name) + ); + + for (const tool of graphTools) { + expect(tool.inputSchema.properties.directory).toBeDefined(); + expect(tool.inputSchema.properties.directory.type).toBe('string'); + expect(tool.inputSchema.properties.jq_filter).toBeDefined(); + expect(tool.inputSchema.properties.jq_filter.type).toBe('string'); + expect(tool.inputSchema.required).toEqual([]); + } + }); + }); + + describe('tools/call validation', () => { + it('should return validation error for missing directory', async () => { + const result = await sendRequest('tools/call', { + name: 'get_call_graph', + arguments: {} + }); + + expect(result.content).toBeDefined(); + expect(result.content[0].type).toBe('text'); + const parsed = JSON.parse(result.content[0].text); + expect(parsed.error.code).toBe('MISSING_DIRECTORY'); + expect(parsed.error.type).toBe('validation_error'); + }); + + it('should return validation error for invalid directory type', async () => { + const result = await sendRequest('tools/call', { + name: 'get_call_graph', + arguments: { directory: 123 } + }); + + expect(result.content).toBeDefined(); + const parsed = JSON.parse(result.content[0].text); + expect(parsed.error.code).toBe('INVALID_DIRECTORY'); + }); + + it('should return validation error for invalid jq_filter type', async () => { + const result = await sendRequest('tools/call', { + name: 'get_call_graph', + arguments: { directory: '/tmp', jq_filter: ['invalid'] } + }); + + expect(result.content).toBeDefined(); + const parsed = JSON.parse(result.content[0].text); + expect(parsed.error.code).toBe('INVALID_JQ_FILTER'); + }); + + it('should return not_found error for non-existent directory', async () => { + const result = await sendRequest('tools/call', { + name: 'get_call_graph', + arguments: { directory: '/nonexistent/path/xyz123' } + }); + + expect(result.content).toBeDefined(); + const parsed = JSON.parse(result.content[0].text); + expect(parsed.error.code).toBe('DIRECTORY_NOT_FOUND'); + expect(parsed.error.type).toBe('not_found_error'); + }); + }); + + describe('explore_codebase queries', () => { + it('should return cache status without API call', async () => { + const result = await sendRequest('tools/call', { + name: 'explore_codebase', + arguments: { + directory: process.cwd(), + query: 'graph_status' + } + }); + + expect(result.content).toBeDefined(); + expect(result.content[0].type).toBe('text'); + // graph_status returns cache info, not an error + const parsed = JSON.parse(result.content[0].text); + expect(parsed.query).toBe('graph_status'); + }); + }); +}); diff --git a/src/server.ts b/src/server.ts index cb46d8f..1118d68 100644 --- a/src/server.ts +++ b/src/server.ts @@ -1,7 +1,13 @@ +/** + * MCP Server implementation for the Supermodel codebase analysis tools. + * Provides JSON-RPC handlers for code graph generation and exploration. 
+ * @module server + */ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; import { Configuration, DefaultApi, SupermodelClient } from '@supermodeltools/sdk'; import createSupermodelGraphTool from './tools/create-supermodel-graph'; +import { graphTools } from './tools/graph-tools'; import { ClientContext } from './types'; import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'; import { cleanupOldZips } from './utils/zip-repository'; @@ -46,53 +52,57 @@ export class Server { }, { capabilities: { tools: {}, logging: {} }, - instructions: `# Server Instructions: Supermodel Codebase Explorer - -## Graph Rules -- This API produces graphs of the code contained within a target directory. -- STRATEGY: Before debugging, planning, or analyzing a change to a code repository, generate a code graph. Use it to localize changes and find what files to search more efficiently than grep. - -## Debugging Strategy -1. Generate a code graph of the given repository or a subset. -2. Analyze the nodes and relationships which appear to be related to your issue. -3. Analyze the broader context of these nodes in relationships within their domain and subdomain. -4. Use the graph like a diagram to navigate the codebase more efficiently than raw grep and to analyze the potential blast radius of any change. - -## Planning Strategy -1. Generate a code graph of the given repository or a subset. -2. Analyze relationships like dependencies, calls, and inheritance to identify the potential blast radius of a proposed change. -3. Examine other elements of the same Domain and Subdomain to look for patterns including best practices or anti-patterns. -4. Look at the nodes you plan to change and find their physical locations, allowing you to analyze more efficiently than blind grepping. - -## Analysis Strategy -1. Generate a code graph of the given repository or a subset. -2. Analyze the system domains to understand the high-level system architecture. -3. Examine leaf nodes to see the structure of the broader tree. -4. Use the graph like a map to navigate the codebase more efficiently than blind grepping. - -## Performance Optimization - -For localized bugs: -1. Identify the affected subsystem from the issue description -2. Analyze only that subdirectory (e.g., \`django/db\` instead of full repo) -3. 
This is faster, uses less memory, and avoids ZIP size limits - -Example: -- Full repo: directory="/repo" → 180MB, 50k nodes -- Subsystem: directory="/repo/django/db" → 15MB, 3k nodes + instructions: `# Supermodel Codebase Explorer -## Error Handling +## Choosing the Right Tool + +| Situation | Tool | Why | +|-----------|------|-----| +| New codebase, need overview | \`get_domain_graph\` | Shows domains, responsibilities, architecture | +| Debugging function calls | \`get_call_graph\` | Function nodes + calls relationships | +| Understanding imports | \`get_dependency_graph\` | File nodes + IMPORTS relationships | +| Full code structure | \`get_parse_graph\` | All nodes and structural relationships | +| Iterative exploration | \`explore_codebase\` | Built-in queries for search and navigation | + +## Available Tools + +### Individual Graph Tools + +**\`get_domain_graph\`** - High-level architecture +- Returns: Domains with descriptions, responsibilities, subdomains, file/function assignments +- Best for: Understanding how a codebase is organized + +**\`get_call_graph\`** - Function call relationships +- Returns: Function nodes with "calls" relationships +- Best for: Debugging, tracing execution, finding callers/callees + +**\`get_dependency_graph\`** - Import relationships +- Returns: File nodes with "IMPORTS" relationships +- Best for: Finding circular deps, understanding module coupling + +**\`get_parse_graph\`** - Full code structure +- Returns: All nodes (File, Class, Function, Type) and structural relationships +- Best for: Comprehensive analysis, detailed refactoring + +### explore_codebase (with queries) -When a tool call returns an error, check the structured error fields: -- \`error.recoverable: true\` → wait briefly and retry the request -- \`error.reportable: true\` → this is a server-side bug, not your fault +Full analysis with built-in query interface: +- \`query: "summary"\` - Graph statistics +- \`query: "search", searchText: "..."\` - Find nodes by name +- \`query: "list_nodes", labels: [...]\` - Filter by type +- \`query: "function_calls_in/out", targetId: "..."\` - Trace calls +- \`query: "graph_status"\` - Check cache without API call -If you receive a reportable error and have GitHub access: -1. Search the \`error.repo\` issues for the \`error.code\` to avoid duplicates -2. If no existing issue, open one with the error code, message, and any reproduction context -3. 
If you can identify and fix the root cause, fork the repo and open a PR +## Tips -This helps the maintainers fix bugs faster and avoids wasting your iteration budget on issues you cannot work around locally.`, +- **Start with domain graph** for architecture overview +- **Target subdirectories** when possible (faster, smaller output) +- **Use jq_filter** to extract specific data + +## Error Handling + +- \`error.recoverable: true\` → retry after brief wait +- \`error.reportable: true\` → server bug, report to \`error.repo\``, }, ); @@ -118,19 +128,35 @@ This helps the maintainers fix bugs faster and avoids wasting your iteration bud } private setupHandlers() { + // Collect all tools: the main explore_codebase tool plus individual graph tools + const allTools = [ + createSupermodelGraphTool, + ...graphTools, + ]; + + // Create a map for quick handler lookup, checking for duplicates + const toolMap = new Map(); + for (const t of allTools) { + if (toolMap.has(t.tool.name)) { + throw new Error(`Duplicate tool name: ${t.tool.name}`); + } + toolMap.set(t.tool.name, t); + } + this.server.server.setRequestHandler(ListToolsRequestSchema, async () => { return { - tools: [createSupermodelGraphTool.tool], + tools: allTools.map(t => t.tool), }; }); this.server.server.setRequestHandler(CallToolRequestSchema, async (request) => { const { name, arguments: args } = request.params; - - if (name === createSupermodelGraphTool.tool.name) { - return createSupermodelGraphTool.handler(this.client, args, this.defaultWorkdir); + + const tool = toolMap.get(name); + if (tool) { + return tool.handler(this.client, args, this.defaultWorkdir); } - + throw new Error(`Unknown tool: ${name}`); }); } diff --git a/src/tools/create-supermodel-graph.test.ts b/src/tools/create-supermodel-graph.test.ts index 4c0a359..9356b4d 100644 --- a/src/tools/create-supermodel-graph.test.ts +++ b/src/tools/create-supermodel-graph.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, jest, beforeEach } from '@jest/globals'; -import { handler, classifyApiError } from './create-supermodel-graph'; +import { handler } from './create-supermodel-graph'; +import { classifyApiError } from '../utils/api-helpers'; import { ClientContext } from '../types'; import { execSync } from 'child_process'; import { createHash } from 'crypto'; diff --git a/src/tools/create-supermodel-graph.ts b/src/tools/create-supermodel-graph.ts index 5589dd3..ee0adc4 100644 --- a/src/tools/create-supermodel-graph.ts +++ b/src/tools/create-supermodel-graph.ts @@ -1,8 +1,7 @@ import { Tool } from '@modelcontextprotocol/sdk/types.js'; import { readFile } from 'fs/promises'; -import { execSync } from 'child_process'; -import { createHash } from 'crypto'; -import { basename, resolve } from 'path'; +import { Blob } from 'buffer'; +import { basename } from 'path'; import { Metadata, Endpoint, @@ -10,16 +9,19 @@ import { asTextContentResult, asErrorResult, ClientContext, - StructuredError } from '../types'; import { maybeFilter, isJqError } from '../filtering'; import { executeQuery, getAvailableQueries, isQueryError, QueryType, graphCache } from '../queries'; import { IndexedGraph } from '../cache/graph-cache'; import { zipRepository } from '../utils/zip-repository'; import * as logger from '../utils/logger'; - -const REPORT_REPO = 'https://github.com/supermodeltools/mcp.git'; -const REPORT_SUGGESTION = 'This may be a bug in the MCP server. 
You can help by opening an issue at https://github.com/supermodeltools/mcp/issues with the error details, or fork the repo and open a PR with a fix.'; +import { + REPORT_REPO, + REPORT_SUGGESTION, + formatBytes, + generateIdempotencyKey, + classifyApiError, +} from '../utils/api-helpers'; export const metadata: Metadata = { resource: 'graphs', @@ -179,48 +181,6 @@ Query types available: graph_status, summary, get_node, search, list_nodes, func }, }; -/** - * Generate an idempotency key in format {repo}-{pathHash}:supermodel:{hash} - * Includes path hash to prevent collisions between same-named repos - */ -function generateIdempotencyKey(directory: string): string { - const repoName = basename(directory); - const absolutePath = resolve(directory); - - // Always include path hash to prevent collisions - const pathHash = createHash('sha1').update(absolutePath).digest('hex').substring(0, 7); - - let hash: string; - let statusHash = ''; - - try { - // Get git commit hash - hash = execSync('git rev-parse --short HEAD', { - cwd: directory, - encoding: 'utf-8', - }).trim(); - - // Include working tree status in hash to detect uncommitted changes - const statusOutput = execSync('git status --porcelain', { - cwd: directory, - encoding: 'utf-8', - }).toString(); - - if (statusOutput) { - // Create hash of status output - statusHash = '-' + createHash('sha1') - .update(statusOutput) - .digest('hex') - .substring(0, 7); - } - } catch { - // Fallback for non-git directories: use path hash as main identifier - hash = pathHash; - } - - return `${repoName}-${pathHash}:supermodel:${hash}${statusHash}`; -} - export const handler: HandlerFunction = async (client: ClientContext, args: Record | undefined, defaultWorkdir?: string) => { if (!args) { args = {}; @@ -242,7 +202,7 @@ export const handler: HandlerFunction = async (client: ClientContext, args: Reco } = args as any; // Use provided directory or fall back to default workdir - const directory = providedDirectory || defaultWorkdir; + const directory = providedDirectory ?? defaultWorkdir; // Validate directory - check if explicitly invalid first if (providedDirectory !== undefined && typeof providedDirectory !== 'string') { @@ -417,16 +377,6 @@ export const handler: HandlerFunction = async (client: ClientContext, args: Reco } }; -/** - * Format bytes as human-readable string - */ -function formatBytes(bytes: number): string { - if (bytes < 1024) return `${bytes} B`; - if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(2)} KB`; - if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(2)} MB`; - return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GB`; -} - /** * Handle query-based requests when graph is already cached * Uses the cached graph directly to avoid TOCTOU issues @@ -828,134 +778,6 @@ async function fetchFromApi(client: ClientContext, file: string, idempotencyKey: } } -/** - * Classify an API error into a structured error response. - * Extracts HTTP status, network conditions, and timeout signals - * to produce an agent-actionable error with recovery guidance. - */ -export function classifyApiError(error: any): StructuredError { - // Guard against non-Error throws (strings, nulls, plain objects) - if (!error || typeof error !== 'object') { - return { - type: 'internal_error', - message: typeof error === 'string' ? 
error : 'An unexpected error occurred.', - code: 'UNKNOWN_ERROR', - recoverable: false, - reportable: true, - repo: REPORT_REPO, - suggestion: REPORT_SUGGESTION, - details: { errorType: typeof error }, - }; - } - - if (error.response) { - const status = error.response.status; - - switch (status) { - case 401: - return { - type: 'authentication_error', - message: 'Invalid or missing API key.', - code: 'INVALID_API_KEY', - recoverable: false, - suggestion: 'Set the SUPERMODEL_API_KEY environment variable and restart the MCP server.', - details: { apiKeySet: !!process.env.SUPERMODEL_API_KEY, httpStatus: 401 }, - }; - case 403: - return { - type: 'authorization_error', - message: 'API key does not have permission for this operation.', - code: 'FORBIDDEN', - recoverable: false, - suggestion: 'Verify your API key has the correct permissions. Contact support if unexpected.', - details: { httpStatus: 403 }, - }; - case 404: - return { - type: 'not_found_error', - message: 'API endpoint not found.', - code: 'ENDPOINT_NOT_FOUND', - recoverable: false, - suggestion: 'Check SUPERMODEL_BASE_URL environment variable. Default: https://api.supermodeltools.com', - details: { baseUrl: process.env.SUPERMODEL_BASE_URL || 'https://api.supermodeltools.com', httpStatus: 404 }, - }; - case 429: - return { - type: 'rate_limit_error', - message: 'API rate limit exceeded.', - code: 'RATE_LIMITED', - recoverable: true, - suggestion: 'Wait 30-60 seconds and retry. Consider analyzing smaller subdirectories to reduce API calls.', - details: { httpStatus: 429 }, - }; - case 500: - case 502: - case 503: - case 504: - return { - type: 'internal_error', - message: `Supermodel API server error (HTTP ${status}).`, - code: 'SERVER_ERROR', - recoverable: true, - reportable: true, - repo: REPORT_REPO, - suggestion: 'The API may be temporarily unavailable. Wait a few minutes and retry. If persistent, open an issue at https://github.com/supermodeltools/mcp/issues with the error details, or fork the repo and open a PR with a fix.', - details: { httpStatus: status }, - }; - default: { - const isServerError = status >= 500; - return { - type: isServerError ? 'internal_error' : 'validation_error', - message: `API request failed with HTTP ${status}.`, - code: 'API_ERROR', - recoverable: isServerError, - ...(isServerError && { - reportable: true, - repo: REPORT_REPO, - suggestion: 'The API may be temporarily unavailable. Wait a few minutes and retry. If persistent, open an issue at https://github.com/supermodeltools/mcp/issues with the error details, or fork the repo and open a PR with a fix.', - }), - ...(!isServerError && { suggestion: 'Check the request parameters and base URL configuration.' }), - details: { httpStatus: status }, - }; - } - } - } - - if (error.request) { - // Distinguish timeout from general network failure - if (error.code === 'UND_ERR_HEADERS_TIMEOUT' || error.code === 'UND_ERR_BODY_TIMEOUT' || error.message?.includes('timeout')) { - return { - type: 'timeout_error', - message: 'API request timed out. The codebase may be too large for a single analysis.', - code: 'REQUEST_TIMEOUT', - recoverable: true, - suggestion: 'Analyze a smaller subdirectory (e.g. directory="/repo/src/core") or increase SUPERMODEL_TIMEOUT_MS.', - }; - } - - return { - type: 'network_error', - message: 'No response from Supermodel API server.', - code: 'NO_RESPONSE', - recoverable: true, - suggestion: 'Check network connectivity. 
Verify the API is reachable at the configured base URL.', - details: { baseUrl: process.env.SUPERMODEL_BASE_URL || 'https://api.supermodeltools.com' }, - }; - } - - // Catch-all for unexpected errors - include the actual message - return { - type: 'internal_error', - message: error.message || 'An unexpected error occurred.', - code: 'UNKNOWN_ERROR', - recoverable: false, - reportable: true, - repo: REPORT_REPO, - suggestion: REPORT_SUGGESTION, - details: { errorType: error.name || 'Error' }, - }; -} - /** * Legacy mode: direct jq filtering on API response */ diff --git a/src/tools/graph-tools.test.ts b/src/tools/graph-tools.test.ts new file mode 100644 index 0000000..d364f98 --- /dev/null +++ b/src/tools/graph-tools.test.ts @@ -0,0 +1,214 @@ +import { describe, it, expect, jest, beforeEach } from '@jest/globals'; +import * as os from 'os'; +import * as path from 'path'; +import { + callGraphTool, + dependencyGraphTool, + domainGraphTool, + parseGraphTool, + graphTools, +} from './graph-tools'; +import { ClientContext } from '../types'; + +// Generate OS-safe non-existent paths for testing +const nonExistentPath = path.join(os.tmpdir(), `nonexistent-${Date.now()}-${Math.random().toString(36).slice(2)}`); +const defaultWorkdirPath = path.join(os.tmpdir(), `default-workdir-${Date.now()}-${Math.random().toString(36).slice(2)}`); + +describe('graph-tools', () => { + describe('tool exports', () => { + it('should export callGraphTool with correct name', () => { + expect(callGraphTool.tool.name).toBe('get_call_graph'); + expect(callGraphTool.metadata.operationId).toBe('generateCallGraph'); + }); + + it('should export dependencyGraphTool with correct name', () => { + expect(dependencyGraphTool.tool.name).toBe('get_dependency_graph'); + expect(dependencyGraphTool.metadata.operationId).toBe('generateDependencyGraph'); + }); + + it('should export domainGraphTool with correct name', () => { + expect(domainGraphTool.tool.name).toBe('get_domain_graph'); + expect(domainGraphTool.metadata.operationId).toBe('generateDomainGraph'); + }); + + it('should export parseGraphTool with correct name', () => { + expect(parseGraphTool.tool.name).toBe('get_parse_graph'); + expect(parseGraphTool.metadata.operationId).toBe('generateParseGraph'); + }); + + it('should export graphTools array with all 4 tools', () => { + expect(graphTools).toHaveLength(4); + const toolNames = graphTools.map(t => t.tool.name); + expect(toolNames).toContain('get_call_graph'); + expect(toolNames).toContain('get_dependency_graph'); + expect(toolNames).toContain('get_domain_graph'); + expect(toolNames).toContain('get_parse_graph'); + }); + }); + + describe('tool metadata', () => { + it('should have correct HTTP endpoints for each tool', () => { + expect(callGraphTool.metadata.httpPath).toBe('/v1/graphs/call'); + expect(dependencyGraphTool.metadata.httpPath).toBe('/v1/graphs/dependency'); + expect(domainGraphTool.metadata.httpPath).toBe('/v1/graphs/domain'); + expect(parseGraphTool.metadata.httpPath).toBe('/v1/graphs/parse'); + }); + + it('should all use POST method', () => { + graphTools.forEach(tool => { + expect(tool.metadata.httpMethod).toBe('post'); + }); + }); + + it('should all have write operation', () => { + graphTools.forEach(tool => { + expect(tool.metadata.operation).toBe('write'); + }); + }); + }); + + describe('tool input schema', () => { + it('should have directory and jq_filter properties', () => { + graphTools.forEach(tool => { + const props = tool.tool.inputSchema.properties as Record; + expect(props.directory).toBeDefined(); + 
expect(props.directory.type).toBe('string'); + expect(props.jq_filter).toBeDefined(); + expect(props.jq_filter.type).toBe('string'); + }); + }); + + it('should not require any parameters (directory can use default workdir)', () => { + graphTools.forEach(tool => { + expect(tool.tool.inputSchema.required).toEqual([]); + }); + }); + }); + + describe('handler parameter validation', () => { + let mockClient: ClientContext; + + beforeEach(() => { + mockClient = { + graphs: { + generateCallGraph: jest.fn(), + generateDependencyGraph: jest.fn(), + generateDomainGraph: jest.fn(), + generateParseGraph: jest.fn(), + }, + } as any; + }); + + it('should return MISSING_DIRECTORY error when no directory and no default workdir', async () => { + const result = await callGraphTool.handler(mockClient, undefined); + + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.type).toBe('validation_error'); + expect(errorContent.error.code).toBe('MISSING_DIRECTORY'); + expect(errorContent.error.recoverable).toBe(false); + }); + + it('should return INVALID_DIRECTORY error when directory is not a string', async () => { + const result = await callGraphTool.handler(mockClient, { directory: 123 }); + + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.type).toBe('validation_error'); + expect(errorContent.error.code).toBe('INVALID_DIRECTORY'); + expect(errorContent.error.recoverable).toBe(false); + }); + + it('should return INVALID_JQ_FILTER error when jq_filter is not a string', async () => { + const result = await callGraphTool.handler(mockClient, { + directory: '/some/path', + jq_filter: 123 + }); + + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.type).toBe('validation_error'); + expect(errorContent.error.code).toBe('INVALID_JQ_FILTER'); + expect(errorContent.error.recoverable).toBe(false); + }); + + it('should return DIRECTORY_NOT_FOUND error for non-existent directory', async () => { + const result = await callGraphTool.handler(mockClient, { + directory: nonExistentPath + }); + + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.type).toBe('not_found_error'); + expect(errorContent.error.code).toBe('DIRECTORY_NOT_FOUND'); + }); + + it('should use default workdir when directory not provided', async () => { + // Will fail at zip stage since the path doesn't exist, + // but proves it attempted to use the default workdir + const result = await callGraphTool.handler(mockClient, {}, defaultWorkdirPath); + + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? 
(result.content[0] as any).text : ''); + // Should get to the directory validation stage using default workdir + expect(errorContent.error.code).toBe('DIRECTORY_NOT_FOUND'); + }); + }); + + describe('idempotency key format', () => { + it('should include graph type in idempotency key format', () => { + // The idempotency key format is: {repoName}-{pathHash}:{graphType}:{commitHash} + // Each graph type should produce different keys for the same directory + // This is tested implicitly through the tool metadata tags + expect(callGraphTool.metadata.tags).toContain('call'); + expect(dependencyGraphTool.metadata.tags).toContain('dependency'); + expect(domainGraphTool.metadata.tags).toContain('domain'); + expect(parseGraphTool.metadata.tags).toContain('parse'); + }); + }); + + describe('all handlers validate parameters consistently', () => { + let mockClient: ClientContext; + + beforeEach(() => { + mockClient = { + graphs: { + generateCallGraph: jest.fn(), + generateDependencyGraph: jest.fn(), + generateDomainGraph: jest.fn(), + generateParseGraph: jest.fn(), + }, + } as any; + }); + + it('should all return MISSING_DIRECTORY for undefined args', async () => { + for (const tool of graphTools) { + const result = await tool.handler(mockClient, undefined); + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.code).toBe('MISSING_DIRECTORY'); + } + }); + + it('should all return INVALID_DIRECTORY for non-string directory', async () => { + for (const tool of graphTools) { + const result = await tool.handler(mockClient, { directory: { invalid: true } }); + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.code).toBe('INVALID_DIRECTORY'); + } + }); + + it('should all return INVALID_JQ_FILTER for non-string jq_filter', async () => { + for (const tool of graphTools) { + const result = await tool.handler(mockClient, { + directory: '/some/path', + jq_filter: ['invalid', 'array'] + }); + expect(result.isError).toBe(true); + const errorContent = JSON.parse(result.content[0].type === 'text' ? (result.content[0] as any).text : ''); + expect(errorContent.error.code).toBe('INVALID_JQ_FILTER'); + } + }); + }); +}); diff --git a/src/tools/graph-tools.ts b/src/tools/graph-tools.ts new file mode 100644 index 0000000..8374f2a --- /dev/null +++ b/src/tools/graph-tools.ts @@ -0,0 +1,295 @@ +/** + * Individual graph type tools for targeted codebase analysis. + * Each tool calls a specific graph API endpoint for focused results. 
+ */ + +import { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { readFile } from 'fs/promises'; +import { Blob } from 'buffer'; +import { + Metadata, + HandlerFunction, + asTextContentResult, + asErrorResult, + ClientContext, +} from '../types'; +import { maybeFilter, isJqError } from '../filtering'; +import { zipRepository } from '../utils/zip-repository'; +import * as logger from '../utils/logger'; +import { + REPORT_REPO, + REPORT_SUGGESTION, + formatBytes, + generateIdempotencyKey, + classifyApiError, +} from '../utils/api-helpers'; + +// Graph type configuration +interface GraphTypeConfig { + name: string; + toolName: string; + description: string; + endpoint: string; + operationId: string; + apiMethod: 'generateCallGraph' | 'generateDependencyGraph' | 'generateDomainGraph' | 'generateParseGraph'; +} + +const GRAPH_TYPES: GraphTypeConfig[] = [ + { + name: 'call', + toolName: 'get_call_graph', + description: `Generate a call graph showing function-to-function call relationships. + +Returns: Function nodes with "calls" relationships between them. + +Use this to: +- Find all callers of a specific function +- Find all functions called by a specific function +- Trace execution flow through the codebase +- Debug by following call chains + +Best for: Debugging, understanding "what calls what", tracing execution paths.`, + endpoint: '/v1/graphs/call', + operationId: 'generateCallGraph', + apiMethod: 'generateCallGraph', + }, + { + name: 'dependency', + toolName: 'get_dependency_graph', + description: `Generate a dependency graph showing import relationships between files. + +Returns: File nodes with "IMPORTS" relationships between them. + +Use this to: +- Map which files import which other files +- Find circular dependencies +- Understand module coupling +- Plan safe refactoring of imports + +Best for: Refactoring, understanding module dependencies, finding import cycles.`, + endpoint: '/v1/graphs/dependency', + operationId: 'generateDependencyGraph', + apiMethod: 'generateDependencyGraph', + }, + { + name: 'domain', + toolName: 'get_domain_graph', + description: `Generate a domain classification graph showing high-level architecture. + +Returns: Domains with descriptions, responsibilities, subdomains, and file/function/class assignments. + +Use this to: +- Understand the architectural structure of a codebase +- See how code is organized into logical domains +- Identify domain boundaries and responsibilities +- Get a bird's-eye view before diving into details + +Best for: New codebases, architecture overview, understanding system organization.`, + endpoint: '/v1/graphs/domain', + operationId: 'generateDomainGraph', + apiMethod: 'generateDomainGraph', + }, + { + name: 'parse', + toolName: 'get_parse_graph', + description: `Generate a full parse graph with all code structure elements. + +Returns: All nodes (File, Directory, Class, Function, Type) and structural relationships (CONTAINS, DEFINES, DECLARES, IMPORTS). 
+ +Use this to: +- Get complete structural information about the codebase +- Find all classes, functions, and types +- Understand containment and definition relationships +- Support detailed refactoring analysis + +Best for: Comprehensive analysis when you need the full code structure.`, + endpoint: '/v1/graphs/parse', + operationId: 'generateParseGraph', + apiMethod: 'generateParseGraph', + }, +]; + +/** + * Create a tool definition and handler for a specific graph type + */ +function createGraphTool(config: GraphTypeConfig): { + metadata: Metadata; + tool: Tool; + handler: HandlerFunction; +} { + const metadata: Metadata = { + resource: 'graphs', + operation: 'write', + tags: [config.name], + httpMethod: 'post', + httpPath: config.endpoint, + operationId: config.operationId, + }; + + const tool: Tool = { + name: config.toolName, + description: config.description, + inputSchema: { + type: 'object', + properties: { + directory: { + type: 'string', + description: 'Path to the repository directory to analyze.', + }, + jq_filter: { + type: 'string', + title: 'jq Filter', + description: 'Optional jq filter to extract specific data from the response.', + }, + }, + required: [], + }, + }; + + const handler: HandlerFunction = async ( + client: ClientContext, + args: Record | undefined, + defaultWorkdir?: string + ) => { + if (!args) { + args = {}; + } + + const { jq_filter, directory: providedDirectory } = args as { + jq_filter?: string; + directory?: string; + }; + + if (providedDirectory !== undefined && typeof providedDirectory !== 'string') { + return asErrorResult({ + type: 'validation_error', + message: 'Invalid "directory" parameter. Provide a valid directory path as a string.', + code: 'INVALID_DIRECTORY', + recoverable: false, + suggestion: 'Pass directory as a string path, e.g. directory="/workspace/my-repo".', + }); + } + + if (jq_filter !== undefined && typeof jq_filter !== 'string') { + return asErrorResult({ + type: 'validation_error', + message: 'Invalid "jq_filter" parameter. Provide a jq filter string.', + code: 'INVALID_JQ_FILTER', + recoverable: false, + suggestion: 'Pass jq_filter as a string, e.g. jq_filter=".nodes".', + }); + } + + const directory = providedDirectory ?? defaultWorkdir; + + if (!directory || typeof directory !== 'string') { + return asErrorResult({ + type: 'validation_error', + message: 'No "directory" parameter provided and no default workdir configured.', + code: 'MISSING_DIRECTORY', + recoverable: false, + suggestion: 'Provide a directory path or start the MCP server with a workdir argument.', + }); + } + + const idempotencyKey = generateIdempotencyKey(directory, config.name); + logger.debug(`[${config.toolName}] Idempotency key:`, idempotencyKey); + + // Create ZIP of repository + let zipPath: string; + let cleanup: (() => Promise) | null = null; + + try { + const zipResult = await zipRepository(directory); + zipPath = zipResult.path; + cleanup = zipResult.cleanup; + logger.debug(`[${config.toolName}] ZIP created:`, zipResult.fileCount, 'files,', formatBytes(zipResult.sizeBytes)); + } catch (error: any) { + const message = typeof error?.message === 'string' ? 
error.message : String(error); + + if (message.includes('does not exist')) { + return asErrorResult({ + type: 'not_found_error', + message: `Directory not found: ${directory}`, + code: 'DIRECTORY_NOT_FOUND', + recoverable: false, + suggestion: 'Verify the path exists.', + }); + } + + return asErrorResult({ + type: 'internal_error', + message: `Failed to create ZIP archive: ${message}`, + code: 'ZIP_CREATION_FAILED', + recoverable: false, + reportable: true, + repo: REPORT_REPO, + suggestion: REPORT_SUGGESTION, + }); + } + + try { + const fileBuffer = await readFile(zipPath); + const fileBlob = new Blob([fileBuffer], { type: 'application/zip' }); + + logger.debug(`[${config.toolName}] Calling API...`); + console.error(`[Supermodel] Generating ${config.name} graph...`); + + // Call the appropriate API method via SupermodelClient + const apiMethod = client.graphs[config.apiMethod].bind(client.graphs); + const response = await apiMethod(fileBlob as any, { idempotencyKey }); + + console.error(`[Supermodel] ${config.name} graph complete.`); + + // Apply optional jq filter + const result = await maybeFilter(jq_filter, response); + return asTextContentResult(result); + } catch (error: any) { + if (isJqError(error)) { + logger.error(`[${config.toolName}] jq filter error:`, error.message); + return asErrorResult({ + type: 'validation_error', + message: `Invalid jq filter syntax: ${error.message}`, + code: 'INVALID_JQ_FILTER', + recoverable: false, + suggestion: 'Check jq filter syntax. Example: jq_filter=".nodes" or jq_filter=".graph.nodeCount"', + }); + } + logger.error(`[${config.toolName}] API error:`, error.message); + return asErrorResult(classifyApiError(error)); + } finally { + if (cleanup) { + try { + await cleanup(); + } catch (cleanupError) { + logger.warn(`[${config.toolName}] Cleanup failed:`, cleanupError); + } + } + } + }; + + return { metadata, tool, handler }; +} + +// Helper to find graph type by name (safer than array indexing) +function getGraphType(name: string): GraphTypeConfig { + const config = GRAPH_TYPES.find(t => t.name === name); + if (!config) { + throw new Error(`Unknown graph type: ${name}`); + } + return config; +} + +// Create all graph tools +export const callGraphTool = createGraphTool(getGraphType('call')); +export const dependencyGraphTool = createGraphTool(getGraphType('dependency')); +export const domainGraphTool = createGraphTool(getGraphType('domain')); +export const parseGraphTool = createGraphTool(getGraphType('parse')); + +// Export all tools as an array for easy registration +export const graphTools = [ + callGraphTool, + dependencyGraphTool, + domainGraphTool, + parseGraphTool, +]; diff --git a/src/utils/api-helpers.ts b/src/utils/api-helpers.ts new file mode 100644 index 0000000..cc4c6ab --- /dev/null +++ b/src/utils/api-helpers.ts @@ -0,0 +1,192 @@ +/** + * Shared utilities for API operations across graph tools. + * Extracted to eliminate code duplication between graph-tools.ts and create-supermodel-graph.ts. + */ + +import { execSync } from 'child_process'; +import { createHash } from 'crypto'; +import { basename, resolve } from 'path'; +import { StructuredError } from '../types'; + +export const REPORT_REPO = 'https://github.com/supermodeltools/mcp.git'; +export const REPORT_SUGGESTION = 'This may be a bug in the MCP server. 
You can help by opening an issue at https://github.com/supermodeltools/mcp/issues with the error details, or fork the repo and open a PR with a fix.'; + +/** + * Format bytes as human-readable string + */ +export function formatBytes(bytes: number): string { + if (bytes < 1024) return `${bytes} B`; + if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(2)} KB`; + if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(2)} MB`; + return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GB`; +} + +/** + * Generate an idempotency key in format {repo}-{pathHash}:{graphType}:{hash} + * Includes path hash to prevent collisions between same-named repos + */ +export function generateIdempotencyKey(directory: string, graphType = 'supermodel'): string { + const repoName = basename(directory); + const absolutePath = resolve(directory); + + // Always include path hash to prevent collisions + const pathHash = createHash('sha1').update(absolutePath).digest('hex').substring(0, 7); + + let hash: string; + let statusHash = ''; + + try { + // Get git commit hash + hash = execSync('git rev-parse --short HEAD', { + cwd: directory, + encoding: 'utf-8', + }).trim(); + + // Include working tree status in hash to detect uncommitted changes + const statusOutput = execSync('git status --porcelain', { + cwd: directory, + encoding: 'utf-8', + }).toString(); + + if (statusOutput) { + // Create hash of status output + statusHash = '-' + createHash('sha1') + .update(statusOutput) + .digest('hex') + .substring(0, 7); + } + } catch { + // Fallback for non-git directories: use path hash as main identifier + hash = pathHash; + } + + return `${repoName}-${pathHash}:${graphType}:${hash}${statusHash}`; +} + +/** + * Classify an API error into a structured error response. + * Extracts HTTP status, network conditions, and timeout signals + * to produce an agent-actionable error with recovery guidance. + */ +export function classifyApiError(error: any): StructuredError { + // Guard against non-Error throws (strings, nulls, plain objects) + if (!error || typeof error !== 'object') { + return { + type: 'internal_error', + message: typeof error === 'string' ? error : 'An unexpected error occurred.', + code: 'UNKNOWN_ERROR', + recoverable: false, + reportable: true, + repo: REPORT_REPO, + suggestion: REPORT_SUGGESTION, + details: { errorType: typeof error }, + }; + } + + if (error.response) { + const status = error.response.status; + + switch (status) { + case 401: + return { + type: 'authentication_error', + message: 'Invalid or missing API key.', + code: 'INVALID_API_KEY', + recoverable: false, + suggestion: 'Set the SUPERMODEL_API_KEY environment variable and restart the MCP server.', + details: { apiKeySet: !!process.env.SUPERMODEL_API_KEY, httpStatus: 401 }, + }; + case 403: + return { + type: 'authorization_error', + message: 'API key does not have permission for this operation.', + code: 'FORBIDDEN', + recoverable: false, + suggestion: 'Verify your API key has the correct permissions. Contact support if unexpected.', + details: { httpStatus: 403 }, + }; + case 404: + return { + type: 'not_found_error', + message: 'API endpoint not found.', + code: 'ENDPOINT_NOT_FOUND', + recoverable: false, + suggestion: 'Check SUPERMODEL_BASE_URL environment variable. 
Default: https://api.supermodeltools.com', + details: { baseUrl: process.env.SUPERMODEL_BASE_URL || 'https://api.supermodeltools.com', httpStatus: 404 }, + }; + case 429: + return { + type: 'rate_limit_error', + message: 'API rate limit exceeded.', + code: 'RATE_LIMITED', + recoverable: true, + suggestion: 'Wait 30-60 seconds and retry. Consider analyzing smaller subdirectories to reduce API calls.', + details: { httpStatus: 429 }, + }; + case 500: + case 502: + case 503: + case 504: + return { + type: 'internal_error', + message: `Supermodel API server error (HTTP ${status}).`, + code: 'SERVER_ERROR', + recoverable: true, + reportable: true, + repo: REPORT_REPO, + suggestion: 'The API may be temporarily unavailable. Wait a few minutes and retry. If persistent, open an issue at https://github.com/supermodeltools/mcp/issues with the error details, or fork the repo and open a PR with a fix.', + details: { httpStatus: status }, + }; + default: { + const isServerError = status >= 500; + return { + type: isServerError ? 'internal_error' : 'validation_error', + message: `API request failed with HTTP ${status}.`, + code: 'API_ERROR', + recoverable: isServerError, + ...(isServerError && { + reportable: true, + repo: REPORT_REPO, + suggestion: 'The API may be temporarily unavailable. Wait a few minutes and retry. If persistent, open an issue at https://github.com/supermodeltools/mcp/issues with the error details, or fork the repo and open a PR with a fix.', + }), + ...(!isServerError && { suggestion: 'Check the request parameters and base URL configuration.' }), + details: { httpStatus: status }, + }; + } + } + } + + if (error.request) { + // Distinguish timeout from general network failure + if (error.code === 'UND_ERR_HEADERS_TIMEOUT' || error.code === 'UND_ERR_BODY_TIMEOUT' || error.message?.includes('timeout')) { + return { + type: 'timeout_error', + message: 'API request timed out. The codebase may be too large for a single analysis.', + code: 'REQUEST_TIMEOUT', + recoverable: true, + suggestion: 'Analyze a smaller subdirectory (e.g. directory="/repo/src/core") or increase SUPERMODEL_TIMEOUT_MS.', + }; + } + + return { + type: 'network_error', + message: 'No response from Supermodel API server.', + code: 'NO_RESPONSE', + recoverable: true, + suggestion: 'Check network connectivity. Verify the API is reachable at the configured base URL.', + details: { baseUrl: process.env.SUPERMODEL_BASE_URL || 'https://api.supermodeltools.com' }, + }; + } + + // Catch-all for unexpected errors - include the actual message + return { + type: 'internal_error', + message: error.message || 'An unexpected error occurred.', + code: 'UNKNOWN_ERROR', + recoverable: false, + reportable: true, + repo: REPORT_REPO, + suggestion: REPORT_SUGGESTION, + details: { errorType: error.name || 'Error' }, + }; +}
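These structured errors are what each graph tool ultimately returns through `asErrorResult(classifyApiError(error))`, and the `recoverable` flag is what the server instructions mean by "retry after brief wait". A minimal consumer-side sketch, not part of this module, with `callApi` standing in for any Supermodel API request and the 30-second delay taken from the `RATE_LIMITED` suggestion, might look like this (the import path assumes the sketch lives under `src/tools/`):

```typescript
import { classifyApiError } from '../utils/api-helpers';

// Retry sketch: callApi is a placeholder for any Supermodel API call.
async function callWithRetry<T>(callApi: () => Promise<T>): Promise<T> {
  try {
    return await callApi();
  } catch (err) {
    const structured = classifyApiError(err);
    if (structured.recoverable) {
      // e.g. RATE_LIMITED or SERVER_ERROR: wait briefly, then retry once.
      await new Promise((resolve) => setTimeout(resolve, 30_000));
      return await callApi();
    }
    throw err; // non-recoverable: surface the original error to the caller
  }
}
```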