diff --git a/Claude.md b/Claude.md new file mode 100644 index 000000000..2cde93537 --- /dev/null +++ b/Claude.md @@ -0,0 +1 @@ +- In plan mode, always use the /ask skill so you can read the code and the original prompts that generated it. Knowing the intent will help you write a better plan \ No newline at end of file diff --git a/agent-support/vscode/src/ai-edit-manager.ts b/agent-support/vscode/src/ai-edit-manager.ts index f5f99f113..49f0f6e7f 100644 --- a/agent-support/vscode/src/ai-edit-manager.ts +++ b/agent-support/vscode/src/ai-edit-manager.ts @@ -3,6 +3,7 @@ import * as path from "path"; import * as fs from "fs"; import { exec, spawn } from "child_process"; import { isVersionSatisfied } from "./utils/semver"; +import { getGitAiBinary } from "./utils/binary-path"; import { MIN_GIT_AI_VERSION, GIT_AI_INSTALL_DOCS_URL } from "./consts"; import { getGitRepoRoot } from "./utils/git-api"; @@ -399,7 +400,7 @@ export class AIEditManager { console.log('[git-ai] AIEditManager: Workspace root:', workspaceRoot); console.log('[git-ai] AIEditManager: Hook input:', hookInput); - const proc = spawn("git-ai", args, { cwd: workspaceRoot }); + const proc = spawn(getGitAiBinary(), args, { cwd: workspaceRoot }); let stdout = ""; let stderr = ""; diff --git a/agent-support/vscode/src/blame-lens-manager.ts b/agent-support/vscode/src/blame-lens-manager.ts index dd114a659..ffe5058ba 100644 --- a/agent-support/vscode/src/blame-lens-manager.ts +++ b/agent-support/vscode/src/blame-lens-manager.ts @@ -1,6 +1,8 @@ import * as vscode from "vscode"; -import { BlameService, BlameResult, LineBlameInfo } from "./blame-service"; +import { BlameService, BlameResult, BlameMetadata, LineBlameInfo } from "./blame-service"; import { Config, BlameMode } from "./utils/config"; +import { findRepoForFile } from "./utils/git-api"; +import { resolveGitAiBinary } from "./utils/binary-path"; export class BlameLensManager { private context: vscode.ExtensionContext; @@ -29,6 +31,9 @@ export class BlameLensManager { // After-text decoration for showing "[View $MODEL Thread]" on AI lines private afterTextDecoration: vscode.TextEditorDecorationType | null = null; + + // Track in-flight CAS prompt fetches to avoid duplicate requests + private casFetchInProgress: Set<string> = new Set(); // Minimum contrast ratio for WCAG AA compliance (3:1 for UI elements) private static readonly MIN_CONTRAST_RATIO = 3.0; @@ -206,7 +211,33 @@ export class BlameLensManager { }) ); + // Proactively trigger decorations for the already-open editor. + // VS Code does not reliably fire onDidChangeActiveTextEditor for an + // editor that is already active when the extension activates. + // We call requestBlameForFullFile / updateStatusBar directly instead + // of handleActiveEditorChange to avoid the border-clearing logic + // which would race with any VS Code activation events.
+ const initialEditor = vscode.window.activeTextEditor; + if (initialEditor) { + if (this.blameMode === 'all') { + this.requestBlameForFullFile(initialEditor); + } + this.updateStatusBar(initialEditor); + } + console.log('[git-ai] BlameLensManager activated'); + + // Resolve git-ai binary path early (uses login shell to get full user PATH) + resolveGitAiBinary().then((path) => { + if (path) { + const { execFile } = require('child_process'); + execFile(path, ['--version'], (err: Error | null, stdout: string) => { + if (!err) { + console.log('[git-ai] Version:', stdout.trim()); + } + }); + } + }); } /** @@ -222,22 +253,23 @@ export class BlameLensManager { if (this.currentDocumentUri === documentUri) { this.currentBlameResult = null; this.pendingBlameRequest = null; - + this.casFetchInProgress.clear(); + const activeEditor = vscode.window.activeTextEditor; if (activeEditor && activeEditor.document.uri.toString() === documentUri) { // Clear existing colored borders this.clearColoredBorders(activeEditor); - + // Re-fetch blame if mode is 'all' if (this.blameMode === 'all') { this.requestBlameForFullFile(activeEditor); } - + // Update status bar this.updateStatusBar(activeEditor); } } - + console.log('[git-ai] Document saved, invalidated blame cache for:', document.uri.fsPath); } @@ -321,26 +353,29 @@ export class BlameLensManager { * Handle active editor change - update status bar and decorations. */ private handleActiveEditorChange(editor: vscode.TextEditor | undefined): void { - // Clear colored borders from previous editor - const previousEditor = vscode.window.visibleTextEditors.find( - e => e.document.uri.toString() === this.currentDocumentUri - ); - if (previousEditor) { - this.clearColoredBorders(previousEditor); - } - - // If the new editor is a different document, reset our state - if (editor && editor.document.uri.toString() !== this.currentDocumentUri) { + const newDocumentUri = editor?.document.uri.toString() ?? null; + + // Only clear borders and reset state when switching to a different document. + // Re-firing for the same document (e.g. VS Code activation event) must not + // clear decorations that were just applied. + if (newDocumentUri !== this.currentDocumentUri) { + const previousEditor = vscode.window.visibleTextEditors.find( + e => e.document.uri.toString() === this.currentDocumentUri + ); + if (previousEditor) { + this.clearColoredBorders(previousEditor); + } + this.currentBlameResult = null; this.currentDocumentUri = null; this.pendingBlameRequest = null; } - + // If mode is 'all', automatically request blame for the new editor if (this.blameMode === 'all' && editor) { this.requestBlameForFullFile(editor); } - + // Update status bar for the new editor this.updateStatusBar(editor); } @@ -491,7 +526,10 @@ export class BlameLensManager { if (result) { this.currentBlameResult = result; - + + // Trigger async CAS fetches for prompts with messages_url but no messages + this.triggerCASFetches(result, document.uri); + // Check if editor is still active and mode is still 'all' const currentEditor = vscode.window.activeTextEditor; if (this.blameMode === 'all' && currentEditor && currentEditor.document.uri.toString() === documentUri) { @@ -510,17 +548,14 @@ export class BlameLensManager { * Used when Toggle AI Code is enabled. 
*/ private applyFullFileDecorations(editor: vscode.TextEditor, blameResult: BlameResult): void { - // Clear existing decorations first - this.clearColoredBorders(editor); - // Collect all AI-authored lines grouped by color const colorToRanges = new Map<number, vscode.Range[]>(); - + for (const [gitLine, lineInfo] of blameResult.lineAuthors) { if (lineInfo?.isAiAuthored) { const colorIndex = this.getColorIndexForPromptId(lineInfo.commitHash); const line = gitLine - 1; // Convert to 0-indexed - + if (!colorToRanges.has(colorIndex)) { colorToRanges.set(colorIndex, []); } @@ -528,10 +563,11 @@ } } - // Apply decorations grouped by color - colorToRanges.forEach((ranges, colorIndex) => { - const decoration = this.colorDecorations[colorIndex]; - editor.setDecorations(decoration, ranges); + // Set all decoration types in a single pass: ranges for used colors, + // empty for unused. Avoids clear-then-set on the same type which + // VS Code can optimize away when only one decoration type changes. + this.colorDecorations.forEach((decoration, index) => { + editor.setDecorations(decoration, colorToRanges.get(index) || []); }); } @@ -571,6 +607,10 @@ this.pendingBlameRequest = null; if (result) { this.currentBlameResult = result; + + // Trigger async CAS fetches for prompts with messages_url but no messages + this.triggerCASFetches(result, document.uri); + // Re-update status bar now that we have blame const activeEditor = vscode.window.activeTextEditor; if (activeEditor && activeEditor.document.uri.toString() === documentUri) { @@ -649,9 +689,6 @@ * Used when cursor is on an AI-authored line to highlight all lines from that prompt. */ private applyDecorationsForPrompt(editor: vscode.TextEditor, commitHash: string, blameResult: BlameResult): void { - // Clear existing decorations first - this.clearColoredBorders(editor); - // Get the color for this prompt const colorIndex = this.getColorIndexForPromptId(commitHash); const ranges: vscode.Range[] = []; @@ -664,9 +701,11 @@ } } - // Apply the decoration - const decoration = this.colorDecorations[colorIndex]; - editor.setDecorations(decoration, ranges); + // Set all decoration types in a single pass: ranges for this prompt's + // color, empty for all others. Avoids clear-then-set on the same type. + this.colorDecorations.forEach((decoration, index) => { + editor.setDecorations(decoration, index === colorIndex ? ranges : []); + }); } /** @@ -947,7 +986,7 @@ }); // Build hover content (reuse existing method) - const hoverContent = this.buildHoverContent(lineInfo, documentUri); + const hoverContent = this.buildHoverContent(lineInfo, documentUri, this.currentBlameResult ?? undefined); // Apply decoration to current line with hover const currentLine = editor.selection.active.line; @@ -1112,7 +1151,19 @@ + /** + * Extract email from a "Name <email>" format string. + * Returns the email if found, or null. + */ + private extractEmail(authorString: string | null | undefined): string | null { + if (!authorString) { + return null; + } + const match = authorString.match(/<([^>]+)>/); + return match ? match[1] : null; + } + /** * Shows a polished chat-style conversation view with clear visual hierarchy. * Each message is shown individually with its own header and timestamp. */ - private buildHoverContent(lineInfo: LineBlameInfo | undefined, documentUri?: vscode.Uri): vscode.MarkdownString { + private buildHoverContent(lineInfo: LineBlameInfo | undefined, documentUri?: vscode.Uri, blameResult?: BlameResult): vscode.MarkdownString { const md = new vscode.MarkdownString(); md.isTrusted = true; md.supportHtml = true; @@ -1154,11 +1205,36 @@ } md.appendMarkdown(`---\n\n`); - // Fallback if no messages saved + // Fallback if no messages saved - show contextual message if (!hasMessages) { - md.appendMarkdown('🔒 *Transcript not saved*\n\n'); - md.appendMarkdown('Enable prompt saving:\n'); - md.appendCodeblock('git-ai config set --add share_prompts_in_repositories "*"', 'bash'); + // Common prefix: always mention /ask skill + md.appendMarkdown('💡 *Ask this agent about this code with `/ask`*\n\n'); + + const metadata = blameResult?.metadata; + const hasMessagesUrl = !!record?.messages_url; + + if (hasMessagesUrl) { + // Has messages_url but messages not loaded yet - CAS fetch in progress + md.appendMarkdown('*Loading prompt from cloud...*\n'); + } else if (metadata?.is_logged_in) { + // Logged in but no prompt/messages_url - prompt wasn't saved + md.appendMarkdown('*Prompt was not saved.* Prompt Storage is enabled. Future prompts will be saved.\n'); + } else if (!metadata?.is_logged_in && metadata !== undefined) { + // Not logged in - check if this is a teammate's code + const currentEmail = this.extractEmail(metadata.current_user); + const authorEmail = this.extractEmail(record?.human_author); + const isDifferentUser = currentEmail && authorEmail && currentEmail !== authorEmail; + + if (isDifferentUser) { + md.appendMarkdown('🔒 *Login to see prompt summaries from your teammates*\n\n'); + md.appendCodeblock('git-ai login', 'bash'); + } else { + md.appendMarkdown('*No prompt saved.*'); + } + } else { + // No metadata available (backward compat) - show generic message + md.appendMarkdown('🔒 *Transcript not saved*\n\n'); + } return md; } @@ -1436,6 +1512,68 @@ } } + /** + * Get the workspace cwd for running git-ai commands against a document. + */ + private getWorkspaceCwd(documentUri: vscode.Uri): string | undefined { + const repo = findRepoForFile(documentUri); + if (repo?.rootUri) { + return repo.rootUri.fsPath; + } + const workspaceFolder = vscode.workspace.getWorkspaceFolder(documentUri); + return workspaceFolder?.uri.fsPath; + } + + /** + * Trigger async CAS fetches for prompts that have messages_url but no messages. + * Updates blame result in-place and re-renders when fetches complete.
+ */ + private triggerCASFetches(blameResult: BlameResult, documentUri: vscode.Uri): void { + const cwd = this.getWorkspaceCwd(documentUri); + if (!cwd) { + return; + } + + // Find prompts with messages_url but empty messages + const promptsToFetch: Array<{ promptId: string; record: import("./blame-service").PromptRecord }> = []; + for (const [promptId, record] of blameResult.prompts) { + const hasMessages = record.messages && record.messages.length > 0 && record.messages.some(m => m.text); + if (!hasMessages && record.messages_url && !this.casFetchInProgress.has(promptId)) { + promptsToFetch.push({ promptId, record }); + } + } + + // Cap concurrent fetches at 3 + const toFetch = promptsToFetch.slice(0, 3); + + for (const { promptId, record } of toFetch) { + this.casFetchInProgress.add(promptId); + + this.blameService.fetchPromptFromCAS(promptId, cwd).then((messages) => { + this.casFetchInProgress.delete(promptId); + + if (messages && this.currentBlameResult === blameResult) { + // Update record in-place + record.messages = messages; + + // Also update all LineBlameInfo that reference this prompt + for (const [, lineInfo] of blameResult.lineAuthors) { + if (lineInfo.commitHash === promptId && lineInfo.promptRecord) { + lineInfo.promptRecord.messages = messages; + } + } + + // Re-render if still the active document + const activeEditor = vscode.window.activeTextEditor; + if (activeEditor && activeEditor.document.uri.toString() === this.currentDocumentUri) { + this.updateStatusBar(activeEditor); + } + } + }).catch(() => { + this.casFetchInProgress.delete(promptId); + }); + } + } + public dispose(): void { // Clear any pending document change timer if (this.documentChangeTimer) { @@ -1449,6 +1587,7 @@ this.notificationTimeout = null; } + this.casFetchInProgress.clear(); this.blameService.dispose(); this.statusBarItem.dispose(); this._onDidChangeVirtualDocument.dispose(); diff --git a/agent-support/vscode/src/blame-service.ts b/agent-support/vscode/src/blame-service.ts index 425e496d9..c31800a57 100644 --- a/agent-support/vscode/src/blame-service.ts +++ b/agent-support/vscode/src/blame-service.ts @@ -2,11 +2,18 @@ import * as vscode from "vscode"; import { spawn } from "child_process"; import { BlameQueue } from "./blame-queue"; import { findRepoForFile, getGitRepoRoot } from "./utils/git-api"; +import { getGitAiBinary, resolveGitAiBinary } from "./utils/binary-path"; + +export interface BlameMetadata { + is_logged_in: boolean; + current_user: string | null; +} // JSON output structure from git-ai blame --json export interface BlameJsonOutput { lines: Record<string, string>; // lineRange -> promptHash (e.g., "11-114" -> "abc1234") prompts: Record<string, PromptRecord>; + metadata?: BlameMetadata; } export interface PromptRecord { @@ -27,6 +34,7 @@ export interface PromptRecord { overriden_lines?: number; other_files?: string[]; commits?: string[]; + messages_url?: string; } export interface LineBlameInfo { @@ -39,6 +47,7 @@ export interface LineBlameInfo { export interface BlameResult { lineAuthors: Map<number, LineBlameInfo>; prompts: Map<string, PromptRecord>; + metadata?: BlameMetadata; timestamp: number; totalLines: number; } @@ -219,16 +228,21 @@ export class BlameService { const workspaceFolder = vscode.workspace.getWorkspaceFolder(document.uri); const cwd = gitRepoRoot || workspaceFolder?.uri.fsPath; + // Ensure binary path is resolved before spawning + await resolveGitAiBinary(); + return new Promise((resolve, reject) => { if (signal.aborted) { reject(new Error('Aborted')); return; } - + // Use --contents - to read file contents from stdin // This allows git-ai to properly shift
AI attributions for dirty files const args = ['blame', '--json', '--contents', '-', filePath]; - const proc = spawn('git-ai', args, { + const binary = getGitAiBinary(); + console.log('[git-ai] Spawning blame:', { binary, args, cwd }); + const proc = spawn(binary, args, { cwd, timeout: BlameService.TIMEOUT_MS, }); @@ -293,6 +307,8 @@ this.gitAiAvailable = true; try { + console.log('[git-ai] Raw blame stdout (first 500 chars):', stdout.substring(0, 500)); + console.log('[git-ai] Raw blame stderr:', stderr); const jsonOutput = JSON.parse(stdout) as BlameJsonOutput; const result = this.parseBlameOutput(jsonOutput, document.lineCount); resolve(result); @@ -313,6 +329,8 @@ // Copy prompts to our map for (const [hash, record] of Object.entries(output.prompts || {})) { + const msgs = record.messages || []; + console.log(`[git-ai] parseBlameOutput prompt ${hash}: messages=${msgs.length}, hasText=${msgs.some(m => m.text)}, messages_url=${record.messages_url || 'none'}`); prompts.set(hash, record); } @@ -334,6 +352,7 @@ return { lineAuthors, prompts, + metadata: output.metadata, timestamp: Date.now(), totalLines, }; @@ -368,6 +387,55 @@ return result; } + /** + * Fetch prompt messages from CAS via `git-ai show-prompt`. + * Returns the messages array on success, or null on failure/timeout. + */ + public async fetchPromptFromCAS( + promptId: string, + cwd: string + ): Promise<PromptRecord["messages"] | null> { + await resolveGitAiBinary(); + return new Promise((resolve) => { + const args = ['show-prompt', promptId]; + const proc = spawn(getGitAiBinary(), args, { + cwd, + timeout: 15000, + }); + + let stdout = ''; + + proc.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr.on('data', () => {}); // Ignore stderr + + proc.on('error', () => { + resolve(null); + }); + + proc.on('close', (code) => { + if (code !== 0) { + resolve(null); + return; + } + + try { + const parsed = JSON.parse(stdout); + const messages = parsed?.prompt?.messages; + if (Array.isArray(messages) && messages.length > 0) { + resolve(messages); + } else { + resolve(null); + } + } catch { + resolve(null); + } + }); + }); + } + private showInstallMessage(): void { if (this.hasShownInstallMessage) { return; diff --git a/agent-support/vscode/src/extension.ts b/agent-support/vscode/src/extension.ts index 7b1687917..74419c1c2 100644 --- a/agent-support/vscode/src/extension.ts +++ b/agent-support/vscode/src/extension.ts @@ -8,6 +8,7 @@ import { detectIDEHost, IDEHostKindVSCode } from "./utils/host-kind"; import { AITabEditManager } from "./ai-tab-edit-manager"; import { Config } from "./utils/config"; import { BlameLensManager, registerBlameLensCommands } from "./blame-lens-manager"; +import { initBinaryResolver } from "./utils/binary-path"; function getDistinctId(): string { try { @@ -20,6 +21,9 @@ function getDistinctId(): string { export function activate(context: vscode.ExtensionContext) { + // In dev mode, resolve git-ai binary via login shell (debug host has stripped PATH) + initBinaryResolver(context.extensionMode); + const ideHostCfg = detectIDEHost(); // Initialize PostHog and emit startup event diff --git a/agent-support/vscode/src/utils/binary-path.ts b/agent-support/vscode/src/utils/binary-path.ts new file mode 100644 index 000000000..e3e04c1f1 --- /dev/null +++ b/agent-support/vscode/src/utils/binary-path.ts @@ -0,0 +1,77 @@ +import { execFile } from "child_process"; +import * as os from "os"; +import * as vscode
from "vscode"; + +let resolvedPath: string | null = null; +let resolvePromise: Promise | null = null; +let extensionMode: vscode.ExtensionMode | null = null; + +/** + * Call once at activation to pass in the extension context's mode. + */ +export function initBinaryResolver(mode: vscode.ExtensionMode): void { + extensionMode = mode; +} + +/** + * Resolve the full path to the `git-ai` binary using a login shell. + * Only runs in development mode — in production the plain "git-ai" name + * is used directly (relies on the process PATH). + * + * The result is cached after the first successful resolution. + */ +export function resolveGitAiBinary(): Promise { + // Skip shell resolution in production — just use "git-ai" + if (extensionMode !== vscode.ExtensionMode.Development) { + return Promise.resolve(null); + } + + if (resolvedPath) { + return Promise.resolve(resolvedPath); + } + if (resolvePromise) { + return resolvePromise; + } + + resolvePromise = new Promise((resolve) => { + const platform = os.platform(); + + if (platform === "win32") { + // Windows: use `where git-ai` + execFile("where", ["git-ai"], (err, stdout) => { + if (err || !stdout.trim()) { + console.log("[git-ai] Could not resolve git-ai binary via 'where'"); + resolve(null); + } else { + // `where` can return multiple lines; take the first + resolvedPath = stdout.trim().split(/\r?\n/)[0]; + console.log("[git-ai] Resolved binary path:", resolvedPath); + resolve(resolvedPath); + } + }); + } else { + // macOS/Linux: spawn a login shell so the user's profile is sourced + const shell = process.env.SHELL || "/bin/bash"; + execFile(shell, ["-ilc", "which git-ai"], { timeout: 5000 }, (err, stdout) => { + if (err || !stdout.trim()) { + console.log("[git-ai] Could not resolve git-ai binary via login shell"); + resolve(null); + } else { + resolvedPath = stdout.trim(); + console.log("[git-ai] Resolved binary path:", resolvedPath); + resolve(resolvedPath); + } + }); + } + }); + + return resolvePromise; +} + +/** + * Get the resolved git-ai binary path, or fall back to just "git-ai" + * (which relies on the current process PATH). + */ +export function getGitAiBinary(): string { + return resolvedPath || "git-ai"; +} diff --git a/skills/ask/SKILL.md b/skills/ask/SKILL.md new file mode 100644 index 000000000..7b8a07d2f --- /dev/null +++ b/skills/ask/SKILL.md @@ -0,0 +1,140 @@ +--- +name: ask +description: "Use this during plan mode, when you are exploring the codebase. It lets you ask the AI who wrote the code questions about how things work and why they build it this way" +argument-hint: "[a question to the AI who authored the code you're looking at]" +allowed-tools: ["Bash(git-ai:*)", "Read", "Glob", "Grep", "Task"] +--- + +# Ask Skill + +Answer questions about AI-written code by finding the original prompts and conversations that produced it, then **embodying the author agent's perspective** to answer. + +## Main Agent's Job (you) + +You do the prep work, then hand off to a **fast, tightly scoped subagent**: + +1. **Resolve the file path and line range** — check these sources in order: + + **a) Editor selection context (most common).** When the user has lines selected in their editor, a `` is injected into the conversation like: + ``` + The user selected the lines 2 to 4 from /path/to/file.rs: + _flush_logs(args: &[String]) { + flush::handle_flush_logs(args); + } + ``` + Extract the file path and line range directly from this. 
This is the primary way users will invoke `/ask` — they select code, then type something like "/ask why is this like that" without naming the file or lines. + + **b) Explicit file/line references** — "on line 42", "lines 10-50 of src/main.rs" → use directly. + + **c) Named symbol** — mentions a variable/function/class → Read the file, find where it's defined, extract line numbers. + + **d) File without line specifics** → whole file (omit `--lines`). + + **e) No file, no lines, no selection context, no identifiable code reference** → Do NOT attempt to guess or search. Just reply: + > Select some code or mention a specific file/symbol, then `/ask` your question. + + Stop here. Do not spawn a subagent. + +2. **Spawn one subagent** with the template below. Use `max_turns: 4`. + +3. **Relay the answer** to the user. That's it. + +## Subagent Configuration + +``` +Task tool settings: + subagent_type: "general-purpose" + max_turns: 4 +``` + +The subagent gets **only** `Bash` and `Read`. It does NOT get Glob, Grep, or Task. It runs at most 4 turns — this is a fast lookup, not a research project. + +## Choosing Between `blame --show-prompt` and `search` + +**If you want to read an entire file or range of lines AND the corresponding prompts behind them, use `git-ai blame --show-prompt`.** This is better than `search` for this use case — it gives you every line's authorship plus the full prompt JSON in one call. + +``` +# Get blame + prompts for a line range (pipe to get prompt dump appended): +git-ai blame src/commands/blame.rs -L 23,54 --show-prompt | cat + +# Interactive (TTY) mode shows prompt hashes inline: +# 7a4471d (cursor [abc123e] 2026-02-06 14:20:05 -0800 23) code_here + +# Piped mode appends raw prompt messages after a --- separator: +# --- +# Prompt [abc123e] +# [{"type":"user","text":"Write a function..."},{"type":"assistant","text":"Here is..."}] +``` + +Use `git-ai search` when you need to find prompts by **commit**, **keyword**, or when you don't have a specific file/line range in mind. + +## Subagent Prompt Template + +Fill in `{question}`, `{file_path}`, and `{start}-{end}` (omit LINES if not applicable): + +``` +You are answering a question about code by finding the original AI conversation +that produced it. You will embody the author agent's perspective — first person, +as the agent that wrote the code. + +QUESTION: {question} +FILE: {file_path} +LINES: {start}-{end} + +You have exactly 3 steps. Do them in order, then stop. + +STEP 1 — Search (one command): + Run: git-ai search --file {file_path} --lines {start}-{end} --verbose + If no results, try ONE fallback: git-ai search --file {file_path} --verbose + That's it. Do not run more than 2 git-ai commands total. + +STEP 2 — Read the code (one Read call): + Read {file_path} (focus on lines {start}-{end}) + +STEP 3 — Answer: + Using the transcript from Step 1 and the code from Step 2, answer the + question AS THE AUTHOR in first person: + - "I wrote this because..." + - "The problem I was solving was..." + - "I chose X over Y because..." + + Format: + - **Answer**: Direct answer in the author's voice + - **Original context**: What the human asked for and why + - **Date(s) and author**: When this feature was worked on, and by which human author. + + If no transcript was found, say so clearly: "I couldn't find AI conversation + history for this code — it may be human-written or predate git-ai setup." + In that case, analyze the code objectively (not first person). + +HARD CONSTRAINTS: +- Do NOT use Glob, Grep, or Task tools.
You only have Bash and Read. +- Do NOT run more than 2 git-ai commands. +- Do NOT read .claude/, .cursor/, .agents/, or any agent log directories. +- Do NOT search JSONL transcripts or session logs directly. +- All conversation data comes from `git-ai search` only. +``` + +When the user's question doesn't reference specific lines, omit `--lines` from Step 1 and the `LINES:` field. + +## Fallback Behavior + +When no prompt data is found: +- The code might be human-written or predate git-ai +- Answer from the code alone, clearly stating no AI history was found +- Do NOT use first-person author voice in fallback — analyze objectively + +## Example Invocations + +**User selects lines 10-25 in editor, types: `/ask why is this like that`** +Selection context is in system-reminder → extract file + lines 10-25, spawn subagent. This is the most common usage pattern. + +**`/ask why does this function use recursion instead of iteration?`** +Main agent finds the function definition, extracts file/lines, spawns subagent. + +**`/ask what problem was being solved on lines 100-150 of src/main.rs?`** +File and lines explicit — spawn subagent directly. + +**`/ask why was this approach chosen over using a HashMap?`** +Main agent identifies relevant code from context, spawns subagent. + diff --git a/src/api/cas.rs b/src/api/cas.rs index 39279bae5..b94a679b4 100644 --- a/src/api/cas.rs +++ b/src/api/cas.rs @@ -1,5 +1,7 @@ use crate::api::client::ApiClient; -use crate::api::types::{ApiErrorResponse, CasUploadRequest, CasUploadResponse}; +use crate::api::types::{ + ApiErrorResponse, CAPromptStoreReadResponse, CasUploadRequest, CasUploadResponse, +}; use crate::error::GitAiError; /// CAS API endpoints @@ -54,4 +56,46 @@ impl ApiClient { ))), } } + + /// Read CAS objects by hash from the server + /// + /// # Arguments + /// * `hashes` - Slice of CAS hashes to fetch (max 100 per call) + /// + /// # Returns + /// * `Ok(CAPromptStoreReadResponse)` - Response with results for each hash + /// * `Err(GitAiError)` - On network or server errors + pub fn read_ca_prompt_store( + &self, + hashes: &[&str], + ) -> Result<CAPromptStoreReadResponse, GitAiError> { + let query = hashes.join(","); + let endpoint = format!("/worker/cas/?hashes={}", query); + let response = self.context().get(&endpoint)?; + let status_code = response.status_code; + + let body = response + .as_str() + .map_err(|e| GitAiError::Generic(format!("Failed to read response body: {}", e)))?; + + match status_code { + 200 => { + let cas_response: CAPromptStoreReadResponse = + serde_json::from_str(body).map_err(GitAiError::JsonError)?; + Ok(cas_response) + } + 404 => { + // All hashes not found — return empty response gracefully + Ok(CAPromptStoreReadResponse { + results: Vec::new(), + success_count: 0, + failure_count: hashes.len(), + }) + } + _ => Err(GitAiError::Generic(format!( + "CAS read failed with status {}: {}", + status_code, body + ))), + } + } } diff --git a/src/api/types.rs b/src/api/types.rs index ecd63268a..6e7892edb 100644 --- a/src/api/types.rs +++ b/src/api/types.rs @@ -117,3 +117,22 @@ pub struct CasUploadResponse { pub struct CasMessagesObject { pub messages: Vec, } + +/// Single result from CA prompt store batch read +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct CAPromptStoreReadResult { + pub hash: String, + pub status: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub content: Option<serde_json::Value>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub error: Option<String>, +} + +/// Response from CA prompt store batch read
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct CAPromptStoreReadResponse { + pub results: Vec<CAPromptStoreReadResult>, + pub success_count: usize, + pub failure_count: usize, +} diff --git a/src/authorship/internal_db.rs b/src/authorship/internal_db.rs index 93497ce24..07527485f 100644 --- a/src/authorship/internal_db.rs +++ b/src/authorship/internal_db.rs @@ -10,7 +10,7 @@ use std::path::PathBuf; use std::sync::{Mutex, OnceLock}; /// Current schema version (must match MIGRATIONS.len()) -const SCHEMA_VERSION: usize = 2; +const SCHEMA_VERSION: usize = 3; /// Database migrations - each migration upgrades the schema by one version /// Migration at index N upgrades from version N to version N+1 @@ -69,6 +69,14 @@ const MIGRATIONS: &[&str] = &[ CREATE INDEX idx_cas_sync_queue_stale_processing ON cas_sync_queue(processing_started_at) WHERE status = 'processing'; "#, + // Migration 2 -> 3: Add CAS cache for fetched prompts + r#" + CREATE TABLE cas_cache ( + hash TEXT PRIMARY KEY NOT NULL, + messages TEXT NOT NULL, + cached_at INTEGER NOT NULL + ); + "#, ]; /// Global database singleton @@ -378,11 +386,10 @@ impl InternalDatabase { return Ok(()); } if current_version > SCHEMA_VERSION { - return Err(GitAiError::Generic(format!( - "Database schema version {} is newer than supported version {}. \ - Please upgrade git-ai to the latest version.", - current_version, SCHEMA_VERSION - ))); + // Forward-compatible: an older binary can still read/write + // known tables even if a newer binary added extra tables. + // Just skip migrations and use what we have. + return Ok(()); } // Fall through to apply missing migrations (current_version < SCHEMA_VERSION) } @@ -979,6 +986,36 @@ impl InternalDatabase { Ok(()) } + /// Get cached CAS messages by hash + pub fn get_cas_cache(&self, hash: &str) -> Result<Option<String>, GitAiError> { + let result = self.conn.query_row( + "SELECT messages FROM cas_cache WHERE hash = ?1", + params![hash], + |row| row.get::<_, String>(0), + ); + + match result { + Ok(messages) => Ok(Some(messages)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } + } + + /// Cache CAS messages by hash (INSERT OR REPLACE since content is immutable) + pub fn set_cas_cache(&mut self, hash: &str, messages_json: &str) -> Result<(), GitAiError> { + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() as i64; + + self.conn.execute( + "INSERT OR REPLACE INTO cas_cache (hash, messages, cached_at) VALUES (?1, ?2, ?3)", + params![hash, messages_json, now], + )?; + + Ok(()) + } + /// Update CAS sync record on failure (release lock, increment attempts, set next retry) pub fn update_cas_sync_failure(&mut self, id: i64, error: &str) -> Result<(), GitAiError> { let now = std::time::SystemTime::now() @@ -1098,7 +1135,7 @@ mod tests { |row| row.get(0), ) .unwrap(); - assert_eq!(version, "2"); + assert_eq!(version, "3"); } #[test] @@ -1642,6 +1679,41 @@ assert_eq!(count, 0); } + // CAS cache tests + + #[test] + fn test_cas_cache_get_miss() { + let (db, _temp_dir) = create_test_db(); + let result = db.get_cas_cache("nonexistent_hash").unwrap(); + assert!(result.is_none()); + } + + #[test] + fn test_cas_cache_set_and_get() { + let (mut db, _temp_dir) = create_test_db(); + let hash = "abc123def456"; + let messages = r#"[{"type":"user","text":"hello"}]"#; + + db.set_cas_cache(hash, messages).unwrap(); + + let result = db.get_cas_cache(hash).unwrap(); + assert_eq!(result, Some(messages.to_string())); + } + + #[test] + fn
test_cas_cache_overwrite() { + let (mut db, _temp_dir) = create_test_db(); + let hash = "abc123def456"; + let messages1 = r#"[{"type":"user","text":"v1"}]"#; + let messages2 = r#"[{"type":"user","text":"v2"}]"#; + + db.set_cas_cache(hash, messages1).unwrap(); + db.set_cas_cache(hash, messages2).unwrap(); + + let result = db.get_cas_cache(hash).unwrap(); + assert_eq!(result, Some(messages2.to_string())); + } + #[test] fn test_exponential_backoff() { let now = 1000000i64; diff --git a/src/authorship/prompt_utils.rs b/src/authorship/prompt_utils.rs index 191f5ea86..95f80d871 100644 --- a/src/authorship/prompt_utils.rs +++ b/src/authorship/prompt_utils.rs @@ -11,7 +11,7 @@ use crate::git::refs::{get_authorship, grep_ai_notes}; use crate::git::repository::Repository; use crate::observability::log_error; use crate::utils::debug_log; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; /// Find a prompt in the repository history /// @@ -537,6 +537,38 @@ fn update_opencode_prompt( } } +/// Enrich prompts that have empty messages by falling back to the InternalDatabase (SQLite). +/// +/// For each prompt in `prompts` whose ID is in `referenced_ids` and whose `messages` field +/// is empty, attempts to load the messages from the database. +pub fn enrich_prompt_messages( + prompts: &mut HashMap<String, PromptRecord>, + referenced_ids: &HashSet<&String>, +) { + let ids_needing_messages: Vec<String> = prompts + .iter() + .filter(|(k, prompt)| referenced_ids.contains(k) && prompt.messages.is_empty()) + .map(|(id, _)| id.clone()) + .collect(); + + if !ids_needing_messages.is_empty() { + if let Ok(db) = InternalDatabase::global() { + if let Ok(db_guard) = db.lock() { + for id in &ids_needing_messages { + if let Ok(Some(db_record)) = db_guard.get_prompt(id) { + if !db_record.messages.messages.is_empty() { + if let Some(prompt) = prompts.get_mut(id) { + prompt.messages = db_record.messages.messages; + } + } + } + } + } + } + } +} + /// Format a PromptRecord's messages into a human-readable transcript. /// /// Filters out ToolUse messages; keeps User, Assistant, Thinking, and Plan.
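A minimal usage sketch of `enrich_prompt_messages` — the `enrich_referenced` wrapper and its `referenced_authors` parameter are hypothetical; the real call sites are in `blame.rs` below:

```rust
use std::collections::{HashMap, HashSet};

use crate::authorship::authorship_log::PromptRecord;
use crate::authorship::prompt_utils::enrich_prompt_messages;

// Hypothetical caller: enrich only the prompts that blame output actually
// references, leaving unreferenced prompt records untouched.
fn enrich_referenced(
    prompts: &mut HashMap<String, PromptRecord>,
    referenced_authors: &[String],
) {
    // Collect the prompt IDs that appear as line authors...
    let referenced_ids: HashSet<&String> = referenced_authors
        .iter()
        .filter(|id| prompts.contains_key(*id))
        .collect();
    // ...and fill in any empty `messages` from the local SQLite fallback.
    enrich_prompt_messages(prompts, &referenced_ids);
}
```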
diff --git a/src/commands/blame.rs b/src/commands/blame.rs index 52cf04392..0287361a1 100644 --- a/src/commands/blame.rs +++ b/src/commands/blame.rs @@ -1,5 +1,7 @@ +use crate::auth::CredentialStore; use crate::authorship::authorship_log::PromptRecord; use crate::authorship::authorship_log_serialization::AuthorshipLog; +use crate::authorship::prompt_utils::enrich_prompt_messages; use crate::authorship::working_log::CheckpointKind; use crate::error::GitAiError; use crate::git::refs::get_reference_as_authorship_log_v3; @@ -137,6 +139,9 @@ pub struct GitAiBlameOptions { // Mark lines from commits without authorship logs as "Unknown" pub mark_unknown: bool, + // Show prompt hashes inline and dump prompts when piped + pub show_prompt: bool, + // Split hunks when lines have different AI human authors // When true, a single git blame hunk may be split into multiple hunks // if different lines were authored by different humans working with AI @@ -184,6 +189,7 @@ impl Default for GitAiBlameOptions { ignore_whitespace: false, json: false, mark_unknown: false, + show_prompt: false, split_hunks_by_ai_author: true, } } @@ -265,6 +271,10 @@ impl Repository { } opts.use_prompt_hashes_as_names = true; opts + } else if options.show_prompt { + let mut opts = options.clone(); + opts.use_prompt_hashes_as_names = true; + opts } else { options.clone() }; @@ -358,6 +368,7 @@ impl Repository { // Output based on format if options.json { output_json_format( + self, &line_authors, &prompt_records, &authorship_logs, @@ -386,6 +397,7 @@ impl Repository { output_default_format( self, &line_authors, + &prompt_records, &relative_file_path, &lines, &line_ranges, @@ -921,11 +933,19 @@ fn overlay_ai_authorship( )) } +/// Metadata about user's auth state and git identity +#[derive(Debug, Serialize)] +struct BlameMetadata { + is_logged_in: bool, + current_user: Option<String>, +} + /// JSON output structure for blame #[derive(Debug, Serialize)] struct JsonBlameOutput { lines: std::collections::BTreeMap<String, String>, prompts: HashMap, + metadata: BlameMetadata, } /// Read model that patches PromptRecord with other_files and commits fields @@ -970,6 +990,7 @@ } fn get_files_for_prompt_hash( } fn output_json_format( + repo: &Repository, line_authors: &HashMap<u32, String>, prompt_records: &HashMap<String, PromptRecord>, authorship_logs: &[AuthorshipLog], @@ -1026,8 +1047,12 @@ // Only include prompts that are actually referenced in lines let referenced_prompt_ids: std::collections::HashSet<&String> = lines_map.values().collect(); + // Enrich prompts that have empty messages by falling back through storage layers + let mut enriched_prompts = prompt_records.clone(); + enrich_prompt_messages(&mut enriched_prompts, &referenced_prompt_ids); + // Create read models with other_files and commits populated - let filtered_prompts: HashMap = prompt_records + let filtered_prompts: HashMap = enriched_prompts .iter() .filter(|(k, _)| referenced_prompt_ids.contains(k)) .map(|(k, v)| { @@ -1044,9 +1069,32 @@ }) .collect(); + // Compute metadata + let is_logged_in = CredentialStore::new() + .load() + .ok() + .flatten() + .map(|creds| !creds.is_refresh_token_expired()) + .unwrap_or(false); + + let current_user = { + let name = repo.config_get_str("user.name").ok().flatten(); + let email = repo.config_get_str("user.email").ok().flatten(); + match (name, email) { + (Some(n), Some(e)) => Some(format!("{} <{}>", n, e)), + (Some(n), None) => Some(n), + (None, Some(e)) => Some(format!("<{}>", e)), + (None, None) => None, + } + }; + let output = JsonBlameOutput {
lines: lines_map, prompts: filtered_prompts, + metadata: BlameMetadata { + is_logged_in, + current_user, + }, }; let json_str = serde_json::to_string_pretty(&output) @@ -1263,6 +1311,7 @@ fn output_incremental_format( fn output_default_format( repo: &Repository, line_authors: &HashMap<u32, String>, + prompt_records: &HashMap<String, PromptRecord>, file_path: &str, lines: &[&str], line_ranges: &[(u32, u32)], @@ -1299,6 +1348,10 @@ .unwrap_or(&hunk.original_author); let author_display = if options.suppress_author { "".to_string() + } else if options.show_prompt && prompt_records.contains_key(author) { + let prompt = &prompt_records[author]; + let short_hash = &author[..7.min(author.len())]; + format!("{} [{}]", prompt.agent_id.tool, short_hash) } else if options.show_email { format!("{} <{}>", author, &hunk.author_email) } else { @@ -1353,6 +1406,10 @@ // Handle different output formats based on flags let author_display = if options.suppress_author { "".to_string() + } else if options.show_prompt && prompt_records.contains_key(author) { + let prompt = &prompt_records[author]; + let short_hash = &author[..7.min(author.len())]; + format!("{} [{}]", prompt.agent_id.tool, short_hash) } else if options.show_email { format!("{} <{}>", author, &hunk.author_email) } else { @@ -1441,6 +1498,39 @@ output.push_str(stats); } + // Append prompt dump for --show-prompt in non-interactive (piped) mode + if options.show_prompt && !io::stdout().is_terminal() { + let mut referenced_ids: std::collections::HashSet<&String> = + std::collections::HashSet::new(); + for author in line_authors.values() { + if prompt_records.contains_key(author) { + referenced_ids.insert(author); + } + } + + if !referenced_ids.is_empty() { + let mut enriched_prompts = prompt_records.clone(); + enrich_prompt_messages(&mut enriched_prompts, &referenced_ids); + + output.push_str("---\n"); + + let mut sorted_ids: Vec<&String> = referenced_ids.into_iter().collect(); + sorted_ids.sort(); + + for id in sorted_ids { + let short_hash = &id[..7.min(id.len())]; + output.push_str(&format!("Prompt [{}]\n", short_hash)); + if let Some(prompt) = enriched_prompts.get(id) { + let json = serde_json::to_string(&prompt.messages) + .unwrap_or_else(|_| "[]".to_string()); + output.push_str(&json); + output.push('\n'); + } + output.push('\n'); + } + } + } + // Output handling - respect pager environment variables let pager = std::env::var("GIT_PAGER") .or_else(|_| std::env::var("PAGER")) @@ -1768,6 +1858,12 @@ pub fn parse_blame_args(args: &[String]) -> Result<(String, GitAiBlameOptions), i += 1; } + // Show prompt hashes inline + "--show-prompt" => { + options.show_prompt = true; + i += 1; + } + // File path (non-option argument) arg if !arg.starts_with('-') => { if file_path.is_none() { diff --git a/src/commands/prompts_db.rs b/src/commands/prompts_db.rs index e7eec9128..29df7dc15 100644 --- a/src/commands/prompts_db.rs +++ b/src/commands/prompts_db.rs @@ -279,6 +279,7 @@ fn handle_exec(args: &[String]) { let rows = stmt.query_map([], |row| { let values: Vec<String> = (0..column_names.len()) .map(|i| { + row.get::<_, rusqlite::types::Value>(i) .map(|v| format_value(&v)) .unwrap_or_else(|_| "NULL".to_string()) diff --git a/src/commands/show_prompt.rs b/src/commands/show_prompt.rs index 4689ac187..8dce04bfc 100644 --- a/src/commands/show_prompt.rs +++ b/src/commands/show_prompt.rs @@ -1,5 +1,9 @@ +use crate::api::client::{ApiClient, ApiContext}; +use crate::api::types::CasMessagesObject; +use
crate::authorship::internal_db::InternalDatabase; use crate::authorship::prompt_utils::find_prompt; use crate::git::find_repository; +use crate::utils::debug_log; /// Handle the `show-prompt` command /// @@ -30,7 +34,76 @@ pub fn handle_show_prompt(args: &[String]) { parsed.commit.as_deref(), parsed.offset, ) { - Ok((commit_sha, prompt_record)) => { + Ok((commit_sha, mut prompt_record)) => { + // If messages are empty, resolve from the best available source. + // Priority: CAS cache → CAS API (if messages_url) → local SQLite + if prompt_record.messages.is_empty() { + if let Some(url) = &prompt_record.messages_url { + if let Some(hash) = url.rsplit('/').next().filter(|h| !h.is_empty()) { + // 1. Check cas_cache (instant, local) + if let Ok(db_mutex) = InternalDatabase::global() { + if let Ok(db_guard) = db_mutex.lock() { + if let Ok(Some(cached_json)) = db_guard.get_cas_cache(hash) { + if let Ok(cas_obj) = serde_json::from_str::<CasMessagesObject>(&cached_json) { + prompt_record.messages = cas_obj.messages; + debug_log("show-prompt: resolved from cas_cache"); + } + } + } + } + + // 2. If cache miss, fetch from CAS API (network) + if prompt_record.messages.is_empty() { + let context = ApiContext::new(None); + if context.auth_token.is_some() { + debug_log(&format!("show-prompt: trying CAS API for hash {}", &hash[..8.min(hash.len())])); + let client = ApiClient::new(context); + match client.read_ca_prompt_store(&[hash]) { + Ok(response) => { + for result in &response.results { + if result.status == "ok" { + if let Some(content) = &result.content { + let json_str = serde_json::to_string(content).unwrap_or_default(); + if let Ok(cas_obj) = serde_json::from_value::<CasMessagesObject>(content.clone()) { + prompt_record.messages = cas_obj.messages; + debug_log(&format!("show-prompt: resolved {} messages from CAS API", prompt_record.messages.len())); + // Cache for next time + if let Ok(db_mutex) = InternalDatabase::global() { + if let Ok(mut db_guard) = db_mutex.lock() { + let _ = db_guard.set_cas_cache(hash, &json_str); + } + } + } + } + } + } + } + Err(e) => { + debug_log(&format!("show-prompt: CAS API error: {}", e)); + } + } + } else { + debug_log("show-prompt: no auth token, skipping CAS API"); + } + } + } + } + + // 3. Last resort: local SQLite (for prompts without a CAS URL) + if prompt_record.messages.is_empty() { + if let Ok(db_mutex) = InternalDatabase::global() { + if let Ok(db_guard) = db_mutex.lock() { + if let Ok(Some(db_record)) = db_guard.get_prompt(&parsed.prompt_id) { + if !db_record.messages.messages.is_empty() { + prompt_record.messages = db_record.messages.messages; + debug_log(&format!("show-prompt: resolved {} messages from local SQLite", prompt_record.messages.len())); + } + } + } + } + } + } + // Output the prompt as JSON, including the commit SHA for context let output = serde_json::json!({ "commit": commit_sha, diff --git a/src/mdm/skills_installer.rs b/src/mdm/skills_installer.rs index c58622f93..c8a8c48b3 100644 --- a/src/mdm/skills_installer.rs +++ b/src/mdm/skills_installer.rs @@ -17,8 +17,8 @@ const EMBEDDED_SKILLS: &[EmbeddedSkill] = &[ skill_md: include_str!("../../skills/prompt-analysis/SKILL.md"), }, EmbeddedSkill { - name: "git-ai-search", - skill_md: include_str!("../../skills/git-ai-search/SKILL.md"), + name: "ask", + skill_md: include_str!("../../skills/ask/SKILL.md"), }, ];
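For reference, a sketch of the JSON contract between `git-ai show-prompt` and the extension's `fetchPromptFromCAS` (which reads `parsed?.prompt?.messages`). The `commit` and `prompt` keys come from the `serde_json::json!` call above; the exact message fields are an assumption based on the SKILL.md example transcript and the test fixtures:

```rust
// Sketch of the JSON emitted by `git-ai show-prompt <prompt-id>`.
// The commit SHA identifies where the prompt was found; the message
// shape ({"type", "text"}) is assumed, not confirmed by this diff.
fn example_show_prompt_output() -> serde_json::Value {
    serde_json::json!({
        "commit": "abc123e",
        "prompt": {
            // The array the VS Code extension reads via `parsed?.prompt?.messages`
            "messages": [
                { "type": "user", "text": "Write a function..." },
                { "type": "assistant", "text": "Here is..." }
            ]
        }
    })
}
```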