diff --git a/.claude/skills/base/skill.md b/.claude/skills/base/skill.md index d214096..14cc8f4 100644 --- a/.claude/skills/base/skill.md +++ b/.claude/skills/base/skill.md @@ -18,8 +18,9 @@ Node.js (ESM) | Commander | Vitest | es-module-lexer | @clack/prompts | picocolo - `npm test` — Run vitest suite - `npm start` / `node bin/cli.js` — Run CLI - `aspens scan [path]` — Deterministic repo analysis (no LLM) -- `aspens doc init [path]` — Generate skills via Claude CLI +- `aspens doc init [path]` — Generate skills + hooks + CLAUDE.md - `aspens doc sync [path]` — Incremental skill updates from git diffs +- `aspens doc graph [path]` — Rebuild import graph cache (`.claude/graph.json`) - `aspens add [name]` — Install templates (agents, commands, hooks) - `aspens customize agents` — Inject project context into installed agents @@ -28,11 +29,14 @@ CLI entry (`bin/cli.js`) → command handlers (`src/commands/`) → lib modules - `src/lib/scanner.js` — Deterministic repo scanner (languages, frameworks, domains, structure) - `src/lib/graph-builder.js` — Static import analysis via es-module-lexer (hub files, clusters, priority) +- `src/lib/graph-persistence.js` — Graph serialization, subgraph extraction, code-map + index generation - `src/lib/runner.js` — Claude CLI wrapper (`claude -p --output-format stream-json`) - `src/lib/context-builder.js` — Assembles repo files into prompt-friendly context -- `src/lib/skill-writer.js` — Writes parsed `` output to disk +- `src/lib/skill-writer.js` — Writes skill files, generates skill-rules.json, merges settings +- `src/lib/skill-reader.js` — Parses skill files, frontmatter, activation patterns, keywords +- `src/lib/errors.js` — `CliError` class (structured errors caught by CLI top-level handler) - `src/prompts/` — Prompt templates with `{{partial}}` and `{{variable}}` substitution -- `src/templates/` — Bundled agents, commands, and hooks for `aspens add` +- `src/templates/` — Bundled agents, commands, hooks, and settings for `aspens add` / 
`doc init` ## Critical Conventions - **Pure ESM** — `"type": "module"` throughout; use `import`/`export`, never `require()` @@ -41,14 +45,15 @@ CLI entry (`bin/cli.js`) → command handlers (`src/commands/`) → lib modules - **Path sanitization** — `parseFileOutput()` restricts writes to `.claude/` and `CLAUDE.md` only; no absolute paths or `..` traversal - **Prompt partials** — `{{name}}` in prompt files resolves to `src/prompts/partials/name.md` first, then falls back to template variables - **Scanner is deterministic** — no LLM calls; pure filesystem analysis +- **CliError pattern** — command handlers throw `CliError` instead of calling `process.exit()`; caught at top level in `bin/cli.js` ## Structure -- `bin/` — CLI entry point (commander setup) -- `src/commands/` — Command handlers (scan, doc-init, doc-sync, add, customize) +- `bin/` — CLI entry point (commander setup, CliError handler) +- `src/commands/` — Command handlers (scan, doc-init, doc-sync, doc-graph, add, customize) - `src/lib/` — Core library modules - `src/prompts/` — Prompt templates + partials -- `src/templates/` — Installable agents, commands, hooks +- `src/templates/` — Installable agents, commands, hooks, settings - `tests/` — Vitest test files --- -**Last Updated:** 2026-03-21 +**Last Updated:** 2026-03-24 diff --git a/.claude/skills/claude-runner/skill.md b/.claude/skills/claude-runner/skill.md index 5dcc026..a559a39 100644 --- a/.claude/skills/claude-runner/skill.md +++ b/.claude/skills/claude-runner/skill.md @@ -1,6 +1,6 @@ --- name: claude-runner -description: Claude CLI execution layer — prompt loading, stream-json parsing, file output extraction, path sanitization, and skill file writing +description: Claude CLI execution layer — prompt loading, stream-json parsing, file output extraction, path sanitization, skill file writing, and skill rule generation --- ## Activation @@ -8,26 +8,35 @@ description: Claude CLI execution layer — prompt loading, stream-json parsing, This skill triggers 
when editing claude-runner files: - `src/lib/runner.js` - `src/lib/skill-writer.js` +- `src/lib/skill-reader.js` - `src/prompts/**/*.md` - `tests/*extract*`, `tests/*parse*`, `tests/*prompt*`, `tests/*skill-writer*` --- -You are working on the **Claude CLI execution layer** — the bridge between assembled prompts and the `claude -p` CLI. +You are working on the **Claude CLI execution layer** — the bridge between assembled prompts and the `claude -p` CLI, plus skill file I/O. ## Key Files -- `src/lib/runner.js` — `runClaude()`, `loadPrompt()`, `parseFileOutput()`, `extractResultFromStream()` -- `src/lib/skill-writer.js` — `writeSkillFiles()` with dryRun/force/skip semantics +- `src/lib/runner.js` — `runClaude()`, `loadPrompt()`, `parseFileOutput()`, `validateSkillFiles()`, `extractResultFromStream()` +- `src/lib/skill-writer.js` — `writeSkillFiles()`, `extractRulesFromSkills()`, `generateDomainPatterns()`, `mergeSettings()` +- `src/lib/skill-reader.js` — `findSkillFiles()`, `parseFrontmatter()`, `parseActivationPatterns()`, `parseKeywords()` - `src/prompts/` — Markdown prompt templates; `partials/` subdir holds reusable fragments ## Key Concepts -- **Stream-JSON protocol:** `runClaude()` always passes `--verbose --output-format stream-json` (both flags required together with `-p`). Output is NDJSON: `type: 'result'` has final text + cumulative usage; `type: 'assistant'` has text blocks and tool_use blocks; `type: 'user'` has tool_result blocks. -- **Prompt templating:** `loadPrompt(name, vars)` resolves `{{partial-name}}` from `src/prompts/partials/` first, then substitutes `{{varName}}` from `vars`. Partials use lowercase-kebab-case; unresolved partials that aren't in `vars` trigger a console warning. -- **File output parsing:** Primary format is `content` XML tags. Fallback: `` comment markers. Only paths under `.claude/` or exactly `CLAUDE.md` are allowed. +- **Stream-JSON protocol:** `runClaude()` always passes `--verbose --output-format stream-json`. 
Output is NDJSON: `type: 'result'` has final text + usage; `type: 'assistant'` has text/tool_use blocks; `type: 'user'` has tool_result blocks. +- **Prompt templating:** `loadPrompt(name, vars)` resolves `{{partial-name}}` from `src/prompts/partials/` first, then substitutes `{{varName}}` from `vars`. +- **File output parsing:** Primary: `content` XML tags. Fallback: `` comment markers. Handles code fences correctly. +- **Validation:** `validateSkillFiles()` checks for truncation (XML tag collisions), missing frontmatter, missing sections, bad file path references. +- **Skill rules generation:** `extractRulesFromSkills()` reads all skills via `skill-reader.js`, produces `skill-rules.json` (v2.0) with file patterns, keywords, and intent patterns. +- **Domain patterns:** `generateDomainPatterns()` converts file patterns to bash `detect_skill_domain()` function using `BEGIN/END` markers. +- **Settings merge:** `mergeSettings()` merges aspens hook config into existing `settings.json`, detecting aspens-managed hooks by command path markers. ## Critical Rules - **Both `--verbose` and `--output-format stream-json` are required** — omitting either breaks stream parsing. - **Path sanitization is non-negotiable** — `sanitizePath()` blocks `..` traversal, absolute paths, and any path not under `.claude/` or exactly `CLAUDE.md`. -- **Prompt partials resolve before variables** — `{{skill-format}}` resolves to `partials/skill-format.md` first. If a partial file doesn't exist, it falls through to variable substitution. +- **Prompt partials resolve before variables** — `{{skill-format}}` resolves to `partials/skill-format.md` first. If no file, falls through to variable substitution. - **Timeout auto-scales** — small: 120s, medium: 300s, large: 600s, very-large: 900s. User `--timeout` overrides. -- **`writeSkillFiles` respects force/skip** — without `--force`, existing files are skipped. Dry-run writes nothing. 
+- **`mergeSettings` preserves non-aspens hooks** — identifies aspens hooks by `ASPENS_HOOK_MARKERS` (`skill-activation-prompt`, `post-tool-use-tracker`), replaces matching entries, preserves everything else. + +--- +**Last Updated:** 2026-03-24 diff --git a/.claude/skills/doc-sync/skill.md b/.claude/skills/doc-sync/skill.md index 159277a..699e4d8 100644 --- a/.claude/skills/doc-sync/skill.md +++ b/.claude/skills/doc-sync/skill.md @@ -14,27 +14,31 @@ This skill triggers when editing doc-sync-related files: You are working on **doc-sync**, the incremental skill update command (`aspens doc sync`). ## Key Files -- `src/commands/doc-sync.js` — Main command: git diff → skill mapping → Claude update → write +- `src/commands/doc-sync.js` — Main command: git diff → graph rebuild → skill mapping → Claude update → write - `src/prompts/doc-sync.md` — System prompt sent to Claude (uses `{{skill-format}}` partial) - `src/lib/runner.js` — `runClaude()`, `loadPrompt()`, `parseFileOutput()` shared across commands - `src/lib/skill-writer.js` — `writeSkillFiles()` writes `{ path, content }[]` to disk -- `src/lib/scanner.js` — `scanRepo()` used to detect domains for skill mapping +- `src/lib/graph-persistence.js` — `loadGraph()`, `extractSubgraph()`, `formatNavigationContext()` for graph context ## Key Concepts -- **Diff-based flow:** Gets `git diff HEAD~N..HEAD` and `git log`, feeds them plus existing skill contents to Claude via `runClaude()` with read-only tools (`Read`, `Glob`, `Grep`). -- **Skill mapping:** `mapChangesToSkills()` matches changed file names and meaningful path segments against each skill's `## Activation` section. Generic segments (`src`, `lib`, `components`, etc.) are excluded via `GENERIC_PATH_SEGMENTS`. Base skill is flagged only when structural files change (`package.json`, `Dockerfile`, etc.). -- **Token optimization:** Affected skills are sent in full; non-affected skills send only path + description line. 
-- **Diff truncation:** `truncateDiff()` caps at 15,000 chars, cutting at the last `diff --git` boundary. CLAUDE.md is capped at 5,000 chars. -- **Output parsing:** Claude returns `` XML tags; `parseFileOutput()` in runner.js handles parsing and path sanitization (blocks `..`, absolute paths, only allows `.claude/` and `CLAUDE.md`). -- **Git hook:** `--install-hook` installs a `post-commit` hook with a 5-minute cooldown lock file (`/tmp/aspens-sync-*.lock`). Runs `npx aspens doc sync --commits 1` in background. Appends to existing hooks if present. -- **Force writes:** doc-sync always calls `writeSkillFiles` with `force: true` — it overwrites existing skills without prompting. +- **Diff-based flow:** Gets `git diff HEAD~N..HEAD` and `git log`, feeds them plus existing skill contents and graph context to Claude. +- **Graph rebuild on every sync:** Calls `buildRepoGraph` + `persistGraphArtifacts` to keep `.claude/graph.json` fresh. Graph failure is non-fatal. +- **Graph-aware skill mapping:** `mapChangesToSkills()` checks not just direct file matches but also whether changed files are imported by files matching a skill's activation block. +- **Interactive file picker:** When diff exceeds 80k chars and TTY is available, offers multiselect with skill-relevant files pre-selected. +- **Prioritized diff:** Skill-relevant files get 60k char budget, everything else gets 20k (80k total). Cuts at `diff --git` boundaries. +- **Skill mapping:** Matches changed file names and meaningful path segments against `## Activation` sections. Generic segments excluded via `GENERIC_PATH_SEGMENTS`. +- **Token optimization:** Affected skills sent in full; non-affected skills send only path + description line. +- **Diff truncation:** `truncateDiff()` caps at configurable limit, cutting at the last `diff --git` boundary. CLAUDE.md capped at 5,000 chars. +- **Git hook:** `installGitHook()` creates a `post-commit` hook with 5-minute cooldown lock file. 
`removeGitHook()` removes via `>>>` / `<<<` markers. +- **Force writes:** doc-sync always calls `writeSkillFiles` with `force: true`. ## Critical Rules -- `runClaude` is called with `allowedTools: ['Read', 'Glob', 'Grep']` — doc-sync must never grant write tools to the inner Claude call. -- `parseFileOutput` restricts output paths to `.claude/` prefix and `CLAUDE.md` exactly — any other path is silently dropped. Do not change these guards. -- The `getGitDiff` function gracefully falls back from N commits to 1 if the repo has fewer commits than requested. `actualCommits` tracks what was actually used. -- The command exits early with an error if `.claude/skills/` doesn't exist — it requires `aspens doc init` to have been run first. -- The hook cooldown mechanism uses `/tmp` lock files keyed by repo path hash — don't change the naming scheme without updating cleanup logic. +- `runClaude` is called with `allowedTools: ['Read', 'Glob', 'Grep']` — doc-sync must never grant write tools. +- `parseFileOutput` restricts paths to `.claude/` prefix and `CLAUDE.md` exactly — any other path is silently dropped. +- `getGitDiff` gracefully falls back from N commits to 1 if fewer available. `actualCommits` tracks what was used. +- The command exits early with `CliError` if `.claude/skills/` doesn't exist. +- The hook cooldown uses `/tmp/aspens-sync-*.lock` keyed by repo path hash — don't change naming without updating cleanup. +- `checkMissingHooks()` in `bin/cli.js` warns when skills exist but hooks are missing (pre-0.2.2 installs). 
--- -**Last Updated:** 2026-03-21 +**Last Updated:** 2026-03-24 diff --git a/.claude/skills/import-graph/skill.md b/.claude/skills/import-graph/skill.md index 8ff2f0e..4e24212 100644 --- a/.claude/skills/import-graph/skill.md +++ b/.claude/skills/import-graph/skill.md @@ -7,48 +7,42 @@ description: Static import analysis that builds dependency graphs, domain cluste This skill triggers when editing import-graph-related files: - `src/lib/graph-builder.js` +- `src/lib/graph-persistence.js` +- `src/commands/doc-graph.js` - `tests/graph-builder.test.js` --- -You are working on the **import graph builder** — a static analysis module that parses JS/TS and Python source files to produce dependency graphs, hub rankings, domain clusters, and churn-based hotspots. +You are working on the **import graph system** — static analysis that parses JS/TS and Python source files to produce dependency graphs, plus persistence/query layers for runtime use. ## Key Files -- `src/lib/graph-builder.js` — All graph logic (691 lines, single file) +- `src/lib/graph-builder.js` — Core graph logic: walk, parse, metrics, ranking, clustering (691 lines) +- `src/lib/graph-persistence.js` — Serialize, persist, load, subgraph extraction, code-map, graph-index +- `src/commands/doc-graph.js` — Standalone `aspens doc graph` command +- `src/lib/scanner.js` — Provides `detectEntryPoints()`, only internal dependency of graph-builder - `tests/graph-builder.test.js` — Tests using temp fixture directories -- `src/lib/scanner.js` — Provides `detectEntryPoints()`, the only internal dependency ## Key Concepts -`buildRepoGraph(repoPath, languages?)` is the sole public entry point. It runs a **9-step pipeline**: -1. Walk source files (skip `SKIP_DIRS`, vendored, generated) -2. Parse imports per file (es-module-lexer for JS/TS, regex for Python) -3. Populate `importedBy` reverse edges -4. Git churn analysis (6-month window via `git log`) -5. Compute per-file metrics (fanIn, fanOut, exportCount, churn, priority) -6. 
Rank files by priority descending -7. Identify hub files (top 20 by fanIn) -8. Domain clustering via BFS connected components -9. Identify hotspots (`churn > 3 && lines > 50`) +**graph-builder.js** — `buildRepoGraph(repoPath, languages?)` runs a 9-step pipeline: +1. Walk source files → 2. Parse imports → 3. Reverse edges → 4. Git churn → 5. Per-file metrics → 6. Priority ranking → 7. Hub detection → 8. Domain clustering → 9. Hotspots + +**graph-persistence.js** — Persistence and query layer: +- `serializeGraph()` converts raw graph to indexed format (O(1) lookups, file→cluster mapping) +- `persistGraphArtifacts()` writes `.claude/graph.json` + `.claude/code-map.md` + `.claude/graph-index.json` + auto-gitignores them +- `extractSubgraph(graph, filePaths)` returns 1-hop neighborhood of mentioned files + relevant hubs/hotspots/clusters +- `formatNavigationContext(subgraph)` renders compact markdown (~50 line budget) for prompt injection +- `extractFileReferences(prompt, graph)` tiered extraction: explicit paths → bare filenames → cluster keywords +- `generateCodeMap()` standalone overview for graph hook consumption +- `generateGraphIndex()` tiny inverted index (export names → files, hub basenames, cluster labels) ## Critical Rules -- **`await init` before any `parseJsImports` call.** es-module-lexer requires WASM initialization. `buildRepoGraph` calls it at the top; standalone usage of `parseJsImports` must also await it. -- **Priority formula is load-bearing:** `fanIn * 3.0 + exportCount * 1.5 + (isEntry ? 10.0 : 0) + churn * 2.0 + (1/(depth+1)) * 1.0`. Downstream consumers (doc-init, scan commands) depend on this ranking. -- **All paths are repo-relative strings** (e.g. `src/lib/scanner.js`), never absolute. Resolution functions convert abs→relative before returning. -- **Import resolution tries extensions in order:** `.js, .ts, .tsx, .jsx, .mjs` then `/index` variants. Changing this order changes which file wins when ambiguous. 
-- **Python regex uses global flags** — `lastIndex` is reset before each exec loop. Forgetting this causes missed imports. -- **Errors are swallowed, not thrown:** parse failures, unreadable files, and missing git all return empty/null. The graph must always complete. - -## Key Patterns -- **Internal vs external imports:** Relative/aliased imports that resolve to a file on disk → internal (edges). Everything else → `externalImports` array (no edges). -- **Path alias support:** Reads `tsconfig.json`/`jsconfig.json` from root + monorepo subdirs. Strips comments before JSON.parse. -- **Tests use `createFixture(name, files)`** to build temp directories under `tests/fixtures/graph-builder/`, cleaned up in `afterAll`. - -## Exported API -- `buildRepoGraph(repoPath, languages?)` — main entry, returns `{ files, edges, ranked, hubs, clusters, hotspots, entryPoints, stats }` -- `parseJsImports(content, relPath)` — `{ imports: string[], exports: string[] }` -- `parsePyImports(content)` — `string[]` of raw specifiers -- `resolveRelativeImport(repoPath, fromFile, specifier)` — `string | null` -- `computeDomainClusters(files, edges)` — `{ components, coupling }` +- **`await init` before any `parseJsImports` call.** es-module-lexer requires WASM initialization. +- **Priority formula is load-bearing:** `fanIn * 3.0 + exportCount * 1.5 + (isEntry ? 10.0 : 0) + churn * 2.0 + (1/(depth+1)) * 1.0`. Downstream consumers depend on this ranking. +- **All paths are repo-relative strings**, never absolute. Resolution functions convert abs→relative. +- **Graph artifacts are gitignored** — `ensureGraphGitignore()` adds `.claude/graph.json`, `.claude/graph-index.json`, `.claude/code-map.md` to prevent commit loops. +- **Errors are swallowed, not thrown** in graph-builder — parse failures return empty/null. The graph must always complete. +- **`extractSubgraph` logic is mirrored** in `graph-context-prompt.mjs` (standalone hook, no imports). Keep both in sync. 
+- **doc-sync rebuilds graph on every sync** — calls `buildRepoGraph` + `persistGraphArtifacts` to keep it fresh. --- -**Last Updated:** 2026-03-21 +**Last Updated:** 2026-03-24 diff --git a/.claude/skills/skill-generation/skill.md b/.claude/skills/skill-generation/skill.md index 2670ef5..ead588e 100644 --- a/.claude/skills/skill-generation/skill.md +++ b/.claude/skills/skill-generation/skill.md @@ -7,35 +7,44 @@ description: LLM-powered generation pipeline for Claude Code skills and CLAUDE.m This skill triggers when editing skill-generation files: - `src/commands/doc-init.js` -- `src/commands/doc-sync.js` -- `src/commands/customize.js` +- `src/commands/doc-graph.js` - `src/lib/context-builder.js` - `src/lib/runner.js` - `src/lib/skill-writer.js` +- `src/lib/skill-reader.js` - `src/prompts/**/*` --- -You are working on **aspens' skill generation pipeline** — the system that scans repos and uses Claude CLI to generate `.claude/skills/` files and `CLAUDE.md`. +You are working on **aspens' skill generation pipeline** — the system that scans repos and uses Claude CLI to generate `.claude/skills/` files, hooks, and `CLAUDE.md`. 
## Key Files -- `src/commands/doc-init.js` — Main pipeline: scan → parallel discovery → strategy → mode → generate → write -- `src/lib/runner.js` — `runClaude()` spawns `claude -p --verbose --output-format stream-json`; `loadPrompt()` resolves `{{partial}}` from `src/prompts/partials/`; `parseFileOutput()` extracts `` XML tags +- `src/commands/doc-init.js` — Main 9-step pipeline: scan → graph → discovery → strategy → mode → generate → validate → write → hooks +- `src/commands/doc-graph.js` — Standalone graph rebuild command (`aspens doc graph`) +- `src/lib/runner.js` — `runClaude()`, `loadPrompt()`, `parseFileOutput()`, `validateSkillFiles()` - `src/lib/context-builder.js` — Assembles prompt context from scan results, manifests, configs, domain files, git log -- `src/lib/skill-writer.js` — Writes files with force/skip/overwrite semantics -- `src/prompts/` — Prompt templates; partials in `src/prompts/partials/` are inlined via `{{name}}` +- `src/lib/skill-writer.js` — Writes files, generates `skill-rules.json`, domain bash patterns, merges `settings.json` +- `src/lib/skill-reader.js` — Parses skill frontmatter, activation patterns, keywords (used by skill-writer) +- `src/prompts/` — Prompt templates; `discover-domains.md` and `discover-architecture.md` for discovery agents ## Key Concepts -- **3-layer pipeline:** (1) `scanRepo` + `buildRepoGraph` (2) parallel discovery agents (domain + architecture via `Promise.all`) (3) generation (all-at-once or chunked) -- **Generation modes:** `all-at-once` = single Claude call for everything; `chunked` = base skill + per-domain (up to 3 parallel via `PARALLEL_LIMIT`) + CLAUDE.md; `base-only` = just base skill -- **Existing docs strategies:** `improve` (preserve hand-written content), `rewrite` (fresh), `skip-existing` (only generate new domains) -- **Auto-timeout:** Scales by repo size category — small=120s, medium=300s, large=600s, very-large=900s -- **Read-only tools:** Claude agents only get `['Read', 'Glob', 'Grep']` — no 
writes -- **Output format:** Claude must return `content` XML tags. If tags are missing, the pipeline retries with a format reminder. +- **9-step pipeline:** (1) scan + graph (2) parallel discovery agents (3) strategy (4) mode (5) generate (6) validate (7) preview (8) write (9) install hooks +- **Parallel discovery:** Two agents run via `Promise.all` — domain discovery and architecture analysis — before any user prompt +- **Generation modes:** `all-at-once` = single Claude call; `chunked` = base + per-domain (up to 3 parallel via `PARALLEL_LIMIT`) + CLAUDE.md; `base-only` = just base skill +- **`--domains` flag:** Filters which domains to generate in chunked mode; enables `domainsOnly` mode that skips base + CLAUDE.md (for retrying failed domains) +- **`--hooks-only` flag:** Skips generation entirely, just installs/updates hooks from existing skills +- **Retry logic:** Base skill and CLAUDE.md retry up to 2 times if `parseFileOutput` returns empty (format correction prompt) +- **Validation:** `validateSkillFiles()` checks for truncation, missing frontmatter, missing sections, bad file path references +- **Hook installation (step 9):** Generates `skill-rules.json`, copies hook scripts, generates `post-tool-use-tracker.sh` with domain patterns, merges `settings.json` +- **Graph context:** `buildGraphContext()` and `buildDomainGraphContext()` inject import graph data into prompts ## Critical Rules -- **Base skill + CLAUDE.md are essential** — if `parseFileOutput` returns empty for either, the pipeline retries automatically with a format correction prompt. Domain skills failing is acceptable (user can retry). -- **`improve` strategy preserves hand-written content** — Claude must read existing skills first and not discard human-authored rules, conventions, or gotchas. -- **Discovery runs before user prompt** — the domain picker shows Claude-discovered domains, not scanner directory names. 
-- **PARALLEL_LIMIT = 3** — domain skills generate in batches of 3 concurrent Claude calls. Base skill is always sequential first. CLAUDE.md is always sequential last. +- **Base skill + CLAUDE.md are essential** — pipeline retries automatically with format correction. Domain skill failures are acceptable (user retries with `--domains`). +- **`improve` strategy preserves hand-written content** — Claude must read existing skills first and not discard human-authored rules. +- **Discovery runs before user prompt** — domain picker shows Claude-discovered domains, not scanner directory names. +- **PARALLEL_LIMIT = 3** — domain skills generate in batches of 3 concurrent Claude calls. Base skill always sequential first. CLAUDE.md always sequential last. - **Skills must be 35-60 lines** — every line earns its place. No generic advice, no framework documentation. +- **CliError, not process.exit()** — all error exits throw `CliError`; cancellations `return` early. + +--- +**Last Updated:** 2026-03-24 diff --git a/.claude/skills/template-library/skill.md b/.claude/skills/template-library/skill.md index 4f18397..5a9ebf2 100644 --- a/.claude/skills/template-library/skill.md +++ b/.claude/skills/template-library/skill.md @@ -1,6 +1,6 @@ --- name: template-library -description: Bundled agents, commands, and hooks that users install via `aspens add` into their .claude/ directories +description: Bundled agents, commands, hooks, and settings that users install via `aspens add` and `aspens doc init` into their .claude/ directories --- ## Activation @@ -12,28 +12,30 @@ This skill triggers when editing template-library files: --- -You are working on the **template library** — bundled agents, slash commands, and hooks that users browse and install into their repos. +You are working on the **template library** — bundled agents, slash commands, hooks, and settings that users browse and install into their repos. 
## Key Files - `src/commands/add.js` — Core `aspens add [name]` command; copies templates to `.claude/` dirs -- `src/commands/customize.js` — `aspens customize agents` post-install step; uses Claude to inject project context into installed agents +- `src/commands/customize.js` — `aspens customize agents` post-install step; uses Claude to inject project context - `src/templates/agents/*.md` — Agent persona templates (9 bundled) - `src/templates/commands/*.md` — Slash command templates (2 bundled) -- `src/templates/hooks/*.sh` — Hook script templates (2 bundled) +- `src/templates/hooks/` — Hook scripts (5 bundled): `skill-activation-prompt.sh/mjs`, `graph-context-prompt.sh/mjs`, `post-tool-use-tracker.sh` +- `src/templates/settings/settings.json` — Default settings with hook configuration ## Key Concepts -- **Three resource types:** `agent` → `.claude/agents`, `command` → `.claude/commands`, `hook` → `.claude/hooks`. Defined in `RESOURCE_TYPES` constant. -- **Template discovery:** `listAvailable()` reads template dir, filters for `.md`/`.sh` files, regex-parses `name:` and `description:` from file content (not proper YAML parsing). -- **Install modes:** Interactive multiselect (no name), by exact name, or `all` for bulk install. -- **No-overwrite policy:** `addResource()` skips files that already exist at the target path via `existsSync` check — never overwrites user-modified files. -- **Customize flow:** Reads CLAUDE.md + skills as context → sends each agent through Claude → writes updated agents back. Only supports `agents` target currently. +- **Three resource types for `add`:** `agent` → `.claude/agents`, `command` → `.claude/commands`, `hook` → `.claude/hooks`. Settings installed automatically by `doc init`. +- **Hook templates:** `skill-activation-prompt` reads `skill-rules.json` and injects relevant skills into prompts. `graph-context-prompt` loads graph data for code navigation. `post-tool-use-tracker` detects skill domains from file access patterns. 
+- **`doc init` hook installation (step 9):** Generates `skill-rules.json` from skills, copies hook files, generates `post-tool-use-tracker.sh` with domain patterns (via `BEGIN/END` markers), merges `settings.json` with backup. +- **Template discovery:** `listAvailable()` reads template dir, filters `.md`/`.sh` files, regex-parses `name:` and `description:`. +- **No-overwrite policy:** `addResource()` skips files that already exist via `existsSync` check. +- **Customize flow:** Reads CLAUDE.md + skills as context → sends each agent through Claude → writes updated agents back. Only supports `agents` target. ## Critical Rules -- Template files **must** contain `name: ` and `description: ` lines parseable by regex — without these, the template gets a fallback filename-based name and empty description. -- Only `.md` and `.sh` extensions are discovered by `listAvailable()`. Other file types are silently ignored. +- Template files **must** contain `name: ` and `description: ` lines parseable by regex. +- Only `.md` and `.sh` extensions are discovered by `listAvailable()`. `.mjs` files are copied by `doc init` directly, not by `add`. - The templates dir resolves from `src/commands/` via `join(__dirname, '..', 'templates')` — moving `add.js` breaks template resolution. -- `customize` requires existing `.claude/agents/` AND either CLAUDE.md or `.claude/skills/` — it exits cleanly if neither exists, telling users to run `doc init` first. -- `customize` uses `writeSkillFiles` with `{ force: true }` to allow writing to `.claude/agents/` paths, bypassing the normal skills-only path restriction. +- `customize` requires `.claude/agents/` AND either CLAUDE.md or `.claude/skills/` — exits cleanly otherwise. +- Commands throw `CliError` for expected failures instead of calling `process.exit()`. 
--- -**Last Updated:** 2026-03-21 +**Last Updated:** 2026-03-24 diff --git a/.gitignore b/.gitignore index 9096439..64bcd5e 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,8 @@ dist/ !.env.example .claude/settings.local.json .claude/tsc-cache/ + +# aspens graph artifacts (generated — do not commit) +.claude/graph.json +.claude/graph-index.json +.claude/code-map.md diff --git a/CHANGELOG.md b/CHANGELOG.md index b5f6395..e0ae2fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,30 @@ ## [Unreleased] +## [0.4.0] - 2026-03-24 + +### Added +- **`doc sync --refresh`** — review and update all skills against the current codebase state without requiring a git diff +- **`add skill` command** — scaffold custom skills (`aspens add skill my-convention`) or generate from reference docs (`aspens add skill release --from dev/release.md`) +- **Interactive file picker** — when diff exceeds 80k chars, prompts to select which files Claude should analyze (skill-relevant files pre-selected) +- **Diff prioritization** — skill-relevant files get 60k of the 80k char budget so they survive truncation +- **Git hook hardening** — 5-minute cooldown, skip aspens-only commits, log rotation, stale lock cleanup, POSIX-compatible cleanup +- **Graph artifact gitignore** — `graph.json`, `graph-index.json`, `code-map.md` auto-added to `.gitignore` to prevent sync loops +- **35 new tests** — coverage for `resolveTimeout`, activation matching (`getActivationBlock`, `fileMatchesActivation`), `skillToDomain`, and `add skill` scaffold mode (162 → 197 tests) + +### Changed +- **Module split** — extracted `git-helpers.js`, `diff-helpers.js`, `git-hook.js` from doc-sync.js (813 → 540 lines); pure orchestration remains +- **Shared activation matching** — deduplicated 3 copies of file-to-skill matching into `getActivationBlock()` and `fileMatchesActivation()` in skill-reader.js, fixing inconsistent regex +- **Security hardening** — all git commands use `execFileSync` (no shell interpolation), 
`chmodSync` replaces shell `chmod`, `fileMatchesActivation` guards against empty inputs +- **Skill rules regeneration** — `doc sync` now regenerates `skill-rules.json` after every write (was only done in refresh mode) +- **Consistent timeout warnings** — all three commands (`doc-sync`, `doc-init`, `customize`) now surface warnings for invalid `ASPENS_TIMEOUT` values +- **CliError cause chain** — errors from Claude calls now preserve the original error via `{ cause: err }` for better debugging +- **Gitignore matching** — line-based `Set` lookup replaces substring `includes()` to prevent false positives + +### Fixed +- **Empty file selection** — interactive picker now cancels cleanly instead of silently sending the full diff when all files are deselected +- **Mid-line truncation** — `truncateDiff` falls back to last newline boundary instead of cutting mid-line when no hunk boundary is found + ## [0.3.0] - 2026-03-23 ### Added diff --git a/CLAUDE.md b/CLAUDE.md index b5c85e0..77a01f9 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -8,8 +8,9 @@ CLI tool that generates and maintains AI-ready documentation (skills + CLAUDE.md npm test # vitest run npm start # node bin/cli.js aspens scan [path] # detect tech stack, domains, structure -aspens doc init # generate skills + CLAUDE.md from code +aspens doc init # generate skills + hooks + CLAUDE.md aspens doc sync # update skills from recent commits +aspens doc graph # rebuild import graph cache aspens add # install agents/commands/hooks from template library aspens customize agents # inject project context into installed agents ``` @@ -17,18 +18,21 @@ aspens customize agents # inject project context into installed agents ## Architecture ``` -bin/cli.js # entry point — Commander program, all subcommands registered here -src/commands/ # command handlers: scan, doc-init, doc-sync, add, customize +bin/cli.js # entry point — Commander program, CliError handler +src/commands/ # command handlers: scan, doc-init, doc-sync, doc-graph, add, 
customize src/lib/ scanner.js # deterministic repo analysis (languages, frameworks, domains) graph-builder.js # static import graph, domain clusters, hub detection + graph-persistence.js # graph serialization, subgraph extraction, code-map, graph-index context-builder.js # assembles context payloads for Claude prompts runner.js # Claude CLI execution, stream-json parsing, file output extraction - skill-writer.js # writes skill .md files and CLAUDE.md to disk + skill-writer.js # writes skill .md files, generates skill-rules.json, merges settings + skill-reader.js # parses skill frontmatter, activation patterns, keywords + errors.js # CliError class for structured error handling src/templates/ agents/ # 9 agent templates (.md) commands/ # 2 command templates (.md) - hooks/ # 2 hook templates (.sh) + hooks/ # 5 hook templates (.sh + .mjs) settings/ # settings templates tests/ # vitest tests + fixtures ``` @@ -40,12 +44,12 @@ The project ships as both a CLI and a set of Claude Code skills registered in th | Skill | Description | |---|---| | agent-customization | LLM-powered injection of project context into agents | -| claude-runner | Prompt loading, stream-json parsing, file output extraction | +| claude-runner | Prompt loading, stream-json parsing, file output extraction, skill rule generation | | doc-sync | Maps git diffs to affected skills, optional post-commit hook | -| import-graph | Dependency graphs, domain clusters, hub files, churn hotspots | +| import-graph | Dependency graphs, domain clusters, hub files, churn hotspots, graph persistence | | repo-scanning | Language/framework detection, structure mapping, domain discovery | -| skill-generation | LLM generation pipeline for skills and CLAUDE.md | -| template-library | Bundled agents, commands, hooks installed via `aspens add` | +| skill-generation | LLM generation pipeline for skills, hooks, and CLAUDE.md | +| template-library | Bundled agents, commands, hooks, settings installed via `aspens add` | ## Dev 
docs @@ -67,6 +71,7 @@ Or comment `@coderabbitai review` on any open PR. - **ESM only** — `"type": "module"` everywhere, no CommonJS - **Node >= 20** required +- **CliError pattern** — command handlers throw `CliError` (not `process.exit()`); caught at top level in `bin/cli.js` - No linter configured yet; `npm run lint` is a no-op - Dependencies: commander, es-module-lexer, picocolors, @clack/prompts - Tests live in `tests/` and use vitest — run with `npm test` diff --git a/bin/cli.js b/bin/cli.js index 4be8378..cf0e72f 100755 --- a/bin/cli.js +++ b/bin/cli.js @@ -54,11 +54,13 @@ function showWelcome() { ${pc.green('aspens doc init --verbose')} See what Claude is reading ${pc.green('aspens doc sync')} ${pc.dim('[path]')} Update skills from recent commits ${pc.green('aspens doc sync --commits 5')} Sync from last 5 commits + ${pc.green('aspens doc sync --refresh')} Refresh all skills from current code ${pc.bold('Add Components')} ${pc.green('aspens add agent')} ${pc.dim('[name]')} Add AI agents ${pc.dim(`(${countTemplates('agents')} available)`)} ${pc.green('aspens add command')} ${pc.dim('[name]')} Add slash commands ${pc.dim(`(${countTemplates('commands')} available)`)} ${pc.green('aspens add hook')} ${pc.dim('[name]')} Add auto-triggering hooks ${pc.dim(`(${countTemplates('hooks')} available)`)} + ${pc.green('aspens add skill')} ${pc.dim('')} Add custom skills (conventions, workflows) ${pc.green('aspens add agent --list')} Browse the library ${pc.green('aspens customize agents')} Inject project context into agents @@ -148,6 +150,7 @@ doc .description('Update skills from recent commits') .argument('[path]', 'Path to repo', '.') .option('--commits ', 'Number of commits to analyze', parseCommits, 1) + .option('--refresh', 'Refresh all skills from current codebase state (no git diff)') .option('--install-hook', 'Install git post-commit hook') .option('--remove-hook', 'Remove git post-commit hook') .option('--dry-run', 'Preview without writing files') @@ -169,10 
+172,14 @@ doc // Add command program .command('add') - .description('Add agents, hooks, or commands from the library') - .argument('', 'What to add: agent, hook, command') + .description('Add agents, hooks, commands, or custom skills') + .argument('', 'What to add: agent, hook, command, skill') .argument('[name]', 'Name of the resource') .option('--list', 'List available resources') + .option('--from ', 'Generate skill from a reference document (skill type only)') + .option('--timeout ', 'Claude timeout in seconds (skill --from)', parseTimeout) + .option('--model ', 'Claude model to use (skill --from)') + .option('--verbose', 'Show Claude activity (skill --from)') .action((type, name, options) => { checkMissingHooks(resolve('.')); return addCommand(type, name, options); diff --git a/package-lock.json b/package-lock.json index cdafebb..50095c4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "aspens", - "version": "0.3.0", + "version": "0.4.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "aspens", - "version": "0.3.0", + "version": "0.4.0", "hasInstallScript": true, "license": "MIT", "dependencies": { diff --git a/package.json b/package.json index 9c28305..6cc0422 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "aspens", - "version": "0.3.0", + "version": "0.4.0", "description": "Generate and maintain AI-ready documentation for any codebase", "type": "module", "bin": { diff --git a/src/commands/add.js b/src/commands/add.js index 1a27719..4f09812 100644 --- a/src/commands/add.js +++ b/src/commands/add.js @@ -1,9 +1,13 @@ import { resolve, join, dirname, basename } from 'path'; -import { existsSync, readFileSync, copyFileSync, mkdirSync, readdirSync } from 'fs'; +import { existsSync, readFileSync, writeFileSync, copyFileSync, mkdirSync, readdirSync } from 'fs'; import { fileURLToPath } from 'url'; import pc from 'picocolors'; import * as p from '@clack/prompts'; import { CliError } from 
'../lib/errors.js'; +import { resolveTimeout } from '../lib/timeout.js'; +import { runClaude, loadPrompt, parseFileOutput } from '../lib/runner.js'; +import { extractRulesFromSkills } from '../lib/skill-writer.js'; +import { findSkillFiles } from '../lib/skill-reader.js'; const __dirname = dirname(fileURLToPath(import.meta.url)); const TEMPLATES_DIR = join(__dirname, '..', 'templates'); @@ -32,6 +36,11 @@ const RESOURCE_TYPES = { export async function addCommand(type, name, options) { const repoPath = resolve('.'); + // Skill type — handled separately (not template-based) + if (type === 'skill') { + return addSkillCommand(repoPath, name, options); + } + // Validate type if (!RESOURCE_TYPES[type]) { console.log(` @@ -41,11 +50,14 @@ export async function addCommand(type, name, options) { ${pc.green('agent')} ${RESOURCE_TYPES.agent.description} ${pc.green('command')} ${RESOURCE_TYPES.command.description} ${pc.green('hook')} ${RESOURCE_TYPES.hook.description} + ${pc.green('skill')} Custom skills for conventions, workflows, and processes Usage: ${pc.dim('aspens add agent [name]')} ${pc.dim('aspens add command [name]')} ${pc.dim('aspens add hook [name]')} + ${pc.dim('aspens add skill ')} + ${pc.dim('aspens add skill --from doc.md')} ${pc.dim('aspens add agent --list')} `); throw new CliError(`Unknown type: ${type}`, { logged: true }); @@ -188,3 +200,194 @@ function addResource(repoPath, resourceType, name, available) { copyFileSync(sourceFile, targetFile); console.log(` ${pc.green('+')} ${resourceType.targetDir}/${resource.fileName}`); } + +// --- Custom skill --- + +async function addSkillCommand(repoPath, name, options) { + const skillsDir = join(repoPath, '.claude', 'skills'); + + // --list mode: show existing skills + if (options.list) { + const skills = existsSync(skillsDir) ? 
findSkillFiles(skillsDir) : []; + console.log(` + ${pc.bold('Skills')} ${pc.dim(`(${skills.length} installed)`)} + ${pc.dim('Custom skills for conventions, workflows, and processes.')} +`); + if (skills.length === 0) { + console.log(pc.dim(' None installed yet.\n')); + } else { + for (const skill of skills) { + const desc = skill.frontmatter?.description || ''; + console.log(` ${pc.green(skill.name)}`); + if (desc) console.log(` ${pc.dim(desc)}`); + console.log(); + } + } + console.log(pc.dim(' Create one: aspens add skill my-convention')); + console.log(pc.dim(' From a doc: aspens add skill release --from dev/release.md')); + console.log(); + return; + } + + // Name is required for skills + if (!name) { + console.log(` + ${pc.bold('Add a custom skill')} + + Usage: + ${pc.green('aspens add skill ')} Scaffold a new skill + ${pc.green('aspens add skill --from ')} Generate from a reference doc + ${pc.green('aspens add skill --list')} Show existing skills + + Examples: + ${pc.dim('aspens add skill ui-conventions')} + ${pc.dim('aspens add skill release --from dev/release.md')} + ${pc.dim('aspens add skill code-review --from docs/review-process.md')} +`); + return; + } + + // Sanitize skill name + const safeName = name.toLowerCase().replace(/[^a-z0-9-]/g, '-').replace(/-+/g, '-').replace(/^-|-$/g, ''); + if (!safeName) { + throw new CliError('Invalid skill name. 
Use letters, numbers, and hyphens.'); + } + + const skillDir = join(skillsDir, safeName); + const skillPath = join(skillDir, 'skill.md'); + const relPath = `.claude/skills/${safeName}/skill.md`; + + if (existsSync(skillPath)) { + console.log(pc.yellow(`\n Skill already exists: ${relPath}`)); + console.log(pc.dim(' Edit it directly or delete and re-create.\n')); + return; + } + + // --from mode: generate from reference doc + if (options.from) { + return generateSkillFromDoc(repoPath, safeName, options); + } + + // Scaffold mode: create blank template + const today = new Date().toISOString().split('T')[0]; + const scaffold = `--- +name: ${safeName} +description: TODO — describe what this skill covers +--- + +## Activation + +This skill triggers when working on ${safeName}-related tasks. +- \`TODO: add file patterns\` + +Keywords: ${safeName} + +--- + +You are working on **${safeName}**. + +## Key Files +- \`TODO\` — Add key files relevant to this skill + +## Key Concepts +- **TODO:** Add key concepts, conventions, or workflows + +## Critical Rules +- TODO: Add rules that must not be violated + +--- +**Last Updated:** ${today} +`; + + mkdirSync(skillDir, { recursive: true }); + writeFileSync(skillPath, scaffold, 'utf8'); + + console.log(`\n ${pc.green('+')} ${relPath}`); + console.log(pc.dim(`\n Edit the skill to add your conventions and file patterns.`)); + console.log(pc.dim(` Then run ${pc.cyan('aspens doc init --hooks-only')} to update activation rules.\n`)); + + updateSkillRules(skillsDir); +} + +async function generateSkillFromDoc(repoPath, skillName, options) { + const fromPath = resolve(options.from); + if (!existsSync(fromPath)) { + throw new CliError(`Reference file not found: ${options.from}`); + } + + const skillDir = join(repoPath, '.claude', 'skills', skillName); + const relPath = `.claude/skills/${skillName}/skill.md`; + const verbose = !!options.verbose; + + const { timeoutMs } = resolveTimeout(options.timeout, 120); + + let refContent = 
readFileSync(fromPath, 'utf8'); + const REF_MAX_CHARS = 50000; + if (refContent.length > REF_MAX_CHARS) { + refContent = refContent.slice(0, REF_MAX_CHARS) + '\n... (truncated)'; + p.log.warn(`Reference doc truncated to ${Math.round(REF_MAX_CHARS / 1024)}k chars.`); + } + const today = new Date().toISOString().split('T')[0]; + + const systemPrompt = loadPrompt('add-skill', { + skillPath: relPath, + }); + + const userPrompt = `Skill name: ${skillName} +Today's date: ${today} +Repository path: ${repoPath} + +## Reference Document (${options.from}) +\`\`\` +${refContent} +\`\`\``; + + const fullPrompt = `${systemPrompt}\n\n---\n\n${userPrompt}`; + + const genSpinner = p.spinner(); + genSpinner.start(`Generating ${pc.cyan(skillName)} skill from ${options.from}...`); + + let result; + try { + result = await runClaude(fullPrompt, { + timeout: timeoutMs, + allowedTools: ['Read', 'Glob', 'Grep'], + verbose, + model: options.model || null, + onActivity: verbose ? (msg) => genSpinner.message(pc.dim(msg)) : null, + }); + } catch (err) { + genSpinner.stop(pc.red('Failed')); + throw new CliError(err.message, { cause: err }); + } + + const files = parseFileOutput(result.text); + if (files.length === 0) { + genSpinner.stop(pc.red('No skill generated')); + throw new CliError('Claude did not produce a skill file. 
Try a different reference document or write the skill manually.'); + } + + genSpinner.stop(`Generated ${pc.cyan(skillName)} skill`); + + // Write the skill + mkdirSync(skillDir, { recursive: true }); + for (const file of files) { + const fullPath = join(repoPath, file.path); + mkdirSync(dirname(fullPath), { recursive: true }); + writeFileSync(fullPath, file.content, 'utf8'); + console.log(`\n ${pc.green('+')} ${file.path}`); + } + + const skillsDir = join(repoPath, '.claude', 'skills'); + updateSkillRules(skillsDir); + + console.log(pc.dim(`\n Review the generated skill and adjust as needed.`)); + console.log(pc.dim(` Run ${pc.cyan('aspens doc init --hooks-only')} to update activation hooks.\n`)); +} + +function updateSkillRules(skillsDir) { + try { + const rules = extractRulesFromSkills(skillsDir); + writeFileSync(join(skillsDir, 'skill-rules.json'), JSON.stringify(rules, null, 2) + '\n'); + } catch { /* non-fatal — hooks-only will catch up */ } +} diff --git a/src/commands/customize.js b/src/commands/customize.js index 40e064f..a46d07c 100644 --- a/src/commands/customize.js +++ b/src/commands/customize.js @@ -5,12 +5,14 @@ import * as p from '@clack/prompts'; import { runClaude, loadPrompt, parseFileOutput } from '../lib/runner.js'; import { writeSkillFiles } from '../lib/skill-writer.js'; import { CliError } from '../lib/errors.js'; +import { resolveTimeout } from '../lib/timeout.js'; const READ_ONLY_TOOLS = ['Read', 'Glob', 'Grep']; export async function customizeCommand(what, options) { const repoPath = resolve('.'); - const timeoutMs = (typeof options.timeout === 'number' ? 
options.timeout : 300) * 1000; + const { timeoutMs, envWarning } = resolveTimeout(options.timeout, 300); + if (envWarning) p.log.warn('ASPENS_TIMEOUT is not a valid number — using default timeout.'); const verbose = !!options.verbose; if (what !== 'agents') { diff --git a/src/commands/doc-init.js b/src/commands/doc-init.js index 43ea39a..cd4f125 100644 --- a/src/commands/doc-init.js +++ b/src/commands/doc-init.js @@ -8,8 +8,9 @@ import { buildRepoGraph } from '../lib/graph-builder.js'; import { runClaude, loadPrompt, parseFileOutput, validateSkillFiles } from '../lib/runner.js'; import { writeSkillFiles, extractRulesFromSkills, generateDomainPatterns, mergeSettings } from '../lib/skill-writer.js'; import { persistGraphArtifacts } from '../lib/graph-persistence.js'; -import { installGitHook } from './doc-sync.js'; +import { installGitHook } from '../lib/git-hook.js'; import { CliError } from '../lib/errors.js'; +import { resolveTimeout } from '../lib/timeout.js'; const __dirname = dirname(fileURLToPath(import.meta.url)); const TEMPLATES_DIR = join(__dirname, '..', 'templates'); @@ -19,11 +20,11 @@ const READ_ONLY_TOOLS = ['Read', 'Glob', 'Grep']; // Auto-scale timeout based on repo size function autoTimeout(scan, userTimeout) { - if (typeof userTimeout === 'number' && userTimeout > 0) { - return userTimeout * 1000; - } - const defaults = { 'small': 120000, 'medium': 300000, 'large': 600000, 'very-large': 900000 }; - return defaults[scan.size?.category] || 300000; + const sizeDefaults = { 'small': 120, 'medium': 300, 'large': 600, 'very-large': 900 }; + const fallback = sizeDefaults[scan.size?.category] || 300; + const { timeoutMs, envWarning } = resolveTimeout(userTimeout, fallback); + if (envWarning) console.warn('Warning: ASPENS_TIMEOUT is not a valid number — using auto-scaled timeout.'); + return timeoutMs; } function makeClaudeOptions(timeoutMs, verbose, model, spinner) { diff --git a/src/commands/doc-sync.js b/src/commands/doc-sync.js index ddac6b4..ea0f3b4 
100644 --- a/src/commands/doc-sync.js +++ b/src/commands/doc-sync.js @@ -1,20 +1,25 @@ -import { resolve, join, relative } from 'path'; -import { existsSync, readFileSync, writeFileSync, mkdirSync, readdirSync, statSync, unlinkSync } from 'fs'; -import { execSync } from 'child_process'; +import { resolve, join, relative, dirname } from 'path'; +import { existsSync, readFileSync, writeFileSync } from 'fs'; import pc from 'picocolors'; import * as p from '@clack/prompts'; import { scanRepo } from '../lib/scanner.js'; import { runClaude, loadPrompt, parseFileOutput } from '../lib/runner.js'; -import { writeSkillFiles } from '../lib/skill-writer.js'; +import { writeSkillFiles, extractRulesFromSkills } from '../lib/skill-writer.js'; import { buildRepoGraph } from '../lib/graph-builder.js'; import { persistGraphArtifacts, loadGraph, extractSubgraph, formatNavigationContext } from '../lib/graph-persistence.js'; +import { findSkillFiles, parseActivationPatterns, getActivationBlock, fileMatchesActivation } from '../lib/skill-reader.js'; +import { buildDomainContext, buildBaseContext } from '../lib/context-builder.js'; import { CliError } from '../lib/errors.js'; +import { resolveTimeout } from '../lib/timeout.js'; +import { installGitHook, removeGitHook } from '../lib/git-hook.js'; +import { isGitRepo, getGitDiff, getGitLog, getChangedFiles } from '../lib/git-helpers.js'; +import { getSelectedFilesDiff, buildPrioritizedDiff, truncate } from '../lib/diff-helpers.js'; const READ_ONLY_TOOLS = ['Read', 'Glob', 'Grep']; +const PARALLEL_LIMIT = 3; export async function docSyncCommand(path, options) { const repoPath = resolve(path); - const timeoutMs = (typeof options.timeout === 'number' ? options.timeout : 300) * 1000; const verbose = !!options.verbose; const commits = typeof options.commits === 'number' ? 
options.commits : 1; @@ -26,6 +31,11 @@ export async function docSyncCommand(path, options) { return removeGitHook(repoPath); } + // Refresh mode — skip diff, review all skills against current codebase + if (options.refresh) { + return refreshAllSkills(repoPath, options); + } + p.intro(pc.cyan('aspens doc sync')); // Step 1: Check prerequisites @@ -93,10 +103,62 @@ export async function docSyncCommand(path, options) { p.log.info('No skills directly affected, but Claude will check for structural changes.'); } + // Timeout priority: --timeout flag > ASPENS_TIMEOUT env var > auto-scaled default + const autoTimeout = Math.min(300 + affectedSkills.length * 60, 900); + const { timeoutMs, envWarning } = resolveTimeout(options.timeout, autoTimeout); + if (envWarning) p.log.warn('ASPENS_TIMEOUT is not a valid number — using auto-scaled timeout.'); + // Step 4: Build prompt const today = new Date().toISOString().split('T')[0]; const systemPrompt = loadPrompt('doc-sync'); + // Skill-relevant files (for diff prioritization and interactive picker pre-selection) + const relevantFiles = changedFiles.filter(f => + affectedSkills.some(skill => fileMatchesActivation(f, getActivationBlock(skill.content))) + ); + + // Interactive file picker: offer when diff is large and a TTY is available + let selectedFiles = changedFiles; + if (diff.length > 80000 && process.stdout.isTTY) { + const fullKb = Math.round(diff.length / 1024); + console.log(); + p.log.warn(`Large diff (${fullKb}k chars) — select which files Claude should analyze:`); + console.log(pc.dim(' Skill-relevant files are pre-selected. Deselect large docs/data files to save time.\n')); + const picked = await p.multiselect({ + message: 'Files to include in analysis', + options: changedFiles.map(f => ({ + value: f, + label: f, + hint: relevantFiles.includes(f) ? pc.cyan('skill-relevant') : '', + })), + initialValues: relevantFiles.length > 0 ? 
relevantFiles : changedFiles, + }); + if (p.isCancel(picked)) { + p.cancel('Sync cancelled'); + return; + } + if (picked.length === 0) { + p.cancel('No files selected'); + return; + } + selectedFiles = picked; + } + + // Build diff from selected files only, or use full prioritized diff + let activeDiff; + if (selectedFiles.length < changedFiles.length) { + activeDiff = getSelectedFilesDiff(repoPath, selectedFiles, actualCommits); + if (activeDiff.includes('(diff truncated')) { + p.log.warn('Selected files still exceed 80k — diff truncated. Claude will use Read tool for the rest.'); + } + } else { + activeDiff = buildPrioritizedDiff(diff, relevantFiles); + if (activeDiff.includes('(diff truncated')) { + const fullKb = Math.round(diff.length / 1024); + p.log.warn(`Large commit (${fullKb}k chars) — diff truncated. Claude will use Read tool for full file contents.`); + } + } + // Send affected skills in full, others as just path + description (save tokens) const affectedPaths = new Set(affectedSkills.map(s => s.path)); const skillContents = existingSkills.map(s => { @@ -122,12 +184,12 @@ ${commitLog} ## Git Diff \`\`\`diff -${truncateDiff(diff, 15000)} +${activeDiff} \`\`\` ## Changed Files -${changedFiles.join('\n')} -${graphContext ? `\n## Import Graph Context\n${graphContext}` : ''} +${selectedFiles.join('\n')} +${graphContext ? `\n## Import Graph Context\n${graphContext}\n` : ''} ## Existing Skills ${skillContents} @@ -153,7 +215,7 @@ ${truncate(claudeMdContent, 5000)} }); } catch (err) { syncSpinner.stop(pc.red('Failed')); - throw new CliError(err.message); + throw new CliError(err.message, { cause: err }); } // Step 6: Parse output @@ -189,151 +251,50 @@ ${truncate(claudeMdContent, 5000)} const results = writeSkillFiles(repoPath, files, { force: true }); console.log(); - for (const result of results) { - const icon = result.status === 'overwritten' ? 
pc.yellow('~') : pc.green('+'); - console.log(` ${icon} ${result.path}`); - } - - console.log(); - p.outro(`${results.length} file(s) updated`); -} - -// --- Git helpers --- - -function isGitRepo(repoPath) { - try { - execSync('git rev-parse --git-dir', { cwd: repoPath, stdio: 'pipe', timeout: 5000 }); - return true; - } catch { - return false; - } -} - -function getGitDiff(repoPath, commits) { - // Try requested commit count, fall back to fewer - for (let n = commits; n >= 1; n--) { - try { - const diff = execSync(`git diff HEAD~${n}..HEAD`, { - cwd: repoPath, - encoding: 'utf8', - maxBuffer: 10 * 1024 * 1024, - timeout: 30000, - stdio: ['pipe', 'pipe', 'pipe'], - }); - return { diff, actualCommits: n }; - } catch { - continue; - } + for (const wr of results) { + const icon = wr.status === 'overwritten' ? pc.yellow('~') : pc.green('+'); + console.log(` ${icon} ${wr.path}`); } - return { diff: '', actualCommits: 0 }; -} -function getGitLog(repoPath, commits) { + // Regenerate skill-rules.json so hooks see updated activation patterns try { - return execSync(`git log --oneline -${commits}`, { - cwd: repoPath, - encoding: 'utf8', - maxBuffer: 5 * 1024 * 1024, - timeout: 10000, - stdio: ['pipe', 'pipe', 'pipe'], - }).trim(); - } catch { - return ''; - } -} + const skillsDir = join(repoPath, '.claude', 'skills'); + const rules = extractRulesFromSkills(skillsDir); + writeFileSync(join(skillsDir, 'skill-rules.json'), JSON.stringify(rules, null, 2) + '\n'); + } catch { /* non-fatal */ } -function getChangedFiles(repoPath, commits) { - try { - const output = execSync(`git diff --name-only HEAD~${commits}..HEAD`, { - cwd: repoPath, - encoding: 'utf8', - maxBuffer: 5 * 1024 * 1024, - timeout: 15000, - stdio: ['pipe', 'pipe', 'pipe'], - }); - return output.trim().split('\n').filter(Boolean); - } catch { - return []; - } + console.log(); + p.outro(`${results.length} file(s) updated`); } // --- Skill mapping --- function findExistingSkills(repoPath) { const skillsDir = 
join(repoPath, '.claude', 'skills'); - const skills = []; - - if (!existsSync(skillsDir)) return skills; - - function walkDir(dir) { - try { - const entries = readdirSync(dir); - for (const entry of entries) { - const full = join(dir, entry); - if (statSync(full).isDirectory()) { - walkDir(full); - } else if (entry === 'skill.md' || entry.endsWith('.md')) { - const content = readFileSync(full, 'utf8'); - const nameMatch = content.match(/name:\s*(.+)/); - const relativePath = relative(repoPath, full); - skills.push({ - name: nameMatch ? nameMatch[1].trim() : entry, - path: relativePath, - content, - }); - } - } - } catch { /* skip unreadable dirs */ } - } - - walkDir(skillsDir); - return skills; + return findSkillFiles(skillsDir).map(s => ({ + name: s.name, + path: relative(repoPath, s.path), + content: s.content, + })); } -// Path segments too generic to use for skill matching -const GENERIC_PATH_SEGMENTS = new Set([ - 'src', 'app', 'lib', 'api', 'v1', 'v2', 'components', 'services', - 'utils', 'helpers', 'common', 'core', 'config', 'middleware', - 'models', 'types', 'hooks', 'pages', 'routes', 'tests', 'test', - 'public', 'assets', 'styles', 'scripts', -]); - function mapChangesToSkills(changedFiles, existingSkills, scan, repoGraph = null) { const affected = []; for (const skill of existingSkills) { if (skill.name === 'base') continue; // base handled separately below - // Extract file paths and specific names from the activation section - const activationMatch = skill.content.match(/## Activation[\s\S]*?---/); - if (!activationMatch) continue; + const activationBlock = getActivationBlock(skill.content); + if (!activationBlock) continue; - const activationBlock = activationMatch[0].toLowerCase(); - - let isAffected = changedFiles.some(file => { - const fileLower = file.toLowerCase(); - // Check the filename itself (e.g., billing_service.py) - const fileName = fileLower.split('/').pop(); - if (activationBlock.includes(fileName)) return true; - - // Check meaningful 
path segments (skip generic ones) - const parts = fileLower.split('/').filter(p => !GENERIC_PATH_SEGMENTS.has(p) && p.length > 2); - return parts.some(part => activationBlock.includes(part)); - }); + let isAffected = changedFiles.some(file => fileMatchesActivation(file, activationBlock)); // Graph-aware: check if changed files are imported by files in this skill's domain if (!isAffected && repoGraph) { isAffected = changedFiles.some(file => { const info = repoGraph.files[file]; if (!info) return false; - // Check if any file that imports the changed file matches the activation block - return (info.importedBy || []).some(dep => { - const depLower = dep.toLowerCase(); - const depName = depLower.split('/').pop(); - if (activationBlock.includes(depName)) return true; - const parts = depLower.split('/').filter(p => !GENERIC_PATH_SEGMENTS.has(p) && p.length > 2); - return parts.some(part => activationBlock.includes(part)); - }); + return (info.importedBy || []).some(dep => fileMatchesActivation(dep, activationBlock)); }); } @@ -354,135 +315,236 @@ function mapChangesToSkills(changedFiles, existingSkills, scan, repoGraph = null return affected; } -// --- Git hook --- +// --- Refresh mode --- -function resolveAspensPath() { - const cmd = process.platform === 'win32' ? 
'where aspens' : 'which aspens'; - try { - const resolved = execSync(cmd, { - encoding: 'utf8', - timeout: 5000, - stdio: ['pipe', 'pipe', 'pipe'], - }).trim(); - if (resolved && existsSync(resolved)) return resolved; - } catch { /* not in PATH */ } - return 'npx aspens'; -} +async function refreshAllSkills(repoPath, options) { + const verbose = !!options.verbose; -export function installGitHook(repoPath) { - const hookDir = join(repoPath, '.git', 'hooks'); - const hookPath = join(hookDir, 'post-commit'); + p.intro(pc.cyan('aspens doc sync --refresh')); - if (!existsSync(join(repoPath, '.git'))) { + // Prerequisites + if (!isGitRepo(repoPath)) { throw new CliError('Not a git repository.'); } + const skillsDir = join(repoPath, '.claude', 'skills'); + if (!existsSync(skillsDir)) { + throw new CliError('No .claude/skills/ found. Run aspens doc init first.'); + } - mkdirSync(hookDir, { recursive: true }); + // Step 1: Scan + graph + const scanSpinner = p.spinner(); + scanSpinner.start('Scanning repo and building import graph...'); - const aspensCmd = resolveAspensPath(); + const scan = scanRepo(repoPath); + try { + const rawGraph = await buildRepoGraph(repoPath, scan.languages); + persistGraphArtifacts(repoPath, rawGraph); + } catch (err) { + p.log.warn(`Graph build failed — continuing without it. 
(${err.message})`); + } - const hookBlock = ` -# >>> aspens doc-sync hook (do not edit) >>> -__aspens_doc_sync() { - REPO_ROOT="\$(git rev-parse --show-toplevel 2>/dev/null)" || return 0 - REPO_HASH="\$(echo "\$REPO_ROOT" | (shasum 2>/dev/null || sha1sum 2>/dev/null || md5sum 2>/dev/null) | cut -c1-8)" - ASPENS_LOCK="/tmp/aspens-sync-\${REPO_HASH}.lock" - ASPENS_LOG="/tmp/aspens-sync-\${REPO_HASH}.log" + scanSpinner.stop('Scan complete'); - # Cooldown: skip if last sync was less than 5 minutes ago - if [ -f "\$ASPENS_LOCK" ]; then - LAST_RUN=\$(cat "\$ASPENS_LOCK" 2>/dev/null || echo 0) - NOW=\$(date +%s) - if [ \$((NOW - LAST_RUN)) -lt 300 ]; then - return 0 - fi - fi - echo \$(date +%s) > "\$ASPENS_LOCK" + // Step 2: Load existing skills + const existingSkills = findExistingSkills(repoPath); + if (existingSkills.length === 0) { + throw new CliError('No skills found in .claude/skills/. Run aspens doc init first.'); + } - # Clean up stale lock files older than 1 hour - find /tmp -maxdepth 1 -name "aspens-sync-*.lock" -mmin +60 -delete 2>/dev/null + const baseSkill = existingSkills.find(s => s.name === 'base'); + const domainSkills = existingSkills.filter(s => s.name !== 'base'); - # Truncate log if over 200 lines - if [ -f "\$ASPENS_LOG" ] && [ "\$(wc -l < "\$ASPENS_LOG" 2>/dev/null || echo 0)" -gt 200 ]; then - tail -100 "\$ASPENS_LOG" > "\$ASPENS_LOG.tmp" && mv "\$ASPENS_LOG.tmp" "\$ASPENS_LOG" - fi + p.log.info(`Found ${existingSkills.length} skill(s): ${existingSkills.map(s => pc.cyan(s.name)).join(', ')}`); - # Run in background with logging - (echo "[sync] \$(date '+%Y-%m-%d %H:%M:%S') started" >> "\$ASPENS_LOG" && ${aspensCmd} doc sync --commits 1 "\$REPO_ROOT" >> "\$ASPENS_LOG" 2>&1; echo "[sync] \$(date '+%Y-%m-%d %H:%M:%S') finished (exit \$?)" >> "\$ASPENS_LOG") & -} -__aspens_doc_sync -# <<< aspens doc-sync hook <<< -`; - - // Check for existing hook - if (existsSync(hookPath)) { - const existing = readFileSync(hookPath, 'utf8'); - if 
(existing.includes('aspens doc-sync hook') || existing.includes('aspens doc sync')) { - console.log(pc.yellow('\n Hook already installed.\n')); - return; + // Timeout: --timeout flag > ASPENS_TIMEOUT env > auto-scaled + const autoTimeout = Math.min(120 + existingSkills.length * 60, 900); + const { timeoutMs: perSkillTimeout } = resolveTimeout(options.timeout, autoTimeout); + + const today = new Date().toISOString().split('T')[0]; + const systemPrompt = loadPrompt('doc-sync-refresh'); + const allUpdatedFiles = []; + + // Step 3: Refresh base skill first + if (baseSkill) { + const baseSpinner = p.spinner(); + baseSpinner.start('Refreshing base skill...'); + + try { + const baseContext = buildBaseContext(repoPath, scan); + const prompt = `${systemPrompt}\n\n---\n\nRepository path: ${repoPath}\nToday's date: ${today}\n\n## Existing Skill\n\`\`\`\n${baseSkill.content}\n\`\`\`\n\n## Current Codebase\n${baseContext}`; + + const result = await runClaude(prompt, { + timeout: perSkillTimeout, + allowedTools: READ_ONLY_TOOLS, + verbose, + model: options.model || null, + onActivity: verbose ? 
(msg) => baseSpinner.message(pc.dim(msg)) : null, + }); + + const files = parseFileOutput(result.text); + if (files.length > 0) { + allUpdatedFiles.push(...files); + baseSpinner.stop(pc.yellow('base') + ' — updated'); + } else { + baseSpinner.stop(pc.dim('base') + ' — up to date'); + } + } catch (err) { + baseSpinner.stop(pc.red('base — failed: ') + err.message); } - // Append to existing hook (outside shebang) - writeFileSync(hookPath, existing + '\n' + hookBlock, 'utf8'); - console.log(pc.green('\n Appended aspens doc-sync to existing post-commit hook.\n')); - } else { - writeFileSync(hookPath, '#!/bin/sh\n' + hookBlock, 'utf8'); - execSync(`chmod +x "${hookPath}"`); - console.log(pc.green('\n Installed post-commit hook.\n')); } - console.log(pc.dim(' Skills will auto-update after every commit.')); - console.log(pc.dim(' Log: /tmp/aspens-sync-*.log')); - console.log(pc.dim(' Remove with: aspens doc sync --remove-hook\n')); -} + // Step 4: Refresh domain skills in parallel batches + if (domainSkills.length > 0) { + for (let i = 0; i < domainSkills.length; i += PARALLEL_LIMIT) { + const batch = domainSkills.slice(i, i + PARALLEL_LIMIT); -export function removeGitHook(repoPath) { - const hookPath = join(repoPath, '.git', 'hooks', 'post-commit'); + const batchResults = await Promise.all(batch.map(async (skill) => { + const skillSpinner = p.spinner(); + skillSpinner.start(`Refreshing ${pc.cyan(skill.name)}...`); - if (!existsSync(hookPath)) { - console.log(pc.yellow('\n No post-commit hook found.\n')); - return; + try { + const domain = skillToDomain(skill); + const domainContext = buildDomainContext(repoPath, scan, domain); + + const prompt = `${systemPrompt}\n\n---\n\nRepository path: ${repoPath}\nToday's date: ${today}\n\n## Existing Skill\n\`\`\`\n${skill.content}\n\`\`\`\n\n## Current Codebase (${skill.name} domain)\n${domainContext}`; + + const result = await runClaude(prompt, { + timeout: perSkillTimeout, + allowedTools: READ_ONLY_TOOLS, + verbose, + model: 
options.model || null, + onActivity: verbose ? (msg) => skillSpinner.message(pc.dim(msg)) : null, + }); + + const files = parseFileOutput(result.text); + if (files.length > 0) { + skillSpinner.stop(pc.yellow(skill.name) + ' — updated'); + return files; + } else { + skillSpinner.stop(pc.dim(skill.name) + ' — up to date'); + return []; + } + } catch (err) { + skillSpinner.stop(pc.red(`${skill.name} — failed: `) + err.message); + return []; + } + })); + + for (const files of batchResults) { + allUpdatedFiles.push(...files); + } + } } - const content = readFileSync(hookPath, 'utf8'); - const hasMarkers = content.includes('# >>> aspens doc-sync hook'); - const hasLegacy = !hasMarkers && content.includes('aspens doc sync'); + // Step 5: Refresh CLAUDE.md if it exists + const claudeMdPath = join(repoPath, 'CLAUDE.md'); + if (existsSync(claudeMdPath)) { + const claudeSpinner = p.spinner(); + claudeSpinner.start('Checking CLAUDE.md...'); - if (!hasMarkers && !hasLegacy) { - console.log(pc.yellow('\n Post-commit hook does not contain aspens.\n')); - return; + try { + const claudeMd = readFileSync(claudeMdPath, 'utf8'); + const skillSummaries = existingSkills.map(s => { + const descMatch = s.content.match(/description:\s*(.+)/); + return `- **${s.name}**: ${descMatch ? descMatch[1].trim() : ''}`; + }).join('\n'); + + const claudePrompt = `${systemPrompt}\n\n---\n\nRepository path: ${repoPath}\nToday's date: ${today}\n\n## Existing Skill\n\`\`\`\n${claudeMd}\n\`\`\`\n\n## Installed Skills\n${skillSummaries}\n\n## Current Codebase\n${buildBaseContext(repoPath, scan)}`; + + const claudeResult = await runClaude(claudePrompt, { + timeout: perSkillTimeout, + allowedTools: READ_ONLY_TOOLS, + verbose, + model: options.model || null, + onActivity: verbose ? 
(msg) => claudeSpinner.message(pc.dim(msg)) : null, + }); + + const claudeFiles = parseFileOutput(claudeResult.text); + if (claudeFiles.length > 0) { + allUpdatedFiles.push(...claudeFiles); + claudeSpinner.stop(pc.yellow('CLAUDE.md') + ' — updated'); + } else { + claudeSpinner.stop(pc.dim('CLAUDE.md') + ' — up to date'); + } + } catch (err) { + claudeSpinner.stop(pc.red('CLAUDE.md — failed: ') + err.message); + } } - if (hasMarkers) { - const cleaned = content - .replace(/\n?# >>> aspens doc-sync hook \(do not edit\) >>>[\s\S]*?# <<< aspens doc-sync hook <<<\n?/, '') - .trim(); + // Step 6: Check for uncovered domains + const coveredNames = new Set(existingSkills.map(s => s.name.toLowerCase())); + const uncoveredDomains = (scan.domains || []).filter(d => + !coveredNames.has(d.name.toLowerCase()) + ); - if (!cleaned || cleaned === '#!/bin/sh') { - unlinkSync(hookPath); - console.log(pc.green('\n Removed post-commit hook.\n')); - } else { - writeFileSync(hookPath, cleaned + '\n', 'utf8'); - console.log(pc.green('\n Removed aspens doc-sync from post-commit hook.\n')); + if (uncoveredDomains.length > 0) { + console.log(); + p.log.info(`Potential uncovered domains: ${uncoveredDomains.map(d => pc.yellow(d.name)).join(', ')}`); + p.log.info(pc.dim('Run aspens doc init --mode chunked --domains "' + uncoveredDomains.map(d => d.name).join(',') + '" to generate skills for these.')); + } + + // Step 7: Write results + if (allUpdatedFiles.length === 0) { + console.log(); + p.outro('All skills are up to date'); + return; + } + + // Dry run + if (options.dryRun) { + console.log(); + p.log.info(`Dry run — ${allUpdatedFiles.length} file(s) would be updated:`); + for (const file of allUpdatedFiles) { + console.log(pc.dim(' ') + pc.yellow('~') + ' ' + file.path); } - } else { - console.log(pc.yellow('\n Legacy aspens hook detected (no removal markers).')); - console.log(pc.dim(' Re-install first: aspens doc sync --install-hook')); - console.log(pc.dim(' Or edit manually: 
.git/hooks/post-commit\n')); + p.outro('Dry run complete'); + return; + } + + const results = writeSkillFiles(repoPath, allUpdatedFiles, { force: true }); + + console.log(); + for (const result of results) { + const icon = result.status === 'overwritten' ? pc.yellow('~') : pc.green('+'); + console.log(` ${icon} ${result.path}`); } -} -// --- Helpers --- + // Step 8: Regenerate skill-rules.json + try { + const rules = extractRulesFromSkills(skillsDir); + writeFileSync(join(skillsDir, 'skill-rules.json'), JSON.stringify(rules, null, 2) + '\n'); + p.log.info('Updated skill-rules.json'); + } catch { /* non-fatal */ } -function truncateDiff(diff, maxChars) { - if (diff.length <= maxChars) return diff; - // Cut at the last complete diff hunk boundary to avoid mid-line truncation - const truncated = diff.slice(0, maxChars); - const lastHunkBoundary = truncated.lastIndexOf('\ndiff --git'); - const cutPoint = lastHunkBoundary > 0 ? lastHunkBoundary : maxChars; - return diff.slice(0, cutPoint) + `\n\n... (diff truncated — use Read tool to see full files)`; + console.log(); + p.outro(`${results.length} file(s) refreshed`); } -function truncate(text, maxChars) { - if (text.length <= maxChars) return text; - return text.slice(0, maxChars) + '\n... (truncated)'; +/** + * Convert a skill's activation patterns into a domain object + * compatible with buildDomainContext(). 
+ */ +export function skillToDomain(skill) { + const patterns = parseActivationPatterns(skill.content); + const directories = new Set(); + const files = []; + + for (const pattern of patterns) { + if (pattern.includes('*')) { + // Glob pattern — extract directory prefix + const dir = pattern.replace(/\/\*.*$/, '').replace(/\*.*$/, ''); + if (dir) directories.add(dir); + } else { + // Exact file path + files.push(pattern); + const dir = dirname(pattern); + if (dir && dir !== '.') directories.add(dir); + } + } + + return { + name: skill.name, + directories: [...directories], + files, + }; } diff --git a/src/lib/diff-helpers.js b/src/lib/diff-helpers.js new file mode 100644 index 0000000..96a4bff --- /dev/null +++ b/src/lib/diff-helpers.js @@ -0,0 +1,64 @@ +import { execFileSync } from 'child_process'; + +export function getSelectedFilesDiff(repoPath, files, commits) { + try { + const result = execFileSync('git', ['diff', `HEAD~${commits}..HEAD`, '--', ...files], { + cwd: repoPath, + encoding: 'utf8', + maxBuffer: 10 * 1024 * 1024, + timeout: 30000, + }); + return truncateDiff(result, 80000); + } catch { + return ''; + } +} + +// Build a diff that puts skill-relevant files first so they survive truncation. +// Relevant files get 60k, everything else gets 20k (80k total). +export function buildPrioritizedDiff(fullDiff, relevantFiles) { + const MAX_CHARS = 80000; + if (fullDiff.length <= MAX_CHARS || relevantFiles.length === 0) { + return truncateDiff(fullDiff, MAX_CHARS); + } + + // Split full diff into per-file chunks + const chunks = []; + const parts = fullDiff.split(/(?=^diff --git )/m); + for (const part of parts) { + const m = part.match(/^diff --git a\/(.*?) b\//m); + chunks.push({ file: m ? 
m[1] : '', text: part }); + } + + // Separate relevant from other chunks + const relevantSet = new Set(relevantFiles); + const relevant = chunks.filter(c => relevantSet.has(c.file)); + const others = chunks.filter(c => !relevantSet.has(c.file)); + + // Relevant files get the bulk of the budget; others get a smaller slice + const relevantDiff = truncateDiff(relevant.map(c => c.text).join(''), 60000); + const otherDiff = truncateDiff(others.map(c => c.text).join(''), 20000); + + return (relevantDiff + (otherDiff ? '\n' + otherDiff : '')).trim(); +} + +export function truncateDiff(diff, maxChars) { + if (diff.length <= maxChars) return diff; + // Cut at the last complete diff hunk boundary to avoid mid-line truncation + const truncated = diff.slice(0, maxChars); + const lastHunkBoundary = truncated.lastIndexOf('\ndiff --git'); + let cutPoint; + if (lastHunkBoundary > 0) { + cutPoint = lastHunkBoundary; + } else { + // No hunk boundary found — fall back to last newline to avoid mid-line cut + const lastNewline = truncated.lastIndexOf('\n'); + cutPoint = lastNewline > 0 ? lastNewline : maxChars; + } + return diff.slice(0, cutPoint) + `\n\n... (diff truncated — use Read tool to see full files)`; +} + +export function truncate(text, maxChars) { + if (text.length <= maxChars) return text; + return text.slice(0, maxChars) + '\n... (truncated)'; +} diff --git a/src/lib/errors.js b/src/lib/errors.js index 00cddfc..8918d15 100644 --- a/src/lib/errors.js +++ b/src/lib/errors.js @@ -9,8 +9,8 @@ export class CliError extends Error { * - exitCode: process exit code (default 1) * - logged: if true, the top-level handler won't re-print the message */ - constructor(message, { exitCode = 1, logged = false } = {}) { - super(message); + constructor(message, { exitCode = 1, logged = false, cause } = {}) { + super(message, cause ? 
{ cause } : undefined); this.name = 'CliError'; this.exitCode = exitCode; this.logged = logged; diff --git a/src/lib/git-helpers.js b/src/lib/git-helpers.js new file mode 100644 index 0000000..8092d50 --- /dev/null +++ b/src/lib/git-helpers.js @@ -0,0 +1,55 @@ +import { execFileSync } from 'child_process'; + +export function isGitRepo(repoPath) { + try { + execFileSync('git', ['rev-parse', '--git-dir'], { cwd: repoPath, stdio: 'pipe', timeout: 5000 }); + return true; + } catch { + return false; + } +} + +export function getGitDiff(repoPath, commits) { + // Try requested commit count, fall back to fewer + for (let n = commits; n >= 1; n--) { + try { + const diff = execFileSync('git', ['diff', `HEAD~${n}..HEAD`], { + cwd: repoPath, + encoding: 'utf8', + maxBuffer: 10 * 1024 * 1024, + timeout: 30000, + }); + return { diff, actualCommits: n }; + } catch { + continue; + } + } + return { diff: '', actualCommits: 0 }; +} + +export function getGitLog(repoPath, commits) { + try { + return execFileSync('git', ['log', '--oneline', `-${commits}`], { + cwd: repoPath, + encoding: 'utf8', + maxBuffer: 5 * 1024 * 1024, + timeout: 10000, + }).trim(); + } catch { + return ''; + } +} + +export function getChangedFiles(repoPath, commits) { + try { + const output = execFileSync('git', ['diff', '--name-only', `HEAD~${commits}..HEAD`], { + cwd: repoPath, + encoding: 'utf8', + maxBuffer: 5 * 1024 * 1024, + timeout: 15000, + }); + return output.trim().split('\n').filter(Boolean); + } catch { + return []; + } +} diff --git a/src/lib/git-hook.js b/src/lib/git-hook.js new file mode 100644 index 0000000..86d8e73 --- /dev/null +++ b/src/lib/git-hook.js @@ -0,0 +1,127 @@ +import { join } from 'path'; +import { existsSync, readFileSync, writeFileSync, mkdirSync, unlinkSync, chmodSync } from 'fs'; +import { execSync } from 'child_process'; +import pc from 'picocolors'; +import { CliError } from './errors.js'; + +function resolveAspensPath() { + const cmd = process.platform === 'win32' ? 
'where aspens' : 'which aspens'; + try { + const resolved = execSync(cmd, { + encoding: 'utf8', + timeout: 5000, + stdio: ['pipe', 'pipe', 'pipe'], + }).trim(); + if (resolved && existsSync(resolved)) return resolved; + } catch { /* not in PATH */ } + return 'npx aspens'; +} + +export function installGitHook(repoPath) { + const hookDir = join(repoPath, '.git', 'hooks'); + const hookPath = join(hookDir, 'post-commit'); + + if (!existsSync(join(repoPath, '.git'))) { + throw new CliError('Not a git repository.'); + } + + mkdirSync(hookDir, { recursive: true }); + + const aspensCmd = resolveAspensPath(); + + const hookBlock = ` +# >>> aspens doc-sync hook (do not edit) >>> +__aspens_doc_sync() { + REPO_ROOT="\$(git rev-parse --show-toplevel 2>/dev/null)" || return 0 + REPO_HASH="\$(echo "\$REPO_ROOT" | (shasum 2>/dev/null || sha1sum 2>/dev/null || md5sum 2>/dev/null) | cut -c1-8)" + ASPENS_LOCK="/tmp/aspens-sync-\${REPO_HASH}.lock" + ASPENS_LOG="/tmp/aspens-sync-\${REPO_HASH}.log" + + # Skip aspens-only commits (skills, CLAUDE.md, graph artifacts) + CHANGED="\$(git diff-tree --no-commit-id --name-only -r HEAD 2>/dev/null)" + NON_ASPENS="\$(echo "\$CHANGED" | grep -v '^\\.claude/' | grep -v '^CLAUDE\\.md\$' || true)" + if [ -z "\$NON_ASPENS" ]; then + return 0 + fi + + # Cooldown: skip if last sync was less than 5 minutes ago + if [ -f "\$ASPENS_LOCK" ]; then + LAST_RUN=\$(cat "\$ASPENS_LOCK" 2>/dev/null || echo 0) + NOW=\$(date +%s) + if [ \$((NOW - LAST_RUN)) -lt 300 ]; then + return 0 + fi + fi + echo \$(date +%s) > "\$ASPENS_LOCK" + + # Clean up stale lock files older than 1 hour + find /tmp -maxdepth 1 -name "aspens-sync-*.lock" -mmin +60 -exec rm -f {} \\; 2>/dev/null + + # Truncate log if over 200 lines + if [ -f "\$ASPENS_LOG" ] && [ "\$(wc -l < "\$ASPENS_LOG" 2>/dev/null || echo 0)" -gt 200 ]; then + tail -100 "\$ASPENS_LOG" > "\$ASPENS_LOG.tmp" && mv "\$ASPENS_LOG.tmp" "\$ASPENS_LOG" + fi + + # Run fully detached so git returns immediately (POSIX-compatible — no
disown needed) + (echo "[sync] \$(date '+%Y-%m-%d %H:%M:%S') started" >> "\$ASPENS_LOG" && ${aspensCmd} doc sync --commits 1 "\$REPO_ROOT" >> "\$ASPENS_LOG" 2>&1; echo "[sync] \$(date '+%Y-%m-%d %H:%M:%S') finished (exit \$?)" >> "\$ASPENS_LOG") </dev/null >/dev/null 2>&1 & +} +__aspens_doc_sync +# <<< aspens doc-sync hook <<< +`; + + // Check for existing hook + if (existsSync(hookPath)) { + const existing = readFileSync(hookPath, 'utf8'); + if (existing.includes('aspens doc-sync hook') || existing.includes('aspens doc sync')) { + console.log(pc.yellow('\n Hook already installed.\n')); + return; + } + // Append to existing hook (outside shebang) + writeFileSync(hookPath, existing + '\n' + hookBlock, 'utf8'); + console.log(pc.green('\n Appended aspens doc-sync to existing post-commit hook.\n')); + } else { + writeFileSync(hookPath, '#!/bin/sh\n' + hookBlock, 'utf8'); + chmodSync(hookPath, 0o755); + console.log(pc.green('\n Installed post-commit hook.\n')); + } + + console.log(pc.dim(' Skills will auto-update after every commit.')); + console.log(pc.dim(' Log: /tmp/aspens-sync-*.log')); + console.log(pc.dim(' Remove with: aspens doc sync --remove-hook\n')); +} + +export function removeGitHook(repoPath) { + const hookPath = join(repoPath, '.git', 'hooks', 'post-commit'); + + if (!existsSync(hookPath)) { + console.log(pc.yellow('\n No post-commit hook found.\n')); + return; + } + + const content = readFileSync(hookPath, 'utf8'); + const hasMarkers = content.includes('# >>> aspens doc-sync hook'); + const hasLegacy = !hasMarkers && content.includes('aspens doc sync'); + + if (!hasMarkers && !hasLegacy) { + console.log(pc.yellow('\n Post-commit hook does not contain aspens.\n')); + return; + } + + if (hasMarkers) { + const cleaned = content + .replace(/\n?# >>> aspens doc-sync hook \(do not edit\) >>>[\s\S]*?# <<< aspens doc-sync hook <<<\n?/, '') + .trim(); + + if (!cleaned || cleaned === '#!/bin/sh') { + unlinkSync(hookPath); + console.log(pc.green('\n Removed post-commit 
hook.\n')); + } else { + writeFileSync(hookPath, cleaned + '\n', 'utf8'); + console.log(pc.green('\n Removed aspens doc-sync from post-commit hook.\n')); + } + } else { + console.log(pc.yellow('\n Legacy aspens hook detected (no removal markers).')); + console.log(pc.dim(' Re-install first: aspens doc sync --install-hook')); + console.log(pc.dim(' Or edit manually: .git/hooks/post-commit\n')); + } +} diff --git a/src/lib/graph-persistence.js b/src/lib/graph-persistence.js index 815db01..c2e83a2 100644 --- a/src/lib/graph-persistence.js +++ b/src/lib/graph-persistence.js @@ -495,5 +495,30 @@ export function persistGraphArtifacts(repoPath, rawGraph) { writeCodeMap(repoPath, serialized); const index = generateGraphIndex(serialized); saveGraphIndex(repoPath, index); + ensureGraphGitignore(repoPath); return serialized; } + +/** + * Ensure .claude/graph artifacts are gitignored to prevent the post-commit + * loop where graph.json's gitHash/timestamp changes every sync → new commit + * → sync runs again → repeat. 
+ */ +function ensureGraphGitignore(repoPath) { + const gitignorePath = join(repoPath, '.gitignore'); + const entries = [ + '.claude/graph.json', + '.claude/graph-index.json', + '.claude/code-map.md', + ]; + + let existing = ''; + try { existing = readFileSync(gitignorePath, 'utf8'); } catch { /* no .gitignore yet */ } + + const existingLines = new Set(existing.split('\n').map(l => l.trim())); + const toAdd = entries.filter(e => !existingLines.has(e)); + if (toAdd.length === 0) return; + + const block = '\n# aspens graph artifacts (generated — do not commit)\n' + toAdd.join('\n') + '\n'; + writeFileSync(gitignorePath, existing + block, 'utf8'); +} diff --git a/src/lib/skill-reader.js b/src/lib/skill-reader.js index c8da0be..8b9b0e8 100644 --- a/src/lib/skill-reader.js +++ b/src/lib/skill-reader.js @@ -90,6 +90,38 @@ export function parseActivationPatterns(content) { return patterns; } +// Path segments too generic to use for skill matching +export const GENERIC_PATH_SEGMENTS = new Set([ + 'src', 'app', 'lib', 'api', 'v1', 'v2', 'components', 'services', + 'utils', 'helpers', 'common', 'core', 'config', 'middleware', + 'models', 'types', 'hooks', 'pages', 'routes', 'tests', 'test', + 'public', 'assets', 'styles', 'scripts', +]); + +/** + * Extract the ## Activation section from skill content, lowercased. + * Uses the robust lookahead regex that stops at --- or another ## heading. + */ +export function getActivationBlock(content) { + if (!content || typeof content !== 'string') return ''; + const match = content.match(/## Activation[\s\S]*?(?=\n---|\n## (?!Activation)|$)/); + return match ? match[0].toLowerCase() : ''; +} + +/** + * Check if a file path matches an activation block. + * Tests filename and meaningful path segments (skipping generic ones). 
+ */ +export function fileMatchesActivation(filePath, activationBlock, genericSegments = GENERIC_PATH_SEGMENTS) { + if (!filePath || !activationBlock) return false; + const lower = filePath.toLowerCase(); + const parts = lower.split('/').filter(Boolean); + const name = parts.pop(); + if (name && activationBlock.includes(name)) return true; + const segs = parts.filter(seg => !genericSegments.has(seg) && seg.length > 2); + return segs.some(seg => activationBlock.includes(seg)); +} + /** * Extract keywords from ## Activation Keywords: line. * Returns string[] or empty array. diff --git a/src/lib/timeout.js b/src/lib/timeout.js new file mode 100644 index 0000000..d737557 --- /dev/null +++ b/src/lib/timeout.js @@ -0,0 +1,26 @@ +/** + * Resolve timeout from multiple sources with consistent priority: + * --timeout flag > ASPENS_TIMEOUT env var > fallback default + * + * @param {number|undefined} flagValue Value from --timeout option + * @param {number} fallbackSeconds Default if neither flag nor env set + * @returns {{ timeoutMs: number, envWarning: boolean }} + */ +export function resolveTimeout(flagValue, fallbackSeconds) { + // --timeout flag wins + if (typeof flagValue === 'number' && flagValue > 0) { + return { timeoutMs: flagValue * 1000, envWarning: false }; + } + + // ASPENS_TIMEOUT env var + if (process.env.ASPENS_TIMEOUT) { + const parsed = parseInt(process.env.ASPENS_TIMEOUT, 10); + if (Number.isFinite(parsed) && parsed > 0) { + return { timeoutMs: parsed * 1000, envWarning: false }; + } + // Invalid env value — fall through to default, signal a warning + return { timeoutMs: fallbackSeconds * 1000, envWarning: true }; + } + + return { timeoutMs: fallbackSeconds * 1000, envWarning: false }; +} diff --git a/src/prompts/add-skill.md b/src/prompts/add-skill.md new file mode 100644 index 0000000..42a86ba --- /dev/null +++ b/src/prompts/add-skill.md @@ -0,0 +1,31 @@ +You are a skill file generator for Claude Code. 
Your job is to create a **skill file** from a reference document. + +{{skill-format}} + +## Your task + +You are given: +1. A skill name +2. A reference document containing information about a topic, workflow, or convention +3. Read-only tools (Read, Glob, Grep) to explore the codebase for more context + +**How to work:** +1. Read the reference document to understand the topic +2. Use Read/Glob/Grep to find related files, patterns, or conventions in the codebase +3. Synthesize a skill file that captures the essential knowledge an AI assistant needs + +## Output format + +Return exactly one file: + + +[full skill content] + + +## Rules + +1. **Extract actionable knowledge.** Focus on what an AI needs to write correct code or follow correct processes — not background reading. +2. **Be specific.** Use actual file paths, commands, and patterns from the reference doc and codebase. +3. **Write activation patterns.** Include file patterns and keywords that should trigger this skill. +4. **Keep it concise.** 30-60 lines. Distill the reference document down to its essential rules and patterns. +5. **Use the exact output format.** One `` tag with the path shown above. diff --git a/src/prompts/doc-init-claudemd.md b/src/prompts/doc-init-claudemd.md index 2766e89..185c132 100644 --- a/src/prompts/doc-init-claudemd.md +++ b/src/prompts/doc-init-claudemd.md @@ -22,3 +22,5 @@ Return exactly one file: 2. Reference skills by their path (e.g., `.claude/skills/billing/skill.md`). 3. Include actual commands from the scan data, not placeholders. 4. Do NOT duplicate what's already in the skills — just reference them. +5. Always include a `## Behavior` section with this rule verbatim: + - **Verify before claiming** — Never state that something is configured, running, scheduled, or complete without confirming it first. If you haven't verified it in this session, say so rather than assuming. 
diff --git a/src/prompts/doc-sync-refresh.md b/src/prompts/doc-sync-refresh.md new file mode 100644 index 0000000..142fe15 --- /dev/null +++ b/src/prompts/doc-sync-refresh.md @@ -0,0 +1,39 @@ +You are a documentation refresher for software projects. Your job is to review and update an existing **skill file** so it accurately reflects the current codebase. + +{{skill-format}} + +## Your task + +You are given: +1. An existing skill file that may be stale or incomplete +2. The current codebase context (file listings, source code samples) for this skill's domain +3. Read-only tools (Read, Glob, Grep) to explore the codebase for more context + +**How to work:** +1. Read the existing skill carefully — understand what it claims +2. Use Read/Glob/Grep to verify every claim: do referenced files still exist? Are described patterns still accurate? Are key concepts still current? +3. Check for new files, patterns, or conventions in the domain that the skill doesn't cover +4. Update the skill to reflect reality — fix stale references, add new patterns, remove deleted files + +## Output format + +Return ONLY the files that need updating, wrapped in XML tags: + + +[full updated skill content — not a patch, the complete file] + + +**Important:** +- Only output files that actually need changes. If the skill is already accurate, output nothing. +- Output the COMPLETE file content, not a diff or patch. +- Use `` and `` tags exactly as shown. + +## Rules + +1. **Preserve hand-written instructions.** Any explicitly written conventions, gotchas, or team decisions must be kept — these were added for a reason. +2. **Be specific.** Use actual file paths and patterns from the codebase, not placeholders. +3. **Remove stale references.** If a file no longer exists or a pattern has changed, update or remove it. +4. **Add missing coverage.** If new files, patterns, or conventions exist in the domain, add them. +5. 
**Update timestamps.** Change `Last Updated` to today's date on any skill you modify. +6. **Keep skills concise.** 30-60 lines. Every line earns its place. +7. **Don't fabricate.** Only document what you can verify exists in the codebase right now. diff --git a/src/prompts/doc-sync.md b/src/prompts/doc-sync.md index 3e3d2b1..5c75c78 100644 --- a/src/prompts/doc-sync.md +++ b/src/prompts/doc-sync.md @@ -12,10 +12,11 @@ You are given: **How to work:** 1. Read the git diff to understand what changed 2. Read the existing skills that are affected -3. If needed, use Read/Glob/Grep to understand the new code in context -4. Update only the skills that need changes — don't rewrite skills for unrelated domains -5. If a change introduces a new domain that has no skill yet, create one -6. Update CLAUDE.md if the changes affect repo-level structure, commands, or conventions +3. **If the diff ends with `... (diff truncated)`**, use the Read tool to read the full content of the changed files listed in the Changed Files section — do not assume changes are trivial just because the diff is cut off +4. If needed, use Read/Glob/Grep to understand the new code in context +5. Update only the skills that need changes — don't rewrite skills for unrelated domains +6. If a change introduces a new domain that has no skill yet, create one +7. 
Update CLAUDE.md if the changes affect repo-level structure, commands, or conventions ## Output format diff --git a/tests/add-skill.test.js b/tests/add-skill.test.js new file mode 100644 index 0000000..9ad3a5e --- /dev/null +++ b/tests/add-skill.test.js @@ -0,0 +1,81 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { existsSync, readFileSync, rmSync, mkdirSync } from 'fs'; +import { join } from 'path'; +import { addCommand } from '../src/commands/add.js'; + +const TEST_DIR = join(import.meta.dirname, 'tmp-add-skill'); + +let originalCwd; + +beforeEach(() => { + if (existsSync(TEST_DIR)) rmSync(TEST_DIR, { recursive: true }); + mkdirSync(TEST_DIR, { recursive: true }); + originalCwd = process.cwd(); + process.chdir(TEST_DIR); +}); + +afterEach(() => { + process.chdir(originalCwd); + if (existsSync(TEST_DIR)) rmSync(TEST_DIR, { recursive: true }); +}); + +describe('addCommand skill scaffold', () => { + it('creates skill.md with correct frontmatter', async () => { + await addCommand('skill', 'my-convention', {}); + const skillPath = join(TEST_DIR, '.claude', 'skills', 'my-convention', 'skill.md'); + expect(existsSync(skillPath)).toBe(true); + + const content = readFileSync(skillPath, 'utf8'); + expect(content).toContain('name: my-convention'); + expect(content).toContain('description: TODO'); + expect(content).toContain('## Activation'); + expect(content).toContain('Keywords: my-convention'); + }); + + it('sanitizes names with spaces', async () => { + await addCommand('skill', 'my skill', {}); + const skillPath = join(TEST_DIR, '.claude', 'skills', 'my-skill', 'skill.md'); + expect(existsSync(skillPath)).toBe(true); + + const content = readFileSync(skillPath, 'utf8'); + expect(content).toContain('name: my-skill'); + }); + + it('sanitizes names with uppercase', async () => { + await addCommand('skill', 'My-Convention', {}); + const skillPath = join(TEST_DIR, '.claude', 'skills', 'my-convention', 'skill.md'); + 
expect(existsSync(skillPath)).toBe(true); + }); + + it('throws CliError for invalid names', async () => { + await expect(addCommand('skill', '!!!', {})).rejects.toThrow('Invalid skill name'); + }); + + it('detects duplicate and skips', async () => { + await addCommand('skill', 'existing', {}); + const skillPath = join(TEST_DIR, '.claude', 'skills', 'existing', 'skill.md'); + const firstContent = readFileSync(skillPath, 'utf8'); + + // Second call should not overwrite + await addCommand('skill', 'existing', {}); + const secondContent = readFileSync(skillPath, 'utf8'); + expect(secondContent).toBe(firstContent); + }); + + it('creates skill in correct directory structure', async () => { + await addCommand('skill', 'test-skill', {}); + const skillDir = join(TEST_DIR, '.claude', 'skills', 'test-skill'); + expect(existsSync(skillDir)).toBe(true); + expect(existsSync(join(skillDir, 'skill.md'))).toBe(true); + }); + + it('includes today date in Last Updated', async () => { + await addCommand('skill', 'dated', {}); + const content = readFileSync( + join(TEST_DIR, '.claude', 'skills', 'dated', 'skill.md'), + 'utf8', + ); + const today = new Date().toISOString().split('T')[0]; + expect(content).toContain(`**Last Updated:** ${today}`); + }); +}); diff --git a/tests/git-hook.test.js b/tests/git-hook.test.js index 2ec8519..1d44148 100644 --- a/tests/git-hook.test.js +++ b/tests/git-hook.test.js @@ -1,7 +1,7 @@ import { describe, it, expect, beforeEach, afterAll } from 'vitest'; import { existsSync, readFileSync, rmSync, mkdirSync, writeFileSync, statSync } from 'fs'; import { join } from 'path'; -import { installGitHook, removeGitHook } from '../src/commands/doc-sync.js'; +import { installGitHook, removeGitHook } from '../src/lib/git-hook.js'; const TEST_DIR = join(import.meta.dirname, 'tmp-hook'); const HOOKS_DIR = join(TEST_DIR, '.git', 'hooks'); diff --git a/tests/skill-mapper.test.js b/tests/skill-mapper.test.js new file mode 100644 index 0000000..2e8781e --- /dev/null +++ 
b/tests/skill-mapper.test.js @@ -0,0 +1,156 @@ +import { describe, it, expect } from 'vitest'; +import { getActivationBlock, fileMatchesActivation, GENERIC_PATH_SEGMENTS } from '../src/lib/skill-reader.js'; +import { skillToDomain } from '../src/commands/doc-sync.js'; + +describe('getActivationBlock', () => { + it('extracts and lowercases the activation section', () => { + const content = `--- +name: test +--- + +## Activation + +- \`src/lib/Scanner.js\` +- \`src/commands/Doc-Init.js\` + +Keywords: scanning + +--- + +You are working on scanning.`; + + const block = getActivationBlock(content); + expect(block).toContain('scanner.js'); + expect(block).toContain('doc-init.js'); + expect(block).not.toContain('Scanner.js'); // lowercased + }); + + it('stops at next ## heading', () => { + const content = `## Activation + +- \`src/lib/runner.js\` + +## Key Files + +- \`src/lib/other.js\``; + + const block = getActivationBlock(content); + expect(block).toContain('runner.js'); + expect(block).not.toContain('other.js'); + }); + + it('returns empty string when no activation section', () => { + expect(getActivationBlock('# Just a title\nSome content')).toBe(''); + }); + + it('returns empty string for null/undefined input', () => { + expect(getActivationBlock(null)).toBe(''); + expect(getActivationBlock(undefined)).toBe(''); + expect(getActivationBlock('')).toBe(''); + }); +}); + +describe('fileMatchesActivation', () => { + const block = '## activation\n- `src/lib/scanner.js`\n- `src/commands/doc-init.js`\nkeywords: scanning, graph-builder'; + + it('matches by filename', () => { + expect(fileMatchesActivation('src/lib/scanner.js', block)).toBe(true); + }); + + it('matches case-insensitively', () => { + expect(fileMatchesActivation('src/lib/Scanner.js', block)).toBe(true); + }); + + it('matches by meaningful path segment', () => { + // "scanner" appears in the block as part of "scanner.js" + expect(fileMatchesActivation('other/scanner/index.js', block)).toBe(true); + }); + + 
it('does not match generic segments', () => { + // "src" and "lib" are generic — they shouldn't trigger a match on their own + const narrowBlock = '## activation\nkeywords: specific-thing'; + expect(fileMatchesActivation('src/lib/unrelated.js', narrowBlock)).toBe(false); + }); + + it('filters segments shorter than 3 chars', () => { + const narrowBlock = '## activation\n- `xx/thing.js`'; + // "xx" is only 2 chars, should be ignored + expect(fileMatchesActivation('xx/other.js', narrowBlock)).toBe(false); + }); + + it('returns false when nothing matches', () => { + expect(fileMatchesActivation('totally/unrelated/file.py', block)).toBe(false); + }); + + it('accepts custom generic segments set', () => { + const customGeneric = new Set(['custom']); + const narrowBlock = '## activation\n- `custom/thing.js`'; + // "custom" is in the generic set, so only "thing.js" or meaningful segments should match + expect(fileMatchesActivation('custom/other.js', narrowBlock, customGeneric)).toBe(false); + expect(fileMatchesActivation('custom/thing.js', narrowBlock, customGeneric)).toBe(true); + }); + + it('returns false for empty filePath', () => { + expect(fileMatchesActivation('', block)).toBe(false); + }); + + it('returns false for empty activationBlock', () => { + expect(fileMatchesActivation('src/lib/scanner.js', '')).toBe(false); + }); + + it('returns false for null/undefined inputs', () => { + expect(fileMatchesActivation(null, block)).toBe(false); + expect(fileMatchesActivation('src/lib/scanner.js', null)).toBe(false); + }); +}); + +describe('skillToDomain', () => { + function makeSkill(name, activationLines) { + const content = `--- +name: ${name} +description: test +--- + +## Activation + +${activationLines.map(l => `- \`${l}\``).join('\n')} + +--- + +Content here.`; + return { name, content }; + } + + it('extracts directories from glob patterns', () => { + const domain = skillToDomain(makeSkill('test', ['src/lib/*.js', 'src/commands/**/*'])); + 
expect(domain.directories).toContain('src/lib'); + expect(domain.directories).toContain('src/commands'); + expect(domain.files).toHaveLength(0); + }); + + it('extracts files and directories from exact paths', () => { + const domain = skillToDomain(makeSkill('test', ['src/lib/scanner.js', 'bin/cli.js'])); + expect(domain.files).toContain('src/lib/scanner.js'); + expect(domain.files).toContain('bin/cli.js'); + expect(domain.directories).toContain('src/lib'); + expect(domain.directories).toContain('bin'); + }); + + it('returns empty arrays for skills with no activation patterns', () => { + const skill = { name: 'empty', content: '---\nname: empty\n---\n\nNo activation section.' }; + const domain = skillToDomain(skill); + expect(domain.directories).toHaveLength(0); + expect(domain.files).toHaveLength(0); + }); + + it('deduplicates directories', () => { + const domain = skillToDomain(makeSkill('test', ['src/lib/a.js', 'src/lib/b.js'])); + const libCount = domain.directories.filter(d => d === 'src/lib').length; + expect(libCount).toBe(1); + }); + + it('sets the skill name on the domain', () => { + const domain = skillToDomain(makeSkill('my-skill', ['src/lib/*.js'])); + expect(domain.name).toBe('my-skill'); + }); +}); diff --git a/tests/timeout.test.js b/tests/timeout.test.js new file mode 100644 index 0000000..9cad030 --- /dev/null +++ b/tests/timeout.test.js @@ -0,0 +1,73 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { resolveTimeout } from '../src/lib/timeout.js'; + +describe('resolveTimeout', () => { + let savedEnv; + + beforeEach(() => { + savedEnv = process.env.ASPENS_TIMEOUT; + delete process.env.ASPENS_TIMEOUT; + }); + + afterEach(() => { + if (savedEnv !== undefined) { + process.env.ASPENS_TIMEOUT = savedEnv; + } else { + delete process.env.ASPENS_TIMEOUT; + } + }); + + it('flag wins over env var', () => { + process.env.ASPENS_TIMEOUT = '999'; + const { timeoutMs } = resolveTimeout(60, 120); + expect(timeoutMs).toBe(60000); + 
}); + + it('env var wins over fallback', () => { + process.env.ASPENS_TIMEOUT = '200'; + const { timeoutMs } = resolveTimeout(undefined, 120); + expect(timeoutMs).toBe(200000); + }); + + it('falls back to default when neither flag nor env set', () => { + const { timeoutMs, envWarning } = resolveTimeout(undefined, 300); + expect(timeoutMs).toBe(300000); + expect(envWarning).toBe(false); + }); + + it('converts seconds to milliseconds', () => { + const { timeoutMs } = resolveTimeout(45, 120); + expect(timeoutMs).toBe(45000); + }); + + it('returns envWarning when env var is NaN', () => { + process.env.ASPENS_TIMEOUT = 'abc'; + const { timeoutMs, envWarning } = resolveTimeout(undefined, 120); + expect(envWarning).toBe(true); + expect(timeoutMs).toBe(120000); + }); + + it('returns envWarning when env var is negative', () => { + process.env.ASPENS_TIMEOUT = '-5'; + const { timeoutMs, envWarning } = resolveTimeout(undefined, 120); + expect(envWarning).toBe(true); + expect(timeoutMs).toBe(120000); + }); + + it('returns envWarning when env var is zero', () => { + process.env.ASPENS_TIMEOUT = '0'; + const { timeoutMs, envWarning } = resolveTimeout(undefined, 120); + expect(envWarning).toBe(true); + expect(timeoutMs).toBe(120000); + }); + + it('ignores flag when value is not a positive number', () => { + const { timeoutMs } = resolveTimeout(0, 120); + expect(timeoutMs).toBe(120000); + }); + + it('ignores flag when value is negative', () => { + const { timeoutMs } = resolveTimeout(-10, 120); + expect(timeoutMs).toBe(120000); + }); +});