diff --git a/.coverage b/.coverage
index 4ad650f..f6286a8 100644
Binary files a/.coverage and b/.coverage differ
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 085ad95..0e3188d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -6,6 +6,10 @@ on:
pull_request:
branches: [main]
+permissions:
+ contents: read
+ pull-requests: write
+
jobs:
test:
runs-on: slopometry-linux-x64
@@ -27,3 +31,80 @@ jobs:
- name: Run tests
run: uv run pytest --cov-report=xml
+ qpe:
+ name: Slopometry QPE
+ runs-on: ubuntu-latest
+ if: github.event_name == 'pull_request'
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up uv
+ uses: astral-sh/setup-uv@v4
+
+ - name: Install slopometry from checkout
+ run: uv tool install .
+
+ - name: Run QPE check
+ id: qpe
+ run: |
+ slopometry summoner qpe
+ echo "json=$(slopometry summoner qpe --json | jq -c)" >> $GITHUB_OUTPUT
+
+ - name: Comment on PR
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const qpe = ${{ steps.qpe.outputs.json }};
+ const smells = Object.entries(qpe.smell_counts)
+ .filter(([_, v]) => v > 0)
+ .sort((a, b) => b[1] - a[1])
+ .map(([k, v]) => `| ${k.replace(/_/g, ' ')} | ${v} |`)
+ .join('\n');
+
+ const body = `## 📊 Slopometry QPE Report
+
+ **QPE Score: ${qpe.qpe.toFixed(4)}**
+
+ | Metric | Value | Description |
+ |--------|-------|-------------|
+ | MI (normalized) | ${qpe.mi_normalized.toFixed(3)} | Maintainability Index / 100 |
+ | Smell Penalty | ${qpe.smell_penalty.toFixed(3)} | Weighted code smell deduction |
+ | Adjusted Quality | ${qpe.adjusted_quality.toFixed(3)} | MI × (1 - smell_penalty) |
+ | Effort Factor | ${qpe.effort_factor.toFixed(2)} | log(Halstead Effort + 1) |
+
+
+ Code Smell Breakdown
+
+ | Smell | Count |
+ |-------|-------|
+ ${smells}
+
+
+
+ > Higher QPE = better quality per unit effort`;
+
+ const { data: comments } = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const existing = comments.find(c => c.body.includes('Slopometry QPE Report'));
+
+ if (existing) {
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: existing.id,
+ body
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body
+ });
+ }
+
diff --git a/coverage.xml b/coverage.xml
index 79fffd1..314758b 100644
--- a/coverage.xml
+++ b/coverage.xml
@@ -1,5 +1,5 @@
-
+
@@ -16,60 +16,60 @@
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
@@ -78,7 +78,7 @@
-
+
@@ -87,130 +87,261 @@
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
-
+
+
+
-
+
+
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
@@ -249,367 +380,367 @@
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
+
@@ -626,78 +757,78 @@
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
@@ -769,447 +900,469 @@
-
+
-
+
+
+
+
-
+
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
+
-
-
-
-
-
-
-
+
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
+
-
+
+
-
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
-
+
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
-
+
+
-
-
-
-
-
-
-
-
+
+
+
+
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
+
-
-
-
+
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
-
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
-
-
-
+
+
+
+
+
-
+
-
+
@@ -1219,85 +1372,85 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
+
@@ -1320,71 +1473,71 @@
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
-
-
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
+
@@ -1392,7 +1545,7 @@
-
+
@@ -1439,7 +1592,7 @@
-
+
@@ -1456,36 +1609,36 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
-
+
@@ -1494,176 +1647,174 @@
-
-
-
-
-
+
+
+
+
+
-
-
+
+
+
-
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
-
-
-
+
+
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
+
+
+
-
-
-
@@ -1675,41 +1826,41 @@
-
+
-
+
+
-
+
-
-
+
+
+
-
-
-
-
+
+
+
+
-
-
+
+
+
-
-
-
@@ -1721,51 +1872,50 @@
-
+
+
-
-
+
+
-
-
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
-
-
@@ -1773,117 +1923,123 @@
+
-
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -1892,71 +2048,71 @@
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
+
@@ -1978,175 +2134,175 @@
-
+
-
+
-
-
-
-
+
+
+
+
-
+
-
+
-
+
-
-
-
+
+
+
-
+
-
+
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
+
-
-
+
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
+
-
+
-
-
-
+
+
+
-
+
-
+
-
-
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
+
+
-
-
-
-
+
+
+
+
-
+
-
+
-
-
+
+
-
-
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
+
@@ -2311,17 +2467,17 @@
-
+
-
+
-
+
-
-
+
+
@@ -2334,14 +2490,10 @@
-
-
-
-
-
-
-
-
+
+
+
+
@@ -2350,665 +2502,701 @@
-
-
-
+
+
+
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
-
+
-
-
+
-
-
-
+
+
+
-
-
-
-
-
+
+
-
-
-
-
-
+
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
-
+
+
+
+
+
+
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
+
-
-
-
+
+
+
+
-
-
+
+
+
+
-
-
+
+
-
-
+
+
-
-
-
+
-
-
+
+
-
-
+
+
-
-
+
-
-
+
+
+
+
-
-
+
-
-
-
+
+
+
+
+
-
-
-
-
+
+
-
-
+
+
+
-
+
-
-
+
-
-
+
+
-
-
+
+
-
-
+
+
-
-
+
+
-
-
+
+
-
+
+
+
-
-
+
+
-
-
+
+
+
+
-
+
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
-
+
-
-
-
-
-
+
+
+
+
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
-
+
+
+
-
-
-
+
+
+
+
+
+
-
+
+
+
+
+
-
-
+
+
+
+
-
+
-
-
+
+
+
-
-
-
+
+
+
-
-
-
-
-
-
-
-
+
+
-
-
+
-
-
-
-
-
+
+
+
+
+
+
-
+
-
-
-
+
+
+
+
-
-
-
-
-
-
-
+
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
+
+
+
-
-
-
-
+
+
+
+
-
-
-
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
+
+
+
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
@@ -3020,40 +3208,40 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
@@ -3068,7 +3256,7 @@
-
+
@@ -3084,152 +3272,152 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
@@ -3237,99 +3425,119 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
@@ -3348,9 +3556,9 @@
-
-
-
+
+
+
@@ -3375,299 +3583,299 @@
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
+
+
+
+
+
-
+
+
+
-
+
+
+
+
+
+
+
+
-
+
+
+
-
-
-
-
+
-
-
-
-
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
+
+
-
-
-
-
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
@@ -3675,22 +3883,22 @@
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
@@ -3710,9 +3918,9 @@
-
-
-
+
+
+
@@ -3725,7 +3933,7 @@
-
+
@@ -3747,7 +3955,7 @@
-
+
@@ -3763,496 +3971,476 @@
-
-
-
-
+
-
+
-
+
-
+
+
-
+
-
+
-
-
+
+
+
-
-
+
-
-
+
+
+
-
+
-
-
-
+
+
+
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
+
+
-
+
-
+
-
-
-
-
+
+
+
+
+
+
+
+
-
+
-
+
-
-
-
-
+
+
-
+
-
+
+
+
+
+
+
-
+
+
-
-
-
+
+
+
+
+
+
+
+
-
-
-
+
-
-
+
-
+
+
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
+
-
+
+
+
+
-
+
+
+
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
-
-
-
+
+
-
-
-
-
-
+
-
+
+
+
-
-
-
-
+
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
-
-
+
+
-
+
-
-
+
+
+
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
+
+
-
+
-
+
+
+
+
+
-
-
-
-
+
-
-
-
+
+
+
+
+
+
+
-
+
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
-
+
+
-
-
-
+
-
+
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
-
-
-
-
-
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
+
+
-
-
-
-
-
-
-
+
+
+
-
-
-
-
+
+
+
+
-
-
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
+
-
+
-
-
-
+
+
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
-
+
-
-
-
+
+
@@ -4263,281 +4451,382 @@
+
-
-
+
+
-
-
-
+
+
-
-
-
-
+
+
+
+
+
+
+
+
-
+
+
+
+
+
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
-
-
-
-
+
+
-
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
-
-
-
-
-
-
-
+
+
-
+
-
-
-
-
-
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
+
+
+
+
-
-
-
+
+
-
+
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
-
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
-
-
-
-
-
-
+
+
+
+
+
-
-
-
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
-
-
-
-
-
+
+
-
+
-
+
+
-
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
-
+
@@ -4548,426 +4837,436 @@
+
-
+
-
-
+
-
+
+
+
+
-
+
-
-
-
-
+
+
+
-
+
-
-
+
-
+
+
-
-
-
+
+
+
-
-
-
-
-
-
+
+
+
-
+
+
-
+
-
-
+
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
+
-
-
-
+
+
-
-
+
-
+
+
-
+
+
-
-
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
-
-
-
-
+
+
+
+
-
-
-
-
+
+
+
+
-
-
+
-
-
+
+
-
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
-
-
-
+
-
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
@@ -4975,7 +5274,7 @@
-
+
@@ -4983,7 +5282,7 @@
-
+
@@ -4992,7 +5291,7 @@
-
+
@@ -5000,7 +5299,7 @@
-
+
@@ -5017,14 +5316,14 @@
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
@@ -5048,37 +5347,37 @@
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
@@ -5115,121 +5414,116 @@
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
+
+
-
+
-
+
-
+
+
-
-
+
+
-
-
+
-
+
-
+
-
+
+
-
-
-
+
+
-
+
-
+
+
-
-
-
-
-
+
+
+
+
-
-
+
+
-
-
+
-
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
-
+
+
-
-
@@ -5246,64 +5540,65 @@
+
+
-
+
-
+
-
+
-
-
+
+
-
+
+
-
-
+
+
-
-
+
-
+
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
+
-
-
+
+
+
-
-
@@ -5312,128 +5607,128 @@
+
+
+
-
-
-
-
+
+
+
+
-
-
-
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
-
+
+
-
-
+
-
+
-
+
-
-
+
-
+
+
+
-
-
-
-
+
+
+
-
-
+
+
+
-
-
-
-
-
+
+
+
+
-
+
-
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
+
-
+
-
+
-
-
-
+
+
+
-
-
-
-
-
+
+
+
+
+
-
@@ -5454,65 +5749,65 @@
+
-
+
-
+
-
+
-
-
-
-
-
+
+
+
+
+
-
+
-
+
-
+
-
+
-
-
-
-
-
+
+
+
+
+
-
+
-
+
-
+
-
+
-
@@ -5520,205 +5815,258 @@
+
-
+
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
-
-
-
-
+
+
+
+
+
+
-
+
-
+
-
+
-
-
+
+
-
+
-
-
+
+
-
+
-
-
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -5726,7 +6074,7 @@
-
+
@@ -5741,7 +6089,7 @@
-
+
@@ -5753,7 +6101,7 @@
-
+
@@ -5770,201 +6118,199 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
-
-
+
-
-
-
-
-
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -5972,10 +6318,10 @@
-
-
-
-
+
+
+
+
@@ -5988,38 +6334,38 @@
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -6127,43 +6473,43 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
@@ -6172,11 +6518,11 @@
-
-
-
-
-
+
+
+
+
+
@@ -6192,7 +6538,7 @@
-
+
@@ -6202,7 +6548,7 @@
-
+
@@ -6211,17 +6557,17 @@
-
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
@@ -6243,41 +6589,41 @@
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -6349,67 +6695,67 @@
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
+
+
-
+
-
+
-
-
-
-
+
+
+
+
-
-
-
-
+
+
+
+
-
-
-
-
+
+
+
+
-
+
@@ -6427,238 +6773,238 @@
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
+
-
-
+
-
+
+
+
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
-
-
+
+
-
-
+
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
diff --git a/pyproject.toml b/pyproject.toml
index 8897d18..e6762e7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "slopometry"
-version = "20260113-1"
+version = "20260114-1"
description = "Opinionated code quality metrics for code agents and humans"
readme = "README.md"
requires-python = ">=3.13"
@@ -40,6 +40,17 @@ dependencies = [
"tiktoken>=0.7.0",
]
+[project.optional-dependencies]
+dev = [
+ "ruff>=0.0.244",
+ "isort>=5.12.0",
+ "pre-commit>=4.2.0",
+ "pytest>=8.3.5",
+ "pytest-cov>=4.1.0",
+ "types-toml>=0.10.8.20240310",
+ "pyrefly>=0.46.0",
+]
+
[project.urls]
Homepage = "https://github.com/TensorTemplar/slopometry"
Repository = "https://github.com/TensorTemplar/slopometry.git"
@@ -114,15 +125,3 @@ precision = 2
[tool.coverage.html]
directory = "htmlcov"
-
-[dependency-groups]
-dev = [
- "mypy>=1.0.0",
- "ruff>=0.0.244",
- "isort>=5.12.0",
- "pre-commit>=4.2.0",
- "pytest>=8.3.5",
- "pytest-cov>=4.1.0",
- "types-toml>=0.10.8.20240310",
- "pyrefly>=0.45.2",
-]
diff --git a/src/slopometry/core/compact_analyzer.py b/src/slopometry/core/compact_analyzer.py
new file mode 100644
index 0000000..910ddff
--- /dev/null
+++ b/src/slopometry/core/compact_analyzer.py
@@ -0,0 +1,235 @@
+"""Compact event analyzer for extracting compact events from Claude Code transcripts."""
+
+import json
+import logging
+from datetime import datetime
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel
+
+from slopometry.core.models import CompactEvent
+
+if TYPE_CHECKING:
+ from slopometry.core.database import EventDatabase
+
+logger = logging.getLogger(__name__)
+
+
+class CompactBoundary(BaseModel, extra="allow"):
+ """Parsed compact_boundary event from transcript."""
+
+ type: str | None = None
+ subtype: str | None = None
+ content: str | None = None
+ timestamp: str | None = None
+ uuid: str | None = None
+ compactMetadata: dict | None = None
+ version: str | None = None
+ gitBranch: str | None = None
+
+
+class CompactSummary(BaseModel, extra="allow"):
+ """Parsed isCompactSummary event from transcript."""
+
+ type: str | None = None
+ parentUuid: str | None = None
+ isCompactSummary: bool | None = None
+ message: dict | None = None
+ timestamp: str | None = None
+
+
+class CompactEventAnalyzer:
+ """Analyzes Claude Code transcripts to extract compact events."""
+
+ def analyze_transcript(self, transcript_path: Path) -> list[CompactEvent]:
+ """Parse transcript JSONL and extract compact events.
+
+ Compact events consist of:
+ 1. A boundary line with type="system", subtype="compact_boundary"
+ 2. A summary line with isCompactSummary=true linked via parentUuid
+
+ Args:
+ transcript_path: Path to the JSONL transcript file
+
+ Returns:
+ List of CompactEvent objects found in the transcript
+ """
+ compact_events: list[CompactEvent] = []
+ pending_boundaries: dict[str, tuple[int, CompactBoundary]] = {}
+
+ try:
+ with open(transcript_path, encoding="utf-8") as f:
+ for line_number, line in enumerate(f, start=1):
+ try:
+ raw_event = json.loads(line)
+ except json.JSONDecodeError:
+ continue
+
+ if self._is_compact_boundary(raw_event):
+ boundary = CompactBoundary.model_validate(raw_event)
+ if boundary.uuid:
+ pending_boundaries[boundary.uuid] = (line_number, boundary)
+
+ elif self._is_compact_summary(raw_event):
+ summary = CompactSummary.model_validate(raw_event)
+ parent_uuid = summary.parentUuid
+
+ if parent_uuid and parent_uuid in pending_boundaries:
+ line_num, boundary = pending_boundaries.pop(parent_uuid)
+ compact_event = self._create_compact_event(line_num, boundary, summary)
+ if compact_event:
+ compact_events.append(compact_event)
+
+ except OSError as e:
+ logger.warning(f"Failed to read transcript file {transcript_path}: {e}")
+
+ return compact_events
+
+ def _is_compact_boundary(self, raw_event: dict) -> bool:
+ """Check if event is a compact_boundary system event."""
+ return raw_event.get("type") == "system" and raw_event.get("subtype") == "compact_boundary"
+
+ def _is_compact_summary(self, raw_event: dict) -> bool:
+ """Check if event is a compact summary (isCompactSummary=true)."""
+ return raw_event.get("isCompactSummary") is True
+
+ def _create_compact_event(
+ self, line_number: int, boundary: CompactBoundary, summary: CompactSummary
+ ) -> CompactEvent | None:
+ """Create a CompactEvent from boundary and summary data."""
+ metadata = boundary.compactMetadata or {}
+ trigger = metadata.get("trigger", "unknown")
+ pre_tokens = metadata.get("preTokens", 0)
+
+ summary_content = ""
+ if summary.message:
+ content = summary.message.get("content", "")
+ if isinstance(content, str):
+ summary_content = content
+
+ timestamp_str = boundary.timestamp or summary.timestamp
+ if not timestamp_str:
+ logger.warning(f"Compact event at line {line_number} missing timestamp, skipping")
+ return None
+
+ try:
+ timestamp = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00"))
+ except ValueError:
+ logger.warning(f"Compact event at line {line_number} has invalid timestamp '{timestamp_str}', skipping")
+ return None
+
+ return CompactEvent(
+ line_number=line_number,
+ trigger=trigger,
+ pre_tokens=pre_tokens,
+ summary_content=summary_content,
+ timestamp=timestamp,
+ uuid=boundary.uuid or "",
+ version=boundary.version or "n/a",
+ git_branch=boundary.gitBranch or "n/a",
+ )
+
+
+def discover_transcripts(working_directory: Path, db: "EventDatabase") -> list[Path]:
+ """Find all transcripts relevant to the given project.
+
+ Sources:
+ 1. Database: Query sessions with matching working_directory
+ 2. Claude Code default: ~/.claude/transcripts/*.jsonl
+
+ Args:
+ working_directory: Project directory to filter by
+ db: EventDatabase instance for querying sessions
+
+ Returns:
+ List of unique transcript paths
+ """
+
+ transcripts: set[Path] = set()
+ normalized_wd = working_directory.resolve()
+
+ sessions = db.list_sessions_by_repository(working_directory)
+ for session_id in sessions:
+ stats = db.get_session_statistics(session_id)
+ if stats and stats.transcript_path:
+ path = Path(stats.transcript_path)
+ if path.exists():
+ transcripts.add(path)
+
+ claude_transcripts_dir = Path.home() / ".claude" / "transcripts"
+ if claude_transcripts_dir.exists():
+ for transcript in claude_transcripts_dir.glob("**/*.jsonl"):
+ if _transcript_matches_project(transcript, normalized_wd):
+ transcripts.add(transcript)
+
+ return list(transcripts)
+
+
+def _transcript_matches_project(transcript_path: Path, working_directory: Path) -> bool:
+ """Check if transcript's cwd matches the target working directory."""
+ try:
+ with open(transcript_path, encoding="utf-8") as f:
+ first_line = f.readline()
+ if not first_line:
+ return False
+ data = json.loads(first_line)
+ cwd = data.get("cwd", "")
+ if not cwd:
+ return False
+ return Path(cwd).resolve() == working_directory
+ except (OSError, json.JSONDecodeError) as e:
+ logger.warning(f"Failed to read transcript {transcript_path} for project matching: {e}")
+ return False
+
+
+def find_compact_instructions(transcript_path: Path, compact_line_number: int, lookback_lines: int = 50) -> str | None:
+ """Search backwards for /compact command that triggered this compact.
+
+ Args:
+ transcript_path: Path to the transcript file
+ compact_line_number: Line number of the compact_boundary event
+ lookback_lines: How many lines to search backwards
+
+ Returns:
+ The user's compact instructions if found, None otherwise
+ """
+ try:
+ with open(transcript_path, encoding="utf-8") as f:
+ all_lines = f.readlines()
+
+ start = max(0, compact_line_number - lookback_lines - 1)
+ end = compact_line_number - 1
+ lines_to_search = all_lines[start:end]
+
+ for line in reversed(lines_to_search):
+ try:
+ data = json.loads(line)
+ if data.get("type") != "user":
+ continue
+
+ message = data.get("message", {})
+ content = message.get("content", "")
+
+ if isinstance(content, str) and "/compact" in content.lower():
+ return content
+ except json.JSONDecodeError:
+ continue
+
+ except OSError as e:
+ logger.warning(f"Failed to read transcript {transcript_path} for compact instructions: {e}")
+
+ return None
+
+
+def analyze_transcript_compacts(transcript_path: Path) -> list[CompactEvent]:
+ """Convenience function to analyze compact events from a transcript.
+
+ Args:
+ transcript_path: Path to Claude Code transcript JSONL
+
+ Returns:
+ List of CompactEvent objects
+ """
+ analyzer = CompactEventAnalyzer()
+ return analyzer.analyze_transcript(transcript_path)
diff --git a/src/slopometry/core/database.py b/src/slopometry/core/database.py
index 1a99559..dfcd1cd 100644
--- a/src/slopometry/core/database.py
+++ b/src/slopometry/core/database.py
@@ -578,6 +578,16 @@ def get_session_statistics(self, session_id: str) -> SessionStatistics | None:
logger.debug(f"Failed to calculate plan evolution for session {session_id}: {e}")
stats.plan_evolution = None
+ if stats.transcript_path:
+ try:
+ from slopometry.core.compact_analyzer import analyze_transcript_compacts
+
+ transcript_path = Path(stats.transcript_path)
+ if transcript_path.exists():
+ stats.compact_events = analyze_transcript_compacts(transcript_path)
+ except Exception as e:
+ logger.debug(f"Failed to analyze compact events for session {session_id}: {e}")
+
try:
stats.context_coverage = self._calculate_context_coverage(stats.transcript_path, stats.working_directory)
except Exception as e:
@@ -673,6 +683,11 @@ def _calculate_plan_evolution(self, session_id: str) -> PlanEvolution:
if tool_name == "TodoWrite":
if tool_input:
analyzer.analyze_todo_write_event(tool_input, timestamp)
+ elif tool_name == "Write":
+ # Track Write events for plan files (in addition to counting as implementation)
+ if tool_input:
+ analyzer.analyze_write_event(tool_input)
+ analyzer.increment_event_count(tool_type, tool_input)
else:
analyzer.increment_event_count(tool_type, tool_input)
@@ -810,6 +825,37 @@ def list_sessions(self, limit: int | None = None) -> list[str]:
rows = conn.execute(query).fetchall()
return [row[0] for row in rows]
+ def list_sessions_by_repository(self, repository_path: Path, limit: int | None = None) -> list[str]:
+ """List session IDs filtered by repository working directory.
+
+ Sessions are identified by their first event's working_directory.
+
+ Args:
+ repository_path: The repository path to filter by
+ limit: Optional limit on number of sessions to return
+
+ Returns:
+ List of session IDs that started in this repository, ordered by most recent first
+ """
+ with self._get_db_connection() as conn:
+ normalized_path = str(repository_path.resolve())
+
+ query = """
+ SELECT session_id, MIN(timestamp) as first_event
+ FROM hook_events
+ WHERE working_directory = ?
+ GROUP BY session_id
+ ORDER BY first_event DESC
+ """
+ params: list = [normalized_path]
+
+ if limit:
+ query += " LIMIT ?"
+ params.append(limit)
+
+ rows = conn.execute(query, params).fetchall()
+ return [row[0] for row in rows]
+
def get_sessions_summary(self, limit: int | None = None) -> list[dict]:
"""Get lightweight session summaries for list display."""
with self._get_db_connection() as conn:
diff --git a/src/slopometry/core/hook_handler.py b/src/slopometry/core/hook_handler.py
index 39bb79d..86374ce 100644
--- a/src/slopometry/core/hook_handler.py
+++ b/src/slopometry/core/hook_handler.py
@@ -375,18 +375,23 @@ def handle_stop_event(session_id: str, parsed_input: "StopInput | SubagentStopIn
current_metrics, delta = db.calculate_extended_complexity_metrics(stats.working_directory)
feedback_parts: list[str] = []
+ cache_stable_parts: list[str] = [] # Only code-based feedback (stable between tool calls)
# Get edited files from git (more reliable than transcript-based context coverage)
edited_files = get_modified_python_files(stats.working_directory)
# Code smells - ALWAYS check (independent of enable_complexity_feedback)
+ # This is stable (based on code state, not session activity)
if current_metrics:
smell_feedback, has_smells, _ = format_code_smell_feedback(
current_metrics, delta, edited_files, session_id, stats.working_directory
)
if has_smells:
feedback_parts.append(smell_feedback)
+ cache_stable_parts.append(smell_feedback)
+ # Context coverage - informational but NOT stable (changes with every Read/Glob/Grep)
+ # Excluded from cache hash to avoid invalidation on tool calls
if settings.enable_complexity_feedback and stats.context_coverage and stats.context_coverage.files_edited:
context_feedback = format_context_coverage_feedback(stats.context_coverage)
if context_feedback:
@@ -400,10 +405,11 @@ def handle_stop_event(session_id: str, parsed_input: "StopInput | SubagentStopIn
if feedback_parts:
feedback = "\n\n".join(feedback_parts)
- # Hash feedback content BEFORE adding session-specific metadata
- # This ensures cache hits work across different sessions with same feedback
+ # Hash ONLY code-based feedback (smell_feedback) for cache key
+ # Context coverage changes with every tool call and would invalidate cache
# Use blake2b for arm64/amd64 performance
- feedback_hash = hashlib.blake2b(feedback.encode(), digest_size=8).hexdigest()
+ cache_content = "\n\n".join(cache_stable_parts) if cache_stable_parts else ""
+ feedback_hash = hashlib.blake2b(cache_content.encode(), digest_size=8).hexdigest()
feedback += (
f"\n\n---\n**Session**: `{session_id}` | Details: `slopometry solo show {session_id} --smell-details`"
diff --git a/src/slopometry/core/models.py b/src/slopometry/core/models.py
index 323d925..4623f7a 100644
--- a/src/slopometry/core/models.py
+++ b/src/slopometry/core/models.py
@@ -294,6 +294,40 @@ class PlanEvolution(BaseModel):
token_usage: TokenUsage | None = Field(
default=None, description="Token usage breakdown by exploration vs implementation"
)
+ plan_files_created: int = Field(default=0, description="Number of plan files written to ~/.claude/plans/")
+ plan_file_paths: list[str] = Field(default_factory=list, description="Paths to plan files created during session")
+ final_todos: list[TodoItem] = Field(default_factory=list, description="Final state of todos at session end")
+
+
+class CompactEvent(BaseModel):
+ """Represents a compact event from Claude Code transcript.
+
+ Compact events occur when the conversation is compacted to save context.
+ They consist of a compact_boundary system event followed by an isCompactSummary user message.
+ """
+
+ line_number: int = Field(description="Line number in transcript where compact occurred")
+ trigger: str = Field(description="Trigger type: 'auto' or 'manual'")
+ pre_tokens: int = Field(description="Token count before this compact")
+ summary_content: str = Field(description="The compact summary content")
+ timestamp: datetime = Field(description="When the compact occurred")
+ uuid: str = Field(description="UUID of the compact_boundary event")
+ version: str = Field(default="n/a", description="Claude Code version at compact time")
+ git_branch: str = Field(default="n/a", description="Git branch at compact time")
+
+
+class SavedCompact(BaseModel):
+ """Saved compact event with instructions and results for export."""
+
+ transcript_path: str = Field(description="Path to source transcript")
+ line_number: int = Field(description="Line number in transcript")
+ timestamp: datetime
+ trigger: str
+ pre_tokens: int
+ summary_content: str
+ instructions: str | None = Field(default=None, description="Compact instructions if found")
+ version: str = Field(default="n/a", description="Claude Code version at compact time")
+ git_branch: str = Field(default="n/a", description="Git branch at compact time")
class SessionStatistics(BaseModel):
@@ -318,6 +352,9 @@ class SessionStatistics(BaseModel):
context_coverage: "ContextCoverage | None" = None
project: Project | None = None
transcript_path: str | None = None
+ compact_events: list[CompactEvent] = Field(
+ default_factory=list, description="Compacts that occurred during session"
+ )
class PreToolUseInput(BaseModel):
diff --git a/src/slopometry/core/plan_analyzer.py b/src/slopometry/core/plan_analyzer.py
index 4b7ecaa..ae72ce1 100644
--- a/src/slopometry/core/plan_analyzer.py
+++ b/src/slopometry/core/plan_analyzer.py
@@ -1,5 +1,6 @@
"""Plan evolution analysis for TodoWrite events."""
+import re
from datetime import datetime
from typing import Any
@@ -9,6 +10,8 @@
class PlanAnalyzer:
"""Analyzes TodoWrite events to track plan evolution."""
+ PLAN_FILE_PATTERN = re.compile(r"\.claude[/\\]plans[/\\][a-zA-Z0-9_-]+\.md$")
+
SEARCH_TOOLS = {
ToolType.GREP,
ToolType.GLOB,
@@ -58,6 +61,7 @@ def __init__(self):
self.events_since_last_todo = 0
self.search_events_since_last_todo = 0
self.implementation_events_since_last_todo = 0
+ self.plan_file_paths: set[str] = set()
def analyze_todo_write_event(self, tool_input: dict[str, Any], timestamp: datetime) -> None:
"""Analyze a TodoWrite event and track plan evolution.
@@ -87,6 +91,18 @@ def analyze_todo_write_event(self, tool_input: dict[str, Any], timestamp: dateti
self.search_events_since_last_todo = 0
self.implementation_events_since_last_todo = 0
+ def analyze_write_event(self, tool_input: dict[str, Any]) -> None:
+ """Track Write events that create plan files.
+
+ Detects writes to ~/.claude/plans/*.md and records the file paths.
+
+ Args:
+ tool_input: The tool input containing file_path
+ """
+ file_path = tool_input.get("file_path", "")
+ if self.PLAN_FILE_PATTERN.search(file_path):
+ self.plan_file_paths.add(file_path)
+
def increment_event_count(
self, tool_type: ToolType | None = None, tool_input: dict[str, Any] | None = None
) -> None:
@@ -114,9 +130,17 @@ def get_plan_evolution(self) -> PlanEvolution:
Returns:
PlanEvolution with aggregated statistics
"""
- if not self.plan_steps:
+ if not self.plan_steps and not self.plan_file_paths:
return PlanEvolution()
+ if not self.plan_steps:
+ # No TodoWrite events, but may have plan files
+ return PlanEvolution(
+ plan_files_created=len(self.plan_file_paths),
+ plan_file_paths=sorted(self.plan_file_paths),
+ final_todos=list(self.previous_todos.values()),
+ )
+
all_todo_contents = set()
completed_todo_contents = set()
@@ -158,6 +182,9 @@ def get_plan_evolution(self) -> PlanEvolution:
total_search_events=total_search_events,
total_implementation_events=total_implementation_events,
exploration_percentage=exploration_percentage,
+ plan_files_created=len(self.plan_file_paths),
+ plan_file_paths=sorted(self.plan_file_paths),
+ final_todos=list(self.previous_todos.values()),
)
def _calculate_plan_step(self, current_todos: dict[str, TodoItem], timestamp: datetime) -> PlanStep | None:
diff --git a/src/slopometry/core/python_feature_analyzer.py b/src/slopometry/core/python_feature_analyzer.py
index 52a9311..ffa8496 100644
--- a/src/slopometry/core/python_feature_analyzer.py
+++ b/src/slopometry/core/python_feature_analyzer.py
@@ -603,7 +603,7 @@ def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
self.generic_visit(node)
self._scope_depth -= 1
- def _visit_func_common(self, node):
+ def _visit_func_common(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None:
self.functions += 1
if ast.get_docstring(node):
@@ -745,13 +745,51 @@ def visit_Try(self, node: ast.Try) -> None:
self.generic_visit(node)
def _is_swallowed_exception(self, handler: ast.ExceptHandler) -> bool:
- """Check if exception handler just swallows (pass/continue/empty body)."""
+ """Check if exception handler just swallows (pass/continue/empty body).
+
+ Not considered swallowed if the handler logs the exception.
+ """
if not handler.body:
return True
+
+ # Check if any statement in the handler is a logging call
+ for stmt in handler.body:
+ if self._is_logging_call(stmt):
+ return False
+
+ # Single statement that's pass/continue is swallowed
if len(handler.body) == 1:
stmt = handler.body[0]
if isinstance(stmt, ast.Pass | ast.Continue):
return True
+
+ return False
+
+ def _is_logging_call(self, stmt: ast.stmt) -> bool:
+ """Check if a statement is a logging/print call."""
+ if not isinstance(stmt, ast.Expr):
+ return False
+ if not isinstance(stmt.value, ast.Call):
+ return False
+
+ call = stmt.value
+ func = call.func
+
+ # Check for print() call
+ if isinstance(func, ast.Name) and func.id == "print":
+ return True
+
+ # Check for attribute calls like logger.warning, logging.info, console.print
+ if isinstance(func, ast.Attribute):
+ # logger.*, logging.*
+ if isinstance(func.value, ast.Name):
+ if func.value.id in ("logger", "logging", "log", "console"):
+ return True
+ # self.logger.*
+ if isinstance(func.value, ast.Attribute):
+ if func.value.attr in ("logger", "log"):
+ return True
+
return False
def visit_Import(self, node: ast.Import) -> None:
diff --git a/src/slopometry/display/formatters.py b/src/slopometry/display/formatters.py
index 138da74..2c52ac4 100644
--- a/src/slopometry/display/formatters.py
+++ b/src/slopometry/display/formatters.py
@@ -7,7 +7,7 @@
from rich.console import Console
from rich.table import Table
-from slopometry.core.models import ZScoreInterpretation
+from slopometry.core.models import CompactEvent, TokenUsage, ZScoreInterpretation
from slopometry.core.settings import settings
logger = logging.getLogger(__name__)
@@ -183,12 +183,26 @@ def display_session_summary(
console.print(f"Total events: {stats.total_events}")
+ if stats.plan_evolution and (
+ stats.plan_evolution.total_plan_steps > 0
+ or stats.plan_evolution.plan_files_created > 0
+ or stats.plan_evolution.final_todos
+ ):
+ _display_plan_info(stats.plan_evolution)
+
if stats.events_by_type:
_display_events_by_type_table(stats.events_by_type)
if stats.tool_usage:
_display_tool_usage_table(stats.tool_usage)
+ if stats.compact_events:
+ _display_compact_events(stats.compact_events)
+
+ token_usage = stats.plan_evolution.token_usage if stats.plan_evolution else None
+ if token_usage or stats.compact_events:
+ _display_token_impact(token_usage, stats.compact_events)
+
if stats.average_tool_duration_ms:
console.print(f"\nAverage tool duration: {stats.average_tool_duration_ms:.0f}ms")
@@ -204,11 +218,8 @@ def display_session_summary(
if stats.complexity_delta:
_display_complexity_delta(stats, baseline, assessment, show_file_details=show_file_details)
- if stats.plan_evolution and stats.plan_evolution.total_plan_steps > 0:
- _display_work_summary(stats.plan_evolution)
-
if stats.context_coverage and stats.context_coverage.files_edited:
- _display_context_coverage(stats.context_coverage)
+ _display_context_coverage(stats.context_coverage, show_file_details=show_file_details)
def _display_events_by_type_table(events_by_type: dict) -> None:
@@ -235,15 +246,77 @@ def _display_tool_usage_table(tool_usage: dict) -> None:
console.print(table)
+def _display_compact_events(compact_events: list[CompactEvent]) -> None:
+ """Display compact events table.
+
+ Args:
+ compact_events: List of compact events from the session
+ """
+ if not compact_events:
+ return
+
+ console.print(f"\n[bold]Compacts ({len(compact_events)})[/bold]")
+ table = Table()
+ table.add_column("Time", style="cyan")
+ table.add_column("Trigger", style="yellow")
+ table.add_column("Pre-Tokens", justify="right")
+ table.add_column("Version", style="dim")
+ table.add_column("Branch", style="magenta")
+ table.add_column("Line", justify="right", style="dim")
+
+ for compact in compact_events:
+ table.add_row(
+ compact.timestamp.strftime("%H:%M:%S"),
+ compact.trigger,
+ _format_token_count(compact.pre_tokens),
+ compact.version,
+ compact.git_branch,
+ str(compact.line_number),
+ )
+ console.print(table)
+
+
+def _display_token_impact(token_usage: TokenUsage | None, compact_events: list[CompactEvent]) -> None:
+ """Display token impact section with exploration/implementation breakdown.
+
+ Args:
+ token_usage: Token usage metrics from plan evolution
+ compact_events: List of compact events for calculating 'without compact'
+ """
+ if not token_usage:
+ return
+
+ console.print("\n[bold]Token Impact:[/bold]")
+ token_table = Table(show_header=True)
+ token_table.add_column("Metric", style="cyan")
+ token_table.add_column("Value", justify="right")
+
+ token_table.add_row("Changeset Tokens", _format_token_count(token_usage.total_tokens))
+ token_table.add_row("Exploration Tokens", _format_token_count(token_usage.exploration_tokens))
+ token_table.add_row("Implementation Tokens", _format_token_count(token_usage.implementation_tokens))
+
+ if token_usage.subagent_tokens > 0:
+ token_table.add_row("Subagent Tokens", _format_token_count(token_usage.subagent_tokens))
+
+ if compact_events:
+ tokens_without_compact = sum(c.pre_tokens for c in compact_events)
+ token_table.add_row(
+ "[yellow]Tokens Without Compact[/yellow]",
+ f"[yellow]{_format_token_count(tokens_without_compact)}[/yellow]",
+ )
+
+ console.print(token_table)
+
+
def _display_git_metrics(stats: SessionStatistics) -> None:
"""Display git metrics section."""
console.print("\n[bold]Git Metrics[/bold]")
console.print(f"Commits made: [green]{stats.commits_made}[/green]")
- if stats.initial_git_state.current_branch:
+ if stats.initial_git_state and stats.initial_git_state.current_branch:
console.print(f"Branch: {stats.initial_git_state.current_branch}")
- if stats.initial_git_state.has_uncommitted_changes:
+ if stats.initial_git_state and stats.initial_git_state.has_uncommitted_changes:
console.print("[yellow]Had uncommitted changes at start[/yellow]")
if stats.final_git_state and stats.final_git_state.has_uncommitted_changes:
@@ -498,7 +571,10 @@ def _display_complexity_delta(
file_changes_table.add_column("Change", justify="right", width=10)
for file_path, change in sorted_changes:
- current_complexity = stats.complexity_metrics.files_by_complexity.get(file_path, 0)
+ files_by_complexity = (
+ stats.complexity_metrics.files_by_complexity if stats.complexity_metrics else {}
+ )
+ current_complexity = files_by_complexity.get(file_path, 0)
previous_complexity = current_complexity - change
change_color = "green" if change < 0 else "red"
@@ -578,7 +654,6 @@ def _display_galen_rate(galen_metrics: GalenMetrics, title: str = "Galen Rate")
rate_color = "green" if rate >= 1.0 else "yellow" if rate >= 0.5 else "red"
galen_table.add_row("Galen Rate", f"[{rate_color}]{rate:.2f} Galens[/{rate_color}]")
- # Tokens needed per day (only if below 1 Galen)
if galen_metrics.tokens_per_day_to_reach_one_galen is not None:
needed = galen_metrics.tokens_per_day_to_reach_one_galen
galen_table.add_row("Tokens/day to 1 Galen", f"[yellow]+{needed:,.0f}/day needed[/yellow]")
@@ -586,6 +661,90 @@ def _display_galen_rate(galen_metrics: GalenMetrics, title: str = "Galen Rate")
console.print(galen_table)
+def _display_plan_info(evolution: PlanEvolution) -> None:
+ """Display plan and todo information section.
+
+ Shows:
+ - TodoWrite usage counts and completion rate
+ - Plan file paths with existence check and clickable links
+ - Final todo items with status indicators
+
+ Args:
+ evolution: The plan evolution data containing todos and plan files
+ """
+ console.print("\n[bold]Plans & Todos[/bold]")
+
+ if evolution.total_todos_created > 0:
+ efficiency_color = (
+ "green"
+ if evolution.planning_efficiency >= 0.8
+ else "yellow"
+ if evolution.planning_efficiency >= 0.5
+ else "red"
+ )
+ console.print(
+ f"Tasks: {evolution.total_todos_completed}/{evolution.total_todos_created} completed "
+ f"([{efficiency_color}]{evolution.planning_efficiency:.0%}[/{efficiency_color}])"
+ )
+
+ if evolution.plan_files_created > 0:
+ console.print(f"\n[bold]Plan Files ({evolution.plan_files_created}):[/bold]")
+ for plan_path in evolution.plan_file_paths:
+ expanded_path = Path(plan_path).expanduser()
+ truncated = truncate_path(plan_path, max_width=60)
+ status = _get_file_status(expanded_path)
+ if status == "exists":
+ console.print(f" [link=file://{expanded_path}]{truncated}[/link] [green](exists)[/green]")
+ elif status == "empty":
+ console.print(f" [link=file://{expanded_path}]{truncated}[/link] [dim](empty)[/dim]")
+ else:
+ console.print(f" {truncated} [dim](deleted)[/dim]")
+
+ if evolution.final_todos:
+ console.print(f"\n[bold]Final Todos ({len(evolution.final_todos)}):[/bold]")
+ for todo in evolution.final_todos:
+ status_indicator = _get_todo_status_indicator(todo.status)
+ console.print(f" {status_indicator} {todo.content}")
+
+
+def _get_todo_status_indicator(status: str) -> str:
+ """Get the status indicator for a todo item.
+
+ Args:
+ status: The todo status ('completed', 'in_progress', or 'pending')
+
+ Returns:
+ Formatted status indicator string with color
+ """
+ if status == "completed":
+ return "[green]✓[/green]"
+ elif status == "in_progress":
+ return "[yellow]→[/yellow]"
+ else: # pending
+ return "[dim]â—‹[/dim]"
+
+
+def _get_file_status(file_path: Path) -> str:
+ """Check if a file exists and has content.
+
+ Args:
+ file_path: Path to the file to check
+
+ Returns:
+ 'exists' if file has content, 'empty' if file is empty/whitespace-only, 'deleted' if missing
+ """
+ if not file_path.exists():
+ return "deleted"
+ if file_path.stat().st_size == 0:
+ return "empty"
+ # For small files, check if content is just whitespace or empty JSON
+ if file_path.stat().st_size < 100:
+ content = file_path.read_text().strip()
+ if not content or content in ("", "{}", "[]", "null"):
+ return "empty"
+ return "exists"
+
+
def _display_work_summary(evolution: PlanEvolution) -> None:
"""Display compact work summary with task completion and work style."""
console.print(
@@ -606,7 +765,7 @@ def _display_work_summary(evolution: PlanEvolution) -> None:
)
-def _display_context_coverage(coverage: ContextCoverage) -> None:
+def _display_context_coverage(coverage: ContextCoverage, show_file_details: bool = False) -> None:
"""Display context coverage section showing what files were read before editing."""
console.print("\n[bold]Context Coverage[/bold]")
console.print(f"Files edited: {len(coverage.files_edited)}")
@@ -641,9 +800,14 @@ def _display_context_coverage(coverage: ContextCoverage) -> None:
console.print(table)
if coverage.blind_spots:
- console.print(f"\n[yellow]Potential blind spots ({len(coverage.blind_spots)} files):[/yellow]")
- for blind_spot in coverage.blind_spots:
- console.print(f" • {truncate_path(blind_spot, max_width=70)}")
+ if show_file_details:
+ console.print(f"\n[yellow]Potential blind spots ({len(coverage.blind_spots)} files):[/yellow]")
+ for blind_spot in coverage.blind_spots:
+ console.print(f" • {truncate_path(blind_spot, max_width=70)}")
+ else:
+ console.print(
+ f"\n[dim]Potential blind spots: {len(coverage.blind_spots)} files (use --file-details to list)[/dim]"
+ )
def _format_coverage_ratio(read: int, total: int) -> str:
@@ -963,16 +1127,26 @@ def _get_impact_color(category: ImpactCategory) -> str:
return "red"
case ImpactCategory.SIGNIFICANT_DEGRADATION:
return "red bold"
+ case _:
+ return "white"
+
+def display_current_impact_analysis(
+ analysis: CurrentChangesAnalysis,
+ compact_events: list[CompactEvent] | None = None,
+ show_file_details: bool = False,
+) -> None:
+ """Display uncommitted changes impact analysis with Rich formatting.
-def display_current_impact_analysis(analysis: CurrentChangesAnalysis) -> None:
- """Display uncommitted changes impact analysis with Rich formatting."""
+ Args:
+ analysis: The current changes analysis to display
+ compact_events: Optional list of compact events from session transcript
+ show_file_details: Whether to show detailed file lists (blind spots)
+ """
console.print("\n[bold]Uncommitted Changes Impact Analysis[/bold]")
console.print(f"Repository: {analysis.repository_path}")
- # Dropped list of changed files as requested by user to reduce noise
-
display_baseline_comparison(
baseline=analysis.baseline,
assessment=analysis.assessment,
@@ -989,6 +1163,14 @@ def display_current_impact_analysis(analysis: CurrentChangesAnalysis) -> None:
token_table.add_row(
"Complete Picture Context Size", f"[bold]{_format_token_count(analysis.complete_picture_context_size)}[/bold]"
)
+
+ if compact_events:
+ tokens_without_compact = sum(c.pre_tokens for c in compact_events)
+ token_table.add_row(
+ "[yellow]Tokens Without Compact[/yellow]",
+ f"[yellow]{_format_token_count(tokens_without_compact)}[/yellow]",
+ )
+
console.print(token_table)
if analysis.galen_metrics:
@@ -1042,16 +1224,20 @@ def display_current_impact_analysis(analysis: CurrentChangesAnalysis) -> None:
console.print(cov_table)
if analysis.blind_spots:
- console.print(f"\n[yellow]Potential blind spots ({len(analysis.blind_spots)} files):[/yellow]")
- # Show all blind spots as requested
- for blind_spot in analysis.blind_spots:
- console.print(f" • {truncate_path(blind_spot, max_width=70)}")
+ if show_file_details:
+ console.print(f"\n[yellow]Potential blind spots ({len(analysis.blind_spots)} files):[/yellow]")
+ for blind_spot in analysis.blind_spots:
+ console.print(f" • {truncate_path(blind_spot, max_width=70)}")
+ else:
+ console.print(
+ f"\n[dim]Potential blind spots: {len(analysis.blind_spots)} files (use --file-details to list)[/dim]"
+ )
filter_set = set(analysis.changed_files) if analysis.changed_files else None
_display_code_smells_detailed(metrics, filter_files=filter_set)
-def _display_code_smells_detailed(metrics, filter_files: set[str] | None = None) -> None:
+def _display_code_smells_detailed(metrics: ExtendedComplexityMetrics, filter_files: set[str] | None = None) -> None:
"""Display a detailed table of code smells with complete file lists.
Args:
diff --git a/src/slopometry/solo/cli/commands.py b/src/slopometry/solo/cli/commands.py
index eb7449b..e05fb4a 100644
--- a/src/slopometry/solo/cli/commands.py
+++ b/src/slopometry/solo/cli/commands.py
@@ -2,10 +2,14 @@
import logging
from pathlib import Path
+from typing import TYPE_CHECKING
import click
from rich.console import Console
+if TYPE_CHECKING:
+ from slopometry.core.models import ImpactAssessment, RepoBaseline, SessionStatistics
+
# Imports moved inside functions to optimize startup time
console = Console()
@@ -13,12 +17,21 @@
def complete_session_id(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[str]:
- """Complete session IDs from the database."""
+ """Complete session IDs, filtered by current repository if in a git repo."""
+ from slopometry.core.git_tracker import GitTracker
from slopometry.solo.services.session_service import SessionService
try:
session_service = SessionService()
- sessions = session_service.list_sessions()
+ cwd = Path.cwd()
+ git_tracker = GitTracker(cwd)
+ git_state = git_tracker.get_git_state()
+
+ if git_state.is_git_repo:
+ sessions = session_service.list_sessions_by_repository(cwd)
+ else:
+ sessions = session_service.list_sessions()
+
return [session for session in sessions if session.startswith(incomplete)]
except Exception:
return []
@@ -191,12 +204,10 @@ def latest(smell_details: bool, file_details: bool) -> None:
console.print(f"\n[dim]Analysis completed in {elapsed:.1f}s[/dim]")
-def _compute_session_baseline(stats):
- """Compute baseline and assessment for a session's complexity delta.
-
- Returns:
- Tuple of (baseline, assessment) or (None, None) if unavailable
- """
+def _compute_session_baseline(
+ stats: "SessionStatistics",
+) -> tuple["RepoBaseline", "ImpactAssessment"] | tuple[None, None]:
+ """Compute baseline and assessment for a session's complexity delta."""
if not stats.complexity_delta:
return None, None
@@ -238,7 +249,6 @@ def cleanup(session_id: str | None, all_sessions: bool, yes: bool) -> None:
If SESSION_ID is provided, delete that specific session.
If --all is provided, delete all sessions.
- If --all is provided, delete all sessions.
Otherwise, show usage help.
"""
from slopometry.solo.services.session_service import SessionService
@@ -457,15 +467,6 @@ def _find_plan_names_from_transcript(transcript_path: Path) -> list[str]:
return list(plan_names)
-def _find_session_todos(session_id: str) -> list[Path]:
- """Find todo files matching session ID pattern in ~/.claude/todos/."""
- todos_dir = Path.home() / ".claude" / "todos"
- if not todos_dir.exists():
- return []
-
- return list(todos_dir.glob(f"{session_id}-*.json"))
-
-
@solo.command()
@click.argument("session_id", required=False, shell_complete=complete_session_id)
@click.option("--output-dir", "-o", default=".", help="Directory to save the transcript to (default: current)")
@@ -483,7 +484,6 @@ def save_transcript(session_id: str | None, output_dir: str, yes: bool) -> None:
session_service = SessionService()
- # If no session_id provided, use the latest session
if not session_id:
session_id = session_service.get_most_recent_session()
if not session_id:
@@ -495,7 +495,6 @@ def save_transcript(session_id: str | None, output_dir: str, yes: bool) -> None:
console.print(f"[red]No data found for latest session {session_id}[/red]")
return
- # Show session info and ask for confirmation
console.print(f"[bold]Latest session: {session_id}[/bold]")
console.print(f"Start time: {stats.start_time.strftime('%Y-%m-%d %H:%M:%S')}")
console.print(f"Total events: {stats.total_events}")
@@ -543,10 +542,14 @@ def save_transcript(session_id: str | None, output_dir: str, yes: bool) -> None:
shutil.copy2(plan_source, plans_dir / plan_name)
console.print(f"[green]✓[/green] Saved plan: {plan_name}")
- todo_files = _find_session_todos(session_id)
- if todo_files:
- todos_dir = session_dir / "todos"
- todos_dir.mkdir(exist_ok=True)
- for todo_file in todo_files:
- shutil.copy2(todo_file, todos_dir / todo_file.name)
- console.print(f"[green]✓[/green] Saved todo: {todo_file.name}")
+ # Save final todos from session statistics (Claude Code empties todo files on completion)
+ if stats.plan_evolution and stats.plan_evolution.final_todos:
+ import json
+
+ todos_file = session_dir / "final_todos.json"
+ todos_data = [
+ {"content": todo.content, "status": todo.status, "activeForm": todo.activeForm}
+ for todo in stats.plan_evolution.final_todos
+ ]
+ todos_file.write_text(json.dumps(todos_data, indent=2))
+ console.print(f"[green]✓[/green] Saved {len(todos_data)} todos to: final_todos.json")
diff --git a/src/slopometry/solo/services/hook_service.py b/src/slopometry/solo/services/hook_service.py
index 3f925bb..5e2d4ab 100644
--- a/src/slopometry/solo/services/hook_service.py
+++ b/src/slopometry/solo/services/hook_service.py
@@ -9,25 +9,26 @@
from slopometry.core.settings import get_default_config_dir, get_default_data_dir, settings
-class HookCommand(BaseModel):
+class HookCommand(BaseModel, extra="allow"):
"""A single hook command to execute."""
type: str = "command"
command: str
-class HookConfig(BaseModel):
+class HookConfig(BaseModel, extra="allow"):
"""Configuration for a hook event handler."""
- matcher: str | None = None # Only for PreToolUse/PostToolUse
+ matcher: str | None = None
hooks: list[HookCommand]
+ def is_slopometry_hook(self) -> bool:
+ """Check if this config contains slopometry hooks."""
+ return any("slopometry hook-" in hook.command for hook in self.hooks)
-class ClaudeSettingsHooks(BaseModel, extra="allow"):
- """Hooks section of Claude Code settings.json.
- Uses extra="allow" to tolerate unknown hook types from future Claude versions.
- """
+class ClaudeSettingsHooks(BaseModel, extra="allow"):
+ """Hooks section of Claude Code settings.json."""
PreToolUse: list[HookConfig] = Field(default_factory=list)
PostToolUse: list[HookConfig] = Field(default_factory=list)
@@ -35,6 +36,87 @@ class ClaudeSettingsHooks(BaseModel, extra="allow"):
Stop: list[HookConfig] = Field(default_factory=list)
SubagentStop: list[HookConfig] = Field(default_factory=list)
+ def _all_hook_lists(self) -> list[tuple[str, list[HookConfig]]]:
+ """Return all hook lists with their names for iteration."""
+ return [
+ ("PreToolUse", self.PreToolUse),
+ ("PostToolUse", self.PostToolUse),
+ ("Notification", self.Notification),
+ ("Stop", self.Stop),
+ ("SubagentStop", self.SubagentStop),
+ ]
+
+ def remove_slopometry_hooks(self) -> bool:
+ """Remove all slopometry hooks. Returns True if any removed."""
+ removed = False
+ for name, configs in self._all_hook_lists():
+ original_len = len(configs)
+ filtered = [c for c in configs if not c.is_slopometry_hook()]
+ if len(filtered) < original_len:
+ removed = True
+ match name:
+ case "PreToolUse":
+ self.PreToolUse = filtered
+ case "PostToolUse":
+ self.PostToolUse = filtered
+ case "Notification":
+ self.Notification = filtered
+ case "Stop":
+ self.Stop = filtered
+ case "SubagentStop":
+ self.SubagentStop = filtered
+ return removed
+
+ def add_slopometry_hooks(self, hook_configs: dict[str, list[dict]]) -> None:
+ """Add slopometry hooks, replacing any existing ones."""
+ self.remove_slopometry_hooks()
+ for hook_type, configs in hook_configs.items():
+ parsed = [HookConfig.model_validate(c) for c in configs]
+ match hook_type:
+ case "PreToolUse":
+ self.PreToolUse.extend(parsed)
+ case "PostToolUse":
+ self.PostToolUse.extend(parsed)
+ case "Notification":
+ self.Notification.extend(parsed)
+ case "Stop":
+ self.Stop.extend(parsed)
+ case "SubagentStop":
+ self.SubagentStop.extend(parsed)
+
+ def has_slopometry_hooks(self) -> bool:
+ """Check if any slopometry hooks are installed."""
+ for _, configs in self._all_hook_lists():
+ for config in configs:
+ if config.is_slopometry_hook():
+ return True
+ return False
+
+
+class ClaudePermissions(BaseModel, extra="allow"):
+ """Permissions section of Claude Code settings.json."""
+
+ allow: list[str] = Field(default_factory=list)
+
+
+class ClaudeSettings(BaseModel, extra="allow"):
+ """Complete Claude Code settings.json structure."""
+
+ hooks: ClaudeSettingsHooks = Field(default_factory=ClaudeSettingsHooks)
+ permissions: ClaudePermissions = Field(default_factory=ClaudePermissions)
+
+ @classmethod
+ def load(cls, path: Path) -> "ClaudeSettings":
+ """Load settings from file."""
+ if not path.exists():
+ return cls()
+ return cls.model_validate_json(path.read_text())
+
+ def save(self, path: Path) -> None:
+ """Save settings to file."""
+ path.parent.mkdir(exist_ok=True)
+ path.write_text(self.model_dump_json(indent=2, exclude_defaults=True))
+
class HookService:
"""Handles Claude Code hook installation and management."""
@@ -99,22 +181,6 @@ def create_hook_configuration(self) -> dict:
"SubagentStop": [{"hooks": [{"type": "command", "command": base_command.format("subagent-stop")}]}],
}
- @staticmethod
- def _is_slopometry_hook_config(config: dict) -> bool:
- """Check if a hook config dict contains slopometry hooks.
-
- Args:
- config: Raw hook config dict from settings.json
-
- Returns:
- True if this config contains slopometry hook commands
- """
- try:
- parsed = HookConfig.model_validate(config)
- return any("slopometry hook-" in hook.command for hook in parsed.hooks)
- except Exception:
- return False
-
def _ensure_global_directories(self) -> None:
"""Create global config and data directories if they don't exist."""
config_dir = get_default_config_dir()
@@ -136,49 +202,22 @@ def install_hooks(self, global_: bool = False) -> tuple[bool, str]:
settings_dir = Path.home() / ".claude" if global_ else Path.cwd() / ".claude"
settings_file = settings_dir / "settings.json"
- settings_dir.mkdir(exist_ok=True)
-
- existing_settings = {}
- if settings_file.exists():
- try:
- with open(settings_file) as f:
- existing_settings = json.load(f)
- except json.JSONDecodeError:
- return False, f"Invalid JSON in {settings_file}"
+ try:
+ claude_settings = ClaudeSettings.load(settings_file)
+ except (json.JSONDecodeError, ValueError):
+ return False, f"Invalid JSON in {settings_file}"
- if "hooks" in existing_settings and settings.backup_existing_settings:
+ if claude_settings.hooks.has_slopometry_hooks() and settings.backup_existing_settings:
backup_file = settings_dir / f"settings.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
- with open(backup_file, "w") as f:
- json.dump(existing_settings, f, indent=2)
+ backup_file.write_text(settings_file.read_text())
- slopometry_hooks = self.create_hook_configuration()
-
- if "hooks" not in existing_settings:
- existing_settings["hooks"] = {}
-
- for hook_type, hook_configs in slopometry_hooks.items():
- if hook_type not in existing_settings["hooks"]:
- existing_settings["hooks"][hook_type] = []
-
- existing_settings["hooks"][hook_type] = [
- h for h in existing_settings["hooks"][hook_type] if not self._is_slopometry_hook_config(h)
- ]
-
- existing_settings["hooks"][hook_type].extend(hook_configs)
-
- if "permissions" not in existing_settings:
- existing_settings["permissions"] = {}
- if "allow" not in existing_settings["permissions"]:
- existing_settings["permissions"]["allow"] = []
-
- existing_allows = set(existing_settings["permissions"]["allow"])
- for cmd in self.WHITELISTED_COMMANDS:
- if cmd not in existing_allows:
- existing_settings["permissions"]["allow"].append(cmd)
+ claude_settings.hooks.add_slopometry_hooks(self.create_hook_configuration())
+ claude_settings.permissions.allow = list(
+ set(claude_settings.permissions.allow) | set(self.WHITELISTED_COMMANDS)
+ )
try:
- with open(settings_file, "w") as f:
- json.dump(existing_settings, f, indent=2)
+ claude_settings.save(settings_file)
except Exception as e:
return False, f"Failed to write settings: {e}"
@@ -207,40 +246,17 @@ def uninstall_hooks(self, global_: bool = False) -> tuple[bool, str]:
return True, f"No settings file found {scope}"
try:
- with open(settings_file) as f:
- settings_data = json.load(f)
- except json.JSONDecodeError:
+ claude_settings = ClaudeSettings.load(settings_file)
+ except (json.JSONDecodeError, ValueError):
return False, f"Invalid JSON in {settings_file}"
- if "hooks" not in settings_data:
- return True, "No hooks configuration found"
-
- removed_any = False
- for hook_type in settings_data["hooks"]:
- original_length = len(settings_data["hooks"][hook_type])
- settings_data["hooks"][hook_type] = [
- h for h in settings_data["hooks"][hook_type] if not self._is_slopometry_hook_config(h)
- ]
- if len(settings_data["hooks"][hook_type]) < original_length:
- removed_any = True
-
- settings_data["hooks"] = {k: v for k, v in settings_data["hooks"].items() if v}
-
- if not settings_data["hooks"]:
- del settings_data["hooks"]
-
- if "permissions" in settings_data and "allow" in settings_data["permissions"]:
- settings_data["permissions"]["allow"] = [
- cmd for cmd in settings_data["permissions"]["allow"] if cmd not in self.WHITELISTED_COMMANDS
- ]
- if not settings_data["permissions"]["allow"]:
- del settings_data["permissions"]["allow"]
- if not settings_data["permissions"]:
- del settings_data["permissions"]
+ removed_any = claude_settings.hooks.remove_slopometry_hooks()
+ claude_settings.permissions.allow = [
+ cmd for cmd in claude_settings.permissions.allow if cmd not in self.WHITELISTED_COMMANDS
+ ]
try:
- with open(settings_file, "w") as f:
- json.dump(settings_data, f, indent=2)
+ claude_settings.save(settings_file)
except Exception as e:
return False, f"Failed to write settings: {e}"
@@ -252,23 +268,13 @@ def uninstall_hooks(self, global_: bool = False) -> tuple[bool, str]:
def check_hooks_installed(self, settings_file: Path) -> bool:
"""Check if slopometry hooks are installed in a settings file."""
- if not settings_file.exists():
- return False
-
try:
- with open(settings_file) as f:
- settings_data = json.load(f)
-
- hooks = settings_data.get("hooks", {})
- for hook_configs in hooks.values():
- for hook_config in hook_configs:
- if self._is_slopometry_hook_config(hook_config):
- return True
- return False
- except (json.JSONDecodeError, KeyError):
+ claude_settings = ClaudeSettings.load(settings_file)
+ return claude_settings.hooks.has_slopometry_hooks()
+ except (json.JSONDecodeError, ValueError):
return False
- def get_installation_status(self) -> dict[str, bool]:
+ def get_installation_status(self) -> dict[str, bool | str]:
"""Get installation status for global and local hooks."""
global_settings = Path.home() / ".claude" / "settings.json"
local_settings = Path.cwd() / ".claude" / "settings.json"
diff --git a/src/slopometry/solo/services/session_service.py b/src/slopometry/solo/services/session_service.py
index d1dd710..f3c1d80 100644
--- a/src/slopometry/solo/services/session_service.py
+++ b/src/slopometry/solo/services/session_service.py
@@ -1,5 +1,7 @@
"""Session management service for solo-leveler features."""
+from pathlib import Path
+
from slopometry.core.database import EventDatabase
from slopometry.core.models import SessionStatistics
@@ -14,6 +16,18 @@ def list_sessions(self, limit: int | None = None) -> list[str]:
"""List recent sessions, optionally limited."""
return self.db.list_sessions(limit=limit)
+ def list_sessions_by_repository(self, repository_path: Path, limit: int | None = None) -> list[str]:
+ """List sessions that occurred in a specific repository.
+
+ Args:
+ repository_path: The repository path to filter by
+ limit: Optional limit on number of sessions to return
+
+ Returns:
+ List of session IDs that started in this repository
+ """
+ return self.db.list_sessions_by_repository(repository_path, limit=limit)
+
def get_session_statistics(self, session_id: str) -> SessionStatistics | None:
"""Get detailed statistics for a session."""
return self.db.get_session_statistics(session_id)
diff --git a/src/slopometry/summoner/cli/commands.py b/src/slopometry/summoner/cli/commands.py
index 66886ca..5eb59d1 100644
--- a/src/slopometry/summoner/cli/commands.py
+++ b/src/slopometry/summoner/cli/commands.py
@@ -290,10 +290,16 @@ def _show_commit_range_baseline_comparison(repo_path: Path, start: str, end: str
default=4,
help="Maximum parallel workers for baseline computation (default: 4)",
)
+@click.option(
+ "--file-details",
+ is_flag=True,
+ help="Show detailed file lists (blind spots)",
+)
def current_impact(
repo_path: Path | None,
recompute_baseline: bool,
max_workers: int,
+ file_details: bool,
) -> None:
"""Analyze impact of uncommitted changes against repository baseline.
@@ -379,7 +385,7 @@ def current_impact(
except Exception as e:
logger.debug(f"Coverage analysis failed (optional): {e}")
- display_current_impact_analysis(analysis)
+ display_current_impact_analysis(analysis, show_file_details=file_details)
except Exception as e:
console.print(f"[red]Failed to analyze uncommitted changes: {e}[/red]")
@@ -708,10 +714,6 @@ def list_user_stories(limit: int) -> None:
try:
entries = user_story_service.get_user_story_entries(limit)
- if not entries:
- console.print("[yellow]No user story entries found[/yellow]")
- return
-
if not entries:
console.print("[yellow]No user story entries found[/yellow]")
return
@@ -1108,3 +1110,111 @@ def compare_projects(append_paths: tuple[Path, ...]) -> None:
sys.exit(0)
display_leaderboard(leaderboard)
+
+
+@summoner.command("save-compacts")
+@click.option(
+ "--output-dir",
+ "-o",
+ type=click.Path(path_type=Path),
+ default=".",
+ help="Output directory (default: current)",
+)
+@click.option(
+ "--repo-path",
+ "-r",
+ type=click.Path(exists=True, path_type=Path),
+ help="Repository path (default: current directory)",
+)
+def save_compacts(output_dir: Path, repo_path: Path | None) -> None:
+ """Save compact events from all sessions related to this project.
+
+ Finds transcripts from both SQLite database and Claude Code default
+ location (~/.claude/transcripts/), filters to those matching the
+ current project's working directory, and saves compact events.
+
+ Output structure:
+        .slopometry/compacts/<session_id>/
+            compact_<line_number>.json
+ """
+ from slopometry.core.compact_analyzer import (
+ CompactEventAnalyzer,
+ discover_transcripts,
+ find_compact_instructions,
+ )
+ from slopometry.core.database import EventDatabase
+ from slopometry.core.models import SavedCompact
+
+ if repo_path is None:
+ repo_path = Path.cwd()
+
+ db = EventDatabase()
+ analyzer = CompactEventAnalyzer()
+
+ console.print(f"[bold]Discovering transcripts for: {repo_path}[/bold]")
+ transcripts = discover_transcripts(repo_path, db)
+
+ if not transcripts:
+ console.print("[yellow]No transcripts found for this project[/yellow]")
+ return
+
+ console.print(f"Found {len(transcripts)} transcript(s)")
+
+ compacts_dir = output_dir / ".slopometry" / "compacts"
+ total_compacts = 0
+
+ for transcript in transcripts:
+ compacts = analyzer.analyze_transcript(transcript)
+ if not compacts:
+ continue
+
+ session_id = _extract_session_id_from_transcript(transcript)
+ session_dir = compacts_dir / session_id
+ session_dir.mkdir(parents=True, exist_ok=True)
+
+ for compact in compacts:
+ instructions = find_compact_instructions(transcript, compact.line_number)
+
+ saved = SavedCompact(
+ transcript_path=str(transcript),
+ line_number=compact.line_number,
+ timestamp=compact.timestamp,
+ trigger=compact.trigger,
+ pre_tokens=compact.pre_tokens,
+ summary_content=compact.summary_content,
+ instructions=instructions,
+ version=compact.version,
+ git_branch=compact.git_branch,
+ )
+
+ output_file = session_dir / f"compact_{compact.line_number}.json"
+ output_file.write_text(saved.model_dump_json(indent=2))
+ total_compacts += 1
+
+ trigger_label = f"[yellow]{compact.trigger}[/yellow]"
+ console.print(f" Saved compact at line {compact.line_number} ({trigger_label})")
+
+ console.print(f"[green]✓[/green] Session {session_id}: {len(compacts)} compact(s)")
+
+ if total_compacts > 0:
+ console.print(f"\n[bold green]Saved {total_compacts} compact(s) to {compacts_dir}[/bold green]")
+ else:
+ console.print("[yellow]No compact events found in any transcript[/yellow]")
+
+
+def _extract_session_id_from_transcript(transcript_path: Path) -> str:
+ """Extract session ID from transcript path or content."""
+ import json
+
+ try:
+ with open(transcript_path, encoding="utf-8") as f:
+ first_line = f.readline()
+ if first_line:
+ data = json.loads(first_line)
+ session_id = data.get("sessionId")
+ if session_id:
+ return session_id
+ except (OSError, json.JSONDecodeError) as e:
+ logger.warning(f"Failed to extract session ID from {transcript_path}: {e}")
+
+ return transcript_path.stem
diff --git a/src/slopometry/summoner/services/current_impact_service.py b/src/slopometry/summoner/services/current_impact_service.py
index 70698a9..bd5c997 100644
--- a/src/slopometry/summoner/services/current_impact_service.py
+++ b/src/slopometry/summoner/services/current_impact_service.py
@@ -9,6 +9,7 @@
from slopometry.core.models import (
ComplexityDelta,
CurrentChangesAnalysis,
+ ExtendedComplexityMetrics,
GalenMetrics,
RepoBaseline,
)
@@ -67,7 +68,6 @@ def analyze_uncommitted_changes(
coverage_analyzer = ContextCoverageAnalyzer(repo_path)
blind_spots = coverage_analyzer.get_affected_dependents(set(changed_files))
- filtered_coverage = None
filtered_coverage = None
try:
from slopometry.core.coverage_analyzer import CoverageAnalyzer
@@ -81,16 +81,13 @@ def analyze_uncommitted_changes(
if file_path in cov_result.file_coverage:
filtered_coverage[file_path] = cov_result.file_coverage[file_path]
except Exception:
- # Coverage analysis is optional
pass
- # Calculate token impact
blind_spot_tokens = 0
changed_files_tokens = 0
- # Helper to get token count for a file path
def get_token_count(path_str: str) -> int:
- # path_str is relative
+ """Get token count for a relative file path."""
return current_metrics.files_by_token_count.get(path_str, 0)
for file_path in changed_files:
@@ -101,7 +98,6 @@ def get_token_count(path_str: str) -> int:
complete_picture_context_size = changed_files_tokens + blind_spot_tokens
- # Calculate Galen metrics based on commit history token growth
galen_metrics = self._calculate_galen_metrics(baseline, current_metrics)
return CurrentChangesAnalysis(
@@ -123,7 +119,7 @@ def get_token_count(path_str: str) -> int:
def _calculate_galen_metrics(
self,
baseline: RepoBaseline,
- current_metrics,
+ current_metrics: ExtendedComplexityMetrics,
) -> GalenMetrics | None:
"""Calculate Galen productivity metrics from commit history token growth.
@@ -150,8 +146,8 @@ def _calculate_galen_metrics(
def _compute_delta(
self,
- baseline_metrics,
- current_metrics,
+ baseline_metrics: ExtendedComplexityMetrics,
+ current_metrics: ExtendedComplexityMetrics,
) -> ComplexityDelta:
"""Compute complexity delta between baseline and current metrics."""
return ComplexityDelta(
@@ -166,7 +162,3 @@ def _compute_delta(
avg_mi_change=current_metrics.average_mi - baseline_metrics.average_mi,
net_files_change=(current_metrics.total_files_analyzed - baseline_metrics.total_files_analyzed),
)
-
-
-# NOTE: Backwards compatibility alias for renamed service
-StagedImpactService = CurrentImpactService
diff --git a/tests/test_compact_analyzer.py b/tests/test_compact_analyzer.py
new file mode 100644
index 0000000..7e03362
--- /dev/null
+++ b/tests/test_compact_analyzer.py
@@ -0,0 +1,237 @@
+"""Tests for compact event analysis from Claude Code transcripts."""
+
+import json
+from pathlib import Path
+
+from slopometry.core.compact_analyzer import (
+ CompactEventAnalyzer,
+ analyze_transcript_compacts,
+ find_compact_instructions,
+)
+from slopometry.core.models import CompactEvent
+
+
+class TestCompactEventAnalyzer:
+ """Tests for CompactEventAnalyzer class."""
+
+ def test_analyze_transcript__finds_compact_events_in_fixture(self) -> None:
+ """Test that analyzer finds compact events in the real transcript fixture."""
+ fixture_path = Path(__file__).parent / "fixtures" / "transcript.jsonl"
+ assert fixture_path.exists(), "Transcript fixture required at tests/fixtures/transcript.jsonl"
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(fixture_path)
+
+ assert len(compacts) >= 1
+ assert all(isinstance(c, CompactEvent) for c in compacts)
+
+ def test_analyze_transcript__extracts_correct_metadata(self) -> None:
+ """Test that analyzer extracts correct metadata from compact events."""
+ fixture_path = Path(__file__).parent / "fixtures" / "transcript.jsonl"
+ assert fixture_path.exists(), "Transcript fixture required at tests/fixtures/transcript.jsonl"
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(fixture_path)
+
+ assert len(compacts) >= 1
+ first_compact = compacts[0]
+
+ assert first_compact.trigger == "auto"
+ assert first_compact.pre_tokens == 155317
+ assert first_compact.line_number == 398
+ assert first_compact.uuid == "947c352a-de46-478b-aadd-16ba1db38bbb"
+ assert "This session is being continued" in first_compact.summary_content
+ assert first_compact.version == "2.0.65"
+ assert first_compact.git_branch == "opinionated-metrics"
+
+ def test_analyze_transcript__handles_missing_file(self) -> None:
+ """Test that analyzer handles missing file gracefully."""
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(Path("/nonexistent/path/transcript.jsonl"))
+
+ assert compacts == []
+
+ def test_analyze_transcript__handles_empty_file(self, tmp_path: Path) -> None:
+ """Test that analyzer handles empty file gracefully."""
+ empty_file = tmp_path / "empty.jsonl"
+ empty_file.write_text("")
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(empty_file)
+
+ assert compacts == []
+
+ def test_analyze_transcript__handles_malformed_json(self, tmp_path: Path) -> None:
+ """Test that analyzer handles malformed JSON lines gracefully."""
+ malformed_file = tmp_path / "malformed.jsonl"
+ malformed_file.write_text("not valid json\n{also: invalid}")
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(malformed_file)
+
+ assert compacts == []
+
+ def test_analyze_transcript__parses_compact_boundary_and_summary_pair(self, tmp_path: Path) -> None:
+ """Test that analyzer correctly pairs compact_boundary with isCompactSummary."""
+ transcript_file = tmp_path / "transcript.jsonl"
+
+ boundary_event = {
+ "type": "system",
+ "subtype": "compact_boundary",
+ "content": "Conversation compacted",
+ "timestamp": "2025-12-12T14:31:13.441Z",
+ "uuid": "test-uuid-123",
+ "compactMetadata": {"trigger": "manual", "preTokens": 50000},
+ }
+ summary_event = {
+ "type": "user",
+ "parentUuid": "test-uuid-123",
+ "isCompactSummary": True,
+ "message": {"content": "Summary of previous conversation..."},
+ "timestamp": "2025-12-12T14:31:13.442Z",
+ }
+
+ with open(transcript_file, "w") as f:
+ f.write(json.dumps(boundary_event) + "\n")
+ f.write(json.dumps(summary_event) + "\n")
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(transcript_file)
+
+ assert len(compacts) == 1
+ compact = compacts[0]
+ assert compact.trigger == "manual"
+ assert compact.pre_tokens == 50000
+ assert compact.line_number == 1
+ assert compact.uuid == "test-uuid-123"
+ assert compact.summary_content == "Summary of previous conversation..."
+
+ def test_analyze_transcript__ignores_orphan_boundary(self, tmp_path: Path) -> None:
+ """Test that analyzer ignores compact_boundary without matching summary."""
+ transcript_file = tmp_path / "transcript.jsonl"
+
+ boundary_event = {
+ "type": "system",
+ "subtype": "compact_boundary",
+ "content": "Conversation compacted",
+ "timestamp": "2025-12-12T14:31:13.441Z",
+ "uuid": "orphan-uuid",
+ "compactMetadata": {"trigger": "auto", "preTokens": 10000},
+ }
+
+ with open(transcript_file, "w") as f:
+ f.write(json.dumps(boundary_event) + "\n")
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(transcript_file)
+
+ assert compacts == []
+
+ def test_analyze_transcript__handles_multiple_compacts(self, tmp_path: Path) -> None:
+ """Test that analyzer finds multiple compact events."""
+ transcript_file = tmp_path / "transcript.jsonl"
+
+ events = []
+ for i in range(3):
+ boundary = {
+ "type": "system",
+ "subtype": "compact_boundary",
+ "timestamp": f"2025-12-12T14:3{i}:00.000Z",
+ "uuid": f"uuid-{i}",
+ "compactMetadata": {"trigger": "auto", "preTokens": 50000 * (i + 1)},
+ }
+ summary = {
+ "type": "user",
+ "parentUuid": f"uuid-{i}",
+ "isCompactSummary": True,
+ "message": {"content": f"Summary {i}"},
+ "timestamp": f"2025-12-12T14:3{i}:01.000Z",
+ }
+ events.extend([boundary, summary])
+
+ with open(transcript_file, "w") as f:
+ for event in events:
+ f.write(json.dumps(event) + "\n")
+
+ analyzer = CompactEventAnalyzer()
+ compacts = analyzer.analyze_transcript(transcript_file)
+
+ assert len(compacts) == 3
+ assert compacts[0].pre_tokens == 50000
+ assert compacts[1].pre_tokens == 100000
+ assert compacts[2].pre_tokens == 150000
+
+
+class TestFindCompactInstructions:
+ """Tests for find_compact_instructions function."""
+
+ def test_find_compact_instructions__finds_compact_command(self, tmp_path: Path) -> None:
+ """Test that function finds /compact command before compact event."""
+ transcript_file = tmp_path / "transcript.jsonl"
+
+ events = [
+ {"type": "user", "message": {"content": "Let's fix this bug"}},
+ {"type": "assistant", "message": {"content": "Working on it..."}},
+ {
+ "type": "user",
+ "message": {"content": "/compact please summarize what we've done"},
+ },
+ {
+ "type": "system",
+ "subtype": "compact_boundary",
+ "uuid": "uuid-1",
+ "compactMetadata": {"trigger": "manual"},
+ },
+ ]
+
+ with open(transcript_file, "w") as f:
+ for event in events:
+ f.write(json.dumps(event) + "\n")
+
+ instructions = find_compact_instructions(transcript_file, 4)
+
+ assert instructions is not None
+ assert "/compact" in instructions.lower()
+ assert "summarize" in instructions
+
+ def test_find_compact_instructions__returns_none_for_auto_compact(self, tmp_path: Path) -> None:
+ """Test that function returns None when no /compact command found."""
+ transcript_file = tmp_path / "transcript.jsonl"
+
+ events = [
+ {"type": "user", "message": {"content": "Regular message"}},
+ {"type": "assistant", "message": {"content": "Response"}},
+ {
+ "type": "system",
+ "subtype": "compact_boundary",
+ "uuid": "uuid-1",
+ "compactMetadata": {"trigger": "auto"},
+ },
+ ]
+
+ with open(transcript_file, "w") as f:
+ for event in events:
+ f.write(json.dumps(event) + "\n")
+
+ instructions = find_compact_instructions(transcript_file, 3)
+
+ assert instructions is None
+
+ def test_find_compact_instructions__handles_missing_file(self) -> None:
+ """Test that function handles missing file gracefully."""
+ instructions = find_compact_instructions(Path("/nonexistent/transcript.jsonl"), 10)
+ assert instructions is None
+
+
+class TestAnalyzeTranscriptCompactsConvenience:
+ """Tests for analyze_transcript_compacts convenience function."""
+
+ def test_analyze_transcript_compacts__works_with_fixture(self) -> None:
+ """Test convenience function works with real fixture."""
+ fixture_path = Path(__file__).parent / "fixtures" / "transcript.jsonl"
+ assert fixture_path.exists(), "Transcript fixture required at tests/fixtures/transcript.jsonl"
+
+ compacts = analyze_transcript_compacts(fixture_path)
+
+ assert len(compacts) >= 1
+ assert all(isinstance(c, CompactEvent) for c in compacts)
diff --git a/tests/test_database.py b/tests/test_database.py
index d56bdfc..920dd69 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -5,10 +5,10 @@
from pathlib import Path
from slopometry.core.database import EventDatabase
-from slopometry.core.models import LeaderboardEntry, UserStoryEntry
+from slopometry.core.models import HookEvent, HookEventType, LeaderboardEntry, ToolType, UserStoryEntry
-def test_user_story_export_functionality():
+def test_user_story_export_functionality() -> None:
"""Test exporting user stories with existing or minimal test data."""
db = EventDatabase()
@@ -67,7 +67,7 @@ def test_user_story_export_functionality():
output_path.unlink()
-def test_user_story_stats():
+def test_user_story_stats() -> None:
"""Test user story statistics calculation."""
db = EventDatabase()
@@ -80,7 +80,7 @@ def test_user_story_stats():
assert "rating_distribution" in stats
-def test_user_story_generation_cli_integration():
+def test_user_story_generation_cli_integration() -> None:
"""Test that the CLI command for generating user story entries works.
Note: Does not run the actual command as it requires LLM access.
@@ -98,7 +98,7 @@ def test_user_story_generation_cli_integration():
assert "--head-commit" in result.output
-def test_leaderboard_upsert__updates_existing_project_on_new_commit():
+def test_leaderboard_upsert__updates_existing_project_on_new_commit() -> None:
"""Test that saving a leaderboard entry with same project_path but different commit updates the entry."""
with tempfile.TemporaryDirectory() as tmp_dir:
db = EventDatabase(db_path=Path(tmp_dir) / "test.db")
@@ -147,3 +147,97 @@ def test_leaderboard_upsert__updates_existing_project_on_new_commit():
assert leaderboard[0].commit_sha_full == "def5678901234"
assert leaderboard[0].qpe_score == 0.8
assert leaderboard[0].measured_at == datetime(2024, 6, 1)
+
+
+def test_list_sessions_by_repository__filters_correctly() -> None:
+ """Sessions should be filtered by working directory."""
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ db = EventDatabase(db_path=Path(tmp_dir) / "test.db")
+
+ # Session 1 - in repo A
+ db.save_event(
+ HookEvent(
+ session_id="session-repo-a",
+ event_type=HookEventType.PRE_TOOL_USE,
+ sequence_number=1,
+ working_directory="/path/to/repo-a",
+ tool_name="Read",
+ tool_type=ToolType.READ,
+ )
+ )
+
+ # Session 2 - in repo B
+ db.save_event(
+ HookEvent(
+ session_id="session-repo-b",
+ event_type=HookEventType.PRE_TOOL_USE,
+ sequence_number=1,
+ working_directory="/path/to/repo-b",
+ tool_name="Read",
+ tool_type=ToolType.READ,
+ )
+ )
+
+ # Session 3 - also in repo A
+ db.save_event(
+ HookEvent(
+ session_id="session-repo-a-2",
+ event_type=HookEventType.PRE_TOOL_USE,
+ sequence_number=1,
+ working_directory="/path/to/repo-a",
+ tool_name="Write",
+ tool_type=ToolType.WRITE,
+ )
+ )
+
+ sessions = db.list_sessions_by_repository(Path("/path/to/repo-a"))
+
+ assert len(sessions) == 2
+ assert "session-repo-a" in sessions
+ assert "session-repo-a-2" in sessions
+ assert "session-repo-b" not in sessions
+
+
+def test_list_sessions_by_repository__returns_empty_for_unknown_repo() -> None:
+ """Unknown repository should return empty list."""
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ db = EventDatabase(db_path=Path(tmp_dir) / "test.db")
+
+ # Create a session in a known repo
+ db.save_event(
+ HookEvent(
+ session_id="session-known",
+ event_type=HookEventType.PRE_TOOL_USE,
+ sequence_number=1,
+ working_directory="/path/to/known-repo",
+ tool_name="Read",
+ tool_type=ToolType.READ,
+ )
+ )
+
+ sessions = db.list_sessions_by_repository(Path("/path/to/unknown"))
+
+ assert sessions == []
+
+
+def test_list_sessions_by_repository__respects_limit() -> None:
+ """Session list should respect the limit parameter."""
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ db = EventDatabase(db_path=Path(tmp_dir) / "test.db")
+
+ # Create 3 sessions in the same repo
+ for i in range(3):
+ db.save_event(
+ HookEvent(
+ session_id=f"session-{i}",
+ event_type=HookEventType.PRE_TOOL_USE,
+ sequence_number=1,
+ working_directory="/path/to/repo",
+ tool_name="Read",
+ tool_type=ToolType.READ,
+ )
+ )
+
+ sessions = db.list_sessions_by_repository(Path("/path/to/repo"), limit=2)
+
+ assert len(sessions) == 2
diff --git a/tests/test_hook_service.py b/tests/test_hook_service.py
index f5d65d7..dcec14e 100644
--- a/tests/test_hook_service.py
+++ b/tests/test_hook_service.py
@@ -146,3 +146,74 @@ def test_install_hooks__updates_gitignore_for_local_install(tmp_path, monkeypatc
assert ".slopometry/" in message
assert (tmp_path / ".gitignore").exists()
assert ".slopometry/" in (tmp_path / ".gitignore").read_text()
+
+
+def test_install_hooks__preserves_unknown_fields(tmp_path, monkeypatch):
+ """Verify install preserves unknown top-level fields and unknown hook types."""
+ monkeypatch.chdir(tmp_path)
+ service = HookService()
+
+ settings_dir = tmp_path / ".claude"
+ settings_dir.mkdir()
+ settings_file = settings_dir / "settings.json"
+
+ initial_data = {
+ "hooks": {
+ "PreToolUse": [{"matcher": ".*", "hooks": [{"command": "echo 'user hook'"}]}],
+ "UnknownHookType": [{"matcher": "special", "hooks": [{"command": "custom"}]}],
+ },
+ "permissions": {"allow": ["Bash(echo:*)"], "deny": ["Bash(rm:*)"]},
+ "unknown_top_level": "should_be_preserved",
+ "another_unknown": {"nested": "value"},
+ }
+ with open(settings_file, "w") as f:
+ json.dump(initial_data, f)
+
+ service.install_hooks(global_=False)
+
+ with open(settings_file) as f:
+ data = json.load(f)
+
+ assert data["unknown_top_level"] == "should_be_preserved"
+ assert data["another_unknown"] == {"nested": "value"}
+ assert "UnknownHookType" in data["hooks"]
+ assert data["hooks"]["UnknownHookType"] == [{"matcher": "special", "hooks": [{"command": "custom"}]}]
+ assert "deny" in data["permissions"]
+ assert data["permissions"]["deny"] == ["Bash(rm:*)"]
+
+
+def test_uninstall_hooks__preserves_unknown_fields(tmp_path, monkeypatch):
+ """Verify uninstall preserves unknown fields."""
+ monkeypatch.chdir(tmp_path)
+ service = HookService()
+
+ settings_dir = tmp_path / ".claude"
+ settings_dir.mkdir()
+ settings_file = settings_dir / "settings.json"
+
+ initial_data = {
+ "hooks": {
+ "PreToolUse": [
+ {"matcher": ".*", "hooks": [{"command": "slopometry hook-pre-tool-use"}]},
+ {"matcher": ".*", "hooks": [{"command": "echo 'user hook'"}]},
+ ],
+ "CustomHookType": [{"hooks": [{"command": "special"}]}],
+ },
+ "custom_setting": True,
+ }
+ with open(settings_file, "w") as f:
+ json.dump(initial_data, f)
+
+ service.uninstall_hooks(global_=False)
+
+ with open(settings_file) as f:
+ data = json.load(f)
+
+ assert data["custom_setting"] is True
+ assert "CustomHookType" in data["hooks"]
+ assert data["hooks"]["CustomHookType"] == [{"hooks": [{"command": "special"}]}]
+ assert any(
+ "echo 'user hook'" in h.get("command", "")
+ for item in data["hooks"]["PreToolUse"]
+ for h in item.get("hooks", [])
+ )
diff --git a/tests/test_models.py b/tests/test_models.py
index f109a19..706343c 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -9,7 +9,7 @@
class TestExtendedComplexityMetrics:
"""Test the extended complexity metrics model."""
- def test_model_creation_without_required_fields__raises_validation_error(self):
+ def test_model_creation_without_required_fields__raises_validation_error(self) -> None:
"""Test that ValidationError is raised when required Halstead fields are missing."""
with pytest.raises(ValidationError) as exc_info:
ExtendedComplexityMetrics()
@@ -26,7 +26,7 @@ def test_model_creation_without_required_fields__raises_validation_error(self):
assert "total_mi" in missing_fields
assert "average_mi" in missing_fields
- def test_model_creation_with_values__creates_metrics_when_values_provided(self):
+ def test_model_creation_with_values__creates_metrics_when_values_provided(self) -> None:
"""Test creating model with specific values when values provided."""
metrics = ExtendedComplexityMetrics(
total_complexity=150,
@@ -53,7 +53,7 @@ def test_model_creation_with_values__creates_metrics_when_values_provided(self):
class TestUserStoryStatistics:
"""Test the user story statistics model."""
- def test_model_creation_with_values__creates_statistics_when_values_provided(self):
+ def test_model_creation_with_values__creates_statistics_when_values_provided(self) -> None:
"""Test creating statistics model with specific values when values provided."""
stats = UserStoryStatistics(
total_entries=25,
@@ -73,7 +73,7 @@ def test_model_creation_with_values__creates_statistics_when_values_provided(sel
class TestUserStoryDisplayData:
"""Test the user story display data model."""
- def test_model_creation__creates_display_data_when_values_provided(self):
+ def test_model_creation__creates_display_data_when_values_provided(self) -> None:
"""Test creating display data model when values provided."""
display_data = UserStoryDisplayData(
entry_id="abc12345",
diff --git a/tests/test_plan_analyzer.py b/tests/test_plan_analyzer.py
index cb9f232..efb00be 100644
--- a/tests/test_plan_analyzer.py
+++ b/tests/test_plan_analyzer.py
@@ -2,7 +2,7 @@
from slopometry.core.plan_analyzer import PlanAnalyzer
-def test_increment_event_count__task_explore_increments_search_metrics():
+def test_increment_event_count__task_explore_increments_search_metrics() -> None:
"""Verify that a TASK tool with 'Explore' subagent type is counted as a search event."""
analyzer = PlanAnalyzer()
@@ -20,7 +20,7 @@ def test_increment_event_count__task_explore_increments_search_metrics():
assert analyzer.events_since_last_todo == 1
-def test_increment_event_count__task_plan_increments_implementation_metrics():
+def test_increment_event_count__task_plan_increments_implementation_metrics() -> None:
"""Verify that a TASK tool with 'Plan' subagent type is counted as an implementation event."""
analyzer = PlanAnalyzer()
@@ -38,7 +38,7 @@ def test_increment_event_count__task_plan_increments_implementation_metrics():
assert analyzer.events_since_last_todo == 1
-def test_increment_event_count__task_unknown_defaults_to_implementation():
+def test_increment_event_count__task_unknown_defaults_to_implementation() -> None:
"""Verify that a TASK tool with unknown or missing subagent type defaults to implementation."""
analyzer = PlanAnalyzer()
@@ -54,7 +54,7 @@ def test_increment_event_count__task_unknown_defaults_to_implementation():
assert analyzer.search_events_since_last_todo == 0
-def test_increment_event_count__standard_search_tools_still_count_as_search():
+def test_increment_event_count__standard_search_tools_still_count_as_search() -> None:
"""Verify that standard search tools (like READ) are still counted as search."""
analyzer = PlanAnalyzer()
@@ -64,7 +64,7 @@ def test_increment_event_count__standard_search_tools_still_count_as_search():
assert analyzer.implementation_events_since_last_todo == 0
-def test_increment_event_count__standard_implementation_tools_still_count_as_implementation():
+def test_increment_event_count__standard_implementation_tools_still_count_as_implementation() -> None:
"""Verify that standard implementation tools (like WRITE) are still counted as implementation."""
analyzer = PlanAnalyzer()
@@ -72,3 +72,111 @@ def test_increment_event_count__standard_implementation_tools_still_count_as_imp
assert analyzer.search_events_since_last_todo == 0
assert analyzer.implementation_events_since_last_todo == 1
+
+
+def test_analyze_write_event__detects_plan_file() -> None:
+ """Write to ~/.claude/plans/*.md should be tracked as a plan file."""
+ analyzer = PlanAnalyzer()
+
+ tool_input = {"file_path": "/home/user/.claude/plans/zany-strolling-eich.md"}
+ analyzer.analyze_write_event(tool_input)
+
+ evolution = analyzer.get_plan_evolution()
+ assert evolution.plan_files_created == 1
+ assert "/home/user/.claude/plans/zany-strolling-eich.md" in evolution.plan_file_paths
+
+
+def test_analyze_write_event__ignores_non_plan_file() -> None:
+ """Write to regular files should not be tracked as plan files."""
+ analyzer = PlanAnalyzer()
+
+ tool_input = {"file_path": "/home/user/project/src/main.py"}
+ analyzer.analyze_write_event(tool_input)
+
+ evolution = analyzer.get_plan_evolution()
+ assert evolution.plan_files_created == 0
+ assert len(evolution.plan_file_paths) == 0
+
+
+def test_analyze_write_event__handles_windows_paths() -> None:
+ """Write to .claude\\plans\\ on Windows should be tracked."""
+ analyzer = PlanAnalyzer()
+
+ tool_input = {"file_path": "C:\\Users\\test\\.claude\\plans\\my-plan.md"}
+ analyzer.analyze_write_event(tool_input)
+
+ evolution = analyzer.get_plan_evolution()
+ assert evolution.plan_files_created == 1
+
+
+def test_analyze_write_event__deduplicates_same_file() -> None:
+ """Multiple writes to the same plan file should count as one."""
+ analyzer = PlanAnalyzer()
+
+ tool_input = {"file_path": "/home/user/.claude/plans/test-plan.md"}
+ analyzer.analyze_write_event(tool_input)
+ analyzer.analyze_write_event(tool_input) # Same file again
+
+ evolution = analyzer.get_plan_evolution()
+ assert evolution.plan_files_created == 1
+ assert len(evolution.plan_file_paths) == 1
+
+
+def test_analyze_write_event__handles_empty_file_path() -> None:
+ """Missing or empty file_path should not cause errors."""
+ analyzer = PlanAnalyzer()
+
+ # Empty string
+ analyzer.analyze_write_event({"file_path": ""})
+ # Missing key
+ analyzer.analyze_write_event({})
+
+ evolution = analyzer.get_plan_evolution()
+ assert evolution.plan_files_created == 0
+
+
+def test_get_plan_evolution__includes_final_todos() -> None:
+ """Verify that final_todos contains the todo items from the last TodoWrite."""
+ from datetime import datetime
+
+ analyzer = PlanAnalyzer()
+
+ # Simulate a TodoWrite event with multiple todos
+ tool_input = {
+ "todos": [
+ {"content": "First task", "status": "completed", "activeForm": "Completing first task"},
+ {"content": "Second task", "status": "in_progress", "activeForm": "Working on second task"},
+ {"content": "Third task", "status": "pending", "activeForm": "Pending third task"},
+ ]
+ }
+ analyzer.analyze_todo_write_event(tool_input, datetime.now())
+
+ evolution = analyzer.get_plan_evolution()
+
+ # Verify final_todos is populated
+ assert len(evolution.final_todos) == 3
+
+ # Verify each todo has correct content and status
+ contents = {todo.content for todo in evolution.final_todos}
+ assert "First task" in contents
+ assert "Second task" in contents
+ assert "Third task" in contents
+
+ # Verify statuses are preserved
+ status_by_content = {todo.content: todo.status for todo in evolution.final_todos}
+ assert status_by_content["First task"] == "completed"
+ assert status_by_content["Second task"] == "in_progress"
+ assert status_by_content["Third task"] == "pending"
+
+
+def test_get_plan_evolution__final_todos_empty_when_no_todowrite() -> None:
+ """Verify final_todos is empty when no TodoWrite events occurred."""
+ analyzer = PlanAnalyzer()
+
+ # Only add a plan file, no TodoWrite events
+ analyzer.analyze_write_event({"file_path": "/home/user/.claude/plans/test.md"})
+
+ evolution = analyzer.get_plan_evolution()
+
+ assert evolution.final_todos == []
+ assert evolution.plan_files_created == 1
diff --git a/tests/test_python_feature_analyzer.py b/tests/test_python_feature_analyzer.py
index 61d4ee4..27cb200 100644
--- a/tests/test_python_feature_analyzer.py
+++ b/tests/test_python_feature_analyzer.py
@@ -756,6 +756,76 @@ def test_visit_try__ignores_except_with_multiple_statements(self) -> None:
assert visitor.swallowed_exceptions == 0
+ def test_visit_try__ignores_except_with_logger_call(self) -> None:
+ """Test that except block with logger.warning() is not flagged."""
+ code = """
+try:
+ risky()
+except Exception:
+ logger.warning("Something went wrong")
+"""
+ tree = ast.parse(code)
+ visitor = FeatureVisitor()
+ visitor.visit(tree)
+
+ assert visitor.swallowed_exceptions == 0
+
+ def test_visit_try__ignores_except_with_logging_module(self) -> None:
+ """Test that except block with logging.info() is not flagged."""
+ code = """
+try:
+ risky()
+except Exception:
+ logging.info("Caught exception")
+"""
+ tree = ast.parse(code)
+ visitor = FeatureVisitor()
+ visitor.visit(tree)
+
+ assert visitor.swallowed_exceptions == 0
+
+ def test_visit_try__ignores_except_with_print(self) -> None:
+ """Test that except block with print() is not flagged."""
+ code = """
+try:
+ risky()
+except Exception:
+ print("Error occurred")
+"""
+ tree = ast.parse(code)
+ visitor = FeatureVisitor()
+ visitor.visit(tree)
+
+ assert visitor.swallowed_exceptions == 0
+
+ def test_visit_try__ignores_except_with_console_print(self) -> None:
+ """Test that except block with console.print() is not flagged."""
+ code = """
+try:
+ risky()
+except Exception:
+ console.print("Error occurred")
+"""
+ tree = ast.parse(code)
+ visitor = FeatureVisitor()
+ visitor.visit(tree)
+
+ assert visitor.swallowed_exceptions == 0
+
+ def test_visit_try__ignores_except_with_self_logger(self) -> None:
+ """Test that except block with self.logger.error() is not flagged."""
+ code = """
+try:
+ risky()
+except Exception:
+ self.logger.error("Error occurred")
+"""
+ tree = ast.parse(code)
+ visitor = FeatureVisitor()
+ visitor.visit(tree)
+
+ assert visitor.swallowed_exceptions == 0
+
class TestTypeIgnoreDetection:
"""Tests for type: ignore comment detection."""
diff --git a/tests/test_save_transcript.py b/tests/test_save_transcript.py
index 11f9098..187caf8 100644
--- a/tests/test_save_transcript.py
+++ b/tests/test_save_transcript.py
@@ -6,10 +6,9 @@
from click.testing import CliRunner
-from slopometry.core.models import SessionStatistics
+from slopometry.core.models import PlanEvolution, SessionStatistics, TodoItem
from slopometry.solo.cli.commands import (
_find_plan_names_from_transcript,
- _find_session_todos,
save_transcript,
)
@@ -17,7 +16,7 @@
class TestFindPlanNamesFromTranscript:
"""Tests for _find_plan_names_from_transcript helper."""
- def test_find_plan_names__extracts_plan_names_from_transcript(self, tmp_path):
+ def test_find_plan_names__extracts_plan_names_from_transcript(self, tmp_path) -> None:
"""Test extracting plan names from transcript content."""
transcript = tmp_path / "transcript.jsonl"
transcript.write_text(
@@ -29,7 +28,7 @@ def test_find_plan_names__extracts_plan_names_from_transcript(self, tmp_path):
assert set(result) == {"reactive-chasing-dawn.md", "elegant-leaping-panda.md"}
- def test_find_plan_names__returns_empty_for_no_plans(self, tmp_path):
+ def test_find_plan_names__returns_empty_for_no_plans(self, tmp_path) -> None:
"""Test returns empty list when no plans found."""
transcript = tmp_path / "transcript.jsonl"
transcript.write_text('{"message": "No plan references here"}\n')
@@ -38,7 +37,7 @@ def test_find_plan_names__returns_empty_for_no_plans(self, tmp_path):
assert result == []
- def test_find_plan_names__handles_missing_file(self, tmp_path):
+ def test_find_plan_names__handles_missing_file(self, tmp_path) -> None:
"""Test gracefully handles missing transcript file."""
missing_path = tmp_path / "nonexistent.jsonl"
@@ -46,7 +45,7 @@ def test_find_plan_names__handles_missing_file(self, tmp_path):
assert result == []
- def test_find_plan_names__deduplicates_plan_names(self, tmp_path):
+ def test_find_plan_names__deduplicates_plan_names(self, tmp_path) -> None:
"""Test that duplicate plan names are deduplicated."""
transcript = tmp_path / "transcript.jsonl"
transcript.write_text('{"message": "plans/same-plan.md"}\n{"message": "plans/same-plan.md again"}\n')
@@ -56,50 +55,10 @@ def test_find_plan_names__deduplicates_plan_names(self, tmp_path):
assert result == ["same-plan.md"]
-class TestFindSessionTodos:
- """Tests for _find_session_todos helper."""
-
- def test_find_session_todos__finds_matching_todos(self, tmp_path):
- """Test finding todos matching session ID pattern."""
- session_id = "abc123"
- todos_dir = tmp_path / ".claude" / "todos"
- todos_dir.mkdir(parents=True)
-
- # Create matching todo files
- (todos_dir / f"{session_id}-agent-{session_id}.json").write_text("[]")
- (todos_dir / f"{session_id}-agent-other.json").write_text("[]")
- # Non-matching file
- (todos_dir / "other-session-agent-xyz.json").write_text("[]")
-
- with patch.object(Path, "home", return_value=tmp_path):
- result = _find_session_todos(session_id)
-
- assert len(result) == 2
- assert all(session_id in str(p) for p in result)
-
- def test_find_session_todos__returns_empty_when_no_todos_dir(self, tmp_path):
- """Test returns empty list when todos directory doesn't exist."""
- with patch.object(Path, "home", return_value=tmp_path):
- result = _find_session_todos("any-session")
-
- assert result == []
-
- def test_find_session_todos__returns_empty_when_no_matches(self, tmp_path):
- """Test returns empty list when no matching todos found."""
- todos_dir = tmp_path / ".claude" / "todos"
- todos_dir.mkdir(parents=True)
- (todos_dir / "other-session-agent.json").write_text("[]")
-
- with patch.object(Path, "home", return_value=tmp_path):
- result = _find_session_todos("nonexistent-session")
-
- assert result == []
-
-
class TestSaveTranscript:
"""Test save-transcript command functionality."""
- def test_save_transcript__creates_session_directory_structure(self, tmp_path):
+ def test_save_transcript__creates_session_directory_structure(self, tmp_path) -> None:
"""Test creating .slopometry// directory structure."""
session_id = "test-session-123"
transcript_path = tmp_path / "transcript.jsonl"
@@ -118,7 +77,6 @@ def test_save_transcript__creates_session_directory_structure(self, tmp_path):
with (
patch("slopometry.solo.services.session_service.SessionService") as mock_service_class,
patch("slopometry.solo.cli.commands._find_plan_names_from_transcript", return_value=[]),
- patch("slopometry.solo.cli.commands._find_session_todos", return_value=[]),
):
mock_service = Mock()
mock_service_class.return_value = mock_service
@@ -136,7 +94,7 @@ def test_save_transcript__creates_session_directory_structure(self, tmp_path):
assert (session_dir / "transcript.jsonl").exists()
assert (session_dir / "transcript.jsonl").read_text() == '{"test": "data"}'
- def test_save_transcript__copies_plans_from_transcript_references(self, tmp_path):
+ def test_save_transcript__copies_plans_from_transcript_references(self, tmp_path) -> None:
"""Test copying plans referenced in transcript."""
session_id = "test-session-123"
transcript_path = tmp_path / "transcript.jsonl"
@@ -160,7 +118,6 @@ def test_save_transcript__copies_plans_from_transcript_references(self, tmp_path
with (
patch("slopometry.solo.services.session_service.SessionService") as mock_service_class,
patch.object(Path, "home", return_value=tmp_path),
- patch("slopometry.solo.cli.commands._find_session_todos", return_value=[]),
):
mock_service = Mock()
mock_service_class.return_value = mock_service
@@ -177,8 +134,10 @@ def test_save_transcript__copies_plans_from_transcript_references(self, tmp_path
assert copied_plan.exists()
assert copied_plan.read_text() == "# My Plan"
- def test_save_transcript__copies_todos_matching_session_id(self, tmp_path):
- """Test copying todos that match session ID pattern."""
+ def test_save_transcript__saves_final_todos_from_plan_evolution(self, tmp_path) -> None:
+ """Test saving final_todos.json from plan_evolution."""
+ import json
+
session_id = "test-session-123"
transcript_path = tmp_path / "transcript.jsonl"
transcript_path.write_text('{"test": "data"}')
@@ -186,22 +145,24 @@ def test_save_transcript__copies_todos_matching_session_id(self, tmp_path):
output_dir = tmp_path / "output"
output_dir.mkdir()
- # Create mock todo files
- todos_dir = tmp_path / ".claude" / "todos"
- todos_dir.mkdir(parents=True)
- todo_file = todos_dir / f"{session_id}-agent-{session_id}.json"
- todo_file.write_text('[{"task": "test"}]')
+ # Create mock stats with plan_evolution containing final_todos
+ mock_plan_evolution = PlanEvolution(
+ final_todos=[
+ TodoItem(content="Task 1", status="completed", activeForm="Completing task 1"),
+ TodoItem(content="Task 2", status="in_progress", activeForm="Working on task 2"),
+ ]
+ )
mock_stats = SessionStatistics(
session_id=session_id,
start_time=datetime.now(),
working_directory=str(tmp_path),
transcript_path=str(transcript_path),
+ plan_evolution=mock_plan_evolution,
)
with (
patch("slopometry.solo.services.session_service.SessionService") as mock_service_class,
- patch.object(Path, "home", return_value=tmp_path),
patch("slopometry.solo.cli.commands._find_plan_names_from_transcript", return_value=[]),
):
mock_service = Mock()
@@ -212,14 +173,20 @@ def test_save_transcript__copies_todos_matching_session_id(self, tmp_path):
result = runner.invoke(save_transcript, [session_id, "-o", str(output_dir)])
assert result.exit_code == 0
- assert f"Saved todo: {session_id}-agent-{session_id}.json" in result.output
+ assert "Saved 2 todos to: final_todos.json" in result.output
+
+ # Verify final_todos.json was created with correct content
+ todos_file = output_dir / ".slopometry" / session_id / "final_todos.json"
+ assert todos_file.exists()
- # Verify todo was copied
- copied_todo = output_dir / ".slopometry" / session_id / "todos" / f"{session_id}-agent-{session_id}.json"
- assert copied_todo.exists()
- assert copied_todo.read_text() == '[{"task": "test"}]'
+ saved_todos = json.loads(todos_file.read_text())
+ assert len(saved_todos) == 2
+ assert saved_todos[0]["content"] == "Task 1"
+ assert saved_todos[0]["status"] == "completed"
+ assert saved_todos[1]["content"] == "Task 2"
+ assert saved_todos[1]["status"] == "in_progress"
- def test_save_transcript__handles_missing_plans_gracefully(self, tmp_path):
+ def test_save_transcript__handles_missing_plans_gracefully(self, tmp_path) -> None:
"""Test graceful handling when referenced plan doesn't exist."""
session_id = "test-session-123"
transcript_path = tmp_path / "transcript.jsonl"
@@ -238,7 +205,6 @@ def test_save_transcript__handles_missing_plans_gracefully(self, tmp_path):
with (
patch("slopometry.solo.services.session_service.SessionService") as mock_service_class,
patch.object(Path, "home", return_value=tmp_path),
- patch("slopometry.solo.cli.commands._find_session_todos", return_value=[]),
):
mock_service = Mock()
mock_service_class.return_value = mock_service
@@ -253,7 +219,7 @@ def test_save_transcript__handles_missing_plans_gracefully(self, tmp_path):
# No plan saved message
assert "Saved plan:" not in result.output
- def test_save_transcript__shows_error_when_session_not_found(self):
+ def test_save_transcript__shows_error_when_session_not_found(self) -> None:
"""Test error handling when session doesn't exist."""
session_id = "non-existent"
@@ -268,7 +234,7 @@ def test_save_transcript__shows_error_when_session_not_found(self):
assert result.exit_code == 0
assert "No data found for session" in result.output
- def test_save_transcript__shows_error_when_no_transcript_path(self):
+ def test_save_transcript__shows_error_when_no_transcript_path(self) -> None:
"""Test error handling when session has no transcript path."""
session_id = "test-session"
mock_stats = SessionStatistics(
@@ -290,7 +256,7 @@ def test_save_transcript__shows_error_when_no_transcript_path(self):
assert "No transcript path found" in result.output
assert "older session" in result.output
- def test_save_transcript__uses_latest_session_when_no_id_provided(self, tmp_path):
+ def test_save_transcript__uses_latest_session_when_no_id_provided(self, tmp_path) -> None:
"""Test using latest session when no session ID is provided."""
session_id = "latest-session-456"
transcript_path = tmp_path / "transcript.jsonl"
@@ -307,7 +273,6 @@ def test_save_transcript__uses_latest_session_when_no_id_provided(self, tmp_path
with (
patch("slopometry.solo.services.session_service.SessionService") as mock_service_class,
patch("slopometry.solo.cli.commands._find_plan_names_from_transcript", return_value=[]),
- patch("slopometry.solo.cli.commands._find_session_todos", return_value=[]),
):
mock_service = Mock()
mock_service_class.return_value = mock_service
@@ -323,7 +288,7 @@ def test_save_transcript__uses_latest_session_when_no_id_provided(self, tmp_path
assert "Save transcript for this session?" in result.output
assert "Saved transcript to:" in result.output
- def test_save_transcript__skips_confirmation_with_yes_flag(self, tmp_path):
+ def test_save_transcript__skips_confirmation_with_yes_flag(self, tmp_path) -> None:
"""Test skipping confirmation with --yes flag."""
session_id = "latest-session-456"
transcript_path = tmp_path / "transcript.jsonl"
@@ -340,7 +305,6 @@ def test_save_transcript__skips_confirmation_with_yes_flag(self, tmp_path):
with (
patch("slopometry.solo.services.session_service.SessionService") as mock_service_class,
patch("slopometry.solo.cli.commands._find_plan_names_from_transcript", return_value=[]),
- patch("slopometry.solo.cli.commands._find_session_todos", return_value=[]),
):
mock_service = Mock()
mock_service_class.return_value = mock_service
@@ -354,7 +318,7 @@ def test_save_transcript__skips_confirmation_with_yes_flag(self, tmp_path):
assert "Save transcript for this session?" not in result.output
assert "Saved transcript to:" in result.output
- def test_save_transcript__shows_error_when_no_sessions_exist(self):
+ def test_save_transcript__shows_error_when_no_sessions_exist(self) -> None:
"""Test error handling when no sessions exist at all."""
with patch("slopometry.solo.services.session_service.SessionService") as mock_service_class:
mock_service = Mock()
@@ -367,7 +331,7 @@ def test_save_transcript__shows_error_when_no_sessions_exist(self):
assert result.exit_code == 0
assert "No sessions found" in result.output
- def test_save_transcript__cancels_when_user_declines_confirmation(self, tmp_path):
+ def test_save_transcript__cancels_when_user_declines_confirmation(self, tmp_path) -> None:
"""Test cancellation when user declines confirmation for latest session."""
session_id = "latest-session-456"
transcript_path = tmp_path / "transcript.jsonl"
diff --git a/uv.lock b/uv.lock
index edbe85d..4461daf 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1226,47 +1226,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/81/db/e655086b7f3a705df045bf0933bdd9c2f79bb3c97bfef1384598bb79a217/keyring-25.7.0-py3-none-any.whl", hash = "sha256:be4a0b195f149690c166e850609a477c532ddbfbaed96a404d4e43f8d5e2689f", size = 39160, upload-time = "2025-11-16T16:26:08.402Z" },
]
-[[package]]
-name = "librt"
-version = "0.7.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/93/e4/b59bdf1197fdf9888452ea4d2048cdad61aef85eb83e99dc52551d7fdc04/librt-0.7.4.tar.gz", hash = "sha256:3871af56c59864d5fd21d1ac001eb2fb3b140d52ba0454720f2e4a19812404ba", size = 145862, upload-time = "2025-12-15T16:52:43.862Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/fe/4d/46a53ccfbb39fd0b493fd4496eb76f3ebc15bb3e45d8c2e695a27587edf5/librt-0.7.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d44a1b1ba44cbd2fc3cb77992bef6d6fdb1028849824e1dd5e4d746e1f7f7f0b", size = 55745, upload-time = "2025-12-15T16:51:46.636Z" },
- { url = "https://files.pythonhosted.org/packages/7f/2b/3ac7f5212b1828bf4f979cf87f547db948d3e28421d7a430d4db23346ce4/librt-0.7.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c9cab4b3de1f55e6c30a84c8cee20e4d3b2476f4d547256694a1b0163da4fe32", size = 57166, upload-time = "2025-12-15T16:51:48.219Z" },
- { url = "https://files.pythonhosted.org/packages/e8/99/6523509097cbe25f363795f0c0d1c6a3746e30c2994e25b5aefdab119b21/librt-0.7.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2857c875f1edd1feef3c371fbf830a61b632fb4d1e57160bb1e6a3206e6abe67", size = 165833, upload-time = "2025-12-15T16:51:49.443Z" },
- { url = "https://files.pythonhosted.org/packages/fe/35/323611e59f8fe032649b4fb7e77f746f96eb7588fcbb31af26bae9630571/librt-0.7.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b370a77be0a16e1ad0270822c12c21462dc40496e891d3b0caf1617c8cc57e20", size = 174818, upload-time = "2025-12-15T16:51:51.015Z" },
- { url = "https://files.pythonhosted.org/packages/41/e6/40fb2bb21616c6e06b6a64022802228066e9a31618f493e03f6b9661548a/librt-0.7.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d05acd46b9a52087bfc50c59dfdf96a2c480a601e8898a44821c7fd676598f74", size = 189607, upload-time = "2025-12-15T16:51:52.671Z" },
- { url = "https://files.pythonhosted.org/packages/32/48/1b47c7d5d28b775941e739ed2bfe564b091c49201b9503514d69e4ed96d7/librt-0.7.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:70969229cb23d9c1a80e14225838d56e464dc71fa34c8342c954fc50e7516dee", size = 184585, upload-time = "2025-12-15T16:51:54.027Z" },
- { url = "https://files.pythonhosted.org/packages/75/a6/ee135dfb5d3b54d5d9001dbe483806229c6beac3ee2ba1092582b7efeb1b/librt-0.7.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4450c354b89dbb266730893862dbff06006c9ed5b06b6016d529b2bf644fc681", size = 178249, upload-time = "2025-12-15T16:51:55.248Z" },
- { url = "https://files.pythonhosted.org/packages/04/87/d5b84ec997338be26af982bcd6679be0c1db9a32faadab1cf4bb24f9e992/librt-0.7.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:adefe0d48ad35b90b6f361f6ff5a1bd95af80c17d18619c093c60a20e7a5b60c", size = 199851, upload-time = "2025-12-15T16:51:56.933Z" },
- { url = "https://files.pythonhosted.org/packages/86/63/ba1333bf48306fe398e3392a7427ce527f81b0b79d0d91618c4610ce9d15/librt-0.7.4-cp313-cp313-win32.whl", hash = "sha256:21ea710e96c1e050635700695095962a22ea420d4b3755a25e4909f2172b4ff2", size = 43249, upload-time = "2025-12-15T16:51:58.498Z" },
- { url = "https://files.pythonhosted.org/packages/f9/8a/de2c6df06cdfa9308c080e6b060fe192790b6a48a47320b215e860f0e98c/librt-0.7.4-cp313-cp313-win_amd64.whl", hash = "sha256:772e18696cf5a64afee908662fbcb1f907460ddc851336ee3a848ef7684c8e1e", size = 49417, upload-time = "2025-12-15T16:51:59.618Z" },
- { url = "https://files.pythonhosted.org/packages/31/66/8ee0949efc389691381ed686185e43536c20e7ad880c122dd1f31e65c658/librt-0.7.4-cp313-cp313-win_arm64.whl", hash = "sha256:52e34c6af84e12921748c8354aa6acf1912ca98ba60cdaa6920e34793f1a0788", size = 42824, upload-time = "2025-12-15T16:52:00.784Z" },
- { url = "https://files.pythonhosted.org/packages/74/81/6921e65c8708eb6636bbf383aa77e6c7dad33a598ed3b50c313306a2da9d/librt-0.7.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4f1ee004942eaaed6e06c087d93ebc1c67e9a293e5f6b9b5da558df6bf23dc5d", size = 55191, upload-time = "2025-12-15T16:52:01.97Z" },
- { url = "https://files.pythonhosted.org/packages/0d/d6/3eb864af8a8de8b39cc8dd2e9ded1823979a27795d72c4eea0afa8c26c9f/librt-0.7.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d854c6dc0f689bad7ed452d2a3ecff58029d80612d336a45b62c35e917f42d23", size = 56898, upload-time = "2025-12-15T16:52:03.356Z" },
- { url = "https://files.pythonhosted.org/packages/49/bc/b1d4c0711fdf79646225d576faee8747b8528a6ec1ceb6accfd89ade7102/librt-0.7.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a4f7339d9e445280f23d63dea842c0c77379c4a47471c538fc8feedab9d8d063", size = 163725, upload-time = "2025-12-15T16:52:04.572Z" },
- { url = "https://files.pythonhosted.org/packages/2c/08/61c41cd8f0a6a41fc99ea78a2205b88187e45ba9800792410ed62f033584/librt-0.7.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39003fc73f925e684f8521b2dbf34f61a5deb8a20a15dcf53e0d823190ce8848", size = 172469, upload-time = "2025-12-15T16:52:05.863Z" },
- { url = "https://files.pythonhosted.org/packages/8b/c7/4ee18b4d57f01444230bc18cf59103aeab8f8c0f45e84e0e540094df1df1/librt-0.7.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb15ee29d95875ad697d449fe6071b67f730f15a6961913a2b0205015ca0843", size = 186804, upload-time = "2025-12-15T16:52:07.192Z" },
- { url = "https://files.pythonhosted.org/packages/a1/af/009e8ba3fbf830c936842da048eda1b34b99329f402e49d88fafff6525d1/librt-0.7.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:02a69369862099e37d00765583052a99d6a68af7e19b887e1b78fee0146b755a", size = 181807, upload-time = "2025-12-15T16:52:08.554Z" },
- { url = "https://files.pythonhosted.org/packages/85/26/51ae25f813656a8b117c27a974f25e8c1e90abcd5a791ac685bf5b489a1b/librt-0.7.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ec72342cc4d62f38b25a94e28b9efefce41839aecdecf5e9627473ed04b7be16", size = 175595, upload-time = "2025-12-15T16:52:10.186Z" },
- { url = "https://files.pythonhosted.org/packages/48/93/36d6c71f830305f88996b15c8e017aa8d1e03e2e947b40b55bbf1a34cf24/librt-0.7.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:776dbb9bfa0fc5ce64234b446995d8d9f04badf64f544ca036bd6cff6f0732ce", size = 196504, upload-time = "2025-12-15T16:52:11.472Z" },
- { url = "https://files.pythonhosted.org/packages/08/11/8299e70862bb9d704735bf132c6be09c17b00fbc7cda0429a9df222fdc1b/librt-0.7.4-cp314-cp314-win32.whl", hash = "sha256:0f8cac84196d0ffcadf8469d9ded4d4e3a8b1c666095c2a291e22bf58e1e8a9f", size = 39738, upload-time = "2025-12-15T16:52:12.962Z" },
- { url = "https://files.pythonhosted.org/packages/54/d5/656b0126e4e0f8e2725cd2d2a1ec40f71f37f6f03f135a26b663c0e1a737/librt-0.7.4-cp314-cp314-win_amd64.whl", hash = "sha256:037f5cb6fe5abe23f1dc058054d50e9699fcc90d0677eee4e4f74a8677636a1a", size = 45976, upload-time = "2025-12-15T16:52:14.441Z" },
- { url = "https://files.pythonhosted.org/packages/60/86/465ff07b75c1067da8fa7f02913c4ead096ef106cfac97a977f763783bfb/librt-0.7.4-cp314-cp314-win_arm64.whl", hash = "sha256:a5deebb53d7a4d7e2e758a96befcd8edaaca0633ae71857995a0f16033289e44", size = 39073, upload-time = "2025-12-15T16:52:15.621Z" },
- { url = "https://files.pythonhosted.org/packages/b3/a0/24941f85960774a80d4b3c2aec651d7d980466da8101cae89e8b032a3e21/librt-0.7.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b4c25312c7f4e6ab35ab16211bdf819e6e4eddcba3b2ea632fb51c9a2a97e105", size = 57369, upload-time = "2025-12-15T16:52:16.782Z" },
- { url = "https://files.pythonhosted.org/packages/77/a0/ddb259cae86ab415786c1547d0fe1b40f04a7b089f564fd5c0242a3fafb2/librt-0.7.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:618b7459bb392bdf373f2327e477597fff8f9e6a1878fffc1b711c013d1b0da4", size = 59230, upload-time = "2025-12-15T16:52:18.259Z" },
- { url = "https://files.pythonhosted.org/packages/31/11/77823cb530ab8a0c6fac848ac65b745be446f6f301753b8990e8809080c9/librt-0.7.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1437c3f72a30c7047f16fd3e972ea58b90172c3c6ca309645c1c68984f05526a", size = 183869, upload-time = "2025-12-15T16:52:19.457Z" },
- { url = "https://files.pythonhosted.org/packages/a4/ce/157db3614cf3034b3f702ae5ba4fefda4686f11eea4b7b96542324a7a0e7/librt-0.7.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c96cb76f055b33308f6858b9b594618f1b46e147a4d03a4d7f0c449e304b9b95", size = 194606, upload-time = "2025-12-15T16:52:20.795Z" },
- { url = "https://files.pythonhosted.org/packages/30/ef/6ec4c7e3d6490f69a4fd2803516fa5334a848a4173eac26d8ee6507bff6e/librt-0.7.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28f990e6821204f516d09dc39966ef8b84556ffd648d5926c9a3f681e8de8906", size = 206776, upload-time = "2025-12-15T16:52:22.229Z" },
- { url = "https://files.pythonhosted.org/packages/ad/22/750b37bf549f60a4782ab80e9d1e9c44981374ab79a7ea68670159905918/librt-0.7.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc4aebecc79781a1b77d7d4e7d9fe080385a439e198d993b557b60f9117addaf", size = 203205, upload-time = "2025-12-15T16:52:23.603Z" },
- { url = "https://files.pythonhosted.org/packages/7a/87/2e8a0f584412a93df5faad46c5fa0a6825fdb5eba2ce482074b114877f44/librt-0.7.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:022cc673e69283a42621dd453e2407cf1647e77f8bd857d7ad7499901e62376f", size = 196696, upload-time = "2025-12-15T16:52:24.951Z" },
- { url = "https://files.pythonhosted.org/packages/e5/ca/7bf78fa950e43b564b7de52ceeb477fb211a11f5733227efa1591d05a307/librt-0.7.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2b3ca211ae8ea540569e9c513da052699b7b06928dcda61247cb4f318122bdb5", size = 217191, upload-time = "2025-12-15T16:52:26.194Z" },
- { url = "https://files.pythonhosted.org/packages/d6/49/3732b0e8424ae35ad5c3166d9dd5bcdae43ce98775e0867a716ff5868064/librt-0.7.4-cp314-cp314t-win32.whl", hash = "sha256:8a461f6456981d8c8e971ff5a55f2e34f4e60871e665d2f5fde23ee74dea4eeb", size = 40276, upload-time = "2025-12-15T16:52:27.54Z" },
- { url = "https://files.pythonhosted.org/packages/35/d6/d8823e01bd069934525fddb343189c008b39828a429b473fb20d67d5cd36/librt-0.7.4-cp314-cp314t-win_amd64.whl", hash = "sha256:721a7b125a817d60bf4924e1eec2a7867bfcf64cfc333045de1df7a0629e4481", size = 46772, upload-time = "2025-12-15T16:52:28.653Z" },
- { url = "https://files.pythonhosted.org/packages/36/e9/a0aa60f5322814dd084a89614e9e31139702e342f8459ad8af1984a18168/librt-0.7.4-cp314-cp314t-win_arm64.whl", hash = "sha256:76b2ba71265c0102d11458879b4d53ccd0b32b0164d14deb8d2b598a018e502f", size = 39724, upload-time = "2025-12-15T16:52:29.836Z" },
-]
-
[[package]]
name = "logfire"
version = "4.16.0"
@@ -1523,42 +1482,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6c/28/dd72947e59a6a8c856448a5e74da6201cb5502ddff644fbc790e4bd40b9a/multiprocess-0.70.18-py39-none-any.whl", hash = "sha256:e78ca805a72b1b810c690b6b4cc32579eba34f403094bbbae962b7b5bf9dfcb8", size = 133478, upload-time = "2025-04-17T03:11:26.253Z" },
]
-[[package]]
-name = "mypy"
-version = "1.19.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "librt", marker = "platform_python_implementation != 'PyPy'" },
- { name = "mypy-extensions" },
- { name = "pathspec" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = "sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/de/9f/a6abae693f7a0c697dbb435aac52e958dc8da44e92e08ba88d2e42326176/mypy-1.19.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e3157c7594ff2ef1634ee058aafc56a82db665c9438fd41b390f3bde1ab12250", size = 13201927, upload-time = "2025-12-15T05:02:29.138Z" },
- { url = "https://files.pythonhosted.org/packages/9a/a4/45c35ccf6e1c65afc23a069f50e2c66f46bd3798cbe0d680c12d12935caa/mypy-1.19.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdb12f69bcc02700c2b47e070238f42cb87f18c0bc1fc4cdb4fb2bc5fd7a3b8b", size = 12206730, upload-time = "2025-12-15T05:03:01.325Z" },
- { url = "https://files.pythonhosted.org/packages/05/bb/cdcf89678e26b187650512620eec8368fded4cfd99cfcb431e4cdfd19dec/mypy-1.19.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f859fb09d9583a985be9a493d5cfc5515b56b08f7447759a0c5deaf68d80506e", size = 12724581, upload-time = "2025-12-15T05:03:20.087Z" },
- { url = "https://files.pythonhosted.org/packages/d1/32/dd260d52babf67bad8e6770f8e1102021877ce0edea106e72df5626bb0ec/mypy-1.19.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9a6538e0415310aad77cb94004ca6482330fece18036b5f360b62c45814c4ef", size = 13616252, upload-time = "2025-12-15T05:02:49.036Z" },
- { url = "https://files.pythonhosted.org/packages/71/d0/5e60a9d2e3bd48432ae2b454b7ef2b62a960ab51292b1eda2a95edd78198/mypy-1.19.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:da4869fc5e7f62a88f3fe0b5c919d1d9f7ea3cef92d3689de2823fd27e40aa75", size = 13840848, upload-time = "2025-12-15T05:02:55.95Z" },
- { url = "https://files.pythonhosted.org/packages/98/76/d32051fa65ecf6cc8c6610956473abdc9b4c43301107476ac03559507843/mypy-1.19.1-cp313-cp313-win_amd64.whl", hash = "sha256:016f2246209095e8eda7538944daa1d60e1e8134d98983b9fc1e92c1fc0cb8dd", size = 10135510, upload-time = "2025-12-15T05:02:58.438Z" },
- { url = "https://files.pythonhosted.org/packages/de/eb/b83e75f4c820c4247a58580ef86fcd35165028f191e7e1ba57128c52782d/mypy-1.19.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:06e6170bd5836770e8104c8fdd58e5e725cfeb309f0a6c681a811f557e97eac1", size = 13199744, upload-time = "2025-12-15T05:03:30.823Z" },
- { url = "https://files.pythonhosted.org/packages/94/28/52785ab7bfa165f87fcbb61547a93f98bb20e7f82f90f165a1f69bce7b3d/mypy-1.19.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:804bd67b8054a85447c8954215a906d6eff9cabeabe493fb6334b24f4bfff718", size = 12215815, upload-time = "2025-12-15T05:02:42.323Z" },
- { url = "https://files.pythonhosted.org/packages/0a/c6/bdd60774a0dbfb05122e3e925f2e9e846c009e479dcec4821dad881f5b52/mypy-1.19.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:21761006a7f497cb0d4de3d8ef4ca70532256688b0523eee02baf9eec895e27b", size = 12740047, upload-time = "2025-12-15T05:03:33.168Z" },
- { url = "https://files.pythonhosted.org/packages/32/2a/66ba933fe6c76bd40d1fe916a83f04fed253152f451a877520b3c4a5e41e/mypy-1.19.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28902ee51f12e0f19e1e16fbe2f8f06b6637f482c459dd393efddd0ec7f82045", size = 13601998, upload-time = "2025-12-15T05:03:13.056Z" },
- { url = "https://files.pythonhosted.org/packages/e3/da/5055c63e377c5c2418760411fd6a63ee2b96cf95397259038756c042574f/mypy-1.19.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:481daf36a4c443332e2ae9c137dfee878fcea781a2e3f895d54bd3002a900957", size = 13807476, upload-time = "2025-12-15T05:03:17.977Z" },
- { url = "https://files.pythonhosted.org/packages/cd/09/4ebd873390a063176f06b0dbf1f7783dd87bd120eae7727fa4ae4179b685/mypy-1.19.1-cp314-cp314-win_amd64.whl", hash = "sha256:8bb5c6f6d043655e055be9b542aa5f3bdd30e4f3589163e85f93f3640060509f", size = 10281872, upload-time = "2025-12-15T05:03:05.549Z" },
- { url = "https://files.pythonhosted.org/packages/8d/f4/4ce9a05ce5ded1de3ec1c1d96cf9f9504a04e54ce0ed55cfa38619a32b8d/mypy-1.19.1-py3-none-any.whl", hash = "sha256:f1235f5ea01b7db5468d53ece6aaddf1ad0b88d9e7462b86ef96fe04995d7247", size = 2471239, upload-time = "2025-12-15T05:03:07.248Z" },
-]
-
-[[package]]
-name = "mypy-extensions"
-version = "1.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
-]
-
[[package]]
name = "nexus-rpc"
version = "1.2.0"
@@ -1857,15 +1780,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7d/eb/b6260b31b1a96386c0a880edebe26f89669098acea8e0318bff6adb378fd/pathable-0.4.4-py3-none-any.whl", hash = "sha256:5ae9e94793b6ef5a4cbe0a7ce9dbbefc1eec38df253763fd0aeeacf2762dbbc2", size = 9592, upload-time = "2025-01-10T18:43:11.88Z" },
]
-[[package]]
-name = "pathspec"
-version = "0.12.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
-]
-
[[package]]
name = "pathvalidate"
version = "3.3.1"
@@ -2836,7 +2750,7 @@ wheels = [
[[package]]
name = "slopometry"
-version = "20260108.post1"
+version = "20260113.post1"
source = { editable = "." }
dependencies = [
{ name = "click" },
@@ -2855,10 +2769,9 @@ dependencies = [
{ name = "toml" },
]
-[package.dev-dependencies]
+[package.optional-dependencies]
dev = [
{ name = "isort" },
- { name = "mypy" },
{ name = "pre-commit" },
{ name = "pyrefly" },
{ name = "pytest" },
@@ -2873,29 +2786,25 @@ requires-dist = [
{ name = "coverage", specifier = ">=7.0.0" },
{ name = "datasets", specifier = ">=2.14.0" },
{ name = "huggingface-hub", specifier = ">=0.20.0" },
+ { name = "isort", marker = "extra == 'dev'", specifier = ">=5.12.0" },
{ name = "pandas", specifier = ">=2.0.0" },
+ { name = "pre-commit", marker = "extra == 'dev'", specifier = ">=4.2.0" },
{ name = "pyarrow", specifier = ">=14.0.0" },
{ name = "pydantic", specifier = ">=2.0" },
{ name = "pydantic-ai", specifier = ">=1.33.0" },
{ name = "pydantic-settings", specifier = ">=2.0" },
+ { name = "pyrefly", marker = "extra == 'dev'", specifier = ">=0.46.0" },
+ { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.3.5" },
+ { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=4.1.0" },
{ name = "radon", specifier = ">=6.0.1" },
{ name = "rich", specifier = ">=13.0" },
+ { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.0.244" },
{ name = "sqlite-utils", specifier = ">=3.0" },
{ name = "tiktoken", specifier = ">=0.7.0" },
{ name = "toml", specifier = ">=0.10.2" },
+ { name = "types-toml", marker = "extra == 'dev'", specifier = ">=0.10.8.20240310" },
]
-
-[package.metadata.requires-dev]
-dev = [
- { name = "isort", specifier = ">=5.12.0" },
- { name = "mypy", specifier = ">=1.0.0" },
- { name = "pre-commit", specifier = ">=4.2.0" },
- { name = "pyrefly", specifier = ">=0.45.2" },
- { name = "pytest", specifier = ">=8.3.5" },
- { name = "pytest-cov", specifier = ">=4.1.0" },
- { name = "ruff", specifier = ">=0.0.244" },
- { name = "types-toml", specifier = ">=0.10.8.20240310" },
-]
+provides-extras = ["dev"]
[[package]]
name = "sniffio"