CHANGELOG.md (+47, -0)

@@ -61,6 +61,53 @@ The `--include-tests` flag now works end-to-end in `ckb impact diff`:
- Properly sets `IsTest` flag on references based on file path
- Filters test files from changed symbols when `--include-tests=false` (a rough sketch of the test-file check follows)
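
How `IsTest` gets derived from a file path isn't visible in this hunk, so here is a loose, hypothetical illustration (Go-only; the real classifier presumably covers more layouts and languages):

```go
package main

import (
	"fmt"
	"strings"
)

// isTestFile is a hypothetical path-based check, not the tool's actual code.
func isTestFile(path string) bool {
	return strings.HasSuffix(path, "_test.go") ||
		strings.Contains(path, "/testdata/")
}

func main() {
	fmt.Println(isTestFile("internal/audit/audit_test.go")) // true
	fmt.Println(isTestFile("internal/audit/analyzer.go"))   // false
}
```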

#### Dependency Cycle Detection (`findCycles`)
Detect circular dependencies in module, directory, or file dependency graphs using Tarjan's SCC algorithm:

```bash
# Via MCP
findCycles { "granularity": "directory", "targetPath": "internal/" }
```

- Uses Tarjan's strongly connected components to find real cycles
- Recommends which edge to break (lowest coupling cost)
- Severity classification by cycle size: ≥5 nodes = high, 3-4 = medium, 2 = low (see the sketch after this list)
- Available in `refactor` preset
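
Since the entry names the algorithm, a compact illustration may help. This is a minimal sketch, not CodeMCP's implementation: a Tarjan SCC pass over a string-keyed adjacency map, paired with the severity thresholds from the list above. The names `tarjanSCC` and `severity` are invented for the example.

```go
package main

import "fmt"

// tarjanSCC returns all strongly connected components of size >= 2 in a
// directed graph; each such component is a dependency cycle. Self-loops
// (size-1 cycles) are ignored by this simplification.
func tarjanSCC(graph map[string][]string) [][]string {
	index := map[string]int{}   // discovery order of each node
	lowlink := map[string]int{} // smallest index reachable from the node
	onStack := map[string]bool{}
	var stack []string
	var cycles [][]string
	next := 0

	var strongconnect func(v string)
	strongconnect = func(v string) {
		index[v], lowlink[v] = next, next
		next++
		stack = append(stack, v)
		onStack[v] = true

		for _, w := range graph[v] {
			if _, seen := index[w]; !seen {
				strongconnect(w)
				lowlink[v] = min(lowlink[v], lowlink[w])
			} else if onStack[w] {
				lowlink[v] = min(lowlink[v], index[w])
			}
		}

		if lowlink[v] == index[v] { // v roots an SCC: pop it off the stack
			var scc []string
			for {
				w := stack[len(stack)-1]
				stack = stack[:len(stack)-1]
				onStack[w] = false
				scc = append(scc, w)
				if w == v {
					break
				}
			}
			if len(scc) >= 2 {
				cycles = append(cycles, scc)
			}
		}
	}

	for v := range graph {
		if _, seen := index[v]; !seen {
			strongconnect(v)
		}
	}
	return cycles
}

// severity applies the thresholds above: >=5 high, 3-4 medium, 2 low.
func severity(cycleSize int) string {
	switch {
	case cycleSize >= 5:
		return "high"
	case cycleSize >= 3:
		return "medium"
	default:
		return "low"
	}
}

func main() {
	g := map[string][]string{
		"a": {"b"}, "b": {"c"}, "c": {"a"}, // 3-node cycle
		"d": {"a"},
	}
	for _, cycle := range tarjanSCC(g) {
		fmt.Println(cycle, severity(len(cycle))) // e.g. [c b a] medium
	}
}
```

(`min` here is the Go 1.21 builtin, also used elsewhere in this PR.)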

#### Move/Relocate Change Type
`prepareChange` and `planRefactor` now support `changeType: "move"` with a `targetPath` parameter:

```bash
prepareChange { "target": "internal/old/handler.go", "changeType": "move", "targetPath": "pkg/handler.go" }
```

- Scans all source files for import path references that need updating (sketched after this list)
- Detects target directory conflicts (existing files with same name)
- Generates move-specific refactoring steps in `planRefactor`
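
What "scanning for import path references" can look like, as a rough hypothetical sketch rather than the tool's actual code: walk the tree, flag `.go` files that mention the old import path, and check the target for a name collision.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// findImportReferences reports files whose source mentions oldImportPath as a
// quoted import, i.e. files that would need rewriting after the move.
// (Hypothetical helper; a real pass would parse imports, not grep strings.)
func findImportReferences(repoRoot, oldImportPath string) ([]string, error) {
	var hits []string
	err := filepath.WalkDir(repoRoot, func(path string, d os.DirEntry, err error) error {
		if err != nil || d.IsDir() || !strings.HasSuffix(path, ".go") {
			return err
		}
		src, readErr := os.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		if strings.Contains(string(src), `"`+oldImportPath+`"`) {
			hits = append(hits, path)
		}
		return nil
	})
	return hits, err
}

// detectTargetConflict reports whether a file already exists at targetPath.
func detectTargetConflict(targetPath string) bool {
	_, err := os.Stat(targetPath)
	return err == nil
}

func main() {
	refs, _ := findImportReferences(".", "github.com/SimplyLiz/CodeMCP/internal/old")
	fmt.Println("files needing import updates:", refs)
	fmt.Println("conflict at target:", detectTargetConflict("pkg/handler.go"))
}
```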

#### Extract Variable Flow Analysis
`prepareChange` with `changeType: "extract"` now provides tree-sitter-based variable flow analysis when CGO is available:

- Identifies parameters (variables defined outside selection, used inside)
- Identifies return values (variables defined inside, used after selection)
- Classifies local variables (defined and consumed within selection)
- Generates language-appropriate function signatures (Go, Python, JS/TS)
- Graceful degradation: falls back to line-count heuristics without CGO (the classification rules are sketched below)
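
The three rules above reduce to comparing each variable's definition and use lines against the selected range. A minimal sketch, assuming a hypothetical `varUsage` record as the output of the tree-sitter pass:

```go
package main

import "fmt"

// varUsage records where a variable is defined and used, by line number.
type varUsage struct {
	Name     string
	DefLine  int
	UseLines []int
}

// classify splits variables for an extraction spanning lines [start, end]:
//   - defined before the selection, used inside -> parameter
//   - defined inside, used after the selection  -> return value
//   - defined and only used inside              -> local
func classify(vars []varUsage, start, end int) (params, returns, locals []string) {
	for _, v := range vars {
		usedInside, usedAfter := false, false
		for _, u := range v.UseLines {
			if u >= start && u <= end {
				usedInside = true
			}
			if u > end {
				usedAfter = true
			}
		}
		switch {
		case v.DefLine < start && usedInside:
			params = append(params, v.Name)
		case v.DefLine >= start && v.DefLine <= end && usedAfter:
			returns = append(returns, v.Name)
		case v.DefLine >= start && v.DefLine <= end:
			locals = append(locals, v.Name)
		}
	}
	return params, returns, locals
}

func main() {
	vars := []varUsage{
		{Name: "cfg", DefLine: 3, UseLines: []int{12}},        // parameter
		{Name: "total", DefLine: 11, UseLines: []int{14, 30}}, // return value
		{Name: "i", DefLine: 12, UseLines: []int{13}},         // local
	}
	p, r, l := classify(vars, 10, 20)
	fmt.Println("params:", p, "returns:", r, "locals:", l)
}
```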

#### Suggested Refactoring Detection (`suggestRefactorings`)
Proactive detection of refactoring opportunities by combining existing analyzers in parallel (fan-out sketched at the end of this entry):

```bash
suggestRefactorings { "scope": "internal/query", "minSeverity": "medium" }
```

- **Complexity**: High cyclomatic/cognitive functions → `extract_function`, `simplify_function`
- **Coupling**: Highly correlated file pairs → `reduce_coupling`, `split_file`
- **Dead code**: Unused symbols → `remove_dead_code`
- **Test gaps**: High-risk untested code → `add_tests`
- Each suggestion includes severity, effort estimate, and priority score
- Available in `refactor` preset
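
A rough sketch of the parallel fan-out shape described above; the `suggestion` struct is illustrative, not the actual CodeMCP schema:

```go
package main

import (
	"fmt"
	"sync"
)

// suggestion is a simplified stand-in for a refactoring suggestion.
type suggestion struct {
	Kind     string  // e.g. "extract_function", "reduce_coupling"
	Target   string  // file or symbol
	Severity string  // "low" | "medium" | "high"
	Effort   string  // rough estimate, e.g. "small", "large"
	Priority float64 // used to rank suggestions
}

// runAll fans out independent analyzer passes in parallel and merges
// their suggestions into one list.
func runAll(passes []func() []suggestion) []suggestion {
	var (
		mu  sync.Mutex
		out []suggestion
		wg  sync.WaitGroup
	)
	for _, p := range passes {
		wg.Add(1)
		go func(run func() []suggestion) {
			defer wg.Done()
			found := run()
			mu.Lock()
			out = append(out, found...)
			mu.Unlock()
		}(p)
	}
	wg.Wait()
	return out
}

func main() {
	complexityPass := func() []suggestion {
		return []suggestion{{Kind: "extract_function", Target: "parse()", Severity: "high", Effort: "medium", Priority: 8.5}}
	}
	deadCodePass := func() []suggestion {
		return []suggestion{{Kind: "remove_dead_code", Target: "oldHelper()", Severity: "low", Effort: "small", Priority: 2.0}}
	}
	for _, s := range runAll([]func() []suggestion{complexityPass, deadCodePass}) {
		fmt.Printf("%+v\n", s)
	}
}
```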

## [8.0.2] - 2026-01-22

### Added
internal/audit/analyzer.go (+60, -19)

@@ -1,3 +1,3 @@
package audit

import (
@@ -11,22 +11,29 @@
	"strings"
	"time"

	"github.com/SimplyLiz/CodeMCP/internal/complexity"
	"github.com/SimplyLiz/CodeMCP/internal/coupling"
)

// Analyzer performs risk analysis on codebases
type Analyzer struct {
	repoRoot           string
	logger             *slog.Logger
	couplingAnalyzer   *coupling.Analyzer
	complexityAnalyzer *complexity.Analyzer
}

// NewAnalyzer creates a new risk analyzer
func NewAnalyzer(repoRoot string, logger *slog.Logger) *Analyzer {
	var ca *complexity.Analyzer
	if complexity.IsAvailable() {
		ca = complexity.NewAnalyzer()
	}
	return &Analyzer{
		repoRoot:           repoRoot,
		logger:             logger,
		couplingAnalyzer:   coupling.NewAnalyzer(repoRoot, logger),
		complexityAnalyzer: ca,
	}
}

@@ -116,12 +123,12 @@
	factors := make([]RiskFactor, 0, 8)
	fullPath := filepath.Join(repoRoot, file)

	// 1. Complexity (0-20 contribution) with per-function breakdown
	totalComplexity, functionRisks := a.getComplexityDetailed(ctx, fullPath)
	complexityContrib := min(float64(totalComplexity)/100, 1.0) * 20
	factors = append(factors, RiskFactor{
		Factor:       FactorComplexity,
		Value:        fmt.Sprintf("%d", totalComplexity),
		Weight:       RiskWeights[FactorComplexity],
		Contribution: complexityContrib,
	})
@@ -230,11 +237,12 @@
	recommendation := a.generateRecommendation(factors)

	return &RiskItem{
		File:               file,
		RiskScore:          totalScore,
		RiskLevel:          GetRiskLevel(totalScore),
		Factors:            factors,
		Recommendation:     recommendation,
		FunctionComplexity: functionRisks,
	}, nil
}

@@ -269,18 +277,51 @@
	return files, err
}

// getComplexityDetailed returns total complexity and a per-function breakdown.
// When the tree-sitter complexity analyzer is available, it delegates to it for
// accurate per-function cyclomatic+cognitive scores; otherwise it falls back to
// the string-counting heuristic.
func (a *Analyzer) getComplexityDetailed(ctx context.Context, filePath string) (int, []FunctionRisk) {
	// Try tree-sitter analyzer first
	if a.complexityAnalyzer != nil {
		fc, err := a.complexityAnalyzer.AnalyzeFile(ctx, filePath)
		if err == nil && fc != nil && fc.Error == "" && len(fc.Functions) > 0 {
			// Convert to FunctionRisk and sort by cyclomatic descending
			risks := make([]FunctionRisk, 0, len(fc.Functions))
			for _, f := range fc.Functions {
				risks = append(risks, FunctionRisk{
					Name:       f.Name,
					StartLine:  f.StartLine,
					EndLine:    f.EndLine,
					Cyclomatic: f.Cyclomatic,
					Cognitive:  f.Cognitive,
					Lines:      f.Lines,
				})
			}
			sort.Slice(risks, func(i, j int) bool {
				return risks[i].Cyclomatic > risks[j].Cyclomatic
			})
			// Cap at top 10 per file
			if len(risks) > 10 {
				risks = risks[:10]
			}
			return fc.TotalCyclomatic, risks
		}
	}

	// Fallback: simple heuristic, no per-function breakdown
	return a.getComplexityHeuristic(filePath), nil
}

// getComplexityHeuristic estimates complexity based on string counting.
func (a *Analyzer) getComplexityHeuristic(filePath string) int {
	content, err := os.ReadFile(filePath)
	if err != nil {
		return 0
	}

	// Simple heuristic: count decision points
	text := string(content)
	complexity := 1 // Base complexity

	// Count various complexity indicators
	complexity += strings.Count(text, "if ") + strings.Count(text, "if(")
	complexity += strings.Count(text, "else ")
	complexity += strings.Count(text, "for ") + strings.Count(text, "for(")
internal/audit/audit_test.go (+4, -4)

@@ -258,21 +258,21 @@ func main() {
		t.Fatal(err)
	}

	complexity := analyzer.getComplexityHeuristic(testFile)
	// Should detect: 2 if, 1 for, 1 switch, 2 case, 1 &&
	// Base complexity 1 + 2 + 1 + 1 + 2 + 1 = 8
	if complexity < 5 {
		t.Errorf("getComplexityHeuristic() = %d, want >= 5", complexity)
	}
}

func TestGetComplexityNonexistent(t *testing.T) {
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	analyzer := NewAnalyzer("/tmp", logger)

	complexity := analyzer.getComplexityHeuristic("/nonexistent/file.go")
	if complexity != 0 {
		t.Errorf("getComplexityHeuristic() for nonexistent file = %d, want 0", complexity)
	}
}
internal/audit/types.go (+17, -6)

@@ -14,14 +14,25 @@ type RiskAnalysis struct {
	QuickWins []QuickWin `json:"quickWins"`
}

// FunctionRisk contains per-function complexity metrics within a risky file.
type FunctionRisk struct {
	Name       string `json:"name"`
	StartLine  int    `json:"startLine"`
	EndLine    int    `json:"endLine"`
	Cyclomatic int    `json:"cyclomatic"`
	Cognitive  int    `json:"cognitive"`
	Lines      int    `json:"lines"`
}

// RiskItem represents a single file/module with risk assessment
type RiskItem struct {
	File               string         `json:"file"`
	Module             string         `json:"module,omitempty"`
	RiskScore          float64        `json:"riskScore"` // 0-100
	RiskLevel          string         `json:"riskLevel"` // "critical" | "high" | "medium" | "low"
	Factors            []RiskFactor   `json:"factors"`
	Recommendation     string         `json:"recommendation,omitempty"`
	FunctionComplexity []FunctionRisk `json:"functionComplexity,omitempty"`
}

// RiskFactor represents a contributing factor to the risk score