From e1c66bf253900a78906ab0b4356f539e400c373f Mon Sep 17 00:00:00 2001 From: Lisa Date: Sat, 31 Jan 2026 12:10:28 +0100 Subject: [PATCH] feat: add v8.2 refactoring tools improvements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Five features based on Cursor agent feedback from live refactoring trial: 1. Function-level complexity in auditRisk — wire tree-sitter complexity analyzer into audit, returning per-function cyclomatic+cognitive scores sorted by complexity (top 10 per file). Falls back to heuristic. 2. Graceful degradation messaging — new DegradationWarning type with capability percentages and fix commands. Wired into explore, understand, prepareChange, auditRisk, and findDeadCode MCP handlers. 3. Test gap analysis — new testgap package + analyzeTestGaps MCP tool. Cross-references complexity analysis with SCIP references or heuristic name matching to identify untested functions, sorted by risk. 4. Richer prepareChange for rename/extract — RenameDetail (call sites, type refs, imports with context snippets) and ExtractDetail (boundary analysis) added as parallel goroutines in PrepareChange. 5. Unified planRefactor compound tool — aggregates prepareChange + auditRisk + analyzeTestGaps in parallel, generates ordered refactoring steps by change type (rename/extract/delete/modify). 
Co-Authored-By: Claude Opus 4.5 --- internal/audit/analyzer.go | 79 +++++-- internal/audit/audit_test.go | 8 +- internal/audit/types.go | 23 +- internal/mcp/presets.go | 30 ++- internal/mcp/presets_test.go | 15 +- internal/mcp/tool_impls_compound.go | 66 +++++- internal/mcp/tool_impls_deadcode.go | 17 +- internal/mcp/tool_impls_testgap.go | 50 ++++ internal/mcp/tool_impls_v65.go | 26 ++- internal/mcp/tools.go | 50 ++++ internal/query/compound.go | 36 +++ internal/query/compound_refactor.go | 344 ++++++++++++++++++++++++++++ internal/query/degradation.go | 63 +++++ internal/query/prepare_extract.go | 100 ++++++++ internal/query/prepare_rename.go | 158 +++++++++++++ internal/query/testgap.go | 25 ++ internal/testgap/analyzer.go | 315 +++++++++++++++++++++++++ 17 files changed, 1342 insertions(+), 63 deletions(-) create mode 100644 internal/mcp/tool_impls_testgap.go create mode 100644 internal/query/compound_refactor.go create mode 100644 internal/query/degradation.go create mode 100644 internal/query/prepare_extract.go create mode 100644 internal/query/prepare_rename.go create mode 100644 internal/query/testgap.go create mode 100644 internal/testgap/analyzer.go diff --git a/internal/audit/analyzer.go b/internal/audit/analyzer.go index ef3054f3..fc15205a 100644 --- a/internal/audit/analyzer.go +++ b/internal/audit/analyzer.go @@ -11,22 +11,29 @@ import ( "strings" "time" + "github.com/SimplyLiz/CodeMCP/internal/complexity" "github.com/SimplyLiz/CodeMCP/internal/coupling" ) // Analyzer performs risk analysis on codebases type Analyzer struct { - repoRoot string - logger *slog.Logger - couplingAnalyzer *coupling.Analyzer + repoRoot string + logger *slog.Logger + couplingAnalyzer *coupling.Analyzer + complexityAnalyzer *complexity.Analyzer } // NewAnalyzer creates a new risk analyzer func NewAnalyzer(repoRoot string, logger *slog.Logger) *Analyzer { + var ca *complexity.Analyzer + if complexity.IsAvailable() { + ca = complexity.NewAnalyzer() + } return &Analyzer{ - 
repoRoot: repoRoot, - logger: logger, - couplingAnalyzer: coupling.NewAnalyzer(repoRoot, logger), + repoRoot: repoRoot, + logger: logger, + couplingAnalyzer: coupling.NewAnalyzer(repoRoot, logger), + complexityAnalyzer: ca, } } @@ -116,12 +123,12 @@ func (a *Analyzer) analyzeFile(ctx context.Context, repoRoot, file string) (*Ris factors := make([]RiskFactor, 0, 8) fullPath := filepath.Join(repoRoot, file) - // 1. Complexity (0-20 contribution) - complexity := a.getComplexity(fullPath) - complexityContrib := min(float64(complexity)/100, 1.0) * 20 + // 1. Complexity (0-20 contribution) with per-function breakdown + totalComplexity, functionRisks := a.getComplexityDetailed(ctx, fullPath) + complexityContrib := min(float64(totalComplexity)/100, 1.0) * 20 factors = append(factors, RiskFactor{ Factor: FactorComplexity, - Value: fmt.Sprintf("%d", complexity), + Value: fmt.Sprintf("%d", totalComplexity), Weight: RiskWeights[FactorComplexity], Contribution: complexityContrib, }) @@ -230,11 +237,12 @@ func (a *Analyzer) analyzeFile(ctx context.Context, repoRoot, file string) (*Ris recommendation := a.generateRecommendation(factors) return &RiskItem{ - File: file, - RiskScore: totalScore, - RiskLevel: GetRiskLevel(totalScore), - Factors: factors, - Recommendation: recommendation, + File: file, + RiskScore: totalScore, + RiskLevel: GetRiskLevel(totalScore), + Factors: factors, + Recommendation: recommendation, + FunctionComplexity: functionRisks, }, nil } @@ -269,18 +277,51 @@ func (a *Analyzer) findSourceFiles(repoRoot string) ([]string, error) { return files, err } -// getComplexity estimates complexity based on file size and structure -func (a *Analyzer) getComplexity(filePath string) int { +// getComplexityDetailed returns total complexity and per-function breakdown. +// When the tree-sitter complexity analyzer is available, delegates to it for +// accurate per-function cyclomatic+cognitive scores. Falls back to string-counting heuristic. 
+func (a *Analyzer) getComplexityDetailed(ctx context.Context, filePath string) (int, []FunctionRisk) { + // Try tree-sitter analyzer first + if a.complexityAnalyzer != nil { + fc, err := a.complexityAnalyzer.AnalyzeFile(ctx, filePath) + if err == nil && fc != nil && fc.Error == "" && len(fc.Functions) > 0 { + // Convert to FunctionRisk and sort by cyclomatic descending + risks := make([]FunctionRisk, 0, len(fc.Functions)) + for _, f := range fc.Functions { + risks = append(risks, FunctionRisk{ + Name: f.Name, + StartLine: f.StartLine, + EndLine: f.EndLine, + Cyclomatic: f.Cyclomatic, + Cognitive: f.Cognitive, + Lines: f.Lines, + }) + } + sort.Slice(risks, func(i, j int) bool { + return risks[i].Cyclomatic > risks[j].Cyclomatic + }) + // Cap at top 10 per file + if len(risks) > 10 { + risks = risks[:10] + } + return fc.TotalCyclomatic, risks + } + } + + // Fallback: simple heuristic, no per-function breakdown + return a.getComplexityHeuristic(filePath), nil +} + +// getComplexityHeuristic estimates complexity based on string counting. 
+func (a *Analyzer) getComplexityHeuristic(filePath string) int { content, err := os.ReadFile(filePath) if err != nil { return 0 } - // Simple heuristic: count decision points text := string(content) complexity := 1 // Base complexity - // Count various complexity indicators complexity += strings.Count(text, "if ") + strings.Count(text, "if(") complexity += strings.Count(text, "else ") complexity += strings.Count(text, "for ") + strings.Count(text, "for(") diff --git a/internal/audit/audit_test.go b/internal/audit/audit_test.go index ee262674..1ebbd5e6 100644 --- a/internal/audit/audit_test.go +++ b/internal/audit/audit_test.go @@ -258,11 +258,11 @@ func main() { t.Fatal(err) } - complexity := analyzer.getComplexity(testFile) + complexity := analyzer.getComplexityHeuristic(testFile) // Should detect: 2 if, 1 for, 1 switch, 2 case, 1 && // Base complexity 1 + 2 + 1 + 1 + 2 + 1 = 8 if complexity < 5 { - t.Errorf("getComplexity() = %d, want >= 5", complexity) + t.Errorf("getComplexityHeuristic() = %d, want >= 5", complexity) } } @@ -270,9 +270,9 @@ func TestGetComplexityNonexistent(t *testing.T) { logger := slog.New(slog.NewTextHandler(io.Discard, nil)) analyzer := NewAnalyzer("/tmp", logger) - complexity := analyzer.getComplexity("/nonexistent/file.go") + complexity := analyzer.getComplexityHeuristic("/nonexistent/file.go") if complexity != 0 { - t.Errorf("getComplexity() for nonexistent file = %d, want 0", complexity) + t.Errorf("getComplexityHeuristic() for nonexistent file = %d, want 0", complexity) } } diff --git a/internal/audit/types.go b/internal/audit/types.go index a8c436bc..149b9b1b 100644 --- a/internal/audit/types.go +++ b/internal/audit/types.go @@ -14,14 +14,25 @@ type RiskAnalysis struct { QuickWins []QuickWin `json:"quickWins"` } +// FunctionRisk contains per-function complexity metrics within a risky file. 
+type FunctionRisk struct { + Name string `json:"name"` + StartLine int `json:"startLine"` + EndLine int `json:"endLine"` + Cyclomatic int `json:"cyclomatic"` + Cognitive int `json:"cognitive"` + Lines int `json:"lines"` +} + // RiskItem represents a single file/module with risk assessment type RiskItem struct { - File string `json:"file"` - Module string `json:"module,omitempty"` - RiskScore float64 `json:"riskScore"` // 0-100 - RiskLevel string `json:"riskLevel"` // "critical" | "high" | "medium" | "low" - Factors []RiskFactor `json:"factors"` - Recommendation string `json:"recommendation,omitempty"` + File string `json:"file"` + Module string `json:"module,omitempty"` + RiskScore float64 `json:"riskScore"` // 0-100 + RiskLevel string `json:"riskLevel"` // "critical" | "high" | "medium" | "low" + Factors []RiskFactor `json:"factors"` + Recommendation string `json:"recommendation,omitempty"` + FunctionComplexity []FunctionRisk `json:"functionComplexity,omitempty"` } // RiskFactor represents a contributing factor to the risk score diff --git a/internal/mcp/presets.go b/internal/mcp/presets.go index cbf1b028..9ddf48ce 100644 --- a/internal/mcp/presets.go +++ b/internal/mcp/presets.go @@ -61,6 +61,9 @@ var Presets = map[string][]string{ "getStatus", "switchProject", // v8.1: Dynamic project switching + // v8.2 Refactoring compound tool + "planRefactor", + // Meta (always included) "expandToolset", }, @@ -72,7 +75,9 @@ var Presets = map[string][]string{ "searchSymbols", "getSymbol", "explainSymbol", "explainFile", "findReferences", "getCallGraph", "traceUsage", "getArchitecture", "getModuleOverview", "listKeyConcepts", - "analyzeImpact", "getHotspots", "getStatus", "switchProject", "expandToolset", + "analyzeImpact", "getHotspots", "getStatus", "switchProject", + "planRefactor", // v8.2 + "expandToolset", // Review-specific "summarizeDiff", "summarizePr", @@ -94,12 +99,14 @@ var Presets = map[string][]string{ "justifySymbol", "analyzeCoupling", 
"findDeadCodeCandidates", - "findDeadCode", // v7.6: Static dead code detection (no telemetry needed) - "getAffectedTests", // v7.6: Find tests affected by changes - "compareAPI", // v7.6: Breaking change detection + "findDeadCode", // v7.6: Static dead code detection (no telemetry needed) + "getAffectedTests", // v7.6: Find tests affected by changes + "compareAPI", // v7.6: Breaking change detection "auditRisk", "explainOrigin", - "scanSecrets", // v8.0: Secret detection for security audits + "scanSecrets", // v8.0: Secret detection for security audits + "analyzeTestGaps", // v8.2: Test gap analysis + "planRefactor", // v8.2: Unified refactor planning }, // Federation: core + federation tools @@ -109,7 +116,9 @@ var Presets = map[string][]string{ "searchSymbols", "getSymbol", "explainSymbol", "explainFile", "findReferences", "getCallGraph", "traceUsage", "getArchitecture", "getModuleOverview", "listKeyConcepts", - "analyzeImpact", "getHotspots", "getStatus", "switchProject", "expandToolset", + "analyzeImpact", "getHotspots", "getStatus", "switchProject", + "planRefactor", // v8.2 + "expandToolset", // Federation-specific "listFederations", "federationStatus", @@ -134,7 +143,9 @@ var Presets = map[string][]string{ "searchSymbols", "getSymbol", "explainSymbol", "explainFile", "findReferences", "getCallGraph", "traceUsage", "getArchitecture", "getModuleOverview", "listKeyConcepts", - "analyzeImpact", "getHotspots", "getStatus", "switchProject", "expandToolset", + "analyzeImpact", "getHotspots", "getStatus", "switchProject", + "planRefactor", // v8.2 + "expandToolset", // Docs-specific "indexDocs", "getDocsForSymbol", @@ -151,7 +162,9 @@ var Presets = map[string][]string{ "searchSymbols", "getSymbol", "explainSymbol", "explainFile", "findReferences", "getCallGraph", "traceUsage", "getArchitecture", "getModuleOverview", "listKeyConcepts", - "analyzeImpact", "getHotspots", "getStatus", "switchProject", "expandToolset", + "analyzeImpact", "getHotspots", "getStatus", 
"switchProject", + "planRefactor", // v8.2 + "expandToolset", // Ops-specific "doctor", "reindex", @@ -223,6 +236,7 @@ var coreToolOrder = []string{ "getHotspots", "getStatus", "switchProject", + "planRefactor", // v8.2 "expandToolset", } diff --git a/internal/mcp/presets_test.go b/internal/mcp/presets_test.go index 634561fb..ad117d23 100644 --- a/internal/mcp/presets_test.go +++ b/internal/mcp/presets_test.go @@ -12,10 +12,10 @@ func TestPresetFiltering(t *testing.T) { server := NewMCPServer("test", nil, logger) // Test core preset (default) - // v8.1: Core now includes 5 compound tools + switchProject + // v8.2: Core now includes 5 compound tools + switchProject + planRefactor coreTools := server.GetFilteredTools() - if len(coreTools) != 20 { - t.Errorf("expected 20 core tools (v8.1 includes switchProject), got %d", len(coreTools)) + if len(coreTools) != 21 { + t.Errorf("expected 21 core tools (v8.2 includes planRefactor), got %d", len(coreTools)) } // Verify compound tools come first (preferred for AI workflows) @@ -24,7 +24,8 @@ func TestPresetFiltering(t *testing.T) { "searchSymbols", "getSymbol", "explainSymbol", "explainFile", "findReferences", "getCallGraph", "traceUsage", "getArchitecture", "getModuleOverview", "listKeyConcepts", - "analyzeImpact", "getHotspots", "getStatus", "switchProject", "expandToolset", + "analyzeImpact", "getHotspots", "getStatus", "switchProject", + "planRefactor", "expandToolset", } for i, expected := range expectedFirst { if i >= len(coreTools) { @@ -41,9 +42,9 @@ func TestPresetFiltering(t *testing.T) { t.Fatalf("failed to set full preset: %v", err) } fullTools := server.GetFilteredTools() - // v8.1: Full now includes switchProject (88 = 87 + 1) - if len(fullTools) != 88 { - t.Errorf("expected 88 full tools (v8.1 includes switchProject), got %d", len(fullTools)) + // v8.2: Full now includes switchProject + analyzeTestGaps + planRefactor (90 = 88 + 2) + if len(fullTools) != 90 { + t.Errorf("expected 90 full tools (v8.2 includes 
analyzeTestGaps + planRefactor), got %d", len(fullTools)) } // Full preset should still have core tools first diff --git a/internal/mcp/tool_impls_compound.go b/internal/mcp/tool_impls_compound.go index 0d6e7dcd..41e73f76 100644 --- a/internal/mcp/tool_impls_compound.go +++ b/internal/mcp/tool_impls_compound.go @@ -57,9 +57,11 @@ func (s *MCPServer) toolExplore(params map[string]interface{}) (*envelope.Respon return nil, s.enrichNotFoundError(err) } - return NewToolResponse(). - Data(result). - Build(), nil + resp := NewToolResponse().Data(result) + for _, dw := range engine.GetDegradationWarnings() { + resp.Warning(dw.Message) + } + return resp.Build(), nil } // toolUnderstand provides comprehensive symbol deep-dive @@ -100,9 +102,11 @@ func (s *MCPServer) toolUnderstand(params map[string]interface{}) (*envelope.Res return nil, s.enrichNotFoundError(err) } - return NewToolResponse(). - Data(result). - Build(), nil + resp := NewToolResponse().Data(result) + for _, dw := range engine.GetDegradationWarnings() { + resp.Warning(dw.Message) + } + return resp.Build(), nil } // toolPrepareChange provides pre-change analysis @@ -140,9 +144,11 @@ func (s *MCPServer) toolPrepareChange(params map[string]interface{}) (*envelope. return nil, s.enrichNotFoundError(err) } - return NewToolResponse(). - Data(result). - Build(), nil + resp := NewToolResponse().Data(result) + for _, dw := range engine.GetDegradationWarnings() { + resp.Warning(dw.Message) + } + return resp.Build(), nil } // toolSwitchProject switches CKB to a different project directory @@ -257,3 +263,45 @@ func (s *MCPServer) toolBatchSearch(params map[string]interface{}) (*envelope.Re Data(result). 
Build(), nil } + +// toolPlanRefactor provides unified refactoring planning +func (s *MCPServer) toolPlanRefactor(params map[string]interface{}) (*envelope.Response, error) { + target, ok := params["target"].(string) + if !ok || target == "" { + return nil, errors.NewInvalidParameterError("target", "required") + } + + changeType := query.ChangeModify + if v, ok := params["changeType"].(string); ok { + switch v { + case "modify": + changeType = query.ChangeModify + case "rename": + changeType = query.ChangeRename + case "delete": + changeType = query.ChangeDelete + case "extract": + changeType = query.ChangeExtract + } + } + + engine, err := s.GetEngine() + if err != nil { + return nil, err + } + + ctx := context.Background() + result, err := engine.PlanRefactor(ctx, query.PlanRefactorOptions{ + Target: target, + ChangeType: changeType, + }) + if err != nil { + return nil, s.enrichNotFoundError(err) + } + + resp := NewToolResponse().Data(result) + for _, dw := range engine.GetDegradationWarnings() { + resp.Warning(dw.Message) + } + return resp.Build(), nil +} diff --git a/internal/mcp/tool_impls_deadcode.go b/internal/mcp/tool_impls_deadcode.go index 53307e76..603a904b 100644 --- a/internal/mcp/tool_impls_deadcode.go +++ b/internal/mcp/tool_impls_deadcode.go @@ -64,8 +64,17 @@ func (s *MCPServer) toolFindDeadCode(params map[string]interface{}) (*envelope.R return nil, err } - // Build response - return NewToolResponse(). - Data(result). - Build(), nil + // Build response with degradation warnings + resp := NewToolResponse().Data(result) + engine, engineErr := s.GetEngine() + if engineErr == nil { + for _, dw := range engine.GetDegradationWarnings() { + resp.Warning(dw.Message) + } + } + // Warn specifically if results are empty — likely missing SCIP + if result != nil && len(result.DeadCode) == 0 { + resp.Warning("Dead code detection requires SCIP index. 
Run 'ckb index' to enable.") + } + return resp.Build(), nil } diff --git a/internal/mcp/tool_impls_testgap.go b/internal/mcp/tool_impls_testgap.go new file mode 100644 index 00000000..0d9583eb --- /dev/null +++ b/internal/mcp/tool_impls_testgap.go @@ -0,0 +1,50 @@ +package mcp + +import ( + "context" + + "github.com/SimplyLiz/CodeMCP/internal/envelope" + "github.com/SimplyLiz/CodeMCP/internal/errors" + "github.com/SimplyLiz/CodeMCP/internal/query" +) + +// v8.2 Test Gap Analysis tool implementation + +// toolAnalyzeTestGaps identifies functions that lack test coverage. +func (s *MCPServer) toolAnalyzeTestGaps(params map[string]interface{}) (*envelope.Response, error) { + target, ok := params["target"].(string) + if !ok || target == "" { + return nil, errors.NewInvalidParameterError("target", "required") + } + + minLines := 3 + if v, ok := params["minLines"].(float64); ok { + minLines = int(v) + } + + limit := 50 + if v, ok := params["limit"].(float64); ok { + limit = int(v) + } + + engine, err := s.GetEngine() + if err != nil { + return nil, err + } + + ctx := context.Background() + result, err := engine.AnalyzeTestGaps(ctx, query.AnalyzeTestGapsOptions{ + Target: target, + MinLines: minLines, + Limit: limit, + }) + if err != nil { + return nil, errors.NewOperationError("analyze test gaps", err) + } + + resp := NewToolResponse().Data(result) + for _, dw := range engine.GetDegradationWarnings() { + resp.Warning(dw.Message) + } + return resp.Build(), nil +} diff --git a/internal/mcp/tool_impls_v65.go b/internal/mcp/tool_impls_v65.go index 2053e4af..7e83b8fb 100644 --- a/internal/mcp/tool_impls_v65.go +++ b/internal/mcp/tool_impls_v65.go @@ -215,17 +215,31 @@ func (s *MCPServer) toolAuditRisk(params map[string]interface{}) (*envelope.Resp return nil, errors.NewOperationError("audit risk", err) } + // Collect degradation warnings + var degradationWarnings []string + engine, engineErr := s.GetEngine() + if engineErr == nil { + for _, dw := range 
engine.GetDegradationWarnings() { + degradationWarnings = append(degradationWarnings, dw.Message) + } + } + // If quickWins mode, return only quick wins if quickWins { - return NewToolResponse(). + resp := NewToolResponse(). Data(map[string]interface{}{ "quickWins": result.QuickWins, "summary": result.Summary, - }). - Build(), nil + }) + for _, w := range degradationWarnings { + resp.Warning(w) + } + return resp.Build(), nil } - return NewToolResponse(). - Data(result). - Build(), nil + resp := NewToolResponse().Data(result) + for _, w := range degradationWarnings { + resp.Warning(w) + } + return resp.Build(), nil } diff --git a/internal/mcp/tools.go b/internal/mcp/tools.go index 4a50d29c..0608e6c7 100644 --- a/internal/mcp/tools.go +++ b/internal/mcp/tools.go @@ -2122,6 +2122,52 @@ func (s *MCPServer) GetToolDefinitions() []Tool { "required": []string{"queries"}, }, }, + // v8.2 Test Gap Analysis + { + Name: "analyzeTestGaps", + Description: "Find functions that lack test coverage. Returns untested functions sorted by complexity (highest-risk untested code first). Uses SCIP references when available, falls back to heuristic name matching.", + InputSchema: map[string]interface{}{ + "type": "object", + "properties": map[string]interface{}{ + "target": map[string]interface{}{ + "type": "string", + "description": "File or directory path to analyze (relative to repo root)", + }, + "minLines": map[string]interface{}{ + "type": "integer", + "default": 3, + "description": "Minimum function lines to include (skips trivial getters/setters)", + }, + "limit": map[string]interface{}{ + "type": "integer", + "default": 50, + "description": "Maximum number of gaps to return", + }, + }, + "required": []string{"target"}, + }, + }, + // v8.2 Unified Refactor Planning + { + Name: "planRefactor", + Description: "Plan a refactoring operation with combined risk assessment, impact analysis, test strategy, and ordered steps. 
Aggregates prepareChange + auditRisk + analyzeTestGaps into a single call.", + InputSchema: map[string]interface{}{ + "type": "object", + "properties": map[string]interface{}{ + "target": map[string]interface{}{ + "type": "string", + "description": "File path or symbol ID to refactor", + }, + "changeType": map[string]interface{}{ + "type": "string", + "enum": []string{"modify", "rename", "delete", "extract"}, + "default": "modify", + "description": "Type of refactoring change", + }, + }, + "required": []string{"target"}, + }, + }, } } @@ -2233,6 +2279,10 @@ func (s *MCPServer) RegisterTools() { s.tools["prepareChange"] = s.toolPrepareChange s.tools["batchGet"] = s.toolBatchGet s.tools["batchSearch"] = s.toolBatchSearch + // v8.2 Test Gap Analysis + s.tools["analyzeTestGaps"] = s.toolAnalyzeTestGaps + // v8.2 Unified Refactor Planning + s.tools["planRefactor"] = s.toolPlanRefactor // v8.0 Streaming support s.RegisterStreamableTools() diff --git a/internal/query/compound.go b/internal/query/compound.go index 3d5715f7..27546421 100644 --- a/internal/query/compound.go +++ b/internal/query/compound.go @@ -294,6 +294,12 @@ func (e *Engine) Explore(ctx context.Context, opts ExploreOptions) (*ExploreResp completeness.Reason = "limited-scip-unavailable" } + // Add degradation warnings + degradationWarnings := e.GetDegradationWarnings() + for _, dw := range degradationWarnings { + warnings = append(warnings, dw.Message) + } + health.Warnings = warnings return &ExploreResponse{ @@ -1245,6 +1251,8 @@ type PrepareChangeResponse struct { RelatedTests []PrepareTest `json:"relatedTests"` CoChangeFiles []PrepareCoChange `json:"coChangeFiles,omitempty"` RiskAssessment *PrepareRisk `json:"riskAssessment"` + RenameDetail *RenameDetail `json:"renameDetail,omitempty"` + ExtractDetail *ExtractDetail `json:"extractDetail,omitempty"` } // PrepareChangeTarget describes what will be changed. 
@@ -1326,6 +1334,8 @@ func (e *Engine) PrepareChange(ctx context.Context, opts PrepareChangeOptions) ( var transitiveImpact *PrepareTransitive var relatedTests []PrepareTest var coChangeFiles []PrepareCoChange + var renameDetail *RenameDetail + var extractDetail *ExtractDetail var riskFactors []string var warnings []string @@ -1385,6 +1395,30 @@ func (e *Engine) PrepareChange(ctx context.Context, opts PrepareChangeOptions) ( }() } + // Get rename detail for rename operations + if opts.ChangeType == ChangeRename && target.SymbolId != "" { + wg.Add(1) + go func() { + defer wg.Done() + rd := e.getPrepareRenameDetail(ctx, target.SymbolId) + mu.Lock() + renameDetail = rd + mu.Unlock() + }() + } + + // Get extract detail for extract operations + if opts.ChangeType == ChangeExtract { + wg.Add(1) + go func() { + defer wg.Done() + ed := e.getPrepareExtractDetail(target) + mu.Lock() + extractDetail = ed + mu.Unlock() + }() + } + wg.Wait() // Calculate risk assessment @@ -1426,6 +1460,8 @@ func (e *Engine) PrepareChange(ctx context.Context, opts PrepareChangeOptions) ( RelatedTests: relatedTests, CoChangeFiles: coChangeFiles, RiskAssessment: risk, + RenameDetail: renameDetail, + ExtractDetail: extractDetail, }, nil } diff --git a/internal/query/compound_refactor.go b/internal/query/compound_refactor.go new file mode 100644 index 00000000..b9791ee8 --- /dev/null +++ b/internal/query/compound_refactor.go @@ -0,0 +1,344 @@ +package query + +import ( + "context" + "fmt" + "path/filepath" + "sync" + "time" + + "github.com/SimplyLiz/CodeMCP/internal/audit" + "github.com/SimplyLiz/CodeMCP/internal/version" +) + +// PlanRefactorOptions configures the unified refactoring planner. +type PlanRefactorOptions struct { + Target string // file or symbol + ChangeType ChangeType // modify, rename, delete, extract +} + +// PlanRefactorResponse contains the combined result of all refactoring analysis. 
+type PlanRefactorResponse struct { + AINavigationMeta + Target *PrepareChangeTarget `json:"target"` + RiskAssessment *PlanRefactorRisk `json:"riskAssessment"` + ImpactAnalysis *PlanRefactorImpact `json:"impactAnalysis"` + TestStrategy *PlanRefactorTests `json:"testStrategy"` + CouplingAnalysis *PlanRefactorCoupling `json:"couplingAnalysis,omitempty"` + RefactoringSteps []RefactoringStep `json:"refactoringSteps"` +} + +// PlanRefactorRisk combines file-level risk with per-function complexity breakdown. +type PlanRefactorRisk struct { + RiskLevel string `json:"riskLevel"` // critical, high, medium, low + RiskScore float64 `json:"riskScore"` // 0-100 + Factors []audit.RiskFactor `json:"factors,omitempty"` + FunctionComplexity []audit.FunctionRisk `json:"functionComplexity,omitempty"` +} + +// PlanRefactorImpact summarizes the blast radius. +type PlanRefactorImpact struct { + DirectDependents int `json:"directDependents"` + TransitiveClosure int `json:"transitiveClosure"` + AffectedFiles int `json:"affectedFiles"` + RenamePreview string `json:"renamePreview,omitempty"` // only for rename +} + +// PlanRefactorTests describes what tests exist and what's missing. +type PlanRefactorTests struct { + ExistingTests int `json:"existingTests"` + TestGapCount int `json:"testGapCount"` + CoveragePercent float64 `json:"coveragePercent"` + HighestRiskGap string `json:"highestRiskGap,omitempty"` // function name of riskiest untested fn +} + +// PlanRefactorCoupling describes co-change coupling for the target. +type PlanRefactorCoupling struct { + CoChangeFiles int `json:"coChangeFiles"` + HighestCoupled string `json:"highestCoupled,omitempty"` +} + +// RefactoringStep is an ordered action in the refactoring plan. 
+type RefactoringStep struct { + Order int `json:"order"` + Action string `json:"action"` + Description string `json:"description"` + Risk string `json:"risk"` // "low", "medium", "high" +} + +// PlanRefactor executes a comprehensive refactoring plan by running +// prepareChange, auditRisk, and analyzeTestGaps in parallel. +func (e *Engine) PlanRefactor(ctx context.Context, opts PlanRefactorOptions) (*PlanRefactorResponse, error) { + startTime := time.Now() + + if opts.ChangeType == "" { + opts.ChangeType = ChangeModify + } + + // Get repo state + repoState, err := e.GetRepoState(ctx, "full") + if err != nil { + return nil, err + } + + var wg sync.WaitGroup + var mu sync.Mutex + + // Sub-query results + var prepareResult *PrepareChangeResponse + var riskItem *audit.RiskItem + var testGapResult *PlanRefactorTests + var warnings []string + + // 1. PrepareChange (impact, dependents, tests, co-changes) + wg.Add(1) + go func() { + defer wg.Done() + result, err := e.PrepareChange(ctx, PrepareChangeOptions{ + Target: opts.Target, + ChangeType: opts.ChangeType, + }) + if err != nil { + mu.Lock() + warnings = append(warnings, fmt.Sprintf("prepareChange: %s", err.Error())) + mu.Unlock() + return + } + mu.Lock() + prepareResult = result + mu.Unlock() + }() + + // 2. 
AuditRisk on the target file for risk + function breakdown + wg.Add(1) + go func() { + defer wg.Done() + filePath := opts.Target + // If target is a symbol ID, we'll get the path from prepareResult later + if filepath.Ext(filePath) == "" && filePath != "" { + // Likely a symbol ID, skip file-level audit for now + return + } + analyzer := audit.NewAnalyzer(e.repoRoot, e.logger) + analysisResult, err := analyzer.Analyze(ctx, audit.AuditOptions{ + RepoRoot: e.repoRoot, + MinScore: 0, // include everything for this specific file + Limit: 1, + }) + if err != nil { + mu.Lock() + warnings = append(warnings, fmt.Sprintf("auditRisk: %s", err.Error())) + mu.Unlock() + return + } + if analysisResult != nil && len(analysisResult.Items) > 0 { + // Find the item matching our target + for i, item := range analysisResult.Items { + if item.File == filePath || filepath.Base(item.File) == filepath.Base(filePath) { + mu.Lock() + riskItem = &analysisResult.Items[i] + mu.Unlock() + return + } + } + } + }() + + // 3. 
Test gap analysis + wg.Add(1) + go func() { + defer wg.Done() + result, err := e.AnalyzeTestGaps(ctx, AnalyzeTestGapsOptions{ + Target: opts.Target, + MinLines: 3, + Limit: 20, + }) + if err != nil { + mu.Lock() + warnings = append(warnings, fmt.Sprintf("testGaps: %s", err.Error())) + mu.Unlock() + return + } + if result != nil { + strategy := &PlanRefactorTests{ + TestGapCount: len(result.Gaps), + CoveragePercent: result.Summary.CoveragePercent, + ExistingTests: result.Summary.TestedFunctions, + } + if len(result.Gaps) > 0 { + strategy.HighestRiskGap = result.Gaps[0].Function + } + mu.Lock() + testGapResult = strategy + mu.Unlock() + } + }() + + wg.Wait() + + // Assemble response + resp := &PlanRefactorResponse{} + + // Target from prepareChange + if prepareResult != nil { + resp.Target = prepareResult.Target + + // Impact analysis + resp.ImpactAnalysis = &PlanRefactorImpact{ + DirectDependents: len(prepareResult.DirectDependents), + } + if prepareResult.TransitiveImpact != nil { + resp.ImpactAnalysis.TransitiveClosure = prepareResult.TransitiveImpact.TotalCallers + resp.ImpactAnalysis.AffectedFiles = prepareResult.TransitiveImpact.ModuleSpread + } + if prepareResult.RenameDetail != nil { + resp.ImpactAnalysis.RenamePreview = FormatRenamePreview(prepareResult.RenameDetail) + } + + // Coupling + if len(prepareResult.CoChangeFiles) > 0 { + resp.CouplingAnalysis = &PlanRefactorCoupling{ + CoChangeFiles: len(prepareResult.CoChangeFiles), + } + if len(prepareResult.CoChangeFiles) > 0 { + resp.CouplingAnalysis.HighestCoupled = prepareResult.CoChangeFiles[0].File + } + } + } + + // Risk assessment + resp.RiskAssessment = &PlanRefactorRisk{ + RiskLevel: "low", + RiskScore: 0, + } + if riskItem != nil { + resp.RiskAssessment.RiskLevel = riskItem.RiskLevel + resp.RiskAssessment.RiskScore = riskItem.RiskScore + resp.RiskAssessment.Factors = riskItem.Factors + resp.RiskAssessment.FunctionComplexity = riskItem.FunctionComplexity + } else if prepareResult != nil && 
prepareResult.RiskAssessment != nil { + // Fall back to prepareChange risk if file-level audit didn't run + resp.RiskAssessment.RiskLevel = prepareResult.RiskAssessment.Level + resp.RiskAssessment.RiskScore = prepareResult.RiskAssessment.Score + } + + // Test strategy + if testGapResult != nil { + resp.TestStrategy = testGapResult + } else { + resp.TestStrategy = &PlanRefactorTests{} + } + + // Generate refactoring steps + resp.RefactoringSteps = generateRefactoringSteps(opts.ChangeType, resp) + + // Build provenance + var backendContribs []BackendContribution + if e.scipAdapter != nil && e.scipAdapter.IsAvailable() { + backendContribs = append(backendContribs, BackendContribution{ + BackendId: "scip", Available: true, Used: true, + }) + } + if e.gitAdapter != nil && e.gitAdapter.IsAvailable() { + backendContribs = append(backendContribs, BackendContribution{ + BackendId: "git", Available: true, Used: true, + }) + } + + resp.AINavigationMeta = AINavigationMeta{ + CkbVersion: version.Version, + SchemaVersion: 1, + Tool: "planRefactor", + Provenance: e.buildProvenance(repoState, "full", startTime, backendContribs, CompletenessInfo{ + Score: 0.85, + Reason: "compound-planrefactor", + }), + } + + return resp, nil +} + +// generateRefactoringSteps produces ordered steps based on change type and analysis. 
func generateRefactoringSteps(changeType ChangeType, resp *PlanRefactorResponse) []RefactoringStep {
	switch changeType {
	case ChangeRename:
		return generateRenameSteps(resp)
	case ChangeExtract:
		return generateExtractSteps(resp)
	case ChangeDelete:
		return generateDeleteSteps(resp)
	default:
		// Modify and any unrecognized change type fall through to the generic plan.
		return generateModifySteps(resp)
	}
}

// generateRenameSteps plans a rename: definition first, then call sites,
// then imports, then a verifying test run. Step 2's risk scales with the
// number of direct dependents found by impact analysis.
func generateRenameSteps(resp *PlanRefactorResponse) []RefactoringStep {
	steps := []RefactoringStep{
		{Order: 1, Action: "Update definition", Description: "Rename the symbol at its definition site", Risk: "low"},
	}

	// Zero when impact analysis did not run; the step still renders sensibly.
	sites := 0
	if resp.ImpactAnalysis != nil {
		sites = resp.ImpactAnalysis.DirectDependents
	}
	steps = append(steps, RefactoringStep{
		Order:       2,
		Action:      "Update call sites",
		Description: fmt.Sprintf("Update %d call site(s) that reference this symbol", sites),
		Risk:        stepRisk(sites, 10, 50),
	})

	steps = append(steps,
		RefactoringStep{Order: 3, Action: "Update imports", Description: "Update any import paths referencing the old name", Risk: "low"},
		RefactoringStep{Order: 4, Action: "Run tests", Description: "Execute affected test suite to verify rename", Risk: "low"},
	)

	return steps
}

// generateExtractSteps plans an extract-function refactor. The plan is fixed;
// resp is accepted only for signature symmetry with the other generators.
func generateExtractSteps(resp *PlanRefactorResponse) []RefactoringStep {
	return []RefactoringStep{
		{Order: 1, Action: "Identify boundary", Description: "Determine extraction boundary and variable flow", Risk: "medium"},
		{Order: 2, Action: "Create function", Description: "Create new function with identified parameters and returns", Risk: "low"},
		{Order: 3, Action: "Replace inline", Description: "Replace original code block with call to new function", Risk: "medium"},
		{Order: 4, Action: "Update tests", Description: "Add tests for extracted function, update existing tests", Risk: "low"},
	}
}

// generateDeleteSteps plans a deletion; step 1's risk rises with the number
// of remaining dependents that must be resolved before removal.
func generateDeleteSteps(resp *PlanRefactorResponse) []RefactoringStep {
	dependents := 0
	if resp.ImpactAnalysis != nil {
		dependents = resp.ImpactAnalysis.DirectDependents
	}
	steps := []RefactoringStep{
		{Order: 1, Action: "Verify unused", Description: fmt.Sprintf("Confirm symbol has %d dependent(s) — resolve before deletion", dependents), Risk: stepRisk(dependents, 1, 5)},
		{Order: 2, Action: "Remove symbol", Description: "Delete the symbol definition", Risk: "low"},
		{Order: 3, Action: "Clean imports", Description: "Remove any now-unused import paths", Risk: "low"},
		{Order: 4, Action: "Run tests", Description: "Execute full test suite to verify no breakage", Risk: "low"},
	}
	return steps
}

// generateModifySteps plans an in-place modification — the default plan when
// the change type is unrecognized.
func generateModifySteps(resp *PlanRefactorResponse) []RefactoringStep {
	dependents := 0
	if resp.ImpactAnalysis != nil {
		dependents = resp.ImpactAnalysis.DirectDependents
	}
	return []RefactoringStep{
		{Order: 1, Action: "Review dependents", Description: fmt.Sprintf("Review %d direct dependent(s) for compatibility", dependents), Risk: stepRisk(dependents, 5, 20)},
		{Order: 2, Action: "Update implementation", Description: "Apply changes to the target", Risk: "medium"},
		{Order: 3, Action: "Run affected tests", Description: "Execute tests related to modified code", Risk: "low"},
		{Order: 4, Action: "Check coupling", Description: "Verify co-change files don't need updates", Risk: "low"},
	}
}

// stepRisk returns risk level based on count thresholds.
// Thresholds are inclusive: count >= highThreshold yields "high",
// count >= mediumThreshold yields "medium", anything lower "low".
func stepRisk(count, mediumThreshold, highThreshold int) string {
	if count >= highThreshold {
		return "high"
	}
	if count >= mediumThreshold {
		return "medium"
	}
	return "low"
}
diff --git a/internal/query/degradation.go b/internal/query/degradation.go
new file mode 100644
index 00000000..94f5730d
--- /dev/null
+++ b/internal/query/degradation.go
@@ -0,0 +1,63 @@
package query

import "fmt"

// DegradationWarning describes reduced capability with actionable fix guidance.
type DegradationWarning struct {
	Code              string `json:"code"`
	Message           string `json:"message"`
	CapabilityPercent int    `json:"capabilityPercent"`
	FixCommand        string `json:"fixCommand,omitempty"`
}

// GenerateDegradationWarnings checks backend availability and staleness,
// returning warnings that explain reduced capability and how to fix it.
// A missing SCIP index and a stale one are mutually exclusive warnings,
// and staleness is only reported once the index trails HEAD by more than
// five commits. Git unavailability is reported independently.
func GenerateDegradationWarnings(scipAvailable, gitAvailable, scipStale bool, commitsBehind int) []DegradationWarning {
	var out []DegradationWarning

	switch {
	case !scipAvailable:
		out = append(out, DegradationWarning{
			Code:              "scip_missing",
			Message:           "Running at ~40% capability — run 'ckb index' for full results",
			CapabilityPercent: 40,
			FixCommand:        "ckb index",
		})
	case scipStale && commitsBehind > 5:
		out = append(out, DegradationWarning{
			Code:              "scip_stale",
			Message:           fmt.Sprintf("Index is %d commits behind (~60%% capability) — run 'ckb index'", commitsBehind),
			CapabilityPercent: 60,
			FixCommand:        "ckb index",
		})
	}

	if !gitAvailable {
		out = append(out, DegradationWarning{
			Code:              "git_unavailable",
			Message:           "Git not available. History-based features disabled (~20% capability)",
			CapabilityPercent: 20,
		})
	}

	return out
}

// GetDegradationWarnings inspects current engine backend state and returns
// any applicable degradation warnings.
func (e *Engine) GetDegradationWarnings() []DegradationWarning {
	scipAvailable := e.scipAdapter != nil && e.scipAdapter.IsAvailable()
	gitAvailable := e.gitAdapter != nil && e.gitAdapter.IsAvailable()

	scipStale := false
	commitsBehind := 0

	// Staleness is only meaningful when an index exists; freshness info may
	// legitimately be absent, in which case the index is treated as fresh.
	if scipAvailable {
		indexInfo := e.scipAdapter.GetIndexInfo()
		if indexInfo != nil && indexInfo.Freshness != nil {
			scipStale = indexInfo.Freshness.IsStale()
			commitsBehind = indexInfo.Freshness.CommitsBehindHead
		}
	}

	return GenerateDegradationWarnings(scipAvailable, gitAvailable, scipStale, commitsBehind)
}
diff --git a/internal/query/prepare_extract.go b/internal/query/prepare_extract.go
new file mode 100644
index 00000000..29c6eba2
--- /dev/null
+++ b/internal/query/prepare_extract.go
@@ -0,0 +1,100 @@
package query

import (
	"os"
	"path/filepath"
	"strings"
)

// ExtractParameter describes an input variable to the extracted function.
type ExtractParameter struct {
	Name string `json:"name"`
	Type string `json:"type,omitempty"` // inferred type if available
	Line int    `json:"line"`           // where it's defined
}

// ExtractReturn describes a return value from the extracted function.
type ExtractReturn struct {
	Name string `json:"name"`
	Type string `json:"type,omitempty"`
	Line int    `json:"line"` // where it's used after the block
}

// BoundaryAnalysis describes the start/end boundaries of the extraction region.
type BoundaryAnalysis struct {
	StartLine int    `json:"startLine"`
	EndLine   int    `json:"endLine"`
	Lines     int    `json:"lines"`
	Language  string `json:"language,omitempty"`
}

// ExtractDetail provides extract-specific information for prepareChange.
type ExtractDetail struct {
	SuggestedSignature string             `json:"suggestedSignature"`
	Parameters         []ExtractParameter `json:"parameters,omitempty"`
	Returns            []ExtractReturn    `json:"returns,omitempty"`
	BoundaryAnalysis   *BoundaryAnalysis  `json:"boundaryAnalysis"`
}

// getPrepareExtractDetail builds extract-specific detail.
// Phase 1: minimal boundary analysis from file content.
// Phase 2 (future): full variable flow analysis via tree-sitter AST walking.
// Returns nil when the target is nil/pathless or the file cannot be read.
func (e *Engine) getPrepareExtractDetail(target *PrepareChangeTarget) *ExtractDetail {
	if target == nil || target.Path == "" {
		return nil
	}

	absPath := filepath.Join(e.repoRoot, target.Path)
	content, err := os.ReadFile(absPath)
	if err != nil {
		return nil
	}

	lines := strings.Split(string(content), "\n")
	totalLines := len(lines)

	// Default boundary: whole file (agents should specify precise boundaries)
	startLine := 1
	endLine := totalLines

	lang := inferLanguage(target.Path)

	detail := &ExtractDetail{
		BoundaryAnalysis: &BoundaryAnalysis{
			StartLine: startLine,
			EndLine:   endLine,
			Lines:     endLine - startLine + 1,
			Language:  lang,
		},
	}

	// Generate a basic suggested signature
	// NOTE(review): placeholder only — Parameters/Returns are never populated in phase 1.
	if target.SymbolId != "" {
		detail.SuggestedSignature = "func extracted() // TODO: determine parameters and returns"
	}

	return detail
}

// inferLanguage returns the language name from a file path.
// Unknown extensions yield "" so the JSON field is omitted downstream.
func inferLanguage(path string) string {
	ext := filepath.Ext(path)
	switch ext {
	case ".go":
		return "go"
	case ".ts", ".tsx":
		return "typescript"
	case ".js", ".jsx":
		return "javascript"
	case ".py":
		return "python"
	case ".rs":
		return "rust"
	case ".java":
		return "java"
	case ".kt":
		return "kotlin"
	default:
		return ""
	}
}
diff --git a/internal/query/prepare_rename.go b/internal/query/prepare_rename.go
new file mode 100644
index 00000000..04a56a27
--- /dev/null
+++ b/internal/query/prepare_rename.go
@@ -0,0 +1,158 @@
package query

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/SimplyLiz/CodeMCP/internal/backends"
)

// RenameCallSite describes a location where the renamed symbol is called.
type RenameCallSite struct {
	File           string `json:"file"`
	Line           int    `json:"line"`
	Column         int    `json:"column"`
	ContextSnippet string `json:"contextSnippet"` // surrounding code, capped at 120 chars
	Kind           string `json:"kind"`           // "call", "type-ref", "import"
}

// RenameTypeRef describes a type reference to the renamed symbol.
type RenameTypeRef struct {
	File           string `json:"file"`
	Line           int    `json:"line"`
	ContextSnippet string `json:"contextSnippet"`
}

// RenameImport describes an import path that references the renamed symbol.
type RenameImport struct {
	File       string `json:"file"`
	Line       int    `json:"line"`
	ImportPath string `json:"importPath"`
}

// RenameDetail provides rename-specific information for prepareChange.
type RenameDetail struct {
	CallSites      []RenameCallSite `json:"callSites"`
	TypeReferences []RenameTypeRef  `json:"typeReferences,omitempty"`
	ImportPaths    []RenameImport   `json:"importPaths,omitempty"`
	TotalSites     int              `json:"totalSites"` // as reported by SCIP; may exceed the capped lists above
}

// getPrepareRenameDetail builds rename-specific detail by finding all references
// and classifying them as call sites, type references, or imports.
+func (e *Engine) getPrepareRenameDetail(ctx context.Context, symbolId string) *RenameDetail { + if symbolId == "" || e.scipAdapter == nil || !e.scipAdapter.IsAvailable() { + return nil + } + + refsResult, err := e.scipAdapter.FindReferences(ctx, symbolId, backends.RefOptions{ + MaxResults: 200, + IncludeTests: true, + }) + if err != nil || refsResult == nil || len(refsResult.References) == 0 { + return nil + } + + detail := &RenameDetail{ + TotalSites: refsResult.TotalReferences, + } + + for _, ref := range refsResult.References { + snippet := e.getContextSnippet(ref.Location.Path, ref.Location.Line, 120) + kind := classifyReference(ref, snippet) + + switch kind { + case "import": + detail.ImportPaths = append(detail.ImportPaths, RenameImport{ + File: ref.Location.Path, + Line: ref.Location.Line, + ImportPath: extractImportPath(snippet), + }) + case "type-ref": + detail.TypeReferences = append(detail.TypeReferences, RenameTypeRef{ + File: ref.Location.Path, + Line: ref.Location.Line, + ContextSnippet: snippet, + }) + default: + detail.CallSites = append(detail.CallSites, RenameCallSite{ + File: ref.Location.Path, + Line: ref.Location.Line, + Column: ref.Location.Column, + ContextSnippet: snippet, + Kind: kind, + }) + } + } + + return detail +} + +// getContextSnippet reads a single line from a file for context, capped at maxLen chars. +func (e *Engine) getContextSnippet(relPath string, line int, maxLen int) string { + absPath := filepath.Join(e.repoRoot, relPath) + content, err := os.ReadFile(absPath) + if err != nil { + return "" + } + + lines := strings.Split(string(content), "\n") + if line < 1 || line > len(lines) { + return "" + } + + snippet := strings.TrimSpace(lines[line-1]) + if len(snippet) > maxLen { + snippet = snippet[:maxLen] + } + return snippet +} + +// classifyReference determines if a reference is a call, type-ref, or import. 
func classifyReference(ref backends.Reference, snippet string) string {
	snippetLower := strings.ToLower(snippet)

	// Check for import patterns
	if strings.Contains(snippetLower, "import ") || strings.Contains(snippetLower, "require(") {
		return "import"
	}

	// Use SCIP reference kind if available
	// NOTE(review): "definition" is not a case in the caller's switch, so
	// definition sites end up in CallSites with Kind "definition" — confirm intended.
	if ref.Kind == "definition" || ref.Kind == "def" {
		return "definition"
	}

	// Check for type usage patterns
	// Heuristic: "name: Type" without a call paren suggests a type annotation.
	if strings.Contains(snippet, ": ") && !strings.Contains(snippet, "(") {
		return "type-ref"
	}
	if strings.HasPrefix(strings.TrimSpace(snippet), "var ") || strings.HasPrefix(strings.TrimSpace(snippet), "type ") {
		return "type-ref"
	}

	return "call"
}

// extractImportPath extracts the import path from an import statement snippet.
// Returns the first double-quoted string; falls back to the raw snippet when
// no quoted path is present.
func extractImportPath(snippet string) string {
	// Handle Go-style: "github.com/foo/bar"
	if idx := strings.Index(snippet, `"`); idx >= 0 {
		end := strings.Index(snippet[idx+1:], `"`)
		if end >= 0 {
			return snippet[idx+1 : idx+1+end]
		}
	}
	return snippet
}

// FormatRenamePreview generates a human-readable summary of rename impact.
func FormatRenamePreview(detail *RenameDetail) string {
	if detail == nil {
		return "No rename detail available"
	}
	return fmt.Sprintf("%d call sites, %d type references, %d import paths (%d total)",
		len(detail.CallSites), len(detail.TypeReferences), len(detail.ImportPaths), detail.TotalSites)
}
diff --git a/internal/query/testgap.go b/internal/query/testgap.go
new file mode 100644
index 00000000..d2dd2a7a
--- /dev/null
+++ b/internal/query/testgap.go
@@ -0,0 +1,25 @@
package query

import (
	"context"

	"github.com/SimplyLiz/CodeMCP/internal/testgap"
)

// AnalyzeTestGapsOptions wraps testgap.AnalyzeOptions for the query engine.
type AnalyzeTestGapsOptions struct {
	Target   string // file or directory, relative to repo root
	MinLines int    // zero lets the analyzer apply its default
	Limit    int    // zero lets the analyzer apply its default
}

// AnalyzeTestGaps runs test gap analysis and wraps the result with provenance.
func (e *Engine) AnalyzeTestGaps(ctx context.Context, opts AnalyzeTestGapsOptions) (*testgap.TestGapResult, error) {
	// The analyzer is cheap to construct; zero-valued options are defaulted downstream.
	analyzer := testgap.NewAnalyzer(e.repoRoot, e.logger, e.scipAdapter)

	return analyzer.Analyze(ctx, testgap.AnalyzeOptions{
		Target:   opts.Target,
		MinLines: opts.MinLines,
		Limit:    opts.Limit,
	})
}
diff --git a/internal/testgap/analyzer.go b/internal/testgap/analyzer.go
new file mode 100644
index 00000000..0edf3485
--- /dev/null
+++ b/internal/testgap/analyzer.go
@@ -0,0 +1,315 @@
// Package testgap identifies functions that lack test coverage by cross-referencing
// complexity analysis with test file detection via SCIP references or heuristics.
package testgap

import (
	"context"
	"log/slog"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/SimplyLiz/CodeMCP/internal/backends"
	"github.com/SimplyLiz/CodeMCP/internal/backends/scip"
	"github.com/SimplyLiz/CodeMCP/internal/complexity"
)

// TestGap describes a function that appears untested.
type TestGap struct {
	File       string `json:"file"`
	Function   string `json:"function"`
	StartLine  int    `json:"startLine"`
	EndLine    int    `json:"endLine"`
	Complexity int    `json:"complexity"` // cyclomatic complexity, used as risk ordering
	Reason     string `json:"reason"`     // "no-test-reference", "no-test-file", "no-name-match"
}

// GapSummary provides aggregate test gap statistics.
type GapSummary struct {
	TotalFunctions  int     `json:"totalFunctions"`
	TestedFunctions int     `json:"testedFunctions"`
	CoveragePercent float64 `json:"coveragePercent"`
}

// TestGapResult holds the analysis output.
type TestGapResult struct {
	Gaps    []TestGap  `json:"gaps"`
	Summary GapSummary `json:"summary"`
	Method  string     `json:"method"` // "scip" or "heuristic"
}

// AnalyzeOptions configures test gap analysis.
type AnalyzeOptions struct {
	Target   string // file or directory path (relative to repo root)
	MinLines int    // minimum function lines to include (default 3)
	Limit    int    // max gaps to return (default 50)
}

// Analyzer detects untested functions.
type Analyzer struct {
	repoRoot           string
	logger             *slog.Logger
	scipAdapter        *scip.SCIPAdapter
	complexityAnalyzer *complexity.Analyzer // nil when tree-sitter support is unavailable
}

// NewAnalyzer creates a test gap analyzer.
// When complexity analysis is not compiled in, complexityAnalyzer stays nil
// and Analyze will find no functions (extractFunctions returns empty).
func NewAnalyzer(repoRoot string, logger *slog.Logger, scipAdapter *scip.SCIPAdapter) *Analyzer {
	var ca *complexity.Analyzer
	if complexity.IsAvailable() {
		ca = complexity.NewAnalyzer()
	}
	return &Analyzer{
		repoRoot:           repoRoot,
		logger:             logger,
		scipAdapter:        scipAdapter,
		complexityAnalyzer: ca,
	}
}

// Analyze runs test gap analysis on the target file(s).
// Gaps are sorted by cyclomatic complexity (highest risk first) and capped
// at opts.Limit; the summary counts are computed before the cap is applied.
func (a *Analyzer) Analyze(ctx context.Context, opts AnalyzeOptions) (*TestGapResult, error) {
	if opts.MinLines <= 0 {
		opts.MinLines = 3
	}
	if opts.Limit <= 0 {
		opts.Limit = 50
	}

	// Collect files to analyze
	files, err := a.collectFiles(opts.Target)
	if err != nil {
		return nil, err
	}

	// Prefer SCIP-backed reference checks; fall back to name matching.
	useSCIP := a.scipAdapter != nil && a.scipAdapter.IsAvailable()
	method := "heuristic"
	if useSCIP {
		method = "scip"
	}

	var allGaps []TestGap
	totalFunctions := 0
	testedFunctions := 0

	for _, file := range files {
		functions, err := a.extractFunctions(ctx, file)
		if err != nil {
			// Unparseable files are skipped, not fatal.
			a.logger.Debug("Failed to extract functions", "file", file, "error", err.Error())
			continue
		}

		for _, fn := range functions {
			// Trivial functions below the threshold are not worth reporting.
			if fn.Lines < opts.MinLines {
				continue
			}
			totalFunctions++

			tested := false
			reason := ""

			if useSCIP {
				tested, reason = a.checkTestedViaSCIP(ctx, file, fn)
			} else {
				tested, reason = a.checkTestedViaHeuristic(file, fn)
			}

			if tested {
				testedFunctions++
			} else {
				allGaps = append(allGaps, TestGap{
					File:       file,
					Function:   fn.Name,
					StartLine:  fn.StartLine,
					EndLine:    fn.EndLine,
					Complexity: fn.Cyclomatic,
					Reason:     reason,
				})
			}
		}
	}

	// Sort by complexity descending
	sort.Slice(allGaps, func(i, j int) bool {
		return allGaps[i].Complexity > allGaps[j].Complexity
	})

	// Apply limit
	if len(allGaps) > opts.Limit {
		allGaps = allGaps[:opts.Limit]
	}

	coveragePct := 0.0
	if totalFunctions > 0 {
		coveragePct = float64(testedFunctions) / float64(totalFunctions) * 100
	}

	return &TestGapResult{
		Gaps: allGaps,
		Summary: GapSummary{
			TotalFunctions:  totalFunctions,
			TestedFunctions: testedFunctions,
			CoveragePercent: coveragePct,
		},
		Method: method,
	}, nil
}

// collectFiles returns relative file paths for the target.
// A file target yields a one-element slice; a directory target is walked,
// skipping dependency/VCS directories and files recognized as tests.
func (a *Analyzer) collectFiles(target string) ([]string, error) {
	absPath := target
	if !filepath.IsAbs(target) {
		absPath = filepath.Join(a.repoRoot, target)
	}

	info, err := os.Stat(absPath)
	if err != nil {
		return nil, err
	}

	if !info.IsDir() {
		rel, _ := filepath.Rel(a.repoRoot, absPath)
		return []string{rel}, nil
	}

	var files []string
	err = filepath.Walk(absPath, func(path string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil {
			// Unreadable entries are skipped rather than aborting the walk.
			return nil //nolint:nilerr
		}
		if fi.IsDir() {
			name := fi.Name()
			if name == "node_modules" || name == "vendor" || name == ".git" || name == "__pycache__" {
				return filepath.SkipDir
			}
			return nil
		}
		if isAnalyzableSource(filepath.Ext(path)) && !isTestFile(path) {
			rel, _ := filepath.Rel(a.repoRoot, path)
			files = append(files, rel)
		}
		return nil
	})
	return files, err
}

// extractFunctions uses the complexity analyzer to get per-function info.
+func (a *Analyzer) extractFunctions(ctx context.Context, relPath string) ([]complexity.ComplexityResult, error) { + if a.complexityAnalyzer == nil { + return nil, nil + } + absPath := filepath.Join(a.repoRoot, relPath) + fc, err := a.complexityAnalyzer.AnalyzeFile(ctx, absPath) + if err != nil { + return nil, err + } + if fc == nil || fc.Error != "" { + return nil, nil + } + return fc.Functions, nil +} + +// checkTestedViaSCIP checks if a function has references from test files using SCIP. +func (a *Analyzer) checkTestedViaSCIP(ctx context.Context, file string, fn complexity.ComplexityResult) (bool, string) { + if fn.Name == "" || fn.Name == "" { + return false, "no-test-reference" + } + + // Search for the function symbol within the file scope + searchResult, err := a.scipAdapter.SearchSymbols(ctx, fn.Name, backends.SearchOptions{ + Scope: []string{file}, + MaxResults: 5, + }) + if err != nil || searchResult == nil || len(searchResult.Symbols) == 0 { + // Fall back to heuristic if SCIP can't find the symbol + return a.checkTestedViaHeuristic(file, fn) + } + + // Check references for each matching symbol + for _, sym := range searchResult.Symbols { + refsResult, err := a.scipAdapter.FindReferences(ctx, sym.StableID, backends.RefOptions{ + IncludeTests: true, + MaxResults: 100, + }) + if err != nil || refsResult == nil { + continue + } + for _, ref := range refsResult.References { + if isTestFile(ref.Location.Path) { + return true, "" + } + } + } + + return false, "no-test-reference" +} + +// checkTestedViaHeuristic checks if a function name appears in test files. 
+func (a *Analyzer) checkTestedViaHeuristic(file string, fn complexity.ComplexityResult) (bool, string) { + if fn.Name == "" || fn.Name == "" { + return false, "no-name-match" + } + + // Find corresponding test files + testFiles := a.findTestFiles(file) + if len(testFiles) == 0 { + return false, "no-test-file" + } + + // Check if function name appears in any test file + for _, tf := range testFiles { + absPath := filepath.Join(a.repoRoot, tf) + content, err := os.ReadFile(absPath) + if err != nil { + continue + } + text := string(content) + + // Check for function name reference in test code + if strings.Contains(text, fn.Name) { + return true, "" + } + } + + return false, "no-name-match" +} + +// findTestFiles locates test files for a given source file. +func (a *Analyzer) findTestFiles(file string) []string { + ext := filepath.Ext(file) + base := strings.TrimSuffix(file, ext) + + candidates := []string{ + base + "_test" + ext, + base + ".test" + ext, + base + ".spec" + ext, + } + + var found []string + for _, c := range candidates { + absPath := filepath.Join(a.repoRoot, c) + if _, err := os.Stat(absPath); err == nil { + found = append(found, c) + } + } + return found +} + +func isTestFile(path string) bool { + return strings.Contains(path, "_test.go") || + strings.Contains(path, ".test.ts") || + strings.Contains(path, ".test.js") || + strings.Contains(path, ".spec.ts") || + strings.Contains(path, ".spec.js") || + strings.Contains(path, "test_") || + strings.HasSuffix(path, "_test.py") +} + +func isAnalyzableSource(ext string) bool { + switch ext { + case ".go", ".ts", ".tsx", ".js", ".jsx", ".py", ".java", ".kt", ".rs": + return true + } + return false +}