From a3c3216c022d0803a7bd29b1ae99746ed1738fcb Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:12:13 +0100 Subject: [PATCH 01/81] docs(fork): add FORK_README.md and FORK_CHANGELOG.md Add documentation files for the fork: - FORK_README.md: Overview of fork features, configuration guide, how to stay updated with upstream, rollback instructions - FORK_CHANGELOG.md: Detailed changelog for all fork modifications These files document the differences from upstream and help users understand and manage the forked features. Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 129 ++++++++++++++++++++++++++++++++++++++++++++ FORK_README.md | 135 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 264 insertions(+) create mode 100644 FORK_CHANGELOG.md create mode 100644 FORK_README.md diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md new file mode 100644 index 00000000..9392ffdc --- /dev/null +++ b/FORK_CHANGELOG.md @@ -0,0 +1,129 @@ +# Fork Changelog + +All notable changes to this fork are documented in this file. +Format based on [Keep a Changelog](https://keepachangelog.com/). + +## [Unreleased] + +### Added +- Fork documentation (FORK_README.md, FORK_CHANGELOG.md) +- Configuration system via `.autocoder/config.json` + +## [2025-01-21] Quality Gates + +### Added +- New module: `quality_gates.py` - Quality checking logic (lint, type-check, custom scripts) +- New MCP tool: `feature_verify_quality` - Run quality checks on demand +- Auto-detection of linters: ESLint, Biome, ruff, flake8 +- Auto-detection of type checkers: TypeScript (tsc), Python (mypy) +- Support for custom quality scripts via `.autocoder/quality-checks.sh` + +### Changed +- Modified `feature_mark_passing` - Now enforces quality checks in strict mode +- In strict mode, `feature_mark_passing` BLOCKS if lint or type-check fails +- Quality results are stored in the `quality_result` DB column + +### Configuration +- `quality_gates.enabled`: Enable/disable quality gates (default: true) +- `quality_gates.strict_mode`: Block feature_mark_passing on failure (default: true) +- `quality_gates.checks.lint`: Run lint check (default: true) +- `quality_gates.checks.type_check`: Run type check (default: true) +- `quality_gates.checks.custom_script`: Path to custom script (optional) + +### How to Disable +```json +{"quality_gates": {"enabled": false}} +``` +Or for non-blocking mode: +```json +{"quality_gates": {"strict_mode": false}} +``` + +### Related Issues +- Addresses #68 (Agent skips features without testing) +- Addresses #69 (Test evidence storage) + +--- + +## [2025-01-21] Error Recovery + +### Added +- New DB columns: `failure_reason`, `failure_count`, `last_failure_at`, `quality_result` in Feature model +- New MCP tool: `feature_report_failure` - Report failures with escalation recommendations +- New MCP tool: `feature_get_stuck` - Get all features that have failed at least once +- New MCP tool: `feature_clear_all_in_progress` - Clear all stuck features at once +- New MCP tool: `feature_reset_failure` - Reset failure tracking for a feature +- New helper: `clear_stuck_features()` in `progress.py` - Auto-clear on agent startup +- Auto-recovery on agent startup: Clears stuck features from interrupted sessions + +### Changed +- Modified `api/database.py` - Added error recovery and quality result columns with auto-migration +- Modified `agent.py` - Calls `clear_stuck_features()` on startup +- Modified `mcp_server/feature_mcp.py` - Added error recovery MCP tools + +### Configuration +- New config section: 
`error_recovery` with `max_retries`, `skip_threshold`, `escalate_threshold`, `auto_clear_on_startup` + +### How to Disable +```json +{"error_recovery": {"auto_clear_on_startup": false}} +``` + +### Related Issues +- Fixes features stuck after stop (common issue when agents are interrupted) + +--- + +## Entry Template + +When adding a new feature, use this template: + +```markdown +## [YYYY-MM-DD] Feature Name + +### Added +- New file: `path/to/file.py` - Description +- New component: `ComponentName` - Description + +### Changed +- Modified `file.py` - What changed and why + +### Configuration +- New config option: `config.key` - What it does + +### How to Disable +\`\`\`json +{"feature_name": {"enabled": false}} +\`\`\` + +### Related Issues +- Closes #XX (upstream issue) +``` + +--- + +## Planned Features + +The following features are planned for implementation: + +### Phase 1: Foundation (Quick Wins) +- [ ] Enhanced Logging - Structured logs with filtering +- [ ] Quality Gates - Lint/type-check before marking passing +- [ ] Security Scanning - Detect vulnerabilities + +### Phase 2: Import Projects +- [ ] Stack Detector - Detect React, Next.js, Express, FastAPI, Django, Vue.js +- [ ] Feature Extractor - Reverse-engineer features from routes/endpoints +- [ ] Import Wizard UI - Chat-based project import + +### Phase 3: Workflow Improvements +- [ ] Feature Branches - Git workflow with feature branches +- [ ] Error Recovery - Handle stuck features, auto-clear on startup +- [ ] Review Agent - Automatic code review +- [ ] CI/CD Integration - GitHub Actions generation + +### Phase 4: Polish & Ecosystem +- [ ] Template Library - SaaS, e-commerce, dashboard templates +- [ ] Auto Documentation - README, API docs generation +- [ ] Design Tokens - Consistent styling +- [ ] Visual Regression - Screenshot comparison testing diff --git a/FORK_README.md b/FORK_README.md new file mode 100644 index 00000000..73974ff1 --- /dev/null +++ b/FORK_README.md @@ -0,0 +1,135 @@ +# Autocoder Fork - Enhanced Features + +This is a fork of [leonvanzyl/autocoder](https://github.com/leonvanzyl/autocoder) +with additional features for improved developer experience. + +## What's Different in This Fork + +### New Features + +- **Import Existing Projects** - Import existing codebases and continue development with Autocoder +- **Quality Gates** - Automatic code quality checks (lint, type-check) before marking features as passing +- **Enhanced Logging** - Better debugging with filterable, searchable, structured logs +- **Security Scanning** - Detect vulnerabilities in generated code (secrets, injection patterns) +- **Feature Branches** - Professional git workflow with automatic feature branch creation +- **Error Recovery** - Better handling of stuck features with auto-clear on startup +- **Template Library** - Pre-made templates for common app types (SaaS, e-commerce, dashboard) +- **CI/CD Integration** - GitHub Actions workflows generated automatically + +### Configuration + +All new features can be configured via `.autocoder/config.json`. +See [Configuration Guide](#configuration) for details. 
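+
+The file is optional: keys you omit fall back to the fork's built-in defaults
+(the loader deep-merges your file over them), so a minimal override is enough
+to change a single feature. For example, a sketch that only switches the git
+workflow to feature branches:
+
+```json
+{
+  "git_workflow": { "mode": "feature_branches" }
+}
+```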
+ +## Configuration + +Create a `.autocoder/config.json` file in your project directory: + +```json +{ + "version": "1.0", + + "quality_gates": { + "enabled": true, + "strict_mode": true, + "checks": { + "lint": true, + "type_check": true, + "unit_tests": false, + "custom_script": ".autocoder/quality-checks.sh" + } + }, + + "git_workflow": { + "mode": "feature_branches", + "branch_prefix": "feature/", + "auto_merge": false + }, + + "error_recovery": { + "max_retries": 3, + "skip_threshold": 5, + "escalate_threshold": 7 + }, + + "completion": { + "auto_stop_at_100": true, + "max_regression_cycles": 3 + }, + + "ci_cd": { + "provider": "github", + "environments": { + "staging": {"url": "", "auto_deploy": true}, + "production": {"url": "", "auto_deploy": false} + } + }, + + "import": { + "default_feature_status": "pending", + "auto_detect_stack": true + } +} +``` + +### Disabling Features + +Each feature can be disabled individually: + +```json +{ + "quality_gates": { + "enabled": false + }, + "git_workflow": { + "mode": "none" + } +} +``` + +## Staying Updated with Upstream + +This fork regularly syncs with upstream. To get latest upstream changes: + +```bash +git fetch upstream +git checkout master && git merge upstream/master +git checkout my-features && git merge master +``` + +## Reverting Changes + +### Revert to Original + +```bash +# Option 1: Full reset to upstream +git checkout my-features +git reset --hard upstream/master +git push origin my-features --force + +# Option 2: Revert specific commits +git log --oneline # find commit to revert +git revert + +# Option 3: Checkout specific files from upstream +git checkout upstream/master -- path/to/file.py +``` + +### Safety Checkpoint + +Before major changes, create a tag: + +```bash +git tag before-feature-name +# If something goes wrong: +git reset --hard before-feature-name +``` + +## Contributing Back + +Features that could benefit the original project are submitted as PRs to upstream. +See [FORK_CHANGELOG.md](./FORK_CHANGELOG.md) for detailed change history. + +## License + +Same license as the original [leonvanzyl/autocoder](https://github.com/leonvanzyl/autocoder) project. From 8f4165a53f96f148fc453eb04d5f6a949981432e Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:12:23 +0100 Subject: [PATCH 02/81] feat(config): add enhanced configuration system Add autocoder_config.py with full configuration schema for all planned features: - QualityGatesConfig: lint, type-check, custom scripts - GitWorkflowConfig: feature branches, trunk, none modes - ErrorRecoveryConfig: max retries, skip/escalate thresholds - CompletionConfig: auto-stop at 100%, regression cycles - CiCdConfig: provider and environments - SecurityScanningConfig: dependencies, secrets, injection patterns - LoggingConfig: level, structured output, timestamps Provides deep merge with defaults and convenience getters for each config section. Extends existing project_config.py pattern. Co-Authored-By: Claude Opus 4.5 --- server/services/autocoder_config.py | 376 ++++++++++++++++++++++++++++ 1 file changed, 376 insertions(+) create mode 100644 server/services/autocoder_config.py diff --git a/server/services/autocoder_config.py b/server/services/autocoder_config.py new file mode 100644 index 00000000..83313c0e --- /dev/null +++ b/server/services/autocoder_config.py @@ -0,0 +1,376 @@ +""" +Autocoder Enhanced Configuration +================================ + +Centralized configuration system for all Autocoder features. 
+Extends the basic project_config.py with support for: +- Quality Gates +- Git Workflow +- Error Recovery +- CI/CD Integration +- Import Settings +- Completion Settings + +Configuration is stored in {project_dir}/.autocoder/config.json. +""" + +import json +import logging +from pathlib import Path +from typing import Any, TypedDict + +logger = logging.getLogger(__name__) + + +# ============================================================================= +# Type Definitions for Configuration Schema +# ============================================================================= + + +class QualityChecksConfig(TypedDict, total=False): + """Configuration for individual quality checks.""" + lint: bool + type_check: bool + unit_tests: bool + custom_script: str | None + + +class QualityGatesConfig(TypedDict, total=False): + """Configuration for quality gates feature.""" + enabled: bool + strict_mode: bool + checks: QualityChecksConfig + + +class GitWorkflowConfig(TypedDict, total=False): + """Configuration for git workflow feature.""" + mode: str # "feature_branches" | "trunk" | "none" + branch_prefix: str + auto_merge: bool + + +class ErrorRecoveryConfig(TypedDict, total=False): + """Configuration for error recovery feature.""" + max_retries: int + skip_threshold: int + escalate_threshold: int + auto_clear_on_startup: bool + + +class CompletionConfig(TypedDict, total=False): + """Configuration for completion behavior.""" + auto_stop_at_100: bool + max_regression_cycles: int + prompt_before_extra_cycles: bool + + +class EnvironmentConfig(TypedDict, total=False): + """Configuration for a deployment environment.""" + url: str + auto_deploy: bool + + +class CiCdConfig(TypedDict, total=False): + """Configuration for CI/CD integration.""" + provider: str # "github" | "gitlab" | "none" + environments: dict[str, EnvironmentConfig] + + +class ImportConfig(TypedDict, total=False): + """Configuration for project import feature.""" + default_feature_status: str # "pending" | "passing" + auto_detect_stack: bool + + +class SecurityScanningConfig(TypedDict, total=False): + """Configuration for security scanning feature.""" + enabled: bool + scan_dependencies: bool + scan_secrets: bool + scan_injection_patterns: bool + fail_on_high_severity: bool + + +class LoggingConfig(TypedDict, total=False): + """Configuration for enhanced logging feature.""" + enabled: bool + level: str # "debug" | "info" | "warn" | "error" + structured_output: bool + include_timestamps: bool + max_log_file_size_mb: int + + +class AutocoderConfig(TypedDict, total=False): + """Full Autocoder configuration schema.""" + version: str + dev_command: str | None + quality_gates: QualityGatesConfig + git_workflow: GitWorkflowConfig + error_recovery: ErrorRecoveryConfig + completion: CompletionConfig + ci_cd: CiCdConfig + import_settings: ImportConfig + security_scanning: SecurityScanningConfig + logging: LoggingConfig + + +# ============================================================================= +# Default Configuration Values +# ============================================================================= + + +DEFAULT_CONFIG: AutocoderConfig = { + "version": "1.0", + "dev_command": None, + "quality_gates": { + "enabled": True, + "strict_mode": True, + "checks": { + "lint": True, + "type_check": True, + "unit_tests": False, + "custom_script": None, + }, + }, + "git_workflow": { + "mode": "none", + "branch_prefix": "feature/", + "auto_merge": False, + }, + "error_recovery": { + "max_retries": 3, + "skip_threshold": 5, + "escalate_threshold": 
7, + "auto_clear_on_startup": True, + }, + "completion": { + "auto_stop_at_100": True, + "max_regression_cycles": 3, + "prompt_before_extra_cycles": False, + }, + "ci_cd": { + "provider": "none", + "environments": {}, + }, + "import_settings": { + "default_feature_status": "pending", + "auto_detect_stack": True, + }, + "security_scanning": { + "enabled": True, + "scan_dependencies": True, + "scan_secrets": True, + "scan_injection_patterns": True, + "fail_on_high_severity": False, + }, + "logging": { + "enabled": True, + "level": "info", + "structured_output": True, + "include_timestamps": True, + "max_log_file_size_mb": 10, + }, +} + + +# ============================================================================= +# Configuration Loading and Saving +# ============================================================================= + + +def _get_config_path(project_dir: Path) -> Path: + """Get the path to the project config file.""" + return project_dir / ".autocoder" / "config.json" + + +def _deep_merge(base: dict, override: dict) -> dict: + """ + Deep merge two dictionaries. + + Values from override take precedence over base. + Nested dicts are merged recursively. + + Args: + base: Base dictionary with default values + override: Dictionary with override values + + Returns: + Merged dictionary + """ + result = base.copy() + + for key, value in override.items(): + if key in result and isinstance(result[key], dict) and isinstance(value, dict): + result[key] = _deep_merge(result[key], value) + else: + result[key] = value + + return result + + +def load_autocoder_config(project_dir: Path) -> AutocoderConfig: + """ + Load the full Autocoder configuration with defaults. + + Reads from .autocoder/config.json and merges with defaults. + If the config file doesn't exist or is invalid, returns defaults. + + Args: + project_dir: Path to the project directory + + Returns: + Full configuration with all sections populated + """ + config_path = _get_config_path(project_dir) + + if not config_path.exists(): + logger.debug("No config file found at %s, using defaults", config_path) + return DEFAULT_CONFIG.copy() + + try: + with open(config_path, "r", encoding="utf-8") as f: + user_config = json.load(f) + + if not isinstance(user_config, dict): + logger.warning( + "Invalid config format in %s: expected dict, got %s", + config_path, type(user_config).__name__ + ) + return DEFAULT_CONFIG.copy() + + # Merge user config with defaults + merged = _deep_merge(DEFAULT_CONFIG, user_config) + return merged + + except json.JSONDecodeError as e: + logger.warning("Failed to parse config at %s: %s", config_path, e) + return DEFAULT_CONFIG.copy() + except OSError as e: + logger.warning("Failed to read config at %s: %s", config_path, e) + return DEFAULT_CONFIG.copy() + + +def save_autocoder_config(project_dir: Path, config: AutocoderConfig) -> None: + """ + Save the Autocoder configuration to disk. + + Creates the .autocoder directory if it doesn't exist. 
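+
+    Example (illustrative):
+        config = load_autocoder_config(project_dir)
+        config["git_workflow"]["mode"] = "feature_branches"
+        save_autocoder_config(project_dir, config)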
+ + Args: + project_dir: Path to the project directory + config: Configuration to save + + Raises: + OSError: If the file cannot be written + """ + config_path = _get_config_path(project_dir) + config_path.parent.mkdir(parents=True, exist_ok=True) + + try: + with open(config_path, "w", encoding="utf-8") as f: + json.dump(config, f, indent=2) + logger.debug("Saved config to %s", config_path) + except OSError as e: + logger.error("Failed to save config to %s: %s", config_path, e) + raise + + +def update_autocoder_config(project_dir: Path, updates: dict[str, Any]) -> AutocoderConfig: + """ + Update specific configuration values. + + Loads current config, applies updates, and saves. + + Args: + project_dir: Path to the project directory + updates: Dictionary with values to update (can be nested) + + Returns: + Updated configuration + """ + config = load_autocoder_config(project_dir) + merged = _deep_merge(config, updates) + save_autocoder_config(project_dir, merged) + return merged + + +# ============================================================================= +# Convenience Getters for Specific Sections +# ============================================================================= + + +def get_quality_gates_config(project_dir: Path) -> QualityGatesConfig: + """Get quality gates configuration for a project.""" + config = load_autocoder_config(project_dir) + return config.get("quality_gates", DEFAULT_CONFIG["quality_gates"]) + + +def get_git_workflow_config(project_dir: Path) -> GitWorkflowConfig: + """Get git workflow configuration for a project.""" + config = load_autocoder_config(project_dir) + return config.get("git_workflow", DEFAULT_CONFIG["git_workflow"]) + + +def get_error_recovery_config(project_dir: Path) -> ErrorRecoveryConfig: + """Get error recovery configuration for a project.""" + config = load_autocoder_config(project_dir) + return config.get("error_recovery", DEFAULT_CONFIG["error_recovery"]) + + +def get_completion_config(project_dir: Path) -> CompletionConfig: + """Get completion configuration for a project.""" + config = load_autocoder_config(project_dir) + return config.get("completion", DEFAULT_CONFIG["completion"]) + + +def get_security_scanning_config(project_dir: Path) -> SecurityScanningConfig: + """Get security scanning configuration for a project.""" + config = load_autocoder_config(project_dir) + return config.get("security_scanning", DEFAULT_CONFIG["security_scanning"]) + + +def get_logging_config(project_dir: Path) -> LoggingConfig: + """Get logging configuration for a project.""" + config = load_autocoder_config(project_dir) + return config.get("logging", DEFAULT_CONFIG["logging"]) + + +# ============================================================================= +# Feature Enable/Disable Checks +# ============================================================================= + + +def is_quality_gates_enabled(project_dir: Path) -> bool: + """Check if quality gates are enabled for a project.""" + config = get_quality_gates_config(project_dir) + return config.get("enabled", True) + + +def is_strict_quality_mode(project_dir: Path) -> bool: + """Check if strict quality mode is enabled (blocks feature_mark_passing on failure).""" + config = get_quality_gates_config(project_dir) + return config.get("enabled", True) and config.get("strict_mode", True) + + +def is_security_scanning_enabled(project_dir: Path) -> bool: + """Check if security scanning is enabled for a project.""" + config = get_security_scanning_config(project_dir) + return config.get("enabled", 
True) + + +def is_auto_clear_on_startup_enabled(project_dir: Path) -> bool: + """Check if auto-clear stuck features on startup is enabled.""" + config = get_error_recovery_config(project_dir) + return config.get("auto_clear_on_startup", True) + + +def is_auto_stop_at_100_enabled(project_dir: Path) -> bool: + """Check if agent should auto-stop when all features pass.""" + config = get_completion_config(project_dir) + return config.get("auto_stop_at_100", True) + + +def get_git_workflow_mode(project_dir: Path) -> str: + """Get the git workflow mode for a project.""" + config = get_git_workflow_config(project_dir) + return config.get("mode", "none") From 4c995ddc5def43a31846e6d0de14a0348de58c99 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:12:32 +0100 Subject: [PATCH 03/81] feat(error-recovery): add failure tracking and auto-clear stuck features Add error recovery system to handle stuck and failing features: Database changes (api/database.py): - Add failure_reason, failure_count, last_failure_at columns - Add quality_result column for quality gate results - Add auto-migration for new columns Progress tracking (progress.py): - Add clear_stuck_features() to clear orphaned in_progress flags Agent startup (agent.py): - Call clear_stuck_features() on agent startup - Prevents features from being stuck after interrupted sessions This addresses the common issue where features remain stuck with in_progress=True when agents are stopped mid-work. Co-Authored-By: Claude Opus 4.5 --- agent.py | 12 ++++++++++- api/database.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ progress.py | 42 +++++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+), 1 deletion(-) diff --git a/agent.py b/agent.py index 09c1a043..365566db 100644 --- a/agent.py +++ b/agent.py @@ -23,7 +23,13 @@ sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace", line_buffering=True) from client import create_client -from progress import count_passing_tests, has_features, print_progress_summary, print_session_header +from progress import ( + clear_stuck_features, + count_passing_tests, + has_features, + print_progress_summary, + print_session_header, +) from prompts import ( copy_spec_to_project, get_coding_prompt, @@ -149,6 +155,10 @@ async def run_autonomous_agent( # Create project directory project_dir.mkdir(parents=True, exist_ok=True) + # Auto-recovery: Clear stuck features from previous interrupted sessions + # This prevents features from being orphaned when agents are stopped mid-work + clear_stuck_features(project_dir) + # Determine agent type if not explicitly set if agent_type is None: # Auto-detect based on whether we have features diff --git a/api/database.py b/api/database.py index cb8e7aa9..68864928 100644 --- a/api/database.py +++ b/api/database.py @@ -34,6 +34,14 @@ class Feature(Base): # NULL/empty = no dependencies (backwards compatible) dependencies = Column(JSON, nullable=True, default=None) + # Error Recovery: Track failures for retry logic and escalation + failure_reason = Column(Text, nullable=True) # Last failure reason + failure_count = Column(Integer, nullable=False, default=0) # Number of failed attempts + last_failure_at = Column(Text, nullable=True) # ISO timestamp of last failure + + # Quality Gates: Store quality check results + quality_result = Column(JSON, nullable=True) # {"passed": bool, "checks": {...}, "timestamp": "..."} + def to_dict(self) -> dict: """Convert feature to dictionary for JSON serialization.""" return { @@ -48,6 +56,12 
@@ def to_dict(self) -> dict: "in_progress": self.in_progress if self.in_progress is not None else False, # Dependencies: NULL/empty treated as empty list for backwards compat "dependencies": self.dependencies if self.dependencies else [], + # Error Recovery fields + "failure_reason": self.failure_reason, + "failure_count": self.failure_count if self.failure_count is not None else 0, + "last_failure_at": self.last_failure_at, + # Quality Gates field + "quality_result": self.quality_result, } def get_dependencies_safe(self) -> list[int]: @@ -113,6 +127,45 @@ def _migrate_add_dependencies_column(engine) -> None: conn.commit() +def _migrate_add_error_recovery_columns(engine) -> None: + """Add error recovery columns to existing databases. + + Columns added: + - failure_reason: TEXT - last failure reason + - failure_count: INTEGER - number of failed attempts + - last_failure_at: TEXT - ISO timestamp of last failure + """ + with engine.connect() as conn: + result = conn.execute(text("PRAGMA table_info(features)")) + columns = [row[1] for row in result.fetchall()] + + if "failure_reason" not in columns: + conn.execute(text("ALTER TABLE features ADD COLUMN failure_reason TEXT DEFAULT NULL")) + + if "failure_count" not in columns: + conn.execute(text("ALTER TABLE features ADD COLUMN failure_count INTEGER DEFAULT 0")) + + if "last_failure_at" not in columns: + conn.execute(text("ALTER TABLE features ADD COLUMN last_failure_at TEXT DEFAULT NULL")) + + conn.commit() + + +def _migrate_add_quality_result_column(engine) -> None: + """Add quality_result column to existing databases. + + Stores JSON with quality check results: + {"passed": bool, "checks": {...}, "timestamp": "..."} + """ + with engine.connect() as conn: + result = conn.execute(text("PRAGMA table_info(features)")) + columns = [row[1] for row in result.fetchall()] + + if "quality_result" not in columns: + conn.execute(text("ALTER TABLE features ADD COLUMN quality_result TEXT DEFAULT NULL")) + conn.commit() + + def _is_network_path(path: Path) -> bool: """Detect if path is on a network filesystem. @@ -195,6 +248,8 @@ def create_database(project_dir: Path) -> tuple: _migrate_add_in_progress_column(engine) _migrate_fix_null_boolean_fields(engine) _migrate_add_dependencies_column(engine) + _migrate_add_error_recovery_columns(engine) + _migrate_add_quality_result_column(engine) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) return engine, SessionLocal diff --git a/progress.py b/progress.py index dfb700b4..3e04a430 100644 --- a/progress.py +++ b/progress.py @@ -198,6 +198,48 @@ def send_progress_webhook(passing: int, total: int, project_dir: Path) -> None: ) +def clear_stuck_features(project_dir: Path) -> int: + """ + Clear all in_progress flags from features at agent startup. + + When an agent is stopped mid-work (e.g., user interrupt, crash), + features can be left with in_progress=True and become orphaned. + This function clears those flags so features return to the pending queue. 
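+    Only the in_progress flag is touched; passes status, failure history,
+    and priority are left unchanged.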
+ + Args: + project_dir: Directory containing the project + + Returns: + Number of features that were unstuck + """ + db_file = project_dir / "features.db" + if not db_file.exists(): + return 0 + + try: + conn = sqlite3.connect(db_file) + cursor = conn.cursor() + + # Count how many will be cleared + cursor.execute("SELECT COUNT(*) FROM features WHERE in_progress = 1") + count = cursor.fetchone()[0] + + if count > 0: + # Clear all in_progress flags + cursor.execute("UPDATE features SET in_progress = 0 WHERE in_progress = 1") + conn.commit() + print(f"[Auto-recovery] Cleared {count} stuck feature(s) from previous session") + + conn.close() + return count + except sqlite3.OperationalError: + # Table doesn't exist or doesn't have in_progress column + return 0 + except Exception as e: + print(f"[Warning] Could not clear stuck features: {e}") + return 0 + + def print_session_header(session_num: int, is_initializer: bool) -> None: """Print a formatted header for the session.""" session_type = "INITIALIZER" if is_initializer else "CODING AGENT" From 36de2c240cf7f7aee1faae45b8333a4d8e5005cf Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:12:51 +0100 Subject: [PATCH 04/81] feat(error-recovery): add MCP tools for failure tracking Add new MCP tools for error recovery: - feature_report_failure: Report failures with escalation recommendations - count < 3: retry - count >= 3: skip - count >= 5: decompose - count >= 7: escalate to human - feature_get_stuck: Get all features with failure_count > 0 - feature_clear_all_in_progress: Clear all stuck features at once - feature_reset_failure: Reset failure counter for a feature feat(quality-gates): add feature_verify_quality MCP tool Add quality gates MCP tools: - feature_verify_quality: Run lint and type-check before marking passing - Modify feature_mark_passing to enforce quality checks in strict mode - Auto-detect linters: ESLint, Biome, ruff, flake8 - Auto-detect type checkers: TypeScript tsc, Python mypy In strict mode (default), feature_mark_passing BLOCKS if quality checks fail. Agent must fix issues and retry. Addresses #68 (Agent skips features without testing) Addresses #69 (Test evidence storage) Co-Authored-By: Claude Opus 4.5 --- mcp_server/feature_mcp.py | 344 +++++++++++++++++++++++++++++++++++++- 1 file changed, 342 insertions(+), 2 deletions(-) diff --git a/mcp_server/feature_mcp.py b/mcp_server/feature_mcp.py index e46403b2..396afe5a 100755 --- a/mcp_server/feature_mcp.py +++ b/mcp_server/feature_mcp.py @@ -395,15 +395,25 @@ def feature_mark_passing( ) -> str: """Mark a feature as passing after successful implementation. + IMPORTANT: In strict mode (default), this will automatically run quality checks + (lint, type-check) and BLOCK if they fail. You must fix the issues and try again. + Updates the feature's passes field to true and clears the in_progress flag. Use this after you have implemented the feature and verified it works correctly. + Quality checks are configured in .autocoder/config.json (quality_gates section). + To disable: set quality_gates.enabled=false or quality_gates.strict_mode=false. + Args: feature_id: The ID of the feature to mark as passing Returns: - JSON with the updated feature details, or error if not found. + JSON with the updated feature details, or error if quality checks fail. 
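+
+    Example blocked response in strict mode (abridged, illustrative values):
+        {
+          "error": "quality_check_failed",
+          "summary": "1/2 checks passed. Failed: lint",
+          "failed_checks": [{"check": "lint (eslint)", "output": "..."}],
+          "hint": "Fix the issues above and try feature_mark_passing again"
+        }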
""" + # Import quality gates module + sys.path.insert(0, str(Path(__file__).parent.parent)) + from quality_gates import verify_quality, load_quality_config + session = get_session() try: feature = session.query(Feature).filter(Feature.id == feature_id).first() @@ -411,12 +421,65 @@ def feature_mark_passing( if feature is None: return json.dumps({"error": f"Feature with ID {feature_id} not found"}) + # Load quality gates config + config = load_quality_config(PROJECT_DIR) + quality_enabled = config.get("enabled", True) + strict_mode = config.get("strict_mode", True) + + # Run quality checks in strict mode + if quality_enabled and strict_mode: + checks_config = config.get("checks", {}) + + quality_result = verify_quality( + PROJECT_DIR, + run_lint=checks_config.get("lint", True), + run_type_check=checks_config.get("type_check", True), + run_custom=True, + custom_script_path=checks_config.get("custom_script"), + ) + + # Store the quality result + feature.quality_result = quality_result + + # Block if quality checks failed + if not quality_result["passed"]: + feature.in_progress = False # Release the feature + session.commit() + + # Build detailed error message + failed_checks = [] + for name, check in quality_result["checks"].items(): + if not check["passed"]: + output_preview = check["output"][:500] if check["output"] else "No output" + failed_checks.append({ + "check": check["name"], + "output": output_preview, + }) + + return json.dumps({ + "error": "quality_check_failed", + "message": f"Cannot mark feature #{feature_id} as passing - quality checks failed", + "summary": quality_result["summary"], + "failed_checks": failed_checks, + "hint": "Fix the issues above and try feature_mark_passing again", + }, indent=2) + + # All checks passed (or disabled) - mark as passing feature.passes = True feature.in_progress = False + # Clear any previous failure tracking on success + feature.failure_count = 0 + feature.failure_reason = None session.commit() session.refresh(feature) - return json.dumps(feature.to_dict(), indent=2) + result = feature.to_dict() + if quality_enabled and strict_mode: + result["quality_status"] = "passed" + else: + result["quality_status"] = "skipped" + + return json.dumps(result, indent=2) finally: session.close() @@ -1044,5 +1107,282 @@ def feature_set_dependencies( session.close() +# ============================================================================= +# Quality Gates Tools +# ============================================================================= + + +@mcp.tool() +def feature_verify_quality( + feature_id: Annotated[int, Field(ge=1, description="Feature ID to verify quality for")] +) -> str: + """Verify code quality before marking a feature as passing. + + Runs configured quality checks: + - Lint (ESLint/Biome for JS/TS, ruff/flake8 for Python) + - Type check (TypeScript tsc, Python mypy) + - Custom script (.autocoder/quality-checks.sh if exists) + + Configuration is loaded from .autocoder/config.json (quality_gates section). + + IMPORTANT: In strict mode (default), feature_mark_passing will automatically + call this and BLOCK if quality checks fail. Use this tool for manual checks + or to preview quality status. 
+ + Args: + feature_id: The ID of the feature being verified + + Returns: + JSON with: passed (bool), checks (dict), summary (str) + """ + # Import here to avoid circular imports + sys.path.insert(0, str(Path(__file__).parent.parent)) + from quality_gates import verify_quality, load_quality_config + + session = get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if feature is None: + return json.dumps({"error": f"Feature with ID {feature_id} not found"}) + + # Load config + config = load_quality_config(PROJECT_DIR) + + if not config.get("enabled", True): + return json.dumps({ + "passed": True, + "summary": "Quality gates disabled in config", + "checks": {} + }) + + checks_config = config.get("checks", {}) + + # Run quality checks + result = verify_quality( + PROJECT_DIR, + run_lint=checks_config.get("lint", True), + run_type_check=checks_config.get("type_check", True), + run_custom=True, + custom_script_path=checks_config.get("custom_script"), + ) + + # Store result in database + feature.quality_result = result + session.commit() + + return json.dumps({ + "feature_id": feature_id, + "passed": result["passed"], + "summary": result["summary"], + "checks": result["checks"], + "timestamp": result["timestamp"], + }, indent=2) + finally: + session.close() + + +# ============================================================================= +# Error Recovery Tools +# ============================================================================= + + +@mcp.tool() +def feature_report_failure( + feature_id: Annotated[int, Field(ge=1, description="Feature ID that failed")], + reason: Annotated[str, Field(min_length=1, description="Description of why the feature failed")] +) -> str: + """Report a failure for a feature, incrementing its failure count. + + Use this when you encounter an error implementing a feature. + The failure information helps with retry logic and escalation. + + Behavior based on failure_count: + - count < 3: Agent should retry with the failure reason as context + - count >= 3: Agent should skip this feature (use feature_skip) + - count >= 5: Feature may need to be broken into smaller features + - count >= 7: Feature is escalated for human review + + Args: + feature_id: The ID of the feature that failed + reason: Description of the failure (error message, blocker, etc.) + + Returns: + JSON with updated failure info: failure_count, failure_reason, recommendation + """ + from datetime import datetime + + session = get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + + if feature is None: + return json.dumps({"error": f"Feature with ID {feature_id} not found"}) + + # Update failure tracking + feature.failure_count = (feature.failure_count or 0) + 1 + feature.failure_reason = reason + feature.last_failure_at = datetime.utcnow().isoformat() + + # Clear in_progress so the feature returns to pending + feature.in_progress = False + + session.commit() + session.refresh(feature) + + # Determine recommendation based on failure count + count = feature.failure_count + if count < 3: + recommendation = "retry" + message = f"Retry #{count}. Include the failure reason in your next attempt." + elif count < 5: + recommendation = "skip" + message = f"Failed {count} times. Consider skipping with feature_skip and trying later." + elif count < 7: + recommendation = "decompose" + message = f"Failed {count} times. This feature may need to be broken into smaller parts." 
+ else: + recommendation = "escalate" + message = f"Failed {count} times. This feature needs human review." + + return json.dumps({ + "feature_id": feature_id, + "failure_count": feature.failure_count, + "failure_reason": feature.failure_reason, + "last_failure_at": feature.last_failure_at, + "recommendation": recommendation, + "message": message + }, indent=2) + finally: + session.close() + + +@mcp.tool() +def feature_get_stuck() -> str: + """Get all features that have failed at least once. + + Returns features sorted by failure_count (descending), showing + which features are having the most trouble. + + Use this to identify problematic features that may need: + - Manual intervention + - Decomposition into smaller features + - Dependency adjustments + + Returns: + JSON with: features (list with failure info), count (int) + """ + session = get_session() + try: + features = ( + session.query(Feature) + .filter(Feature.failure_count > 0) + .order_by(Feature.failure_count.desc()) + .all() + ) + + result = [] + for f in features: + result.append({ + "id": f.id, + "name": f.name, + "category": f.category, + "failure_count": f.failure_count, + "failure_reason": f.failure_reason, + "last_failure_at": f.last_failure_at, + "passes": f.passes, + "in_progress": f.in_progress, + }) + + return json.dumps({ + "features": result, + "count": len(result) + }, indent=2) + finally: + session.close() + + +@mcp.tool() +def feature_clear_all_in_progress() -> str: + """Clear ALL in_progress flags from all features. + + Use this on agent startup to unstick features from previous + interrupted sessions. When an agent is stopped mid-work, features + can be left with in_progress=True and become orphaned. + + This does NOT affect: + - passes status (completed features stay completed) + - failure_count (failure history is preserved) + - priority (queue order is preserved) + + Returns: + JSON with: cleared (int) - number of features that were unstuck + """ + session = get_session() + try: + # Count features that will be cleared + in_progress_count = ( + session.query(Feature) + .filter(Feature.in_progress == True) + .count() + ) + + if in_progress_count == 0: + return json.dumps({ + "cleared": 0, + "message": "No features were in_progress" + }) + + # Clear all in_progress flags + session.execute( + text("UPDATE features SET in_progress = 0 WHERE in_progress = 1") + ) + session.commit() + + return json.dumps({ + "cleared": in_progress_count, + "message": f"Cleared in_progress flag from {in_progress_count} feature(s)" + }, indent=2) + finally: + session.close() + + +@mcp.tool() +def feature_reset_failure( + feature_id: Annotated[int, Field(ge=1, description="Feature ID to reset")] +) -> str: + """Reset the failure counter and reason for a feature. + + Use this when you want to give a feature a fresh start, + for example after fixing an underlying issue. 
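+    Only failure_count, failure_reason, and last_failure_at are cleared;
+    passes and in_progress are not changed.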
+ + Args: + feature_id: The ID of the feature to reset + + Returns: + JSON with the updated feature details + """ + session = get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + + if feature is None: + return json.dumps({"error": f"Feature with ID {feature_id} not found"}) + + feature.failure_count = 0 + feature.failure_reason = None + feature.last_failure_at = None + + session.commit() + session.refresh(feature) + + return json.dumps({ + "success": True, + "message": f"Reset failure tracking for feature #{feature_id}", + "feature": feature.to_dict() + }, indent=2) + finally: + session.close() + + if __name__ == "__main__": mcp.run() From 9b2cee2c16f4d4b82f03a3dc77de14d108dbb16f Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:12:59 +0100 Subject: [PATCH 05/81] feat(quality-gates): add quality checking module Add quality_gates.py with: - Auto-detection of JS/TS linters (ESLint, Biome, npm lint) - Auto-detection of Python linters (ruff, flake8) - Auto-detection of type checkers (TypeScript tsc, Python mypy) - Support for custom quality scripts (.autocoder/quality-checks.sh) - Configuration loading from .autocoder/config.json Functions: - run_lint_check(): Run detected linter - run_type_check(): Run detected type checker - run_custom_script(): Run custom quality script - verify_quality(): Run all configured checks - load_quality_config(): Load config with defaults Results are stored in QualityGateResult TypedDict with: passed, timestamp, checks (dict), summary Co-Authored-By: Claude Opus 4.5 --- quality_gates.py | 376 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 376 insertions(+) create mode 100644 quality_gates.py diff --git a/quality_gates.py b/quality_gates.py new file mode 100644 index 00000000..046349f9 --- /dev/null +++ b/quality_gates.py @@ -0,0 +1,376 @@ +""" +Quality Gates Module +==================== + +Provides quality checking functionality for the Autocoder system. +Runs lint, type-check, and custom scripts before allowing features +to be marked as passing. + +Supports: +- ESLint/Biome for JavaScript/TypeScript +- ruff/flake8 for Python +- Custom scripts via .autocoder/quality-checks.sh +""" + +import json +import subprocess +import shutil +from datetime import datetime +from pathlib import Path +from typing import TypedDict + + +class QualityCheckResult(TypedDict): + """Result of a single quality check.""" + name: str + passed: bool + output: str + duration_ms: int + + +class QualityGateResult(TypedDict): + """Result of all quality checks combined.""" + passed: bool + timestamp: str + checks: dict[str, QualityCheckResult] + summary: str + + +def _run_command(cmd: list[str], cwd: Path, timeout: int = 60) -> tuple[int, str, int]: + """ + Run a command and return (exit_code, output, duration_ms). 
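+
+    Example (illustrative):
+        code, output, ms = _run_command(["ruff", "check", "."], project_dir)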
+ + Args: + cmd: Command and arguments as a list + cwd: Working directory + timeout: Timeout in seconds + + Returns: + (exit_code, combined_output, duration_ms) + """ + import time + start = time.time() + + try: + result = subprocess.run( + cmd, + cwd=cwd, + capture_output=True, + text=True, + timeout=timeout, + ) + duration_ms = int((time.time() - start) * 1000) + output = result.stdout + result.stderr + return result.returncode, output.strip(), duration_ms + except subprocess.TimeoutExpired: + duration_ms = int((time.time() - start) * 1000) + return 124, f"Command timed out after {timeout}s", duration_ms + except FileNotFoundError: + return 127, f"Command not found: {cmd[0]}", 0 + except Exception as e: + return 1, str(e), 0 + + +def _detect_js_linter(project_dir: Path) -> tuple[str, list[str]] | None: + """ + Detect the JavaScript/TypeScript linter to use. + + Returns: + (name, command) tuple, or None if no linter detected + """ + # Check for ESLint + if (project_dir / "node_modules/.bin/eslint").exists(): + return ("eslint", ["node_modules/.bin/eslint", ".", "--max-warnings=0"]) + + # Check for Biome + if (project_dir / "node_modules/.bin/biome").exists(): + return ("biome", ["node_modules/.bin/biome", "lint", "."]) + + # Check for package.json lint script + package_json = project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + scripts = data.get("scripts", {}) + if "lint" in scripts: + return ("npm_lint", ["npm", "run", "lint"]) + except (json.JSONDecodeError, OSError): + pass + + return None + + +def _detect_python_linter(project_dir: Path) -> tuple[str, list[str]] | None: + """ + Detect the Python linter to use. + + Returns: + (name, command) tuple, or None if no linter detected + """ + # Check for ruff + if shutil.which("ruff"): + return ("ruff", ["ruff", "check", "."]) + + # Check for flake8 + if shutil.which("flake8"): + return ("flake8", ["flake8", "."]) + + # Check in virtual environment + venv_ruff = project_dir / "venv/bin/ruff" + if venv_ruff.exists(): + return ("ruff", [str(venv_ruff), "check", "."]) + + venv_flake8 = project_dir / "venv/bin/flake8" + if venv_flake8.exists(): + return ("flake8", [str(venv_flake8), "."]) + + return None + + +def _detect_type_checker(project_dir: Path) -> tuple[str, list[str]] | None: + """ + Detect the type checker to use. + + Returns: + (name, command) tuple, or None if no type checker detected + """ + # TypeScript + if (project_dir / "tsconfig.json").exists(): + if (project_dir / "node_modules/.bin/tsc").exists(): + return ("tsc", ["node_modules/.bin/tsc", "--noEmit"]) + if shutil.which("npx"): + return ("tsc", ["npx", "tsc", "--noEmit"]) + + # Python (mypy) + if (project_dir / "pyproject.toml").exists() or (project_dir / "setup.py").exists(): + if shutil.which("mypy"): + return ("mypy", ["mypy", "."]) + venv_mypy = project_dir / "venv/bin/mypy" + if venv_mypy.exists(): + return ("mypy", [str(venv_mypy), "."]) + + return None + + +def run_lint_check(project_dir: Path) -> QualityCheckResult: + """ + Run lint check on the project. + + Automatically detects the appropriate linter based on project type. 
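+    Detection order (as implemented below): ESLint, then Biome, then an npm
+    "lint" script for JS/TS projects; ruff, then flake8 for Python projects.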
+ + Args: + project_dir: Path to the project directory + + Returns: + QualityCheckResult with lint results + """ + # Try JS/TS linter first + linter = _detect_js_linter(project_dir) + if linter is None: + # Try Python linter + linter = _detect_python_linter(project_dir) + + if linter is None: + return { + "name": "lint", + "passed": True, + "output": "No linter detected, skipping lint check", + "duration_ms": 0, + } + + name, cmd = linter + exit_code, output, duration_ms = _run_command(cmd, project_dir) + + # Truncate output if too long + if len(output) > 5000: + output = output[:5000] + "\n... (truncated)" + + return { + "name": f"lint ({name})", + "passed": exit_code == 0, + "output": output if output else "No issues found", + "duration_ms": duration_ms, + } + + +def run_type_check(project_dir: Path) -> QualityCheckResult: + """ + Run type check on the project. + + Automatically detects the appropriate type checker based on project type. + + Args: + project_dir: Path to the project directory + + Returns: + QualityCheckResult with type check results + """ + checker = _detect_type_checker(project_dir) + + if checker is None: + return { + "name": "type_check", + "passed": True, + "output": "No type checker detected, skipping type check", + "duration_ms": 0, + } + + name, cmd = checker + exit_code, output, duration_ms = _run_command(cmd, project_dir, timeout=120) + + # Truncate output if too long + if len(output) > 5000: + output = output[:5000] + "\n... (truncated)" + + return { + "name": f"type_check ({name})", + "passed": exit_code == 0, + "output": output if output else "No type errors found", + "duration_ms": duration_ms, + } + + +def run_custom_script(project_dir: Path, script_path: str | None = None) -> QualityCheckResult | None: + """ + Run a custom quality check script. + + Args: + project_dir: Path to the project directory + script_path: Path to the script (relative to project), defaults to .autocoder/quality-checks.sh + + Returns: + QualityCheckResult, or None if script doesn't exist + """ + if script_path is None: + script_path = ".autocoder/quality-checks.sh" + + script_full_path = project_dir / script_path + + if not script_full_path.exists(): + return None + + # Make sure it's executable + try: + script_full_path.chmod(0o755) + except OSError: + pass + + exit_code, output, duration_ms = _run_command( + ["bash", str(script_full_path)], + project_dir, + timeout=300, # 5 minutes for custom scripts + ) + + # Truncate output if too long + if len(output) > 10000: + output = output[:10000] + "\n... (truncated)" + + return { + "name": "custom_script", + "passed": exit_code == 0, + "output": output if output else "Script completed successfully", + "duration_ms": duration_ms, + } + + +def verify_quality( + project_dir: Path, + run_lint: bool = True, + run_type_check: bool = True, + run_custom: bool = True, + custom_script_path: str | None = None, +) -> QualityGateResult: + """ + Run all configured quality checks. 
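+
+    Example (illustrative):
+        result = verify_quality(project_dir, run_lint=True, run_type_check=True)
+        if not result["passed"]:
+            print(result["summary"])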
+ + Args: + project_dir: Path to the project directory + run_lint: Whether to run lint check + run_type_check: Whether to run type check + run_custom: Whether to run custom script + custom_script_path: Path to custom script (optional) + + Returns: + QualityGateResult with all check results + """ + checks: dict[str, QualityCheckResult] = {} + all_passed = True + + if run_lint: + lint_result = run_lint_check(project_dir) + checks["lint"] = lint_result + if not lint_result["passed"]: + all_passed = False + + if run_type_check: + type_result = run_type_check(project_dir) + checks["type_check"] = type_result + if not type_result["passed"]: + all_passed = False + + if run_custom: + custom_result = run_custom_script(project_dir, custom_script_path) + if custom_result is not None: + checks["custom_script"] = custom_result + if not custom_result["passed"]: + all_passed = False + + # Build summary + passed_count = sum(1 for c in checks.values() if c["passed"]) + total_count = len(checks) + failed_names = [name for name, c in checks.items() if not c["passed"]] + + if all_passed: + summary = f"All {total_count} quality checks passed" + else: + summary = f"{passed_count}/{total_count} checks passed. Failed: {', '.join(failed_names)}" + + return { + "passed": all_passed, + "timestamp": datetime.utcnow().isoformat(), + "checks": checks, + "summary": summary, + } + + +def load_quality_config(project_dir: Path) -> dict: + """ + Load quality gates configuration from .autocoder/config.json. + + Args: + project_dir: Path to the project directory + + Returns: + Quality gates config dict with defaults applied + """ + defaults = { + "enabled": True, + "strict_mode": True, + "checks": { + "lint": True, + "type_check": True, + "unit_tests": False, + "custom_script": None, + }, + } + + config_path = project_dir / ".autocoder" / "config.json" + if not config_path.exists(): + return defaults + + try: + data = json.loads(config_path.read_text()) + quality_config = data.get("quality_gates", {}) + + # Merge with defaults + result = defaults.copy() + for key in ["enabled", "strict_mode"]: + if key in quality_config: + result[key] = quality_config[key] + + if "checks" in quality_config: + result["checks"] = {**defaults["checks"], **quality_config["checks"]} + + return result + except (json.JSONDecodeError, OSError): + return defaults From 4aec4b2cfef41a5fdcddf5fead8a76df8e18595a Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:16:40 +0100 Subject: [PATCH 06/81] feat(import): add base analyzer abstract class Create the analyzers module with base class for stack detection: - analyzers/__init__.py: Module entry point, exports StackDetector - analyzers/base_analyzer.py: Abstract BaseAnalyzer class with: - RouteInfo, ComponentInfo, EndpointInfo, AnalysisResult TypedDicts - can_analyze() method to detect if analyzer applies - analyze() method to extract routes, components, endpoints - Helper methods: _read_file_safe(), _find_files() This provides the foundation for all stack-specific analyzers. 
Co-Authored-By: Claude Opus 4.5 --- analyzers/__init__.py | 18 +++++ analyzers/base_analyzer.py | 152 +++++++++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+) create mode 100644 analyzers/__init__.py create mode 100644 analyzers/base_analyzer.py diff --git a/analyzers/__init__.py b/analyzers/__init__.py new file mode 100644 index 00000000..1b90152e --- /dev/null +++ b/analyzers/__init__.py @@ -0,0 +1,18 @@ +""" +Codebase Analyzers +================== + +Modules for analyzing existing codebases to detect tech stack, +extract features, and prepare for import into Autocoder. + +Main entry point: stack_detector.py +""" + +from .stack_detector import StackDetector, StackDetectionResult +from .base_analyzer import BaseAnalyzer + +__all__ = [ + "StackDetector", + "StackDetectionResult", + "BaseAnalyzer", +] diff --git a/analyzers/base_analyzer.py b/analyzers/base_analyzer.py new file mode 100644 index 00000000..9bb31de2 --- /dev/null +++ b/analyzers/base_analyzer.py @@ -0,0 +1,152 @@ +""" +Base Analyzer +============= + +Abstract base class for all stack analyzers. +Each analyzer detects a specific tech stack and extracts relevant information. +""" + +from abc import ABC, abstractmethod +from pathlib import Path +from typing import TypedDict + + +class RouteInfo(TypedDict): + """Information about a detected route.""" + path: str + method: str # GET, POST, PUT, DELETE, etc. + handler: str # Function or component name + file: str # Source file path + + +class ComponentInfo(TypedDict): + """Information about a detected component.""" + name: str + file: str + type: str # page, component, layout, etc. + + +class EndpointInfo(TypedDict): + """Information about an API endpoint.""" + path: str + method: str + handler: str + file: str + description: str # Generated description + + +class AnalysisResult(TypedDict): + """Result of analyzing a codebase with a specific analyzer.""" + stack_name: str + confidence: float # 0.0 to 1.0 + routes: list[RouteInfo] + components: list[ComponentInfo] + endpoints: list[EndpointInfo] + entry_point: str | None + config_files: list[str] + dependencies: dict[str, str] # name: version + metadata: dict # Additional stack-specific info + + +class BaseAnalyzer(ABC): + """ + Abstract base class for stack analyzers. + + Each analyzer is responsible for: + 1. Detecting if a codebase uses its stack (can_analyze) + 2. Extracting routes, components, and endpoints (analyze) + """ + + def __init__(self, project_dir: Path): + """ + Initialize the analyzer. + + Args: + project_dir: Path to the project directory to analyze + """ + self.project_dir = project_dir + + @property + @abstractmethod + def stack_name(self) -> str: + """The name of the stack this analyzer handles (e.g., 'react', 'nextjs').""" + pass + + @abstractmethod + def can_analyze(self) -> tuple[bool, float]: + """ + Check if this analyzer can handle the codebase. + + Returns: + (can_handle, confidence) where: + - can_handle: True if the analyzer recognizes the stack + - confidence: 0.0 to 1.0 indicating how confident the detection is + """ + pass + + @abstractmethod + def analyze(self) -> AnalysisResult: + """ + Analyze the codebase and extract information. + + Returns: + AnalysisResult with detected routes, components, endpoints, etc. + """ + pass + + def _read_file_safe(self, path: Path, max_size: int = 1024 * 1024) -> str | None: + """ + Safely read a file, returning None if it doesn't exist or is too large. 
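+        None is also returned when the file cannot be decoded as UTF-8 or an
+        OS error occurs while reading.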
+ + Args: + path: Path to the file + max_size: Maximum file size in bytes (default 1MB) + + Returns: + File contents or None + """ + if not path.exists(): + return None + + try: + if path.stat().st_size > max_size: + return None + return path.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + return None + + def _find_files(self, pattern: str, exclude_dirs: list[str] | None = None) -> list[Path]: + """ + Find files matching a glob pattern, excluding common non-source directories. + + Args: + pattern: Glob pattern (e.g., "**/*.tsx") + exclude_dirs: Additional directories to exclude + + Returns: + List of matching file paths + """ + default_exclude = [ + "node_modules", + "venv", + ".venv", + "__pycache__", + ".git", + "dist", + "build", + ".next", + ".nuxt", + "coverage", + ] + + if exclude_dirs: + default_exclude.extend(exclude_dirs) + + results = [] + for path in self.project_dir.glob(pattern): + # Check if any parent is in exclude list + parts = path.relative_to(self.project_dir).parts + if not any(part in default_exclude for part in parts): + results.append(path) + + return results From 2b3b9067ee1e787868df904e3718e936e28b2410 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:16:48 +0100 Subject: [PATCH 07/81] feat(import): add stack detector orchestrator Add stack_detector.py that orchestrates multiple analyzers: - StackDetector class: Loads and runs all analyzers - detect(): Full analysis with routes, components, endpoints - detect_quick(): Fast detection for UI preview - StackDetectionResult: Complete result with: - detected_stacks list with confidence scores - primary_frontend, primary_backend, database - all_routes, all_endpoints, all_components - summary string for display The detector runs analyzers in order (more specific first like Next.js before React) and aggregates results. Co-Authored-By: Claude Opus 4.5 --- analyzers/stack_detector.py | 216 ++++++++++++++++++++++++++++++++++++ 1 file changed, 216 insertions(+) create mode 100644 analyzers/stack_detector.py diff --git a/analyzers/stack_detector.py b/analyzers/stack_detector.py new file mode 100644 index 00000000..37581873 --- /dev/null +++ b/analyzers/stack_detector.py @@ -0,0 +1,216 @@ +""" +Stack Detector +============== + +Orchestrates detection of tech stacks in a codebase. +Uses multiple analyzers to detect frontend, backend, and database technologies. +""" + +import json +from pathlib import Path +from typing import TypedDict + +from .base_analyzer import AnalysisResult + + +class StackInfo(TypedDict): + """Information about a detected stack.""" + name: str + category: str # frontend, backend, database, other + confidence: float + analysis: AnalysisResult | None + + +class StackDetectionResult(TypedDict): + """Complete result of stack detection.""" + project_dir: str + detected_stacks: list[StackInfo] + primary_frontend: str | None + primary_backend: str | None + database: str | None + routes_count: int + components_count: int + endpoints_count: int + all_routes: list[dict] + all_endpoints: list[dict] + all_components: list[dict] + summary: str + + +class StackDetector: + """ + Detects tech stacks in a codebase by running multiple analyzers. + + Usage: + detector = StackDetector(project_dir) + result = detector.detect() + """ + + def __init__(self, project_dir: Path): + """ + Initialize the stack detector. 
+ + Args: + project_dir: Path to the project directory to analyze + """ + self.project_dir = Path(project_dir).resolve() + self._analyzers = [] + self._load_analyzers() + + def _load_analyzers(self) -> None: + """Load all available analyzers.""" + # Import analyzers here to avoid circular imports + from .react_analyzer import ReactAnalyzer + from .node_analyzer import NodeAnalyzer + from .python_analyzer import PythonAnalyzer + from .vue_analyzer import VueAnalyzer + + # Order matters: more specific analyzers first (Next.js before React) + self._analyzers = [ + ReactAnalyzer(self.project_dir), + VueAnalyzer(self.project_dir), + NodeAnalyzer(self.project_dir), + PythonAnalyzer(self.project_dir), + ] + + def detect(self) -> StackDetectionResult: + """ + Run all analyzers and compile results. + + Returns: + StackDetectionResult with all detected stacks and extracted information + """ + detected_stacks: list[StackInfo] = [] + all_routes: list[dict] = [] + all_endpoints: list[dict] = [] + all_components: list[dict] = [] + + for analyzer in self._analyzers: + can_analyze, confidence = analyzer.can_analyze() + + if can_analyze and confidence > 0.3: # Minimum confidence threshold + try: + analysis = analyzer.analyze() + + # Determine category + stack_name = analyzer.stack_name.lower() + if stack_name in ("react", "nextjs", "vue", "nuxt", "angular"): + category = "frontend" + elif stack_name in ("express", "fastapi", "django", "flask", "nestjs"): + category = "backend" + elif stack_name in ("postgres", "mysql", "mongodb", "sqlite"): + category = "database" + else: + category = "other" + + detected_stacks.append({ + "name": analyzer.stack_name, + "category": category, + "confidence": confidence, + "analysis": analysis, + }) + + # Collect all routes, endpoints, components + all_routes.extend(analysis.get("routes", [])) + all_endpoints.extend(analysis.get("endpoints", [])) + all_components.extend(analysis.get("components", [])) + + except Exception as e: + # Log but don't fail - continue with other analyzers + print(f"Warning: {analyzer.stack_name} analyzer failed: {e}") + + # Sort by confidence + detected_stacks.sort(key=lambda x: x["confidence"], reverse=True) + + # Determine primary frontend and backend + primary_frontend = None + primary_backend = None + database = None + + for stack in detected_stacks: + if stack["category"] == "frontend" and primary_frontend is None: + primary_frontend = stack["name"] + elif stack["category"] == "backend" and primary_backend is None: + primary_backend = stack["name"] + elif stack["category"] == "database" and database is None: + database = stack["name"] + + # Build summary + stack_names = [s["name"] for s in detected_stacks] + if stack_names: + summary = f"Detected: {', '.join(stack_names)}" + else: + summary = "No recognized tech stack detected" + + if all_routes: + summary += f" | {len(all_routes)} routes" + if all_endpoints: + summary += f" | {len(all_endpoints)} endpoints" + if all_components: + summary += f" | {len(all_components)} components" + + return { + "project_dir": str(self.project_dir), + "detected_stacks": detected_stacks, + "primary_frontend": primary_frontend, + "primary_backend": primary_backend, + "database": database, + "routes_count": len(all_routes), + "components_count": len(all_components), + "endpoints_count": len(all_endpoints), + "all_routes": all_routes, + "all_endpoints": all_endpoints, + "all_components": all_components, + "summary": summary, + } + + def detect_quick(self) -> dict: + """ + Quick detection without full analysis. 
+ + Returns a simplified result with just stack names and confidence. + Useful for UI display before full analysis. + """ + results = [] + + for analyzer in self._analyzers: + can_analyze, confidence = analyzer.can_analyze() + if can_analyze and confidence > 0.3: + results.append({ + "name": analyzer.stack_name, + "confidence": confidence, + }) + + results.sort(key=lambda x: x["confidence"], reverse=True) + + return { + "project_dir": str(self.project_dir), + "stacks": results, + "primary": results[0]["name"] if results else None, + } + + def to_json(self, result: StackDetectionResult) -> str: + """Convert detection result to JSON string.""" + # Remove analysis objects for cleaner output + clean_result = { + **result, + "detected_stacks": [ + {k: v for k, v in stack.items() if k != "analysis"} + for stack in result["detected_stacks"] + ], + } + return json.dumps(clean_result, indent=2) + + +def detect_stack(project_dir: str | Path) -> StackDetectionResult: + """ + Convenience function to detect stack in a project. + + Args: + project_dir: Path to the project directory + + Returns: + StackDetectionResult + """ + detector = StackDetector(Path(project_dir)) + return detector.detect() From 6ffb0f8d64823355ad0119660aede27a4224a447 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:16:58 +0100 Subject: [PATCH 08/81] feat(import): add React/Next.js analyzer Add react_analyzer.py for React ecosystem detection: - Detects: React, React+Vite, Create React App, Next.js - Extracts routes from: - React Router ( elements, createBrowserRouter) - Next.js App Router (app/page.tsx files) - Next.js Pages Router (pages/*.tsx files) - Extracts API endpoints from Next.js: - pages/api/*.ts (Pages Router) - app/api/route.ts (App Router, detects exported methods) - Extracts components from components/ and pages/ directories Detection heuristics: - package.json: "next", "react", "vite", "react-scripts" - Config files: next.config.js, vite.config.ts - Entry files: src/App.tsx, pages/_app.tsx, app/layout.tsx Co-Authored-By: Claude Opus 4.5 --- analyzers/react_analyzer.py | 418 ++++++++++++++++++++++++++++++++++++ 1 file changed, 418 insertions(+) create mode 100644 analyzers/react_analyzer.py diff --git a/analyzers/react_analyzer.py b/analyzers/react_analyzer.py new file mode 100644 index 00000000..9d125e3c --- /dev/null +++ b/analyzers/react_analyzer.py @@ -0,0 +1,418 @@ +""" +React Analyzer +============== + +Detects React, Vite, and Next.js projects. +Extracts routes from React Router and Next.js file-based routing. 
+""" + +import json +import re +from pathlib import Path + +from .base_analyzer import ( + AnalysisResult, + BaseAnalyzer, + ComponentInfo, + EndpointInfo, + RouteInfo, +) + + +class ReactAnalyzer(BaseAnalyzer): + """Analyzer for React, Vite, and Next.js projects.""" + + @property + def stack_name(self) -> str: + return self._detected_stack + + def __init__(self, project_dir: Path): + super().__init__(project_dir) + self._detected_stack = "react" # Default, may change to "nextjs" + + def can_analyze(self) -> tuple[bool, float]: + """Detect if this is a React/Next.js project.""" + confidence = 0.0 + + # Check package.json + package_json = self.project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + deps = { + **data.get("dependencies", {}), + **data.get("devDependencies", {}), + } + + # Check for Next.js first (more specific) + if "next" in deps: + self._detected_stack = "nextjs" + confidence = 0.95 + return True, confidence + + # Check for React + if "react" in deps: + confidence = 0.85 + + # Check for Vite + if "vite" in deps: + self._detected_stack = "react-vite" + confidence = 0.9 + + # Check for Create React App + if "react-scripts" in deps: + self._detected_stack = "react-cra" + confidence = 0.9 + + return True, confidence + + except (json.JSONDecodeError, OSError): + pass + + # Check for Next.js config + if (self.project_dir / "next.config.js").exists() or \ + (self.project_dir / "next.config.mjs").exists() or \ + (self.project_dir / "next.config.ts").exists(): + self._detected_stack = "nextjs" + return True, 0.95 + + # Check for common React files + if (self.project_dir / "src" / "App.tsx").exists() or \ + (self.project_dir / "src" / "App.jsx").exists(): + return True, 0.7 + + return False, 0.0 + + def analyze(self) -> AnalysisResult: + """Analyze the React/Next.js project.""" + routes: list[RouteInfo] = [] + components: list[ComponentInfo] = [] + endpoints: list[EndpointInfo] = [] + config_files: list[str] = [] + dependencies: dict[str, str] = {} + entry_point: str | None = None + + # Load dependencies from package.json + package_json = self.project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + dependencies = { + **data.get("dependencies", {}), + **data.get("devDependencies", {}), + } + except (json.JSONDecodeError, OSError): + pass + + # Collect config files + for config_name in [ + "next.config.js", "next.config.mjs", "next.config.ts", + "vite.config.js", "vite.config.ts", + "tsconfig.json", "tailwind.config.js", "tailwind.config.ts", + ]: + if (self.project_dir / config_name).exists(): + config_files.append(config_name) + + # Detect entry point + for entry in ["src/main.tsx", "src/main.jsx", "src/index.tsx", "src/index.jsx", "pages/_app.tsx", "app/layout.tsx"]: + if (self.project_dir / entry).exists(): + entry_point = entry + break + + # Extract routes based on stack type + if self._detected_stack == "nextjs": + routes = self._extract_nextjs_routes() + endpoints = self._extract_nextjs_api_routes() + else: + routes = self._extract_react_router_routes() + + # Extract components + components = self._extract_components() + + return { + "stack_name": self._detected_stack, + "confidence": 0.9, + "routes": routes, + "components": components, + "endpoints": endpoints, + "entry_point": entry_point, + "config_files": config_files, + "dependencies": dependencies, + "metadata": { + "has_typescript": "typescript" in dependencies, + "has_tailwind": "tailwindcss" in dependencies, + 
"has_react_router": "react-router-dom" in dependencies, + }, + } + + def _extract_nextjs_routes(self) -> list[RouteInfo]: + """Extract routes from Next.js file-based routing.""" + routes: list[RouteInfo] = [] + + # Check for App Router (Next.js 13+) + app_dir = self.project_dir / "app" + if app_dir.exists(): + routes.extend(self._extract_app_router_routes(app_dir)) + + # Check for Pages Router + pages_dir = self.project_dir / "pages" + if pages_dir.exists(): + routes.extend(self._extract_pages_router_routes(pages_dir)) + + # Also check src/app and src/pages + src_app = self.project_dir / "src" / "app" + if src_app.exists(): + routes.extend(self._extract_app_router_routes(src_app)) + + src_pages = self.project_dir / "src" / "pages" + if src_pages.exists(): + routes.extend(self._extract_pages_router_routes(src_pages)) + + return routes + + def _extract_app_router_routes(self, app_dir: Path) -> list[RouteInfo]: + """Extract routes from Next.js App Router.""" + routes: list[RouteInfo] = [] + + for page_file in app_dir.rglob("page.tsx"): + rel_path = page_file.relative_to(app_dir) + route_path = "/" + "/".join(rel_path.parent.parts) + + # Handle dynamic routes: [id] -> :id + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + + # Clean up + if route_path == "/.": + route_path = "/" + route_path = route_path.replace("//", "/") + + routes.append({ + "path": route_path, + "method": "GET", + "handler": "Page", + "file": str(page_file.relative_to(self.project_dir)), + }) + + # Also check .jsx files + for page_file in app_dir.rglob("page.jsx"): + rel_path = page_file.relative_to(app_dir) + route_path = "/" + "/".join(rel_path.parent.parts) + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + if route_path == "/.": + route_path = "/" + route_path = route_path.replace("//", "/") + + routes.append({ + "path": route_path, + "method": "GET", + "handler": "Page", + "file": str(page_file.relative_to(self.project_dir)), + }) + + return routes + + def _extract_pages_router_routes(self, pages_dir: Path) -> list[RouteInfo]: + """Extract routes from Next.js Pages Router.""" + routes: list[RouteInfo] = [] + + for page_file in pages_dir.rglob("*.tsx"): + if page_file.name.startswith("_"): # Skip _app.tsx, _document.tsx + continue + if "api" in page_file.parts: # Skip API routes + continue + + rel_path = page_file.relative_to(pages_dir) + route_path = "/" + str(rel_path.with_suffix("")) + + # Handle index files + route_path = route_path.replace("/index", "") + if not route_path: + route_path = "/" + + # Handle dynamic routes + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + + routes.append({ + "path": route_path, + "method": "GET", + "handler": page_file.stem, + "file": str(page_file.relative_to(self.project_dir)), + }) + + # Also check .jsx files + for page_file in pages_dir.rglob("*.jsx"): + if page_file.name.startswith("_"): + continue + if "api" in page_file.parts: + continue + + rel_path = page_file.relative_to(pages_dir) + route_path = "/" + str(rel_path.with_suffix("")) + route_path = route_path.replace("/index", "") + if not route_path: + route_path = "/" + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + + routes.append({ + "path": route_path, + "method": "GET", + "handler": page_file.stem, + "file": str(page_file.relative_to(self.project_dir)), + }) + + return routes + + def _extract_nextjs_api_routes(self) -> list[EndpointInfo]: + """Extract API routes from Next.js.""" + endpoints: list[EndpointInfo] = [] + + # Check pages/api (Pages Router) + api_dirs = [ + 
self.project_dir / "pages" / "api",
+            self.project_dir / "src" / "pages" / "api",
+        ]
+
+        for api_dir in api_dirs:
+            if api_dir.exists():
+                for api_file in api_dir.rglob("*.ts"):
+                    endpoints.extend(self._parse_api_route(api_file, api_dir))
+                for api_file in api_dir.rglob("*.js"):
+                    endpoints.extend(self._parse_api_route(api_file, api_dir))
+
+        # Check app/api (App Router - route.ts files)
+        app_api_dirs = [
+            self.project_dir / "app" / "api",
+            self.project_dir / "src" / "app" / "api",
+        ]
+
+        for app_api in app_api_dirs:
+            if app_api.exists():
+                for route_file in app_api.rglob("route.ts"):
+                    endpoints.extend(self._parse_app_router_api(route_file, app_api))
+                for route_file in app_api.rglob("route.js"):
+                    endpoints.extend(self._parse_app_router_api(route_file, app_api))
+
+        return endpoints
+
+    def _parse_api_route(self, api_file: Path, api_dir: Path) -> list[EndpointInfo]:
+        """Parse a Pages Router API route file."""
+        rel_path = api_file.relative_to(api_dir)
+        route_path = "/api/" + str(rel_path.with_suffix(""))
+        route_path = route_path.replace("/index", "")
+        route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path)
+
+        return [{
+            "path": route_path,
+            "method": "ALL",  # Default export handles all methods
+            "handler": "handler",
+            "file": str(api_file.relative_to(self.project_dir)),
+            "description": f"API endpoint at {route_path}",
+        }]
+
+    def _parse_app_router_api(self, route_file: Path, api_dir: Path) -> list[EndpointInfo]:
+        """Parse an App Router API route file."""
+        rel_path = route_file.relative_to(api_dir)
+        route_path = "/api/" + "/".join(rel_path.parent.parts)
+        route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path)
+        if route_path.endswith("/"):
+            route_path = route_path[:-1]
+
+        # Try to detect which methods are exported
+        content = self._read_file_safe(route_file)
+        methods = []
+        if content:
+            for method in ["GET", "POST", "PUT", "PATCH", "DELETE"]:
+                if f"export async function {method}" in content or \
+                   f"export function {method}" in content:
+                    methods.append(method)
+
+        if not methods:
+            methods = ["ALL"]
+
+        return [
+            {
+                "path": route_path,
+                "method": method,
+                "handler": method,
+                "file": str(route_file.relative_to(self.project_dir)),
+                "description": f"{method} {route_path}",
+            }
+            for method in methods
+        ]
+
+    def _extract_react_router_routes(self) -> list[RouteInfo]:
+        """Extract routes from React Router configuration."""
+        routes: list[RouteInfo] = []
+
+        # Look for route definitions in common files
+        route_files = self._find_files("**/*.tsx") + self._find_files("**/*.jsx")
+
+        # Pattern for React Router <Route> elements
+        route_pattern = re.compile(
+            r'<Route[^>]*path=["\']([^"\']+)["\'][^>]*>',
+            re.IGNORECASE
+        )
+
+        # Pattern for createBrowserRouter routes
+        browser_router_pattern = re.compile(
+            r'{\s*path:\s*["\']([^"\']+)["\']',
+            re.IGNORECASE
+        )
+
+        for file in route_files:
+            content = self._read_file_safe(file)
+            if content is None:
+                continue
+
+            # Skip if not likely a routing file
+            if "Route" not in content and "createBrowserRouter" not in content:
+                continue
+
+            # Extract routes from JSX
+            for match in route_pattern.finditer(content):
+                routes.append({
+                    "path": match.group(1),
+                    "method": "GET",
+                    "handler": "Route",
+                    "file": str(file.relative_to(self.project_dir)),
+                })
+
+            # Extract routes from createBrowserRouter
+            for match in browser_router_pattern.finditer(content):
+                routes.append({
+                    "path": match.group(1),
+                    "method": "GET",
+                    "handler": "RouterRoute",
+                    "file": str(file.relative_to(self.project_dir)),
+                })
+
+        return routes
+
+    def
_extract_components(self) -> list[ComponentInfo]: + """Extract React components.""" + components: list[ComponentInfo] = [] + + # Find component files + component_files = self._find_files("**/components/**/*.tsx") + \ + self._find_files("**/components/**/*.jsx") + + for file in component_files: + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "component", + }) + + # Find page files + page_files = self._find_files("**/pages/**/*.tsx") + \ + self._find_files("**/pages/**/*.jsx") + + for file in page_files: + if not file.name.startswith("_"): + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "page", + }) + + return components From c3ac1353812db7ac6dd2b2b7e97d3bb691f4c175 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:17:06 +0100 Subject: [PATCH 09/81] feat(import): add Node.js/Express/NestJS analyzer Add node_analyzer.py for Node.js backend detection: - Detects: Express, NestJS, Fastify, Koa, generic Node.js - Extracts API endpoints from: - Express: router.get(), app.post() patterns - NestJS: @Get(), @Post() decorators with @Controller() prefix - Fastify: fastify.get() patterns - Extracts components: - middleware/ files - services/ files and *.service.ts - models/ files and *.model.ts Detection heuristics: - package.json: "express", "@nestjs/core", "fastify", "koa" - File structure: routes/, controllers/, middleware/ - Entry files: app.js, server.js, src/index.ts Co-Authored-By: Claude Opus 4.5 --- analyzers/node_analyzer.py | 352 +++++++++++++++++++++++++++++++++++++ 1 file changed, 352 insertions(+) create mode 100644 analyzers/node_analyzer.py diff --git a/analyzers/node_analyzer.py b/analyzers/node_analyzer.py new file mode 100644 index 00000000..8ceb96e4 --- /dev/null +++ b/analyzers/node_analyzer.py @@ -0,0 +1,352 @@ +""" +Node.js Analyzer +================ + +Detects Node.js/Express/NestJS projects. +Extracts API endpoints from Express router definitions. 
+""" + +import json +import re +from pathlib import Path + +from .base_analyzer import ( + AnalysisResult, + BaseAnalyzer, + ComponentInfo, + EndpointInfo, + RouteInfo, +) + + +class NodeAnalyzer(BaseAnalyzer): + """Analyzer for Node.js/Express/NestJS projects.""" + + @property + def stack_name(self) -> str: + return self._detected_stack + + def __init__(self, project_dir: Path): + super().__init__(project_dir) + self._detected_stack = "nodejs" # Default, may change to "express" or "nestjs" + + def can_analyze(self) -> tuple[bool, float]: + """Detect if this is a Node.js/Express/NestJS project.""" + confidence = 0.0 + + # Check package.json + package_json = self.project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + deps = { + **data.get("dependencies", {}), + **data.get("devDependencies", {}), + } + + # Check for NestJS first (more specific) + if "@nestjs/core" in deps: + self._detected_stack = "nestjs" + confidence = 0.95 + return True, confidence + + # Check for Express + if "express" in deps: + self._detected_stack = "express" + confidence = 0.85 + + # Bonus for having typical Express structure + if (self.project_dir / "routes").exists() or \ + (self.project_dir / "src" / "routes").exists(): + confidence = 0.9 + + return True, confidence + + # Check for Fastify + if "fastify" in deps: + self._detected_stack = "fastify" + confidence = 0.85 + return True, confidence + + # Check for Koa + if "koa" in deps: + self._detected_stack = "koa" + confidence = 0.85 + return True, confidence + + # Generic Node.js (has node-specific files but no specific framework) + if "type" in data and data["type"] == "module": + self._detected_stack = "nodejs" + confidence = 0.5 + return True, confidence + + except (json.JSONDecodeError, OSError): + pass + + # Check for common Node.js files + common_files = ["app.js", "server.js", "index.js", "src/app.js", "src/server.js"] + for file in common_files: + if (self.project_dir / file).exists(): + self._detected_stack = "nodejs" + return True, 0.5 + + return False, 0.0 + + def analyze(self) -> AnalysisResult: + """Analyze the Node.js project.""" + routes: list[RouteInfo] = [] + components: list[ComponentInfo] = [] + endpoints: list[EndpointInfo] = [] + config_files: list[str] = [] + dependencies: dict[str, str] = {} + entry_point: str | None = None + + # Load dependencies from package.json + package_json = self.project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + dependencies = { + **data.get("dependencies", {}), + **data.get("devDependencies", {}), + } + + # Detect entry point from package.json + entry_point = data.get("main") + if not entry_point: + scripts = data.get("scripts", {}) + start_script = scripts.get("start", "") + if "node" in start_script: + # Extract file from "node src/index.js" etc. 
+ match = re.search(r"node\s+(\S+)", start_script) + if match: + entry_point = match.group(1) + + except (json.JSONDecodeError, OSError): + pass + + # Collect config files + for config_name in [ + "tsconfig.json", ".eslintrc.js", ".eslintrc.json", + "jest.config.js", "nodemon.json", ".env.example", + ]: + if (self.project_dir / config_name).exists(): + config_files.append(config_name) + + # Detect entry point if not found + if not entry_point: + for candidate in ["src/index.js", "src/index.ts", "src/app.js", "src/app.ts", + "index.js", "app.js", "server.js"]: + if (self.project_dir / candidate).exists(): + entry_point = candidate + break + + # Extract endpoints based on stack type + if self._detected_stack == "express": + endpoints = self._extract_express_routes() + elif self._detected_stack == "nestjs": + endpoints = self._extract_nestjs_routes() + elif self._detected_stack == "fastify": + endpoints = self._extract_fastify_routes() + else: + # Generic Node.js - try Express patterns + endpoints = self._extract_express_routes() + + # Extract middleware/components + components = self._extract_components() + + return { + "stack_name": self._detected_stack, + "confidence": 0.85, + "routes": routes, + "components": components, + "endpoints": endpoints, + "entry_point": entry_point, + "config_files": config_files, + "dependencies": dependencies, + "metadata": { + "has_typescript": "typescript" in dependencies, + "has_prisma": "prisma" in dependencies or "@prisma/client" in dependencies, + "has_mongoose": "mongoose" in dependencies, + "has_sequelize": "sequelize" in dependencies, + }, + } + + def _extract_express_routes(self) -> list[EndpointInfo]: + """Extract routes from Express router definitions.""" + endpoints: list[EndpointInfo] = [] + + # Find route files + route_files = ( + self._find_files("**/routes/**/*.js") + + self._find_files("**/routes/**/*.ts") + + self._find_files("**/router/**/*.js") + + self._find_files("**/router/**/*.ts") + + self._find_files("**/controllers/**/*.js") + + self._find_files("**/controllers/**/*.ts") + ) + + # Also check main files + for main_file in ["app.js", "app.ts", "server.js", "server.ts", + "src/app.js", "src/app.ts", "index.js", "index.ts"]: + main_path = self.project_dir / main_file + if main_path.exists(): + route_files.append(main_path) + + # Pattern for Express routes + # router.get('/path', handler) + # app.post('/path', handler) + route_pattern = re.compile( + r'(?:router|app)\.(get|post|put|patch|delete|all)\s*\(\s*["\']([^"\']+)["\']', + re.IGNORECASE + ) + + for file in route_files: + content = self._read_file_safe(file) + if content is None: + continue + + for match in route_pattern.finditer(content): + method = match.group(1).upper() + path = match.group(2) + + endpoints.append({ + "path": path, + "method": method, + "handler": "handler", + "file": str(file.relative_to(self.project_dir)), + "description": f"{method} {path}", + }) + + return endpoints + + def _extract_nestjs_routes(self) -> list[EndpointInfo]: + """Extract routes from NestJS controllers.""" + endpoints: list[EndpointInfo] = [] + + # Find controller files + controller_files = ( + self._find_files("**/*.controller.ts") + + self._find_files("**/*.controller.js") + ) + + # Pattern for NestJS decorators + # @Get('/path'), @Post(), etc. 
+ decorator_pattern = re.compile( + r'@(Get|Post|Put|Patch|Delete|All)\s*\(\s*["\']?([^"\')\s]*)["\']?\s*\)', + re.IGNORECASE + ) + + # Pattern for controller path + controller_pattern = re.compile( + r'@Controller\s*\(\s*["\']?([^"\')\s]*)["\']?\s*\)', + re.IGNORECASE + ) + + for file in controller_files: + content = self._read_file_safe(file) + if content is None: + continue + + # Get controller base path + controller_match = controller_pattern.search(content) + base_path = "/" + controller_match.group(1) if controller_match else "" + + for match in decorator_pattern.finditer(content): + method = match.group(1).upper() + path = match.group(2) or "" + + full_path = base_path + if path: + full_path = f"{base_path}/{path}".replace("//", "/") + + endpoints.append({ + "path": full_path or "/", + "method": method, + "handler": "controller", + "file": str(file.relative_to(self.project_dir)), + "description": f"{method} {full_path or '/'}", + }) + + return endpoints + + def _extract_fastify_routes(self) -> list[EndpointInfo]: + """Extract routes from Fastify route definitions.""" + endpoints: list[EndpointInfo] = [] + + # Find route files + route_files = ( + self._find_files("**/routes/**/*.js") + + self._find_files("**/routes/**/*.ts") + + self._find_files("**/*.routes.js") + + self._find_files("**/*.routes.ts") + ) + + # Pattern for Fastify routes + # fastify.get('/path', handler) + route_pattern = re.compile( + r'(?:fastify|server|app)\.(get|post|put|patch|delete|all)\s*\(\s*["\']([^"\']+)["\']', + re.IGNORECASE + ) + + for file in route_files: + content = self._read_file_safe(file) + if content is None: + continue + + for match in route_pattern.finditer(content): + method = match.group(1).upper() + path = match.group(2) + + endpoints.append({ + "path": path, + "method": method, + "handler": "handler", + "file": str(file.relative_to(self.project_dir)), + "description": f"{method} {path}", + }) + + return endpoints + + def _extract_components(self) -> list[ComponentInfo]: + """Extract middleware and service components.""" + components: list[ComponentInfo] = [] + + # Find middleware files + middleware_files = self._find_files("**/middleware/**/*.js") + \ + self._find_files("**/middleware/**/*.ts") + + for file in middleware_files: + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "middleware", + }) + + # Find service files + service_files = self._find_files("**/services/**/*.js") + \ + self._find_files("**/services/**/*.ts") + \ + self._find_files("**/*.service.js") + \ + self._find_files("**/*.service.ts") + + for file in service_files: + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "service", + }) + + # Find model files + model_files = self._find_files("**/models/**/*.js") + \ + self._find_files("**/models/**/*.ts") + \ + self._find_files("**/*.model.js") + \ + self._find_files("**/*.model.ts") + + for file in model_files: + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "model", + }) + + return components From cbfd51c5a4878f36b13de47d3f78ce0bbad606da Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:17:15 +0100 Subject: [PATCH 10/81] feat(import): add Python/FastAPI/Django/Flask analyzer Add python_analyzer.py for Python web framework detection: - Detects: FastAPI, Django, Flask - Extracts API endpoints from: - FastAPI: @app.get(), @router.post() with APIRouter prefix - Django: path() and re_path() in urls.py 
files - Flask: @app.route() with methods parameter, Blueprint prefixes - Extracts components: - models.py and models/ directories - views.py and views/ directories - services/ and *_service.py files Detection heuristics: - manage.py -> Django - requirements.txt/pyproject.toml: "fastapi", "flask", "django" - File imports: "from fastapi import", "from flask import" Converts Django params to :name format. Parses Flask methods=['GET', 'POST'] syntax. Co-Authored-By: Claude Opus 4.5 --- analyzers/python_analyzer.py | 395 +++++++++++++++++++++++++++++++++++ 1 file changed, 395 insertions(+) create mode 100644 analyzers/python_analyzer.py diff --git a/analyzers/python_analyzer.py b/analyzers/python_analyzer.py new file mode 100644 index 00000000..7d421f0d --- /dev/null +++ b/analyzers/python_analyzer.py @@ -0,0 +1,395 @@ +""" +Python Analyzer +=============== + +Detects FastAPI, Django, and Flask projects. +Extracts API endpoints from route/view definitions. +""" + +import re +from pathlib import Path + +from .base_analyzer import ( + AnalysisResult, + BaseAnalyzer, + ComponentInfo, + EndpointInfo, + RouteInfo, +) + + +class PythonAnalyzer(BaseAnalyzer): + """Analyzer for FastAPI, Django, and Flask projects.""" + + @property + def stack_name(self) -> str: + return self._detected_stack + + def __init__(self, project_dir: Path): + super().__init__(project_dir) + self._detected_stack = "python" # Default, may change + + def can_analyze(self) -> tuple[bool, float]: + """Detect if this is a Python web framework project.""" + confidence = 0.0 + + # Check for Django first + if (self.project_dir / "manage.py").exists(): + self._detected_stack = "django" + confidence = 0.95 + return True, confidence + + # Check requirements.txt + requirements = self.project_dir / "requirements.txt" + if requirements.exists(): + try: + content = requirements.read_text().lower() + + if "fastapi" in content: + self._detected_stack = "fastapi" + confidence = 0.9 + return True, confidence + + if "flask" in content: + self._detected_stack = "flask" + confidence = 0.85 + return True, confidence + + if "django" in content: + self._detected_stack = "django" + confidence = 0.85 + return True, confidence + + except OSError: + pass + + # Check pyproject.toml + pyproject = self.project_dir / "pyproject.toml" + if pyproject.exists(): + try: + content = pyproject.read_text().lower() + + if "fastapi" in content: + self._detected_stack = "fastapi" + confidence = 0.9 + return True, confidence + + if "flask" in content: + self._detected_stack = "flask" + confidence = 0.85 + return True, confidence + + if "django" in content: + self._detected_stack = "django" + confidence = 0.85 + return True, confidence + + except OSError: + pass + + # Check for common FastAPI patterns + main_py = self.project_dir / "main.py" + if main_py.exists(): + content = self._read_file_safe(main_py) + if content and "from fastapi import" in content: + self._detected_stack = "fastapi" + return True, 0.9 + + # Check for Flask patterns + app_py = self.project_dir / "app.py" + if app_py.exists(): + content = self._read_file_safe(app_py) + if content and "from flask import" in content: + self._detected_stack = "flask" + return True, 0.85 + + return False, 0.0 + + def analyze(self) -> AnalysisResult: + """Analyze the Python project.""" + routes: list[RouteInfo] = [] + components: list[ComponentInfo] = [] + endpoints: list[EndpointInfo] = [] + config_files: list[str] = [] + dependencies: dict[str, str] = {} + entry_point: str | None = None + + # Load dependencies from 
requirements.txt + requirements = self.project_dir / "requirements.txt" + if requirements.exists(): + try: + for line in requirements.read_text().splitlines(): + line = line.strip() + if line and not line.startswith("#"): + # Parse package==version or package>=version etc. + match = re.match(r"([a-zA-Z0-9_-]+)(?:[=<>!~]+(.+))?", line) + if match: + dependencies[match.group(1)] = match.group(2) or "*" + except OSError: + pass + + # Collect config files + for config_name in [ + "pyproject.toml", "setup.py", "setup.cfg", + "requirements.txt", "requirements-dev.txt", + ".env.example", "alembic.ini", "pytest.ini", + ]: + if (self.project_dir / config_name).exists(): + config_files.append(config_name) + + # Extract endpoints based on framework + if self._detected_stack == "fastapi": + endpoints = self._extract_fastapi_routes() + entry_point = "main.py" + elif self._detected_stack == "django": + endpoints = self._extract_django_routes() + entry_point = "manage.py" + elif self._detected_stack == "flask": + endpoints = self._extract_flask_routes() + entry_point = "app.py" + + # Find entry point if not set + if not entry_point or not (self.project_dir / entry_point).exists(): + for candidate in ["main.py", "app.py", "server.py", "run.py", "src/main.py"]: + if (self.project_dir / candidate).exists(): + entry_point = candidate + break + + # Extract components (models, services, etc.) + components = self._extract_components() + + return { + "stack_name": self._detected_stack, + "confidence": 0.85, + "routes": routes, + "components": components, + "endpoints": endpoints, + "entry_point": entry_point, + "config_files": config_files, + "dependencies": dependencies, + "metadata": { + "has_sqlalchemy": "sqlalchemy" in dependencies, + "has_alembic": "alembic" in dependencies, + "has_pytest": "pytest" in dependencies, + "has_celery": "celery" in dependencies, + }, + } + + def _extract_fastapi_routes(self) -> list[EndpointInfo]: + """Extract routes from FastAPI decorators.""" + endpoints: list[EndpointInfo] = [] + + # Find Python files + py_files = self._find_files("**/*.py") + + # Pattern for FastAPI routes + # @app.get("/path") + # @router.post("/path") + route_pattern = re.compile( + r'@(?:app|router)\.(get|post|put|patch|delete)\s*\(\s*["\']([^"\']+)["\']', + re.IGNORECASE + ) + + # Pattern for APIRouter prefix + router_prefix_pattern = re.compile( + r'APIRouter\s*\([^)]*prefix\s*=\s*["\']([^"\']+)["\']', + re.IGNORECASE + ) + + for file in py_files: + content = self._read_file_safe(file) + if content is None: + continue + + # Skip if not a route file + if "@app." not in content and "@router." 
not in content:
+                continue
+
+            # Try to find router prefix
+            prefix = ""
+            prefix_match = router_prefix_pattern.search(content)
+            if prefix_match:
+                prefix = prefix_match.group(1)
+
+            for match in route_pattern.finditer(content):
+                method = match.group(1).upper()
+                path = match.group(2)
+
+                full_path = prefix + path if prefix else path
+
+                endpoints.append({
+                    "path": full_path,
+                    "method": method,
+                    "handler": "handler",
+                    "file": str(file.relative_to(self.project_dir)),
+                    "description": f"{method} {full_path}",
+                })
+
+        return endpoints
+
+    def _extract_django_routes(self) -> list[EndpointInfo]:
+        """Extract routes from Django URL patterns."""
+        endpoints: list[EndpointInfo] = []
+
+        # Find urls.py files
+        url_files = self._find_files("**/urls.py")
+
+        # Pattern for Django URL patterns
+        # path('api/users/', views.user_list)
+        # path('api/users/<int:id>/', views.user_detail)
+        path_pattern = re.compile(
+            r'path\s*\(\s*["\']([^"\']+)["\']',
+            re.IGNORECASE
+        )
+
+        # Pattern for re_path
+        re_path_pattern = re.compile(
+            r're_path\s*\(\s*["\']([^"\']+)["\']',
+            re.IGNORECASE
+        )
+
+        for file in url_files:
+            content = self._read_file_safe(file)
+            if content is None:
+                continue
+
+            for match in path_pattern.finditer(content):
+                path = "/" + match.group(1).rstrip("/")
+                if path == "/":
+                    path = "/"
+
+                # Django uses <type:name> for params, convert to :name
+                path = re.sub(r"<\w+:(\w+)>", r":\1", path)
+                path = re.sub(r"<(\w+)>", r":\1", path)
+
+                endpoints.append({
+                    "path": path,
+                    "method": "ALL",  # Django views typically handle multiple methods
+                    "handler": "view",
+                    "file": str(file.relative_to(self.project_dir)),
+                    "description": f"Django view at {path}",
+                })
+
+            for match in re_path_pattern.finditer(content):
+                # re_path uses regex, just record the pattern
+                path = "/" + match.group(1)
+
+                endpoints.append({
+                    "path": path,
+                    "method": "ALL",
+                    "handler": "view",
+                    "file": str(file.relative_to(self.project_dir)),
+                    "description": "Django regex route",
+                })
+
+        return endpoints
+
+    def _extract_flask_routes(self) -> list[EndpointInfo]:
+        """Extract routes from Flask decorators."""
+        endpoints: list[EndpointInfo] = []
+
+        # Find Python files
+        py_files = self._find_files("**/*.py")
+
+        # Pattern for Flask routes
+        # @app.route('/path', methods=['GET', 'POST'])
+        # @bp.route('/path')
+        route_pattern = re.compile(
+            r'@(?:app|bp|blueprint)\s*\.\s*route\s*\(\s*["\']([^"\']+)["\'](?:\s*,\s*methods\s*=\s*\[([^\]]+)\])?',
+            re.IGNORECASE
+        )
+
+        # Pattern for Blueprint prefix
+        blueprint_pattern = re.compile(
+            r'Blueprint\s*\(\s*[^,]+\s*,\s*[^,]+\s*(?:,\s*url_prefix\s*=\s*["\']([^"\']+)["\'])?',
+            re.IGNORECASE
+        )
+
+        for file in py_files:
+            content = self._read_file_safe(file)
+            if content is None:
+                continue
+
+            # Skip if not a route file
+            if "@app." not in content and "@bp."
not in content and "@blueprint" not in content.lower(): + continue + + # Try to find blueprint prefix + prefix = "" + prefix_match = blueprint_pattern.search(content) + if prefix_match and prefix_match.group(1): + prefix = prefix_match.group(1) + + for match in route_pattern.finditer(content): + path = match.group(1) + methods_str = match.group(2) + + full_path = prefix + path if prefix else path + + # Parse methods + methods = ["GET"] # Default + if methods_str: + # Parse ['GET', 'POST'] format + methods = re.findall(r"['\"](\w+)['\"]", methods_str) + + for method in methods: + endpoints.append({ + "path": full_path, + "method": method.upper(), + "handler": "view", + "file": str(file.relative_to(self.project_dir)), + "description": f"{method.upper()} {full_path}", + }) + + return endpoints + + def _extract_components(self) -> list[ComponentInfo]: + """Extract models, services, and other components.""" + components: list[ComponentInfo] = [] + + # Find model files + model_files = ( + self._find_files("**/models.py") + + self._find_files("**/models/**/*.py") + + self._find_files("**/*_model.py") + ) + + for file in model_files: + if file.name != "__init__.py": + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "model", + }) + + # Find view/controller files + view_files = ( + self._find_files("**/views.py") + + self._find_files("**/views/**/*.py") + + self._find_files("**/routers/**/*.py") + + self._find_files("**/api/**/*.py") + ) + + for file in view_files: + if file.name != "__init__.py": + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "view", + }) + + # Find service files + service_files = ( + self._find_files("**/services/**/*.py") + + self._find_files("**/*_service.py") + ) + + for file in service_files: + if file.name != "__init__.py": + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "service", + }) + + return components From a4262be8ad39cf3c69275d1dcd30e6fa900b3c47 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:17:23 +0100 Subject: [PATCH 11/81] feat(import): add Vue.js/Nuxt analyzer Add vue_analyzer.py for Vue.js ecosystem detection: - Detects: Vue.js, Vue+Vite, Vue CLI, Nuxt.js - Extracts routes from: - Vue Router: { path: '/about' } in router files - Nuxt pages/ directory (file-based routing) - Extracts API endpoints from Nuxt server/api directory: - Detects HTTP method from filename (e.g., users.get.ts) - Handles dynamic routes [id].ts - Extracts components from: - components/ directory - views/ directory - layouts/ directory - pages/ directory (Nuxt) Detection heuristics: - package.json: "nuxt", "vue", "vite", "@vue/cli-service" - Config files: nuxt.config.js/ts - Entry files: src/App.vue, src/main.ts Co-Authored-By: Claude Opus 4.5 --- analyzers/vue_analyzer.py | 319 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 319 insertions(+) create mode 100644 analyzers/vue_analyzer.py diff --git a/analyzers/vue_analyzer.py b/analyzers/vue_analyzer.py new file mode 100644 index 00000000..75b3ae41 --- /dev/null +++ b/analyzers/vue_analyzer.py @@ -0,0 +1,319 @@ +""" +Vue.js Analyzer +=============== + +Detects Vue.js and Nuxt.js projects. +Extracts routes from Vue Router and Nuxt file-based routing. 
+""" + +import json +import re +from pathlib import Path + +from .base_analyzer import ( + AnalysisResult, + BaseAnalyzer, + ComponentInfo, + EndpointInfo, + RouteInfo, +) + + +class VueAnalyzer(BaseAnalyzer): + """Analyzer for Vue.js and Nuxt.js projects.""" + + @property + def stack_name(self) -> str: + return self._detected_stack + + def __init__(self, project_dir: Path): + super().__init__(project_dir) + self._detected_stack = "vue" # Default, may change to "nuxt" + + def can_analyze(self) -> tuple[bool, float]: + """Detect if this is a Vue.js/Nuxt.js project.""" + confidence = 0.0 + + # Check package.json + package_json = self.project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + deps = { + **data.get("dependencies", {}), + **data.get("devDependencies", {}), + } + + # Check for Nuxt first (more specific) + if "nuxt" in deps or "nuxt3" in deps: + self._detected_stack = "nuxt" + confidence = 0.95 + return True, confidence + + # Check for Vue + if "vue" in deps: + confidence = 0.85 + + # Check for Vite + if "vite" in deps: + self._detected_stack = "vue-vite" + confidence = 0.9 + + # Check for Vue CLI + if "@vue/cli-service" in deps: + self._detected_stack = "vue-cli" + confidence = 0.9 + + return True, confidence + + except (json.JSONDecodeError, OSError): + pass + + # Check for Nuxt config + if (self.project_dir / "nuxt.config.js").exists() or \ + (self.project_dir / "nuxt.config.ts").exists(): + self._detected_stack = "nuxt" + return True, 0.95 + + # Check for common Vue files + if (self.project_dir / "src" / "App.vue").exists(): + return True, 0.7 + + return False, 0.0 + + def analyze(self) -> AnalysisResult: + """Analyze the Vue.js/Nuxt.js project.""" + routes: list[RouteInfo] = [] + components: list[ComponentInfo] = [] + endpoints: list[EndpointInfo] = [] + config_files: list[str] = [] + dependencies: dict[str, str] = {} + entry_point: str | None = None + + # Load dependencies from package.json + package_json = self.project_dir / "package.json" + if package_json.exists(): + try: + data = json.loads(package_json.read_text()) + dependencies = { + **data.get("dependencies", {}), + **data.get("devDependencies", {}), + } + except (json.JSONDecodeError, OSError): + pass + + # Collect config files + for config_name in [ + "nuxt.config.js", "nuxt.config.ts", + "vite.config.js", "vite.config.ts", + "vue.config.js", "tsconfig.json", + "tailwind.config.js", "tailwind.config.ts", + ]: + if (self.project_dir / config_name).exists(): + config_files.append(config_name) + + # Detect entry point + for entry in ["src/main.ts", "src/main.js", "app.vue", "src/App.vue"]: + if (self.project_dir / entry).exists(): + entry_point = entry + break + + # Extract routes based on stack type + if self._detected_stack == "nuxt": + routes = self._extract_nuxt_routes() + endpoints = self._extract_nuxt_api_routes() + else: + routes = self._extract_vue_router_routes() + + # Extract components + components = self._extract_components() + + return { + "stack_name": self._detected_stack, + "confidence": 0.85, + "routes": routes, + "components": components, + "endpoints": endpoints, + "entry_point": entry_point, + "config_files": config_files, + "dependencies": dependencies, + "metadata": { + "has_typescript": "typescript" in dependencies, + "has_tailwind": "tailwindcss" in dependencies, + "has_vue_router": "vue-router" in dependencies, + "has_pinia": "pinia" in dependencies, + "has_vuex": "vuex" in dependencies, + }, + } + + def _extract_nuxt_routes(self) -> 
list[RouteInfo]: + """Extract routes from Nuxt file-based routing.""" + routes: list[RouteInfo] = [] + + # Check for pages directory + pages_dirs = [ + self.project_dir / "pages", + self.project_dir / "src" / "pages", + ] + + for pages_dir in pages_dirs: + if pages_dir.exists(): + routes.extend(self._extract_pages_routes(pages_dir)) + + return routes + + def _extract_pages_routes(self, pages_dir: Path) -> list[RouteInfo]: + """Extract routes from Nuxt pages directory.""" + routes: list[RouteInfo] = [] + + for page_file in pages_dir.rglob("*.vue"): + rel_path = page_file.relative_to(pages_dir) + route_path = "/" + str(rel_path.with_suffix("")) + + # Handle index files + route_path = route_path.replace("/index", "") + if not route_path: + route_path = "/" + + # Handle dynamic routes: [id].vue or _id.vue -> :id + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + route_path = re.sub(r"/_([^/]+)", r"/:\1", route_path) + + routes.append({ + "path": route_path, + "method": "GET", + "handler": page_file.stem, + "file": str(page_file.relative_to(self.project_dir)), + }) + + return routes + + def _extract_nuxt_api_routes(self) -> list[EndpointInfo]: + """Extract API routes from Nuxt server directory.""" + endpoints: list[EndpointInfo] = [] + + # Nuxt 3 uses server/api directory + api_dirs = [ + self.project_dir / "server" / "api", + self.project_dir / "server" / "routes", + ] + + for api_dir in api_dirs: + if not api_dir.exists(): + continue + + for api_file in api_dir.rglob("*.ts"): + rel_path = api_file.relative_to(api_dir) + route_path = "/api/" + str(rel_path.with_suffix("")) + + # Handle index files + route_path = route_path.replace("/index", "") + + # Handle dynamic routes + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + + # Try to detect method from filename + method = "ALL" + for m in ["get", "post", "put", "patch", "delete"]: + if api_file.stem.endswith(f".{m}") or api_file.stem == m: + method = m.upper() + route_path = route_path.replace(f".{m}", "") + break + + endpoints.append({ + "path": route_path, + "method": method, + "handler": "handler", + "file": str(api_file.relative_to(self.project_dir)), + "description": f"{method} {route_path}", + }) + + # Also check .js files + for api_file in api_dir.rglob("*.js"): + rel_path = api_file.relative_to(api_dir) + route_path = "/api/" + str(rel_path.with_suffix("")) + route_path = route_path.replace("/index", "") + route_path = re.sub(r"\[([^\]]+)\]", r":\1", route_path) + + endpoints.append({ + "path": route_path, + "method": "ALL", + "handler": "handler", + "file": str(api_file.relative_to(self.project_dir)), + "description": f"API endpoint at {route_path}", + }) + + return endpoints + + def _extract_vue_router_routes(self) -> list[RouteInfo]: + """Extract routes from Vue Router configuration.""" + routes: list[RouteInfo] = [] + + # Look for router configuration files + router_files = ( + self._find_files("**/router/**/*.js") + + self._find_files("**/router/**/*.ts") + + self._find_files("**/router.js") + + self._find_files("**/router.ts") + + self._find_files("**/routes.js") + + self._find_files("**/routes.ts") + ) + + # Pattern for Vue Router routes + # { path: '/about', ... 
} + route_pattern = re.compile( + r'{\s*path:\s*["\']([^"\']+)["\']', + re.IGNORECASE + ) + + for file in router_files: + content = self._read_file_safe(file) + if content is None: + continue + + for match in route_pattern.finditer(content): + routes.append({ + "path": match.group(1), + "method": "GET", + "handler": "RouterRoute", + "file": str(file.relative_to(self.project_dir)), + }) + + return routes + + def _extract_components(self) -> list[ComponentInfo]: + """Extract Vue components.""" + components: list[ComponentInfo] = [] + + # Find component files + component_files = ( + self._find_files("**/components/**/*.vue") + + self._find_files("**/views/**/*.vue") + ) + + for file in component_files: + # Determine component type + if "views" in file.parts: + comp_type = "view" + elif "layouts" in file.parts: + comp_type = "layout" + else: + comp_type = "component" + + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": comp_type, + }) + + # Find page files (Nuxt) + page_files = self._find_files("**/pages/**/*.vue") + + for file in page_files: + components.append({ + "name": file.stem, + "file": str(file.relative_to(self.project_dir)), + "type": "page", + }) + + return components From f3f9e598062387b981bd8bd917b6d70ea8abb3b8 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:17:42 +0100 Subject: [PATCH 12/81] docs(fork): update changelog with Stack Detector feature Document the Stack Detector implementation: - List all analyzer modules and their capabilities - Add usage example - Create supported stacks table with indicators - Document detection heuristics Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index 9392ffdc..926fc497 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -9,6 +9,48 @@ Format based on [Keep a Changelog](https://keepachangelog.com/). 
- Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] Stack Detector (Import Projects - Phase 1) + +### Added +- New module: `analyzers/` - Codebase analysis for project import +- `analyzers/base_analyzer.py` - Abstract base class with TypedDicts +- `analyzers/stack_detector.py` - Orchestrator for running all analyzers +- `analyzers/react_analyzer.py` - React, Vite, Next.js detection +- `analyzers/node_analyzer.py` - Express, NestJS, Fastify detection +- `analyzers/python_analyzer.py` - FastAPI, Django, Flask detection +- `analyzers/vue_analyzer.py` - Vue.js, Nuxt detection + +### Features +- Auto-detect tech stack from package.json, requirements.txt, config files +- Extract routes from React Router, Next.js file-based, Vue Router +- Extract API endpoints from Express, FastAPI, Django, NestJS +- Extract components from components/, views/, models/ directories +- Confidence scoring for each detected stack + +### Usage +```python +from analyzers import StackDetector + +detector = StackDetector(project_dir) +result = detector.detect() # Full analysis +quick = detector.detect_quick() # Fast preview +``` + +### Supported Stacks +| Stack | Indicators | +|-------|-----------| +| React | "react" in package.json, src/App.tsx | +| Next.js | next.config.js, pages/ or app/ dirs | +| Vue.js | "vue" in package.json, src/App.vue | +| Nuxt | nuxt.config.js, pages/ | +| Express | "express" in package.json, routes/ | +| NestJS | "@nestjs/core" in package.json | +| FastAPI | "from fastapi import" in main.py | +| Django | manage.py in root | +| Flask | "from flask import" in app.py | + +--- + ## [2025-01-21] Quality Gates ### Added From 03d246c9ccdefeeb31d6303bc4bcec6e0baebaa4 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:19:07 +0100 Subject: [PATCH 13/81] feat(import): add feature extractor module Add feature_extractor.py to transform detected routes/endpoints into Autocoder features: Functions: - extract_features(): Transform StackDetectionResult into features - extract_from_project(): One-step detection and extraction - features_to_bulk_create_format(): Format for feature_create_bulk MCP Feature generation: - Routes -> "View X page" navigation features - API endpoints -> "API: List/Create/Update/Delete X" features - Components -> Page/Model/Service features (lower priority) - Infrastructure -> Basic startup and health check features Naming heuristics: - /users -> "View users page" - /users/:id -> "View user details page" - POST /api/users -> "API: Create users" - DELETE /api/users/:id -> "API: Delete users" Each feature includes: - category: Navigation, API, Authentication, etc. - name: Human-readable feature name - description: What the feature should do - steps: Test/verification steps All features marked as pending (passes=False) for verification. Co-Authored-By: Claude Opus 4.5 --- analyzers/__init__.py | 19 +- analyzers/feature_extractor.py | 446 +++++++++++++++++++++++++++++++++ 2 files changed, 464 insertions(+), 1 deletion(-) create mode 100644 analyzers/feature_extractor.py diff --git a/analyzers/__init__.py b/analyzers/__init__.py index 1b90152e..5e6f1eb6 100644 --- a/analyzers/__init__.py +++ b/analyzers/__init__.py @@ -5,14 +5,31 @@ Modules for analyzing existing codebases to detect tech stack, extract features, and prepare for import into Autocoder. 
-Main entry point: stack_detector.py +Main entry points: +- StackDetector: Detect tech stack and extract routes/endpoints +- extract_features: Transform detection result into Autocoder features +- extract_from_project: One-step detection and feature extraction """ from .stack_detector import StackDetector, StackDetectionResult from .base_analyzer import BaseAnalyzer +from .feature_extractor import ( + DetectedFeature, + FeatureExtractionResult, + extract_features, + extract_from_project, + features_to_bulk_create_format, +) __all__ = [ + # Stack Detection "StackDetector", "StackDetectionResult", "BaseAnalyzer", + # Feature Extraction + "DetectedFeature", + "FeatureExtractionResult", + "extract_features", + "extract_from_project", + "features_to_bulk_create_format", ] diff --git a/analyzers/feature_extractor.py b/analyzers/feature_extractor.py new file mode 100644 index 00000000..387a26c4 --- /dev/null +++ b/analyzers/feature_extractor.py @@ -0,0 +1,446 @@ +""" +Feature Extractor +================= + +Transforms detected routes, endpoints, and components into Autocoder features. +Each feature is marked as pending (passes=False) for verification. + +Generates features in the format expected by feature_create_bulk MCP tool. +""" + +from pathlib import Path +from typing import TypedDict + +from .stack_detector import StackDetectionResult + + +class DetectedFeature(TypedDict): + """A feature extracted from codebase analysis.""" + category: str + name: str + description: str + steps: list[str] + source_type: str # "route", "endpoint", "component", "inferred" + source_file: str | None + confidence: float # 0.0 to 1.0 + + +class FeatureExtractionResult(TypedDict): + """Result of feature extraction.""" + features: list[DetectedFeature] + count: int + by_category: dict[str, int] + summary: str + + +def _route_to_feature_name(path: str, method: str = "GET") -> str: + """ + Convert a route path to a human-readable feature name. 
+ + Examples: + "/" -> "View home page" + "/users" -> "View users page" + "/users/:id" -> "View user details page" + "/api/users" -> "API: List users" + """ + # Clean up path + path = path.strip("/") + + if not path: + return "View home page" + + # Handle API routes + if path.startswith("api/"): + api_path = path[4:] # Remove "api/" + parts = api_path.split("/") + + # Handle dynamic segments + parts = [p for p in parts if not p.startswith(":") and not p.startswith("[")] + + if not parts: + return "API: Root endpoint" + + resource = parts[-1].replace("-", " ").replace("_", " ").title() + + if method == "GET": + if any(p.startswith(":") or p.startswith("[") for p in api_path.split("/")): + return f"API: Get {resource} details" + return f"API: List {resource}" + elif method == "POST": + return f"API: Create {resource}" + elif method == "PUT" or method == "PATCH": + return f"API: Update {resource}" + elif method == "DELETE": + return f"API: Delete {resource}" + else: + return f"API: {resource} endpoint" + + # Handle page routes + parts = path.split("/") + + # Handle dynamic segments (remove them from naming) + clean_parts = [p for p in parts if not p.startswith(":") and not p.startswith("[")] + + if not clean_parts: + return "View dynamic page" + + # Build name from path parts + page_name = " ".join(p.replace("-", " ").replace("_", " ") for p in clean_parts) + page_name = page_name.title() + + # Check if it's a detail page (has dynamic segment) + has_dynamic = any(p.startswith(":") or p.startswith("[") for p in parts) + + if has_dynamic: + return f"View {page_name} details page" + + return f"View {page_name} page" + + +def _generate_page_steps(path: str, stack: str | None) -> list[str]: + """Generate test steps for a page route.""" + clean_path = path + + # Replace dynamic segments with example values + if ":id" in clean_path or "[id]" in clean_path: + clean_path = clean_path.replace(":id", "123").replace("[id]", "123") + + # Generate steps + steps = [ + f"Navigate to {clean_path}", + "Verify the page loads without errors", + "Verify the page title and main content are visible", + ] + + # Add stack-specific checks + if stack in ("react", "nextjs", "vue", "nuxt"): + steps.append("Verify no console errors in browser developer tools") + steps.append("Verify responsive layout at mobile and desktop widths") + + return steps + + +def _generate_api_steps(path: str, method: str) -> list[str]: + """Generate test steps for an API endpoint.""" + # Replace dynamic segments with example values + test_path = path.replace(":id", "123").replace("[id]", "123") + + steps = [] + + if method == "GET": + steps = [ + f"Send GET request to {test_path}", + "Verify response status code is 200", + "Verify response body contains expected data structure", + ] + elif method == "POST": + steps = [ + f"Send POST request to {test_path} with valid payload", + "Verify response status code is 201 (created)", + "Verify response contains the created resource", + f"Send POST request to {test_path} with invalid payload", + "Verify response status code is 400 (bad request)", + ] + elif method in ("PUT", "PATCH"): + steps = [ + f"Send {method} request to {test_path} with valid payload", + "Verify response status code is 200", + "Verify response contains the updated resource", + "Verify the resource was actually updated", + ] + elif method == "DELETE": + steps = [ + f"Send DELETE request to {test_path}", + "Verify response status code is 200 or 204", + "Verify the resource no longer exists", + ] + else: + steps = [ + f"Send {method} 
request to {test_path}", + "Verify response status code is appropriate", + ] + + return steps + + +def _generate_component_steps(name: str, comp_type: str) -> list[str]: + """Generate test steps for a component.""" + if comp_type == "page": + return [ + f"Navigate to the {name} page", + "Verify all UI elements render correctly", + "Test user interactions (buttons, forms, etc.)", + "Verify data is fetched and displayed", + ] + elif comp_type == "model": + return [ + f"Verify {name} model schema matches expected fields", + "Test CRUD operations on the model", + "Verify validation rules work correctly", + ] + elif comp_type == "middleware": + return [ + f"Verify {name} middleware processes requests correctly", + "Test edge cases and error handling", + ] + elif comp_type == "service": + return [ + f"Verify {name} service methods work correctly", + "Test error handling in service layer", + ] + else: + return [ + f"Verify {name} component renders correctly", + "Test component props and state", + "Verify component interactions work", + ] + + +def extract_features(detection_result: StackDetectionResult) -> FeatureExtractionResult: + """ + Extract features from a stack detection result. + + Converts routes, endpoints, and components into Autocoder features. + Each feature is ready to be created via feature_create_bulk. + + Args: + detection_result: Result from StackDetector.detect() + + Returns: + FeatureExtractionResult with list of features + """ + features: list[DetectedFeature] = [] + primary_frontend = detection_result.get("primary_frontend") + primary_backend = detection_result.get("primary_backend") + + # Track unique features to avoid duplicates + seen_features: set[str] = set() + + # Extract features from routes (frontend pages) + for route in detection_result.get("all_routes", []): + path = route.get("path", "") + method = route.get("method", "GET") + source_file = route.get("file") + + feature_name = _route_to_feature_name(path, method) + + # Skip duplicates + feature_key = f"route:{path}:{method}" + if feature_key in seen_features: + continue + seen_features.add(feature_key) + + features.append({ + "category": "Navigation", + "name": feature_name, + "description": f"User can navigate to and view the {path or '/'} page. The page should load correctly and display the expected content.", + "steps": _generate_page_steps(path, primary_frontend), + "source_type": "route", + "source_file": source_file, + "confidence": 0.8, + }) + + # Extract features from API endpoints + for endpoint in detection_result.get("all_endpoints", []): + path = endpoint.get("path", "") + method = endpoint.get("method", "ALL") + source_file = endpoint.get("file") + + # Handle ALL method by creating GET endpoint + if method == "ALL": + method = "GET" + + feature_name = _route_to_feature_name(path, method) + + # Skip duplicates + feature_key = f"endpoint:{path}:{method}" + if feature_key in seen_features: + continue + seen_features.add(feature_key) + + # Determine category based on path + category = "API" + path_lower = path.lower() + if "auth" in path_lower or "login" in path_lower or "register" in path_lower: + category = "Authentication" + elif "user" in path_lower or "profile" in path_lower: + category = "User Management" + elif "admin" in path_lower: + category = "Administration" + + features.append({ + "category": category, + "name": feature_name, + "description": f"{method} endpoint at {path}. 
Should handle requests appropriately and return correct responses.", + "steps": _generate_api_steps(path, method), + "source_type": "endpoint", + "source_file": source_file, + "confidence": 0.85, + }) + + # Extract features from components (with lower priority) + component_features: list[DetectedFeature] = [] + for component in detection_result.get("all_components", []): + name = component.get("name", "") + comp_type = component.get("type", "component") + source_file = component.get("file") + + # Skip common/generic components + skip_names = ["index", "app", "main", "layout", "_app", "_document"] + if name.lower() in skip_names: + continue + + # Skip duplicates + feature_key = f"component:{name}:{comp_type}" + if feature_key in seen_features: + continue + seen_features.add(feature_key) + + # Only include significant components + if comp_type in ("page", "view", "model", "service"): + clean_name = name.replace("-", " ").replace("_", " ").title() + + # Determine category + if comp_type == "model": + category = "Data Models" + elif comp_type == "service": + category = "Services" + elif comp_type in ("page", "view"): + category = "Pages" + else: + category = "Components" + + component_features.append({ + "category": category, + "name": f"{clean_name} {comp_type.title()}", + "description": f"The {clean_name} {comp_type} should function correctly and handle all expected use cases.", + "steps": _generate_component_steps(name, comp_type), + "source_type": "component", + "source_file": source_file, + "confidence": 0.6, # Lower confidence for component-based features + }) + + # Add component features if we don't have many from routes/endpoints + if len(features) < 10: + features.extend(component_features[:10]) # Limit to 10 component features + + # Add basic infrastructure features + basic_features = _generate_basic_features(detection_result) + features.extend(basic_features) + + # Count by category + by_category: dict[str, int] = {} + for f in features: + cat = f["category"] + by_category[cat] = by_category.get(cat, 0) + 1 + + # Build summary + summary = f"Extracted {len(features)} features from {len(detection_result.get('detected_stacks', []))} detected stack(s)" + + return { + "features": features, + "count": len(features), + "by_category": by_category, + "summary": summary, + } + + +def _generate_basic_features(detection_result: StackDetectionResult) -> list[DetectedFeature]: + """Generate basic infrastructure features based on detected stack.""" + features: list[DetectedFeature] = [] + + primary_frontend = detection_result.get("primary_frontend") + primary_backend = detection_result.get("primary_backend") + + # Application startup feature + if primary_frontend or primary_backend: + features.append({ + "category": "Infrastructure", + "name": "Application starts successfully", + "description": "The application should start without errors and be accessible.", + "steps": [ + "Run the application start command", + "Verify the server starts without errors", + "Access the application URL", + "Verify the main page loads", + ], + "source_type": "inferred", + "source_file": None, + "confidence": 1.0, + }) + + # Frontend-specific features + if primary_frontend in ("react", "nextjs", "vue", "nuxt"): + features.append({ + "category": "Infrastructure", + "name": "No console errors on page load", + "description": "The application should load without JavaScript errors in the browser console.", + "steps": [ + "Open browser developer tools", + "Navigate to the home page", + "Check the console for errors", + 
"Navigate to other pages and repeat", + ], + "source_type": "inferred", + "source_file": None, + "confidence": 0.9, + }) + + # Backend-specific features + if primary_backend in ("express", "fastapi", "django", "flask", "nestjs"): + features.append({ + "category": "Infrastructure", + "name": "Health check endpoint responds", + "description": "The API should have a health check endpoint that responds correctly.", + "steps": [ + "Send GET request to /health or /api/health", + "Verify response status is 200", + "Verify response indicates healthy status", + ], + "source_type": "inferred", + "source_file": None, + "confidence": 0.7, + }) + + return features + + +def features_to_bulk_create_format(features: list[DetectedFeature]) -> list[dict]: + """ + Convert extracted features to the format expected by feature_create_bulk. + + Removes source_type, source_file, and confidence fields. + Returns a list ready for MCP tool consumption. + + Args: + features: List of DetectedFeature objects + + Returns: + List of dicts with category, name, description, steps + """ + return [ + { + "category": f["category"], + "name": f["name"], + "description": f["description"], + "steps": f["steps"], + } + for f in features + ] + + +def extract_from_project(project_dir: str | Path) -> FeatureExtractionResult: + """ + Convenience function to detect stack and extract features in one step. + + Args: + project_dir: Path to the project directory + + Returns: + FeatureExtractionResult with extracted features + """ + from .stack_detector import StackDetector + + detector = StackDetector(Path(project_dir)) + detection_result = detector.detect() + return extract_features(detection_result) From 41f99d19f5b964be38e97ac78e5282e1e3333d61 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:20:50 +0100 Subject: [PATCH 14/81] feat(import): add import project REST API Add server/routers/import_project.py with endpoints: - POST /api/import/analyze: Analyze project directory, detect stack - POST /api/import/extract-features: Generate features from analysis - POST /api/import/create-features: Create features in project database - GET /api/import/quick-detect: Quick stack detection for UI preview Response models: - AnalyzeResponse: Detected stacks, counts, summary - ExtractFeaturesResponse: Features with category, steps, confidence - CreateFeaturesResponse: Creation confirmation Integration: - Added import_project_router to server/routers/__init__.py - Registered router in server/main.py Usage flow: 1. User selects directory 2. POST /analyze to detect stack 3. POST /extract-features to generate features 4. User reviews and optionally edits features 5. 
POST /create-features to import into project Co-Authored-By: Claude Opus 4.5 --- server/main.py | 2 + server/routers/__init__.py | 2 + server/routers/import_project.py | 320 +++++++++++++++++++++++++++++++ 3 files changed, 324 insertions(+) create mode 100644 server/routers/import_project.py diff --git a/server/main.py b/server/main.py index 9340315f..7735e6c4 100644 --- a/server/main.py +++ b/server/main.py @@ -28,6 +28,7 @@ expand_project_router, features_router, filesystem_router, + import_project_router, projects_router, settings_router, spec_creation_router, @@ -117,6 +118,7 @@ async def require_localhost(request: Request, call_next): app.include_router(assistant_chat_router) app.include_router(settings_router) app.include_router(terminal_router) +app.include_router(import_project_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index 763247fc..e1e0ce3b 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -15,6 +15,7 @@ from .settings import router as settings_router from .spec_creation import router as spec_creation_router from .terminal import router as terminal_router +from .import_project import router as import_project_router __all__ = [ "projects_router", @@ -27,4 +28,5 @@ "assistant_chat_router", "settings_router", "terminal_router", + "import_project_router", ] diff --git a/server/routers/import_project.py b/server/routers/import_project.py new file mode 100644 index 00000000..fd24b86d --- /dev/null +++ b/server/routers/import_project.py @@ -0,0 +1,320 @@ +""" +Import Project Router +===================== + +REST and WebSocket endpoints for importing existing projects into Autocoder. + +The import flow: +1. POST /api/import/analyze - Analyze codebase, detect stack +2. POST /api/import/extract-features - Generate features from analysis +3. POST /api/import/create-features - Create features in database +""" + +import json +import logging +import re +import sys +from pathlib import Path +from typing import Optional + +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/import", tags=["import-project"]) + +# Root directory +ROOT_DIR = Path(__file__).parent.parent.parent + +# Add root to path for imports +if str(ROOT_DIR) not in sys.path: + sys.path.insert(0, str(ROOT_DIR)) + + +def _get_project_path(project_name: str) -> Path | None: + """Get project path from registry.""" + from registry import get_project_path + return get_project_path(project_name) + + +def validate_path(path: str) -> bool: + """Validate path to prevent traversal attacks.""" + # Allow absolute paths but check for common attack patterns + if ".." 
in path or "\x00" in path: + return False + return True + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + +class AnalyzeRequest(BaseModel): + """Request to analyze a project directory.""" + path: str = Field(..., description="Absolute path to the project directory") + + +class StackInfo(BaseModel): + """Information about a detected stack.""" + name: str + category: str + confidence: float + + +class AnalyzeResponse(BaseModel): + """Response from project analysis.""" + project_dir: str + detected_stacks: list[StackInfo] + primary_frontend: Optional[str] = None + primary_backend: Optional[str] = None + database: Optional[str] = None + routes_count: int + components_count: int + endpoints_count: int + summary: str + + +class ExtractFeaturesRequest(BaseModel): + """Request to extract features from an analyzed project.""" + path: str = Field(..., description="Absolute path to the project directory") + + +class DetectedFeature(BaseModel): + """A feature extracted from codebase analysis.""" + category: str + name: str + description: str + steps: list[str] + source_type: str + source_file: Optional[str] = None + confidence: float + + +class ExtractFeaturesResponse(BaseModel): + """Response from feature extraction.""" + features: list[DetectedFeature] + count: int + by_category: dict[str, int] + summary: str + + +class CreateFeaturesRequest(BaseModel): + """Request to create features in the database.""" + project_name: str = Field(..., description="Name of the registered project") + features: list[dict] = Field(..., description="Features to create (category, name, description, steps)") + + +class CreateFeaturesResponse(BaseModel): + """Response from feature creation.""" + created: int + project_name: str + message: str + + +# ============================================================================ +# REST Endpoints +# ============================================================================ + +@router.post("/analyze", response_model=AnalyzeResponse) +async def analyze_project(request: AnalyzeRequest): + """ + Analyze a project directory to detect tech stack. + + Returns detected stacks with confidence scores, plus counts of + routes, endpoints, and components found. 
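+
+    A minimal example request body (the path shown is illustrative):
+
+        {"path": "/path/to/existing/project"}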
+ """ + if not validate_path(request.path): + raise HTTPException(status_code=400, detail="Invalid path") + + project_dir = Path(request.path).resolve() + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Directory not found") + + if not project_dir.is_dir(): + raise HTTPException(status_code=400, detail="Path is not a directory") + + try: + from analyzers import StackDetector + + detector = StackDetector(project_dir) + result = detector.detect() + + # Convert to response model + stacks = [ + StackInfo( + name=s["name"], + category=s["category"], + confidence=s["confidence"], + ) + for s in result["detected_stacks"] + ] + + return AnalyzeResponse( + project_dir=str(project_dir), + detected_stacks=stacks, + primary_frontend=result.get("primary_frontend"), + primary_backend=result.get("primary_backend"), + database=result.get("database"), + routes_count=result.get("routes_count", 0), + components_count=result.get("components_count", 0), + endpoints_count=result.get("endpoints_count", 0), + summary=result.get("summary", ""), + ) + + except Exception as e: + logger.exception(f"Error analyzing project: {e}") + raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}") + + +@router.post("/extract-features", response_model=ExtractFeaturesResponse) +async def extract_features(request: ExtractFeaturesRequest): + """ + Extract features from an analyzed project. + + Returns a list of features ready for import, each with: + - category, name, description, steps + - source_type (route, endpoint, component, inferred) + - confidence score + """ + if not validate_path(request.path): + raise HTTPException(status_code=400, detail="Invalid path") + + project_dir = Path(request.path).resolve() + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Directory not found") + + try: + from analyzers import extract_from_project + + result = extract_from_project(project_dir) + + # Convert to response model + features = [ + DetectedFeature( + category=f["category"], + name=f["name"], + description=f["description"], + steps=f["steps"], + source_type=f["source_type"], + source_file=f.get("source_file"), + confidence=f["confidence"], + ) + for f in result["features"] + ] + + return ExtractFeaturesResponse( + features=features, + count=result["count"], + by_category=result["by_category"], + summary=result["summary"], + ) + + except Exception as e: + logger.exception(f"Error extracting features: {e}") + raise HTTPException(status_code=500, detail=f"Feature extraction failed: {str(e)}") + + +@router.post("/create-features", response_model=CreateFeaturesResponse) +async def create_features(request: CreateFeaturesRequest): + """ + Create features in the database for a registered project. + + Takes extracted features and creates them via the feature database. + All features are created with passes=False (pending verification). 
+ """ + # Validate project name + if not re.match(r'^[a-zA-Z0-9_-]{1,50}$', request.project_name): + raise HTTPException(status_code=400, detail="Invalid project name") + + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found in registry") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + if not request.features: + raise HTTPException(status_code=400, detail="No features provided") + + try: + from api.database import Feature, create_database + + # Initialize database + engine, SessionLocal = create_database(project_dir) + session = SessionLocal() + + try: + # Get starting priority + from sqlalchemy import func + max_priority = session.query(func.max(Feature.priority)).scalar() or 0 + + # Create features + created_count = 0 + for i, f in enumerate(request.features): + # Validate required fields + if not all(key in f for key in ["category", "name", "description", "steps"]): + logger.warning(f"Skipping feature missing required fields: {f}") + continue + + feature = Feature( + priority=max_priority + i + 1, + category=f["category"], + name=f["name"], + description=f["description"], + steps=f["steps"], + passes=False, + in_progress=False, + ) + session.add(feature) + created_count += 1 + + session.commit() + + return CreateFeaturesResponse( + created=created_count, + project_name=request.project_name, + message=f"Created {created_count} features for project '{request.project_name}'", + ) + + finally: + session.close() + + except Exception as e: + logger.exception(f"Error creating features: {e}") + raise HTTPException(status_code=500, detail=f"Feature creation failed: {str(e)}") + + +@router.get("/quick-detect") +async def quick_detect(path: str): + """ + Quick detection endpoint for UI preview. + + Returns only stack names and confidence without full analysis. + Useful for showing detected stack while user configures import. + """ + if not validate_path(path): + raise HTTPException(status_code=400, detail="Invalid path") + + project_dir = Path(path).resolve() + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Directory not found") + + try: + from analyzers import StackDetector + + detector = StackDetector(project_dir) + result = detector.detect_quick() + + return { + "project_dir": str(project_dir), + "stacks": result.get("stacks", []), + "primary": result.get("primary"), + } + + except Exception as e: + logger.exception(f"Error in quick detect: {e}") + raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}") From 739f605164c3d787d3d3a25c93870ec1c05d64b9 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:21:08 +0100 Subject: [PATCH 15/81] docs(fork): update changelog with Import Project API Document the Import Project REST API: - List all endpoints with methods and descriptions - Explain feature extraction logic - Add usage examples with curl commands Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index 926fc497..143640d5 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -9,6 +9,46 @@ Format based on [Keep a Changelog](https://keepachangelog.com/). 
- Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] Import Project API (Import Projects - Phase 2) + +### Added +- New router: `server/routers/import_project.py` - REST API for project import +- New module: `analyzers/feature_extractor.py` - Transform routes to features + +### API Endpoints +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/import/analyze` | POST | Analyze directory, detect stack | +| `/api/import/extract-features` | POST | Generate features from analysis | +| `/api/import/create-features` | POST | Create features in database | +| `/api/import/quick-detect` | GET | Quick stack preview | + +### Feature Extraction +- Routes -> "View X page" navigation features +- API endpoints -> "API: Create/List/Update/Delete X" features +- Infrastructure -> Startup, health check features +- Each feature includes category, name, description, steps + +### Usage +```bash +# 1. Analyze project +curl -X POST http://localhost:8888/api/import/analyze \ + -H "Content-Type: application/json" \ + -d '{"path": "/path/to/existing/project"}' + +# 2. Extract features +curl -X POST http://localhost:8888/api/import/extract-features \ + -H "Content-Type: application/json" \ + -d '{"path": "/path/to/existing/project"}' + +# 3. Create features in registered project +curl -X POST http://localhost:8888/api/import/create-features \ + -H "Content-Type: application/json" \ + -d '{"project_name": "my-project", "features": [...]}' +``` + +--- + ## [2025-01-21] Stack Detector (Import Projects - Phase 1) ### Added From 5c5500761219c1252081af8c7e216ddc1e593819 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:24:01 +0100 Subject: [PATCH 16/81] docs(fork): mark completed features in Planned Features section Update the Planned Features checklist: - [x] Quality Gates (Phase 1) - [x] Stack Detector (Phase 2) - [x] Feature Extractor (Phase 2) - [x] Import Wizard API (Phase 2) - [x] Error Recovery (Phase 3) Split Import Wizard into API (done) and UI (pending). 
Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index 143640d5..e150ecde 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -190,17 +190,18 @@ The following features are planned for implementation: ### Phase 1: Foundation (Quick Wins) - [ ] Enhanced Logging - Structured logs with filtering -- [ ] Quality Gates - Lint/type-check before marking passing +- [x] Quality Gates - Lint/type-check before marking passing ✅ - [ ] Security Scanning - Detect vulnerabilities ### Phase 2: Import Projects -- [ ] Stack Detector - Detect React, Next.js, Express, FastAPI, Django, Vue.js -- [ ] Feature Extractor - Reverse-engineer features from routes/endpoints -- [ ] Import Wizard UI - Chat-based project import +- [x] Stack Detector - Detect React, Next.js, Express, FastAPI, Django, Vue.js ✅ +- [x] Feature Extractor - Reverse-engineer features from routes/endpoints ✅ +- [x] Import Wizard API - REST endpoints for import flow ✅ +- [ ] Import Wizard UI - Chat-based project import (UI component) ### Phase 3: Workflow Improvements - [ ] Feature Branches - Git workflow with feature branches -- [ ] Error Recovery - Handle stuck features, auto-clear on startup +- [x] Error Recovery - Handle stuck features, auto-clear on startup ✅ - [ ] Review Agent - Automatic code review - [ ] CI/CD Integration - GitHub Actions generation From 09f836e5a088cb2fefae53dd928a443c42ac1980 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:27:55 +0100 Subject: [PATCH 17/81] feat(logging): add structured logging module with SQLite storage - Add StructuredLogger class with JSON format, agent/feature/tool tracking - Add LogQuery class for filtering, search, timeline, and stats - Add REST API endpoints for log querying and export - Support JSONL, JSON, CSV export formats Co-Authored-By: Claude Opus 4.5 --- server/main.py | 2 + server/routers/__init__.py | 2 + server/routers/logs.py | 316 ++++++++++++++++++++ structured_logging.py | 574 +++++++++++++++++++++++++++++++++++++ 4 files changed, 894 insertions(+) create mode 100644 server/routers/logs.py create mode 100644 structured_logging.py diff --git a/server/main.py b/server/main.py index 7735e6c4..2d2ac5bb 100644 --- a/server/main.py +++ b/server/main.py @@ -29,6 +29,7 @@ features_router, filesystem_router, import_project_router, + logs_router, projects_router, settings_router, spec_creation_router, @@ -119,6 +120,7 @@ async def require_localhost(request: Request, call_next): app.include_router(settings_router) app.include_router(terminal_router) app.include_router(import_project_router) +app.include_router(logs_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index e1e0ce3b..79acc7e5 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -16,6 +16,7 @@ from .spec_creation import router as spec_creation_router from .terminal import router as terminal_router from .import_project import router as import_project_router +from .logs import router as logs_router __all__ = [ "projects_router", @@ -29,4 +30,5 @@ "settings_router", "terminal_router", "import_project_router", + "logs_router", ] diff --git a/server/routers/logs.py b/server/routers/logs.py new file mode 100644 index 00000000..05c2ccd1 --- /dev/null +++ b/server/routers/logs.py @@ -0,0 +1,316 @@ +""" +Logs Router +=========== + +REST API endpoints for querying and 
exporting structured logs. + +Endpoints: +- GET /api/logs - Query logs with filters +- GET /api/logs/timeline - Get activity timeline +- GET /api/logs/stats - Get per-agent statistics +- POST /api/logs/export - Export logs to file +""" + +import logging +import tempfile +from datetime import datetime, timedelta +from pathlib import Path +from typing import Literal, Optional + +from fastapi import APIRouter, HTTPException, Query +from fastapi.responses import FileResponse +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/logs", tags=["logs"]) + + +def _get_project_path(project_name: str) -> Path | None: + """Get project path from registry.""" + from registry import get_project_path + + return get_project_path(project_name) + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + + +class LogEntry(BaseModel): + """A structured log entry.""" + + id: int + timestamp: str + level: str + message: str + agent_id: Optional[str] = None + feature_id: Optional[int] = None + tool_name: Optional[str] = None + duration_ms: Optional[int] = None + extra: Optional[str] = None + + +class LogQueryResponse(BaseModel): + """Response from log query.""" + + logs: list[LogEntry] + total: int + limit: int + offset: int + + +class TimelineBucket(BaseModel): + """A timeline bucket with activity counts.""" + + timestamp: str + agents: dict[str, int] + total: int + errors: int + + +class TimelineResponse(BaseModel): + """Response from timeline query.""" + + buckets: list[TimelineBucket] + bucket_minutes: int + + +class AgentStats(BaseModel): + """Statistics for a single agent.""" + + agent_id: Optional[str] + total: int + info_count: int + warn_count: int + error_count: int + first_log: Optional[str] + last_log: Optional[str] + + +class StatsResponse(BaseModel): + """Response from stats query.""" + + agents: list[AgentStats] + total_logs: int + + +class ExportRequest(BaseModel): + """Request to export logs.""" + + project_name: str + format: Literal["json", "jsonl", "csv"] = "jsonl" + level: Optional[str] = None + agent_id: Optional[str] = None + feature_id: Optional[int] = None + since_hours: Optional[int] = None + + +class ExportResponse(BaseModel): + """Response from export request.""" + + filename: str + count: int + format: str + + +# ============================================================================ +# REST Endpoints +# ============================================================================ + + +@router.get("/{project_name}", response_model=LogQueryResponse) +async def query_logs( + project_name: str, + level: Optional[str] = Query(None, description="Filter by log level (debug, info, warn, error)"), + agent_id: Optional[str] = Query(None, description="Filter by agent ID"), + feature_id: Optional[int] = Query(None, description="Filter by feature ID"), + tool_name: Optional[str] = Query(None, description="Filter by tool name"), + search: Optional[str] = Query(None, description="Full-text search in message"), + since_hours: Optional[int] = Query(None, description="Filter logs from last N hours"), + limit: int = Query(100, ge=1, le=1000, description="Max results"), + offset: int = Query(0, ge=0, description="Pagination offset"), +): + """ + Query logs with filters. + + Supports filtering by level, agent, feature, tool, and full-text search. 
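+
+    An illustrative query (the project name and filter values are examples):
+
+        GET /api/logs/my-project?level=error&agent_id=coding-1&since_hours=24&limit=50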
+ """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from structured_logging import get_log_query + + query = get_log_query(project_dir) + + since = None + if since_hours: + since = datetime.utcnow() - timedelta(hours=since_hours) + + logs = query.query( + level=level, + agent_id=agent_id, + feature_id=feature_id, + tool_name=tool_name, + search=search, + since=since, + limit=limit, + offset=offset, + ) + + total = query.count(level=level, agent_id=agent_id, feature_id=feature_id, since=since) + + return LogQueryResponse( + logs=[LogEntry(**log) for log in logs], + total=total, + limit=limit, + offset=offset, + ) + + except Exception as e: + logger.exception(f"Error querying logs: {e}") + raise HTTPException(status_code=500, detail=f"Query failed: {str(e)}") + + +@router.get("/{project_name}/timeline", response_model=TimelineResponse) +async def get_timeline( + project_name: str, + since_hours: int = Query(24, ge=1, le=168, description="Hours to look back"), + bucket_minutes: int = Query(5, ge=1, le=60, description="Bucket size in minutes"), +): + """ + Get activity timeline bucketed by time intervals. + + Useful for visualizing agent activity over time. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from structured_logging import get_log_query + + query = get_log_query(project_dir) + + since = datetime.utcnow() - timedelta(hours=since_hours) + buckets = query.get_timeline(since=since, bucket_minutes=bucket_minutes) + + return TimelineResponse( + buckets=[TimelineBucket(**b) for b in buckets], + bucket_minutes=bucket_minutes, + ) + + except Exception as e: + logger.exception(f"Error getting timeline: {e}") + raise HTTPException(status_code=500, detail=f"Timeline query failed: {str(e)}") + + +@router.get("/{project_name}/stats", response_model=StatsResponse) +async def get_stats( + project_name: str, + since_hours: Optional[int] = Query(None, description="Hours to look back"), +): + """ + Get log statistics per agent. + + Shows total logs, info/warn/error counts, and time range per agent. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from structured_logging import get_log_query + + query = get_log_query(project_dir) + + since = None + if since_hours: + since = datetime.utcnow() - timedelta(hours=since_hours) + + agents = query.get_agent_stats(since=since) + total = sum(a.get("total", 0) for a in agents) + + return StatsResponse( + agents=[AgentStats(**a) for a in agents], + total_logs=total, + ) + + except Exception as e: + logger.exception(f"Error getting stats: {e}") + raise HTTPException(status_code=500, detail=f"Stats query failed: {str(e)}") + + +@router.post("/export", response_model=ExportResponse) +async def export_logs(request: ExportRequest): + """ + Export logs to a downloadable file. + + Supports JSON, JSONL, and CSV formats. 
+ """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from structured_logging import get_log_query + + query = get_log_query(project_dir) + + since = None + if request.since_hours: + since = datetime.utcnow() - timedelta(hours=request.since_hours) + + # Create temp file for export + suffix = f".{request.format}" if request.format != "jsonl" else ".jsonl" + timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + filename = f"logs_{request.project_name}_{timestamp}{suffix}" + + # Export to project's .autocoder/exports directory + export_dir = project_dir / ".autocoder" / "exports" + export_dir.mkdir(parents=True, exist_ok=True) + output_path = export_dir / filename + + count = query.export_logs( + output_path=output_path, + format=request.format, + level=request.level, + agent_id=request.agent_id, + feature_id=request.feature_id, + since=since, + ) + + return ExportResponse( + filename=filename, + count=count, + format=request.format, + ) + + except Exception as e: + logger.exception(f"Error exporting logs: {e}") + raise HTTPException(status_code=500, detail=f"Export failed: {str(e)}") + + +@router.get("/{project_name}/download/{filename}") +async def download_export(project_name: str, filename: str): + """Download an exported log file.""" + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + # Security: validate filename to prevent path traversal + if ".." in filename or "/" in filename or "\\" in filename: + raise HTTPException(status_code=400, detail="Invalid filename") + + export_path = project_dir / ".autocoder" / "exports" / filename + if not export_path.exists(): + raise HTTPException(status_code=404, detail="Export file not found") + + return FileResponse( + path=export_path, + filename=filename, + media_type="application/octet-stream", + ) diff --git a/structured_logging.py b/structured_logging.py new file mode 100644 index 00000000..e1c8cf0c --- /dev/null +++ b/structured_logging.py @@ -0,0 +1,574 @@ +""" +Structured Logging Module +========================= + +Enhanced logging with structured JSON format, filtering, and export capabilities. 
+ +Features: +- JSON-formatted logs with consistent schema +- Filter by agent, feature, level +- Full-text search +- Timeline view for agent activity +- Export logs for offline analysis + +Log Format: +{ + "timestamp": "2025-01-21T10:30:00.000Z", + "level": "info|warn|error", + "agent_id": "coding-42", + "feature_id": 42, + "tool_name": "feature_mark_passing", + "duration_ms": 150, + "message": "Feature marked as passing" +} +""" + +import json +import logging +import os +import re +import sqlite3 +import threading +from dataclasses import dataclass, field, asdict +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any, Optional, Literal + +# Type aliases +LogLevel = Literal["debug", "info", "warn", "error"] + + +@dataclass +class StructuredLogEntry: + """A structured log entry with all metadata.""" + + timestamp: str + level: LogLevel + message: str + agent_id: Optional[str] = None + feature_id: Optional[int] = None + tool_name: Optional[str] = None + duration_ms: Optional[int] = None + extra: dict = field(default_factory=dict) + + def to_dict(self) -> dict: + """Convert to dictionary, excluding None values.""" + result = { + "timestamp": self.timestamp, + "level": self.level, + "message": self.message, + } + if self.agent_id: + result["agent_id"] = self.agent_id + if self.feature_id is not None: + result["feature_id"] = self.feature_id + if self.tool_name: + result["tool_name"] = self.tool_name + if self.duration_ms is not None: + result["duration_ms"] = self.duration_ms + if self.extra: + result["extra"] = self.extra + return result + + def to_json(self) -> str: + """Convert to JSON string.""" + return json.dumps(self.to_dict()) + + +class StructuredLogHandler(logging.Handler): + """ + Custom logging handler that stores structured logs in SQLite. + + Thread-safe for concurrent agent logging. 
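+
+    Usually attached by StructuredLogger; a standalone sketch (the path and
+    agent id are illustrative):
+
+        handler = StructuredLogHandler(Path(".autocoder/logs.db"), agent_id="coding-1")
+        logging.getLogger("autocoder.coding-1").addHandler(handler)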
+ """ + + def __init__( + self, + db_path: Path, + agent_id: Optional[str] = None, + max_entries: int = 10000, + ): + super().__init__() + self.db_path = db_path + self.agent_id = agent_id + self.max_entries = max_entries + self._lock = threading.Lock() + self._init_database() + + def _init_database(self) -> None: + """Initialize the SQLite database for logs.""" + with self._lock: + conn = sqlite3.connect(self.db_path) + cursor = conn.cursor() + + # Create logs table + cursor.execute(""" + CREATE TABLE IF NOT EXISTS logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TEXT NOT NULL, + level TEXT NOT NULL, + message TEXT NOT NULL, + agent_id TEXT, + feature_id INTEGER, + tool_name TEXT, + duration_ms INTEGER, + extra TEXT + ) + """) + + # Create indexes for common queries + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_logs_timestamp + ON logs(timestamp) + """) + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_logs_level + ON logs(level) + """) + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_logs_agent_id + ON logs(agent_id) + """) + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_logs_feature_id + ON logs(feature_id) + """) + + conn.commit() + conn.close() + + def emit(self, record: logging.LogRecord) -> None: + """Store a log record in the database.""" + try: + # Extract structured data from record + entry = StructuredLogEntry( + timestamp=datetime.utcnow().isoformat() + "Z", + level=record.levelname.lower(), + message=self.format(record), + agent_id=getattr(record, "agent_id", self.agent_id), + feature_id=getattr(record, "feature_id", None), + tool_name=getattr(record, "tool_name", None), + duration_ms=getattr(record, "duration_ms", None), + extra=getattr(record, "extra", {}), + ) + + with self._lock: + conn = sqlite3.connect(self.db_path) + cursor = conn.cursor() + + cursor.execute( + """ + INSERT INTO logs + (timestamp, level, message, agent_id, feature_id, tool_name, duration_ms, extra) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + entry.timestamp, + entry.level, + entry.message, + entry.agent_id, + entry.feature_id, + entry.tool_name, + entry.duration_ms, + json.dumps(entry.extra) if entry.extra else None, + ), + ) + + # Cleanup old entries if over limit + cursor.execute("SELECT COUNT(*) FROM logs") + count = cursor.fetchone()[0] + if count > self.max_entries: + delete_count = count - self.max_entries + cursor.execute( + """ + DELETE FROM logs WHERE id IN ( + SELECT id FROM logs ORDER BY timestamp ASC LIMIT ? + ) + """, + (delete_count,), + ) + + conn.commit() + conn.close() + + except Exception: + self.handleError(record) + + +class StructuredLogger: + """ + Enhanced logger with structured logging capabilities. 
+ + Usage: + logger = StructuredLogger(project_dir, agent_id="coding-1") + logger.info("Starting feature", feature_id=42) + logger.error("Test failed", feature_id=42, tool_name="playwright") + """ + + def __init__( + self, + project_dir: Path, + agent_id: Optional[str] = None, + console_output: bool = True, + ): + self.project_dir = Path(project_dir) + self.agent_id = agent_id + self.db_path = self.project_dir / ".autocoder" / "logs.db" + + # Ensure directory exists + self.db_path.parent.mkdir(parents=True, exist_ok=True) + + # Setup logger + self.logger = logging.getLogger(f"autocoder.{agent_id or 'main'}") + self.logger.setLevel(logging.DEBUG) + + # Clear existing handlers + self.logger.handlers.clear() + + # Add structured handler + self.handler = StructuredLogHandler(self.db_path, agent_id) + self.handler.setFormatter(logging.Formatter("%(message)s")) + self.logger.addHandler(self.handler) + + # Add console handler if requested + if console_output: + console = logging.StreamHandler() + console.setLevel(logging.INFO) + console.setFormatter( + logging.Formatter("%(asctime)s [%(levelname)s] %(message)s") + ) + self.logger.addHandler(console) + + def _log( + self, + level: str, + message: str, + feature_id: Optional[int] = None, + tool_name: Optional[str] = None, + duration_ms: Optional[int] = None, + **extra, + ) -> None: + """Internal logging method with structured data.""" + record_extra = { + "agent_id": self.agent_id, + "feature_id": feature_id, + "tool_name": tool_name, + "duration_ms": duration_ms, + "extra": extra, + } + + # Use LogRecord extras + getattr(self.logger, level)( + message, + extra=record_extra, + ) + + def debug(self, message: str, **kwargs) -> None: + """Log debug message.""" + self._log("debug", message, **kwargs) + + def info(self, message: str, **kwargs) -> None: + """Log info message.""" + self._log("info", message, **kwargs) + + def warn(self, message: str, **kwargs) -> None: + """Log warning message.""" + self._log("warning", message, **kwargs) + + def warning(self, message: str, **kwargs) -> None: + """Log warning message (alias).""" + self._log("warning", message, **kwargs) + + def error(self, message: str, **kwargs) -> None: + """Log error message.""" + self._log("error", message, **kwargs) + + +class LogQuery: + """ + Query interface for structured logs. + + Supports filtering, searching, and aggregation. + """ + + def __init__(self, db_path: Path): + self.db_path = db_path + + def _connect(self) -> sqlite3.Connection: + """Get database connection.""" + conn = sqlite3.connect(self.db_path) + conn.row_factory = sqlite3.Row + return conn + + def query( + self, + level: Optional[LogLevel] = None, + agent_id: Optional[str] = None, + feature_id: Optional[int] = None, + tool_name: Optional[str] = None, + search: Optional[str] = None, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + limit: int = 100, + offset: int = 0, + ) -> list[dict]: + """ + Query logs with filters. 
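+
+        A sketch of a filtered search (the db_path and filter values are
+        illustrative):
+
+            since = datetime.utcnow() - timedelta(hours=6)
+            failures = LogQuery(db_path).query(search="failed", since=since, limit=20)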
+ + Args: + level: Filter by log level + agent_id: Filter by agent ID + feature_id: Filter by feature ID + tool_name: Filter by tool name + search: Full-text search in message + since: Start datetime + until: End datetime + limit: Max results + offset: Pagination offset + + Returns: + List of log entries as dicts + """ + conn = self._connect() + cursor = conn.cursor() + + conditions = [] + params = [] + + if level: + conditions.append("level = ?") + params.append(level) + + if agent_id: + conditions.append("agent_id = ?") + params.append(agent_id) + + if feature_id is not None: + conditions.append("feature_id = ?") + params.append(feature_id) + + if tool_name: + conditions.append("tool_name = ?") + params.append(tool_name) + + if search: + conditions.append("message LIKE ?") + params.append(f"%{search}%") + + if since: + conditions.append("timestamp >= ?") + params.append(since.isoformat()) + + if until: + conditions.append("timestamp <= ?") + params.append(until.isoformat()) + + where_clause = " AND ".join(conditions) if conditions else "1=1" + + query = f""" + SELECT * FROM logs + WHERE {where_clause} + ORDER BY timestamp DESC + LIMIT ? OFFSET ? + """ + params.extend([limit, offset]) + + cursor.execute(query, params) + rows = cursor.fetchall() + conn.close() + + return [dict(row) for row in rows] + + def count( + self, + level: Optional[LogLevel] = None, + agent_id: Optional[str] = None, + feature_id: Optional[int] = None, + since: Optional[datetime] = None, + ) -> int: + """Count logs matching filters.""" + conn = self._connect() + cursor = conn.cursor() + + conditions = [] + params = [] + + if level: + conditions.append("level = ?") + params.append(level) + if agent_id: + conditions.append("agent_id = ?") + params.append(agent_id) + if feature_id is not None: + conditions.append("feature_id = ?") + params.append(feature_id) + if since: + conditions.append("timestamp >= ?") + params.append(since.isoformat()) + + where_clause = " AND ".join(conditions) if conditions else "1=1" + cursor.execute(f"SELECT COUNT(*) FROM logs WHERE {where_clause}", params) + count = cursor.fetchone()[0] + conn.close() + return count + + def get_timeline( + self, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + bucket_minutes: int = 5, + ) -> list[dict]: + """ + Get activity timeline bucketed by time intervals. + + Returns list of buckets with counts per agent. + """ + conn = self._connect() + cursor = conn.cursor() + + # Default to last 24 hours + if not since: + since = datetime.utcnow() - timedelta(hours=24) + if not until: + until = datetime.utcnow() + + cursor.execute( + """ + SELECT + strftime('%Y-%m-%d %H:', timestamp) || + printf('%02d', (CAST(strftime('%M', timestamp) AS INTEGER) / ?) * ?) || ':00' as bucket, + agent_id, + COUNT(*) as count, + SUM(CASE WHEN level = 'error' THEN 1 ELSE 0 END) as errors + FROM logs + WHERE timestamp >= ? AND timestamp <= ? 
+ GROUP BY bucket, agent_id + ORDER BY bucket + """, + (bucket_minutes, bucket_minutes, since.isoformat(), until.isoformat()), + ) + + rows = cursor.fetchall() + conn.close() + + # Group by bucket + buckets = {} + for row in rows: + bucket = row["bucket"] + if bucket not in buckets: + buckets[bucket] = {"timestamp": bucket, "agents": {}, "total": 0, "errors": 0} + agent = row["agent_id"] or "main" + buckets[bucket]["agents"][agent] = row["count"] + buckets[bucket]["total"] += row["count"] + buckets[bucket]["errors"] += row["errors"] + + return list(buckets.values()) + + def get_agent_stats(self, since: Optional[datetime] = None) -> list[dict]: + """Get log statistics per agent.""" + conn = self._connect() + cursor = conn.cursor() + + params = [] + where_clause = "1=1" + if since: + where_clause = "timestamp >= ?" + params.append(since.isoformat()) + + cursor.execute( + f""" + SELECT + agent_id, + COUNT(*) as total, + SUM(CASE WHEN level = 'info' THEN 1 ELSE 0 END) as info_count, + SUM(CASE WHEN level = 'warn' OR level = 'warning' THEN 1 ELSE 0 END) as warn_count, + SUM(CASE WHEN level = 'error' THEN 1 ELSE 0 END) as error_count, + MIN(timestamp) as first_log, + MAX(timestamp) as last_log + FROM logs + WHERE {where_clause} + GROUP BY agent_id + ORDER BY total DESC + """, + params, + ) + + rows = cursor.fetchall() + conn.close() + return [dict(row) for row in rows] + + def export_logs( + self, + output_path: Path, + format: Literal["json", "jsonl", "csv"] = "jsonl", + **filters, + ) -> int: + """ + Export logs to file. + + Args: + output_path: Output file path + format: Export format (json, jsonl, csv) + **filters: Query filters + + Returns: + Number of exported entries + """ + # Get all matching logs + logs = self.query(limit=1000000, **filters) + + output_path = Path(output_path) + output_path.parent.mkdir(parents=True, exist_ok=True) + + if format == "json": + with open(output_path, "w") as f: + json.dump(logs, f, indent=2) + + elif format == "jsonl": + with open(output_path, "w") as f: + for log in logs: + f.write(json.dumps(log) + "\n") + + elif format == "csv": + import csv + + if logs: + with open(output_path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=logs[0].keys()) + writer.writeheader() + writer.writerows(logs) + + return len(logs) + + +def get_logger( + project_dir: Path, + agent_id: Optional[str] = None, + console_output: bool = True, +) -> StructuredLogger: + """ + Get or create a structured logger for a project. + + Args: + project_dir: Project directory + agent_id: Agent identifier (e.g., "coding-1", "initializer") + console_output: Whether to also log to console + + Returns: + StructuredLogger instance + """ + return StructuredLogger(project_dir, agent_id, console_output) + + +def get_log_query(project_dir: Path) -> LogQuery: + """ + Get log query interface for a project. 
+ + Args: + project_dir: Project directory + + Returns: + LogQuery instance + """ + db_path = Path(project_dir) / ".autocoder" / "logs.db" + return LogQuery(db_path) From fbd4eea31504017ebe9a3bf2c1717e9e875a8d21 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:28:18 +0100 Subject: [PATCH 18/81] docs(fork): update changelog with Enhanced Logging feature Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 56 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index e150ecde..4e214dcd 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -9,6 +9,60 @@ Format based on [Keep a Changelog](https://keepachangelog.com/). - Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] Enhanced Logging System + +### Added +- New module: `structured_logging.py` - Structured JSON logging with SQLite storage +- New router: `server/routers/logs.py` - REST API for log querying and export + +### Log Format +```json +{ + "timestamp": "2025-01-21T10:30:00.000Z", + "level": "info|warn|error", + "agent_id": "coding-42", + "feature_id": 42, + "tool_name": "feature_mark_passing", + "duration_ms": 150, + "message": "Feature marked as passing" +} +``` + +### API Endpoints +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/logs/{project_name}` | GET | Query logs with filters | +| `/api/logs/{project_name}/timeline` | GET | Get activity timeline | +| `/api/logs/{project_name}/stats` | GET | Get per-agent statistics | +| `/api/logs/export` | POST | Export logs to file | +| `/api/logs/{project_name}/download/{filename}` | GET | Download exported file | + +### Features +- Filter by level, agent, feature, tool +- Full-text search in messages +- Timeline view bucketed by configurable intervals +- Per-agent statistics (info/warn/error counts) +- Export to JSON, JSONL, CSV formats +- Auto-cleanup old logs (configurable max entries) + +### Usage +```python +from structured_logging import get_logger, get_log_query + +# Create logger for an agent +logger = get_logger(project_dir, agent_id="coding-1") +logger.info("Starting feature", feature_id=42) +logger.error("Test failed", feature_id=42, tool_name="playwright") + +# Query logs +query = get_log_query(project_dir) +logs = query.query(level="error", agent_id="coding-1", limit=50) +timeline = query.get_timeline(since_hours=24) +stats = query.get_agent_stats() +``` + +--- + ## [2025-01-21] Import Project API (Import Projects - Phase 2) ### Added @@ -189,7 +243,7 @@ When adding a new feature, use this template: The following features are planned for implementation: ### Phase 1: Foundation (Quick Wins) -- [ ] Enhanced Logging - Structured logs with filtering +- [x] Enhanced Logging - Structured logs with filtering ✅ - [x] Quality Gates - Lint/type-check before marking passing ✅ - [ ] Security Scanning - Detect vulnerabilities From c3b20bfaaeb839197345e1cc77a66377fcb4290d Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:30:27 +0100 Subject: [PATCH 19/81] feat(security): add security scanning module - Add SecurityScanner class for vulnerability detection - Scan dependencies with npm audit / pip-audit - Detect hardcoded secrets (API keys, passwords, tokens) - Pattern matching for SQL injection, XSS, command injection - Add REST API endpoints for scanning and reports - Save reports to .autocoder/security-reports/ Co-Authored-By: Claude Opus 4.5 --- 
security_scanner.py | 696 +++++++++++++++++++++++++++++++++++++ server/main.py | 2 + server/routers/__init__.py | 2 + server/routers/security.py | 212 +++++++++++ 4 files changed, 912 insertions(+) create mode 100644 security_scanner.py create mode 100644 server/routers/security.py diff --git a/security_scanner.py b/security_scanner.py new file mode 100644 index 00000000..e7cb10ae --- /dev/null +++ b/security_scanner.py @@ -0,0 +1,696 @@ +""" +Security Scanner Module +======================= + +Detect vulnerabilities in generated code and dependencies. + +Features: +- Dependency scanning (npm audit, pip-audit/safety) +- Secret detection (API keys, passwords, tokens) +- Code vulnerability patterns (SQL injection, XSS, command injection) +- OWASP Top 10 pattern matching + +Integration: +- Can be run standalone or as part of quality gates +- Results stored in project's .autocoder/security-reports/ +""" + +import json +import os +import re +import shutil +import subprocess +from dataclasses import dataclass, field +from datetime import datetime +from enum import Enum +from pathlib import Path +from typing import Optional + + +class Severity(str, Enum): + """Vulnerability severity levels.""" + + CRITICAL = "critical" + HIGH = "high" + MEDIUM = "medium" + LOW = "low" + INFO = "info" + + +class VulnerabilityType(str, Enum): + """Types of vulnerabilities detected.""" + + DEPENDENCY = "dependency" + SECRET = "secret" + SQL_INJECTION = "sql_injection" + XSS = "xss" + COMMAND_INJECTION = "command_injection" + PATH_TRAVERSAL = "path_traversal" + INSECURE_CRYPTO = "insecure_crypto" + HARDCODED_CREDENTIAL = "hardcoded_credential" + SENSITIVE_DATA_EXPOSURE = "sensitive_data_exposure" + OTHER = "other" + + +@dataclass +class Vulnerability: + """A detected vulnerability.""" + + type: VulnerabilityType + severity: Severity + title: str + description: str + file_path: Optional[str] = None + line_number: Optional[int] = None + code_snippet: Optional[str] = None + recommendation: Optional[str] = None + cwe_id: Optional[str] = None + package_name: Optional[str] = None + package_version: Optional[str] = None + + def to_dict(self) -> dict: + """Convert to dictionary.""" + result = { + "type": self.type.value, + "severity": self.severity.value, + "title": self.title, + "description": self.description, + } + if self.file_path: + result["file_path"] = self.file_path + if self.line_number: + result["line_number"] = self.line_number + if self.code_snippet: + result["code_snippet"] = self.code_snippet + if self.recommendation: + result["recommendation"] = self.recommendation + if self.cwe_id: + result["cwe_id"] = self.cwe_id + if self.package_name: + result["package_name"] = self.package_name + if self.package_version: + result["package_version"] = self.package_version + return result + + +@dataclass +class ScanResult: + """Result of a security scan.""" + + project_dir: str + scan_time: str + vulnerabilities: list[Vulnerability] = field(default_factory=list) + summary: dict = field(default_factory=dict) + scans_run: list[str] = field(default_factory=list) + + def to_dict(self) -> dict: + """Convert to dictionary.""" + return { + "project_dir": self.project_dir, + "scan_time": self.scan_time, + "vulnerabilities": [v.to_dict() for v in self.vulnerabilities], + "summary": self.summary, + "scans_run": self.scans_run, + "total_issues": len(self.vulnerabilities), + "by_severity": { + "critical": len([v for v in self.vulnerabilities if v.severity == Severity.CRITICAL]), + "high": len([v for v in self.vulnerabilities if 
v.severity == Severity.HIGH]), + "medium": len([v for v in self.vulnerabilities if v.severity == Severity.MEDIUM]), + "low": len([v for v in self.vulnerabilities if v.severity == Severity.LOW]), + "info": len([v for v in self.vulnerabilities if v.severity == Severity.INFO]), + }, + } + + +# ============================================================================ +# Secret Patterns +# ============================================================================ + +SECRET_PATTERNS = [ + # API Keys + ( + r'(?i)(api[_-]?key|apikey)\s*[=:]\s*["\']?([a-zA-Z0-9_\-]{20,})["\']?', + "API Key Detected", + Severity.HIGH, + "CWE-798", + ), + # AWS Keys + ( + r'(?i)(AKIA[0-9A-Z]{16})', + "AWS Access Key ID", + Severity.CRITICAL, + "CWE-798", + ), + ( + r'(?i)aws[_-]?secret[_-]?access[_-]?key\s*[=:]\s*["\']?([a-zA-Z0-9/+=]{40})["\']?', + "AWS Secret Access Key", + Severity.CRITICAL, + "CWE-798", + ), + # Private Keys + ( + r'-----BEGIN (RSA |EC |DSA )?PRIVATE KEY-----', + "Private Key Detected", + Severity.CRITICAL, + "CWE-321", + ), + # Passwords + ( + r'(?i)(password|passwd|pwd)\s*[=:]\s*["\']([^"\']{8,})["\']', + "Hardcoded Password", + Severity.HIGH, + "CWE-798", + ), + # Generic Secrets + ( + r'(?i)(secret|token|auth)[_-]?(key|token)?\s*[=:]\s*["\']?([a-zA-Z0-9_\-]{20,})["\']?', + "Secret/Token Detected", + Severity.HIGH, + "CWE-798", + ), + # Database Connection Strings + ( + r'(?i)(mongodb|postgres|mysql|redis)://[^"\'\s]+:[^"\'\s]+@', + "Database Connection String with Credentials", + Severity.HIGH, + "CWE-798", + ), + # JWT Tokens + ( + r'eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*', + "JWT Token Detected", + Severity.MEDIUM, + "CWE-200", + ), + # GitHub Tokens + ( + r'gh[pousr]_[A-Za-z0-9_]{36,}', + "GitHub Token Detected", + Severity.CRITICAL, + "CWE-798", + ), + # Slack Tokens + ( + r'xox[baprs]-[0-9]{10,13}-[0-9]{10,13}[a-zA-Z0-9-]*', + "Slack Token Detected", + Severity.HIGH, + "CWE-798", + ), +] + +# ============================================================================ +# Code Vulnerability Patterns +# ============================================================================ + +CODE_PATTERNS = [ + # SQL Injection + ( + r'(?i)execute\s*\(\s*["\'].*\%.*["\'].*%', + "Potential SQL Injection (string formatting)", + VulnerabilityType.SQL_INJECTION, + Severity.HIGH, + "CWE-89", + "Use parameterized queries instead of string formatting", + ), + ( + r'(?i)(cursor\.execute|db\.execute|connection\.execute)\s*\(\s*f["\']', + "Potential SQL Injection (f-string)", + VulnerabilityType.SQL_INJECTION, + Severity.HIGH, + "CWE-89", + "Use parameterized queries instead of f-strings", + ), + ( + r'(?i)query\s*=\s*["\']SELECT.*\+', + "Potential SQL Injection (string concatenation)", + VulnerabilityType.SQL_INJECTION, + Severity.HIGH, + "CWE-89", + "Use parameterized queries instead of string concatenation", + ), + # XSS + ( + r'(?i)innerHTML\s*=\s*[^"\']*\+', + "Potential XSS (innerHTML with concatenation)", + VulnerabilityType.XSS, + Severity.HIGH, + "CWE-79", + "Use textContent or sanitize HTML before setting innerHTML", + ), + ( + r'(?i)document\.write\s*\(', + "Potential XSS (document.write)", + VulnerabilityType.XSS, + Severity.MEDIUM, + "CWE-79", + "Avoid document.write, use DOM manipulation instead", + ), + ( + r'(?i)dangerouslySetInnerHTML', + "React dangerouslySetInnerHTML usage", + VulnerabilityType.XSS, + Severity.MEDIUM, + "CWE-79", + "Ensure content is properly sanitized before using dangerouslySetInnerHTML", + ), + # Command Injection + ( + 
r'(?i)(subprocess\.call|subprocess\.run|os\.system|os\.popen)\s*\([^)]*\+', + "Potential Command Injection (string concatenation)", + VulnerabilityType.COMMAND_INJECTION, + Severity.CRITICAL, + "CWE-78", + "Use subprocess with list arguments and avoid shell=True", + ), + ( + r'(?i)shell\s*=\s*True', + "Subprocess with shell=True", + VulnerabilityType.COMMAND_INJECTION, + Severity.MEDIUM, + "CWE-78", + "Avoid shell=True, use list arguments instead", + ), + ( + r'(?i)exec\s*\(\s*[^"\']*\+', + "Potential Code Injection (exec with concatenation)", + VulnerabilityType.COMMAND_INJECTION, + Severity.CRITICAL, + "CWE-94", + "Avoid using exec with user-controlled input", + ), + ( + r'(?i)eval\s*\(\s*[^"\']*\+', + "Potential Code Injection (eval with concatenation)", + VulnerabilityType.COMMAND_INJECTION, + Severity.CRITICAL, + "CWE-94", + "Avoid using eval with user-controlled input", + ), + # Path Traversal + ( + r'(?i)(open|read|write)\s*\([^)]*\+[^)]*\)', + "Potential Path Traversal (file operation with concatenation)", + VulnerabilityType.PATH_TRAVERSAL, + Severity.MEDIUM, + "CWE-22", + "Validate and sanitize file paths before use", + ), + # Insecure Crypto + ( + r'(?i)(md5|sha1)\s*\(', + "Weak Cryptographic Hash (MD5/SHA1)", + VulnerabilityType.INSECURE_CRYPTO, + Severity.LOW, + "CWE-328", + "Use SHA-256 or stronger for security-sensitive operations", + ), + ( + r'(?i)random\.random\s*\(', + "Insecure Random Number Generator", + VulnerabilityType.INSECURE_CRYPTO, + Severity.LOW, + "CWE-330", + "Use secrets module for security-sensitive random values", + ), + # Sensitive Data + ( + r'(?i)console\.(log|info|debug)\s*\([^)]*password', + "Password logged to console", + VulnerabilityType.SENSITIVE_DATA_EXPOSURE, + Severity.MEDIUM, + "CWE-532", + "Remove sensitive data from log statements", + ), + ( + r'(?i)print\s*\([^)]*password', + "Password printed to output", + VulnerabilityType.SENSITIVE_DATA_EXPOSURE, + Severity.MEDIUM, + "CWE-532", + "Remove sensitive data from print statements", + ), +] + + +class SecurityScanner: + """ + Security scanner for detecting vulnerabilities in code and dependencies. + + Usage: + scanner = SecurityScanner(project_dir) + result = scanner.scan() + print(f"Found {len(result.vulnerabilities)} issues") + """ + + def __init__(self, project_dir: Path): + self.project_dir = Path(project_dir) + + def scan( + self, + scan_dependencies: bool = True, + scan_secrets: bool = True, + scan_code: bool = True, + save_report: bool = True, + ) -> ScanResult: + """ + Run security scan on the project. 
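+
+        A minimal illustrative call (the project path and flag values are
+        placeholders, not defaults):
+
+            from pathlib import Path
+            from security_scanner import SecurityScanner
+
+            scanner = SecurityScanner(Path("./my-project"))
+            result = scanner.scan(scan_dependencies=False, save_report=False)
+            print(result.summary["total_issues"])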
+ + Args: + scan_dependencies: Run npm audit / pip-audit + scan_secrets: Scan for hardcoded secrets + scan_code: Scan for code vulnerabilities + save_report: Save report to .autocoder/security-reports/ + + Returns: + ScanResult with all findings + """ + result = ScanResult( + project_dir=str(self.project_dir), + scan_time=datetime.utcnow().isoformat() + "Z", + ) + + if scan_dependencies: + self._scan_dependencies(result) + + if scan_secrets: + self._scan_secrets(result) + + if scan_code: + self._scan_code_patterns(result) + + # Generate summary + result.summary = { + "total_issues": len(result.vulnerabilities), + "critical": len([v for v in result.vulnerabilities if v.severity == Severity.CRITICAL]), + "high": len([v for v in result.vulnerabilities if v.severity == Severity.HIGH]), + "medium": len([v for v in result.vulnerabilities if v.severity == Severity.MEDIUM]), + "low": len([v for v in result.vulnerabilities if v.severity == Severity.LOW]), + "has_critical_or_high": any( + v.severity in (Severity.CRITICAL, Severity.HIGH) + for v in result.vulnerabilities + ), + } + + if save_report: + self._save_report(result) + + return result + + def _scan_dependencies(self, result: ScanResult) -> None: + """Scan dependencies for known vulnerabilities.""" + # Check for npm + if (self.project_dir / "package.json").exists(): + self._run_npm_audit(result) + + # Check for Python + if (self.project_dir / "requirements.txt").exists() or ( + self.project_dir / "pyproject.toml" + ).exists(): + self._run_pip_audit(result) + + def _run_npm_audit(self, result: ScanResult) -> None: + """Run npm audit and parse results.""" + result.scans_run.append("npm_audit") + + try: + proc = subprocess.run( + ["npm", "audit", "--json"], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=120, + ) + + if proc.stdout: + try: + audit_data = json.loads(proc.stdout) + + # Parse vulnerabilities from npm audit output + vulns = audit_data.get("vulnerabilities", {}) + for pkg_name, pkg_info in vulns.items(): + severity_str = pkg_info.get("severity", "medium") + severity_map = { + "critical": Severity.CRITICAL, + "high": Severity.HIGH, + "moderate": Severity.MEDIUM, + "low": Severity.LOW, + "info": Severity.INFO, + } + severity = severity_map.get(severity_str, Severity.MEDIUM) + + via = pkg_info.get("via", []) + description = "" + if via and isinstance(via[0], dict): + description = via[0].get("title", "") + elif via and isinstance(via[0], str): + description = f"Vulnerable through {via[0]}" + + result.vulnerabilities.append( + Vulnerability( + type=VulnerabilityType.DEPENDENCY, + severity=severity, + title=f"Vulnerable dependency: {pkg_name}", + description=description or "Known vulnerability in package", + package_name=pkg_name, + package_version=pkg_info.get("range"), + recommendation=f"Run: npm update {pkg_name}", + ) + ) + except json.JSONDecodeError: + pass + + except subprocess.TimeoutExpired: + pass + except FileNotFoundError: + pass + + def _run_pip_audit(self, result: ScanResult) -> None: + """Run pip-audit and parse results.""" + result.scans_run.append("pip_audit") + + # Try pip-audit first + pip_audit_path = shutil.which("pip-audit") + if pip_audit_path: + try: + proc = subprocess.run( + ["pip-audit", "--format", "json", "-r", "requirements.txt"], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=120, + ) + + if proc.stdout: + try: + vulns = json.loads(proc.stdout) + for vuln in vulns: + severity_map = { + "CRITICAL": Severity.CRITICAL, + "HIGH": Severity.HIGH, + "MEDIUM": 
Severity.MEDIUM, + "LOW": Severity.LOW, + } + result.vulnerabilities.append( + Vulnerability( + type=VulnerabilityType.DEPENDENCY, + severity=severity_map.get( + vuln.get("severity", "MEDIUM"), Severity.MEDIUM + ), + title=f"Vulnerable dependency: {vuln.get('name')}", + description=vuln.get("description", ""), + package_name=vuln.get("name"), + package_version=vuln.get("version"), + cwe_id=vuln.get("id"), + recommendation=f"Upgrade to {vuln.get('fix_versions', ['latest'])[0] if vuln.get('fix_versions') else 'latest'}", + ) + ) + except json.JSONDecodeError: + pass + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + + # Try safety as fallback + safety_path = shutil.which("safety") + if safety_path and not any( + v.type == VulnerabilityType.DEPENDENCY + for v in result.vulnerabilities + if v.package_name + ): + try: + proc = subprocess.run( + ["safety", "check", "--json", "-r", "requirements.txt"], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=120, + ) + + if proc.stdout: + try: + # Safety JSON format is different + safety_data = json.loads(proc.stdout) + # Parse safety output (format varies by version) + if isinstance(safety_data, list): + for item in safety_data: + if isinstance(item, list) and len(item) >= 4: + result.vulnerabilities.append( + Vulnerability( + type=VulnerabilityType.DEPENDENCY, + severity=Severity.MEDIUM, + title=f"Vulnerable dependency: {item[0]}", + description=item[3] if len(item) > 3 else "", + package_name=item[0], + package_version=item[1] if len(item) > 1 else None, + ) + ) + except json.JSONDecodeError: + pass + except (subprocess.TimeoutExpired, FileNotFoundError): + pass + + def _scan_secrets(self, result: ScanResult) -> None: + """Scan files for hardcoded secrets.""" + result.scans_run.append("secret_detection") + + # File extensions to scan + extensions = { + ".py", ".js", ".ts", ".tsx", ".jsx", + ".json", ".yaml", ".yml", ".toml", + ".env", ".env.local", ".env.example", + ".sh", ".bash", ".zsh", + ".md", ".txt", + } + + # Directories to skip + skip_dirs = { + "node_modules", "venv", ".venv", "__pycache__", + ".git", "dist", "build", ".next", + "vendor", "packages", + } + + for file_path in self._iter_files(extensions, skip_dirs): + try: + content = file_path.read_text(errors="ignore") + lines = content.split("\n") + + for pattern, title, severity, cwe_id in SECRET_PATTERNS: + for i, line in enumerate(lines, 1): + if re.search(pattern, line): + # Skip if it looks like an example or placeholder + if any( + placeholder in line.lower() + for placeholder in [ + "example", + "your_", + ] + ): + continue + + result.vulnerabilities.append( + Vulnerability( + type=VulnerabilityType.SECRET, + severity=severity, + title=title, + description="Possible hardcoded secret detected", + file_path=str(file_path.relative_to(self.project_dir)), + line_number=i, + code_snippet=line.strip()[:100] + "..." if len(line) > 100 else line, + cwe_id=cwe_id, + recommendation="Move sensitive values to environment variables", + ) + ) + except Exception: + continue + + def _scan_code_patterns(self, result: ScanResult) -> None: + """Scan code for vulnerability patterns.""" + result.scans_run.append("code_patterns") + + # File extensions to scan + extensions = {".py", ".js", ".ts", ".tsx", ".jsx"} + + # Directories to skip + skip_dirs = { + "node_modules", "venv", ".venv", "__pycache__", + ".git", "dist", "build", ".next", + } + + for file_path in self._iter_files(extensions, skip_dirs): + try: + content = file_path.read_text(errors="ignore") + lines = content.split("\n") + + for pattern, title, vuln_type, severity, cwe_id, recommendation in CODE_PATTERNS: + for i, line in enumerate(lines, 1): + if re.search(pattern, line): + result.vulnerabilities.append( + Vulnerability( + type=vuln_type, + severity=severity, + title=title, + description=f"Potential vulnerability 
pattern detected", + file_path=str(file_path.relative_to(self.project_dir)), + line_number=i, + code_snippet=line.strip()[:100], + cwe_id=cwe_id, + recommendation=recommendation, + ) + ) + except Exception: + continue + + def _iter_files( + self, extensions: set[str], skip_dirs: set[str] + ): + """Iterate over files with given extensions, skipping certain directories.""" + for root, dirs, files in os.walk(self.project_dir): + # Skip excluded directories + dirs[:] = [d for d in dirs if d not in skip_dirs and not d.startswith(".")] + + for file in files: + file_path = Path(root) / file + if file_path.suffix in extensions or file in {".env", ".env.local", ".env.example"}: + yield file_path + + def _save_report(self, result: ScanResult) -> None: + """Save scan report to file.""" + reports_dir = self.project_dir / ".autocoder" / "security-reports" + reports_dir.mkdir(parents=True, exist_ok=True) + + timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + report_path = reports_dir / f"security_scan_{timestamp}.json" + + with open(report_path, "w") as f: + json.dump(result.to_dict(), f, indent=2) + + +def scan_project( + project_dir: Path, + scan_dependencies: bool = True, + scan_secrets: bool = True, + scan_code: bool = True, +) -> ScanResult: + """ + Convenience function to scan a project. + + Args: + project_dir: Project directory + scan_dependencies: Run dependency audit + scan_secrets: Scan for secrets + scan_code: Scan for code patterns + + Returns: + ScanResult with findings + """ + scanner = SecurityScanner(project_dir) + return scanner.scan( + scan_dependencies=scan_dependencies, + scan_secrets=scan_secrets, + scan_code=scan_code, + ) diff --git a/server/main.py b/server/main.py index 2d2ac5bb..c586e5bd 100644 --- a/server/main.py +++ b/server/main.py @@ -31,6 +31,7 @@ import_project_router, logs_router, projects_router, + security_router, settings_router, spec_creation_router, terminal_router, @@ -121,6 +122,7 @@ async def require_localhost(request: Request, call_next): app.include_router(terminal_router) app.include_router(import_project_router) app.include_router(logs_router) +app.include_router(security_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index 79acc7e5..e18677e9 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -17,6 +17,7 @@ from .terminal import router as terminal_router from .import_project import router as import_project_router from .logs import router as logs_router +from .security import router as security_router __all__ = [ "projects_router", @@ -31,4 +32,5 @@ "terminal_router", "import_project_router", "logs_router", + "security_router", ] diff --git a/server/routers/security.py b/server/routers/security.py new file mode 100644 index 00000000..989768df --- /dev/null +++ b/server/routers/security.py @@ -0,0 +1,212 @@ +""" +Security Router +=============== + +REST API endpoints for security scanning. 
+ +Endpoints: +- POST /api/security/scan - Run security scan on a project +- GET /api/security/reports - List scan reports +- GET /api/security/reports/{filename} - Get a specific report +""" + +import json +import logging +from datetime import datetime +from pathlib import Path +from typing import Optional + +from fastapi import APIRouter, HTTPException, Query +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/security", tags=["security"]) + + +def _get_project_path(project_name: str) -> Path | None: + """Get project path from registry.""" + from registry import get_project_path + + return get_project_path(project_name) + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + + +class ScanRequest(BaseModel): + """Request to run a security scan.""" + + project_name: str = Field(..., description="Name of the registered project") + scan_dependencies: bool = Field(True, description="Run npm audit / pip-audit") + scan_secrets: bool = Field(True, description="Scan for hardcoded secrets") + scan_code: bool = Field(True, description="Scan for code vulnerability patterns") + + +class VulnerabilityInfo(BaseModel): + """Information about a detected vulnerability.""" + + type: str + severity: str + title: str + description: str + file_path: Optional[str] = None + line_number: Optional[int] = None + code_snippet: Optional[str] = None + recommendation: Optional[str] = None + cwe_id: Optional[str] = None + package_name: Optional[str] = None + package_version: Optional[str] = None + + +class ScanSummary(BaseModel): + """Summary of scan results.""" + + total_issues: int + critical: int + high: int + medium: int + low: int + has_critical_or_high: bool + + +class ScanResponse(BaseModel): + """Response from security scan.""" + + project_dir: str + scan_time: str + vulnerabilities: list[VulnerabilityInfo] + summary: ScanSummary + scans_run: list[str] + report_saved: bool + + +class ReportListResponse(BaseModel): + """Response listing available reports.""" + + reports: list[str] + count: int + + +# ============================================================================ +# REST Endpoints +# ============================================================================ + + +@router.post("/scan", response_model=ScanResponse) +async def run_security_scan(request: ScanRequest): + """ + Run a security scan on a project. + + Scans for: + - Vulnerable dependencies (npm audit, pip-audit) + - Hardcoded secrets (API keys, passwords, tokens) + - Code vulnerability patterns (SQL injection, XSS, etc.) 
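+
+    Example request body (the project name is a placeholder for a registered
+    project; all scan flags default to true):
+
+        {"project_name": "my-app", "scan_dependencies": true,
+         "scan_secrets": true, "scan_code": false}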
+ + Results are saved to .autocoder/security-reports/ + """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + try: + from security_scanner import scan_project + + result = scan_project( + project_dir, + scan_dependencies=request.scan_dependencies, + scan_secrets=request.scan_secrets, + scan_code=request.scan_code, + ) + + return ScanResponse( + project_dir=result.project_dir, + scan_time=result.scan_time, + vulnerabilities=[ + VulnerabilityInfo(**v.to_dict()) for v in result.vulnerabilities + ], + summary=ScanSummary(**result.summary), + scans_run=result.scans_run, + report_saved=True, + ) + + except Exception as e: + logger.exception(f"Error running security scan: {e}") + raise HTTPException(status_code=500, detail=f"Scan failed: {str(e)}") + + +@router.get("/reports/{project_name}", response_model=ReportListResponse) +async def list_reports(project_name: str): + """ + List available security scan reports for a project. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + reports_dir = project_dir / ".autocoder" / "security-reports" + if not reports_dir.exists(): + return ReportListResponse(reports=[], count=0) + + reports = sorted( + [f.name for f in reports_dir.glob("security_scan_*.json")], + reverse=True, + ) + + return ReportListResponse(reports=reports, count=len(reports)) + + +@router.get("/reports/{project_name}/{filename}") +async def get_report(project_name: str, filename: str): + """ + Get a specific security scan report. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + # Security: validate filename to prevent path traversal + if ".." in filename or "/" in filename or "\\" in filename: + raise HTTPException(status_code=400, detail="Invalid filename") + + if not filename.startswith("security_scan_") or not filename.endswith(".json"): + raise HTTPException(status_code=400, detail="Invalid report filename") + + report_path = project_dir / ".autocoder" / "security-reports" / filename + if not report_path.exists(): + raise HTTPException(status_code=404, detail="Report not found") + + try: + with open(report_path) as f: + return json.load(f) + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error reading report: {str(e)}") + + +@router.get("/latest/{project_name}") +async def get_latest_report(project_name: str): + """ + Get the most recent security scan report for a project. 
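+
+    Reports are ordered by filename, so the newest
+    security_scan_YYYYMMDD_HHMMSS.json file is parsed and returned.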
+ """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + reports_dir = project_dir / ".autocoder" / "security-reports" + if not reports_dir.exists(): + raise HTTPException(status_code=404, detail="No reports found") + + reports = sorted(reports_dir.glob("security_scan_*.json"), reverse=True) + if not reports: + raise HTTPException(status_code=404, detail="No reports found") + + try: + with open(reports[0]) as f: + return json.load(f) + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error reading report: {str(e)}") From f15e3945ed156c815aa6f28e467822169e4d66e8 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:30:50 +0100 Subject: [PATCH 20/81] docs(fork): update changelog with Security Scanning feature Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 51 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index 4e214dcd..f055fcc9 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -9,6 +9,55 @@ Format based on [Keep a Changelog](https://keepachangelog.com/). - Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] Security Scanning + +### Added +- New module: `security_scanner.py` - Vulnerability detection for code and dependencies +- New router: `server/routers/security.py` - REST API for security scanning + +### Vulnerability Types Detected +| Type | Description | +|------|-------------| +| Dependency | Vulnerable packages via npm audit / pip-audit | +| Secret | Hardcoded API keys, passwords, tokens | +| SQL Injection | String formatting in SQL queries | +| XSS | innerHTML, document.write, dangerouslySetInnerHTML | +| Command Injection | shell=True, exec/eval with concatenation | +| Path Traversal | File operations with string concatenation | +| Insecure Crypto | MD5/SHA1, random.random() | + +### API Endpoints +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/security/scan` | POST | Run security scan | +| `/api/security/reports/{project}` | GET | List scan reports | +| `/api/security/reports/{project}/{filename}` | GET | Get specific report | +| `/api/security/latest/{project}` | GET | Get latest report | + +### Secret Patterns Detected +- AWS Access Keys and Secret Keys +- GitHub Tokens +- Slack Tokens +- Private Keys (RSA, EC, DSA) +- Generic API keys and tokens +- Database connection strings with credentials +- JWT tokens + +### Usage +```python +from security_scanner import scan_project + +result = scan_project(project_dir) +print(f"Found {result.summary['total_issues']} issues") +print(f"Critical: {result.summary['critical']}") +print(f"High: {result.summary['high']}") +``` + +### Reports +Reports are saved to `.autocoder/security-reports/security_scan_YYYYMMDD_HHMMSS.json` + +--- + ## [2025-01-21] Enhanced Logging System ### Added @@ -245,7 +294,7 @@ The following features are planned for implementation: ### Phase 1: Foundation (Quick Wins) - [x] Enhanced Logging - Structured logs with filtering ✅ - [x] Quality Gates - Lint/type-check before marking passing ✅ -- [ ] Security Scanning - Detect vulnerabilities +- [x] Security Scanning - Detect vulnerabilities ✅ ### Phase 2: Import Projects - [x] Stack Detector - Detect React, Next.js, Express, FastAPI, Django, Vue.js ✅ From 47fcef28b155cd9dc56a78d654fb2e5ba1808337 Mon Sep 17 00:00:00 2001 From: 
cabana8471 Date: Wed, 21 Jan 2026 10:32:42 +0100 Subject: [PATCH 21/81] feat(git): add feature branches git workflow - Add GitWorkflow class for branch management - Support modes: feature_branches, trunk, none - Auto-create branches like feature/42-user-can-login - Add REST API endpoints for workflow operations - Configurable via .autocoder/config.json Co-Authored-By: Claude Opus 4.5 --- git_workflow.py | 527 +++++++++++++++++++++++++++++++++ server/main.py | 2 + server/routers/__init__.py | 2 + server/routers/git_workflow.py | 283 ++++++++++++++++++ 4 files changed, 814 insertions(+) create mode 100644 git_workflow.py create mode 100644 server/routers/git_workflow.py diff --git a/git_workflow.py b/git_workflow.py new file mode 100644 index 00000000..4af0702f --- /dev/null +++ b/git_workflow.py @@ -0,0 +1,527 @@ +""" +Git Workflow Module +=================== + +Professional git workflow with feature branches for Autocoder. + +Workflow Modes: +- feature_branches: Create branch per feature, merge on completion +- trunk: All changes on main branch (default) +- none: No git operations + +Branch naming: feature/{feature_id}-{slugified-name} +Example: feature/42-user-can-login +""" + +import logging +import re +import subprocess +from dataclasses import dataclass +from pathlib import Path +from typing import Literal, Optional + +logger = logging.getLogger(__name__) + +# Type alias for workflow modes +WorkflowMode = Literal["feature_branches", "trunk", "none"] + + +@dataclass +class BranchInfo: + """Information about a git branch.""" + + name: str + feature_id: Optional[int] = None + is_feature_branch: bool = False + is_current: bool = False + + +@dataclass +class WorkflowResult: + """Result of a workflow operation.""" + + success: bool + message: str + branch_name: Optional[str] = None + previous_branch: Optional[str] = None + + +def slugify(text: str) -> str: + """ + Convert text to URL-friendly slug. + + Example: "User can login" -> "user-can-login" + """ + # Convert to lowercase + text = text.lower() + # Replace spaces and underscores with hyphens + text = re.sub(r"[\s_]+", "-", text) + # Remove non-alphanumeric characters (except hyphens) + text = re.sub(r"[^a-z0-9-]", "", text) + # Remove consecutive hyphens + text = re.sub(r"-+", "-", text) + # Trim hyphens from ends + text = text.strip("-") + # Limit length + return text[:50] + + +def get_branch_name(feature_id: int, feature_name: str, prefix: str = "feature/") -> str: + """ + Generate branch name for a feature. + + Args: + feature_id: Feature ID + feature_name: Feature name + prefix: Branch prefix (default: "feature/") + + Returns: + Branch name like "feature/42-user-can-login" + """ + slug = slugify(feature_name) + return f"{prefix}{feature_id}-{slug}" + + +class GitWorkflow: + """ + Git workflow manager for feature branches. + + Usage: + workflow = GitWorkflow(project_dir, mode="feature_branches") + + # Start working on a feature + result = workflow.start_feature(42, "User can login") + # ... implement feature ... 
+ + # Complete feature (merge to main) + result = workflow.complete_feature(42) + + # Or abort feature + result = workflow.abort_feature(42) + """ + + def __init__( + self, + project_dir: Path, + mode: WorkflowMode = "trunk", + branch_prefix: str = "feature/", + main_branch: str = "main", + auto_merge: bool = False, + ): + self.project_dir = Path(project_dir) + self.mode = mode + self.branch_prefix = branch_prefix + self.main_branch = main_branch + self.auto_merge = auto_merge + + def _run_git(self, *args, check: bool = True) -> subprocess.CompletedProcess: + """Run a git command in the project directory.""" + cmd = ["git"] + list(args) + return subprocess.run( + cmd, + cwd=self.project_dir, + capture_output=True, + text=True, + check=check, + ) + + def _is_git_repo(self) -> bool: + """Check if directory is a git repository.""" + try: + self._run_git("rev-parse", "--git-dir") + return True + except subprocess.CalledProcessError: + return False + + def _get_current_branch(self) -> Optional[str]: + """Get name of current branch.""" + try: + result = self._run_git("rev-parse", "--abbrev-ref", "HEAD") + return result.stdout.strip() + except subprocess.CalledProcessError: + return None + + def _branch_exists(self, branch_name: str) -> bool: + """Check if a branch exists.""" + result = self._run_git("branch", "--list", branch_name, check=False) + return bool(result.stdout.strip()) + + def _has_uncommitted_changes(self) -> bool: + """Check for uncommitted changes.""" + result = self._run_git("status", "--porcelain", check=False) + return bool(result.stdout.strip()) + + def get_feature_branch(self, feature_id: int) -> Optional[str]: + """ + Find branch for a feature ID. + + Returns branch name if found, None otherwise. + """ + result = self._run_git("branch", "--list", f"{self.branch_prefix}{feature_id}-*", check=False) + branches = [b.strip().lstrip("* ") for b in result.stdout.strip().split("\n") if b.strip()] + return branches[0] if branches else None + + def start_feature(self, feature_id: int, feature_name: str) -> WorkflowResult: + """ + Start working on a feature (create and checkout branch). + + In trunk mode, this is a no-op. + In feature_branches mode, creates branch and checks it out. 
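+
+        For example, start_feature(42, "User can login") creates and checks out
+        a branch named "feature/42-user-can-login" (with the default prefix).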
+ + Args: + feature_id: Feature ID + feature_name: Feature name for branch naming + + Returns: + WorkflowResult with success status and branch info + """ + if self.mode == "none": + return WorkflowResult( + success=True, + message="Git workflow disabled", + ) + + if self.mode == "trunk": + return WorkflowResult( + success=True, + message="Using trunk-based development", + branch_name=self.main_branch, + ) + + # feature_branches mode + if not self._is_git_repo(): + return WorkflowResult( + success=False, + message="Not a git repository", + ) + + # Check for existing branch + existing_branch = self.get_feature_branch(feature_id) + if existing_branch: + # Switch to existing branch + try: + self._run_git("checkout", existing_branch) + return WorkflowResult( + success=True, + message=f"Switched to existing branch: {existing_branch}", + branch_name=existing_branch, + ) + except subprocess.CalledProcessError as e: + return WorkflowResult( + success=False, + message=f"Failed to checkout branch: {e.stderr}", + ) + + # Create new branch + branch_name = get_branch_name(feature_id, feature_name, self.branch_prefix) + current_branch = self._get_current_branch() + + try: + # Stash uncommitted changes if any + had_changes = self._has_uncommitted_changes() + if had_changes: + self._run_git("stash", "push", "-m", f"Auto-stash before feature/{feature_id}") + + # Create and checkout new branch from main + self._run_git("checkout", self.main_branch) + self._run_git("checkout", "-b", branch_name) + + # Apply stashed changes if any + if had_changes: + self._run_git("stash", "pop", check=False) + + logger.info(f"Created feature branch: {branch_name}") + return WorkflowResult( + success=True, + message=f"Created branch: {branch_name}", + branch_name=branch_name, + previous_branch=current_branch, + ) + + except subprocess.CalledProcessError as e: + return WorkflowResult( + success=False, + message=f"Failed to create branch: {e.stderr}", + ) + + def commit_feature_progress( + self, + feature_id: int, + message: str, + add_all: bool = True, + ) -> WorkflowResult: + """ + Commit current changes for a feature. + + Args: + feature_id: Feature ID + message: Commit message + add_all: Whether to add all changes + + Returns: + WorkflowResult with success status + """ + if self.mode == "none": + return WorkflowResult( + success=True, + message="Git workflow disabled", + ) + + if not self._is_git_repo(): + return WorkflowResult( + success=False, + message="Not a git repository", + ) + + try: + if add_all: + self._run_git("add", "-A") + + # Check if there are staged changes + result = self._run_git("diff", "--cached", "--quiet", check=False) + if result.returncode == 0: + return WorkflowResult( + success=True, + message="No changes to commit", + ) + + # Commit + full_message = f"feat(feature-{feature_id}): {message}" + self._run_git("commit", "-m", full_message) + + return WorkflowResult( + success=True, + message=f"Committed: {message}", + ) + + except subprocess.CalledProcessError as e: + return WorkflowResult( + success=False, + message=f"Commit failed: {e.stderr}", + ) + + def complete_feature(self, feature_id: int) -> WorkflowResult: + """ + Complete a feature (merge to main if auto_merge enabled). 
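+
+        With auto_merge disabled (the default), the feature branch is kept and
+        the result asks for a manual merge; with auto_merge enabled, the branch
+        is merged into the main branch with a --no-ff merge commit.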
+ + Args: + feature_id: Feature ID + + Returns: + WorkflowResult with success status + """ + if self.mode != "feature_branches": + return WorkflowResult( + success=True, + message="Feature branches not enabled", + ) + + branch_name = self.get_feature_branch(feature_id) + if not branch_name: + return WorkflowResult( + success=False, + message=f"No branch found for feature {feature_id}", + ) + + current_branch = self._get_current_branch() + + try: + # Commit any remaining changes + if self._has_uncommitted_changes(): + self._run_git("add", "-A") + self._run_git("commit", "-m", f"feat(feature-{feature_id}): final changes") + + if not self.auto_merge: + return WorkflowResult( + success=True, + message=f"Feature complete on branch {branch_name}. Manual merge required.", + branch_name=branch_name, + ) + + # Auto-merge enabled + self._run_git("checkout", self.main_branch) + self._run_git("merge", "--no-ff", branch_name, "-m", f"Merge feature {feature_id}") + + # Optionally delete feature branch + # self._run_git("branch", "-d", branch_name) + + logger.info(f"Merged feature branch {branch_name} to {self.main_branch}") + return WorkflowResult( + success=True, + message=f"Merged {branch_name} to {self.main_branch}", + branch_name=self.main_branch, + previous_branch=branch_name, + ) + + except subprocess.CalledProcessError as e: + # Restore original branch on failure + if current_branch: + self._run_git("checkout", current_branch, check=False) + return WorkflowResult( + success=False, + message=f"Merge failed: {e.stderr}", + ) + + def abort_feature(self, feature_id: int, delete_branch: bool = False) -> WorkflowResult: + """ + Abort a feature (discard changes, optionally delete branch). + + Args: + feature_id: Feature ID + delete_branch: Whether to delete the feature branch + + Returns: + WorkflowResult with success status + """ + if self.mode != "feature_branches": + return WorkflowResult( + success=True, + message="Feature branches not enabled", + ) + + branch_name = self.get_feature_branch(feature_id) + if not branch_name: + return WorkflowResult( + success=False, + message=f"No branch found for feature {feature_id}", + ) + + try: + # Discard uncommitted changes + self._run_git("checkout", "--", ".", check=False) + self._run_git("clean", "-fd", check=False) + + # Switch back to main + self._run_git("checkout", self.main_branch) + + if delete_branch: + self._run_git("branch", "-D", branch_name) + return WorkflowResult( + success=True, + message=f"Aborted and deleted branch {branch_name}", + branch_name=self.main_branch, + ) + + return WorkflowResult( + success=True, + message=f"Aborted feature, branch {branch_name} preserved", + branch_name=self.main_branch, + ) + + except subprocess.CalledProcessError as e: + return WorkflowResult( + success=False, + message=f"Abort failed: {e.stderr}", + ) + + def list_feature_branches(self) -> list[BranchInfo]: + """ + List all feature branches. 
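+
+        Illustrative use, assuming an existing GitWorkflow instance `workflow`:
+
+            for branch in workflow.list_feature_branches():
+                print(branch.name, branch.feature_id, branch.is_current)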
+ + Returns: + List of BranchInfo objects + """ + if not self._is_git_repo(): + return [] + + result = self._run_git("branch", "--list", f"{self.branch_prefix}*", check=False) + current = self._get_current_branch() + + branches = [] + for line in result.stdout.strip().split("\n"): + if not line.strip(): + continue + is_current = line.startswith("*") + name = line.strip().lstrip("* ") + + # Extract feature ID from branch name + feature_id = None + match = re.search(rf"{re.escape(self.branch_prefix)}(\d+)-", name) + if match: + feature_id = int(match.group(1)) + + branches.append( + BranchInfo( + name=name, + feature_id=feature_id, + is_feature_branch=True, + is_current=is_current, + ) + ) + + return branches + + def get_status(self) -> dict: + """ + Get current git workflow status. + + Returns: + Dict with current branch, mode, uncommitted changes, etc. + """ + if not self._is_git_repo(): + return { + "is_git_repo": False, + "mode": self.mode, + } + + current = self._get_current_branch() + feature_branches = self.list_feature_branches() + + # Check if current branch is a feature branch + current_feature_id = None + if current and current.startswith(self.branch_prefix): + match = re.search(rf"{re.escape(self.branch_prefix)}(\d+)-", current) + if match: + current_feature_id = int(match.group(1)) + + return { + "is_git_repo": True, + "mode": self.mode, + "current_branch": current, + "main_branch": self.main_branch, + "is_on_feature_branch": current_feature_id is not None, + "current_feature_id": current_feature_id, + "has_uncommitted_changes": self._has_uncommitted_changes(), + "feature_branches": [b.name for b in feature_branches], + "feature_branch_count": len(feature_branches), + } + + +def get_workflow(project_dir: Path) -> GitWorkflow: + """ + Get git workflow manager for a project. + + Reads configuration from .autocoder/config.json. 
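+
+    Expected config shape (the values shown are the defaults used when the
+    section is missing):
+
+        {"git_workflow": {"mode": "trunk", "branch_prefix": "feature/",
+                          "main_branch": "main", "auto_merge": false}}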
+ + Args: + project_dir: Project directory + + Returns: + GitWorkflow instance configured for the project + """ + # Try to load config + mode: WorkflowMode = "trunk" + branch_prefix = "feature/" + main_branch = "main" + auto_merge = False + + try: + from server.services.autocoder_config import load_config + + config = load_config(project_dir) + git_config = config.get("git_workflow", {}) + + mode = git_config.get("mode", "trunk") + branch_prefix = git_config.get("branch_prefix", "feature/") + main_branch = git_config.get("main_branch", "main") + auto_merge = git_config.get("auto_merge", False) + except Exception: + pass + + return GitWorkflow( + project_dir, + mode=mode, + branch_prefix=branch_prefix, + main_branch=main_branch, + auto_merge=auto_merge, + ) diff --git a/server/main.py b/server/main.py index c586e5bd..fefbf50c 100644 --- a/server/main.py +++ b/server/main.py @@ -28,6 +28,7 @@ expand_project_router, features_router, filesystem_router, + git_workflow_router, import_project_router, logs_router, projects_router, @@ -123,6 +124,7 @@ async def require_localhost(request: Request, call_next): app.include_router(import_project_router) app.include_router(logs_router) app.include_router(security_router) +app.include_router(git_workflow_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index e18677e9..1492d405 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -18,6 +18,7 @@ from .import_project import router as import_project_router from .logs import router as logs_router from .security import router as security_router +from .git_workflow import router as git_workflow_router __all__ = [ "projects_router", @@ -33,4 +34,5 @@ "import_project_router", "logs_router", "security_router", + "git_workflow_router", ] diff --git a/server/routers/git_workflow.py b/server/routers/git_workflow.py new file mode 100644 index 00000000..8bef4b8d --- /dev/null +++ b/server/routers/git_workflow.py @@ -0,0 +1,283 @@ +""" +Git Workflow Router +=================== + +REST API endpoints for git workflow management. 
+ +Endpoints: +- GET /api/git/status - Get current git status +- POST /api/git/start-feature - Start working on a feature (create branch) +- POST /api/git/complete-feature - Complete a feature (merge) +- POST /api/git/abort-feature - Abort a feature +- GET /api/git/branches - List feature branches +""" + +import logging +from pathlib import Path +from typing import Optional + +from fastapi import APIRouter, HTTPException, Query +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/git", tags=["git-workflow"]) + + +def _get_project_path(project_name: str) -> Path | None: + """Get project path from registry.""" + from registry import get_project_path + + return get_project_path(project_name) + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + + +class StartFeatureRequest(BaseModel): + """Request to start a feature branch.""" + + project_name: str = Field(..., description="Name of the registered project") + feature_id: int = Field(..., description="Feature ID") + feature_name: str = Field(..., description="Feature name for branch naming") + + +class CompleteFeatureRequest(BaseModel): + """Request to complete a feature.""" + + project_name: str = Field(..., description="Name of the registered project") + feature_id: int = Field(..., description="Feature ID") + + +class AbortFeatureRequest(BaseModel): + """Request to abort a feature.""" + + project_name: str = Field(..., description="Name of the registered project") + feature_id: int = Field(..., description="Feature ID") + delete_branch: bool = Field(False, description="Whether to delete the branch") + + +class CommitRequest(BaseModel): + """Request to commit changes.""" + + project_name: str = Field(..., description="Name of the registered project") + feature_id: int = Field(..., description="Feature ID") + message: str = Field(..., description="Commit message") + + +class WorkflowResultResponse(BaseModel): + """Response from workflow operations.""" + + success: bool + message: str + branch_name: Optional[str] = None + previous_branch: Optional[str] = None + + +class GitStatusResponse(BaseModel): + """Response with git status information.""" + + is_git_repo: bool + mode: str + current_branch: Optional[str] = None + main_branch: Optional[str] = None + is_on_feature_branch: bool = False + current_feature_id: Optional[int] = None + has_uncommitted_changes: bool = False + feature_branches: list[str] = [] + feature_branch_count: int = 0 + + +class BranchInfo(BaseModel): + """Information about a branch.""" + + name: str + feature_id: Optional[int] = None + is_feature_branch: bool = False + is_current: bool = False + + +class BranchListResponse(BaseModel): + """Response with list of branches.""" + + branches: list[BranchInfo] + count: int + + +# ============================================================================ +# REST Endpoints +# ============================================================================ + + +@router.get("/status/{project_name}", response_model=GitStatusResponse) +async def get_git_status(project_name: str): + """ + Get current git workflow status for a project. + + Returns information about current branch, mode, and feature branches. 
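+
+    Illustrative response (values are examples only):
+
+        {"is_git_repo": true, "mode": "feature_branches",
+         "current_branch": "feature/42-user-can-login", "main_branch": "main",
+         "is_on_feature_branch": true, "current_feature_id": 42,
+         "has_uncommitted_changes": false,
+         "feature_branches": ["feature/42-user-can-login"],
+         "feature_branch_count": 1}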
+ """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from git_workflow import get_workflow + + workflow = get_workflow(project_dir) + status = workflow.get_status() + + return GitStatusResponse(**status) + + except Exception as e: + logger.exception(f"Error getting git status: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get status: {str(e)}") + + +@router.post("/start-feature", response_model=WorkflowResultResponse) +async def start_feature(request: StartFeatureRequest): + """ + Start working on a feature (create and checkout branch). + + In feature_branches mode, creates a new branch like 'feature/42-user-can-login'. + In trunk mode, this is a no-op. + """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from git_workflow import get_workflow + + workflow = get_workflow(project_dir) + result = workflow.start_feature(request.feature_id, request.feature_name) + + return WorkflowResultResponse( + success=result.success, + message=result.message, + branch_name=result.branch_name, + previous_branch=result.previous_branch, + ) + + except Exception as e: + logger.exception(f"Error starting feature: {e}") + raise HTTPException(status_code=500, detail=f"Failed to start feature: {str(e)}") + + +@router.post("/complete-feature", response_model=WorkflowResultResponse) +async def complete_feature(request: CompleteFeatureRequest): + """ + Complete a feature (merge to main if auto_merge enabled). + + Commits any remaining changes and optionally merges the feature branch. + """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from git_workflow import get_workflow + + workflow = get_workflow(project_dir) + result = workflow.complete_feature(request.feature_id) + + return WorkflowResultResponse( + success=result.success, + message=result.message, + branch_name=result.branch_name, + previous_branch=result.previous_branch, + ) + + except Exception as e: + logger.exception(f"Error completing feature: {e}") + raise HTTPException(status_code=500, detail=f"Failed to complete feature: {str(e)}") + + +@router.post("/abort-feature", response_model=WorkflowResultResponse) +async def abort_feature(request: AbortFeatureRequest): + """ + Abort a feature (discard changes, optionally delete branch). + + Returns to main branch and discards uncommitted changes. + """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from git_workflow import get_workflow + + workflow = get_workflow(project_dir) + result = workflow.abort_feature(request.feature_id, request.delete_branch) + + return WorkflowResultResponse( + success=result.success, + message=result.message, + branch_name=result.branch_name, + previous_branch=result.previous_branch, + ) + + except Exception as e: + logger.exception(f"Error aborting feature: {e}") + raise HTTPException(status_code=500, detail=f"Failed to abort feature: {str(e)}") + + +@router.post("/commit", response_model=WorkflowResultResponse) +async def commit_changes(request: CommitRequest): + """ + Commit current changes for a feature. + + Adds all changes and commits with a structured message. 
+ """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from git_workflow import get_workflow + + workflow = get_workflow(project_dir) + result = workflow.commit_feature_progress(request.feature_id, request.message) + + return WorkflowResultResponse( + success=result.success, + message=result.message, + ) + + except Exception as e: + logger.exception(f"Error committing: {e}") + raise HTTPException(status_code=500, detail=f"Commit failed: {str(e)}") + + +@router.get("/branches/{project_name}", response_model=BranchListResponse) +async def list_branches(project_name: str): + """ + List all feature branches for a project. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + try: + from git_workflow import get_workflow + + workflow = get_workflow(project_dir) + branches = workflow.list_feature_branches() + + return BranchListResponse( + branches=[ + BranchInfo( + name=b.name, + feature_id=b.feature_id, + is_feature_branch=b.is_feature_branch, + is_current=b.is_current, + ) + for b in branches + ], + count=len(branches), + ) + + except Exception as e: + logger.exception(f"Error listing branches: {e}") + raise HTTPException(status_code=500, detail=f"Failed to list branches: {str(e)}") From 71359014652c912c46183a67a3d24f24e8fea97c Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:33:05 +0100 Subject: [PATCH 22/81] docs(fork): update changelog with Feature Branches feature Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 59 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index f055fcc9..9eeb475c 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -9,6 +9,63 @@ Format based on [Keep a Changelog](https://keepachangelog.com/). 
- Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] Feature Branches Git Workflow + +### Added +- New module: `git_workflow.py` - Git workflow management for feature branches +- New router: `server/routers/git_workflow.py` - REST API for git operations + +### Workflow Modes +| Mode | Description | +|------|-------------| +| `feature_branches` | Create branch per feature, merge on completion | +| `trunk` | All changes on main branch (default) | +| `none` | No git operations | + +### Branch Naming +- Format: `feature/{id}-{slugified-name}` +- Example: `feature/42-user-can-login` + +### API Endpoints +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/git/status/{project}` | GET | Get current git status | +| `/api/git/start-feature` | POST | Start feature (create branch) | +| `/api/git/complete-feature` | POST | Complete feature (merge) | +| `/api/git/abort-feature` | POST | Abort feature | +| `/api/git/commit` | POST | Commit changes | +| `/api/git/branches/{project}` | GET | List feature branches | + +### Configuration +```json +{ + "git_workflow": { + "mode": "feature_branches", + "branch_prefix": "feature/", + "main_branch": "main", + "auto_merge": false + } +} +``` + +### Usage +```python +from git_workflow import get_workflow + +workflow = get_workflow(project_dir) + +# Start working on a feature +result = workflow.start_feature(42, "User can login") + +# Commit progress +result = workflow.commit_feature_progress(42, "Add login form") + +# Complete feature (merge to main if auto_merge enabled) +result = workflow.complete_feature(42) +``` + +--- + ## [2025-01-21] Security Scanning ### Added @@ -303,7 +360,7 @@ The following features are planned for implementation: - [ ] Import Wizard UI - Chat-based project import (UI component) ### Phase 3: Workflow Improvements -- [ ] Feature Branches - Git workflow with feature branches +- [x] Feature Branches - Git workflow with feature branches ✅ - [x] Error Recovery - Handle stuck features, auto-clear on startup ✅ - [ ] Review Agent - Automatic code review - [ ] CI/CD Integration - GitHub Actions generation From 3bcb5849ba771d40ca10f99867b7a2297ec06210 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:35:26 +0100 Subject: [PATCH 23/81] feat(cicd): add CI/CD workflow generation - Add integrations/ci module for GitHub Actions - Generate CI, Security, and Deploy workflows - Auto-detect tech stack (Node.js, Python, TypeScript) - Add REST API endpoints for workflow generation - Support workflow preview without saving - Add PyYAML dependency Co-Authored-By: Claude Opus 4.5 --- integrations/__init__.py | 13 + integrations/ci/__init__.py | 66 ++++ integrations/ci/github_actions.py | 610 ++++++++++++++++++++++++++++++ requirements.txt | 1 + server/main.py | 2 + server/routers/__init__.py | 2 + server/routers/cicd.py | 254 +++++++++++++ 7 files changed, 948 insertions(+) create mode 100644 integrations/__init__.py create mode 100644 integrations/ci/__init__.py create mode 100644 integrations/ci/github_actions.py create mode 100644 server/routers/cicd.py diff --git a/integrations/__init__.py b/integrations/__init__.py new file mode 100644 index 00000000..df9ad1ec --- /dev/null +++ b/integrations/__init__.py @@ -0,0 +1,13 @@ +""" +Integrations Package +==================== + +External integrations for Autocoder including CI/CD, deployment, etc. 
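+
+A minimal sketch of intended use (the path and provider are placeholders):
+
+    from integrations import generate_ci_config
+
+    config = generate_ci_config("./my-project", provider="github")
+    print(config["output_dir"])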
+""" + +from .ci import generate_ci_config, generate_github_workflow + +__all__ = [ + "generate_ci_config", + "generate_github_workflow", +] diff --git a/integrations/ci/__init__.py b/integrations/ci/__init__.py new file mode 100644 index 00000000..c4ff80aa --- /dev/null +++ b/integrations/ci/__init__.py @@ -0,0 +1,66 @@ +""" +CI/CD Integration Module +======================== + +Generate CI/CD configuration based on detected tech stack. + +Supported providers: +- GitHub Actions +- GitLab CI (planned) + +Features: +- Auto-detect tech stack and generate appropriate workflows +- Lint, type-check, test, build, deploy stages +- Environment management (staging, production) +""" + +from .github_actions import ( + generate_github_workflow, + generate_all_workflows, + GitHubWorkflow, + WorkflowTrigger, +) + +__all__ = [ + "generate_github_workflow", + "generate_all_workflows", + "GitHubWorkflow", + "WorkflowTrigger", +] + + +def generate_ci_config(project_dir, provider: str = "github") -> dict: + """ + Generate CI configuration based on detected tech stack. + + Args: + project_dir: Project directory + provider: CI provider ("github" or "gitlab") + + Returns: + Dict with generated configuration and file paths + """ + from pathlib import Path + + project_dir = Path(project_dir) + + if provider == "github": + workflows = generate_all_workflows(project_dir) + return { + "provider": "github", + "workflows": workflows, + "output_dir": str(project_dir / ".github" / "workflows"), + } + + elif provider == "gitlab": + # GitLab CI support planned + return { + "provider": "gitlab", + "error": "GitLab CI not yet implemented", + } + + else: + return { + "provider": provider, + "error": f"Unknown provider: {provider}", + } diff --git a/integrations/ci/github_actions.py b/integrations/ci/github_actions.py new file mode 100644 index 00000000..8b3be1bd --- /dev/null +++ b/integrations/ci/github_actions.py @@ -0,0 +1,610 @@ +""" +GitHub Actions Workflow Generator +================================= + +Generate GitHub Actions workflows based on detected tech stack. 
+ +Workflow types: +- CI: Lint, type-check, test on push/PR +- Deploy: Build and deploy on merge to main +- Security: Dependency audit and code scanning +""" + +import json +import os +from dataclasses import dataclass, field +from enum import Enum +from pathlib import Path +from typing import Any, Literal, Optional + +import yaml + + +class WorkflowTrigger(str, Enum): + """Workflow trigger types.""" + + PUSH = "push" + PULL_REQUEST = "pull_request" + WORKFLOW_DISPATCH = "workflow_dispatch" + SCHEDULE = "schedule" + + +@dataclass +class WorkflowJob: + """A job in a GitHub Actions workflow.""" + + name: str + runs_on: str = "ubuntu-latest" + steps: list[dict] = field(default_factory=list) + needs: list[str] = field(default_factory=list) + if_condition: Optional[str] = None + env: dict[str, str] = field(default_factory=dict) + + def to_dict(self) -> dict: + """Convert to workflow YAML format.""" + result = { + "name": self.name, + "runs-on": self.runs_on, + "steps": self.steps, + } + if self.needs: + result["needs"] = self.needs + if self.if_condition: + result["if"] = self.if_condition + if self.env: + result["env"] = self.env + return result + + +@dataclass +class GitHubWorkflow: + """A GitHub Actions workflow.""" + + name: str + filename: str + on: dict[str, Any] + jobs: dict[str, WorkflowJob] + env: dict[str, str] = field(default_factory=dict) + permissions: dict[str, str] = field(default_factory=dict) + + def to_yaml(self) -> str: + """Convert to YAML string.""" + workflow = { + "name": self.name, + "on": self.on, + "jobs": {name: job.to_dict() for name, job in self.jobs.items()}, + } + if self.env: + workflow["env"] = self.env + if self.permissions: + workflow["permissions"] = self.permissions + + return yaml.dump(workflow, default_flow_style=False, sort_keys=False) + + def save(self, project_dir: Path) -> Path: + """Save workflow to .github/workflows directory.""" + workflows_dir = project_dir / ".github" / "workflows" + workflows_dir.mkdir(parents=True, exist_ok=True) + + output_path = workflows_dir / self.filename + with open(output_path, "w") as f: + f.write(self.to_yaml()) + + return output_path + + +def _detect_stack(project_dir: Path) -> dict: + """Detect tech stack from project files.""" + stack = { + "has_node": False, + "has_python": False, + "has_typescript": False, + "has_react": False, + "has_nextjs": False, + "has_vue": False, + "has_fastapi": False, + "has_django": False, + "node_version": "20", + "python_version": "3.11", + "package_manager": "npm", + } + + # Check for Node.js + package_json = project_dir / "package.json" + if package_json.exists(): + stack["has_node"] = True + try: + with open(package_json) as f: + pkg = json.load(f) + deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})} + + if "typescript" in deps: + stack["has_typescript"] = True + if "react" in deps: + stack["has_react"] = True + if "next" in deps: + stack["has_nextjs"] = True + if "vue" in deps: + stack["has_vue"] = True + + # Detect package manager + if (project_dir / "pnpm-lock.yaml").exists(): + stack["package_manager"] = "pnpm" + elif (project_dir / "yarn.lock").exists(): + stack["package_manager"] = "yarn" + elif (project_dir / "bun.lockb").exists(): + stack["package_manager"] = "bun" + + # Node version from engines + engines = pkg.get("engines", {}) + if "node" in engines: + version = engines["node"].strip(">=^~") + if version and version[0].isdigit(): + stack["node_version"] = version.split(".")[0] + except (json.JSONDecodeError, KeyError): + pass + + # Check for Python + 
if (project_dir / "requirements.txt").exists() or (project_dir / "pyproject.toml").exists(): + stack["has_python"] = True + + # Check for FastAPI + requirements_path = project_dir / "requirements.txt" + if requirements_path.exists(): + content = requirements_path.read_text().lower() + if "fastapi" in content: + stack["has_fastapi"] = True + if "django" in content: + stack["has_django"] = True + + # Python version from pyproject.toml + pyproject = project_dir / "pyproject.toml" + if pyproject.exists(): + content = pyproject.read_text() + if "python_requires" in content or "requires-python" in content: + import re + match = re.search(r'["\']>=?3\.(\d+)', content) + if match: + stack["python_version"] = f"3.{match.group(1)}" + + return stack + + +def _checkout_step() -> dict: + """Standard checkout step.""" + return { + "name": "Checkout code", + "uses": "actions/checkout@v4", + } + + +def _setup_node_step(version: str, cache: str = "npm") -> dict: + """Setup Node.js step.""" + return { + "name": "Setup Node.js", + "uses": "actions/setup-node@v4", + "with": { + "node-version": version, + "cache": cache, + }, + } + + +def _setup_python_step(version: str) -> dict: + """Setup Python step.""" + return { + "name": "Setup Python", + "uses": "actions/setup-python@v5", + "with": { + "python-version": version, + "cache": "pip", + }, + } + + +def _install_deps_step(package_manager: str = "npm") -> dict: + """Install dependencies step.""" + commands = { + "npm": "npm ci", + "yarn": "yarn install --frozen-lockfile", + "pnpm": "pnpm install --frozen-lockfile", + "bun": "bun install --frozen-lockfile", + } + return { + "name": "Install dependencies", + "run": commands.get(package_manager, "npm ci"), + } + + +def _python_install_step() -> dict: + """Python install dependencies step.""" + return { + "name": "Install dependencies", + "run": "pip install -r requirements.txt", + } + + +def generate_ci_workflow(project_dir: Path) -> GitHubWorkflow: + """ + Generate CI workflow for lint, type-check, and tests. + + Triggers on push to feature branches and PRs to main. 
+ """ + stack = _detect_stack(project_dir) + + jobs = {} + + # Node.js jobs + if stack["has_node"]: + lint_steps = [ + _checkout_step(), + _setup_node_step(stack["node_version"], stack["package_manager"]), + _install_deps_step(stack["package_manager"]), + { + "name": "Run linter", + "run": f"{stack['package_manager']} run lint" if stack["package_manager"] != "npm" else "npm run lint", + }, + ] + + jobs["lint"] = WorkflowJob( + name="Lint", + steps=lint_steps, + ) + + if stack["has_typescript"]: + typecheck_steps = [ + _checkout_step(), + _setup_node_step(stack["node_version"], stack["package_manager"]), + _install_deps_step(stack["package_manager"]), + { + "name": "Type check", + "run": "npx tsc --noEmit", + }, + ] + + jobs["typecheck"] = WorkflowJob( + name="Type Check", + steps=typecheck_steps, + ) + + test_steps = [ + _checkout_step(), + _setup_node_step(stack["node_version"], stack["package_manager"]), + _install_deps_step(stack["package_manager"]), + { + "name": "Run tests", + "run": f"{stack['package_manager']} test" if stack["package_manager"] != "npm" else "npm test", + }, + ] + + jobs["test"] = WorkflowJob( + name="Test", + steps=test_steps, + needs=["lint"] + (["typecheck"] if stack["has_typescript"] else []), + ) + + build_steps = [ + _checkout_step(), + _setup_node_step(stack["node_version"], stack["package_manager"]), + _install_deps_step(stack["package_manager"]), + { + "name": "Build", + "run": f"{stack['package_manager']} run build" if stack["package_manager"] != "npm" else "npm run build", + }, + ] + + jobs["build"] = WorkflowJob( + name="Build", + steps=build_steps, + needs=["test"], + ) + + # Python jobs + if stack["has_python"]: + python_lint_steps = [ + _checkout_step(), + _setup_python_step(stack["python_version"]), + _python_install_step(), + { + "name": "Run ruff", + "run": "pip install ruff && ruff check .", + }, + ] + + jobs["python-lint"] = WorkflowJob( + name="Python Lint", + steps=python_lint_steps, + ) + + python_test_steps = [ + _checkout_step(), + _setup_python_step(stack["python_version"]), + _python_install_step(), + { + "name": "Run tests", + "run": "pip install pytest && pytest", + }, + ] + + jobs["python-test"] = WorkflowJob( + name="Python Test", + steps=python_test_steps, + needs=["python-lint"], + ) + + return GitHubWorkflow( + name="CI", + filename="ci.yml", + on={ + "push": { + "branches": ["main", "master", "feature/*"], + }, + "pull_request": { + "branches": ["main", "master"], + }, + }, + jobs=jobs, + ) + + +def generate_security_workflow(project_dir: Path) -> GitHubWorkflow: + """ + Generate security scanning workflow. + + Runs dependency audit and code scanning. 
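+
+    Example (illustrative; the project path is an assumption):
+        workflow = generate_security_workflow(Path("./my-app"))
+        print(workflow.to_yaml())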
+ """ + stack = _detect_stack(project_dir) + + jobs = {} + + if stack["has_node"]: + audit_steps = [ + _checkout_step(), + _setup_node_step(stack["node_version"], stack["package_manager"]), + { + "name": "Run npm audit", + "run": "npm audit --audit-level=moderate", + "continue-on-error": True, + }, + ] + + jobs["npm-audit"] = WorkflowJob( + name="NPM Audit", + steps=audit_steps, + ) + + if stack["has_python"]: + pip_audit_steps = [ + _checkout_step(), + _setup_python_step(stack["python_version"]), + { + "name": "Run pip-audit", + "run": "pip install pip-audit && pip-audit -r requirements.txt", + "continue-on-error": True, + }, + ] + + jobs["pip-audit"] = WorkflowJob( + name="Pip Audit", + steps=pip_audit_steps, + ) + + # CodeQL analysis + codeql_steps = [ + _checkout_step(), + { + "name": "Initialize CodeQL", + "uses": "github/codeql-action/init@v3", + "with": { + "languages": ", ".join( + filter(None, [ + "javascript" if stack["has_node"] else None, + "python" if stack["has_python"] else None, + ]) + ), + }, + }, + { + "name": "Autobuild", + "uses": "github/codeql-action/autobuild@v3", + }, + { + "name": "Perform CodeQL Analysis", + "uses": "github/codeql-action/analyze@v3", + }, + ] + + jobs["codeql"] = WorkflowJob( + name="CodeQL Analysis", + steps=codeql_steps, + ) + + return GitHubWorkflow( + name="Security", + filename="security.yml", + on={ + "push": { + "branches": ["main", "master"], + }, + "pull_request": { + "branches": ["main", "master"], + }, + "schedule": [ + {"cron": "0 0 * * 0"}, # Weekly on Sunday + ], + }, + jobs=jobs, + permissions={ + "security-events": "write", + "actions": "read", + "contents": "read", + }, + ) + + +def generate_deploy_workflow(project_dir: Path) -> GitHubWorkflow: + """ + Generate deployment workflow. + + Builds and deploys on merge to main. 
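+
+    Example (illustrative; the generated deploy steps are placeholders to be
+    replaced with real deployment commands):
+        workflow = generate_deploy_workflow(Path("./my-app"))
+        workflow.save(Path("./my-app"))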
+ """ + stack = _detect_stack(project_dir) + + jobs = {} + + # Build job + build_steps = [_checkout_step()] + + if stack["has_node"]: + build_steps.extend([ + _setup_node_step(stack["node_version"], stack["package_manager"]), + _install_deps_step(stack["package_manager"]), + { + "name": "Build", + "run": f"{stack['package_manager']} run build" if stack["package_manager"] != "npm" else "npm run build", + }, + { + "name": "Upload build artifacts", + "uses": "actions/upload-artifact@v4", + "with": { + "name": "build", + "path": "dist/", + "retention-days": 7, + }, + }, + ]) + + if stack["has_python"]: + build_steps.extend([ + _setup_python_step(stack["python_version"]), + _python_install_step(), + { + "name": "Build package", + "run": "pip install build && python -m build", + }, + ]) + + jobs["build"] = WorkflowJob( + name="Build", + steps=build_steps, + ) + + # Deploy staging job (placeholder) + deploy_staging_steps = [ + _checkout_step(), + { + "name": "Download build artifacts", + "uses": "actions/download-artifact@v4", + "with": { + "name": "build", + "path": "dist/", + }, + }, + { + "name": "Deploy to staging", + "run": "echo 'Add your staging deployment commands here'", + "env": { + "DEPLOY_ENV": "staging", + }, + }, + ] + + jobs["deploy-staging"] = WorkflowJob( + name="Deploy to Staging", + steps=deploy_staging_steps, + needs=["build"], + env={"DEPLOY_ENV": "staging"}, + ) + + # Deploy production job (manual trigger) + deploy_prod_steps = [ + _checkout_step(), + { + "name": "Download build artifacts", + "uses": "actions/download-artifact@v4", + "with": { + "name": "build", + "path": "dist/", + }, + }, + { + "name": "Deploy to production", + "run": "echo 'Add your production deployment commands here'", + "env": { + "DEPLOY_ENV": "production", + }, + }, + ] + + jobs["deploy-production"] = WorkflowJob( + name="Deploy to Production", + steps=deploy_prod_steps, + needs=["deploy-staging"], + if_condition="github.event_name == 'workflow_dispatch'", + env={"DEPLOY_ENV": "production"}, + ) + + return GitHubWorkflow( + name="Deploy", + filename="deploy.yml", + on={ + "push": { + "branches": ["main", "master"], + }, + "workflow_dispatch": {}, + }, + jobs=jobs, + ) + + +def generate_github_workflow( + project_dir: Path, + workflow_type: Literal["ci", "security", "deploy"] = "ci", + save: bool = True, +) -> GitHubWorkflow: + """ + Generate a GitHub Actions workflow. + + Args: + project_dir: Project directory + workflow_type: Type of workflow (ci, security, deploy) + save: Whether to save the workflow file + + Returns: + GitHubWorkflow instance + """ + generators = { + "ci": generate_ci_workflow, + "security": generate_security_workflow, + "deploy": generate_deploy_workflow, + } + + generator = generators.get(workflow_type) + if not generator: + raise ValueError(f"Unknown workflow type: {workflow_type}") + + workflow = generator(Path(project_dir)) + + if save: + workflow.save(Path(project_dir)) + + return workflow + + +def generate_all_workflows(project_dir: Path, save: bool = True) -> dict[str, GitHubWorkflow]: + """ + Generate all workflow types for a project. 
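+
+    Example (illustrative; the project path is an assumption):
+        workflows = generate_all_workflows(Path("./my-app"))
+        ci_yaml = workflows["ci"].to_yaml()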
+ + Args: + project_dir: Project directory + save: Whether to save workflow files + + Returns: + Dict mapping workflow type to GitHubWorkflow + """ + workflows = {} + for workflow_type in ["ci", "security", "deploy"]: + workflows[workflow_type] = generate_github_workflow( + project_dir, workflow_type, save + ) + return workflows diff --git a/requirements.txt b/requirements.txt index 0e260ba3..93449630 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,6 +8,7 @@ python-multipart>=0.0.17 psutil>=6.0.0 aiofiles>=24.0.0 pywinpty>=2.0.0; sys_platform == "win32" +pyyaml>=6.0.0 # Dev dependencies ruff>=0.8.0 diff --git a/server/main.py b/server/main.py index fefbf50c..311cd0b9 100644 --- a/server/main.py +++ b/server/main.py @@ -24,6 +24,7 @@ from .routers import ( agent_router, assistant_chat_router, + cicd_router, devserver_router, expand_project_router, features_router, @@ -125,6 +126,7 @@ async def require_localhost(request: Request, call_next): app.include_router(logs_router) app.include_router(security_router) app.include_router(git_workflow_router) +app.include_router(cicd_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index 1492d405..3b2a7954 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -19,6 +19,7 @@ from .logs import router as logs_router from .security import router as security_router from .git_workflow import router as git_workflow_router +from .cicd import router as cicd_router __all__ = [ "projects_router", @@ -35,4 +36,5 @@ "logs_router", "security_router", "git_workflow_router", + "cicd_router", ] diff --git a/server/routers/cicd.py b/server/routers/cicd.py new file mode 100644 index 00000000..65709554 --- /dev/null +++ b/server/routers/cicd.py @@ -0,0 +1,254 @@ +""" +CI/CD Router +============ + +REST API endpoints for CI/CD workflow generation. 
+ +Endpoints: +- POST /api/cicd/generate - Generate CI/CD workflows +- GET /api/cicd/workflows - List existing workflows +- GET /api/cicd/preview - Preview workflow content +""" + +import logging +from pathlib import Path +from typing import Literal, Optional + +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/cicd", tags=["cicd"]) + + +def _get_project_path(project_name: str) -> Path | None: + """Get project path from registry.""" + from registry import get_project_path + + return get_project_path(project_name) + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + + +class GenerateRequest(BaseModel): + """Request to generate CI/CD workflows.""" + + project_name: str = Field(..., description="Name of the registered project") + provider: str = Field("github", description="CI provider (github, gitlab)") + workflow_types: list[str] = Field( + ["ci", "security", "deploy"], + description="Types of workflows to generate", + ) + save: bool = Field(True, description="Whether to save the workflow files") + + +class WorkflowInfo(BaseModel): + """Information about a generated workflow.""" + + name: str + filename: str + type: str + path: Optional[str] = None + + +class GenerateResponse(BaseModel): + """Response from workflow generation.""" + + provider: str + workflows: list[WorkflowInfo] + output_dir: str + message: str + + +class PreviewRequest(BaseModel): + """Request to preview a workflow.""" + + project_name: str = Field(..., description="Name of the registered project") + workflow_type: str = Field("ci", description="Type of workflow (ci, security, deploy)") + + +class PreviewResponse(BaseModel): + """Response with workflow preview.""" + + workflow_type: str + filename: str + content: str + + +class WorkflowListResponse(BaseModel): + """Response with list of existing workflows.""" + + workflows: list[WorkflowInfo] + count: int + + +# ============================================================================ +# REST Endpoints +# ============================================================================ + + +@router.post("/generate", response_model=GenerateResponse) +async def generate_workflows(request: GenerateRequest): + """ + Generate CI/CD workflows for a project. + + Detects tech stack and generates appropriate workflow files. + Supports GitHub Actions (and GitLab CI planned). 
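+
+    Example request body (illustrative; the project name is an assumption):
+        {"project_name": "my-project", "workflow_types": ["ci", "security"], "save": true}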
+ """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + try: + if request.provider == "github": + from integrations.ci import generate_github_workflow + + workflows = [] + for wf_type in request.workflow_types: + if wf_type not in ["ci", "security", "deploy"]: + continue + + workflow = generate_github_workflow( + project_dir, + workflow_type=wf_type, + save=request.save, + ) + + path = None + if request.save: + path = str(project_dir / ".github" / "workflows" / workflow.filename) + + workflows.append( + WorkflowInfo( + name=workflow.name, + filename=workflow.filename, + type=wf_type, + path=path, + ) + ) + + return GenerateResponse( + provider="github", + workflows=workflows, + output_dir=str(project_dir / ".github" / "workflows"), + message=f"Generated {len(workflows)} workflow(s)", + ) + + else: + raise HTTPException( + status_code=400, + detail=f"Unsupported provider: {request.provider}", + ) + + except Exception as e: + logger.exception(f"Error generating workflows: {e}") + raise HTTPException(status_code=500, detail=f"Generation failed: {str(e)}") + + +@router.post("/preview", response_model=PreviewResponse) +async def preview_workflow(request: PreviewRequest): + """ + Preview a workflow without saving it. + + Returns the YAML content that would be generated. + """ + project_dir = _get_project_path(request.project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + if request.workflow_type not in ["ci", "security", "deploy"]: + raise HTTPException( + status_code=400, + detail=f"Invalid workflow type: {request.workflow_type}", + ) + + try: + from integrations.ci import generate_github_workflow + + workflow = generate_github_workflow( + project_dir, + workflow_type=request.workflow_type, + save=False, + ) + + return PreviewResponse( + workflow_type=request.workflow_type, + filename=workflow.filename, + content=workflow.to_yaml(), + ) + + except Exception as e: + logger.exception(f"Error previewing workflow: {e}") + raise HTTPException(status_code=500, detail=f"Preview failed: {str(e)}") + + +@router.get("/workflows/{project_name}", response_model=WorkflowListResponse) +async def list_workflows(project_name: str): + """ + List existing GitHub Actions workflows for a project. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + workflows_dir = project_dir / ".github" / "workflows" + if not workflows_dir.exists(): + return WorkflowListResponse(workflows=[], count=0) + + workflows = [] + for file in workflows_dir.glob("*.yml"): + # Determine workflow type from filename + wf_type = "custom" + if file.stem in ["ci", "security", "deploy"]: + wf_type = file.stem + + workflows.append( + WorkflowInfo( + name=file.stem.title(), + filename=file.name, + type=wf_type, + path=str(file), + ) + ) + + return WorkflowListResponse( + workflows=workflows, + count=len(workflows), + ) + + +@router.get("/workflows/{project_name}/{filename}") +async def get_workflow_content(project_name: str, filename: str): + """ + Get the content of a specific workflow file. + """ + project_dir = _get_project_path(project_name) + if not project_dir: + raise HTTPException(status_code=404, detail="Project not found") + + # Security: validate filename + if ".." 
in filename or "/" in filename or "\\" in filename: + raise HTTPException(status_code=400, detail="Invalid filename") + + if not filename.endswith((".yml", ".yaml")): + raise HTTPException(status_code=400, detail="Invalid workflow filename") + + workflow_path = project_dir / ".github" / "workflows" / filename + if not workflow_path.exists(): + raise HTTPException(status_code=404, detail="Workflow not found") + + try: + content = workflow_path.read_text() + return { + "filename": filename, + "content": content, + } + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error reading workflow: {str(e)}") From 1b80b9d8163340937f17ffe34415ba418b487d51 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:35:51 +0100 Subject: [PATCH 24/81] docs(fork): update changelog with CI/CD Integration feature Co-Authored-By: Claude Opus 4.5 --- FORK_CHANGELOG.md | 62 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md index 9eeb475c..ba6c033d 100644 --- a/FORK_CHANGELOG.md +++ b/FORK_CHANGELOG.md @@ -9,6 +9,66 @@ Format based on [Keep a Changelog](https://keepachangelog.com/). - Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] CI/CD Integration + +### Added +- New module: `integrations/ci/` - CI/CD workflow generation +- New router: `server/routers/cicd.py` - REST API for workflow management + +### Generated Workflows +| Workflow | Filename | Triggers | +|----------|----------|----------| +| CI | `ci.yml` | Push to branches, PRs | +| Security | `security.yml` | Push/PR to main, weekly | +| Deploy | `deploy.yml` | Push to main, manual | + +### CI Workflow Jobs +- **Lint**: ESLint, ruff +- **Type Check**: TypeScript tsc, mypy +- **Test**: npm test, pytest +- **Build**: Production build + +### Security Workflow Jobs +- **NPM Audit**: Dependency vulnerability scan +- **Pip Audit**: Python dependency scan +- **CodeQL**: GitHub code scanning + +### Deploy Workflow Jobs +- **Build**: Create production artifacts +- **Deploy Staging**: Auto-deploy on merge to main +- **Deploy Production**: Manual trigger only + +### API Endpoints +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/cicd/generate` | POST | Generate workflows | +| `/api/cicd/preview` | POST | Preview workflow YAML | +| `/api/cicd/workflows/{project}` | GET | List existing workflows | +| `/api/cicd/workflows/{project}/{filename}` | GET | Get workflow content | + +### Usage +```bash +# Generate all workflows +curl -X POST http://localhost:8888/api/cicd/generate \ + -H "Content-Type: application/json" \ + -d '{"project_name": "my-project"}' + +# Preview CI workflow +curl -X POST http://localhost:8888/api/cicd/preview \ + -H "Content-Type: application/json" \ + -d '{"project_name": "my-project", "workflow_type": "ci"}' +``` + +### Stack Detection +Automatically detects: +- Node.js version from `engines` in package.json +- Package manager (npm, yarn, pnpm, bun) +- TypeScript, React, Next.js, Vue +- Python version from pyproject.toml +- FastAPI, Django + +--- + ## [2025-01-21] Feature Branches Git Workflow ### Added @@ -363,7 +423,7 @@ The following features are planned for implementation: - [x] Feature Branches - Git workflow with feature branches ✅ - [x] Error Recovery - Handle stuck features, auto-clear on startup ✅ - [ ] Review Agent - Automatic code review -- [ ] CI/CD Integration - GitHub Actions generation +- [x] CI/CD 
Integration - GitHub Actions generation ✅ ### Phase 4: Polish & Ecosystem - [ ] Template Library - SaaS, e-commerce, dashboard templates From 81056d12013300b3fc7bb65587d891ba44257bef Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:38:27 +0100 Subject: [PATCH 25/81] feat(templates): add project template library - Add templates module with library loader - Create 5 starter templates: - saas-starter: Multi-tenant SaaS - ecommerce: Online store - admin-dashboard: Admin panel - blog-cms: Content management - api-service: RESTful API - Add REST API endpoints for template management - Generate app_spec.txt from templates - Support design tokens customization Co-Authored-By: Claude Opus 4.5 --- server/main.py | 2 + server/routers/__init__.py | 2 + server/routers/templates.py | 316 +++++++++++++++++++++++++ templates/__init__.py | 39 +++ templates/catalog/admin-dashboard.yaml | 83 +++++++ templates/catalog/api-service.yaml | 80 +++++++ templates/catalog/blog-cms.yaml | 80 +++++++ templates/catalog/ecommerce.yaml | 83 +++++++ templates/catalog/saas-starter.yaml | 74 ++++++ templates/library.py | 314 ++++++++++++++++++++++++ 10 files changed, 1073 insertions(+) create mode 100644 server/routers/templates.py create mode 100644 templates/__init__.py create mode 100644 templates/catalog/admin-dashboard.yaml create mode 100644 templates/catalog/api-service.yaml create mode 100644 templates/catalog/blog-cms.yaml create mode 100644 templates/catalog/ecommerce.yaml create mode 100644 templates/catalog/saas-starter.yaml create mode 100644 templates/library.py diff --git a/server/main.py b/server/main.py index 311cd0b9..98bcab97 100644 --- a/server/main.py +++ b/server/main.py @@ -36,6 +36,7 @@ security_router, settings_router, spec_creation_router, + templates_router, terminal_router, ) from .schemas import SetupStatus @@ -127,6 +128,7 @@ async def require_localhost(request: Request, call_next): app.include_router(security_router) app.include_router(git_workflow_router) app.include_router(cicd_router) +app.include_router(templates_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index 3b2a7954..0c7b795f 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -20,6 +20,7 @@ from .security import router as security_router from .git_workflow import router as git_workflow_router from .cicd import router as cicd_router +from .templates import router as templates_router __all__ = [ "projects_router", @@ -37,4 +38,5 @@ "security_router", "git_workflow_router", "cicd_router", + "templates_router", ] diff --git a/server/routers/templates.py b/server/routers/templates.py new file mode 100644 index 00000000..2241f567 --- /dev/null +++ b/server/routers/templates.py @@ -0,0 +1,316 @@ +""" +Templates Router +================ + +REST API endpoints for project templates. 
+ +Endpoints: +- GET /api/templates - List all available templates +- GET /api/templates/{template_id} - Get template details +- POST /api/templates/preview - Preview app_spec.txt generation +- POST /api/templates/apply - Apply template to new project +""" + +import logging +from pathlib import Path +from typing import Optional + +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/templates", tags=["templates"]) + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + + +class TechStackInfo(BaseModel): + """Technology stack information.""" + + frontend: Optional[str] = None + backend: Optional[str] = None + database: Optional[str] = None + auth: Optional[str] = None + styling: Optional[str] = None + hosting: Optional[str] = None + + +class DesignTokensInfo(BaseModel): + """Design tokens information.""" + + colors: dict[str, str] = {} + spacing: list[int] = [] + fonts: dict[str, str] = {} + border_radius: dict[str, str] = {} + + +class TemplateInfo(BaseModel): + """Template summary information.""" + + id: str + name: str + description: str + estimated_features: int + tags: list[str] = [] + difficulty: str = "intermediate" + + +class TemplateDetail(BaseModel): + """Full template details.""" + + id: str + name: str + description: str + tech_stack: TechStackInfo + feature_categories: dict[str, list[str]] + design_tokens: DesignTokensInfo + estimated_features: int + tags: list[str] = [] + difficulty: str = "intermediate" + + +class TemplateListResponse(BaseModel): + """Response with list of templates.""" + + templates: list[TemplateInfo] + count: int + + +class PreviewRequest(BaseModel): + """Request to preview app_spec.txt.""" + + template_id: str = Field(..., description="Template identifier") + app_name: str = Field(..., description="Application name") + customizations: Optional[dict] = Field(None, description="Optional customizations") + + +class PreviewResponse(BaseModel): + """Response with app_spec.txt preview.""" + + template_id: str + app_name: str + app_spec_content: str + feature_count: int + + +class ApplyRequest(BaseModel): + """Request to apply template to a project.""" + + template_id: str = Field(..., description="Template identifier") + project_name: str = Field(..., description="Name for the new project") + project_dir: str = Field(..., description="Directory for the project") + customizations: Optional[dict] = Field(None, description="Optional customizations") + + +class ApplyResponse(BaseModel): + """Response from applying template.""" + + success: bool + project_name: str + project_dir: str + app_spec_path: str + feature_count: int + message: str + + +# ============================================================================ +# REST Endpoints +# ============================================================================ + + +@router.get("", response_model=TemplateListResponse) +async def list_templates(): + """ + List all available templates. + + Returns basic information about each template. 
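+
+    Example response shape (abridged, illustrative):
+        {"templates": [{"id": "saas-starter", "name": "SaaS Starter", ...}], "count": 5}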
+ """ + try: + from templates import list_templates as get_templates + + templates = get_templates() + + return TemplateListResponse( + templates=[ + TemplateInfo( + id=t.id, + name=t.name, + description=t.description, + estimated_features=t.estimated_features, + tags=t.tags, + difficulty=t.difficulty, + ) + for t in templates + ], + count=len(templates), + ) + + except Exception as e: + logger.exception(f"Error listing templates: {e}") + raise HTTPException(status_code=500, detail=f"Failed to list templates: {str(e)}") + + +@router.get("/{template_id}", response_model=TemplateDetail) +async def get_template(template_id: str): + """ + Get detailed information about a specific template. + """ + try: + from templates import get_template as load_template + + template = load_template(template_id) + + if not template: + raise HTTPException(status_code=404, detail=f"Template not found: {template_id}") + + return TemplateDetail( + id=template.id, + name=template.name, + description=template.description, + tech_stack=TechStackInfo( + frontend=template.tech_stack.frontend, + backend=template.tech_stack.backend, + database=template.tech_stack.database, + auth=template.tech_stack.auth, + styling=template.tech_stack.styling, + hosting=template.tech_stack.hosting, + ), + feature_categories=template.feature_categories, + design_tokens=DesignTokensInfo( + colors=template.design_tokens.colors, + spacing=template.design_tokens.spacing, + fonts=template.design_tokens.fonts, + border_radius=template.design_tokens.border_radius, + ), + estimated_features=template.estimated_features, + tags=template.tags, + difficulty=template.difficulty, + ) + + except HTTPException: + raise + except Exception as e: + logger.exception(f"Error getting template: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get template: {str(e)}") + + +@router.post("/preview", response_model=PreviewResponse) +async def preview_template(request: PreviewRequest): + """ + Preview the app_spec.txt that would be generated from a template. + + Does not create any files - just returns the content. + """ + try: + from templates import get_template, generate_app_spec, generate_features + + template = get_template(request.template_id) + if not template: + raise HTTPException(status_code=404, detail=f"Template not found: {request.template_id}") + + app_spec_content = generate_app_spec( + template, + request.app_name, + request.customizations, + ) + + features = generate_features(template) + + return PreviewResponse( + template_id=request.template_id, + app_name=request.app_name, + app_spec_content=app_spec_content, + feature_count=len(features), + ) + + except HTTPException: + raise + except Exception as e: + logger.exception(f"Error previewing template: {e}") + raise HTTPException(status_code=500, detail=f"Preview failed: {str(e)}") + + +@router.post("/apply", response_model=ApplyResponse) +async def apply_template(request: ApplyRequest): + """ + Apply a template to create a new project. + + Creates the project directory, prompts folder, and app_spec.txt. + Does NOT register the project or create features - use the projects API for that. 
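+
+    Example request body (illustrative; the project name and directory are assumptions):
+        {"template_id": "saas-starter", "project_name": "my-saas",
+         "project_dir": "/path/to/my-saas"}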
+ """ + try: + from templates import get_template, generate_app_spec, generate_features + + template = get_template(request.template_id) + if not template: + raise HTTPException(status_code=404, detail=f"Template not found: {request.template_id}") + + # Create project directory + project_dir = Path(request.project_dir) + prompts_dir = project_dir / "prompts" + prompts_dir.mkdir(parents=True, exist_ok=True) + + # Generate and save app_spec.txt + app_spec_content = generate_app_spec( + template, + request.project_name, + request.customizations, + ) + + app_spec_path = prompts_dir / "app_spec.txt" + with open(app_spec_path, "w") as f: + f.write(app_spec_content) + + features = generate_features(template) + + return ApplyResponse( + success=True, + project_name=request.project_name, + project_dir=str(project_dir), + app_spec_path=str(app_spec_path), + feature_count=len(features), + message=f"Template '{template.name}' applied successfully. Register the project and run the initializer to create features.", + ) + + except HTTPException: + raise + except Exception as e: + logger.exception(f"Error applying template: {e}") + raise HTTPException(status_code=500, detail=f"Apply failed: {str(e)}") + + +@router.get("/{template_id}/features") +async def get_template_features(template_id: str): + """ + Get the features that would be created from a template. + + Returns features in bulk_create format. + """ + try: + from templates import get_template, generate_features + + template = get_template(template_id) + if not template: + raise HTTPException(status_code=404, detail=f"Template not found: {template_id}") + + features = generate_features(template) + + return { + "template_id": template_id, + "features": features, + "count": len(features), + "by_category": { + category: len(feature_names) + for category, feature_names in template.feature_categories.items() + }, + } + + except HTTPException: + raise + except Exception as e: + logger.exception(f"Error getting template features: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get features: {str(e)}") diff --git a/templates/__init__.py b/templates/__init__.py new file mode 100644 index 00000000..a31a8d3b --- /dev/null +++ b/templates/__init__.py @@ -0,0 +1,39 @@ +""" +Template Library +================ + +Pre-made templates for common application types. 
+ +Templates provide starting points with: +- Tech stack configuration +- Pre-defined features and categories +- Design tokens +- Estimated feature count + +Available templates: +- saas-starter: Multi-tenant SaaS with auth and billing +- ecommerce: Online store with products, cart, checkout +- admin-dashboard: Admin panel with CRUD operations +- blog-cms: Blog/CMS with posts, categories, comments +- api-service: RESTful API service +""" + +from .library import ( + Template, + TemplateCategory, + get_template, + list_templates, + load_template, + generate_app_spec, + generate_features, +) + +__all__ = [ + "Template", + "TemplateCategory", + "get_template", + "list_templates", + "load_template", + "generate_app_spec", + "generate_features", +] diff --git a/templates/catalog/admin-dashboard.yaml b/templates/catalog/admin-dashboard.yaml new file mode 100644 index 00000000..1380a4a2 --- /dev/null +++ b/templates/catalog/admin-dashboard.yaml @@ -0,0 +1,83 @@ +name: "Admin Dashboard" +description: "Full-featured admin panel with CRUD operations, charts, and data tables" + +tech_stack: + frontend: "React" + backend: "FastAPI" + database: "PostgreSQL" + auth: "JWT" + styling: "Tailwind CSS" + +feature_categories: + authentication: + - "Admin login" + - "Password reset" + - "Role-based access control" + - "Session management" + + dashboard: + - "Overview page" + - "Statistics cards" + - "Charts (line, bar, pie)" + - "Recent activity" + - "Quick actions" + + user_management: + - "User list with pagination" + - "User search and filter" + - "Create new user" + - "Edit user" + - "Delete user" + - "User roles management" + - "User activity log" + + content_management: + - "Content list" + - "Create content" + - "Edit content" + - "Delete content" + - "Publish/unpublish" + - "Content categories" + + data_tables: + - "Sortable columns" + - "Filterable columns" + - "Pagination" + - "Bulk actions" + - "Export to CSV" + - "Column visibility toggle" + + settings: + - "General settings" + - "Email templates" + - "Notification settings" + - "Backup management" + - "System logs" + + notifications: + - "In-app notifications" + - "Notification center" + - "Mark as read" + - "Notification preferences" + +design_tokens: + colors: + primary: "#3B82F6" + secondary: "#8B5CF6" + accent: "#F59E0B" + background: "#F3F4F6" + sidebar: "#1F2937" + text: "#111827" + muted: "#6B7280" + spacing: [4, 8, 12, 16, 24, 32] + fonts: + heading: "Inter" + body: "Inter" + border_radius: + small: "4px" + medium: "6px" + large: "8px" + +estimated_features: 40 +tags: ["admin", "dashboard", "crud", "management"] +difficulty: "intermediate" diff --git a/templates/catalog/api-service.yaml b/templates/catalog/api-service.yaml new file mode 100644 index 00000000..9815245e --- /dev/null +++ b/templates/catalog/api-service.yaml @@ -0,0 +1,80 @@ +name: "API Service" +description: "RESTful API service with authentication, rate limiting, and documentation" + +tech_stack: + backend: "FastAPI" + database: "PostgreSQL" + auth: "JWT" + hosting: "Docker" + +feature_categories: + core_api: + - "Health check endpoint" + - "Version endpoint" + - "OpenAPI documentation" + - "Swagger UI" + - "ReDoc documentation" + + authentication: + - "User registration" + - "User login" + - "Token refresh" + - "Password reset" + - "API key authentication" + - "OAuth2 support" + + user_management: + - "Get current user" + - "Update user profile" + - "Change password" + - "Delete account" + - "List users (admin)" + + resource_crud: + - "Create resource" + - "Read resource" + 
- "Update resource" + - "Delete resource" + - "List resources" + - "Search resources" + - "Filter resources" + - "Paginate results" + + security: + - "Rate limiting" + - "Request validation" + - "Input sanitization" + - "CORS configuration" + - "Security headers" + + monitoring: + - "Request logging" + - "Error tracking" + - "Performance metrics" + - "Health checks" + + admin: + - "Admin endpoints" + - "User management" + - "System statistics" + - "Audit logs" + +design_tokens: + colors: + primary: "#059669" + secondary: "#0EA5E9" + accent: "#F59E0B" + background: "#F9FAFB" + text: "#111827" + spacing: [4, 8, 12, 16, 24, 32] + fonts: + heading: "Inter" + body: "Inter" + border_radius: + small: "4px" + medium: "6px" + large: "8px" + +estimated_features: 30 +tags: ["api", "rest", "backend", "microservice"] +difficulty: "intermediate" diff --git a/templates/catalog/blog-cms.yaml b/templates/catalog/blog-cms.yaml new file mode 100644 index 00000000..a95fb6e0 --- /dev/null +++ b/templates/catalog/blog-cms.yaml @@ -0,0 +1,80 @@ +name: "Blog & CMS" +description: "Content management system with blog posts, categories, and comments" + +tech_stack: + frontend: "Next.js" + backend: "Node.js/Express" + database: "PostgreSQL" + auth: "NextAuth.js" + styling: "Tailwind CSS" + +feature_categories: + public_pages: + - "Home page with featured posts" + - "Blog listing page" + - "Blog post detail page" + - "Category pages" + - "Tag pages" + - "Author pages" + - "Search results page" + - "About page" + - "Contact page" + + blog_features: + - "Post search" + - "Category filtering" + - "Tag filtering" + - "Related posts" + - "Social sharing" + - "Reading time estimate" + - "Table of contents" + + comments: + - "Comment submission" + - "Comment moderation" + - "Reply to comments" + - "Like comments" + - "Comment notifications" + + admin_content: + - "Post editor (rich text)" + - "Post preview" + - "Draft management" + - "Schedule posts" + - "Post categories" + - "Post tags" + - "Media library" + + admin_settings: + - "Site settings" + - "SEO settings" + - "Social media links" + - "Analytics integration" + + user_features: + - "Author registration" + - "Author login" + - "Author profile" + - "Author dashboard" + - "Newsletter subscription" + +design_tokens: + colors: + primary: "#0F172A" + secondary: "#3B82F6" + accent: "#F97316" + background: "#FFFFFF" + text: "#334155" + muted: "#94A3B8" + spacing: [4, 8, 12, 16, 24, 32, 48, 64] + fonts: + heading: "Merriweather" + body: "Source Sans Pro" + border_radius: + small: "2px" + medium: "4px" + large: "8px" + +estimated_features: 35 +tags: ["blog", "cms", "content", "publishing"] +difficulty: "intermediate" diff --git a/templates/catalog/ecommerce.yaml b/templates/catalog/ecommerce.yaml new file mode 100644 index 00000000..dcbcf146 --- /dev/null +++ b/templates/catalog/ecommerce.yaml @@ -0,0 +1,83 @@ +name: "E-Commerce Store" +description: "Full-featured online store with products, cart, checkout, and order management" + +tech_stack: + frontend: "Next.js" + backend: "Node.js/Express" + database: "PostgreSQL" + auth: "NextAuth.js" + styling: "Tailwind CSS" + hosting: "Vercel" + +feature_categories: + product_catalog: + - "Product listing page" + - "Product detail page" + - "Product search" + - "Category navigation" + - "Product filtering" + - "Product sorting" + - "Product image gallery" + - "Related products" + + shopping_cart: + - "Add to cart" + - "Update cart quantity" + - "Remove from cart" + - "Cart sidebar/drawer" + - "Cart page" + - "Save for later" + + 
checkout: + - "Guest checkout" + - "User checkout" + - "Shipping address form" + - "Shipping method selection" + - "Payment integration (Stripe)" + - "Order summary" + - "Order confirmation" + + user_account: + - "User registration" + - "User login" + - "Password reset" + - "Profile management" + - "Address book" + - "Order history" + - "Wishlist" + + admin_panel: + - "Product management" + - "Category management" + - "Order management" + - "Customer management" + - "Inventory tracking" + - "Sales reports" + - "Discount codes" + + marketing: + - "Newsletter signup" + - "Promotional banners" + - "Product reviews" + - "Rating system" + +design_tokens: + colors: + primary: "#2563EB" + secondary: "#16A34A" + accent: "#DC2626" + background: "#FFFFFF" + text: "#1F2937" + muted: "#9CA3AF" + spacing: [4, 8, 12, 16, 24, 32, 48] + fonts: + heading: "Poppins" + body: "Open Sans" + border_radius: + small: "4px" + medium: "8px" + large: "16px" + +estimated_features: 50 +tags: ["ecommerce", "store", "shopping", "payments"] +difficulty: "advanced" diff --git a/templates/catalog/saas-starter.yaml b/templates/catalog/saas-starter.yaml new file mode 100644 index 00000000..98b4f947 --- /dev/null +++ b/templates/catalog/saas-starter.yaml @@ -0,0 +1,74 @@ +name: "SaaS Starter" +description: "Multi-tenant SaaS application with authentication, billing, and dashboard" + +tech_stack: + frontend: "Next.js" + backend: "Node.js/Express" + database: "PostgreSQL" + auth: "NextAuth.js" + styling: "Tailwind CSS" + hosting: "Vercel" + +feature_categories: + authentication: + - "User registration" + - "User login" + - "Password reset" + - "Email verification" + - "OAuth login (Google, GitHub)" + - "Two-factor authentication" + - "Session management" + + multi_tenancy: + - "Organization creation" + - "Team member invitations" + - "Role management (Admin, Member)" + - "Organization settings" + - "Switch between organizations" + + billing: + - "Subscription plans display" + - "Stripe integration" + - "Payment method management" + - "Invoice history" + - "Usage tracking" + - "Plan upgrades/downgrades" + + dashboard: + - "Overview page with metrics" + - "Usage statistics charts" + - "Recent activity feed" + - "Quick actions" + + user_profile: + - "Profile settings" + - "Avatar upload" + - "Notification preferences" + - "API key management" + + admin: + - "User management" + - "Organization management" + - "System health dashboard" + - "Audit logs" + +design_tokens: + colors: + primary: "#6366F1" + secondary: "#10B981" + accent: "#F59E0B" + background: "#F9FAFB" + text: "#111827" + muted: "#6B7280" + spacing: [4, 8, 12, 16, 24, 32, 48] + fonts: + heading: "Inter" + body: "Inter" + border_radius: + small: "4px" + medium: "8px" + large: "12px" + +estimated_features: 45 +tags: ["saas", "subscription", "multi-tenant", "billing"] +difficulty: "advanced" diff --git a/templates/library.py b/templates/library.py new file mode 100644 index 00000000..53054c3c --- /dev/null +++ b/templates/library.py @@ -0,0 +1,314 @@ +""" +Template Library Module +======================= + +Load and manage application templates for quick project scaffolding. 
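+
+Example (illustrative; "saas-starter" is one of the bundled catalog templates):
+    template = load_template("saas-starter")
+    features = generate_features(template)
+    spec_xml = generate_app_spec(template, "My App")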
+""" + +import json +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Optional + +import yaml + +# Directory containing template files +TEMPLATES_DIR = Path(__file__).parent / "catalog" + + +@dataclass +class DesignTokens: + """Design tokens for consistent styling.""" + + colors: dict[str, str] = field(default_factory=dict) + spacing: list[int] = field(default_factory=list) + fonts: dict[str, str] = field(default_factory=dict) + border_radius: dict[str, str] = field(default_factory=dict) + + @classmethod + def from_dict(cls, data: dict) -> "DesignTokens": + """Create from dictionary.""" + return cls( + colors=data.get("colors", {}), + spacing=data.get("spacing", [4, 8, 12, 16, 24, 32]), + fonts=data.get("fonts", {}), + border_radius=data.get("border_radius", {}), + ) + + +@dataclass +class TechStack: + """Technology stack configuration.""" + + frontend: Optional[str] = None + backend: Optional[str] = None + database: Optional[str] = None + auth: Optional[str] = None + styling: Optional[str] = None + hosting: Optional[str] = None + + @classmethod + def from_dict(cls, data: dict) -> "TechStack": + """Create from dictionary.""" + return cls( + frontend=data.get("frontend"), + backend=data.get("backend"), + database=data.get("database"), + auth=data.get("auth"), + styling=data.get("styling"), + hosting=data.get("hosting"), + ) + + +@dataclass +class TemplateFeature: + """A feature in a template.""" + + name: str + description: str + category: str + steps: list[str] = field(default_factory=list) + priority: int = 0 + + @classmethod + def from_dict(cls, data: dict, category: str, priority: int) -> "TemplateFeature": + """Create from dictionary.""" + steps = data.get("steps", []) + if not steps: + # Generate default steps + steps = [f"Implement {data['name']}"] + + return cls( + name=data["name"], + description=data.get("description", data["name"]), + category=category, + steps=steps, + priority=priority, + ) + + +@dataclass +class TemplateCategory: + """A category of features in a template.""" + + name: str + features: list[str] + description: Optional[str] = None + + +@dataclass +class Template: + """An application template.""" + + id: str + name: str + description: str + tech_stack: TechStack + feature_categories: dict[str, list[str]] + design_tokens: DesignTokens + estimated_features: int + tags: list[str] = field(default_factory=list) + difficulty: str = "intermediate" + preview_image: Optional[str] = None + + @classmethod + def from_dict(cls, template_id: str, data: dict) -> "Template": + """Create from dictionary.""" + return cls( + id=template_id, + name=data["name"], + description=data["description"], + tech_stack=TechStack.from_dict(data.get("tech_stack", {})), + feature_categories=data.get("feature_categories", {}), + design_tokens=DesignTokens.from_dict(data.get("design_tokens", {})), + estimated_features=data.get("estimated_features", 0), + tags=data.get("tags", []), + difficulty=data.get("difficulty", "intermediate"), + preview_image=data.get("preview_image"), + ) + + def to_dict(self) -> dict: + """Convert to dictionary.""" + return { + "id": self.id, + "name": self.name, + "description": self.description, + "tech_stack": { + "frontend": self.tech_stack.frontend, + "backend": self.tech_stack.backend, + "database": self.tech_stack.database, + "auth": self.tech_stack.auth, + "styling": self.tech_stack.styling, + "hosting": self.tech_stack.hosting, + }, + "feature_categories": self.feature_categories, + "design_tokens": { + "colors": 
self.design_tokens.colors, + "spacing": self.design_tokens.spacing, + "fonts": self.design_tokens.fonts, + "border_radius": self.design_tokens.border_radius, + }, + "estimated_features": self.estimated_features, + "tags": self.tags, + "difficulty": self.difficulty, + } + + +def load_template(template_id: str) -> Optional[Template]: + """ + Load a template by ID. + + Args: + template_id: Template identifier (filename without extension) + + Returns: + Template instance or None if not found + """ + template_path = TEMPLATES_DIR / f"{template_id}.yaml" + + if not template_path.exists(): + return None + + try: + with open(template_path) as f: + data = yaml.safe_load(f) + return Template.from_dict(template_id, data) + except Exception: + return None + + +def list_templates() -> list[Template]: + """ + List all available templates. + + Returns: + List of Template instances + """ + templates = [] + + if not TEMPLATES_DIR.exists(): + return templates + + for file in TEMPLATES_DIR.glob("*.yaml"): + template = load_template(file.stem) + if template: + templates.append(template) + + return sorted(templates, key=lambda t: t.name) + + +def get_template(template_id: str) -> Optional[Template]: + """ + Get a specific template by ID. + + Args: + template_id: Template identifier + + Returns: + Template instance or None + """ + return load_template(template_id) + + +def generate_features(template: Template) -> list[dict]: + """ + Generate feature list from a template. + + Returns features in the format expected by feature_create_bulk. + + Args: + template: Template instance + + Returns: + List of feature dictionaries + """ + features = [] + priority = 1 + + for category, feature_names in template.feature_categories.items(): + for feature_name in feature_names: + features.append({ + "priority": priority, + "category": category.replace("_", " ").title(), + "name": feature_name, + "description": f"{feature_name} functionality for the application", + "steps": [f"Implement {feature_name}"], + "passes": False, + }) + priority += 1 + + return features + + +def generate_app_spec( + template: Template, + app_name: str, + customizations: Optional[dict] = None, +) -> str: + """ + Generate app_spec.txt content from a template. 
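+
+    Example (illustrative; the color override is an assumption):
+        spec = generate_app_spec(template, "My App",
+                                 {"colors": {"primary": "#000000"}})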
+
+    Args:
+        template: Template instance
+        app_name: Application name
+        customizations: Optional customizations to apply
+
+    Returns:
+        XML content for app_spec.txt
+    """
+    customizations = customizations or {}
+
+    # Merge design tokens with customizations
+    colors = {**template.design_tokens.colors, **customizations.get("colors", {})}
+
+    # Build XML (tag names mirror the template fields)
+    xml_parts = [
+        '<?xml version="1.0" encoding="UTF-8"?>',
+        "<project>",
+        f"  <name>{app_name}</name>",
+        f"  <description>{template.description}</description>",
+        "",
+        "  <tech_stack>",
+    ]
+
+    if template.tech_stack.frontend:
+        xml_parts.append(f"    <frontend>{template.tech_stack.frontend}</frontend>")
+    if template.tech_stack.backend:
+        xml_parts.append(f"    <backend>{template.tech_stack.backend}</backend>")
+    if template.tech_stack.database:
+        xml_parts.append(f"    <database>{template.tech_stack.database}</database>")
+    if template.tech_stack.auth:
+        xml_parts.append(f"    <auth>{template.tech_stack.auth}</auth>")
+    if template.tech_stack.styling:
+        xml_parts.append(f"    <styling>{template.tech_stack.styling}</styling>")
+
+    xml_parts.extend([
+        "  </tech_stack>",
+        "",
+        "  <design_tokens>",
+        "    <colors>",
+    ])
+
+    for color_name, color_value in colors.items():
+        xml_parts.append(f"      <{color_name}>{color_value}</{color_name}>")
+
+    xml_parts.extend([
+        "    </colors>",
+        "  </design_tokens>",
+        "",
+        "  <features>",
+    ])
+
+    for category, feature_names in template.feature_categories.items():
+        category_title = category.replace("_", " ").title()
+        xml_parts.append(f'    <category name="{category_title}">')
+        for feature_name in feature_names:
+            xml_parts.append(f"      <feature>{feature_name}</feature>")
+        xml_parts.append("    </category>")
+
+    xml_parts.extend([
+        "  </features>",
+        "</project>",
+    ])
+
+    return "\n".join(xml_parts)

From aade8045c4a7badb7981e431b963cf2258aa837f Mon Sep 17 00:00:00 2001
From: cabana8471
Date: Wed, 21 Jan 2026 10:38:49 +0100
Subject: [PATCH 26/81] docs(fork): update changelog with Template Library feature

Co-Authored-By: Claude Opus 4.5
---
 FORK_CHANGELOG.md | 66 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 65 insertions(+), 1 deletion(-)

diff --git a/FORK_CHANGELOG.md b/FORK_CHANGELOG.md
index ba6c033d..95a5b296 100644
--- a/FORK_CHANGELOG.md
+++ b/FORK_CHANGELOG.md
@@ -9,6 +9,70 @@ Format based on [Keep a Changelog](https://keepachangelog.com/).
- Fork documentation (FORK_README.md, FORK_CHANGELOG.md) - Configuration system via `.autocoder/config.json` +## [2025-01-21] Template Library + +### Added +- New module: `templates/` - Project template library +- New router: `server/routers/templates.py` - REST API for templates + +### Available Templates +| Template | Description | Features | +|----------|-------------|----------| +| `saas-starter` | Multi-tenant SaaS with auth, billing | ~45 | +| `ecommerce` | Online store with cart, checkout | ~50 | +| `admin-dashboard` | Admin panel with CRUD, charts | ~40 | +| `blog-cms` | Blog/CMS with posts, comments | ~35 | +| `api-service` | RESTful API with auth, docs | ~30 | + +### API Endpoints +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/api/templates` | GET | List all templates | +| `/api/templates/{id}` | GET | Get template details | +| `/api/templates/preview` | POST | Preview app_spec.txt | +| `/api/templates/apply` | POST | Apply template to project | +| `/api/templates/{id}/features` | GET | Get template features | + +### Template Format (YAML) +```yaml +name: "Template Name" +description: "Description" +tech_stack: + frontend: "Next.js" + backend: "FastAPI" + database: "PostgreSQL" +feature_categories: + authentication: + - "User login" + - "User registration" +design_tokens: + colors: + primary: "#3B82F6" +estimated_features: 30 +tags: ["saas", "auth"] +``` + +### Usage +```bash +# List templates +curl http://localhost:8888/api/templates + +# Get template details +curl http://localhost:8888/api/templates/saas-starter + +# Preview app_spec.txt +curl -X POST http://localhost:8888/api/templates/preview \ + -H "Content-Type: application/json" \ + -d '{"template_id": "saas-starter", "app_name": "My SaaS"}' + +# Apply template +curl -X POST http://localhost:8888/api/templates/apply \ + -H "Content-Type: application/json" \ + -d '{"template_id": "saas-starter", "project_name": "my-saas", "project_dir": "/path/to/project"}' +``` + +--- + ## [2025-01-21] CI/CD Integration ### Added @@ -426,7 +490,7 @@ The following features are planned for implementation: - [x] CI/CD Integration - GitHub Actions generation ✅ ### Phase 4: Polish & Ecosystem -- [ ] Template Library - SaaS, e-commerce, dashboard templates +- [x] Template Library - SaaS, e-commerce, dashboard templates ✅ - [ ] Auto Documentation - README, API docs generation - [ ] Design Tokens - Consistent styling - [ ] Visual Regression - Screenshot comparison testing From c8aced43f85fae2a98db04146b0f96e97ff5c016 Mon Sep 17 00:00:00 2001 From: cabana8471 Date: Wed, 21 Jan 2026 10:43:27 +0100 Subject: [PATCH 27/81] feat(ui): add Import Wizard UI for importing existing projects - Add useImportProject hook for import workflow state - Add ImportProjectModal component with multi-step wizard: - Folder selection - Stack detection display - Feature extraction and review - Feature selection (toggle individual features) - Project registration and feature creation - Modify NewProjectModal to add "Import Existing" option - Integrate with /api/import/* REST endpoints Co-Authored-By: Claude Opus 4.5 --- ui/src/components/ImportProjectModal.tsx | 632 +++++++++++++++++++++++ ui/src/components/NewProjectModal.tsx | 119 ++++- ui/src/hooks/useImportProject.ts | 242 +++++++++ 3 files changed, 988 insertions(+), 5 deletions(-) create mode 100644 ui/src/components/ImportProjectModal.tsx create mode 100644 ui/src/hooks/useImportProject.ts diff --git a/ui/src/components/ImportProjectModal.tsx b/ui/src/components/ImportProjectModal.tsx 
new file mode 100644 index 00000000..3aef2823 --- /dev/null +++ b/ui/src/components/ImportProjectModal.tsx @@ -0,0 +1,632 @@ +/** + * Import Project Modal Component + * + * Multi-step wizard for importing existing projects: + * 1. Select project folder + * 2. Analyze and detect tech stack + * 3. Extract features from codebase + * 4. Review and select features to import + * 5. Create features in database + */ + +import { useState } from 'react' +import { + X, + Folder, + Search, + Layers, + CheckCircle2, + AlertCircle, + Loader2, + ArrowRight, + ArrowLeft, + Code, + Database, + Server, + Layout, + CheckSquare, + Square, + ChevronDown, + ChevronRight, +} from 'lucide-react' +import { useImportProject } from '../hooks/useImportProject' +import { useCreateProject } from '../hooks/useProjects' +import { FolderBrowser } from './FolderBrowser' + +type Step = 'folder' | 'analyzing' | 'detected' | 'features' | 'register' | 'complete' + +interface ImportProjectModalProps { + isOpen: boolean + onClose: () => void + onProjectImported: (projectName: string) => void +} + +export function ImportProjectModal({ + isOpen, + onClose, + onProjectImported, +}: ImportProjectModalProps) { + const [step, setStep] = useState('folder') + const [projectName, setProjectName] = useState('') + const [expandedCategories, setExpandedCategories] = useState>(new Set()) + const [registerError, setRegisterError] = useState(null) + + const { + state, + analyze, + extractFeatures, + createFeatures, + toggleFeature, + selectAllFeatures, + deselectAllFeatures, + reset, + } = useImportProject() + + const createProject = useCreateProject() + + if (!isOpen) return null + + const handleFolderSelect = async (path: string) => { + setStep('analyzing') + await analyze(path) + if (state.step !== 'error') { + setStep('detected') + } + } + + const handleExtractFeatures = async () => { + await extractFeatures() + if (state.step !== 'error') { + setStep('features') + // Expand all categories by default + if (state.featuresResult) { + setExpandedCategories(new Set(Object.keys(state.featuresResult.by_category))) + } + } + } + + const handleContinueToRegister = () => { + // Generate default project name from path + const pathParts = state.projectPath?.split(/[/\\]/) || [] + const defaultName = pathParts[pathParts.length - 1] || 'imported-project' + setProjectName(defaultName.replace(/[^a-zA-Z0-9_-]/g, '-')) + setStep('register') + } + + const handleRegisterAndCreate = async () => { + if (!projectName.trim() || !state.projectPath) return + + setRegisterError(null) + + try { + // First register the project + await createProject.mutateAsync({ + name: projectName.trim(), + path: state.projectPath, + specMethod: 'manual', + }) + + // Then create features + await createFeatures(projectName.trim()) + + if (state.step !== 'error') { + setStep('complete') + setTimeout(() => { + onProjectImported(projectName.trim()) + handleClose() + }, 1500) + } + } catch (err) { + setRegisterError(err instanceof Error ? 
err.message : 'Failed to register project') + } + } + + const handleClose = () => { + setStep('folder') + setProjectName('') + setExpandedCategories(new Set()) + setRegisterError(null) + reset() + onClose() + } + + const handleBack = () => { + if (step === 'detected') { + setStep('folder') + reset() + } else if (step === 'features') { + setStep('detected') + } else if (step === 'register') { + setStep('features') + } + } + + const toggleCategory = (category: string) => { + setExpandedCategories(prev => { + const next = new Set(prev) + if (next.has(category)) { + next.delete(category) + } else { + next.add(category) + } + return next + }) + } + + const getStackIcon = (category: string) => { + switch (category.toLowerCase()) { + case 'frontend': + return + case 'backend': + return + case 'database': + return + default: + return + } + } + + // Folder selection step + if (step === 'folder') { + return ( +
+
e.stopPropagation()} + > +
+
+ +
+

+ Import Existing Project +

+

+ Select the folder containing your existing project +

+
+
+ +
+ +
+ +
+
+
+ ) + } + + // Analyzing step + if (step === 'analyzing' || state.step === 'analyzing') { + return ( +
+
e.stopPropagation()} + > +
+

+ Analyzing Project +

+ +
+ +
+
+ +
+

Detecting Tech Stack

+

+ Scanning your project for frameworks, routes, and components... +

+ +
+
+
+ ) + } + + // Error state + if (state.step === 'error') { + return ( +
+
e.stopPropagation()} + > +
+

+ Error +

+ +
+ +
+
+ +
+

Analysis Failed

+

{state.error}

+ +
+
+
+ ) + } + + // Detection results step + if (step === 'detected' && state.analyzeResult) { + const result = state.analyzeResult + return ( +
+
e.stopPropagation()} + > +
+
+ +

+ Stack Detected +

+
+ +
+ +
+ {/* Summary */} +
+

{result.summary}

+
+ + {/* Detected Stacks */} +

Detected Technologies

+
+ {result.detected_stacks.map((stack, i) => ( +
+ {getStackIcon(stack.category)} +
+
{stack.name}
+
+ {stack.category} +
+
+
+ {Math.round(stack.confidence * 100)}% +
+
+ ))} +
+ + {/* Stats */} +

Codebase Analysis

+
+
+
+ {result.routes_count} +
+
Routes
+
+
+
+ {result.endpoints_count} +
+
Endpoints
+
+
+
+ {result.components_count} +
+
Components
+
+
+
+ +
+ + +
+
+
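The stack cards in this step render `{getStackIcon(stack.category)}` next to each name. The icon elements inside `getStackIcon` were lost in this copy of the patch, so its cases currently `return` nothing; given the `lucide-react` imports at the top of the file, a plausible mapping (an assumption, not necessarily the original markup) would be:

```tsx
// Hypothetical reconstruction: the original icon JSX did not survive; sizing/classNames omitted.
const getStackIcon = (category: string) => {
  switch (category.toLowerCase()) {
    case 'frontend':
      return <Layout />
    case 'backend':
      return <Server />
    case 'database':
      return <Database />
    default:
      return <Code />
  }
}
```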
+ ) + } + + // Features review step + if (step === 'features' && state.featuresResult) { + const result = state.featuresResult + const categories = Object.keys(result.by_category) + + // Group features by category + const featuresByCategory: Record = {} + result.features.forEach(f => { + if (!featuresByCategory[f.category]) { + featuresByCategory[f.category] = [] + } + featuresByCategory[f.category].push(f) + }) + + return ( +
+
e.stopPropagation()} + > +
+
+ +
+

+ Review Features +

+

+ {state.selectedFeatures.length} of {result.count} features selected +

+
+
+ +
+ + {/* Selection controls */} +
+ + +
+ +
+ {categories.map(category => ( +
+ + + {expandedCategories.has(category) && ( +
+ {featuresByCategory[category]?.map((feature, i) => { + const isSelected = state.selectedFeatures.some( + f => f.name === feature.name && f.category === feature.category + ) + return ( +
toggleFeature(feature)} + className={` + flex items-start gap-3 p-3 cursor-pointer transition-all + border-2 border-[var(--color-neo-border)] + ${isSelected + ? 'bg-[var(--color-neo-done-light)] border-[var(--color-neo-done)]' + : 'bg-white hover:bg-[var(--color-neo-bg-secondary)]' + } + `} + > + {isSelected ? ( + + ) : ( + + )} +
+
{feature.name}
+
+ {feature.description} +
+
+ + {feature.source_type} + + {feature.source_file && ( + + {feature.source_file} + + )} +
+
+
+ ) + })} +
+ )} +
+ ))} +
+ +
+ + +
+
+
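A feature in the list above counts as selected when both its `name` and `category` match an entry in `state.selectedFeatures`; extracted features carry no id. The actual `toggleFeature` lives in `useImportProject` and is not shown in this hunk, but a minimal implementation consistent with that identity rule looks like this:

```ts
// Sketch under the assumption that (name, category) uniquely identifies an extracted feature.
type FeatureKey = { name: string; category: string }

function toggleFeature<T extends FeatureKey>(selected: T[], feature: T): T[] {
  const matches = (f: FeatureKey) =>
    f.name === feature.name && f.category === feature.category
  return selected.some(matches)
    ? selected.filter(f => !matches(f))   // already selected: remove it
    : [...selected, feature]              // not selected yet: add it
}
```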
+ ) + } + + // Register project step + if (step === 'register') { + return ( +
+
e.stopPropagation()} + > +
+

+ Register Project +

+ +
+ +
+
+ + setProjectName(e.target.value)} + placeholder="my-project" + className="neo-input" + pattern="^[a-zA-Z0-9_-]+$" + autoFocus + /> +

+ Use letters, numbers, hyphens, and underscores only. +

+
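The default value of this field comes from `handleContinueToRegister`, which takes the last segment of the analyzed path and rewrites anything outside `[a-zA-Z0-9_-]` to a hyphen, so the suggestion already satisfies the input's `pattern`. A worked example (the path is hypothetical):

```ts
// Same derivation as handleContinueToRegister, applied to an example path.
const path = '/home/dev/My Cool App'
const parts = path.split(/[/\\]/)                              // ['', 'home', 'dev', 'My Cool App']
const defaultName = parts[parts.length - 1] || 'imported-project'
const suggested = defaultName.replace(/[^a-zA-Z0-9_-]/g, '-')  // 'My-Cool-App'
```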
+ +
+
+
+ Features to create: + {state.selectedFeatures.length} +
+
+ Project path: + + {state.projectPath} + +
+
+
+ + {(registerError || state.error) && ( +
+ {registerError || state.error} +
+ )} + +
+ + +
+
+
+
+ ) + } + + // Complete step + if (step === 'complete') { + return ( +
+
e.stopPropagation()} + > +
+

+ Import Complete +

+
+ +
+
+ +
+

{projectName}

+

+ Project imported successfully! +

+

+ {state.createResult?.created} features created +

+
+ + Redirecting... +
+
+
+
+ ) + } + + return null +} diff --git a/ui/src/components/NewProjectModal.tsx b/ui/src/components/NewProjectModal.tsx index 5edbc394..b6569a08 100644 --- a/ui/src/components/NewProjectModal.tsx +++ b/ui/src/components/NewProjectModal.tsx @@ -10,15 +10,17 @@ */ import { useState } from 'react' -import { X, Bot, FileEdit, ArrowRight, ArrowLeft, Loader2, CheckCircle2, Folder } from 'lucide-react' +import { X, Bot, FileEdit, ArrowRight, ArrowLeft, Loader2, CheckCircle2, Folder, Download } from 'lucide-react' import { useCreateProject } from '../hooks/useProjects' import { SpecCreationChat } from './SpecCreationChat' import { FolderBrowser } from './FolderBrowser' +import { ImportProjectModal } from './ImportProjectModal' import { startAgent } from '../lib/api' type InitializerStatus = 'idle' | 'starting' | 'error' -type Step = 'name' | 'folder' | 'method' | 'chat' | 'complete' +type Step = 'choose' | 'name' | 'folder' | 'method' | 'chat' | 'complete' | 'import' +type ProjectType = 'new' | 'import' type SpecMethod = 'claude' | 'manual' interface NewProjectModalProps { @@ -34,7 +36,8 @@ export function NewProjectModal({ onProjectCreated, onStepChange, }: NewProjectModalProps) { - const [step, setStep] = useState('name') + const [step, setStep] = useState('choose') + const [_projectType, setProjectType] = useState(null) const [projectName, setProjectName] = useState('') const [projectPath, setProjectPath] = useState(null) const [_specMethod, setSpecMethod] = useState(null) @@ -165,7 +168,8 @@ export function NewProjectModal({ } const handleClose = () => { - changeStep('name') + changeStep('choose') + setProjectType(null) setProjectName('') setProjectPath(null) setSpecMethod(null) @@ -183,9 +187,37 @@ export function NewProjectModal({ } else if (step === 'folder') { changeStep('name') setProjectPath(null) + } else if (step === 'name') { + changeStep('choose') + setProjectType(null) + } + } + + const handleProjectTypeSelect = (type: ProjectType) => { + setProjectType(type) + if (type === 'new') { + changeStep('name') + } else { + changeStep('import') } } + const handleImportComplete = (importedProjectName: string) => { + onProjectCreated(importedProjectName) + handleClose() + } + + // Import project view + if (step === 'import') { + return ( + + ) + } + // Full-screen chat view if (step === 'chat') { return ( @@ -253,6 +285,7 @@ export function NewProjectModal({ {/* Header */}

+ {step === 'choose' && 'New Project'} {step === 'name' && 'Create New Project'} {step === 'method' && 'Choose Setup Method'} {step === 'complete' && 'Project Created!'} @@ -267,6 +300,74 @@ export function NewProjectModal({ {/* Content */}
+ {/* Step 0: Choose project type */} + {step === 'choose' && ( +
+

+ What would you like to do? +

+ +
+ {/* New project option */} + + + {/* Import existing option */} + +
+
+ )} + {/* Step 1: Project Name */} {step === 'name' && (
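Beyond the new 'choose' step above, the remaining NewProjectModal changes are wiring: a `ProjectType` selection decides whether to continue with the classic flow or hand off to the import wizard, and the wizard reports back through `onProjectImported`. Condensed from the hunks in this diff (the props passed to `ImportProjectModal` were stripped in this copy, so that part is an assumption based on `ImportProjectModalProps`):

```tsx
// Condensed sketch of the new wiring inside NewProjectModal; identifiers come from the diff above.
const handleProjectTypeSelect = (type: ProjectType) => {
  setProjectType(type)
  if (type === 'new') {
    changeStep('name')     // continue with the existing create flow
  } else {
    changeStep('import')   // hand off to the import wizard
  }
}

const handleImportComplete = (importedProjectName: string) => {
  onProjectCreated(importedProjectName)  // surface the imported project like a newly created one
  handleClose()                          // resets the modal back to the 'choose' step
}

// Import project view; props assumed from ImportProjectModalProps:
if (step === 'import') {
  return (
    <ImportProjectModal
      isOpen={true}
      onClose={handleClose}
      onProjectImported={handleImportComplete}
    />
  )
}
```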
@@ -294,7 +395,15 @@ export function NewProjectModal({
)} -
+
+