diff --git a/codeflow_engine/__init__.py b/codeflow_engine/__init__.py index 6aa05ad..8d9fb51 100644 --- a/codeflow_engine/__init__.py +++ b/codeflow_engine/__init__.py @@ -2,6 +2,13 @@ CodeFlow Engine - Automated Code Review and Quality Management System This package provides AI-powered code analysis, automated fixes, and quality assurance workflows. + +Main Components: +- CodeFlowEngine: Main orchestrator for all automation activities +- ActionRegistry: Registry for managing action plugins +- WorkflowEngine: Workflow execution engine +- LLMProviderManager: Multi-provider LLM abstraction +- MetricsCollector: Quality metrics collection """ import logging @@ -9,30 +16,37 @@ from typing import Any, cast from codeflow_engine.actions.registry import ActionRegistry -# from codeflow_engine.agents.agents import AgentManager # Not implemented yet from codeflow_engine.ai.core.base import LLMProvider from codeflow_engine.ai.core.providers.manager import LLMProviderManager from codeflow_engine.config import CodeFlowConfig from codeflow_engine.engine import CodeFlowEngine -from codeflow_engine.exceptions import (CodeFlowException, ConfigurationError, - IntegrationError) +from codeflow_engine.exceptions import ( + ActionError, + AuthenticationError, + CodeFlowException, + CodeFlowPermissionError, + ConfigurationError, + IntegrationError, + LLMProviderError, + RateLimitError, + ValidationError, + WorkflowError, +) from codeflow_engine.integrations.base import Integration -# from codeflow_engine.integrations.bitbucket.bitbucket_integration import \ -# BitbucketIntegration # Not implemented yet -# from codeflow_engine.integrations.github.github_integration import GitHubIntegration # Not implemented yet -# from codeflow_engine.integrations.gitlab.gitlab_integration import GitLabIntegration # Not implemented yet -# from codeflow_engine.integrations.jira.jira_integration import JiraIntegration # Not implemented yet -# from codeflow_engine.integrations.registry import 
IntegrationRegistry # Not implemented yet -# from codeflow_engine.integrations.slack.slack_integration import SlackIntegration # Not implemented yet from codeflow_engine.quality.metrics_collector import MetricsCollector -# from codeflow_engine.reporting.report_generator import ReportGenerator # Not implemented yet -from codeflow_engine.security.authorization.enterprise_manager import \ - EnterpriseAuthorizationManager + +# Security - guarded import +EnterpriseAuthorizationManager: type[Any] | None = None +try: + from codeflow_engine.security.authorization.enterprise_manager import ( + EnterpriseAuthorizationManager, + ) +except (ImportError, OSError): + pass + from codeflow_engine.workflows.base import Workflow from codeflow_engine.workflows.engine import WorkflowEngine -# from codeflow_engine.workflows.workflow_manager import WorkflowManager # Not implemented yet - # Import structlog with error handling STRUCTLOG_AVAILABLE: bool try: @@ -48,20 +62,47 @@ # Public API exports __all__ = [ - "ActionRegistry", + # Core engine "CodeFlowEngine", - "MetricsCollector", - "EnterpriseAuthorizationManager", + "CodeFlowConfig", + # Registries + "ActionRegistry", + # AI/LLM "LLMProvider", "LLMProviderManager", + # Integrations + "Integration", + # Workflows + "Workflow", + "WorkflowEngine", + # Quality + "MetricsCollector", + # Security + "EnterpriseAuthorizationManager", + # Exceptions + "ActionError", + "AuthenticationError", + "CodeFlowException", + "CodeFlowPermissionError", + "ConfigurationError", + "IntegrationError", + "LLMProviderError", + "RateLimitError", + "ValidationError", + "WorkflowError", + # Utilities + "configure_logging", ] -# Setup logging defaults - def configure_logging(level: str = "INFO", *, format_json: bool = False) -> None: - """Configure default logging for CodeFlow Engine.""" + """ + Configure default logging for CodeFlow Engine. 
+ Args: + level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) + format_json: If True and structlog is available, use JSON logging + """ if format_json and STRUCTLOG_AVAILABLE and structlog_module: # Structured JSON logging structlog_module.configure( @@ -86,6 +127,7 @@ def configure_logging(level: str = "INFO", *, format_json: bool = False) -> None ) +# Configure logging on import log_level = os.getenv("CODEFLOW_LOG_LEVEL", "INFO") json_logging = os.getenv("CODEFLOW_JSON_LOGGING", "false").lower() == "true" configure_logging(level=log_level, format_json=json_logging) diff --git a/codeflow_engine/actions/__init__.py b/codeflow_engine/actions/__init__.py index 3d2855d..5178031 100644 --- a/codeflow_engine/actions/__init__.py +++ b/codeflow_engine/actions/__init__.py @@ -1,225 +1,234 @@ -""" +""" CodeFlow Engine Actions. -Core automation actions for GitHub PR processing +Core automation actions for GitHub PR processing, organized by category: +- analysis: Code analysis actions (PR review, comment analysis) +- generation: Code/content generation (scaffolding, release notes) +- git: Git operations (patches, branches, releases) +- issues: Issue/PR management (labels, comments) +- quality: Quality checks (security, performance, accessibility) +- ai_actions: AI/LLM-powered features (AutoGen, memory systems) +- platform: Platform detection and integration +- scripts: Script/workflow execution +- maintenance: Maintenance tasks """ -# mypy: disable-error-code=unused-ignore -# mypy: ignore-errors - from typing import Any +from codeflow_engine.actions.registry import ActionRegistry -# Import action classes with error handling for optional dependencies -PlatformDetector: type[Any] | None = None +# Import category modules with error handling for optional dependencies +ai_actions = None try: - from codeflow_engine.actions.platform_detector_enhanced import ( - PlatformDetector as _RealPlatformDetector, - ) - - PlatformDetector = _RealPlatformDetector -except ImportError: + 
from codeflow_engine.actions import ai_actions +except (ImportError, OSError): pass -PrototypeEnhancer: type[Any] | None = None +analysis = None try: - from codeflow_engine.actions.prototype_enhancer import ( - PrototypeEnhancer as _RealPrototypeEnhancer, - ) + from codeflow_engine.actions import analysis +except (ImportError, OSError): + pass - PrototypeEnhancer = _RealPrototypeEnhancer -except ImportError: +base = None +try: + from codeflow_engine.actions import base +except (ImportError, OSError): pass -PlatformDetector: type[Any] | None = None +generation = None try: - from codeflow_engine.actions.platform_detector import ( - PlatformDetector as _RealPlatformDetector, - ) + from codeflow_engine.actions import generation +except (ImportError, OSError): + pass - PlatformDetector = _RealPlatformDetector -except ImportError: +git = None +try: + from codeflow_engine.actions import git +except (ImportError, OSError): pass -AutoGenImplementation: type[Any] | None = None +issues = None try: - from codeflow_engine.actions.autogen_implementation import ( - AutoGenImplementation as _RealAutoGenImplementation, - ) + from codeflow_engine.actions import issues +except (ImportError, OSError): + pass - AutoGenImplementation = _RealAutoGenImplementation -except ImportError: +maintenance = None +try: + from codeflow_engine.actions import maintenance +except (ImportError, OSError): pass -IssueCreator: type[Any] | None = None +platform = None try: - from codeflow_engine.actions.issue_creator import IssueCreator as _RealIssueCreator + from codeflow_engine.actions import platform +except (ImportError, OSError): + pass - IssueCreator = _RealIssueCreator -except ImportError: +quality = None +try: + from codeflow_engine.actions import quality +except (ImportError, OSError): pass -LLMProviderManager: type[Any] | None = None +scripts = None try: - from codeflow_engine.actions.llm import LLMProviderManager as _RealLLMProviderManager + from codeflow_engine.actions import scripts +except 
(ImportError, OSError): + pass - LLMProviderManager = _RealLLMProviderManager +# Re-export commonly used actions for backward compatibility +# Analysis +AICommentAnalyzer: type[Any] | None = None +try: + from codeflow_engine.actions.analysis import AICommentAnalyzer except ImportError: pass -AutoGenAgentSystem: type[Any] | None = None +PRReviewAnalyzer: type[Any] | None = None try: - from codeflow_engine.actions.autogen_multi_agent import ( - AutoGenAgentSystem as _RealAutoGenAgentSystem, - ) - - AutoGenAgentSystem = _RealAutoGenAgentSystem + from codeflow_engine.actions.analysis import PRReviewAnalyzer except ImportError: pass -Mem0MemoryManager: type[Any] | None = None +# Issues +IssueCreator: type[Any] | None = None try: - from codeflow_engine.actions.mem0_memory_integration import ( - Mem0MemoryManager as _RealMem0MemoryManager, - ) - - Mem0MemoryManager = _RealMem0MemoryManager + from codeflow_engine.actions.issues import IssueCreator except ImportError: pass -QualityGates: type[Any] | None = None +PRCommentHandler: type[Any] | None = None try: - from codeflow_engine.actions.quality_gates import QualityGates as _RealQualityGates - - QualityGates = _RealQualityGates + from codeflow_engine.actions.issues import PRCommentHandler except ImportError: pass -LearningMemorySystem: type[Any] | None = None +CreateOrUpdateIssue: type[Any] | None = None try: - from codeflow_engine.actions.learning_memory_system import ( - LearningMemorySystem as _RealLearningMemorySystem, - ) - - LearningMemorySystem = _RealLearningMemorySystem + from codeflow_engine.actions.issues import CreateOrUpdateIssue except ImportError: pass -MultiPlatformIntegrator: type[Any] | None = None +PostComment: type[Any] | None = None try: - from codeflow_engine.actions.multi_platform_integrator import ( - MultiPlatformIntegrator as _RealMultiPlatformIntegrator, - ) - - MultiPlatformIntegrator = _RealMultiPlatformIntegrator + from codeflow_engine.actions.issues import PostComment except ImportError: pass 
-AICommentAnalyzer: type[Any] | None = None +LabelPR: type[Any] | None = None try: - from codeflow_engine.actions.ai_comment_analyzer import ( - AICommentAnalyzer as _RealAICommentAnalyzer, - ) - - AICommentAnalyzer = _RealAICommentAnalyzer + from codeflow_engine.actions.issues import LabelPR except ImportError: pass -PRCommentHandler: type[Any] | None = None +# Git +ApplyGitPatch: type[Any] | None = None try: - from codeflow_engine.actions.handle_pr_comment import ( - PRCommentHandler as _RealPRCommentHandler, - ) - - PRCommentHandler = _RealPRCommentHandler + from codeflow_engine.actions.git import ApplyGitPatch except ImportError: pass -# Utility actions -LabelPR: type[Any] | None = None +# Quality +QualityGates: type[Any] | None = None try: - from codeflow_engine.actions.label_pr import LabelPR as _RealLabelPR - - LabelPR = _RealLabelPR + from codeflow_engine.actions.quality import QualityGates except ImportError: pass -PostComment: type[Any] | None = None +RunSecurityAudit: type[Any] | None = None try: - from codeflow_engine.actions.post_comment import PostComment as _RealPostComment - - PostComment = _RealPostComment + from codeflow_engine.actions.quality import RunSecurityAudit except ImportError: pass -CreateOrUpdateIssue: type[Any] | None = None +CheckPerformanceBudget: type[Any] | None = None try: - from codeflow_engine.actions.create_or_update_issue import ( - CreateOrUpdateIssue as _RealCreateOrUpdateIssue, - ) - - CreateOrUpdateIssue = _RealCreateOrUpdateIssue + from codeflow_engine.actions.quality import CheckPerformanceBudget except ImportError: pass -ApplyGitPatch: type[Any] | None = None +VisualRegressionTest: type[Any] | None = None try: - from codeflow_engine.actions.apply_git_patch import ApplyGitPatch as _RealApplyGitPatch - - ApplyGitPatch = _RealApplyGitPatch + from codeflow_engine.actions.quality import VisualRegressionTest except ImportError: pass -RunSecurityAudit: type[Any] | None = None +# AI Actions +AutoGenImplementation: type[Any] | 
None = None try: - from codeflow_engine.actions.run_security_audit import ( - RunSecurityAudit as _RealRunSecurityAudit, - ) + from codeflow_engine.actions.ai_actions import AutoGenImplementation +except ImportError: + pass - RunSecurityAudit = _RealRunSecurityAudit +AutoGenAgentSystem: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions import AutoGenAgentSystem except ImportError: pass -CheckPerformanceBudget: type[Any] | None = None +Mem0MemoryManager: type[Any] | None = None try: - from codeflow_engine.actions.check_performance_budget import ( - CheckPerformanceBudget as _RealCheckPerformanceBudget, - ) + from codeflow_engine.actions.ai_actions import Mem0MemoryManager +except ImportError: + pass - CheckPerformanceBudget = _RealCheckPerformanceBudget +LearningMemorySystem: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions import LearningMemorySystem except ImportError: pass -VisualRegressionTest: type[Any] | None = None +LLMProviderManager: type[Any] | None = None try: - from codeflow_engine.actions.visual_regression_test import ( - VisualRegressionTest as _RealVisualRegressionTest, - ) + from codeflow_engine.actions.ai_actions.llm import LLMProviderManager +except ImportError: + pass - VisualRegressionTest = _RealVisualRegressionTest +# Platform +PlatformDetector: type[Any] | None = None +try: + from codeflow_engine.actions.platform import PlatformDetector except ImportError: pass -GenerateReleaseNotes: type[Any] | None = None +MultiPlatformIntegrator: type[Any] | None = None try: - from codeflow_engine.actions.generate_release_notes import ( - GenerateReleaseNotes as _RealGenerateReleaseNotes, - ) + from codeflow_engine.actions.platform import MultiPlatformIntegrator +except ImportError: + pass - GenerateReleaseNotes = _RealGenerateReleaseNotes +PrototypeEnhancer: type[Any] | None = None +try: + from codeflow_engine.actions.platform import PrototypeEnhancer except ImportError: pass -AIImplementationRoadmap: type[Any] 
| None = None +# Generation +GenerateReleaseNotes: type[Any] | None = None try: - from codeflow_engine.actions.ai_implementation_roadmap import AIImplementationRoadmap + from codeflow_engine.actions.generation import GenerateReleaseNotes except ImportError: - AIImplementationRoadmap = None + pass -# All available actions +# All available exports __all__ = [ + # Registry + "ActionRegistry", + # Category modules + "ai_actions", + "analysis", + "base", + "generation", + "git", + "issues", + "maintenance", + "platform", + "quality", + "scripts", + # Backward compatible action exports "AICommentAnalyzer", - "AIImplementationRoadmap", "ApplyGitPatch", "AutoGenAgentSystem", "AutoGenImplementation", @@ -228,14 +237,12 @@ "GenerateReleaseNotes", "IssueCreator", "LLMProviderManager", - # Utility actions "LabelPR", "LearningMemorySystem", "Mem0MemoryManager", "MultiPlatformIntegrator", "PRCommentHandler", - # Core AI-powered actions - "PlatformDetector", + "PRReviewAnalyzer", "PlatformDetector", "PostComment", "PrototypeEnhancer", diff --git a/codeflow_engine/actions/ai_actions/__init__.py b/codeflow_engine/actions/ai_actions/__init__.py new file mode 100644 index 0000000..db90918 --- /dev/null +++ b/codeflow_engine/actions/ai_actions/__init__.py @@ -0,0 +1,61 @@ +""" +CodeFlow Engine - AI Actions + +Actions for AI/LLM-powered features including AutoGen, memory systems, and summarization. 
+""" + +from typing import Any + +# Import with error handling for optional dependencies +AutoGenImplementation: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions.autogen_implementation import AutoGenImplementation +except ImportError: + pass + +AutoGenAgentSystem: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions.autogen_multi_agent import AutoGenAgentSystem +except ImportError: + pass + +Mem0MemoryManager: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions.mem0_memory_integration import Mem0MemoryManager +except ImportError: + pass + +LearningMemorySystem: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions.learning_memory_system import LearningMemorySystem +except ImportError: + pass + +ConfigurableLLMProvider: type[Any] | None = None +try: + from codeflow_engine.actions.ai_actions.configurable_llm_provider import ConfigurableLLMProvider +except ImportError: + pass + +# Submodule exports with guarded imports +autogen = None +try: + from codeflow_engine.actions.ai_actions import autogen +except (ImportError, OSError): + pass + +llm = None +try: + from codeflow_engine.actions.ai_actions import llm +except (ImportError, OSError): + pass + +__all__ = [ + "AutoGenAgentSystem", + "AutoGenImplementation", + "ConfigurableLLMProvider", + "LearningMemorySystem", + "Mem0MemoryManager", + "autogen", + "llm", +] diff --git a/codeflow_engine/actions/autogen/__init__.py b/codeflow_engine/actions/ai_actions/autogen/__init__.py similarity index 100% rename from codeflow_engine/actions/autogen/__init__.py rename to codeflow_engine/actions/ai_actions/autogen/__init__.py diff --git a/codeflow_engine/actions/autogen/agents.py b/codeflow_engine/actions/ai_actions/autogen/agents.py similarity index 100% rename from codeflow_engine/actions/autogen/agents.py rename to codeflow_engine/actions/ai_actions/autogen/agents.py diff --git a/codeflow_engine/actions/autogen/models.py 
b/codeflow_engine/actions/ai_actions/autogen/models.py similarity index 100% rename from codeflow_engine/actions/autogen/models.py rename to codeflow_engine/actions/ai_actions/autogen/models.py diff --git a/codeflow_engine/actions/autogen/system.py b/codeflow_engine/actions/ai_actions/autogen/system.py similarity index 100% rename from codeflow_engine/actions/autogen/system.py rename to codeflow_engine/actions/ai_actions/autogen/system.py diff --git a/codeflow_engine/actions/autogen_implementation.py b/codeflow_engine/actions/ai_actions/autogen_implementation.py similarity index 100% rename from codeflow_engine/actions/autogen_implementation.py rename to codeflow_engine/actions/ai_actions/autogen_implementation.py diff --git a/codeflow_engine/actions/autogen_multi_agent.py b/codeflow_engine/actions/ai_actions/autogen_multi_agent.py similarity index 100% rename from codeflow_engine/actions/autogen_multi_agent.py rename to codeflow_engine/actions/ai_actions/autogen_multi_agent.py diff --git a/codeflow_engine/actions/configurable_llm_provider.py b/codeflow_engine/actions/ai_actions/configurable_llm_provider.py similarity index 100% rename from codeflow_engine/actions/configurable_llm_provider.py rename to codeflow_engine/actions/ai_actions/configurable_llm_provider.py diff --git a/codeflow_engine/actions/learning_memory_system.py b/codeflow_engine/actions/ai_actions/learning_memory_system.py similarity index 100% rename from codeflow_engine/actions/learning_memory_system.py rename to codeflow_engine/actions/ai_actions/learning_memory_system.py diff --git a/codeflow_engine/actions/llm/__init__.py b/codeflow_engine/actions/ai_actions/llm/__init__.py similarity index 100% rename from codeflow_engine/actions/llm/__init__.py rename to codeflow_engine/actions/ai_actions/llm/__init__.py diff --git a/codeflow_engine/actions/llm/base.py b/codeflow_engine/actions/ai_actions/llm/base.py similarity index 100% rename from codeflow_engine/actions/llm/base.py rename to 
codeflow_engine/actions/ai_actions/llm/base.py diff --git a/codeflow_engine/actions/llm/manager.py b/codeflow_engine/actions/ai_actions/llm/manager.py similarity index 100% rename from codeflow_engine/actions/llm/manager.py rename to codeflow_engine/actions/ai_actions/llm/manager.py diff --git a/codeflow_engine/actions/llm/providers/__init__.py b/codeflow_engine/actions/ai_actions/llm/providers/__init__.py similarity index 100% rename from codeflow_engine/actions/llm/providers/__init__.py rename to codeflow_engine/actions/ai_actions/llm/providers/__init__.py diff --git a/codeflow_engine/actions/llm/providers/anthropic.py b/codeflow_engine/actions/ai_actions/llm/providers/anthropic.py similarity index 100% rename from codeflow_engine/actions/llm/providers/anthropic.py rename to codeflow_engine/actions/ai_actions/llm/providers/anthropic.py diff --git a/codeflow_engine/actions/llm/providers/azure_openai.py b/codeflow_engine/actions/ai_actions/llm/providers/azure_openai.py similarity index 100% rename from codeflow_engine/actions/llm/providers/azure_openai.py rename to codeflow_engine/actions/ai_actions/llm/providers/azure_openai.py diff --git a/codeflow_engine/actions/llm/providers/groq.py b/codeflow_engine/actions/ai_actions/llm/providers/groq.py similarity index 100% rename from codeflow_engine/actions/llm/providers/groq.py rename to codeflow_engine/actions/ai_actions/llm/providers/groq.py diff --git a/codeflow_engine/actions/llm/providers/mistral.py b/codeflow_engine/actions/ai_actions/llm/providers/mistral.py similarity index 100% rename from codeflow_engine/actions/llm/providers/mistral.py rename to codeflow_engine/actions/ai_actions/llm/providers/mistral.py diff --git a/codeflow_engine/actions/llm/providers/openai.py b/codeflow_engine/actions/ai_actions/llm/providers/openai.py similarity index 100% rename from codeflow_engine/actions/llm/providers/openai.py rename to codeflow_engine/actions/ai_actions/llm/providers/openai.py diff --git 
a/codeflow_engine/actions/llm/types.py b/codeflow_engine/actions/ai_actions/llm/types.py similarity index 100% rename from codeflow_engine/actions/llm/types.py rename to codeflow_engine/actions/ai_actions/llm/types.py diff --git a/codeflow_engine/actions/mem0_memory_integration.py b/codeflow_engine/actions/ai_actions/mem0_memory_integration.py similarity index 100% rename from codeflow_engine/actions/mem0_memory_integration.py rename to codeflow_engine/actions/ai_actions/mem0_memory_integration.py diff --git a/codeflow_engine/actions/summarize_pr_with_ai.py b/codeflow_engine/actions/ai_actions/summarize_pr_with_ai.py similarity index 100% rename from codeflow_engine/actions/summarize_pr_with_ai.py rename to codeflow_engine/actions/ai_actions/summarize_pr_with_ai.py diff --git a/codeflow_engine/actions/analysis/__init__.py b/codeflow_engine/actions/analysis/__init__.py new file mode 100644 index 0000000..58c61f9 --- /dev/null +++ b/codeflow_engine/actions/analysis/__init__.py @@ -0,0 +1,25 @@ +""" +CodeFlow Engine - Analysis Actions + +Actions for analyzing code, PRs, and comments. 
+""" + +from typing import Any + +# Import with error handling for optional dependencies +AICommentAnalyzer: type[Any] | None = None +try: + from codeflow_engine.actions.analysis.ai_comment_analyzer import AICommentAnalyzer +except ImportError: + pass + +PRReviewAnalyzer: type[Any] | None = None +try: + from codeflow_engine.actions.analysis.pr_review_analyzer import PRReviewAnalyzer +except ImportError: + pass + +__all__ = [ + "AICommentAnalyzer", + "PRReviewAnalyzer", +] diff --git a/codeflow_engine/actions/ai_comment_analyzer.py b/codeflow_engine/actions/analysis/ai_comment_analyzer.py similarity index 100% rename from codeflow_engine/actions/ai_comment_analyzer.py rename to codeflow_engine/actions/analysis/ai_comment_analyzer.py diff --git a/codeflow_engine/actions/analyze_console_logs.py b/codeflow_engine/actions/analysis/analyze_console_logs.py similarity index 100% rename from codeflow_engine/actions/analyze_console_logs.py rename to codeflow_engine/actions/analysis/analyze_console_logs.py diff --git a/codeflow_engine/actions/extract_pr_comment_data.py b/codeflow_engine/actions/analysis/extract_pr_comment_data.py similarity index 100% rename from codeflow_engine/actions/extract_pr_comment_data.py rename to codeflow_engine/actions/analysis/extract_pr_comment_data.py diff --git a/codeflow_engine/actions/find_dead_code.py b/codeflow_engine/actions/analysis/find_dead_code.py similarity index 100% rename from codeflow_engine/actions/find_dead_code.py rename to codeflow_engine/actions/analysis/find_dead_code.py diff --git a/codeflow_engine/actions/pr_review_analyzer.py b/codeflow_engine/actions/analysis/pr_review_analyzer.py similarity index 100% rename from codeflow_engine/actions/pr_review_analyzer.py rename to codeflow_engine/actions/analysis/pr_review_analyzer.py diff --git a/codeflow_engine/actions/generation/__init__.py b/codeflow_engine/actions/generation/__init__.py new file mode 100644 index 0000000..2c79028 --- /dev/null +++ 
b/codeflow_engine/actions/generation/__init__.py @@ -0,0 +1,32 @@ +""" +CodeFlow Engine - Generation Actions + +Actions for generating code, documentation, and scaffolding. +""" + +from typing import Any + +# Import with error handling for optional dependencies +GenerateBarrelFile: type[Any] | None = None +try: + from codeflow_engine.actions.generation.generate_barrel_file import GenerateBarrelFile +except ImportError: + pass + +GeneratePropTable: type[Any] | None = None +try: + from codeflow_engine.actions.generation.generate_prop_table import GeneratePropTable +except ImportError: + pass + +GenerateReleaseNotes: type[Any] | None = None +try: + from codeflow_engine.actions.generation.generate_release_notes import GenerateReleaseNotes +except ImportError: + pass + +__all__ = [ + "GenerateBarrelFile", + "GeneratePropTable", + "GenerateReleaseNotes", +] diff --git a/codeflow_engine/actions/generate_barrel_file.py b/codeflow_engine/actions/generation/generate_barrel_file.py similarity index 100% rename from codeflow_engine/actions/generate_barrel_file.py rename to codeflow_engine/actions/generation/generate_barrel_file.py diff --git a/codeflow_engine/actions/generate_prop_table.py b/codeflow_engine/actions/generation/generate_prop_table.py similarity index 100% rename from codeflow_engine/actions/generate_prop_table.py rename to codeflow_engine/actions/generation/generate_prop_table.py diff --git a/codeflow_engine/actions/generate_release_notes.py b/codeflow_engine/actions/generation/generate_release_notes.py similarity index 100% rename from codeflow_engine/actions/generate_release_notes.py rename to codeflow_engine/actions/generation/generate_release_notes.py diff --git a/codeflow_engine/actions/scaffold_api_route.py b/codeflow_engine/actions/generation/scaffold_api_route.py similarity index 100% rename from codeflow_engine/actions/scaffold_api_route.py rename to codeflow_engine/actions/generation/scaffold_api_route.py diff --git 
a/codeflow_engine/actions/scaffold_component.py b/codeflow_engine/actions/generation/scaffold_component.py similarity index 100% rename from codeflow_engine/actions/scaffold_component.py rename to codeflow_engine/actions/generation/scaffold_component.py diff --git a/codeflow_engine/actions/scaffold_shared_hook.py b/codeflow_engine/actions/generation/scaffold_shared_hook.py similarity index 100% rename from codeflow_engine/actions/scaffold_shared_hook.py rename to codeflow_engine/actions/generation/scaffold_shared_hook.py diff --git a/codeflow_engine/actions/svg_to_component.py b/codeflow_engine/actions/generation/svg_to_component.py similarity index 100% rename from codeflow_engine/actions/svg_to_component.py rename to codeflow_engine/actions/generation/svg_to_component.py diff --git a/codeflow_engine/actions/git/__init__.py b/codeflow_engine/actions/git/__init__.py new file mode 100644 index 0000000..83feee3 --- /dev/null +++ b/codeflow_engine/actions/git/__init__.py @@ -0,0 +1,32 @@ +""" +CodeFlow Engine - Git Actions + +Actions for Git operations like patches, branches, and releases. 
+""" + +from typing import Any + +# Import with error handling for optional dependencies +ApplyGitPatch: type[Any] | None = None +try: + from codeflow_engine.actions.git.apply_git_patch import ApplyGitPatch +except ImportError: + pass + +DeleteBranch: type[Any] | None = None +try: + from codeflow_engine.actions.git.delete_branch import DeleteBranch +except ImportError: + pass + +CreateGitHubRelease: type[Any] | None = None +try: + from codeflow_engine.actions.git.create_github_release import CreateGitHubRelease +except ImportError: + pass + +__all__ = [ + "ApplyGitPatch", + "CreateGitHubRelease", + "DeleteBranch", +] diff --git a/codeflow_engine/actions/apply_git_patch.py b/codeflow_engine/actions/git/apply_git_patch.py similarity index 100% rename from codeflow_engine/actions/apply_git_patch.py rename to codeflow_engine/actions/git/apply_git_patch.py diff --git a/codeflow_engine/actions/create_github_release.py b/codeflow_engine/actions/git/create_github_release.py similarity index 100% rename from codeflow_engine/actions/create_github_release.py rename to codeflow_engine/actions/git/create_github_release.py diff --git a/codeflow_engine/actions/delete_branch.py b/codeflow_engine/actions/git/delete_branch.py similarity index 100% rename from codeflow_engine/actions/delete_branch.py rename to codeflow_engine/actions/git/delete_branch.py diff --git a/codeflow_engine/actions/find_merged_branches.py b/codeflow_engine/actions/git/find_merged_branches.py similarity index 100% rename from codeflow_engine/actions/find_merged_branches.py rename to codeflow_engine/actions/git/find_merged_branches.py diff --git a/codeflow_engine/actions/issues/__init__.py b/codeflow_engine/actions/issues/__init__.py new file mode 100644 index 0000000..213ea44 --- /dev/null +++ b/codeflow_engine/actions/issues/__init__.py @@ -0,0 +1,60 @@ +""" +CodeFlow Engine - Issue/PR Actions + +Actions for managing issues, PRs, comments, and labels. 
+""" + +from typing import Any + +# Import with error handling for optional dependencies +IssueCreator: type[Any] | None = None +try: + from codeflow_engine.actions.issues.issue_creator import IssueCreator +except ImportError: + pass + +PRCommentHandler: type[Any] | None = None +try: + from codeflow_engine.actions.issues.handle_pr_comment import PRCommentHandler +except ImportError: + pass + +CreateOrUpdateIssue: type[Any] | None = None +try: + from codeflow_engine.actions.issues.create_or_update_issue import CreateOrUpdateIssue +except ImportError: + pass + +PostComment: type[Any] | None = None +try: + from codeflow_engine.actions.issues.post_comment import PostComment +except ImportError: + pass + +LabelPR: type[Any] | None = None +try: + from codeflow_engine.actions.issues.label_pr import LabelPR +except ImportError: + pass + +LabelPRBySize: type[Any] | None = None +try: + from codeflow_engine.actions.issues.label_pr_by_size import LabelPRBySize +except ImportError: + pass + +FindStaleIssuesOrPRs: type[Any] | None = None +try: + from codeflow_engine.actions.issues.find_stale_issues_or_prs import FindStaleIssuesOrPRs +except ImportError: + pass + +__all__ = [ + "CreateOrUpdateIssue", + "FindStaleIssuesOrPRs", + "IssueCreator", + "LabelPR", + "LabelPRBySize", + "PRCommentHandler", + "PostComment", +] diff --git a/codeflow_engine/actions/create_or_update_issue.py b/codeflow_engine/actions/issues/create_or_update_issue.py similarity index 100% rename from codeflow_engine/actions/create_or_update_issue.py rename to codeflow_engine/actions/issues/create_or_update_issue.py diff --git a/codeflow_engine/actions/find_stale_issues_or_prs.py b/codeflow_engine/actions/issues/find_stale_issues_or_prs.py similarity index 100% rename from codeflow_engine/actions/find_stale_issues_or_prs.py rename to codeflow_engine/actions/issues/find_stale_issues_or_prs.py diff --git a/codeflow_engine/actions/handle_pr_comment.py b/codeflow_engine/actions/issues/handle_pr_comment.py similarity 
index 100% rename from codeflow_engine/actions/handle_pr_comment.py rename to codeflow_engine/actions/issues/handle_pr_comment.py diff --git a/codeflow_engine/actions/issue_creator.py b/codeflow_engine/actions/issues/issue_creator.py similarity index 100% rename from codeflow_engine/actions/issue_creator.py rename to codeflow_engine/actions/issues/issue_creator.py diff --git a/codeflow_engine/actions/label_pr.py b/codeflow_engine/actions/issues/label_pr.py similarity index 100% rename from codeflow_engine/actions/label_pr.py rename to codeflow_engine/actions/issues/label_pr.py diff --git a/codeflow_engine/actions/label_pr_by_size.py b/codeflow_engine/actions/issues/label_pr_by_size.py similarity index 100% rename from codeflow_engine/actions/label_pr_by_size.py rename to codeflow_engine/actions/issues/label_pr_by_size.py diff --git a/codeflow_engine/actions/post_comment.py b/codeflow_engine/actions/issues/post_comment.py similarity index 100% rename from codeflow_engine/actions/post_comment.py rename to codeflow_engine/actions/issues/post_comment.py diff --git a/codeflow_engine/actions/maintenance/__init__.py b/codeflow_engine/actions/maintenance/__init__.py new file mode 100644 index 0000000..7e1ee83 --- /dev/null +++ b/codeflow_engine/actions/maintenance/__init__.py @@ -0,0 +1,53 @@ +""" +CodeFlow Engine - Maintenance Actions + +Actions for maintenance tasks like updating dependencies, docs, and code quality. 
+""" + +from typing import Any + +# Import with error handling for optional dependencies +UpdateDependency: type[Any] | None = None +try: + from codeflow_engine.actions.maintenance.update_dependency import UpdateDependency +except ImportError: + pass + +UpdateDocsFile: type[Any] | None = None +try: + from codeflow_engine.actions.maintenance.update_docs_file import UpdateDocsFile +except ImportError: + pass + +UpdateMigrationPlan: type[Any] | None = None +try: + from codeflow_engine.actions.maintenance.update_migration_plan import UpdateMigrationPlan +except ImportError: + pass + +FindLargeAssets: type[Any] | None = None +try: + from codeflow_engine.actions.maintenance.find_large_assets import FindLargeAssets +except ImportError: + pass + +EnforceImportOrder: type[Any] | None = None +try: + from codeflow_engine.actions.maintenance.enforce_import_order import EnforceImportOrder +except ImportError: + pass + +GenerateTodoReport: type[Any] | None = None +try: + from codeflow_engine.actions.maintenance.generate_todo_report import GenerateTodoReport +except ImportError: + pass + +__all__ = [ + "EnforceImportOrder", + "FindLargeAssets", + "GenerateTodoReport", + "UpdateDependency", + "UpdateDocsFile", + "UpdateMigrationPlan", +] diff --git a/codeflow_engine/actions/enforce_import_order.py b/codeflow_engine/actions/maintenance/enforce_import_order.py similarity index 100% rename from codeflow_engine/actions/enforce_import_order.py rename to codeflow_engine/actions/maintenance/enforce_import_order.py diff --git a/codeflow_engine/actions/find_large_assets.py b/codeflow_engine/actions/maintenance/find_large_assets.py similarity index 100% rename from codeflow_engine/actions/find_large_assets.py rename to codeflow_engine/actions/maintenance/find_large_assets.py diff --git a/codeflow_engine/actions/generate_todo_report.py b/codeflow_engine/actions/maintenance/generate_todo_report.py similarity index 100% rename from codeflow_engine/actions/generate_todo_report.py rename to 
codeflow_engine/actions/maintenance/generate_todo_report.py diff --git a/codeflow_engine/actions/update_dependency.py b/codeflow_engine/actions/maintenance/update_dependency.py similarity index 100% rename from codeflow_engine/actions/update_dependency.py rename to codeflow_engine/actions/maintenance/update_dependency.py diff --git a/codeflow_engine/actions/update_docs_file.py b/codeflow_engine/actions/maintenance/update_docs_file.py similarity index 100% rename from codeflow_engine/actions/update_docs_file.py rename to codeflow_engine/actions/maintenance/update_docs_file.py diff --git a/codeflow_engine/actions/update_migration_plan.py b/codeflow_engine/actions/maintenance/update_migration_plan.py similarity index 100% rename from codeflow_engine/actions/update_migration_plan.py rename to codeflow_engine/actions/maintenance/update_migration_plan.py diff --git a/codeflow_engine/actions/platform/__init__.py b/codeflow_engine/actions/platform/__init__.py new file mode 100644 index 0000000..af17e26 --- /dev/null +++ b/codeflow_engine/actions/platform/__init__.py @@ -0,0 +1,48 @@ +""" +CodeFlow Engine - Platform Actions + +Actions for platform detection, integration, and prototype enhancement. +Includes file analysis and scoring utilities. 
+""" + +from typing import Any + +from .config import PlatformConfigManager +from .detector import PlatformDetector +from .file_analyzer import FileAnalyzer +from .models import PlatformDetectorInputs, PlatformDetectorOutputs +from .patterns import PlatformPatterns +from .scoring import PlatformScoringEngine +from .utils import calculate_confidence_score, get_confidence_level + +# Import additional platform-related actions with error handling +MultiPlatformIntegrator: type[Any] | None = None +try: + from codeflow_engine.actions.platform.multi_platform_integrator import ( + MultiPlatformIntegrator, + ) +except ImportError: + pass + +PrototypeEnhancer: type[Any] | None = None +try: + from codeflow_engine.actions.platform.prototype_enhancer import PrototypeEnhancer +except ImportError: + pass + +__all__ = [ + # Core platform detection + "FileAnalyzer", + "PlatformConfigManager", + "PlatformDetector", + "PlatformDetectorInputs", + "PlatformDetectorOutputs", + "PlatformPatterns", + "PlatformScoringEngine", + # Platform integration + "MultiPlatformIntegrator", + "PrototypeEnhancer", + # Utilities + "calculate_confidence_score", + "get_confidence_level", +] diff --git a/codeflow_engine/actions/platform_detection/analysis/__init__.py b/codeflow_engine/actions/platform/analysis/__init__.py similarity index 100% rename from codeflow_engine/actions/platform_detection/analysis/__init__.py rename to codeflow_engine/actions/platform/analysis/__init__.py diff --git a/codeflow_engine/actions/platform_detection/analysis/base.py b/codeflow_engine/actions/platform/analysis/base.py similarity index 100% rename from codeflow_engine/actions/platform_detection/analysis/base.py rename to codeflow_engine/actions/platform/analysis/base.py diff --git a/codeflow_engine/actions/platform_detection/analysis/handlers.py b/codeflow_engine/actions/platform/analysis/handlers.py similarity index 100% rename from codeflow_engine/actions/platform_detection/analysis/handlers.py rename to 
codeflow_engine/actions/platform/analysis/handlers.py diff --git a/codeflow_engine/actions/platform_detection/analysis/patterns.py b/codeflow_engine/actions/platform/analysis/patterns.py similarity index 100% rename from codeflow_engine/actions/platform_detection/analysis/patterns.py rename to codeflow_engine/actions/platform/analysis/patterns.py diff --git a/codeflow_engine/actions/platform_detection/config.py b/codeflow_engine/actions/platform/config.py similarity index 100% rename from codeflow_engine/actions/platform_detection/config.py rename to codeflow_engine/actions/platform/config.py diff --git a/codeflow_engine/actions/platform_detection/detector.py b/codeflow_engine/actions/platform/detector.py similarity index 100% rename from codeflow_engine/actions/platform_detection/detector.py rename to codeflow_engine/actions/platform/detector.py diff --git a/codeflow_engine/actions/platform_detection/file_analyzer.py b/codeflow_engine/actions/platform/file_analyzer.py similarity index 100% rename from codeflow_engine/actions/platform_detection/file_analyzer.py rename to codeflow_engine/actions/platform/file_analyzer.py diff --git a/codeflow_engine/actions/platform_detection/inputs.py b/codeflow_engine/actions/platform/inputs.py similarity index 100% rename from codeflow_engine/actions/platform_detection/inputs.py rename to codeflow_engine/actions/platform/inputs.py diff --git a/codeflow_engine/actions/platform_detection/models.py b/codeflow_engine/actions/platform/models.py similarity index 100% rename from codeflow_engine/actions/platform_detection/models.py rename to codeflow_engine/actions/platform/models.py diff --git a/codeflow_engine/actions/multi_platform_integrator.py b/codeflow_engine/actions/platform/multi_platform_integrator.py similarity index 100% rename from codeflow_engine/actions/multi_platform_integrator.py rename to codeflow_engine/actions/platform/multi_platform_integrator.py diff --git a/codeflow_engine/actions/platform_detection/patterns.py 
b/codeflow_engine/actions/platform/patterns.py similarity index 100% rename from codeflow_engine/actions/platform_detection/patterns.py rename to codeflow_engine/actions/platform/patterns.py diff --git a/codeflow_engine/actions/platform_detector.py b/codeflow_engine/actions/platform/platform_detector.py similarity index 100% rename from codeflow_engine/actions/platform_detector.py rename to codeflow_engine/actions/platform/platform_detector.py diff --git a/codeflow_engine/actions/prototype_enhancement/__init__.py b/codeflow_engine/actions/platform/prototype_enhancement/__init__.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/__init__.py rename to codeflow_engine/actions/platform/prototype_enhancement/__init__.py diff --git a/codeflow_engine/actions/prototype_enhancement/config_loader.py b/codeflow_engine/actions/platform/prototype_enhancement/config_loader.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/config_loader.py rename to codeflow_engine/actions/platform/prototype_enhancement/config_loader.py diff --git a/codeflow_engine/actions/prototype_enhancement/enhancement_strategies.py b/codeflow_engine/actions/platform/prototype_enhancement/enhancement_strategies.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/enhancement_strategies.py rename to codeflow_engine/actions/platform/prototype_enhancement/enhancement_strategies.py diff --git a/codeflow_engine/actions/prototype_enhancement/enhancer.py b/codeflow_engine/actions/platform/prototype_enhancement/enhancer.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/enhancer.py rename to codeflow_engine/actions/platform/prototype_enhancement/enhancer.py diff --git a/codeflow_engine/actions/prototype_enhancement/file_generators.py b/codeflow_engine/actions/platform/prototype_enhancement/file_generators.py similarity index 100% rename from 
codeflow_engine/actions/prototype_enhancement/file_generators.py rename to codeflow_engine/actions/platform/prototype_enhancement/file_generators.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/__init__.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/__init__.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/__init__.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/__init__.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/base_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/base_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/base_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/base_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/ci_cd_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/ci_cd_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/ci_cd_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/ci_cd_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/config_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/config_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/config_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/config_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/deployment_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/deployment_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/deployment_generator.py rename to 
codeflow_engine/actions/platform/prototype_enhancement/generators/deployment_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/docker_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/docker_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/docker_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/docker_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/docs_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/docs_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/docs_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/docs_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/security_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/security_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/security_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/security_generator.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/template_utils.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/template_utils.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/template_utils.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/template_utils.py diff --git a/codeflow_engine/actions/prototype_enhancement/generators/test_generator.py b/codeflow_engine/actions/platform/prototype_enhancement/generators/test_generator.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/generators/test_generator.py rename to codeflow_engine/actions/platform/prototype_enhancement/generators/test_generator.py 
diff --git a/codeflow_engine/actions/prototype_enhancement/platform_configs.py b/codeflow_engine/actions/platform/prototype_enhancement/platform_configs.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/platform_configs.py rename to codeflow_engine/actions/platform/prototype_enhancement/platform_configs.py diff --git a/codeflow_engine/actions/prototype_enhancement/template_metadata.py b/codeflow_engine/actions/platform/prototype_enhancement/template_metadata.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancement/template_metadata.py rename to codeflow_engine/actions/platform/prototype_enhancement/template_metadata.py diff --git a/codeflow_engine/actions/prototype_enhancer.py b/codeflow_engine/actions/platform/prototype_enhancer.py similarity index 100% rename from codeflow_engine/actions/prototype_enhancer.py rename to codeflow_engine/actions/platform/prototype_enhancer.py diff --git a/codeflow_engine/actions/platform_detection/schema.py b/codeflow_engine/actions/platform/schema.py similarity index 100% rename from codeflow_engine/actions/platform_detection/schema.py rename to codeflow_engine/actions/platform/schema.py diff --git a/codeflow_engine/actions/platform_detection/scoring.py b/codeflow_engine/actions/platform/scoring.py similarity index 100% rename from codeflow_engine/actions/platform_detection/scoring.py rename to codeflow_engine/actions/platform/scoring.py diff --git a/codeflow_engine/actions/platform_detection/utils.py b/codeflow_engine/actions/platform/utils.py similarity index 100% rename from codeflow_engine/actions/platform_detection/utils.py rename to codeflow_engine/actions/platform/utils.py diff --git a/codeflow_engine/actions/platform_detection/__init__.py b/codeflow_engine/actions/platform_detection/__init__.py deleted file mode 100644 index d68befa..0000000 --- a/codeflow_engine/actions/platform_detection/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Platform Detection Module - 
-Detects and analyzes rapid prototyping platforms. -""" - -from .config import PlatformConfigManager -from .detector import PlatformDetector -from .file_analyzer import FileAnalyzer -from .models import PlatformDetectorInputs, PlatformDetectorOutputs -from .patterns import PlatformPatterns -from .scoring import PlatformScoringEngine -from .utils import calculate_confidence_score, get_confidence_level - -__all__ = [ - "PlatformDetector", - "FileAnalyzer", - "PlatformConfigManager", - "PlatformScoringEngine", - "PlatformDetectorInputs", - "PlatformDetectorOutputs", - "PlatformPatterns", - "calculate_confidence_score", - "get_confidence_level" -] diff --git a/codeflow_engine/actions/quality/__init__.py b/codeflow_engine/actions/quality/__init__.py new file mode 100644 index 0000000..da89818 --- /dev/null +++ b/codeflow_engine/actions/quality/__init__.py @@ -0,0 +1,60 @@ +""" +CodeFlow Engine - Quality Actions + +Actions for quality checks, security audits, and performance budgets. +""" + +from typing import Any + +# Import with error handling for optional dependencies +QualityGates: type[Any] | None = None +try: + from codeflow_engine.actions.quality.quality_gates import QualityGates +except ImportError: + pass + +CheckPerformanceBudget: type[Any] | None = None +try: + from codeflow_engine.actions.quality.check_performance_budget import CheckPerformanceBudget +except ImportError: + pass + +CheckLockfileDrift: type[Any] | None = None +try: + from codeflow_engine.actions.quality.check_lockfile_drift import CheckLockfileDrift +except ImportError: + pass + +CheckDependencyLicenses: type[Any] | None = None +try: + from codeflow_engine.actions.quality.check_dependency_licenses import CheckDependencyLicenses +except ImportError: + pass + +RunSecurityAudit: type[Any] | None = None +try: + from codeflow_engine.actions.quality.run_security_audit import RunSecurityAudit +except ImportError: + pass + +RunAccessibilityAudit: type[Any] | None = None +try: + from 
codeflow_engine.actions.quality.run_accessibility_audit import RunAccessibilityAudit +except ImportError: + pass + +VisualRegressionTest: type[Any] | None = None +try: + from codeflow_engine.actions.quality.visual_regression_test import VisualRegressionTest +except ImportError: + pass + +__all__ = [ + "CheckDependencyLicenses", + "CheckLockfileDrift", + "CheckPerformanceBudget", + "QualityGates", + "RunAccessibilityAudit", + "RunSecurityAudit", + "VisualRegressionTest", +] diff --git a/codeflow_engine/actions/check_dependency_licenses.py b/codeflow_engine/actions/quality/check_dependency_licenses.py similarity index 100% rename from codeflow_engine/actions/check_dependency_licenses.py rename to codeflow_engine/actions/quality/check_dependency_licenses.py diff --git a/codeflow_engine/actions/check_lockfile_drift.py b/codeflow_engine/actions/quality/check_lockfile_drift.py similarity index 100% rename from codeflow_engine/actions/check_lockfile_drift.py rename to codeflow_engine/actions/quality/check_lockfile_drift.py diff --git a/codeflow_engine/actions/check_performance_budget.py b/codeflow_engine/actions/quality/check_performance_budget.py similarity index 100% rename from codeflow_engine/actions/check_performance_budget.py rename to codeflow_engine/actions/quality/check_performance_budget.py diff --git a/codeflow_engine/actions/quality_gates/__init__.py b/codeflow_engine/actions/quality/gates/__init__.py similarity index 100% rename from codeflow_engine/actions/quality_gates/__init__.py rename to codeflow_engine/actions/quality/gates/__init__.py diff --git a/codeflow_engine/actions/quality_gates/evaluator.py b/codeflow_engine/actions/quality/gates/evaluator.py similarity index 100% rename from codeflow_engine/actions/quality_gates/evaluator.py rename to codeflow_engine/actions/quality/gates/evaluator.py diff --git a/codeflow_engine/actions/quality_gates/models.py b/codeflow_engine/actions/quality/gates/models.py similarity index 100% rename from 
codeflow_engine/actions/quality_gates/models.py rename to codeflow_engine/actions/quality/gates/models.py diff --git a/codeflow_engine/actions/quality_gates.py b/codeflow_engine/actions/quality/quality_gates.py similarity index 100% rename from codeflow_engine/actions/quality_gates.py rename to codeflow_engine/actions/quality/quality_gates.py diff --git a/codeflow_engine/actions/run_accessibility_audit.py b/codeflow_engine/actions/quality/run_accessibility_audit.py similarity index 100% rename from codeflow_engine/actions/run_accessibility_audit.py rename to codeflow_engine/actions/quality/run_accessibility_audit.py diff --git a/codeflow_engine/actions/run_security_audit.py b/codeflow_engine/actions/quality/run_security_audit.py similarity index 100% rename from codeflow_engine/actions/run_security_audit.py rename to codeflow_engine/actions/quality/run_security_audit.py diff --git a/codeflow_engine/actions/visual_regression_test.py b/codeflow_engine/actions/quality/visual_regression_test.py similarity index 100% rename from codeflow_engine/actions/visual_regression_test.py rename to codeflow_engine/actions/quality/visual_regression_test.py diff --git a/codeflow_engine/actions/scripts/__init__.py b/codeflow_engine/actions/scripts/__init__.py new file mode 100644 index 0000000..c663351 --- /dev/null +++ b/codeflow_engine/actions/scripts/__init__.py @@ -0,0 +1,53 @@ +""" +CodeFlow Engine - Script Actions + +Actions for running scripts, tests, deployments, and database operations. 
+""" + +from typing import Any + +# Import with error handling for optional dependencies +RunScript: type[Any] | None = None +try: + from codeflow_engine.actions.scripts.run_script import RunScript +except ImportError: + pass + +RunChangedTests: type[Any] | None = None +try: + from codeflow_engine.actions.scripts.run_changed_tests import RunChangedTests +except ImportError: + pass + +RunDBMigrations: type[Any] | None = None +try: + from codeflow_engine.actions.scripts.run_db_migrations import RunDBMigrations +except ImportError: + pass + +SeedDatabase: type[Any] | None = None +try: + from codeflow_engine.actions.scripts.seed_database import SeedDatabase +except ImportError: + pass + +TriggerDeployment: type[Any] | None = None +try: + from codeflow_engine.actions.scripts.trigger_deployment import TriggerDeployment +except ImportError: + pass + +PublishPackage: type[Any] | None = None +try: + from codeflow_engine.actions.scripts.publish_package import PublishPackage +except ImportError: + pass + +__all__ = [ + "PublishPackage", + "RunChangedTests", + "RunDBMigrations", + "RunScript", + "SeedDatabase", + "TriggerDeployment", +] diff --git a/codeflow_engine/actions/publish_package.py b/codeflow_engine/actions/scripts/publish_package.py similarity index 100% rename from codeflow_engine/actions/publish_package.py rename to codeflow_engine/actions/scripts/publish_package.py diff --git a/codeflow_engine/actions/run_changed_tests.py b/codeflow_engine/actions/scripts/run_changed_tests.py similarity index 100% rename from codeflow_engine/actions/run_changed_tests.py rename to codeflow_engine/actions/scripts/run_changed_tests.py diff --git a/codeflow_engine/actions/run_db_migrations.py b/codeflow_engine/actions/scripts/run_db_migrations.py similarity index 100% rename from codeflow_engine/actions/run_db_migrations.py rename to codeflow_engine/actions/scripts/run_db_migrations.py diff --git a/codeflow_engine/actions/run_script.py b/codeflow_engine/actions/scripts/run_script.py 
similarity index 100% rename from codeflow_engine/actions/run_script.py rename to codeflow_engine/actions/scripts/run_script.py diff --git a/codeflow_engine/actions/seed_database.py b/codeflow_engine/actions/scripts/seed_database.py similarity index 100% rename from codeflow_engine/actions/seed_database.py rename to codeflow_engine/actions/scripts/seed_database.py diff --git a/codeflow_engine/actions/take_screenshots.py b/codeflow_engine/actions/scripts/take_screenshots.py similarity index 100% rename from codeflow_engine/actions/take_screenshots.py rename to codeflow_engine/actions/scripts/take_screenshots.py diff --git a/codeflow_engine/actions/trigger_deployment.py b/codeflow_engine/actions/scripts/trigger_deployment.py similarity index 100% rename from codeflow_engine/actions/trigger_deployment.py rename to codeflow_engine/actions/scripts/trigger_deployment.py diff --git a/codeflow_engine/ai/extensions/__init__.py b/codeflow_engine/ai/extensions/__init__.py index 5a3644e..5fa6f76 100644 --- a/codeflow_engine/ai/extensions/__init__.py +++ b/codeflow_engine/ai/extensions/__init__.py @@ -2,13 +2,42 @@ AI Extensions Module AI extensions and implementation roadmap functionality. +Re-exports from the implementation_roadmap module for backward compatibility. """ -# Import the implementation module itself -from . 
import implementation +from typing import Any -# Import all public names from the implementation module -from .implementation import * +# Re-export from implementation_roadmap for backward compatibility +Phase1ExtensionImplementor: type[Any] | None = None +PhaseExecution: type[Any] | None = None +PhaseManager: type[Any] | None = None +ReportGenerator: type[Any] | None = None +Task: type[Any] | None = None +TaskExecution: type[Any] | None = None +TaskExecutor: type[Any] | None = None +TaskRegistry: type[Any] | None = None -# Set __all__ to include both the module and its public names -__all__ = ["implementation"] + getattr(implementation, "__all__", []) +try: + from codeflow_engine.ai.implementation_roadmap import ( + Phase1ExtensionImplementor, + PhaseExecution, + PhaseManager, + ReportGenerator, + Task, + TaskExecution, + TaskExecutor, + TaskRegistry, + ) +except ImportError: + pass + +__all__ = [ + "Phase1ExtensionImplementor", + "PhaseExecution", + "PhaseManager", + "ReportGenerator", + "Task", + "TaskExecution", + "TaskExecutor", + "TaskRegistry", +] diff --git a/codeflow_engine/ai/extensions/implementation/__init__.py b/codeflow_engine/ai/extensions/implementation/__init__.py deleted file mode 100644 index 7832df6..0000000 --- a/codeflow_engine/ai/extensions/implementation/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -Implementation Roadmap Package - -Modular implementation system for CodeFlow extension roadmap. 
-""" - -from .implementor import Phase1ExtensionImplementor -from .phase_manager import PhaseExecution, PhaseManager -from .report_generator import ReportGenerator -from .task_definitions import Task, TaskRegistry -from .task_executor import TaskExecution, TaskExecutor - -__all__ = [ - "Phase1ExtensionImplementor", - "PhaseExecution", - "PhaseManager", - "ReportGenerator", - "Task", - "TaskExecution", - "TaskExecutor", - "TaskRegistry", -] - -__version__ = "1.0.0" diff --git a/codeflow_engine/ai/extensions/implementation/implementor.py b/codeflow_engine/ai/extensions/implementation/implementor.py deleted file mode 100644 index 6f66681..0000000 --- a/codeflow_engine/ai/extensions/implementation/implementor.py +++ /dev/null @@ -1,356 +0,0 @@ -""" -Main Implementation Orchestrator - -Provides the same interface as the original Phase1ExtensionImplementor while using modular components. -""" - -import logging -from pathlib import Path -from typing import Any - -from .phase_manager import PhaseManager -from .report_generator import ReportGenerator -from .task_definitions import Task, TaskRegistry -from .task_executor import TaskExecutor - -logger = logging.getLogger(__name__) - - -class Phase1ExtensionImplementor: - """ - Modular implementation orchestrator that maintains backward compatibility - with the original Phase1ExtensionImplementor interface. 
- """ - - def __init__(self) -> None: - self.project_root = Path.cwd() - self.implementation_log: list[dict[str, Any]] = [] - self.current_phase: str | None = None - self.tasks: dict[str, Task] = {} - - # Initialize modular components - self.task_executor = TaskExecutor(self.project_root) - self.phase_manager = PhaseManager(self.task_executor) - self.report_generator = ReportGenerator( - self.phase_manager, self.task_executor, self.project_root - ) - - # Maintain backward compatibility with original interface - self.implementation_phases = TaskRegistry.get_phase_definitions() - self._initialize_tasks() - - def _initialize_tasks(self) -> None: - """Initialize task objects from task definitions.""" - task_definitions = TaskRegistry.get_task_definitions() - - for task_id, task_info in task_definitions.items(): - self.tasks[task_id] = Task( - id=task_id, - description=task_info.get("description", ""), - dependencies=task_info.get("dependencies", []), - metadata=task_info, - ) - - async def run_implementation( - self, phase: str = "immediate", dry_run: bool = False - ) -> None: - """ - Run implementation for a specific phase. - Maintains backward compatibility with original method signature. 
- """ - try: - self.current_phase = phase - logger.info(f"Starting implementation phase: {phase}") - - # Log start of implementation - self._log_implementation_event( - { - "event": "phase_start", - "phase": phase, - "dry_run": dry_run, - "timestamp": self._get_timestamp(), - } - ) - - # Execute the phase using the modular phase manager - phase_execution = await self.phase_manager.execute_phase( - phase_id=phase, dry_run=dry_run - ) - - # Update current phase based on execution result - if phase_execution.is_completed: - logger.info(f"Phase {phase} completed successfully") - self._log_implementation_event( - { - "event": "phase_completed", - "phase": phase, - "duration_seconds": ( - phase_execution.duration.total_seconds() - if phase_execution.duration - else 0 - ), - "timestamp": self._get_timestamp(), - } - ) - else: - logger.error(f"Phase {phase} failed") - self._log_implementation_event( - { - "event": "phase_failed", - "phase": phase, - "timestamp": self._get_timestamp(), - } - ) - - except Exception as e: - logger.exception(f"Implementation failed for phase {phase}: {e}") - self._log_implementation_event( - { - "event": "phase_error", - "phase": phase, - "error": str(e), - "timestamp": self._get_timestamp(), - } - ) - raise - - async def run_all_phases(self, dry_run: bool = False) -> None: - """Run all implementation phases in order.""" - try: - logger.info("Starting all implementation phases") - - self._log_implementation_event( - { - "event": "full_implementation_start", - "dry_run": dry_run, - "timestamp": self._get_timestamp(), - } - ) - - # Execute all phases using the phase manager - phase_executions = await self.phase_manager.execute_all_phases( - dry_run=dry_run, stop_on_failure=True - ) - - # Log completion - completed_phases = sum( - 1 for e in phase_executions.values() if e.is_completed - ) - failed_phases = sum(1 for e in phase_executions.values() if e.is_failed) - - self._log_implementation_event( - { - "event": "full_implementation_completed", - 
"completed_phases": completed_phases, - "failed_phases": failed_phases, - "timestamp": self._get_timestamp(), - } - ) - - logger.info( - f"Implementation completed: {completed_phases} phases successful, {failed_phases} failed" - ) - - except Exception as e: - logger.exception(f"Full implementation failed: {e}") - self._log_implementation_event( - { - "event": "full_implementation_error", - "error": str(e), - "timestamp": self._get_timestamp(), - } - ) - raise - - def get_implementation_status(self) -> dict[str, Any]: - """Get current implementation status.""" - overall_status = self.phase_manager.get_overall_status() - execution_summary = self.task_executor.get_execution_summary() - - return { - "current_phase": self.current_phase, - "overall_progress": overall_status.get("overall_progress_percentage", 0), - "phases": overall_status.get("phases", {}), - "tasks": execution_summary.get("executions", {}), - "implementation_log": self.implementation_log[-10:], # Last 10 events - "next_steps": self.phase_manager.get_next_steps(), - } - - def generate_implementation_report(self) -> dict[str, Any]: - """Generate comprehensive implementation report.""" - return self.report_generator.generate_progress_report() - - def save_implementation_report(self, filename: str | None = None) -> Path: - """Save implementation report to file.""" - report = self.generate_implementation_report() - return self.report_generator.save_report(report, filename) - - def get_phase_status(self, phase_id: str) -> dict[str, Any]: - """Get status of a specific phase.""" - return self.phase_manager.get_phase_status(phase_id) - - def get_task_status(self, task_id: str) -> dict[str, Any]: - """Get status of a specific task.""" - if task_id in self.task_executor.executions: - execution = self.task_executor.executions[task_id] - return { - "task_id": task_id, - "status": execution.status, - "start_time": ( - execution.start_time.isoformat() if execution.start_time else None - ), - "end_time": ( - 
execution.end_time.isoformat() if execution.end_time else None - ), - "duration_seconds": ( - execution.duration.total_seconds() if execution.duration else None - ), - "error_message": execution.error_message, - "output": execution.output, - "logs": execution.logs, - } - return { - "task_id": task_id, - "status": "not_started", - "message": "Task has not been executed yet", - } - - def pause_implementation(self, phase_id: str | None = None) -> bool: - """Pause implementation of a specific phase or current phase.""" - target_phase = phase_id or self.current_phase - if target_phase: - return self.phase_manager.pause_phase(target_phase) - return False - - def resume_implementation(self, phase_id: str | None = None) -> bool: - """Resume implementation of a specific phase or current phase.""" - target_phase = phase_id or self.current_phase - if target_phase: - return self.phase_manager.resume_phase(target_phase) - return False - - def reset_phase(self, phase_id: str) -> bool: - """Reset a phase to allow re-execution.""" - return self.phase_manager.reset_phase(phase_id) - - def get_available_tasks(self) -> list[dict[str, Any]]: - """Get list of all available tasks.""" - task_definitions = TaskRegistry.get_task_definitions() - return [ - { - "task_id": task_id, - "name": task_info.get("name", task_id), - "description": task_info.get("description", ""), - "category": task_info.get("category", "uncategorized"), - "complexity": task_info.get("complexity", "unknown"), - "estimated_time": task_info.get("estimated_time", "unknown"), - "dependencies": task_info.get("dependencies", []), - } - for task_id, task_info in task_definitions.items() - ] - - def get_available_phases(self) -> list[dict[str, Any]]: - """Get list of all available phases.""" - return [ - { - "phase_id": phase_id, - "name": phase_info.get("name", phase_id), - "description": phase_info.get("description", ""), - "priority": phase_info.get("priority", 999), - "duration_days": phase_info.get("duration_days", 0), 
- "tasks": phase_info.get("tasks", []), - "depends_on": phase_info.get("depends_on", []), - } - for phase_id, phase_info in self.implementation_phases.items() - ] - - def get_dependency_graph(self) -> dict[str, list[str]]: - """Get the complete task dependency graph.""" - return TaskRegistry.get_dependency_graph() - - def validate_dependencies(self, task_ids: list[str]) -> dict[str, Any]: - """Validate that task dependencies can be satisfied.""" - dependency_graph = self.get_dependency_graph() - issues = [] - - for task_id in task_ids: - dependencies = dependency_graph.get(task_id, []) - missing_deps = [dep for dep in dependencies if dep not in task_ids] - - if missing_deps: - issues.append( - {"task_id": task_id, "missing_dependencies": missing_deps} - ) - - return {"valid": len(issues) == 0, "issues": issues} - - def _get_next_steps(self) -> list[dict[str, Any]]: - """ - Get recommended next steps based on current progress. - Maintains backward compatibility with original method. - """ - return self.phase_manager.get_next_steps() - - def _log_implementation_event(self, event: dict[str, Any]) -> None: - """Log an implementation event.""" - self.implementation_log.append(event) - - # Keep only the last 100 events to prevent memory bloat - if len(self.implementation_log) > 100: - self.implementation_log = self.implementation_log[-100:] - - def _get_timestamp(self) -> str: - """Get current timestamp as ISO string.""" - from datetime import datetime - - return datetime.now().isoformat() - - # Backward compatibility methods - - @property - def tasks_completed(self) -> int: - """Get number of completed tasks (backward compatibility).""" - return sum(1 for e in self.task_executor.executions.values() if e.is_completed) - - @property - def tasks_failed(self) -> int: - """Get number of failed tasks (backward compatibility).""" - return sum(1 for e in self.task_executor.executions.values() if e.is_failed) - - @property - def implementation_progress(self) -> float: - """Get 
overall implementation progress percentage (backward compatibility).""" - overall_status = self.phase_manager.get_overall_status() - return ( - overall_status.get("overall_progress_percentage", 0.0) - if overall_status - else 0.0 - ) - - def get_implementation_summary(self) -> dict[str, Any]: - """ - Get implementation summary (backward compatibility). - This method maintains the same interface as the original implementation. - """ - overall_status = self.phase_manager.get_overall_status() - execution_summary = self.task_executor.get_execution_summary() - - return { - "current_phase": self.current_phase, - "total_phases": overall_status.get("total_phases", 0), - "completed_phases": overall_status.get("completed_phases", 0), - "failed_phases": overall_status.get("failed_phases", 0), - "total_tasks": execution_summary.get("total_tasks", 0), - "completed_tasks": execution_summary.get("completed", 0), - "failed_tasks": execution_summary.get("failed", 0), - "success_rate": execution_summary.get("success_rate", 0.0), - "overall_progress": overall_status.get("overall_progress_percentage", 0.0), - "implementation_log": self.implementation_log, - "next_steps": self._get_next_steps(), - } - - def get_progress_percentage(self) -> float: - """Get overall implementation progress percentage (backward compatibility).""" - overall_status = self.phase_manager.get_overall_status() - progress = overall_status.get("overall_progress_percentage", 0.0) - return float(progress) if progress is not None else 0.0 diff --git a/codeflow_engine/ai/extensions/implementation/phase_manager.py b/codeflow_engine/ai/extensions/implementation/phase_manager.py deleted file mode 100644 index e33f15a..0000000 --- a/codeflow_engine/ai/extensions/implementation/phase_manager.py +++ /dev/null @@ -1,350 +0,0 @@ -""" -Phase Manager Module - -Handles phase orchestration, workflow management, and progress tracking for implementation roadmap. 
-""" - -import logging -from dataclasses import dataclass, field -from datetime import datetime, timedelta -from typing import Any - -from .task_definitions import TaskRegistry -from .task_executor import TaskExecution, TaskExecutor - -logger = logging.getLogger(__name__) - - -@dataclass -class PhaseExecution: - """Represents a phase execution instance.""" - - phase_id: str - start_time: datetime | None = None - end_time: datetime | None = None - status: str = "pending" # pending, running, completed, failed, paused - task_executions: dict[str, TaskExecution] = field(default_factory=dict) - progress_percentage: float = 0.0 - - @property - def duration(self) -> timedelta | None: - """Get phase execution duration.""" - if self.start_time and self.end_time: - return self.end_time - self.start_time - return None - - @property - def is_completed(self) -> bool: - """Check if phase completed successfully.""" - return self.status == "completed" - - @property - def is_failed(self) -> bool: - """Check if phase failed.""" - return self.status == "failed" - - def update_progress(self) -> None: - """Update progress percentage based on task completion.""" - if not self.task_executions: - self.progress_percentage = 0.0 - return - - completed_tasks = sum( - 1 for execution in self.task_executions.values() if execution.is_completed - ) - total_tasks = len(self.task_executions) - self.progress_percentage = (completed_tasks / total_tasks) * 100.0 - - -class PhaseManager: - """Manages implementation phases and their execution.""" - - def __init__(self, task_executor: TaskExecutor) -> None: - self.task_executor = task_executor - self.phase_executions: dict[str, PhaseExecution] = {} - self.current_phase: str | None = None - self.phase_definitions = TaskRegistry.get_phase_definitions() - - async def execute_phase( - self, phase_id: str, dry_run: bool = False, force: bool = False - ) -> PhaseExecution: - """Execute a specific phase.""" - if phase_id not in self.phase_definitions: - msg = 
f"Unknown phase: {phase_id}" - raise ValueError(msg) - - # Check dependencies unless forced - if not force and not await self._check_phase_dependencies(phase_id): - msg = f"Phase dependencies not satisfied for: {phase_id}" - raise ValueError(msg) - - # Initialize phase execution - if phase_id not in self.phase_executions: - self.phase_executions[phase_id] = PhaseExecution(phase_id=phase_id) - - phase_execution = self.phase_executions[phase_id] - phase_definition = self.phase_definitions[phase_id] - - try: - phase_execution.start_time = datetime.now() - phase_execution.status = "running" - self.current_phase = phase_id - - logger.info(f"Starting phase: {phase_id}") - - # Get tasks for this phase - task_ids = phase_definition.get("tasks", []) - - # Execute tasks with dependencies - task_executions = await self.task_executor.execute_tasks_with_dependencies( - task_ids, dry_run - ) - - # Update phase execution with task results - phase_execution.task_executions.update(task_executions) - phase_execution.update_progress() - - # Determine phase status based on task results - failed_tasks = [ - task_id - for task_id, execution in task_executions.items() - if execution.is_failed - ] - - if failed_tasks: - phase_execution.status = "failed" - logger.error( - f"Phase {phase_id} failed due to failed tasks: {failed_tasks}" - ) - else: - phase_execution.status = "completed" - logger.info(f"Phase {phase_id} completed successfully") - - phase_execution.end_time = datetime.now() - - except Exception as e: - phase_execution.end_time = datetime.now() - phase_execution.status = "failed" - logger.exception(f"Phase {phase_id} failed with exception: {e}") - raise - - return phase_execution - - async def execute_all_phases( - self, dry_run: bool = False, stop_on_failure: bool = True - ) -> dict[str, PhaseExecution]: - """Execute all phases in order.""" - # Get phases sorted by priority - phases = sorted( - self.phase_definitions.items(), key=lambda x: x[1].get("priority", 999) - ) - - for 
phase_id, _phase_definition in phases: - try: - await self.execute_phase(phase_id, dry_run) - - if self.phase_executions[phase_id].is_failed and stop_on_failure: - logger.error(f"Stopping execution due to failed phase: {phase_id}") - break - - except Exception as e: - logger.exception(f"Failed to execute phase {phase_id}: {e}") - if stop_on_failure: - break - - return self.phase_executions - - async def _check_phase_dependencies(self, phase_id: str) -> bool: - """Check if phase dependencies are satisfied.""" - phase_definition = self.phase_definitions[phase_id] - dependencies = phase_definition.get("depends_on", []) - - for dep_phase_id in dependencies: - if dep_phase_id not in self.phase_executions: - logger.warning(f"Dependency phase {dep_phase_id} not executed") - return False - - if not self.phase_executions[dep_phase_id].is_completed: - logger.warning(f"Dependency phase {dep_phase_id} not completed") - return False - - return True - - def get_phase_status(self, phase_id: str) -> dict[str, Any]: - """Get detailed status of a specific phase.""" - if phase_id not in self.phase_executions: - return { - "phase_id": phase_id, - "status": "not_started", - "progress_percentage": 0.0, - } - - execution = self.phase_executions[phase_id] - phase_definition = self.phase_definitions[phase_id] - - return { - "phase_id": phase_id, - "name": phase_definition.get("name", phase_id), - "description": phase_definition.get("description", ""), - "status": execution.status, - "progress_percentage": execution.progress_percentage, - "start_time": ( - execution.start_time.isoformat() if execution.start_time else None - ), - "end_time": execution.end_time.isoformat() if execution.end_time else None, - "duration_seconds": ( - execution.duration.total_seconds() if execution.duration else None - ), - "total_tasks": len(phase_definition.get("tasks", [])), - "completed_tasks": sum( - 1 for e in execution.task_executions.values() if e.is_completed - ), - "failed_tasks": sum( - 1 for e in 
execution.task_executions.values() if e.is_failed - ), - "task_status": { - task_id: { - "status": task_execution.status, - "duration_seconds": ( - task_execution.duration.total_seconds() - if task_execution.duration - else None - ), - "error": task_execution.error_message, - } - for task_id, task_execution in execution.task_executions.items() - }, - } - - def get_overall_status(self) -> dict[str, Any]: - """Get overall implementation status across all phases.""" - total_phases = len(self.phase_definitions) - completed_phases = sum( - 1 for execution in self.phase_executions.values() if execution.is_completed - ) - failed_phases = sum( - 1 for execution in self.phase_executions.values() if execution.is_failed - ) - - # Calculate overall progress - if total_phases == 0: - overall_progress = 0.0 - else: - phase_weights = {"immediate": 0.4, "medium": 0.4, "strategic": 0.2} - - weighted_progress = 0.0 - total_weight = 0.0 - - for phase_id, execution in self.phase_executions.items(): - weight = phase_weights.get(phase_id, 1.0 / total_phases) - weighted_progress += execution.progress_percentage * weight - total_weight += weight - - overall_progress = ( - weighted_progress / total_weight if total_weight > 0 else 0.0 - ) - - # Determine overall status - if failed_phases > 0: - overall_status = "failed" - elif completed_phases == total_phases: - overall_status = "completed" - elif any(e.status == "running" for e in self.phase_executions.values()): - overall_status = "running" - else: - overall_status = "pending" - - return { - "overall_status": overall_status, - "overall_progress_percentage": overall_progress, - "current_phase": self.current_phase, - "total_phases": total_phases, - "completed_phases": completed_phases, - "failed_phases": failed_phases, - "phases": { - phase_id: self.get_phase_status(phase_id) - for phase_id in self.phase_definitions - }, - } - - def get_next_steps(self) -> list[dict[str, Any]]: - """Get recommended next steps based on current progress.""" - 
next_steps = [] - - # Find the next phase to execute - for phase_id in ["immediate", "medium", "strategic"]: - if phase_id not in self.phase_executions: - phase_definition = self.phase_definitions[phase_id] - next_steps.append( - { - "type": "phase", - "action": f"Execute {phase_definition.get('name', phase_id)}", - "description": phase_definition.get("description", ""), - "priority": "high" if phase_id == "immediate" else "medium", - "estimated_duration_days": phase_definition.get( - "duration_days", 0 - ), - } - ) - break - if not self.phase_executions[phase_id].is_completed: - # Find failed tasks in current phase - execution = self.phase_executions[phase_id] - failed_tasks = [ - task_id - for task_id, task_execution in execution.task_executions.items() - if task_execution.is_failed - ] - - if failed_tasks: - next_steps.append( - { - "type": "retry", - "action": f"Retry failed tasks in {phase_id}", - "description": f"Retry tasks: {', '.join(failed_tasks)}", - "priority": "high", - "tasks": failed_tasks, - } - ) - break - - # Add general recommendations - if not next_steps: - next_steps.append( - { - "type": "maintenance", - "action": "Monitor and maintain implemented features", - "description": "All phases completed. 
Focus on monitoring and optimization.", - "priority": "low", - } - ) - - return next_steps - - def pause_phase(self, phase_id: str) -> bool: - """Pause execution of a specific phase.""" - if phase_id in self.phase_executions: - execution = self.phase_executions[phase_id] - if execution.status == "running": - execution.status = "paused" - logger.info(f"Phase {phase_id} paused") - return True - return False - - def resume_phase(self, phase_id: str) -> bool: - """Resume execution of a paused phase.""" - if phase_id in self.phase_executions: - execution = self.phase_executions[phase_id] - if execution.status == "paused": - execution.status = "running" - logger.info(f"Phase {phase_id} resumed") - return True - return False - - def reset_phase(self, phase_id: str) -> bool: - """Reset a phase to allow re-execution.""" - if phase_id in self.phase_executions: - del self.phase_executions[phase_id] - logger.info(f"Phase {phase_id} reset") - return True - return False diff --git a/codeflow_engine/ai/extensions/implementation/report_generator.py b/codeflow_engine/ai/extensions/implementation/report_generator.py deleted file mode 100644 index 548c63d..0000000 --- a/codeflow_engine/ai/extensions/implementation/report_generator.py +++ /dev/null @@ -1,535 +0,0 @@ -""" -Report Generator Module - -Generates comprehensive reports and analytics for implementation roadmap execution. 
-""" - -import json -import logging -import operator -from datetime import datetime, timedelta -from pathlib import Path -from typing import Any - -from .phase_manager import PhaseManager -from .task_executor import TaskExecution, TaskExecutor - -logger = logging.getLogger(__name__) - - -class ReportGenerator: - """Generates comprehensive reports for implementation progress.""" - - def __init__( - self, - phase_manager: PhaseManager, - task_executor: TaskExecutor, - project_root: Path, - ) -> None: - self.phase_manager = phase_manager - self.task_executor = task_executor - self.project_root = project_root - - def generate_progress_report(self) -> dict[str, Any]: - """Generate a comprehensive progress report.""" - overall_status = self.phase_manager.get_overall_status() - execution_summary = self.task_executor.get_execution_summary() - - return { - "report_metadata": { - "generated_at": datetime.now().isoformat(), - "report_type": "progress_report", - "version": "1.0", - }, - "executive_summary": self._generate_executive_summary( - overall_status, execution_summary - ), - "overall_status": overall_status, - "execution_summary": execution_summary, - "phase_details": self._generate_phase_details(), - "task_analysis": self._generate_task_analysis(), - "recommendations": self._generate_recommendations(), - "next_steps": self.phase_manager.get_next_steps(), - "metrics": self._generate_metrics(), - "timeline": self._generate_timeline(), - } - - def _generate_executive_summary( - self, overall_status: dict[str, Any], execution_summary: dict[str, Any] - ) -> dict[str, Any]: - """Generate executive summary.""" - total_phases = overall_status.get("total_phases", 0) - completed_phases = overall_status.get("completed_phases", 0) - failed_phases = overall_status.get("failed_phases", 0) - - total_tasks = execution_summary.get("total_tasks", 0) - completed_tasks = execution_summary.get("completed", 0) - failed_tasks = execution_summary.get("failed", 0) - - # Calculate health score 
- phase_health = ( - (completed_phases / total_phases * 100) if total_phases > 0 else 0 - ) - task_health = (completed_tasks / total_tasks * 100) if total_tasks > 0 else 0 - overall_health = (phase_health + task_health) / 2 - - # Determine status color via shared utility - from codeflow_engine.reporting.html_utils import status_color_name - - status_color = status_color_name(overall_health) - - return { - "overall_health_score": round(overall_health, 1), - "status_color": status_color, - "current_phase": overall_status.get("current_phase"), - "progress_percentage": round( - overall_status.get("overall_progress_percentage", 0), 1 - ), - "phases_completed": f"{completed_phases}/{total_phases}", - "tasks_completed": f"{completed_tasks}/{total_tasks}", - "critical_issues": failed_phases + failed_tasks, - "estimated_completion": self._estimate_completion_date(), - "key_achievements": self._get_key_achievements(), - "major_blockers": self._get_major_blockers(), - } - - def _generate_phase_details(self) -> dict[str, Any]: - """Generate detailed phase information.""" - phase_details = {} - - for phase_id in self.phase_manager.phase_definitions: - status = self.phase_manager.get_phase_status(phase_id) - phase_details[phase_id] = { - **status, - "success_criteria": self.phase_manager.phase_definitions[phase_id].get( - "success_criteria", [] - ), - "risk_assessment": self._assess_phase_risk(phase_id), - "resource_utilization": self._calculate_resource_utilization(phase_id), - "quality_metrics": self._calculate_quality_metrics(phase_id), - } - - return phase_details - - def _generate_task_analysis(self) -> dict[str, Any]: - """Generate task-level analysis.""" - task_categories = {} - task_complexity_analysis = {} - - # Group tasks by category - from .task_definitions import TaskRegistry - - categories = TaskRegistry.get_task_categories() - - for category, task_ids in categories.items(): - completed = sum( - 1 - for task_id in task_ids - if task_id in 
self.task_executor.executions - and self.task_executor.executions[task_id].is_completed - ) - failed = sum( - 1 - for task_id in task_ids - if task_id in self.task_executor.executions - and self.task_executor.executions[task_id].is_failed - ) - - task_categories[category] = { - "total_tasks": len(task_ids), - "completed": completed, - "failed": failed, - "success_rate": (completed / len(task_ids) * 100) if task_ids else 0, - } - - # Analyze task complexity vs success rate - task_definitions = TaskRegistry.get_task_definitions() - complexity_levels = ["low", "medium", "high", "very_high"] - - for complexity in complexity_levels: - matching_tasks = [ - task_id - for task_id, task_info in task_definitions.items() - if task_info.get("complexity") == complexity - ] - - if matching_tasks: - completed = sum( - 1 - for task_id in matching_tasks - if task_id in self.task_executor.executions - and self.task_executor.executions[task_id].is_completed - ) - - task_complexity_analysis[complexity] = { - "total_tasks": len(matching_tasks), - "completed": completed, - "success_rate": ( - (completed / len(matching_tasks) * 100) if matching_tasks else 0 - ), - } - - return { - "category_breakdown": task_categories, - "complexity_analysis": task_complexity_analysis, - "longest_running_tasks": self._get_longest_running_tasks(), - "most_failed_tasks": self._get_most_failed_tasks(), - } - - def _generate_recommendations(self) -> list[dict[str, Any]]: - """Generate actionable recommendations.""" - recommendations = [] - - # Analyze failed tasks - failed_executions = [ - execution - for execution in self.task_executor.executions.values() - if execution.is_failed - ] - - if failed_executions: - recommendations.append( - { - "type": "critical", - "title": "Address Failed Tasks", - "description": f"{len(failed_executions)} tasks have failed and need attention", - "action": "Review error logs and retry failed tasks", - "priority": "high", - "impact": "Blocks progress on dependent tasks", - } - 
) - - # Check for slow phases - slow_phases = [] - for phase_id, execution in self.phase_manager.phase_executions.items(): - if execution.duration and execution.duration > timedelta(days=7): - slow_phases.append(phase_id) - - if slow_phases: - recommendations.append( - { - "type": "performance", - "title": "Optimize Slow Phases", -"description": f"Phases {', '.join(slow_phases)} are taking longer than expected", - "action": "Review task dependencies and consider parallel execution", - "priority": "medium", - "impact": "Delays overall implementation timeline", - } - ) - - # Check resource utilization - overall_status = self.phase_manager.get_overall_status() - if overall_status.get("overall_progress_percentage", 0) < 50: - recommendations.append( - { - "type": "strategic", - "title": "Accelerate Implementation", - "description": "Implementation progress is below 50%", - "action": "Consider allocating more resources or simplifying scope", - "priority": "medium", - "impact": "Affects delivery timeline", - } - ) - - return recommendations - - def _generate_metrics(self) -> dict[str, Any]: - """Generate key performance metrics.""" - execution_summary = self.task_executor.get_execution_summary() - self.phase_manager.get_overall_status() - - # Calculate timeline metrics - all_executions: list[TaskExecution] = [] - for phase_exec in self.phase_manager.phase_executions.values(): - all_executions.extend(phase_exec.task_executions.values()) - - if all_executions: - # Extract non-None start times directly - start_times = [ - exec.start_time - for exec in all_executions - if exec.start_time is not None - ] - if start_times: - earliest_start = min(start_times) - # Extract non-None end times directly - end_times = [ - exec.end_time - for exec in all_executions - if exec.end_time is not None - ] - if end_times: - latest_end = max(end_times) - total_duration = latest_end - earliest_start - else: - total_duration = None - else: - total_duration = None - else: - total_duration = None 
- - # Calculate velocity (tasks completed per day) - completed_executions = [ - execution - for execution in self.task_executor.executions.values() - if execution.is_completed and execution.start_time - ] - - if completed_executions: - # Extract non-None start times directly - start_times = [ - e.start_time for e in completed_executions if e.start_time is not None - ] - if start_times: - earliest_start = min(start_times) - days_elapsed = (datetime.now() - earliest_start).days or 1 - velocity = len(completed_executions) / days_elapsed - else: - velocity = 0 - else: - velocity = 0 - - # Calculate average task duration - completed_durations = [ - execution.duration.total_seconds() / 3600 # Convert to hours - for execution in completed_executions - if execution.duration - ] - avg_task_duration = ( - sum(completed_durations) / len(completed_durations) - if completed_durations - else 0 - ) - - return { - "velocity_tasks_per_day": round(velocity, 2), - "average_task_duration_hours": round(avg_task_duration, 2), - "success_rate_percentage": round( - execution_summary.get("success_rate", 0) * 100, 1 - ), - "total_implementation_time_hours": round( - execution_summary.get("total_duration_seconds", 0) / 3600, 2 - ), - "phases_on_track": self._count_phases_on_track(), - "critical_path_tasks": self._identify_critical_path_tasks(), - "resource_efficiency": self._calculate_resource_efficiency(), - "total_duration": total_duration, - } - - def _generate_timeline(self) -> list[dict[str, Any]]: - """Generate implementation timeline.""" - timeline = [] - - for phase_id, phase_execution in self.phase_manager.phase_executions.items(): - if phase_execution.start_time: - timeline.append( - { - "date": phase_execution.start_time.isoformat(), - "event": f"Started phase: {phase_id}", - "type": "phase_start", - "phase_id": phase_id, - } - ) - - if phase_execution.end_time: - timeline.append( - { - "date": phase_execution.end_time.isoformat(), - "event": f"Completed phase: {phase_id}", - 
"type": "phase_end", - "phase_id": phase_id, - "status": phase_execution.status, - } - ) - - # Add task milestones - for task_id, task_execution in self.task_executor.executions.items(): - if task_execution.is_completed and task_execution.end_time: - timeline.append( - { - "date": task_execution.end_time.isoformat(), - "event": f"Completed task: {task_id}", - "type": "task_completion", - "task_id": task_id, - } - ) - - # Sort by date - timeline.sort(key=operator.itemgetter("date")) - - return timeline - - def save_report(self, report: dict[str, Any], filename: str | None = None) -> Path: - """Save report to file.""" - if not filename: - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"implementation_report_{timestamp}.json" - - report_path = self.project_root / "reports" / filename - report_path.parent.mkdir(exist_ok=True) - - with open(report_path, "w", encoding="utf-8") as f: - json.dump(report, f, indent=2, default=str) - - logger.info(f"Report saved to: {report_path}") - return report_path - - def generate_html_report(self, report: dict[str, Any]) -> str: - """Generate HTML version of the report.""" - from codeflow_engine.reporting.html_page import PageHeader, build_basic_page - - extra_css = """ -.metric { display: inline-block; margin: 10px; padding: 15px; -.phase { margin: 20px 0; padding: 15px; border-left: 4px solid #007bff; background: #f8f9fa; } -.recommendation { margin: 10px 0; padding: 10px; border-radius: 5px; } -.critical { background: #f8d7da; border: 1px solid #f5c6cb; } -.performance { background: #fff3cd; border: 1px solid #ffeaa7; } -.strategic { background: #d1ecf1; border: 1px solid #bee5eb; } -""" - - # Format recommendations - recommendations_html = "" - for rec in report.get("recommendations", []): - recommendations_html += f""" -
- {rec["title"]}
- {rec["description"]}
- Action: {rec["action"]} -
- """ - - # Format phases - phases_html = "" - for phase_id, phase_data in report.get("phase_details", {}).items(): - phases_html += f""" -
-

{phase_data.get("name", phase_id)}

-

Status: {phase_data.get("status", "unknown")}

-

Progress: {phase_data.get("progress_percentage", 0)}%

-

Tasks: {phase_data.get("completed_tasks", 0)}/ -

- """ - - executive = report.get("executive_summary", {}) - - content_html = f""" -
-

Health Score: {executive.get("overall_health_score", 0)}%

-
-

Executive Summary

-
Progress: {executive.get("progress_percentage", 0)}%
-
Phases: {executive.get("phases_completed", "0/0")}
-
Tasks: {executive.get("tasks_completed", "0/0")}
-
Success Rate: {report.get("metrics", {}).get("success_rate_percentage", 0)}%
- -

Recommendations

- {recommendations_html} - -

Phase Status

- {phases_html} - """ - - return build_basic_page( - header=PageHeader(title="CodeFlow Implementation Report"), - generated_at=report.get("report_metadata", {}).get("generated_at", ""), - content_html=content_html, - extra_css=extra_css, - ) - - # Helper methods - - def _estimate_completion_date(self) -> str | None: - """Estimate completion date based on current velocity.""" - # Placeholder implementation - return None - - def _get_key_achievements(self) -> list[str]: - """Get list of key achievements.""" - achievements = [ - f"Completed {execution.task_id}" - for execution in self.task_executor.executions.values() - if execution.is_completed - ] - return achievements[:5] # Return top 5 - - def _get_major_blockers(self) -> list[str]: - """Get list of major blockers.""" - blockers = [ - f"Failed {execution.task_id}: {execution.error_message}" - for execution in self.task_executor.executions.values() - if execution.is_failed - ] - return blockers[:3] # Return top 3 - - def _assess_phase_risk(self, phase_id: str) -> str: - """Assess risk level for a phase.""" - if phase_id in self.phase_manager.phase_executions: - execution = self.phase_manager.phase_executions[phase_id] - failed_tasks = sum( - 1 for e in execution.task_executions.values() if e.is_failed - ) - if failed_tasks > 2: - return "high" - if failed_tasks > 0: - return "medium" - return "low" - - def _calculate_resource_utilization(self, phase_id: str) -> float: - """Calculate resource utilization for a phase.""" - # Placeholder implementation - return 75.0 - - def _calculate_quality_metrics(self, phase_id: str) -> dict[str, Any]: - """Calculate quality metrics for a phase.""" - # Placeholder implementation - return {"test_coverage": 85, "code_quality": 92} - - def _get_longest_running_tasks(self) -> list[dict[str, Any]]: - """Get longest running tasks.""" - completed_tasks = [ - (task_id, execution) - for task_id, execution in self.task_executor.executions.items() - if execution.is_completed and 
execution.duration - ] - - # Sort by duration, handling None values - completed_tasks.sort( - key=lambda x: x[1].duration.total_seconds() if x[1].duration else 0, - reverse=True, - ) - - return [ - { - "task_id": task_id, - "duration_hours": ( - round(execution.duration.total_seconds() / 3600, 2) - if execution.duration - else 0 - ), - } - for task_id, execution in completed_tasks[:5] - ] - - def _get_most_failed_tasks(self) -> list[str]: - """Get most frequently failed tasks.""" - failed_tasks = [ - execution.task_id - for execution in self.task_executor.executions.values() - if execution.is_failed - ] - return list(set(failed_tasks))[:5] - - def _count_phases_on_track(self) -> int: - """Count phases that are on track.""" - # Placeholder implementation - return len(self.phase_manager.phase_executions) - - def _identify_critical_path_tasks(self) -> list[str]: - """Identify tasks on the critical path.""" - # Placeholder implementation - return ["setup_sentry_monitoring", "implement_structured_logging"] - - def _calculate_resource_efficiency(self) -> float: - """Calculate overall resource efficiency.""" - # Placeholder implementation - return 82.5 diff --git a/codeflow_engine/ai/extensions/implementation/task_definitions.py b/codeflow_engine/ai/extensions/implementation/task_definitions.py deleted file mode 100644 index ef9d184..0000000 --- a/codeflow_engine/ai/extensions/implementation/task_definitions.py +++ /dev/null @@ -1,326 +0,0 @@ -""" -Task Definitions Module - -Centralized task definitions and phase configurations for implementation roadmap. 
-""" - -from dataclasses import dataclass, field -from typing import Any - - -@dataclass -class Task: - """Represents a task in the implementation roadmap.""" - - id: str - description: str - status: str = "pending" - dependencies: list[str] = field(default_factory=list) - metadata: dict[str, Any] = field(default_factory=dict) - - def __post_init__(self) -> None: - # Default factory handles None values automatically - pass - - -class TaskRegistry: - """Registry for all available implementation tasks.""" - - @staticmethod - def get_task_definitions() -> dict[str, dict[str, Any]]: - """Get all task definitions with their metadata.""" - return { - # Immediate Priority Tasks - "setup_sentry_monitoring": { - "name": "Setup Sentry Monitoring", - "description": "Configure Sentry for error tracking and performance monitoring", - "category": "monitoring", - "complexity": "medium", - "estimated_time": "2-3 hours", - "dependencies": [], - "files_created": ["sentry_config.py", "requirements-sentry.txt"], - "env_vars": ["SENTRY_DSN", "SENTRY_ENVIRONMENT"], - }, - "implement_structured_logging": { - "name": "Implement Structured Logging", - "description": "Set up JSON-based structured logging with proper formatters", - "category": "logging", - "complexity": "low", - "estimated_time": "1-2 hours", - "dependencies": [], - "files_created": ["logging_config.py", "log_formatters.py"], - "env_vars": ["LOG_LEVEL", "LOG_FORMAT"], - }, - "setup_redis_caching": { - "name": "Setup Redis Caching", - "description": "Configure Redis for LLM response and API call caching", - "category": "caching", - "complexity": "medium", - "estimated_time": "3-4 hours", - "dependencies": [], - "files_created": [ - "redis_config.py", - "cache_manager.py", - "cache_decorators.py", - ], - "env_vars": ["REDIS_URL", "REDIS_PASSWORD", "CACHE_TTL"], - }, - "create_health_checks": { - "name": "Create Health Checks", - "description": "Implement comprehensive health check endpoints", - "category": "monitoring", - 
"complexity": "medium", - "estimated_time": "2-3 hours", - "dependencies": [], - "files_created": ["health_checks.py", "health_endpoints.py"], - "env_vars": [], - }, - "implement_basic_circuit_breakers": { - "name": "Implement Circuit Breakers", - "description": "Add circuit breaker pattern for external API calls", - "category": "resilience", - "complexity": "high", - "estimated_time": "4-5 hours", - "dependencies": [], - "files_created": ["circuit_breaker.py", "api_resilience.py"], - "env_vars": ["CIRCUIT_BREAKER_THRESHOLD", "CIRCUIT_BREAKER_TIMEOUT"], - }, - # Medium Priority Tasks - "setup_postgresql_integration": { - "name": "Setup PostgreSQL Integration", - "description": "Configure PostgreSQL for data persistence", - "category": "database", - "complexity": "high", - "estimated_time": "4-6 hours", - "dependencies": ["setup_sentry_monitoring"], - "files_created": ["database_config.py", "models.py", "migrations/"], - "env_vars": ["DATABASE_URL", "DB_POOL_SIZE"], - }, - "implement_prometheus_metrics": { - "name": "Implement Prometheus Metrics", - "description": "Set up Prometheus metrics collection", - "category": "monitoring", - "complexity": "medium", - "estimated_time": "3-4 hours", - "dependencies": ["create_health_checks"], - "files_created": ["metrics_config.py", "custom_metrics.py"], - "env_vars": ["METRICS_PORT", "METRICS_PATH"], - }, - "setup_oauth2_authentication": { - "name": "Setup OAuth2 Authentication", - "description": "Implement OAuth 2.0 authentication flow", - "category": "security", - "complexity": "high", - "estimated_time": "5-6 hours", - "dependencies": ["setup_postgresql_integration"], - "files_created": [ - "auth_config.py", - "oauth_handlers.py", - "token_manager.py", - ], - "env_vars": [ - "OAUTH_CLIENT_ID", - "OAUTH_CLIENT_SECRET", - "JWT_SECRET_KEY", - ], - }, - "implement_advanced_llm_routing": { - "name": "Advanced LLM Routing", - "description": "Implement intelligent LLM routing and load balancing", - "category": "ai", - 
"complexity": "high", - "estimated_time": "6-8 hours", - "dependencies": ["setup_redis_caching", "implement_prometheus_metrics"], - "files_created": [ - "llm_router.py", - "load_balancer.py", - "model_selector.py", - ], - "env_vars": ["LLM_ROUTING_STRATEGY", "LOAD_BALANCER_ALGORITHM"], - }, - "create_comprehensive_testing": { - "name": "Comprehensive Testing Framework", - "description": "Set up unit, integration, and performance testing", - "category": "testing", - "complexity": "high", - "estimated_time": "8-10 hours", - "dependencies": ["implement_basic_circuit_breakers"], - "files_created": [ - "test_config.py", - "test_fixtures.py", - "performance_tests.py", - "integration_tests.py", - ], - "env_vars": ["TEST_DATABASE_URL", "TEST_REDIS_URL"], - }, - # Strategic Priority Tasks - "implement_rag_system": { - "name": "RAG System Implementation", - "description": "Implement Retrieval Augmented Generation system", - "category": "ai", - "complexity": "very_high", - "estimated_time": "10-15 hours", - "dependencies": [ - "setup_postgresql_integration", - "implement_advanced_llm_routing", - ], - "files_created": [ - "rag_system.py", - "vector_store.py", - "retrieval_engine.py", - "embedding_manager.py", - ], - "env_vars": ["VECTOR_DB_URL", "EMBEDDING_MODEL", "RAG_CHUNK_SIZE"], - }, - "create_analytics_dashboard": { - "name": "Analytics Dashboard", - "description": "Build comprehensive analytics and monitoring dashboard", - "category": "analytics", - "complexity": "very_high", - "estimated_time": "12-16 hours", - "dependencies": [ - "implement_prometheus_metrics", - "setup_oauth2_authentication", - ], - "files_created": [ - "dashboard_app.py", - "analytics_queries.py", - "dashboard_templates/", - "static/dashboard/", - ], - "env_vars": ["DASHBOARD_SECRET_KEY", "ANALYTICS_DB_URL"], - }, - "setup_fine_tuned_models": { - "name": "Fine-tuned Models Setup", - "description": "Configure fine-tuned model training and deployment", - "category": "ai", - "complexity": "very_high", - 
"estimated_time": "15-20 hours", - "dependencies": [ - "implement_rag_system", - "create_comprehensive_testing", - ], - "files_created": [ - "model_training.py", - "fine_tuning_pipeline.py", - "model_deployment.py", - "training_data_processor.py", - ], - "env_vars": [ - "TRAINING_DATA_PATH", - "MODEL_REGISTRY_URL", - "FINE_TUNING_API_KEY", - ], - }, - "implement_multi_cloud_deployment": { - "name": "Multi-cloud Deployment", - "description": "Set up deployment across multiple cloud providers", - "category": "infrastructure", - "complexity": "very_high", - "estimated_time": "20-25 hours", - "dependencies": [ - "create_analytics_dashboard", - "setup_fine_tuned_models", - ], - "files_created": [ - "cloud_config.py", - "deployment_scripts/", - "terraform/", - "kubernetes/", - ], - "env_vars": [ - "AWS_ACCESS_KEY_ID", - "GCP_PROJECT_ID", - "AZURE_SUBSCRIPTION_ID", - ], - }, - } - - @staticmethod - def get_phase_definitions() -> dict[str, dict[str, Any]]: - """Get phase definitions with their task assignments.""" - return { - "immediate": { - "name": "Immediate Priority (Week 1-2)", - "description": "Essential production-ready features", - "duration_days": 10, - "priority": 1, - "tasks": [ - "setup_sentry_monitoring", - "implement_structured_logging", - "setup_redis_caching", - "create_health_checks", - "implement_basic_circuit_breakers", - ], - "success_criteria": [ - "Error tracking is functional", - "Structured logs are being generated", - "Redis caching is working", - "Health endpoints return 200", - "Circuit breakers prevent cascading failures", - ], - }, - "medium": { - "name": "Medium Priority (Week 3-6)", - "description": "Enhanced functionality and integrations", - "duration_days": 25, - "priority": 2, - "depends_on": ["immediate"], - "tasks": [ - "setup_postgresql_integration", - "implement_prometheus_metrics", - "setup_oauth2_authentication", - "implement_advanced_llm_routing", - "create_comprehensive_testing", - ], - "success_criteria": [ - "Database is 
properly configured", - "Metrics are being collected", - "Authentication is working", - "LLM routing is optimized", - "Test coverage > 80%", - ], - }, - "strategic": { - "name": "Long-term Strategic (Month 2+)", - "description": "Advanced AI features and scalability", - "duration_days": 45, - "priority": 3, - "depends_on": ["medium"], - "tasks": [ - "implement_rag_system", - "create_analytics_dashboard", - "setup_fine_tuned_models", - "implement_multi_cloud_deployment", - ], - "success_criteria": [ - "RAG system improves response quality", - "Dashboard provides actionable insights", - "Fine-tuned models are deployed", - "Multi-cloud deployment is stable", - ], - }, - } - - @staticmethod - def get_task_categories() -> dict[str, list[str]]: - """Get tasks organized by category.""" - tasks = TaskRegistry.get_task_definitions() - categories: dict[str, list[str]] = {} - - for task_id, task_info in tasks.items(): - category = task_info.get("category", "uncategorized") - if category not in categories: - categories[category] = [] - categories[category].append(task_id) - - return categories - - @staticmethod - def get_dependency_graph() -> dict[str, list[str]]: - """Get the complete dependency graph for all tasks.""" - tasks = TaskRegistry.get_task_definitions() - return { - task_id: task_info.get("dependencies", []) - for task_id, task_info in tasks.items() - } diff --git a/codeflow_engine/ai/extensions/implementation/task_executor.py b/codeflow_engine/ai/extensions/implementation/task_executor.py deleted file mode 100644 index 554476b..0000000 --- a/codeflow_engine/ai/extensions/implementation/task_executor.py +++ /dev/null @@ -1,519 +0,0 @@ -""" -Task Executor Module - -Handles task execution, monitoring, and state management for implementation roadmap. 
-""" - -import asyncio -from dataclasses import dataclass, field -from datetime import datetime, timedelta -import logging -from pathlib import Path -from typing import TYPE_CHECKING, Any - -from implementation.task_definitions import TaskRegistry - - -if TYPE_CHECKING: - from collections.abc import Callable - -logger = logging.getLogger(__name__) - - -@dataclass -class TaskExecution: - """Represents a task execution instance.""" - - task_id: str - start_time: datetime | None = None - end_time: datetime | None = None - status: str = "pending" # pending, running, completed, failed, skipped - error_message: str | None = None - output: dict[str, Any] = field(default_factory=dict) - duration: timedelta | None = None - logs: list[str] = field(default_factory=list) - - @property - def is_completed(self) -> bool: - """Check if task completed successfully.""" - return self.status == "completed" - - @property - def is_failed(self) -> bool: - """Check if task failed.""" - return self.status == "failed" - - -class TaskExecutor: - """Executes implementation tasks with dependency management.""" - - def __init__(self, project_root: Path) -> None: - self.project_root = project_root - self.executions: dict[str, TaskExecution] = {} - self.task_handlers: dict[str, Callable] = {} - self._setup_task_handlers() - - def _setup_task_handlers(self) -> None: - """Set up task execution handlers.""" - self.task_handlers = { - "setup_sentry_monitoring": self._setup_sentry_monitoring, - "implement_structured_logging": self._implement_structured_logging, - "setup_redis_caching": self._setup_redis_caching, - "create_health_checks": self._create_health_checks, - "implement_basic_circuit_breakers": self._implement_circuit_breakers, - "setup_postgresql_integration": self._setup_postgresql_integration, - "implement_prometheus_metrics": self._implement_prometheus_metrics, - "setup_oauth2_authentication": self._setup_oauth2_authentication, - "implement_advanced_llm_routing": 
self._implement_advanced_llm_routing, - "create_comprehensive_testing": self._create_comprehensive_testing, - "implement_rag_system": self._implement_rag_system, - "create_analytics_dashboard": self._create_analytics_dashboard, - "setup_fine_tuned_models": self._setup_fine_tuned_models, - "implement_multi_cloud_deployment": self._implement_multi_cloud_deployment, - } - - async def execute_task(self, task_id: str, dry_run: bool = False) -> TaskExecution: - """Execute a single task.""" - if task_id in self.executions: - return self.executions[task_id] - - execution = TaskExecution(task_id=task_id) - self.executions[task_id] = execution - - execution.start_time = datetime.now() - execution.status = "running" - - logger.info(f"Starting task: {task_id}") - execution.logs.append(f"Task {task_id} started at {execution.start_time}") - - if dry_run: - execution.output = {"dry_run": True, "message": f"Would execute {task_id}"} - execution.status = "completed" - else: - try: - # Execute the task - result = await self._execute_task_implementation(task_id) - if isinstance(result, BaseException): - # Handle exception case - execution.status = "failed" - execution.error_message = str(result) - execution.end_time = datetime.now() - logger.error(f"Task {task_id} failed with exception: {result}") - elif isinstance(result, TaskExecution): - # Handle successful execution - update the existing execution with result data - execution.status = result.status - execution.output = result.output - execution.end_time = datetime.now() - logger.info(f"Task {task_id} completed successfully") - except Exception as e: - execution.status = "failed" - execution.error_message = str(e) - execution.end_time = datetime.now() - logger.exception(f"Task {task_id} failed: {e}") - - # Calculate duration - if execution.start_time and execution.end_time: - execution.duration = execution.end_time - execution.start_time - - return execution - - async def execute_tasks_with_dependencies( - self, task_ids: 
list[str], dry_run: bool = False - ) -> dict[str, TaskExecution]: - """Execute multiple tasks respecting dependencies.""" - dependency_graph = TaskRegistry.get_dependency_graph() - completed_tasks: set[str] = set() - failed_tasks: set[str] = set() - - async def can_execute_task(task_id: str) -> bool: - """Check if task dependencies are satisfied.""" - dependencies = dependency_graph.get(task_id, []) - return all(dep in completed_tasks for dep in dependencies) - - remaining_tasks = set(task_ids) - - while remaining_tasks: - # Find tasks that can be executed - ready_tasks = [ - task_id - for task_id in remaining_tasks - if await can_execute_task(task_id) - ] - - if not ready_tasks: - # Check for circular dependencies or missing dependencies - blocked_tasks = remaining_tasks - failed_tasks - if blocked_tasks: - logger.error( - f"Circular dependency or missing dependencies for tasks: {blocked_tasks}" - ) - for task_id in blocked_tasks: - execution = TaskExecution(task_id=task_id) - execution.status = "failed" - execution.error_message = "Dependency not satisfied" - self.executions[task_id] = execution - failed_tasks.add(task_id) - break - - # Execute ready tasks in parallel - tasks = [self.execute_task(task_id, dry_run) for task_id in ready_tasks] - executions = await asyncio.gather(*tasks, return_exceptions=True) - - for task_id, result in zip(ready_tasks, executions, strict=False): - if isinstance(result, Exception): - logger.error(f"Task {task_id} raised exception: {result}") - failed_tasks.add(task_id) - elif isinstance(result, TaskExecution): - if result.is_completed: - completed_tasks.add(task_id) - else: - failed_tasks.add(task_id) - else: - # Unexpected result type - logger.error( - f"Task {task_id} returned unexpected result type: {type(result)}" - ) - failed_tasks.add(task_id) - - remaining_tasks.discard(task_id) - - return self.executions - - def get_execution_summary(self) -> dict[str, Any]: - """Get summary of all task executions.""" - total_tasks = 
len(self.executions) - completed = sum(1 for e in self.executions.values() if e.is_completed) - failed = sum(1 for e in self.executions.values() if e.is_failed) - - total_duration = sum( - (e.duration.total_seconds() if e.duration else 0) - for e in self.executions.values() - ) - - return { - "total_tasks": total_tasks, - "completed": completed, - "failed": failed, - "success_rate": completed / total_tasks if total_tasks > 0 else 0, - "total_duration_seconds": total_duration, - "executions": { - task_id: { - "status": execution.status, - "duration_seconds": ( - execution.duration.total_seconds() - if execution.duration - else None - ), - "error": execution.error_message, - } - for task_id, execution in self.executions.items() - }, - } - - # Task Implementation Methods - - async def _setup_sentry_monitoring(self) -> dict[str, Any]: - """Set up Sentry monitoring.""" - config_content = '''""" -Sentry Configuration for CodeFlow Engine -""" - -import sentry_sdk -from sentry_sdk.integrations.logging import LoggingIntegration -import os - -def configure_sentry(): - """Configure Sentry for error tracking.""" - sentry_logging = LoggingIntegration( - level=logging.INFO, - event_level=logging.ERROR - ) - - sentry_sdk.init( - dsn=os.getenv("SENTRY_DSN"), - environment=os.getenv("SENTRY_ENVIRONMENT", "production"), - integrations=[sentry_logging], - traces_sample_rate=0.1, - profiles_sample_rate=0.1, - ) -''' - - config_file = self.project_root / "sentry_config.py" - config_file.write_text(config_content) - - return { - "files_created": ["sentry_config.py"], - "env_vars_required": ["SENTRY_DSN", "SENTRY_ENVIRONMENT"], - "status": "completed", - } - - async def _implement_structured_logging(self) -> dict[str, Any]: - """Implement structured logging.""" - logging_config = '''""" -Structured Logging Configuration -""" - -import logging -import json -from datetime import datetime - -class StructuredFormatter(logging.Formatter): - """JSON formatter for structured logging.""" - - def 
format(self, record): - log_entry = { - "timestamp": datetime.utcnow().isoformat(), - "level": record.levelname, - "logger": record.name, - "message": record.getMessage(), - "module": record.module, - "function": record.funcName, - "line": record.lineno, - } - - if hasattr(record, "extra_data"): - log_entry.update(record.extra_data) - - return json.dumps(log_entry) - -def setup_structured_logging(): - """Set up structured logging.""" - handler = logging.StreamHandler() - handler.setFormatter(StructuredFormatter()) - - root_logger = logging.getLogger() - root_logger.addHandler(handler) - root_logger.setLevel(logging.INFO) -''' - - config_file = self.project_root / "logging_config.py" - config_file.write_text(logging_config) - - return { - "files_created": ["logging_config.py"], - "env_vars_required": ["LOG_LEVEL", "LOG_FORMAT"], - "status": "completed", - } - - async def _setup_redis_caching(self) -> dict[str, Any]: - """Set up Redis caching.""" - redis_config = '''""" -Redis Caching Configuration -""" - -import redis -import json -import os -from typing import Any, Optional - -class CacheManager: - """Redis-based cache manager.""" - - def __init__(self): - self.redis_client = redis.from_url( - os.getenv("REDIS_URL", "redis://localhost:6379"), - decode_responses=True - ) - self.default_ttl = int(os.getenv("CACHE_TTL", "3600")) - - async def get(self, key: str) -> Optional[Any]: - """Get value from cache.""" - try: - value = self.redis_client.get(key) - return json.loads(value) if value else None - except Exception: - return None - - async def set(self, key: str, value: Any, ttl: Optional[int] = None) -> bool: - """Set value in cache.""" - try: - ttl = ttl or self.default_ttl - return self.redis_client.setex(key, ttl, json.dumps(value)) - except Exception: - return False -''' - - config_file = self.project_root / "redis_config.py" - config_file.write_text(redis_config) - - return { - "files_created": ["redis_config.py"], - "env_vars_required": ["REDIS_URL", 
"CACHE_TTL"], - "status": "completed", - } - - async def _create_health_checks(self) -> dict[str, Any]: - """Create health check endpoints.""" - health_checks = '''""" -Health Check Implementation -""" - -from typing import Dict, Any -import asyncio - -class HealthChecker: - """Health check manager.""" - - async def check_database(self) -> Dict[str, Any]: - """Check database connectivity.""" - # Placeholder implementation - return {"status": "healthy", "response_time_ms": 10} - - async def check_redis(self) -> Dict[str, Any]: - """Check Redis connectivity.""" - # Placeholder implementation - return {"status": "healthy", "response_time_ms": 5} - - async def check_external_apis(self) -> Dict[str, Any]: - """Check external API connectivity.""" - # Placeholder implementation - return {"status": "healthy", "apis_checked": ["openai", "anthropic"]} - - async def get_health_status(self) -> Dict[str, Any]: - """Get overall health status.""" - checks = await asyncio.gather( - self.check_database(), - self.check_redis(), - self.check_external_apis(), - return_exceptions=True - ) - - return { - "status": "healthy", - "timestamp": "2024-01-01T00:00:00Z", - "checks": { - "database": checks[0], - "redis": checks[1], - "external_apis": checks[2] - } - } -''' - - config_file = self.project_root / "health_checks.py" - config_file.write_text(health_checks) - - return {"files_created": ["health_checks.py"], "status": "completed"} - - async def _implement_circuit_breakers(self) -> dict[str, Any]: - """Implement circuit breaker pattern.""" - circuit_breaker = '''""" -Circuit Breaker Implementation -""" - -import asyncio -from enum import Enum -from typing import Callable, Any -from datetime import datetime, timedelta - -class CircuitState(Enum): - CLOSED = "closed" - OPEN = "open" - HALF_OPEN = "half_open" - -class CircuitBreaker: - """Circuit breaker for external API calls.""" - - def __init__(self, failure_threshold: int = 5, timeout: int = 60): - self.failure_threshold = 
failure_threshold - self.timeout = timeout - self.failure_count = 0 - self.last_failure_time = None - self.state = CircuitState.CLOSED - - async def call(self, func: Callable, *args, **kwargs) -> Any: - """Execute function with circuit breaker protection.""" - if self.state == CircuitState.OPEN: - if self._should_attempt_reset(): - self.state = CircuitState.HALF_OPEN - else: - raise Exception("Circuit breaker is OPEN") - - try: - result = await func(*args, **kwargs) - self._on_success() - return result - except Exception as e: - self._on_failure() - raise e - - def _should_attempt_reset(self) -> bool: - """Check if circuit breaker should attempt reset.""" - if self.last_failure_time: - return datetime.now() - self.last_failure_time > timedelta(seconds=self.timeout) - return False - - def _on_success(self) -> None: - """Handle successful call.""" - self.failure_count = 0 - self.state = CircuitState.CLOSED - - def _on_failure(self) -> None: - """Handle failed call.""" - self.failure_count += 1 - self.last_failure_time = datetime.now() - - if self.failure_count >= self.failure_threshold: - self.state = CircuitState.OPEN -''' - - config_file = self.project_root / "circuit_breaker.py" - config_file.write_text(circuit_breaker) - - return { - "files_created": ["circuit_breaker.py"], - "env_vars_required": [ - "CIRCUIT_BREAKER_THRESHOLD", - "CIRCUIT_BREAKER_TIMEOUT", - ], - "status": "completed", - } - - # Placeholder implementations for other tasks - async def _setup_postgresql_integration(self) -> dict[str, Any]: - return {"status": "completed", "message": "PostgreSQL integration configured"} - - async def _implement_prometheus_metrics(self) -> dict[str, Any]: - return {"status": "completed", "message": "Prometheus metrics implemented"} - - async def _setup_oauth2_authentication(self) -> dict[str, Any]: - return {"status": "completed", "message": "OAuth2 authentication configured"} - - async def _implement_advanced_llm_routing(self) -> dict[str, Any]: - return {"status": 
"completed", "message": "Advanced LLM routing implemented"} - - async def _create_comprehensive_testing(self) -> dict[str, Any]: - return { - "status": "completed", - "message": "Comprehensive testing framework created", - } - - async def _implement_rag_system(self) -> dict[str, Any]: - return {"status": "completed", "message": "RAG system implemented"} - - async def _create_analytics_dashboard(self) -> dict[str, Any]: - return {"status": "completed", "message": "Analytics dashboard created"} - - async def _setup_fine_tuned_models(self) -> dict[str, Any]: - return {"status": "completed", "message": "Fine-tuned models configured"} - - async def _implement_multi_cloud_deployment(self) -> dict[str, Any]: - return {"status": "completed", "message": "Multi-cloud deployment implemented"} - - async def _execute_task_implementation( - self, task_id: str - ) -> TaskExecution | BaseException: - """Execute task implementation.""" - handler = self.task_handlers.get(task_id) - if handler: - try: - result = await handler() - # Wrap the result in a TaskExecution object - execution = TaskExecution(task_id=task_id) - execution.status = "completed" - execution.output = result - execution.end_time = datetime.now() - return execution - except Exception as e: - return e - else: - return ValueError(f"No handler found for task: {task_id}") diff --git a/codeflow_engine/engine.py b/codeflow_engine/engine.py index c1669d4..0d0f614 100644 --- a/codeflow_engine/engine.py +++ b/codeflow_engine/engine.py @@ -4,53 +4,21 @@ Main engine class that orchestrates CodeFlow operations. 
""" -import asyncio import logging -from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any from codeflow_engine.actions.registry import ActionRegistry -# from codeflow_engine.agents.agents import AgentManager # Not implemented yet from codeflow_engine.ai.core.providers.manager import LLMProviderManager from codeflow_engine.config import CodeFlowConfig from codeflow_engine.exceptions import CodeFlowException, ConfigurationError from codeflow_engine.health import HealthChecker from codeflow_engine.integrations.registry import IntegrationRegistry -from codeflow_engine.quality.metrics_collector import MetricsCollector from codeflow_engine.utils.error_handlers import handle_operation_error from codeflow_engine.workflows.engine import WorkflowEngine -# from codeflow_engine.workflows.workflow_manager import WorkflowManager # Not implemented yet logger = logging.getLogger(__name__) -def handle_operation_error( - operation_name: str, - exception: Exception, - error_class: type[CodeFlowException] = CodeFlowException, - *, - reraise: bool = True, -) -> None: - """ - Standardized error handling helper for engine operations. - - Args: - operation_name: Name of the operation that failed - exception: The exception that was raised - error_class: Exception class to raise (default: CodeFlowException) - log_level: Logging level to use ('exception', 'error', 'warning') - reraise: Whether to reraise the exception after logging - - Raises: - error_class: The specified exception class with formatted message - """ - error_msg = f"{operation_name} failed: {exception}" - logger.exception(error_msg) - - if reraise: - raise error_class(error_msg) from exception - - class CodeFlowEngine: """ Main CodeFlow Engine class that coordinates all automation activities. 
diff --git a/codeflow_engine/models/__init__.py b/codeflow_engine/models/__init__.py index 6616ee0..2fa50e7 100644 --- a/codeflow_engine/models/__init__.py +++ b/codeflow_engine/models/__init__.py @@ -1,70 +1,31 @@ -""" -Models Package - -This package contains data models and schemas used throughout the CodeFlow system. """ +CodeFlow Engine Models -from pathlib import Path +This package contains data models and schemas used throughout the CodeFlow system. +Modules: +- artifacts: Prototype enhancement artifacts and I/O models +- base: Base model classes and mixins +- config: Configuration-related models +- events: Event and webhook payload models +""" -package_dir = Path(__file__).parent -package_dir.mkdir(parents=True, exist_ok=True) +from typing import Any -# Create artifacts module -artifacts_path = package_dir / "artifacts.py" -if not artifacts_path.exists(): - with artifacts_path.open("w", encoding="utf-8") as f: - content = ( - '"""\n' - "Artifacts Module\n\n" - "This module contains data models for various artifacts " - "used in the CodeFlow system.\n" - '"""\n\n' - "from dataclasses import dataclass\n" - "from enum import Enum\n" - "from typing import Any, Dict, List, Optional\n\n\n" - "class EnhancementType(str, Enum):\n" - ' """Types of enhancements that can be applied to ' - 'a prototype."""\n' - ' PRODUCTION = "production"\n' - ' TESTING = "testing"\n' - ' SECURITY = "security"\n\n\n' - "@dataclass\n" - "class PrototypeEnhancerInputs:\n" - ' """Input model for the PrototypeEnhancer."""\n' - " platform: str\n" - ' enhancement_type: "EnhancementType"\n' - " project_path: str\n" - " config: Optional[Dict[str, Any]] = None\n" - " dry_run: bool = False\n\n\n" - "@dataclass\n" - "class PrototypeEnhancerOutputs:\n" - ' """Output model for the PrototypeEnhancer."""\n' - " success: bool\n" - " message: str\n" - " generated_files: List[str]\n" - " modified_files: List[str]\n" - " next_steps: List[str]\n" - " metadata: Optional[Dict[str, Any]] = None\n" - ) - 
f.write(content) +# Import artifact models with error handling +EnhancementType: type[Any] | None = None +PrototypeEnhancerInputs: type[Any] | None = None +PrototypeEnhancerOutputs: type[Any] | None = None -# Create a placeholder for other model files -for model_file in ["base.py", "config.py", "events.py"]: - file_path = package_dir / model_file - if not file_path.exists(): - with file_path.open("w", encoding="utf-8") as f: - model_name = model_file.capitalize().replace("_", " ").replace( - ".py", "" - ) - model_desc = model_file.replace("_", " ").replace(".py", "") - content = ( - f'"""\n{model_name}\n\n' - f'This module contains data models for {model_desc}.\n"""' - ) - f.write(content) +try: + from codeflow_engine.models.artifacts import ( + EnhancementType, + PrototypeEnhancerInputs, + PrototypeEnhancerOutputs, + ) +except ImportError: + pass -# Export the models for easier imports __all__ = [ "EnhancementType", "PrototypeEnhancerInputs", diff --git a/codeflow_engine/models/base.py b/codeflow_engine/models/base.py index 3d83884..6a06f65 100644 --- a/codeflow_engine/models/base.py +++ b/codeflow_engine/models/base.py @@ -1,5 +1,69 @@ """ -Base +Base Models -This module contains data models for base. +This module contains base model classes and mixins for the CodeFlow system. 
""" + +from abc import ABC +from dataclasses import dataclass, field +from datetime import datetime, timezone +from typing import Any + + +@dataclass +class BaseModel(ABC): + """Base class for all CodeFlow models with common functionality.""" + + id: str | None = None + created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) + updated_at: datetime | None = None + + def to_dict(self) -> dict[str, Any]: + """Convert model to dictionary representation.""" + result = {} + for key, value in self.__dict__.items(): + if isinstance(value, datetime): + result[key] = value.isoformat() + elif hasattr(value, "to_dict"): + result[key] = value.to_dict() + else: + result[key] = value + return result + + +@dataclass +class TimestampMixin: + """Mixin for models that need timestamp tracking. + + Note: Defines created_at and updated_at fields (same as BaseModel). + Be cautious when using with BaseModel in multiple inheritance to avoid conflicts. + """ + + created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) + updated_at: datetime | None = None + + def touch(self) -> None: + """Update the updated_at timestamp.""" + self.updated_at = datetime.now(timezone.utc) + + +@dataclass +class MetadataMixin: + """Mixin for models that support arbitrary metadata.""" + + metadata: dict[str, Any] = field(default_factory=dict) + + def set_metadata(self, key: str, value: Any) -> None: + """Set a metadata key-value pair.""" + self.metadata[key] = value + + def get_metadata(self, key: str, default: Any = None) -> Any: + """Get a metadata value by key.""" + return self.metadata.get(key, default) + + +__all__ = [ + "BaseModel", + "MetadataMixin", + "TimestampMixin", +] diff --git a/codeflow_engine/models/config.py b/codeflow_engine/models/config.py index 1c55057..757f593 100644 --- a/codeflow_engine/models/config.py +++ b/codeflow_engine/models/config.py @@ -1,5 +1,127 @@ """ -Config +Configuration Models -This module contains data models for config. 
"""
Configuration Models

This module contains data models for configuration objects used in the CodeFlow system.
"""

from enum import StrEnum
from typing import Any

from pydantic import BaseModel, Field, SecretStr, field_validator


def _checked_url(value: str, *, empty_message: str, format_message: str) -> str:
    """Shared URL sanity check used by the config models.

    Args:
        value: Candidate connection URL.
        empty_message: Error text raised when *value* is empty.
        format_message: Error text raised when the ``://`` separator is missing.

    Returns:
        *value* unchanged when both checks pass.

    Raises:
        ValueError: When the URL is empty or lacks a ``://`` scheme separator.
    """
    if not value:
        raise ValueError(empty_message)
    # Basic URL validation - must contain ://
    if "://" not in value:
        raise ValueError(format_message)
    return value


class LogLevel(StrEnum):
    """Logging levels for the application."""

    DEBUG = "debug"
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
    CRITICAL = "critical"


class Environment(StrEnum):
    """Environment types."""

    DEVELOPMENT = "development"
    STAGING = "staging"
    PRODUCTION = "production"
    TESTING = "testing"


class DatabaseConfig(BaseModel):
    """Database configuration model with validation."""

    url: str
    pool_size: int = Field(default=5, ge=0)
    max_overflow: int = Field(default=10, ge=0)
    echo: bool = False
    ssl_required: bool = True

    @field_validator("url")
    @classmethod
    def validate_url(cls, v: str) -> str:
        """Validate database URL format."""
        return _checked_url(
            v,
            empty_message="Database URL cannot be empty",
            format_message=(
                "Invalid database URL format. Expected format: dialect://user:password@host:port/database"
            ),
        )


class RedisConfig(BaseModel):
    """Redis configuration model with validation."""

    url: str
    max_connections: int = Field(default=10, ge=0)
    ssl: bool = True

    @field_validator("url")
    @classmethod
    def validate_url(cls, v: str) -> str:
        """Validate Redis URL format."""
        return _checked_url(
            v,
            empty_message="Redis URL cannot be empty",
            format_message=(
                "Invalid Redis URL format. Expected format: redis://host:port or rediss://host:port"
            ),
        )


class LLMConfig(BaseModel):
    """LLM provider configuration model with validation."""

    provider: str
    api_key: SecretStr | None = None
    model: str  # Required; provider-specific (e.g., gpt-4, claude-3-opus, mistral-large)
    temperature: float = Field(default=0.7, ge=0.0, le=2.0)
    max_tokens: int = Field(default=4096, gt=0)


class GitHubConfig(BaseModel):
    """GitHub integration configuration model with secrets protection."""

    # SecretStr keeps credentials out of reprs/logs.
    token: SecretStr | None = None
    app_id: str | None = None
    private_key: SecretStr | None = None
    webhook_secret: SecretStr | None = None


class WorkflowConfig(BaseModel):
    """Workflow execution configuration model with validation."""

    max_concurrent: int = Field(default=10, gt=0)
    timeout_seconds: int = Field(default=300, gt=0)
    retry_attempts: int = Field(default=3, ge=0)
    retry_delay_seconds: int = Field(default=5, gt=0)


class AppConfig(BaseModel):
    """Main application configuration model.

    Sub-configs are optional so partial configurations load cleanly;
    only the workflow section always has a usable default.
    """

    environment: Environment = Environment.DEVELOPMENT
    log_level: LogLevel = LogLevel.INFO
    debug: bool = False
    database: DatabaseConfig | None = None
    redis: RedisConfig | None = None
    llm: LLMConfig | None = None
    github: GitHubConfig | None = None
    workflow: WorkflowConfig = Field(default_factory=WorkflowConfig)
    custom_settings: dict[str, Any] = Field(default_factory=dict)


__all__ = [
    "AppConfig",
    "DatabaseConfig",
    "Environment",
    "GitHubConfig",
    "LLMConfig",
    "LogLevel",
    "RedisConfig",
    "WorkflowConfig",
]
"""
Event Models

This module contains data models for events and webhook payloads used in the CodeFlow system.
"""

from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import StrEnum
from typing import Any


class EventType(StrEnum):
    """Types of events that can be processed.

    NOTE(review): values appear to mirror GitHub webhook event names, with
    CUSTOM as an escape hatch for non-VCS events — confirm against the
    webhook integration.
    """

    PULL_REQUEST = "pull_request"
    PULL_REQUEST_REVIEW = "pull_request_review"
    PULL_REQUEST_REVIEW_COMMENT = "pull_request_review_comment"
    ISSUE = "issue"
    ISSUE_COMMENT = "issue_comment"
    PUSH = "push"
    COMMIT_COMMENT = "commit_comment"
    CHECK_RUN = "check_run"
    CHECK_SUITE = "check_suite"
    DEPLOYMENT = "deployment"
    RELEASE = "release"
    WORKFLOW_RUN = "workflow_run"
    CUSTOM = "custom"


class EventAction(StrEnum):
    """Common event actions (values of a webhook payload's ``action`` field)."""

    OPENED = "opened"
    CLOSED = "closed"
    EDITED = "edited"
    DELETED = "deleted"
    CREATED = "created"
    UPDATED = "updated"
    MERGED = "merged"
    SYNCHRONIZE = "synchronize"
    REOPENED = "reopened"
    LABELED = "labeled"
    UNLABELED = "unlabeled"
    ASSIGNED = "assigned"
    UNASSIGNED = "unassigned"
    REVIEW_REQUESTED = "review_requested"
    REVIEW_REQUEST_REMOVED = "review_request_removed"
    SUBMITTED = "submitted"
    DISMISSED = "dismissed"


@dataclass
class User:
    """User model for event actors."""

    id: int
    login: str
    # Account kind; defaults to "User" — other values (e.g. bots, orgs) are
    # presumably possible, TODO confirm against the webhook source.
    type: str = "User"
    avatar_url: str | None = None
    html_url: str | None = None


@dataclass
class Repository:
    """Repository model for events."""

    id: int
    name: str
    # "owner/name" slug.
    full_name: str
    owner: User
    private: bool = False
    html_url: str | None = None
    default_branch: str = "main"


@dataclass
class PullRequest:
    """Pull request model for PR events."""

    id: int
    number: int
    title: str
    body: str | None
    state: str
    user: User
    # Raw branch-ref payloads; assumed to carry ref/sha keys — TODO confirm schema.
    base: dict[str, Any]
    head: dict[str, Any]
    html_url: str | None = None
    merged: bool = False
    # None means mergeability not yet computed by the host — TODO confirm.
    mergeable: bool | None = None
    draft: bool = False
    labels: list[dict[str, Any]] = field(default_factory=list)
    created_at: datetime | None = None
    updated_at: datetime | None = None


@dataclass
class Issue:
    """Issue model for issue events."""

    id: int
    number: int
    title: str
    body: str | None
    state: str
    user: User
    html_url: str | None = None
    labels: list[dict[str, Any]] = field(default_factory=list)
    created_at: datetime | None = None
    updated_at: datetime | None = None


@dataclass
class Comment:
    """Comment model for comment events."""

    id: int
    body: str
    user: User
    html_url: str | None = None
    created_at: datetime | None = None
    updated_at: datetime | None = None


@dataclass
class WebhookEvent:
    """Base webhook event model.

    ``payload`` retains the full raw payload; the typed fields below are
    convenience views populated only for the matching event types.
    """

    event_type: EventType
    # Raw payloads may carry action strings outside EventAction, hence the
    # permissive union.
    action: EventAction | str | None
    repository: Repository
    sender: User
    installation_id: int | None = None
    payload: dict[str, Any] = field(default_factory=dict)
    # Reception time (UTC), not the host's event time.
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    # Optional specific event data
    pull_request: PullRequest | None = None
    issue: Issue | None = None
    comment: Comment | None = None


@dataclass
class EventResult:
    """Result of processing an event."""

    success: bool
    event_type: EventType
    action: str | None = None
    message: str | None = None
    data: dict[str, Any] = field(default_factory=dict)
    errors: list[str] = field(default_factory=list)
    # Wall-clock processing duration in milliseconds; None when not measured.
    processing_time_ms: float | None = None


__all__ = [
    "Comment",
    "EventAction",
    "EventResult",
    "EventType",
    "Issue",
    "PullRequest",
    "Repository",
    "User",
    "WebhookEvent",
]
import Any + +# Authorization +EnterpriseAuthorizationManager: type[Any] | None = None +try: + from codeflow_engine.security.authorization.enterprise_manager import ( + EnterpriseAuthorizationManager, + ) +except ImportError: + pass + +# Authentication +try: + from codeflow_engine.security.auth import authenticate, verify_token +except ImportError: + authenticate = None + verify_token = None + +# Rate limiting +try: + from codeflow_engine.security.rate_limiting import RateLimiter, rate_limit +except ImportError: + RateLimiter = None + rate_limit = None + +# Encryption +try: + from codeflow_engine.security.encryption import decrypt, encrypt +except ImportError: + encrypt = None + decrypt = None + +# Input validation +try: + from codeflow_engine.security.input_validation import ( + sanitize_input, + validate_input, + ) +except ImportError: + validate_input = None + sanitize_input = None + +__all__ = [ + # Authorization + "EnterpriseAuthorizationManager", + # Authentication + "authenticate", + "verify_token", + # Rate limiting + "RateLimiter", + "rate_limit", + # Encryption + "decrypt", + "encrypt", + # Validation + "sanitize_input", + "validate_input", +] diff --git a/codeflow_engine/security/zero-trust.py b/codeflow_engine/security/zero_trust.py similarity index 100% rename from codeflow_engine/security/zero-trust.py rename to codeflow_engine/security/zero_trust.py diff --git a/codeflow_engine/utils/__init__.py b/codeflow_engine/utils/__init__.py index 3a73bca..baefb40 100644 --- a/codeflow_engine/utils/__init__.py +++ b/codeflow_engine/utils/__init__.py @@ -1,23 +1,64 @@ -""" -Utility modules for CodeFlow Engine. +""" +CodeFlow Engine Utility Modules + +Core utilities for logging, error handling, resilience, and other common operations. -Note: Core portable utilities (logging, resilience) are also available -in the standalone codeflow-utils package at tools/packages/codeflow-utils-python. 
"""
CodeFlow Engine Utility Modules

Core utilities for logging, error handling, resilience, and other common operations.

Modules:
- logging: Structured logging configuration
- error_handlers: Standardized error handling patterns
- resilience: Circuit breaker and retry patterns
- volume_utils: Volume management utilities
"""

from typing import Any

# Each utility group is imported defensively: names are pre-declared as None
# so that a missing optional submodule leaves the symbol bound (and typed)
# instead of breaking ``import codeflow_engine.utils``.

# Logging utilities
get_logger: Any = None
log_with_context: Any = None
setup_logging: Any = None
try:
    from codeflow_engine.utils.logging import (
        get_logger,
        log_with_context,
        setup_logging,
    )
except ImportError:
    pass

# Resilience utilities
CircuitBreaker: type[Any] | None = None
CircuitBreakerOpenError: type[Any] | None = None
CircuitBreakerState: type[Any] | None = None
try:
    from codeflow_engine.utils.resilience import (
        CircuitBreaker,
        CircuitBreakerOpenError,
        CircuitBreakerState,
    )
except ImportError:
    pass

# Error handling utilities
handle_operation_error: Any = None
handle_workflow_error: Any = None
try:
    from codeflow_engine.utils.error_handlers import (
        handle_operation_error,
        handle_workflow_error,
    )
except ImportError:
    pass

__all__ = [
    # Logging
    "get_logger",
    "log_with_context",
    "setup_logging",
    # Resilience
    "CircuitBreaker",
    "CircuitBreakerOpenError",
    "CircuitBreakerState",
    # Error handling
    "handle_operation_error",
    "handle_workflow_error",
]
+ +Modules: +- base: Base workflow class and interfaces +- engine: Workflow execution engine +- validation: Workflow validation utilities """ from pathlib import Path @@ -8,6 +14,8 @@ import yaml +from codeflow_engine.workflows.base import Workflow +from codeflow_engine.workflows.engine import WorkflowEngine # Workflow registry WORKFLOW_REGISTRY: dict[str, Any] = {} @@ -110,8 +118,14 @@ def get_workflow_info(workflow_name: str) -> dict[str, Any]: __all__ = [ + # Classes + "Workflow", + "WorkflowEngine", + # Registry "CORE_WORKFLOWS", "WORKFLOW_CATEGORIES", + "WORKFLOW_REGISTRY", + # Functions "get_workflow_info", "get_workflows_by_category", "list_workflows",