diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 2dec3967..00000000 --- a/.flake8 +++ /dev/null @@ -1,14 +0,0 @@ -[flake8] -ignore = - # Clashing with black: - E203, - W503 -exclude = - .git, - .github, - docs, - -per-file-ignores = - # tests and examples may contain lines longer than 79 chars - tests/*: E501 - examples/*: E501 diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 00000000..e4a9a55e --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,27 @@ +name: pre-commit (Ruff) + +on: + push: + branches: + - "master" + - "main" + pull_request: + branches: + - "master" + - "main" + workflow_dispatch: + +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - uses: pre-commit/action@v3.0.1 + with: + extra_args: --all-files ruff + - uses: pre-commit/action@v3.0.1 + with: + extra_args: --all-files ruff-format diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a7cfab43..6c113f84 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,23 +1,27 @@ repos: - - repo: https://github.com/ambv/black - rev: 23.11.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.7.0 hooks: - - id: black - - repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - - repo: https://github.com/python-poetry/poetry - rev: 1.7.0 + - id: ruff + args: ["--fix", "--exit-non-zero-on-fix"] + - id: ruff-format + - repo: local hooks: - id: poetry-check + name: poetry-check + entry: poetry check + language: system + pass_filenames: false - id: poetry-lock - - repo: local - hooks: + name: poetry-lock + entry: poetry lock + language: system + pass_filenames: false - id: unittests name: unittests language: system entry: poetry run pytest ./tests + files: ^(flowpipe/|tests/|examples/) pass_filenames: false - id: pylint name: pylint diff --git 
a/.specify/memory/constitution.md b/.specify/memory/constitution.md new file mode 100644 index 00000000..da2168fd --- /dev/null +++ b/.specify/memory/constitution.md @@ -0,0 +1,104 @@ + +# Flowpipe Constitution + +## Core Principles + +### I. Framework-Only Scope +Flowpipe ships only the graph, node, plug, and evaluation framework. Core code MUST remain +domain-agnostic: contributions MAY NOT add business-specific node implementations, external +service wrappers, or bundled workflows. Examples and docs can demonstrate user-defined +nodes, but the runtime stays a thin framework so teams own their node libraries. Rationale: +the project’s value is the reusable framework, not pre-baked logic, which keeps Flowpipe +lightweight. + +### II. Plain Python Simplicity +Flowpipe MUST run with normal Python tooling and never require bespoke environments, +background services, or infrastructure. New dependencies MUST be justified, pure-Python +when possible, and optionalized if they raise the project’s footprint. APIs MUST embrace +idiomatic Python constructs (classes, decorators, dataclasses where compatible) so users can +work inside familiar workflows. Rationale: simplicity keeps adoption friction low and honors +Flowpipe’s lightweight promise. + +### III. Portable Graph Serialization +Graphs and nodes MUST stay serializable so they can execute remotely (render farms, job +queues, services). State belongs in plugs and metadata; hidden runtime state is forbidden. +Node metadata MUST capture everything remote evaluators need, and serialization formats +must stay stable so external converters—documented but not maintained here—can rebuild +graphs. Rationale: serialization is the bridge that lets Flowpipe stay framework-only yet run +anywhere. + +### IV. Test-Driven Total Coverage +Every change follows red-green-refactor: author tests that fail, implement code, refactor with +green tests. Coverage MUST remain at 100% with pytest across supported Python versions. 
+New features require targeted unit tests plus higher-level coverage (integration/functional) +when behavior spans nodes/graphs. Skipping tests or marking `# pragma: no cover` demands +documented justification. Rationale: Flowpipe is engineered test-first, and exhaustive tests +ensure safe serialization and remote execution. + +### V. Stable APIs & Dual-Python Support +The public API MUST remain backward compatible for Python 3.7+ consumers until +governance explicitly retires that guarantee. Deprecations require a migration path and +semantic versioning (MAJOR for breaking changes, MINOR for backward-compatible features, +PATCH for fixes). Metadata in `pyproject.toml`, docs, and release notes MUST stay in sync so +downstream automation remains trustable. Rationale: pipelines depend on Flowpipe stability. + +## Engineering Constraints + +- Repository content MUST stay focused on framework modules (`flowpipe/*`), documentation, + and examples. Example nodes remain under `examples/` and never bleed into installable + packages. +- Serialization requirements MUST be met by keeping plug payloads JSON/pickle friendly, and + by guarding against implicit references (open file handles, live connections, global singletons). +- Tooling MUST use Black formatting and Google-style docstrings; lint/test hooks in + `.pre-commit-config.yaml` MUST remain green before merge. +- Documentation MUST accompany new behaviors, covering how to keep nodes simple, + serialize metadata, and run graphs locally versus remotely. Conversion guides live in docs, + not in runtime code. +- Releases follow the documented recipe (update `pyproject.toml` + `docs/conf.py`, tag, publish) + so PyPI consumers get reproducible artifacts with matching metadata. + +## Development Workflow & Quality Gates + +1. **Specification First**: Each feature starts with `specs/[feature]/spec.md`, capturing independent + user stories, serialization considerations, and backward-compatibility notes. +2. 
**Implementation Plan**: Plans must document how the work obeys every core principle, + especially framework scope, serialization, and TDD coverage expectations. +3. **Task Breakdown**: Tasks are grouped per user story so increments stay independently + deliverable and testable. Each task references precise paths and required tests. +4. **Testing Discipline**: Contributors run pre-commit hooks plus pytest across supported Python + versions. Pull requests link coverage diffs proving 100% coverage is intact. +5. **Serialization Proof**: Features that touch graph/node data include reproduction steps (docs + or tests) showing the new metadata serializes/deserializes cleanly without custom runtimes. + +## Governance + +- This constitution governs every Flowpipe contribution. Conflicting practices defer to this file. +- Amendments require: (a) an issue outlining the change and rationale, (b) agreement from at + least two maintainers, (c) synchronized updates to dependent templates/docs, and (d) a + recorded version bump with dates. +- Version bumps follow semantic rules described in Principle V. Ratification date records when + v1.0.0 was adopted; Last Amended reflects the latest accepted change. +- Compliance Review: Every PR must cite how it satisfies each principle (link to plan/spec + sections). Reviews block until gaps are resolved or explicitly deferred with TODOs noted here. + +**Version**: 1.0.0 | **Ratified**: 2025-11-14 | **Last Amended**: 2025-11-14 diff --git a/.specify/scripts/powershell/check-prerequisites.ps1 b/.specify/scripts/powershell/check-prerequisites.ps1 new file mode 100644 index 00000000..91667e9e --- /dev/null +++ b/.specify/scripts/powershell/check-prerequisites.ps1 @@ -0,0 +1,148 @@ +#!/usr/bin/env pwsh + +# Consolidated prerequisite checking script (PowerShell) +# +# This script provides unified prerequisite checking for Spec-Driven Development workflow. +# It replaces the functionality previously spread across multiple scripts. 
+# +# Usage: ./check-prerequisites.ps1 [OPTIONS] +# +# OPTIONS: +# -Json Output in JSON format +# -RequireTasks Require tasks.md to exist (for implementation phase) +# -IncludeTasks Include tasks.md in AVAILABLE_DOCS list +# -PathsOnly Only output path variables (no validation) +# -Help, -h Show help message + +[CmdletBinding()] +param( + [switch]$Json, + [switch]$RequireTasks, + [switch]$IncludeTasks, + [switch]$PathsOnly, + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +# Show help if requested +if ($Help) { + Write-Output @" +Usage: check-prerequisites.ps1 [OPTIONS] + +Consolidated prerequisite checking for Spec-Driven Development workflow. + +OPTIONS: + -Json Output in JSON format + -RequireTasks Require tasks.md to exist (for implementation phase) + -IncludeTasks Include tasks.md in AVAILABLE_DOCS list + -PathsOnly Only output path variables (no prerequisite validation) + -Help, -h Show this help message + +EXAMPLES: + # Check task prerequisites (plan.md required) + .\check-prerequisites.ps1 -Json + + # Check implementation prerequisites (plan.md + tasks.md required) + .\check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks + + # Get feature paths only (no validation) + .\check-prerequisites.ps1 -PathsOnly + +"@ + exit 0 +} + +# Source common functions +. 
"$PSScriptRoot/common.ps1" + +# Get feature paths and validate branch +$paths = Get-FeaturePathsEnv + +if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit:$paths.HAS_GIT)) { + exit 1 +} + +# If paths-only mode, output paths and exit (support combined -Json -PathsOnly) +if ($PathsOnly) { + if ($Json) { + [PSCustomObject]@{ + REPO_ROOT = $paths.REPO_ROOT + BRANCH = $paths.CURRENT_BRANCH + FEATURE_DIR = $paths.FEATURE_DIR + FEATURE_SPEC = $paths.FEATURE_SPEC + IMPL_PLAN = $paths.IMPL_PLAN + TASKS = $paths.TASKS + } | ConvertTo-Json -Compress + } else { + Write-Output "REPO_ROOT: $($paths.REPO_ROOT)" + Write-Output "BRANCH: $($paths.CURRENT_BRANCH)" + Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)" + Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)" + Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)" + Write-Output "TASKS: $($paths.TASKS)" + } + exit 0 +} + +# Validate required directories and files +if (-not (Test-Path $paths.FEATURE_DIR -PathType Container)) { + Write-Output "ERROR: Feature directory not found: $($paths.FEATURE_DIR)" + Write-Output "Run /speckit.specify first to create the feature structure." + exit 1 +} + +if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) { + Write-Output "ERROR: plan.md not found in $($paths.FEATURE_DIR)" + Write-Output "Run /speckit.plan first to create the implementation plan." + exit 1 +} + +# Check for tasks.md if required +if ($RequireTasks -and -not (Test-Path $paths.TASKS -PathType Leaf)) { + Write-Output "ERROR: tasks.md not found in $($paths.FEATURE_DIR)" + Write-Output "Run /speckit.tasks first to create the task list." 
+ exit 1 +} + +# Build list of available documents +$docs = @() + +# Always check these optional docs +if (Test-Path $paths.RESEARCH) { $docs += 'research.md' } +if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' } + +# Check contracts directory (only if it exists and has files) +if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) { + $docs += 'contracts/' +} + +if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' } + +# Include tasks.md if requested and it exists +if ($IncludeTasks -and (Test-Path $paths.TASKS)) { + $docs += 'tasks.md' +} + +# Output results +if ($Json) { + # JSON output + [PSCustomObject]@{ + FEATURE_DIR = $paths.FEATURE_DIR + AVAILABLE_DOCS = $docs + } | ConvertTo-Json -Compress +} else { + # Text output + Write-Output "FEATURE_DIR:$($paths.FEATURE_DIR)" + Write-Output "AVAILABLE_DOCS:" + + # Show status of each potential document + Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null + Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null + Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null + Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null + + if ($IncludeTasks) { + Test-FileExists -Path $paths.TASKS -Description 'tasks.md' | Out-Null + } +} diff --git a/.specify/scripts/powershell/common.ps1 b/.specify/scripts/powershell/common.ps1 new file mode 100644 index 00000000..b0be2735 --- /dev/null +++ b/.specify/scripts/powershell/common.ps1 @@ -0,0 +1,137 @@ +#!/usr/bin/env pwsh +# Common PowerShell functions analogous to common.sh + +function Get-RepoRoot { + try { + $result = git rev-parse --show-toplevel 2>$null + if ($LASTEXITCODE -eq 0) { + return $result + } + } catch { + # Git command failed + } + + # Fall back to script location for non-git repos + return (Resolve-Path (Join-Path $PSScriptRoot "../../..")).Path +} + +function 
Get-CurrentBranch { + # First check if SPECIFY_FEATURE environment variable is set + if ($env:SPECIFY_FEATURE) { + return $env:SPECIFY_FEATURE + } + + # Then check git if available + try { + $result = git rev-parse --abbrev-ref HEAD 2>$null + if ($LASTEXITCODE -eq 0) { + return $result + } + } catch { + # Git command failed + } + + # For non-git repos, try to find the latest feature directory + $repoRoot = Get-RepoRoot + $specsDir = Join-Path $repoRoot "specs" + + if (Test-Path $specsDir) { + $latestFeature = "" + $highest = 0 + + Get-ChildItem -Path $specsDir -Directory | ForEach-Object { + if ($_.Name -match '^(\d{3})-') { + $num = [int]$matches[1] + if ($num -gt $highest) { + $highest = $num + $latestFeature = $_.Name + } + } + } + + if ($latestFeature) { + return $latestFeature + } + } + + # Final fallback + return "main" +} + +function Test-HasGit { + try { + git rev-parse --show-toplevel 2>$null | Out-Null + return ($LASTEXITCODE -eq 0) + } catch { + return $false + } +} + +function Test-FeatureBranch { + param( + [string]$Branch, + [bool]$HasGit = $true + ) + + # For non-git repos, we can't enforce branch naming but still provide output + if (-not $HasGit) { + Write-Warning "[specify] Warning: Git repository not detected; skipped branch validation" + return $true + } + + if ($Branch -notmatch '^[0-9]{3}-') { + Write-Output "ERROR: Not on a feature branch. 
Current branch: $Branch" + Write-Output "Feature branches should be named like: 001-feature-name" + return $false + } + return $true +} + +function Get-FeatureDir { + param([string]$RepoRoot, [string]$Branch) + Join-Path $RepoRoot "specs/$Branch" +} + +function Get-FeaturePathsEnv { + $repoRoot = Get-RepoRoot + $currentBranch = Get-CurrentBranch + $hasGit = Test-HasGit + $featureDir = Get-FeatureDir -RepoRoot $repoRoot -Branch $currentBranch + + [PSCustomObject]@{ + REPO_ROOT = $repoRoot + CURRENT_BRANCH = $currentBranch + HAS_GIT = $hasGit + FEATURE_DIR = $featureDir + FEATURE_SPEC = Join-Path $featureDir 'spec.md' + IMPL_PLAN = Join-Path $featureDir 'plan.md' + TASKS = Join-Path $featureDir 'tasks.md' + RESEARCH = Join-Path $featureDir 'research.md' + DATA_MODEL = Join-Path $featureDir 'data-model.md' + QUICKSTART = Join-Path $featureDir 'quickstart.md' + CONTRACTS_DIR = Join-Path $featureDir 'contracts' + } +} + +function Test-FileExists { + param([string]$Path, [string]$Description) + if (Test-Path -Path $Path -PathType Leaf) { + Write-Output " ✓ $Description" + return $true + } else { + Write-Output " ✗ $Description" + return $false + } +} + +function Test-DirHasFiles { + param([string]$Path, [string]$Description) + if ((Test-Path -Path $Path -PathType Container) -and (Get-ChildItem -Path $Path -ErrorAction SilentlyContinue | Where-Object { -not $_.PSIsContainer } | Select-Object -First 1)) { + Write-Output " ✓ $Description" + return $true + } else { + Write-Output " ✗ $Description" + return $false + } +} + diff --git a/.specify/scripts/powershell/create-new-feature.ps1 b/.specify/scripts/powershell/create-new-feature.ps1 new file mode 100644 index 00000000..351f4e9e --- /dev/null +++ b/.specify/scripts/powershell/create-new-feature.ps1 @@ -0,0 +1,327 @@ +#!/usr/bin/env pwsh +# Create a new feature +[CmdletBinding()] +param( + [switch]$Json, + [string]$ShortName, + [int]$Number = 0, + [switch]$Help, + [Parameter(ValueFromRemainingArguments = $true)] + 
[string[]]$FeatureDescription +) +$ErrorActionPreference = 'Stop' + +# Show help if requested +if ($Help) { + Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName ] [-Number N] " + Write-Host "" + Write-Host "Options:" + Write-Host " -Json Output in JSON format" + Write-Host " -ShortName Provide a custom short name (2-4 words) for the branch" + Write-Host " -Number N Specify branch number manually (overrides auto-detection)" + Write-Host " -Help Show this help message" + Write-Host "" + Write-Host "Examples:" + Write-Host " ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'" + Write-Host " ./create-new-feature.ps1 'Implement OAuth2 integration for API'" + exit 0 +} + +# Check if feature description provided +if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) { + Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName ] " + exit 1 +} + +$featureDesc = ($FeatureDescription -join ' ').Trim() + +# Resolve repository root. Prefer git information when available, but fall back +# to searching for repository markers so the workflow still functions in repositories that +# were initialized with --no-git. 
+function Find-RepositoryRoot { + param( + [string]$StartDir, + [string[]]$Markers = @('.git', '.specify') + ) + $current = Resolve-Path $StartDir + while ($true) { + foreach ($marker in $Markers) { + if (Test-Path (Join-Path $current $marker)) { + return $current + } + } + $parent = Split-Path $current -Parent + if ($parent -eq $current) { + # Reached filesystem root without finding markers + return $null + } + $current = $parent + } +} + +function Get-HighestNumberFromSpecs { + param([string]$SpecsDir) + + $highest = 0 + if (Test-Path $SpecsDir) { + Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object { + if ($_.Name -match '^(\d+)') { + $num = [int]$matches[1] + if ($num -gt $highest) { $highest = $num } + } + } + } + return $highest +} + +function Get-HighestNumberFromBranches { + param() + + $highest = 0 + try { + $branches = git branch -a 2>$null + if ($LASTEXITCODE -eq 0) { + foreach ($branch in $branches) { + # Clean branch name: remove leading markers and remote prefixes + $cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', '' + + # Extract feature number if branch matches pattern ###-* + if ($cleanBranch -match '^(\d+)-') { + $num = [int]$matches[1] + if ($num -gt $highest) { $highest = $num } + } + } + } + } catch { + # If git command fails, return 0 + Write-Verbose "Could not check Git branches: $_" + } + return $highest +} + +function Get-NextBranchNumber { + param( + [string]$ShortName, + [string]$SpecsDir + ) + + # Fetch all remotes to get latest branch info (suppress errors if no remotes) + try { + git fetch --all --prune 2>$null | Out-Null + } catch { + # Ignore fetch errors + } + + # Find remote branches matching the pattern using git ls-remote + $remoteBranches = @() + try { + $remoteRefs = git ls-remote --heads origin 2>$null + if ($remoteRefs) { + $remoteBranches = $remoteRefs | Where-Object { $_ -match "refs/heads/(\d+)-$([regex]::Escape($ShortName))$" } | ForEach-Object { + if ($_ -match "refs/heads/(\d+)-") 
{ + [int]$matches[1] + } + } + } + } catch { + # Ignore errors + } + + # Check local branches + $localBranches = @() + try { + $allBranches = git branch 2>$null + if ($allBranches) { + $localBranches = $allBranches | Where-Object { $_ -match "^\*?\s*(\d+)-$([regex]::Escape($ShortName))$" } | ForEach-Object { + if ($_ -match "(\d+)-") { + [int]$matches[1] + } + } + } + } catch { + # Ignore errors + } + + # Check specs directory + $specDirs = @() + if (Test-Path $SpecsDir) { + try { + $specDirs = Get-ChildItem -Path $SpecsDir -Directory | Where-Object { $_.Name -match "^(\d+)-$([regex]::Escape($ShortName))$" } | ForEach-Object { + if ($_.Name -match "^(\d+)-") { + [int]$matches[1] + } + } + } catch { + # Ignore errors + } + } + + # Combine all sources and get the highest number + $maxNum = 0 + foreach ($num in ($remoteBranches + $localBranches + $specDirs)) { + if ($num -gt $maxNum) { + $maxNum = $num + } + } + + # Return next number + return $maxNum + 1 +} + +function ConvertTo-CleanBranchName { + param([string]$Name) + + return $Name.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', '' +} +$fallbackRoot = (Find-RepositoryRoot -StartDir $PSScriptRoot) +if (-not $fallbackRoot) { + Write-Error "Error: Could not determine repository root. Please run this script from within the repository." 
+ exit 1 +} + +try { + $repoRoot = git rev-parse --show-toplevel 2>$null + if ($LASTEXITCODE -eq 0) { + $hasGit = $true + } else { + throw "Git not available" + } +} catch { + $repoRoot = $fallbackRoot + $hasGit = $false +} + +Set-Location $repoRoot + +$specsDir = Join-Path $repoRoot 'specs' +New-Item -ItemType Directory -Path $specsDir -Force | Out-Null + +# Function to generate branch name with stop word filtering and length filtering +function Get-BranchName { + param([string]$Description) + + # Common stop words to filter out + $stopWords = @( + 'i', 'a', 'an', 'the', 'to', 'for', 'of', 'in', 'on', 'at', 'by', 'with', 'from', + 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', + 'do', 'does', 'did', 'will', 'would', 'should', 'could', 'can', 'may', 'might', 'must', 'shall', + 'this', 'that', 'these', 'those', 'my', 'your', 'our', 'their', + 'want', 'need', 'add', 'get', 'set' + ) + + # Convert to lowercase and extract words (alphanumeric only) + $cleanName = $Description.ToLower() -replace '[^a-z0-9\s]', ' ' + $words = $cleanName -split '\s+' | Where-Object { $_ } + + # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original) + $meaningfulWords = @() + foreach ($word in $words) { + # Skip stop words + if ($stopWords -contains $word) { continue } + + # Keep words that are length >= 3 OR appear as uppercase in original (likely acronyms) + if ($word.Length -ge 3) { + $meaningfulWords += $word + } elseif ($Description -match "\b$($word.ToUpper())\b") { + # Keep short words if they appear as uppercase in original (likely acronyms) + $meaningfulWords += $word + } + } + + # If we have meaningful words, use first 3-4 of them + if ($meaningfulWords.Count -gt 0) { + $maxWords = if ($meaningfulWords.Count -eq 4) { 4 } else { 3 } + $result = ($meaningfulWords | Select-Object -First $maxWords) -join '-' + return $result + } else { + # Fallback to original logic if no meaningful words found + 
$result = ConvertTo-CleanBranchName -Name $Description + $fallbackWords = ($result -split '-') | Where-Object { $_ } | Select-Object -First 3 + return [string]::Join('-', $fallbackWords) + } +} + +# Generate branch name +if ($ShortName) { + # Use provided short name, just clean it up + $branchSuffix = ConvertTo-CleanBranchName -Name $ShortName +} else { + # Generate from description with smart filtering + $branchSuffix = Get-BranchName -Description $featureDesc +} + +# Determine branch number +if ($Number -eq 0) { + if ($hasGit) { + # Check existing branches on remotes + $Number = Get-NextBranchNumber -ShortName $branchSuffix -SpecsDir $specsDir + } else { + # Fall back to local directory check + $Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1 + } +} + +$featureNum = ('{0:000}' -f $Number) +$branchName = "$featureNum-$branchSuffix" + +# GitHub enforces a 244-byte limit on branch names +# Validate and truncate if necessary +$maxBranchLength = 244 +if ($branchName.Length -gt $maxBranchLength) { + # Calculate how much we need to trim from suffix + # Account for: feature number (3) + hyphen (1) = 4 chars + $maxSuffixLength = $maxBranchLength - 4 + + # Truncate suffix + $truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength)) + # Remove trailing hyphen if truncation created one + $truncatedSuffix = $truncatedSuffix -replace '-$', '' + + $originalBranchName = $branchName + $branchName = "$featureNum-$truncatedSuffix" + + Write-Warning "[specify] Branch name exceeded GitHub's 244-byte limit" + Write-Warning "[specify] Original: $originalBranchName ($($originalBranchName.Length) bytes)" + Write-Warning "[specify] Truncated to: $branchName ($($branchName.Length) bytes)" +} + +if ($hasGit) { + try { + git checkout -b $branchName | Out-Null + } catch { + Write-Warning "Failed to create git branch: $branchName" + } +} else { + Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for 
$branchName" +} + +$featureDir = Join-Path $specsDir $branchName +New-Item -ItemType Directory -Path $featureDir -Force | Out-Null + +$template = Join-Path $repoRoot '.specify/templates/spec-template.md' +$specFile = Join-Path $featureDir 'spec.md' +if (Test-Path $template) { + Copy-Item $template $specFile -Force +} else { + New-Item -ItemType File -Path $specFile | Out-Null +} + +# Set the SPECIFY_FEATURE environment variable for the current session +$env:SPECIFY_FEATURE = $branchName + +if ($Json) { + $obj = [PSCustomObject]@{ + BRANCH_NAME = $branchName + SPEC_FILE = $specFile + FEATURE_NUM = $featureNum + HAS_GIT = $hasGit + } + $obj | ConvertTo-Json -Compress +} else { + Write-Output "BRANCH_NAME: $branchName" + Write-Output "SPEC_FILE: $specFile" + Write-Output "FEATURE_NUM: $featureNum" + Write-Output "HAS_GIT: $hasGit" + Write-Output "SPECIFY_FEATURE environment variable set to: $branchName" +} + diff --git a/.specify/scripts/powershell/setup-plan.ps1 b/.specify/scripts/powershell/setup-plan.ps1 new file mode 100644 index 00000000..d0ed582f --- /dev/null +++ b/.specify/scripts/powershell/setup-plan.ps1 @@ -0,0 +1,61 @@ +#!/usr/bin/env pwsh +# Setup implementation plan for a feature + +[CmdletBinding()] +param( + [switch]$Json, + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +# Show help if requested +if ($Help) { + Write-Output "Usage: ./setup-plan.ps1 [-Json] [-Help]" + Write-Output " -Json Output results in JSON format" + Write-Output " -Help Show this help message" + exit 0 +} + +# Load common functions +. 
"$PSScriptRoot/common.ps1" + +# Get all paths and variables from common functions +$paths = Get-FeaturePathsEnv + +# Check if we're on a proper feature branch (only for git repos) +if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GIT)) { + exit 1 +} + +# Ensure the feature directory exists +New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null + +# Copy plan template if it exists, otherwise note it or create empty file +$template = Join-Path $paths.REPO_ROOT '.specify/templates/plan-template.md' +if (Test-Path $template) { + Copy-Item $template $paths.IMPL_PLAN -Force + Write-Output "Copied plan template to $($paths.IMPL_PLAN)" +} else { + Write-Warning "Plan template not found at $template" + # Create a basic plan file if template doesn't exist + New-Item -ItemType File -Path $paths.IMPL_PLAN -Force | Out-Null +} + +# Output results +if ($Json) { + $result = [PSCustomObject]@{ + FEATURE_SPEC = $paths.FEATURE_SPEC + IMPL_PLAN = $paths.IMPL_PLAN + SPECS_DIR = $paths.FEATURE_DIR + BRANCH = $paths.CURRENT_BRANCH + HAS_GIT = $paths.HAS_GIT + } + $result | ConvertTo-Json -Compress +} else { + Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)" + Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)" + Write-Output "SPECS_DIR: $($paths.FEATURE_DIR)" + Write-Output "BRANCH: $($paths.CURRENT_BRANCH)" + Write-Output "HAS_GIT: $($paths.HAS_GIT)" +} diff --git a/.specify/scripts/powershell/update-agent-context.ps1 b/.specify/scripts/powershell/update-agent-context.ps1 new file mode 100644 index 00000000..741a55a6 --- /dev/null +++ b/.specify/scripts/powershell/update-agent-context.ps1 @@ -0,0 +1,439 @@ +#!/usr/bin/env pwsh +<#! +.SYNOPSIS +Update agent context files with information from plan.md (PowerShell version) + +.DESCRIPTION +Mirrors the behavior of scripts/bash/update-agent-context.sh: + 1. Environment Validation + 2. Plan Data Extraction + 3. Agent File Management (create from template or update existing) + 4. 
Content Generation (technology stack, recent changes, timestamp) + 5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, amp, q) + +.PARAMETER AgentType +Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist). + +.EXAMPLE +./update-agent-context.ps1 -AgentType claude + +.EXAMPLE +./update-agent-context.ps1 # Updates all existing agent files + +.NOTES +Relies on common helper functions in common.ps1 +#> +param( + [Parameter(Position=0)] + [ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','q')] + [string]$AgentType +) + +$ErrorActionPreference = 'Stop' + +# Import common helpers +$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. (Join-Path $ScriptDir 'common.ps1') + +# Acquire environment paths +$envData = Get-FeaturePathsEnv +$REPO_ROOT = $envData.REPO_ROOT +$CURRENT_BRANCH = $envData.CURRENT_BRANCH +$HAS_GIT = $envData.HAS_GIT +$IMPL_PLAN = $envData.IMPL_PLAN +$NEW_PLAN = $IMPL_PLAN + +# Agent file paths +$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md' +$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md' +$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md' +$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc' +$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md' +$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md' +$WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md' +$KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md' +$AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md' +$ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md' +$CODEBUDDY_FILE = Join-Path $REPO_ROOT 'CODEBUDDY.md' +$AMP_FILE = Join-Path $REPO_ROOT 'AGENTS.md' +$Q_FILE = Join-Path $REPO_ROOT 'AGENTS.md' + +$TEMPLATE_FILE = Join-Path $REPO_ROOT 
'.specify/templates/agent-file-template.md' + +# Parsed plan data placeholders +$script:NEW_LANG = '' +$script:NEW_FRAMEWORK = '' +$script:NEW_DB = '' +$script:NEW_PROJECT_TYPE = '' + +function Write-Info { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Host "INFO: $Message" +} + +function Write-Success { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Host "$([char]0x2713) $Message" +} + +function Write-WarningMsg { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Warning $Message +} + +function Write-Err { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Host "ERROR: $Message" -ForegroundColor Red +} + +function Validate-Environment { + if (-not $CURRENT_BRANCH) { + Write-Err 'Unable to determine current feature' + if ($HAS_GIT) { Write-Info "Make sure you're on a feature branch" } else { Write-Info 'Set SPECIFY_FEATURE environment variable or create a feature first' } + exit 1 + } + if (-not (Test-Path $NEW_PLAN)) { + Write-Err "No plan.md found at $NEW_PLAN" + Write-Info 'Ensure you are working on a feature with a corresponding spec directory' + if (-not $HAS_GIT) { Write-Info 'Use: $env:SPECIFY_FEATURE=your-feature-name or create a new feature first' } + exit 1 + } + if (-not (Test-Path $TEMPLATE_FILE)) { + Write-Err "Template file not found at $TEMPLATE_FILE" + Write-Info 'Run specify init to scaffold .specify/templates, or add agent-file-template.md there.' 
+ exit 1 + } +} + +function Extract-PlanField { + param( + [Parameter(Mandatory=$true)] + [string]$FieldPattern, + [Parameter(Mandatory=$true)] + [string]$PlanFile + ) + if (-not (Test-Path $PlanFile)) { return '' } + # Lines like **Language/Version**: Python 3.12 + $regex = "^\*\*$([Regex]::Escape($FieldPattern))\*\*: (.+)$" + Get-Content -LiteralPath $PlanFile -Encoding utf8 | ForEach-Object { + if ($_ -match $regex) { + $val = $Matches[1].Trim() + if ($val -notin @('NEEDS CLARIFICATION','N/A')) { return $val } + } + } | Select-Object -First 1 +} + +function Parse-PlanData { + param( + [Parameter(Mandatory=$true)] + [string]$PlanFile + ) + if (-not (Test-Path $PlanFile)) { Write-Err "Plan file not found: $PlanFile"; return $false } + Write-Info "Parsing plan data from $PlanFile" + $script:NEW_LANG = Extract-PlanField -FieldPattern 'Language/Version' -PlanFile $PlanFile + $script:NEW_FRAMEWORK = Extract-PlanField -FieldPattern 'Primary Dependencies' -PlanFile $PlanFile + $script:NEW_DB = Extract-PlanField -FieldPattern 'Storage' -PlanFile $PlanFile + $script:NEW_PROJECT_TYPE = Extract-PlanField -FieldPattern 'Project Type' -PlanFile $PlanFile + + if ($NEW_LANG) { Write-Info "Found language: $NEW_LANG" } else { Write-WarningMsg 'No language information found in plan' } + if ($NEW_FRAMEWORK) { Write-Info "Found framework: $NEW_FRAMEWORK" } + if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Info "Found database: $NEW_DB" } + if ($NEW_PROJECT_TYPE) { Write-Info "Found project type: $NEW_PROJECT_TYPE" } + return $true +} + +function Format-TechnologyStack { + param( + [Parameter(Mandatory=$false)] + [string]$Lang, + [Parameter(Mandatory=$false)] + [string]$Framework + ) + $parts = @() + if ($Lang -and $Lang -ne 'NEEDS CLARIFICATION') { $parts += $Lang } + if ($Framework -and $Framework -notin @('NEEDS CLARIFICATION','N/A')) { $parts += $Framework } + if (-not $parts) { return '' } + return ($parts -join ' + ') +} + +function Get-ProjectStructure { + param( + 
[Parameter(Mandatory=$false)] + [string]$ProjectType + ) + if ($ProjectType -match 'web') { return "backend/`nfrontend/`ntests/" } else { return "src/`ntests/" } +} + +function Get-CommandsForLanguage { + param( + [Parameter(Mandatory=$false)] + [string]$Lang + ) + switch -Regex ($Lang) { + 'Python' { return "cd src; pytest; ruff check ." } + 'Rust' { return "cargo test; cargo clippy" } + 'JavaScript|TypeScript' { return "npm test; npm run lint" } + default { return "# Add commands for $Lang" } + } +} + +function Get-LanguageConventions { + param( + [Parameter(Mandatory=$false)] + [string]$Lang + ) + if ($Lang) { "${Lang}: Follow standard conventions" } else { 'General: Follow standard conventions' } +} + +function New-AgentFile { + param( + [Parameter(Mandatory=$true)] + [string]$TargetFile, + [Parameter(Mandatory=$true)] + [string]$ProjectName, + [Parameter(Mandatory=$true)] + [datetime]$Date + ) + if (-not (Test-Path $TEMPLATE_FILE)) { Write-Err "Template not found at $TEMPLATE_FILE"; return $false } + $temp = New-TemporaryFile + Copy-Item -LiteralPath $TEMPLATE_FILE -Destination $temp -Force + + $projectStructure = Get-ProjectStructure -ProjectType $NEW_PROJECT_TYPE + $commands = Get-CommandsForLanguage -Lang $NEW_LANG + $languageConventions = Get-LanguageConventions -Lang $NEW_LANG + + $escaped_lang = $NEW_LANG + $escaped_framework = $NEW_FRAMEWORK + $escaped_branch = $CURRENT_BRANCH + + $content = Get-Content -LiteralPath $temp -Raw -Encoding utf8 + $content = $content -replace '\[PROJECT NAME\]',$ProjectName + $content = $content -replace '\[DATE\]',$Date.ToString('yyyy-MM-dd') + + # Build the technology stack string safely + $techStackForTemplate = "" + if ($escaped_lang -and $escaped_framework) { + $techStackForTemplate = "- $escaped_lang + $escaped_framework ($escaped_branch)" + } elseif ($escaped_lang) { + $techStackForTemplate = "- $escaped_lang ($escaped_branch)" + } elseif ($escaped_framework) { + $techStackForTemplate = "- $escaped_framework 
($escaped_branch)" + } + + $content = $content -replace '\[EXTRACTED FROM ALL PLAN.MD FILES\]',$techStackForTemplate + # For project structure we manually embed (keep newlines) + $escapedStructure = [Regex]::Escape($projectStructure) + $content = $content -replace '\[ACTUAL STRUCTURE FROM PLANS\]',$escapedStructure + # Replace escaped newlines placeholder after all replacements + $content = $content -replace '\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]',$commands + $content = $content -replace '\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]',$languageConventions + + # Build the recent changes string safely + $recentChangesForTemplate = "" + if ($escaped_lang -and $escaped_framework) { + $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang} + ${escaped_framework}" + } elseif ($escaped_lang) { + $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang}" + } elseif ($escaped_framework) { + $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_framework}" + } + + $content = $content -replace '\[LAST 3 FEATURES AND WHAT THEY ADDED\]',$recentChangesForTemplate + # Convert literal \n sequences introduced by Escape to real newlines + $content = $content -replace '\\n',[Environment]::NewLine + + $parent = Split-Path -Parent $TargetFile + if (-not (Test-Path $parent)) { New-Item -ItemType Directory -Path $parent | Out-Null } + Set-Content -LiteralPath $TargetFile -Value $content -NoNewline -Encoding utf8 + Remove-Item $temp -Force + return $true +} + +function Update-ExistingAgentFile { + param( + [Parameter(Mandatory=$true)] + [string]$TargetFile, + [Parameter(Mandatory=$true)] + [datetime]$Date + ) + if (-not (Test-Path $TargetFile)) { return (New-AgentFile -TargetFile $TargetFile -ProjectName (Split-Path $REPO_ROOT -Leaf) -Date $Date) } + + $techStack = Format-TechnologyStack -Lang $NEW_LANG -Framework $NEW_FRAMEWORK + $newTechEntries = @() + if ($techStack) { + $escapedTechStack = [Regex]::Escape($techStack) + if (-not 
(Select-String -Pattern $escapedTechStack -Path $TargetFile -Quiet)) { + $newTechEntries += "- $techStack ($CURRENT_BRANCH)" + } + } + if ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) { + $escapedDB = [Regex]::Escape($NEW_DB) + if (-not (Select-String -Pattern $escapedDB -Path $TargetFile -Quiet)) { + $newTechEntries += "- $NEW_DB ($CURRENT_BRANCH)" + } + } + $newChangeEntry = '' + if ($techStack) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${techStack}" } + elseif ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${NEW_DB}" } + + $lines = Get-Content -LiteralPath $TargetFile -Encoding utf8 + $output = New-Object System.Collections.Generic.List[string] + $inTech = $false; $inChanges = $false; $techAdded = $false; $changeAdded = $false; $existingChanges = 0 + + for ($i=0; $i -lt $lines.Count; $i++) { + $line = $lines[$i] + if ($line -eq '## Active Technologies') { + $output.Add($line) + $inTech = $true + continue + } + if ($inTech -and $line -match '^##\s') { + if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true } + $output.Add($line); $inTech = $false; continue + } + if ($inTech -and [string]::IsNullOrWhiteSpace($line)) { + if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true } + $output.Add($line); continue + } + if ($line -eq '## Recent Changes') { + $output.Add($line) + if ($newChangeEntry) { $output.Add($newChangeEntry); $changeAdded = $true } + $inChanges = $true + continue + } + if ($inChanges -and $line -match '^##\s') { $output.Add($line); $inChanges = $false; continue } + if ($inChanges -and $line -match '^- ') { + if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ } + continue + } + if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') { + $output.Add(($line -replace 
'\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd'))) + continue + } + $output.Add($line) + } + + # Post-loop check: if we're still in the Active Technologies section and haven't added new entries + if ($inTech -and -not $techAdded -and $newTechEntries.Count -gt 0) { + $newTechEntries | ForEach-Object { $output.Add($_) } + } + + Set-Content -LiteralPath $TargetFile -Value ($output -join [Environment]::NewLine) -Encoding utf8 + return $true +} + +function Update-AgentFile { + param( + [Parameter(Mandatory=$true)] + [string]$TargetFile, + [Parameter(Mandatory=$true)] + [string]$AgentName + ) + if (-not $TargetFile -or -not $AgentName) { Write-Err 'Update-AgentFile requires TargetFile and AgentName'; return $false } + Write-Info "Updating $AgentName context file: $TargetFile" + $projectName = Split-Path $REPO_ROOT -Leaf + $date = Get-Date + + $dir = Split-Path -Parent $TargetFile + if (-not (Test-Path $dir)) { New-Item -ItemType Directory -Path $dir | Out-Null } + + if (-not (Test-Path $TargetFile)) { + if (New-AgentFile -TargetFile $TargetFile -ProjectName $projectName -Date $date) { Write-Success "Created new $AgentName context file" } else { Write-Err 'Failed to create new agent file'; return $false } + } else { + try { + if (Update-ExistingAgentFile -TargetFile $TargetFile -Date $date) { Write-Success "Updated existing $AgentName context file" } else { Write-Err 'Failed to update agent file'; return $false } + } catch { + Write-Err "Cannot access or update existing file: $TargetFile. 
$_" + return $false + } + } + return $true +} + +function Update-SpecificAgent { + param( + [Parameter(Mandatory=$true)] + [string]$Type + ) + switch ($Type) { + 'claude' { Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code' } + 'gemini' { Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI' } + 'copilot' { Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot' } + 'cursor-agent' { Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE' } + 'qwen' { Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code' } + 'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' } + 'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' } + 'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' } + 'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' } + 'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' } + 'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' } + 'codebuddy' { Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI' } + 'amp' { Update-AgentFile -TargetFile $AMP_FILE -AgentName 'Amp' } + 'q' { Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI' } + default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|q'; return $false } + } +} + +function Update-AllExistingAgents { + $found = $false + $ok = $true + if (Test-Path $CLAUDE_FILE) { if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }; $found = $true } + if (Test-Path $GEMINI_FILE) { if (-not (Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI')) { $ok = $false }; $found = $true } + if (Test-Path $COPILOT_FILE) { if (-not (Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot')) { $ok = $false }; $found = 
$true } + if (Test-Path $CURSOR_FILE) { if (-not (Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE')) { $ok = $false }; $found = $true } + if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true } + if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true } + if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true } + if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true } + if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true } + if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true } + if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI')) { $ok = $false }; $found = $true } + if (Test-Path $Q_FILE) { if (-not (Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI')) { $ok = $false }; $found = $true } + if (-not $found) { + Write-Info 'No existing agent files found, creating default Claude file...' 
+ if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false } + } + return $ok +} + +function Print-Summary { + Write-Host '' + Write-Info 'Summary of changes:' + if ($NEW_LANG) { Write-Host " - Added language: $NEW_LANG" } + if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" } + if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" } + Write-Host '' + Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|q]' +} + +function Main { + Validate-Environment + Write-Info "=== Updating agent context files for feature $CURRENT_BRANCH ===" + if (-not (Parse-PlanData -PlanFile $NEW_PLAN)) { Write-Err 'Failed to parse plan data'; exit 1 } + $success = $true + if ($AgentType) { + Write-Info "Updating specific agent: $AgentType" + if (-not (Update-SpecificAgent -Type $AgentType)) { $success = $false } + } + else { + Write-Info 'No agent specified, updating all existing agent files...' + if (-not (Update-AllExistingAgents)) { $success = $false } + } + Print-Summary + if ($success) { Write-Success 'Agent context update completed successfully'; exit 0 } else { Write-Err 'Agent context update completed with errors'; exit 1 } +} + +Main + diff --git a/.specify/templates/agent-file-template.md b/.specify/templates/agent-file-template.md new file mode 100644 index 00000000..4cc7fd66 --- /dev/null +++ b/.specify/templates/agent-file-template.md @@ -0,0 +1,28 @@ +# [PROJECT NAME] Development Guidelines + +Auto-generated from all feature plans. 
Last updated: [DATE] + +## Active Technologies + +[EXTRACTED FROM ALL PLAN.MD FILES] + +## Project Structure + +```text +[ACTUAL STRUCTURE FROM PLANS] +``` + +## Commands + +[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES] + +## Code Style + +[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE] + +## Recent Changes + +[LAST 3 FEATURES AND WHAT THEY ADDED] + + + diff --git a/.specify/templates/checklist-template.md b/.specify/templates/checklist-template.md new file mode 100644 index 00000000..806657da --- /dev/null +++ b/.specify/templates/checklist-template.md @@ -0,0 +1,40 @@ +# [CHECKLIST TYPE] Checklist: [FEATURE NAME] + +**Purpose**: [Brief description of what this checklist covers] +**Created**: [DATE] +**Feature**: [Link to spec.md or relevant documentation] + +**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements. + + + +## [Category 1] + +- [ ] CHK001 First checklist item with clear action +- [ ] CHK002 Second checklist item +- [ ] CHK003 Third checklist item + +## [Category 2] + +- [ ] CHK004 Another category item +- [ ] CHK005 Item with specific criteria +- [ ] CHK006 Final item in this category + +## Notes + +- Check items off as completed: `[x]` +- Add comments or findings inline +- Link to relevant resources or documentation +- Items are numbered sequentially for easy reference diff --git a/.specify/templates/plan-template.md b/.specify/templates/plan-template.md new file mode 100644 index 00000000..5aefd24b --- /dev/null +++ b/.specify/templates/plan-template.md @@ -0,0 +1,115 @@ +# Implementation Plan: [FEATURE] + +**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link] +**Input**: Feature specification from `/specs/[###-feature-name]/spec.md` + +**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for the execution workflow. 
+ +## Summary + +[Extract from feature spec: primary requirement + technical approach from research] + +## Technical Context + + + +**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION] +**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION] +**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A] +**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION] +**Target Platform**: [e.g., Linux server, iOS 15+, WASM or NEEDS CLARIFICATION] +**Project Type**: [single/web/mobile - determines source structure] +**Performance Goals**: [domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION] +**Constraints**: [domain-specific, e.g., <200ms p95, <100MB memory, offline-capable or NEEDS CLARIFICATION] +**Scale/Scope**: [domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION] + +## Constitution Check + +*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.* + +- **Framework-Only Scope**: Confirm the feature does not add bundled node implementations or + service-specific integrations inside `flowpipe/`. Document where user-land nodes live. +- **Plain Python Simplicity**: List every new dependency, why it is unavoidable, and how it + remains optional/pure-Python. Note required Python versions (2.7 + 3.6+). +- **Portable Graph Serialization**: Describe how new metadata stays serializable and how + remote execution tooling can consume it. +- **Test-Driven Total Coverage**: Outline the red-green plan, required pytest suites, and how + you will keep coverage at 100%. +- **Stable APIs & Dual-Python Support**: Explain compatibility impact, required SemVer bump, + and migration guidance (if any). +- **Engineering Constraints**: Call out documentation, formatting, and release artifacts that + must be updated alongside the feature. 
+ +## Project Structure + +### Documentation (this feature) + +```text +specs/[###-feature]/ +├── plan.md # This file (/speckit.plan command output) +├── research.md # Phase 0 output (/speckit.plan command) +├── data-model.md # Phase 1 output (/speckit.plan command) +├── quickstart.md # Phase 1 output (/speckit.plan command) +├── contracts/ # Phase 1 output (/speckit.plan command) +└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan) +``` + +### Source Code (repository root) + + +```text +# [REMOVE IF UNUSED] Option 1: Single project (DEFAULT) +src/ +├── models/ +├── services/ +├── cli/ +└── lib/ + +tests/ +├── contract/ +├── integration/ +└── unit/ + +# [REMOVE IF UNUSED] Option 2: Web application (when "frontend" + "backend" detected) +backend/ +├── src/ +│ ├── models/ +│ ├── services/ +│ └── api/ +└── tests/ + +frontend/ +├── src/ +│ ├── components/ +│ ├── pages/ +│ └── services/ +└── tests/ + +# [REMOVE IF UNUSED] Option 3: Mobile + API (when "iOS/Android" detected) +api/ +└── [same as backend above] + +ios/ or android/ +└── [platform-specific structure: feature modules, UI flows, platform tests] +``` + +**Structure Decision**: [Document the selected structure and reference the real +directories captured above] + +## Complexity Tracking + +> **Fill ONLY if Constitution Check has violations that must be justified** + +| Violation | Why Needed | Simpler Alternative Rejected Because | +|-----------|------------|-------------------------------------| +| [e.g., 4th project] | [current need] | [why 3 projects insufficient] | +| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] | diff --git a/.specify/templates/spec-template.md b/.specify/templates/spec-template.md new file mode 100644 index 00000000..55416cd3 --- /dev/null +++ b/.specify/templates/spec-template.md @@ -0,0 +1,130 @@ +# Feature Specification: [FEATURE NAME] + +**Feature Branch**: `[###-feature-name]` +**Created**: [DATE] +**Status**: 
Draft +**Input**: User description: "$ARGUMENTS" + +## User Scenarios & Testing *(mandatory)* + + + +### User Story 1 - [Brief Title] (Priority: P1) + +[Describe this user journey in plain language] + +**Why this priority**: [Explain the value and why it has this priority level] + +**Independent Test**: [Describe how this can be tested independently - e.g., "Can be fully tested by [specific action] and delivers [specific value]"] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected outcome] +2. **Given** [initial state], **When** [action], **Then** [expected outcome] + +--- + +### User Story 2 - [Brief Title] (Priority: P2) + +[Describe this user journey in plain language] + +**Why this priority**: [Explain the value and why it has this priority level] + +**Independent Test**: [Describe how this can be tested independently] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected outcome] + +--- + +### User Story 3 - [Brief Title] (Priority: P3) + +[Describe this user journey in plain language] + +**Why this priority**: [Explain the value and why it has this priority level] + +**Independent Test**: [Describe how this can be tested independently] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected outcome] + +--- + +[Add more user stories as needed, each with an assigned priority] + +### Edge Cases + + + +- What happens when [boundary condition]? +- How does system handle [error scenario]? 
+ +## Requirements *(mandatory)* + + + +### Functional Requirements + +- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"] +- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"] +- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"] +- **FR-004**: System MUST [data requirement, e.g., "persist user preferences"] +- **FR-005**: System MUST [behavior, e.g., "log all security events"] + +*Example of marking unclear requirements:* + +- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?] +- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified] + +### Key Entities *(include if feature involves data)* + +- **[Entity 1]**: [What it represents, key attributes without implementation] +- **[Entity 2]**: [What it represents, relationships to other entities] + +## Success Criteria *(mandatory)* + + + +### Measurable Outcomes + +- **SC-001**: [Measurable metric, e.g., "Users can complete account creation in under 2 minutes"] +- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"] +- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"] +- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"] + +## Constitution Alignment Checklist *(must be explicit)* + +- **Framework-Only Scope**: Describe how this feature keeps Flowpipe as a framework (no bundled + node logic) and where sample nodes will live (docs/examples only). +- **Plain Python Simplicity**: List required Python versions (2.7 + 3.6+ supported) and any new + dependencies or services, including why they stay optional/pure-Python. +- **Portable Graph Serialization**: Explain how nodes/graphs stay serializable and how remote + evaluators will consume the new metadata. 
+- **Test-Driven Total Coverage**: Identify the exact pytest suites to extend, new tests to add, and + how 100% coverage will be preserved. +- **Stable APIs & Dual-Python Support**: Document compatibility impact, expected semantic + version bump, and migration notes if behavior changes. +- **Engineering Constraints**: Call out the docs, formatting hooks, and release artifacts that must be + updated because of this feature. diff --git a/.specify/templates/tasks-template.md b/.specify/templates/tasks-template.md new file mode 100644 index 00000000..dd4588e0 --- /dev/null +++ b/.specify/templates/tasks-template.md @@ -0,0 +1,258 @@ +--- + +description: "Task list template for feature implementation" +--- + +# Tasks: [FEATURE NAME] + +**Input**: Design documents from `/specs/[###-feature-name]/` +**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/ + +**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification. + +**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story. + +## Format: `[ID] [P?] [Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3) +- Include exact file paths in descriptions + +## Path Conventions + +- **Single project**: `src/`, `tests/` at repository root +- **Web app**: `backend/src/`, `frontend/src/` +- **Mobile**: `api/src/`, `ios/src/` or `android/src/` +- Paths shown below assume single project - adjust based on plan.md structure + +## Constitution Guardrails + +- Always include tasks that prove Flowpipe stays a framework (no shipping node logic). +- Call out any dependency or environment changes plus how they remain optional/pure-Python. 
+- Add explicit tasks for serialization/remote-execution verification when node data structures change. +- Create TDD tasks first: write/extend tests that fail, then implement code until coverage returns to 100%. +- Flag compatibility tasks (docs, migration notes, SemVer decision) if APIs shift. + + +## Phase 1: Setup (Shared Infrastructure) + +**Purpose**: Project initialization and basic structure + +- [ ] T001 Create project structure per implementation plan +- [ ] T002 Initialize [language] project with [framework] dependencies +- [ ] T003 [P] Configure linting and formatting tools + +--- + +## Phase 2: Foundational (Blocking Prerequisites) + +**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented + +**⚠️ CRITICAL**: No user story work can begin until this phase is complete + +Examples of foundational tasks (adjust based on your project): + +- [ ] T004 Setup database schema and migrations framework +- [ ] T005 [P] Implement authentication/authorization framework +- [ ] T006 [P] Setup API routing and middleware structure +- [ ] T007 Create base models/entities that all stories depend on +- [ ] T008 Configure error handling and logging infrastructure +- [ ] T009 Setup environment configuration management + +**Checkpoint**: Foundation ready - user story implementation can now begin in parallel + +--- + +## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️ + +> **NOTE: Write these tests FIRST, ensure they FAIL before implementation** + +- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test_[name].py +- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test_[name].py + +### Implementation for User Story 1 + +- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py +- [ 
] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py +- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013) +- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T016 [US1] Add validation and error handling +- [ ] T017 [US1] Add logging for user story 1 operations + +**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently + +--- + +## Phase 4: User Story 2 - [Title] (Priority: P2) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test_[name].py +- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test_[name].py + +### Implementation for User Story 2 + +- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py +- [ ] T021 [US2] Implement [Service] in src/services/[service].py +- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T023 [US2] Integrate with User Story 1 components (if needed) + +**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently + +--- + +## Phase 5: User Story 3 - [Title] (Priority: P3) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test_[name].py +- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test_[name].py + +### Implementation for User Story 3 + +- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py +- [ ] T027 [US3] Implement [Service] in src/services/[service].py +- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py + 
+**Checkpoint**: All user stories should now be independently functional + +--- + +[Add more user story phases as needed, following the same pattern] + +--- + +## Phase N: Polish & Cross-Cutting Concerns + +**Purpose**: Improvements that affect multiple user stories + +- [ ] TXXX [P] Documentation updates in docs/ +- [ ] TXXX Code cleanup and refactoring +- [ ] TXXX Performance optimization across all stories +- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/ +- [ ] TXXX Security hardening +- [ ] TXXX Run quickstart.md validation + +--- + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Setup (Phase 1)**: No dependencies - can start immediately +- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories +- **User Stories (Phase 3+)**: All depend on Foundational phase completion + - User stories can then proceed in parallel (if staffed) + - Or sequentially in priority order (P1 → P2 → P3) +- **Polish (Final Phase)**: Depends on all desired user stories being complete + +### User Story Dependencies + +- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories +- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable +- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable + +### Within Each User Story + +- Tests (if included) MUST be written and FAIL before implementation +- Models before services +- Services before endpoints +- Core implementation before integration +- Story complete before moving to next priority + +### Parallel Opportunities + +- All Setup tasks marked [P] can run in parallel +- All Foundational tasks marked [P] can run in parallel (within Phase 2) +- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows) +- All tests for a user story marked [P] can run in 
parallel +- Models within a story marked [P] can run in parallel +- Different user stories can be worked on in parallel by different team members + +--- + +## Parallel Example: User Story 1 + +```bash +# Launch all tests for User Story 1 together (if tests requested): +Task: "Contract test for [endpoint] in tests/contract/test_[name].py" +Task: "Integration test for [user journey] in tests/integration/test_[name].py" + +# Launch all models for User Story 1 together: +Task: "Create [Entity1] model in src/models/[entity1].py" +Task: "Create [Entity2] model in src/models/[entity2].py" +``` + +--- + +## Implementation Strategy + +### MVP First (User Story 1 Only) + +1. Complete Phase 1: Setup +2. Complete Phase 2: Foundational (CRITICAL - blocks all stories) +3. Complete Phase 3: User Story 1 +4. **STOP and VALIDATE**: Test User Story 1 independently +5. Deploy/demo if ready + +### Incremental Delivery + +1. Complete Setup + Foundational → Foundation ready +2. Add User Story 1 → Test independently → Deploy/Demo (MVP!) +3. Add User Story 2 → Test independently → Deploy/Demo +4. Add User Story 3 → Test independently → Deploy/Demo +5. Each story adds value without breaking previous stories + +### Parallel Team Strategy + +With multiple developers: + +1. Team completes Setup + Foundational together +2. Once Foundational is done: + - Developer A: User Story 1 + - Developer B: User Story 2 + - Developer C: User Story 3 +3. 
Stories complete and integrate independently + +--- + +## Notes + +- [P] tasks = different files, no dependencies +- [Story] label maps task to specific user story for traceability +- Each user story should be independently completable and testable +- Verify tests fail before implementing +- Commit after each task or logical group +- Stop at any checkpoint to validate story independently +- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..020c70da --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,29 @@ +# flowpipe Development Guidelines + +Auto-generated from all feature plans. Last updated: 2025-11-14 + +## Active Technologies + +- Python runtime support 3.7+ (library); contributors use Python 3.8+ to run Ruff + pre-commit, Ruff (formatter/linter), pytest (001-ruff-formatting) + +## Project Structure + +```text +flowpipe/ +tests/ +``` + +## Commands + +cd src; pytest; ruff check . + +## Code Style + +Python runtime support 3.7+ (library); contributors use Python 3.8+ to run Ruff: Follow standard conventions + +## Recent Changes + +- 001-ruff-formatting: Added Python runtime support 3.7+ (library); contributors use Python 3.8+ to run Ruff + pre-commit, Ruff (formatter/linter), pytest + + + diff --git a/README.md b/README.md index ca7492b8..4706c888 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Coverage
Coverage Report

| File | Stmts | Miss | Cover | Missing |
| --- | ---: | ---: | ---: | --- |
| flowpipe/\_\_init\_\_.py | 4 | 0 | 100% | |
| flowpipe/errors.py | 2 | 0 | 100% | |
| flowpipe/evaluator.py | 109 | 21 | 81% | 187–188, 224–253 |
| flowpipe/event.py | 22 | 0 | 100% | |
| flowpipe/graph.py | 211 | 0 | 100% | |
| flowpipe/node.py | 355 | 0 | 100% | |
| flowpipe/plug.py | 208 | 2 | 99% | 62–67 |
| flowpipe/utilities.py | 71 | 0 | 100% | |
| TOTAL | 982 | 23 | 98% | |
-[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/flowpipe) [![Documentation Status](https://readthedocs.org/projects/flowpipe/badge/?version=latest)](https://flowpipe.readthedocs.io/en/latest) [![Black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/flowpipe) [![Documentation Status](https://readthedocs.org/projects/flowpipe/badge/?version=latest)](https://flowpipe.readthedocs.io/en/latest) [![Ruff](https://img.shields.io/badge/linting-ruff-00a3ff.svg?logo=ruff&logoColor=white)](https://github.com/astral-sh/ruff) ![Flowpipe Logo](https://raw.githubusercontent.com/PaulSchweizer/flowpipe/master/logo.png) @@ -40,6 +40,20 @@ Benefits:
Flowpipe Presentation Open Source Days 2025 +# Development workflow + +Flowpipe uses [Ruff](https://github.com/astral-sh/ruff) for both formatting and linting. To +match the automation used in CI: + +1. `pip install -U pre-commit ruff` +2. `pre-commit install` +3. `pre-commit run --all-files` + +Only Ruff hooks run in this workflow, so contributors and CI observe identical formatting and +import ordering rules. + +For detailed migration notes and upcoming changes, read [docs/release-notes.md](docs/release-notes.md). + # Quick Example Consider this simple example on how to represent the construction of a house with Flowpipe: diff --git a/contributing.md b/contributing.md index efe13286..c6f1cbbe 100644 --- a/contributing.md +++ b/contributing.md @@ -46,7 +46,7 @@ The tests have to pass on travis (py2.7 and py3.6). ## Coding styleguide -- We use [black](https://github.com/ambv/black) +- We use [Ruff](https://github.com/astral-sh/ruff) for formatting and linting. Install the hooks once with `pre-commit install` and run `pre-commit run --all-files` before pushing so CI sees the same fixes. 
- For docstrings please use the [google style](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings) ## Release to PyPi (Collaborators only) diff --git a/docs/conf.py b/docs/conf.py index 14b4a5ef..1da0cfff 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,6 +13,7 @@ import os import sys + sys.path.insert(0, os.path.abspath("./..")) sys.setrecursionlimit(1500) diff --git a/docs/index.rst b/docs/index.rst index c6bc7be3..c69a6275 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -14,6 +14,8 @@ Code Documentation source/flowpipe examples/examples.rst + ruff-formatting + release-notes Indices and tables ================== diff --git a/docs/release-notes.md b/docs/release-notes.md new file mode 100644 index 00000000..bc9ceaf5 --- /dev/null +++ b/docs/release-notes.md @@ -0,0 +1,8 @@ +# Flowpipe Release Notes + +## Upcoming Changes + +- **Ruff-only formatting**: Flowpipe now relies on [Ruff](https://github.com/astral-sh/ruff) for formatting, linting, and import ordering. Black and isort dependencies/hook references were removed from `pyproject.toml`, `poetry.lock`, and `.pre-commit-config.yaml`. +- **Unified tooling workflow**: Pre-commit hooks, CI jobs, and the local `pre-commit run --all-files` command all execute Ruff (`ruff` + `ruff-format`) to keep contributors and automation aligned. +- **Documentation updates**: Contributor docs explain how to install/run Ruff hooks, and README highlights the new formatting workflow. +- **CI enforcement**: GitHub Actions now includes a dedicated pre-commit workflow plus a Ruff step inside the pytest workflow to guarantee style validation before tests run. diff --git a/docs/ruff-formatting.md b/docs/ruff-formatting.md new file mode 100644 index 00000000..f7f2b4ff --- /dev/null +++ b/docs/ruff-formatting.md @@ -0,0 +1,41 @@ +# Ruff formatting workflow + +Flowpipe relies on [Ruff](https://github.com/astral-sh/ruff) for formatting, linting, and import organization. 
The same hooks run locally and in CI, so following this guide guarantees consistent style. + +## Installation + +```bash +pip install -U pre-commit ruff +pre-commit install +pre-commit autoupdate +``` + +## Running the hooks + +To check every tracked file (the same command CI runs): + +```bash +pre-commit run --all-files +``` + +Ruff auto-fixes most issues. Re-run the command until it exits successfully. The output should only list the `ruff` and `ruff-format` hooks. + +## Troubleshooting + +- **Stale hooks**: Run `pre-commit autoupdate` after pulling branches that update the hook configuration. +- **New environments**: Delete `.venv` or `.cache/pre-commit` if hooks reference old versions, then rerun the installation commands. +- **Unsupported Python**: Ruff requires Python 3.9+ for tooling. If you work on Flowpipe using an older interpreter, create a dedicated virtualenv for tooling commands. + +## Editor integration + +If you use VS Code, you can set the formatter to the `charliermarsh.ruff` extension like so: + +```json +{ + "[python]": { + "editor.defaultFormatter": "charliermarsh.ruff" + } +} +``` + +If you use another editor, point it at the `ruff` binary for format-on-save and lint diagnostics to keep results aligned with the hooks. diff --git a/examples/custom_evaluator.py b/examples/custom_evaluator.py index cdf65f86..e52bdca5 100644 --- a/examples/custom_evaluator.py +++ b/examples/custom_evaluator.py @@ -13,9 +13,10 @@ +-------------------------------------------------+ """ -from flowpipe import Evaluator, Graph, Node import json +from flowpipe import Evaluator, Graph, Node + class CustomEvaluator(Evaluator): """A custom evaluator that prints the nodes being evaluated. 
diff --git a/examples/graph_plugs.py b/examples/graph_plugs.py index 3b080220..c49ac2de 100644 --- a/examples/graph_plugs.py +++ b/examples/graph_plugs.py @@ -31,7 +31,7 @@ +------------------------------------------------+ """ -from flowpipe import Graph, INode, InputPlug, OutputPlug, InputPlugGroup +from flowpipe import Graph, INode, InputPlug, InputPlugGroup, OutputPlug class AddNode(INode): diff --git a/examples/house_and_birthday.py b/examples/house_and_birthday.py index 43a9297e..e05e5acb 100644 --- a/examples/house_and_birthday.py +++ b/examples/house_and_birthday.py @@ -35,6 +35,7 @@ +-------------------+ """ + from flowpipe import Graph, INode, InputPlug, Node, OutputPlug diff --git a/examples/nested_graphs.py b/examples/nested_graphs.py index 7e2e6d55..e1b15c3a 100644 --- a/examples/nested_graphs.py +++ b/examples/nested_graphs.py @@ -1,4 +1,5 @@ """Nested graphs are supported in flowpipe.""" + from flowpipe import Graph, Node diff --git a/examples/vfx_render_farm_conversion.py b/examples/vfx_render_farm_conversion.py index 6cef1ea6..a4e23f2f 100644 --- a/examples/vfx_render_farm_conversion.py +++ b/examples/vfx_render_farm_conversion.py @@ -2,6 +2,7 @@ This guide expects that your render farm can handle dependencies between tasks. 
""" + import json import logging import os @@ -9,6 +10,7 @@ from flowpipe import Graph, INode, Node + # ----------------------------------------------------------------------------- # # Necessary utilities diff --git a/examples/vfx_rendering.py b/examples/vfx_rendering.py index a170e450..a4f80ec9 100644 --- a/examples/vfx_rendering.py +++ b/examples/vfx_rendering.py @@ -83,12 +83,13 @@ def UpdateDatabase(id_, images): """Update the database entries of the given asset with the given data.""" return {"status": True} + def complex_cg_render(frames, batch_size): graph = Graph(name="Rendering") slapcomp = CreateSlapComp(graph=graph, template="nuke_template.nk") update_database = UpdateDatabase(graph=graph, id_=123456) - + camera_creation = CreateCamera(graph=graph) scene_creation = MayaSceneGeneration(graph=graph) diff --git a/examples/workflow_design_pattern.py b/examples/workflow_design_pattern.py index 3eb5bf6a..f2b784bc 100644 --- a/examples/workflow_design_pattern.py +++ b/examples/workflow_design_pattern.py @@ -14,6 +14,7 @@ The Workflow builds a Graph and initializes it with user provided settings as well as data taken from other sources (database, filesystem). 
""" + import getpass from flowpipe import Graph, Node diff --git a/examples/world_clock.py b/examples/world_clock.py index cbe077be..0c062389 100644 --- a/examples/world_clock.py +++ b/examples/world_clock.py @@ -24,6 +24,7 @@ | converted_time o-----+ +---------------------+ """ + from datetime import datetime from time import time diff --git a/flowpipe/__init__.py b/flowpipe/__init__.py index d9d1dcb6..d86d2d2b 100644 --- a/flowpipe/__init__.py +++ b/flowpipe/__init__.py @@ -1,4 +1,5 @@ """Flow-based programming with python.""" + from .graph import Graph # noqa F40 from .node import INode, Node # noqa F401 from .evaluator import Evaluator, LinearEvaluator, ThreadedEvaluator # noqa F401 diff --git a/flowpipe/evaluator.py b/flowpipe/evaluator.py index d22ca525..f9646d1d 100644 --- a/flowpipe/evaluator.py +++ b/flowpipe/evaluator.py @@ -8,6 +8,7 @@ from .errors import FlowpipeMultiprocessingError + log = logging.getLogger(__name__) @@ -243,12 +244,12 @@ def _evaluate_node_in_process(identifier, nodes_data): data["outputs"][name]["value"] = plug.value for sub_name, sub_plug in plug.sub_plugs.items(): if sub_name not in data["outputs"][name]["sub_plugs"]: - data["outputs"][name]["sub_plugs"][ - sub_name - ] = sub_plug.serialize() - data["outputs"][name]["sub_plugs"][sub_name][ - "value" - ] = sub_plug.value + data["outputs"][name]["sub_plugs"][sub_name] = ( + sub_plug.serialize() + ) + data["outputs"][name]["sub_plugs"][sub_name]["value"] = ( + sub_plug.value + ) nodes_data[identifier] = data diff --git a/flowpipe/event.py b/flowpipe/event.py index ba796157..de15724c 100644 --- a/flowpipe/event.py +++ b/flowpipe/event.py @@ -2,8 +2,10 @@ They an be used to observe the evaluation process. 
""" + import logging + log = logging.getLogger(__name__) diff --git a/flowpipe/graph.py b/flowpipe/graph.py index 9c08c38a..8ce4cd7d 100644 --- a/flowpipe/graph.py +++ b/flowpipe/graph.py @@ -1,4 +1,5 @@ """A Graph of Nodes.""" + from __future__ import absolute_import, print_function import logging @@ -16,6 +17,7 @@ from .plug import InputPlug, InputPlugGroup, OutputPlug from .utilities import deserialize_graph + log = logging.getLogger(__name__) diff --git a/flowpipe/node.py b/flowpipe/node.py index a8e9b199..144e33d2 100644 --- a/flowpipe/node.py +++ b/flowpipe/node.py @@ -1,4 +1,5 @@ """Nodes manipulate incoming data and provide the outgoing data.""" + from __future__ import absolute_import, print_function import copy @@ -21,6 +22,7 @@ sanitize_string_input, ) + log = logging.getLogger(__name__) @@ -145,9 +147,9 @@ def downstream_nodes(self): downstream_nodes[downstream.identifier] = downstream for downstream2 in downstream.downstream_nodes: if downstream2.identifier not in downstream_nodes: - downstream_nodes[ - downstream2.identifier - ] = downstream2 + downstream_nodes[downstream2.identifier] = ( + downstream2 + ) return list(downstream_nodes.values()) def evaluate(self): @@ -643,9 +645,7 @@ def _initialize(self, func, outputs, metadata): if func is not None: self.file_location = inspect.getfile(func) self.class_name = self.func.__name__ - arg_spec = inspect.getfullargspec( - func - ) # pylint: disable=deprecated-method + arg_spec = inspect.getfullargspec(func) # pylint: disable=deprecated-method defaults = {} if arg_spec.defaults is not None: defaults = dict( diff --git a/flowpipe/plug.py b/flowpipe/plug.py index 9ff796f6..bfbfdd58 100644 --- a/flowpipe/plug.py +++ b/flowpipe/plug.py @@ -1,4 +1,5 @@ """Plugs are ins and outs for Nodes through which they exchange data.""" + from __future__ import print_function import warnings @@ -6,6 +7,7 @@ from .utilities import get_hash + class IPlug: """The interface for the plugs. 
@@ -283,9 +285,9 @@ def serialize(self): """Serialize the Plug containing all it's connections.""" connections = {} if self.connections: - connections[ - self.connections[0].node.identifier - ] = self.connections[0].name + connections[self.connections[0].node.identifier] = ( + self.connections[0].name + ) return { "name": self.name, "value": self.value if not self.sub_plugs else None, @@ -352,9 +354,9 @@ def serialize(self): """Serialize the Plug containing all it's connections.""" connections = {} if self.connections: - connections[ - self.connections[0].node.identifier - ] = self.connections[0].name + connections[self.connections[0].node.identifier] = ( + self.connections[0].name + ) return { "name": self.name, "value": self.value, diff --git a/flowpipe/utilities.py b/flowpipe/utilities.py index 72d0e6e8..79d2d159 100644 --- a/flowpipe/utilities.py +++ b/flowpipe/utilities.py @@ -1,4 +1,5 @@ """Utilities for serializing and importing Nodes.""" + try: import importlib except ImportError: diff --git a/poetry.lock b/poetry.lock index 496f0a42..0108af2b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -27,54 +27,6 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} -[[package]] -name = "black" -version = "25.9.0" -description = "The uncompromising code formatter." 
-optional = false -python-versions = ">=3.9" -groups = ["dev"] -files = [ - {file = "black-25.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce41ed2614b706fd55fd0b4a6909d06b5bab344ffbfadc6ef34ae50adba3d4f7"}, - {file = "black-25.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ab0ce111ef026790e9b13bd216fa7bc48edd934ffc4cbf78808b235793cbc92"}, - {file = "black-25.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f96b6726d690c96c60ba682955199f8c39abc1ae0c3a494a9c62c0184049a713"}, - {file = "black-25.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:d119957b37cc641596063cd7db2656c5be3752ac17877017b2ffcdb9dfc4d2b1"}, - {file = "black-25.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:456386fe87bad41b806d53c062e2974615825c7a52159cde7ccaeb0695fa28fa"}, - {file = "black-25.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a16b14a44c1af60a210d8da28e108e13e75a284bf21a9afa6b4571f96ab8bb9d"}, - {file = "black-25.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aaf319612536d502fdd0e88ce52d8f1352b2c0a955cc2798f79eeca9d3af0608"}, - {file = "black-25.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:c0372a93e16b3954208417bfe448e09b0de5cc721d521866cd9e0acac3c04a1f"}, - {file = "black-25.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1b9dc70c21ef8b43248f1d86aedd2aaf75ae110b958a7909ad8463c4aa0880b0"}, - {file = "black-25.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8e46eecf65a095fa62e53245ae2795c90bdecabd53b50c448d0a8bcd0d2e74c4"}, - {file = "black-25.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9101ee58ddc2442199a25cb648d46ba22cd580b00ca4b44234a324e3ec7a0f7e"}, - {file = "black-25.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:77e7060a00c5ec4b3367c55f39cf9b06e68965a4f2e61cecacd6d0d9b7ec945a"}, - {file = "black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175"}, - {file = "black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f"}, - {file = "black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831"}, - {file = "black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357"}, - {file = "black-25.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef69351df3c84485a8beb6f7b8f9721e2009e20ef80a8d619e2d1788b7816d47"}, - {file = "black-25.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e3c1f4cd5e93842774d9ee4ef6cd8d17790e65f44f7cdbaab5f2cf8ccf22a823"}, - {file = "black-25.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:154b06d618233fe468236ba1f0e40823d4eb08b26f5e9261526fde34916b9140"}, - {file = "black-25.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:e593466de7b998374ea2585a471ba90553283fb9beefcfa430d84a2651ed5933"}, - {file = "black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae"}, - {file = "black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" -pytokens = ">=0.1.10" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.10)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - [[package]] name = "cfgv" version = "3.4.0" @@ -87,21 +39,6 @@ files = [ {file = "cfgv-3.4.0.tar.gz", hash = 
"sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, ] -[[package]] -name = "click" -version = "8.1.8" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, - {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - [[package]] name = "colorama" version = "0.4.6" @@ -109,7 +46,7 @@ description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["dev"] -markers = "platform_system == \"Windows\" or sys_platform == \"win32\"" +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -462,18 +399,6 @@ files = [ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] -[[package]] -name = "pathspec" -version = "0.12.1" -description = "Utility library for gitignore style pattern matching of file paths." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, - {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, -] - [[package]] name = "platformdirs" version = "4.3.6" @@ -599,21 +524,6 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] -[[package]] -name = "pytokens" -version = "0.2.0" -description = "A Fast, spec compliant Python 3.13+ tokenizer that runs on older Pythons." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytokens-0.2.0-py3-none-any.whl", hash = "sha256:74d4b318c67f4295c13782ddd9abcb7e297ec5630ad060eb90abf7ebbefe59f8"}, - {file = "pytokens-0.2.0.tar.gz", hash = "sha256:532d6421364e5869ea57a9523bf385f02586d4662acbcc0342afd69511b4dd43"}, -] - -[package.extras] -dev = ["black", "build", "mypy", "pytest", "pytest-cov", "setuptools", "tox", "twine", "wheel"] - [[package]] name = "pyyaml" version = "6.0.2" @@ -677,6 +587,34 @@ files = [ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] +[[package]] +name = "ruff" +version = "0.7.4" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, +] + [[package]] name = "tomli" version = "2.2.1" @@ -768,4 +706,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.1" python-versions = ">=3.9" -content-hash = "d75e4b42bf3823dd7c7816183e16352a5d12b7f64639fc07a78398fb591a806c" +content-hash = "1dd216f557fa4d4b0cec6123f81c8e3b06b268ee4b0eb67bd0bb53dcdd9ba27c" diff --git a/pyproject.toml b/pyproject.toml index 1a6ce2bf..8051a9ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,12 +16,18 @@ documentation = "https://flowpipe.readthedocs.io/en/latest/" requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"] build-backend = "poetry_dynamic_versioning.backend" -[tool.black] +[tool.ruff] line-length = 79 +target-version = "py39" +src = ["flowpipe", "tests"] -[tool.isort] -profile = "black" -skip = ["flowpipe/__init__.py"] +[tool.ruff.lint] +select = ["E", "F", "I"] +ignore = ["E501"] + +[tool.ruff.lint.isort] +known-first-party = ["flowpipe"] +lines-after-imports = 2 [tool.pylint."MESSAGES CONTROL"] disable = [ @@ -57,12 +63,11 @@ enable = true poetry-dynamic-versioning = { version = ">=1.0.0,<2.0.0", extras = ["plugin"] } [tool.poetry.group.dev.dependencies] -black = "^25.9.0" mock = "^5.1.0" numpy = "^1.26.2" pre-commit = 
"^3.5.0" pylint = "^3.0.1" pytest-cov = "^4.1.0" pytest = "^7.4.2" -isort = { version = "^5.12.0", extras = ["pyproject"] } mypy = "^1.6.1" +ruff = "^0.7.0" diff --git a/specs/001-ruff-formatting/checklists/requirements.md b/specs/001-ruff-formatting/checklists/requirements.md new file mode 100644 index 00000000..49dfe4d1 --- /dev/null +++ b/specs/001-ruff-formatting/checklists/requirements.md @@ -0,0 +1,34 @@ +# Specification Quality Checklist: Adopt Ruff Formatting Hooks + +**Purpose**: Validate specification completeness and quality before proceeding to planning +**Created**: 2025-11-14 +**Feature**: [spec.md](../spec.md) + +## Content Quality + +- [x] No implementation details (languages, frameworks, APIs) +- [x] Focused on user value and business needs +- [x] Written for non-technical stakeholders +- [x] All mandatory sections completed + +## Requirement Completeness + +- [x] No [NEEDS CLARIFICATION] markers remain +- [x] Requirements are testable and unambiguous +- [x] Success criteria are measurable +- [x] Success criteria are technology-agnostic (no implementation details) +- [x] All acceptance scenarios are defined +- [x] Edge cases are identified +- [x] Scope is clearly bounded +- [x] Dependencies and assumptions identified + +## Feature Readiness + +- [x] All functional requirements have clear acceptance criteria +- [x] User scenarios cover primary flows +- [x] Feature meets measurable outcomes defined in Success Criteria +- [x] No implementation details leak into specification + +## Notes + +- All checklist items validated on 2025-11-14; specification ready for `/speckit.clarify` or `/speckit.plan`. 
diff --git a/specs/001-ruff-formatting/contracts/tooling.yaml b/specs/001-ruff-formatting/contracts/tooling.yaml new file mode 100644 index 00000000..0cce0b5e --- /dev/null +++ b/specs/001-ruff-formatting/contracts/tooling.yaml @@ -0,0 +1,77 @@ +openapi: 3.0.0 +info: + title: Flowpipe Tooling Contract + version: 1.0.0 + description: > + Conceptual API describing how contributors and CI invoke Ruff through the standard + tooling stack. Although these actions are executed via CLI/pre-commit, modeling them as + API-like contracts clarifies required inputs/outputs. +servers: + - url: local + description: Developer workstation or CI runner +paths: + /formatting/dry-run: + post: + summary: Simulate Ruff formatting without committing changes + description: > + Runs `pre-commit run --all-files` with Ruff hooks configured to auto-fix issues but + report remaining violations. Used by developers prior to commit. + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + files: + type: array + items: + type: string + description: Subset of files to check; omitted for all files. + autofix: + type: boolean + default: true + description: Whether Ruff is allowed to apply fixes automatically. + responses: + "200": + description: Ruff completed successfully + content: + application/json: + schema: + type: object + properties: + reformatted: + type: array + items: + type: string + description: Files rewritten by Ruff format/import passes. + remaining_issues: + type: integer + description: Count of outstanding violations. + "422": + description: Invalid configuration or unsupported Ruff version + /formatting/apply: + post: + summary: Enforce Ruff results in CI + description: > + CI jobs call this conceptual endpoint by executing `pre-commit run --all-files` or the + equivalent tox stage. Any Ruff failure blocks the pipeline. 
+ requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + branch: + type: string + description: Git branch under validation. + python_version: + type: string + description: Interpreter version used for Ruff (>=3.8). + responses: + "204": + description: Formatting and lint checks passed with zero issues + "409": + description: Ruff reported violations; CI should fail the job +components: {} diff --git a/specs/001-ruff-formatting/data-model.md b/specs/001-ruff-formatting/data-model.md new file mode 100644 index 00000000..040871bd --- /dev/null +++ b/specs/001-ruff-formatting/data-model.md @@ -0,0 +1,45 @@ +# Data Model: Adopt Ruff Formatting Hooks + +## Entity: PreCommitHook +- **Purpose**: Represents each hook entry managed by `.pre-commit-config.yaml`. +- **Fields**: + - `id` (string): Unique hook identifier provided by Ruff (`ruff`, `ruff-format`). + - `repo` (string): Source repository (`https://github.com/astral-sh/ruff-pre-commit`). + - `rev` (string): Tagged release of Ruff hooks (kept in sync with PyPI releases). + - `args` (list[string]): Optional CLI flags (e.g., `--fix`). + - `stages` (list[string]): Git stages where the hook runs (default `pre-commit`). +- **Relationships**: Depends on `RuffConfig` to know which style settings to enforce. +- **Validation rules**: `id` must match available hook names; `rev` must be a valid tag; args may only + include supported Ruff flags to avoid custom scripting. + +## Entity: RuffConfig +- **Purpose**: Captures formatting and lint preferences stored in `pyproject.toml` (or `ruff.toml`). +- **Fields**: + - `line_length` (int, default 88): Aligns with historical Black default. + - `target_version` (enum, default `py37`): Ensures Ruff understands Flowpipe’s runtime floor. + - `select`/`ignore` (list[string]): Additional rule toggles; minimal values expected. + - `format` (object): Optional formatting toggles if defaults ever diverge. 
+- **Relationships**: Referenced by `PreCommitHook` entries and dev documentation; ensures CI and + contributors share the same style. +- **Validation rules**: Only declare fields when diverging from Ruff defaults; unknown fields fail CI. + +## Entity: DocumentationAsset +- **Purpose**: Contributor-facing instructions (README, `contributing.md`, script comments) that + describe how to run hooks and resolve failures. +- **Fields**: + - `path` (string): Location of the file. + - `audience` (enum): Maintainer, contributor, CI. + - `content_summary` (string): Brief explanation of what the doc teaches (install Ruff, run hooks). +- **Relationships**: Links to `PreCommitHook` for step-by-step instructions and to `RuffConfig` for + describing rule sources. +- **Validation rules**: Must reference Ruff as the single formatter/import tool and avoid outdated + references to Black or isort. + +## Entity: CIJobReference +- **Purpose**: Any automated workflow (e.g., GitHub Actions, scripts) that triggers formatting checks. +- **Fields**: + - `job_id` (string): Workflow or script identifier. + - `trigger` (string): Event (push, PR, manual) or command (pre-commit, tox). + - `commands` (list[string]): Steps run within the job, which must now point at Ruff. +- **Relationships**: Consumes `PreCommitHook` definitions to remain aligned with local checks. +- **Validation rules**: Commands cannot reference Black/isort; jobs must fail if Ruff reports issues. 
diff --git a/specs/001-ruff-formatting/plan.md b/specs/001-ruff-formatting/plan.md new file mode 100644 index 00000000..3d3918ef --- /dev/null +++ b/specs/001-ruff-formatting/plan.md @@ -0,0 +1,127 @@ +# Implementation Plan: Adopt Ruff Formatting Hooks + +**Branch**: `001-ruff-formatting` | **Date**: 2025-11-14 | **Spec**: [specs/001-ruff-formatting/spec.md](specs/001-ruff-formatting/spec.md) +**Input**: Feature specification from `/specs/001-ruff-formatting/spec.md` + +**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for the execution workflow. + +## Summary + +Flowpipe must replace the current Black + isort pre-commit hooks with Ruff while keeping the +workflow lightweight and familiar. We will rely on Ruff’s formatter/import defaults wherever +possible, reuse the existing pre-commit integration, and limit changes to configuration files and +docs. CI/dev tooling must only reference Ruff once the migration lands. + +## Technical Context + + + +**Language/Version**: Python runtime support 3.7+ (library); contributors use Python 3.8+ to run Ruff +**Primary Dependencies**: pre-commit, Ruff (formatter/linter), pytest +**Storage**: N/A (tooling configuration only) +**Testing**: pytest suite + `pre-commit run --all-files` verification +**Target Platform**: Cross-platform developer environments + CI runners +**Project Type**: Single Python library (Flowpipe core + tests/docs) +**Performance Goals**: Formatting/lint phase completes <60s locally and in CI +**Constraints**: Use Ruff defaults where viable, reuse existing tooling, minimal custom scripts, +documented support for existing Python runtime versions, keep framework-only scope +**Scale/Scope**: Repository-wide formatting hooks, docs, and CI references + +## Constitution Check + +*GATE: Must pass before Phase 0 research. 
Re-check after Phase 1 design.* + +- **Framework-Only Scope**: Work limited to `.pre-commit-config.yaml`, `pyproject.toml`, and + docs; no runtime node code is touched. +- **Plain Python Simplicity**: Ruff replaces two tools, reducing dependencies. It is pure-Python + and runs wherever contributors already run pre-commit; no extra services required. +- **Portable Graph Serialization**: Formatting tooling does not change how graphs/nodes are + serialized; we will state that serialization remains unaffected. +- **Test-Driven Total Coverage**: No runtime code updates expected. If tooling scripts are touched, + we will add/adjust pytest coverage accordingly and ensure pre-commit hook tests remain. +- **Stable APIs & Dual-Python Support**: API surface untouched. Release notes and docs will note + the contributor workflow change; no SemVer bump required because runtime behavior stays + identical. +- **Engineering Constraints**: Update contributor docs, README badges/instructions, CI configs, + and release instructions to reflect Ruff usage. Use Ruff defaults wherever feasible and keep + automation inside existing tooling (pre-commit, tox, CI). + +**Status**: PASS (pre- and post-design). No violations identified; complexity tracking not required. 
+ +## Project Structure + +### Documentation (this feature) + +```text +specs/[###-feature]/ +├── plan.md # This file (/speckit.plan command output) +├── research.md # Phase 0 output (/speckit.plan command) +├── data-model.md # Phase 1 output (/speckit.plan command) +├── quickstart.md # Phase 1 output (/speckit.plan command) +├── contracts/ # Phase 1 output (/speckit.plan command) +└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan) +``` + +### Source Code (repository root) + + +```text +flowpipe/ +tests/ +docs/ +examples/ +.pre-commit-config.yaml +pyproject.toml +specs/001-ruff-formatting/ +├── spec.md +├── plan.md +├── research.md # (to be created) +├── data-model.md # (to be created) +├── quickstart.md # (to be created) +└── contracts/ # (to be created) +``` + +**Structure Decision**: Single Python library repo; updates focus on root-level tooling files plus +supporting docs/tests already present in Flowpipe. + +## Complexity Tracking + +No constitutional violations identified; tracking table not required for this feature. + +## Phase 0 – Research Plan + +1. Catalog existing Black/isort hook behavior to ensure Ruff parity (line length, import sections). +2. Evaluate official Ruff pre-commit hooks for formatter + lint support and confirm arguments + needed for auto-fix/import ordering. +3. Validate contributors’ tooling expectations (Python version, install steps) and document any + delta in `research.md`. + +*Deliverable*: `specs/001-ruff-formatting/research.md` summarizing decisions, rationales, and +rejected alternatives (completed). + +## Phase 1 – Design & Contracts Plan + +1. Model affected configuration/doc entities in `data-model.md`. +2. Describe contributor/CI interactions in `contracts/tooling.yaml` using a simple OpenAPI schema. +3. Produce a `quickstart.md` giving installation + verification steps for Ruff-only hooks. +4. 
Update agent context via `.specify/scripts/powershell/update-agent-context.ps1 -AgentType codex` + so future agents know tooling choices. +5. Re-run Constitution Check to confirm no violations introduced (done; status PASS). + +## Phase 2 – Upcoming Work (Planning Only) + +- Update `.pre-commit-config.yaml` to remove Black/isort hooks and add Ruff equivalents. +- Ensure `pyproject.toml` (or `ruff.toml`) declares only necessary overrides (target version, minimal + customizations). +- Refresh documentation (`README.md`, `contributing.md`, release notes) referencing Ruff. +- Adjust CI scripts/workflows to invoke the same `pre-commit` hooks, ensuring parity with local dev. +- Validate by running `pre-commit run --all-files` and pytest to guarantee no regressions. diff --git a/specs/001-ruff-formatting/quickstart.md b/specs/001-ruff-formatting/quickstart.md new file mode 100644 index 00000000..051d9623 --- /dev/null +++ b/specs/001-ruff-formatting/quickstart.md @@ -0,0 +1,38 @@ +# Quickstart: Adopt Ruff Formatting Hooks + +## Goal +Ensure every contributor and CI job runs Ruff for formatting/import organization through the +existing pre-commit workflow with minimal manual configuration. + +## Prerequisites +- Python 3.8+ available for tooling (Flowpipe runtime still supports 3.7+). +- `pip install pre-commit` once per workstation. +- Existing Flowpipe repository clone with `pre-commit` hooks installed. + +## Steps +1. **Update dependencies** + ```bash + pip install -U pre-commit ruff + ``` +2. **Install/refresh hooks** + ```bash + pre-commit install + pre-commit autoupdate + ``` +3. **Run Ruff locally** + ```bash + pre-commit run --all-files + ``` + - Hook output should show only `ruff` and `ruff-format`. + - Auto-fixes apply in place; review git diff before committing. +4. **Address failures** + - For lint errors that cannot auto-fix, follow Ruff’s diagnostic message. + - Re-run the same command until it exits with code `0`. +5. 
**CI verification** + - Push the branch; GitHub Actions will execute the same `pre-commit` hooks. + - Pipelines fail if Ruff reports issues, matching local behavior. + +## Rollout Notes +- Document changes in `contributing.md` and release notes. +- Delete stale Black/isort references in scripts, docs, or PR templates. +- Encourage contributors to enable Ruff extensions in their IDEs for real-time feedback. diff --git a/specs/001-ruff-formatting/research.md b/specs/001-ruff-formatting/research.md new file mode 100644 index 00000000..efdd6bda --- /dev/null +++ b/specs/001-ruff-formatting/research.md @@ -0,0 +1,36 @@ +# Research: Adopt Ruff Formatting Hooks + +## Task 1: Best practices for Ruff in pre-commit +- **Decision**: Use the official `astral-sh/ruff-pre-commit` hooks (`ruff` and `ruff-format`) with + default arguments, enabling `--fix` on the lint hook so imports get reordered automatically. +- **Rationale**: This mirrors Ruff’s documented setup, minimizes custom scripting, and allows + contributors to reuse existing `pre-commit` workflows without new commands. +- **Alternatives considered**: + - *Custom local hook commands*: rejected because they add maintenance overhead and do not + benefit from upstream hook updates. + - *Keeping Black for formatting*: rejected because the requirement explicitly migrates to Ruff and + Black would duplicate functionality. + +## Task 2: Configuration alignment with prior style +- **Decision**: Keep Ruff’s defaults (line length 88, quote rules, import sorting) except where an + existing Flowpipe rule conflicts; only set fields in `pyproject.toml` when parity requires it. +- **Rationale**: Flowpipe already followed Black/isort defaults, which align with Ruff’s defaults, + so keeping configuration minimal avoids churn and honors the “write as little custom code as + possible” directive. +- **Alternatives considered**: + - *Comprehensive custom config*: rejected for adding noise and diverging from standard Ruff + guidance. 
+ - *Relying entirely on implicit defaults with no config file*: rejected because Flowpipe already + tracks tool metadata in `pyproject.toml`, and documenting the hook location helps future + contributors. + +## Task 3: CI and contributor workflow continuity +- **Decision**: Reuse all existing automation (pre-commit hooks, developer setup steps, CI jobs) + by swapping the referenced hooks to Ruff; no new scripts or workflows will be introduced. +- **Rationale**: Minimal change surface satisfies “re-use as many existing tools and integrations as + possible” and ensures compatibility with historical contributor instructions. +- **Alternatives considered**: + - *Adding separate Ruff-only CI jobs*: rejected as redundant with the current pre-commit driven + formatting checks. + - *Running Ruff via make/Invoke tasks*: rejected because it introduces custom tooling and drifts + from standard practice. diff --git a/specs/001-ruff-formatting/spec.md b/specs/001-ruff-formatting/spec.md new file mode 100644 index 00000000..05bb6ff6 --- /dev/null +++ b/specs/001-ruff-formatting/spec.md @@ -0,0 +1,136 @@ +# Feature Specification: Adopt Ruff Formatting Hooks + +**Feature Branch**: `001-ruff-formatting` +**Created**: 2025-11-14 +**Status**: Draft +**Input**: User description: "Right now we are using black and isort for formatting in the pre-commit hooks. This has to be changed to ruff" + +## User Scenarios & Testing *(mandatory)* + +### User Story 1 - Maintain formatting via Ruff (Priority: P1) + +As a Flowpipe maintainer, I need the pre-commit workflow to run Ruff so contributors +automatically apply the same formatting and import rules before opening pull requests. + +**Why this priority**: Consistent formatting prevents noisy diffs and ensures reviewers focus on +behavior rather than style regressions. + +**Independent Test**: Run `pre-commit run --all-files`; verify only Ruff executes formatting and lint +checks, and that it exits cleanly after enforcing the configured style. 
+ +**Acceptance Scenarios**: + +1. **Given** a clean clone with pre-commit installed, **When** a developer commits Python changes, + **Then** Ruff reformats files and import ordering without invoking Black or isort. +2. **Given** a file that violates the Ruff rules, **When** `pre-commit run --all-files` executes, + **Then** the hook reports the exact issues and offers autofix instructions (or applies fixes when + configured). + +--- + +### User Story 2 - Document new workflow (Priority: P2) + +As a contributor, I need documentation that explains which formatter runs in hooks and how to +configure my environment so I can fix formatting without guessing tools. + +**Why this priority**: Clear docs reduce onboarding friction and avoid contributors running the old +Black/isort commands. + +**Independent Test**: Visit the contributing guide; confirm it references Ruff, links to installation +instructions, and explains how to run the hooks locally. + +**Acceptance Scenarios**: + +1. **Given** a new contributor reading `contributing.md`, **When** they follow the formatting + instructions, **Then** they install Ruff (directly or via pre-commit) and can reproduce the same + formatting locally as CI. + +--- + +### User Story 3 - Keep CI & historical compatibility (Priority: P3) + +As a maintainer, I need CI and the supported Python 3.7+ runtime to continue working with the +new tooling so we do not break existing pipelines. + +**Why this priority**: Tooling updates cannot disrupt release automation or introduce unsupported +dependencies that block older Python runtimes. + +**Independent Test**: Run the existing CI formatting/lint job (or equivalent local script) and confirm +it references Ruff hooks/configs while maintaining the documented Python support matrix. + +**Acceptance Scenarios**: + +1. 
**Given** the CI pipeline or local `pre-commit run --all-files`, **When** it executes on Python 3.8+, + **Then** only Ruff provides lint/format checks and the job passes with the same success/failure + criteria as before. + +--- + +### Edge Cases + +- Developers with stale `.pre-commit` environments must receive clear upgrade instructions when + Ruff replaces prior hooks. +- Contributors editing code via automated tools (e.g., IDEs) still need guidance on invoking Ruff to + avoid style drift. +- Projects pinned to the minimum supported runtime (Python 3.7) must not be forced to install + unsupported Ruff versions; document required interpreter versions clearly. + +## Requirements *(mandatory)* + +### Functional Requirements + +- **FR-001**: Replace Black and isort hooks in `.pre-commit-config.yaml` with Ruff so formatting, + linting, and import ordering run through a single tool. +- **FR-002**: Configure Ruff (e.g., `pyproject.toml`) to match the currently enforced style (line + length, quote preference, import sections) to avoid mass churn. +- **FR-003**: Update developer documentation (`README.md`, `contributing.md`, and any script + comments) to reference Ruff commands instead of Black/isort. +- **FR-004**: Ensure CI or local validation scripts that previously invoked Black/isort now call Ruff + to keep automated enforcement aligned. +- **FR-005**: Provide migration notes in release documentation describing the switch so downstream + users know which formatter to run when contributing patches. + +### Key Entities *(include if feature involves data)* + +- **Pre-commit Hook Definition**: Entries in `.pre-commit-config.yaml` that specify which tooling runs + before commits; must reference Ruff repos and hook IDs. +- **Ruff Configuration**: Settings stored in `pyproject.toml` (or `ruff.toml`) defining formatting, + linting, and import ordering standards enforced across Flowpipe. 
+ +## Assumptions + +- Ruff will serve as both formatter and import organizer, eliminating the need for separate Black or + isort hooks. +- Contributors use Python 3.8+ (or compatible) to install Ruff while Flowpipe’s runtime support for + Python 3.7+ remains unchanged. +- No additional style rules are introduced beyond those already enforced by Black/isort unless + explicitly documented. + +## Success Criteria *(mandatory)* + +### Measurable Outcomes + +- **SC-001**: The documented formatting workflow completes in under 60 seconds using a single + formatter/import tool, with no additional manual steps required from contributors. +- **SC-002**: 100% of newly merged pull requests show zero Black or isort-related diffs because only + Ruff formatting changes appear in commits. +- **SC-003**: Contributor documentation references Ruff exclusively, and at least 90% of new + contributors (measured via PR checklist or templates) confirm they followed the updated steps. +- **SC-004**: CI jobs enforcing formatting succeed on the first attempt in ≥95% of runs after the switch, + demonstrating the configuration is stable. + +## Constitution Alignment Checklist *(must be explicit)* + +- **Framework-Only Scope**: The change only touches tooling configuration and documentation; the + Flowpipe runtime remains a pure framework with no bundled nodes. +- **Plain Python Simplicity**: Ruff (pure-Python) replaces two separate tools, reducing dependency + overhead. Support for the Python 3.7+ runtime remains unchanged, while contributors install + compatible Ruff versions on modern interpreters. +- **Portable Graph Serialization**: Formatting changes do not alter graph/node serialization; ensure + docs mention that serialization logic stays unaffected. +- **Test-Driven Total Coverage**: Pre-commit hooks supplement, not replace, pytest coverage; any new + hook definitions must still allow tests to run and maintain 100% coverage expectations. 
+- **Stable APIs & Dual-Python Support**: No public API surface changes; release notes highlight the + tooling update so downstream packagers know about the new contribution workflow. +- **Engineering Constraints**: Update `.pre-commit-config.yaml`, `pyproject.toml`, and contributor + docs so formatting guidance, hooks, and release steps remain synchronized. diff --git a/specs/001-ruff-formatting/tasks.md b/specs/001-ruff-formatting/tasks.md new file mode 100644 index 00000000..d4256175 --- /dev/null +++ b/specs/001-ruff-formatting/tasks.md @@ -0,0 +1,104 @@ +--- +description: "Task list for adopting Ruff as the sole formatting/import tool" +--- + +# Tasks: Adopt Ruff Formatting Hooks + +**Input**: Design artifacts from `/specs/001-ruff-formatting/` +**Prerequisites**: `plan.md`, `spec.md`, `research.md`, `data-model.md`, `contracts/`, `quickstart.md` +**Tests**: Not explicitly requested; verification relies on `pre-commit run --all-files` and existing pytest suites. +**Organization**: Tasks are grouped by user story (US1–US3) so each slice remains independently testable. + +## Format: `[ID] [P?] [Story] Description` + +- `[P]` indicates the task can run in parallel (different files, no blocking dependencies). +- `[US#]` labels tie work to the user stories defined in the specification. +- All descriptions include the precise file or directory to touch. + +## Phase 1: Setup (Shared Infrastructure) + +**Purpose**: Establish the baseline dependency set required before modifying hooks or documentation. + +- [x] T001 Update `pyproject.toml` and `poetry.lock` to remove `black`/`isort` dev dependencies and add `ruff` under `[tool.poetry.group.dev.dependencies]`. + +--- + +## Phase 2: Foundational (Blocking Prerequisites) + +**Purpose**: Centralize formatter configuration inside Ruff before story-specific implementation begins. 
+ +- [x] T002 Define the `[tool.ruff]` configuration (line length, target version, minimal rule toggles) and delete `[tool.black]`/`[tool.isort]` sections inside `pyproject.toml`. + +--- + +## Phase 3: User Story 1 – Maintain formatting via Ruff (Priority: P1) 🎯 MVP + +**Goal**: Contributors run Ruff automatically through pre-commit, ensuring only Ruff enforces formatting/import ordering. +**Independent Test**: After tasks complete, running `pre-commit run --all-files` must display only `ruff`/`ruff-format` hooks and exit cleanly once issues are resolved. + +### Implementation + +- [x] T003 [US1] Replace the Black/isort repos in `.pre-commit-config.yaml` with the official `astral-sh/ruff-pre-commit` hooks (`ruff` with `--fix` and `ruff-format`) so only Ruff enforces style before commits. + +- [x] T004 [P] [US1] Remove the now-redundant `.flake8` configuration (its `E203`/`W503` ignores existed only for Black compatibility, which Ruff's defaults already handle) so no references to Black/isort remain in lint configuration. + +**Checkpoint**: User Story 1 complete when `pre-commit run --all-files` reformats code using Ruff only, and editor/on-save tooling matches those rules. + +--- + +## Phase 4: User Story 2 – Document new workflow (Priority: P2) + +**Goal**: Contributors understand Ruff is the sole formatter/import organizer and know how to run it locally. +**Independent Test**: A new contributor following the updated docs can install Ruff (via pre-commit) and reproduce CI formatting locally without confusion. + +### Implementation + +- [x] T005 [P] [US2] Update `README.md` badges/instructions to highlight Ruff (swap the Black badge, mention Ruff-driven formatting commands). +- [x] T006 [P] [US2] Rewrite the formatting guidance in `contributing.md` to describe installing Ruff via pre-commit, running `pre-commit run --all-files`, and removing all Black/isort references. 
+- [x] T007 [P] [US2] Author `docs/ruff-formatting.md` (and link it from `docs/index.rst`) detailing installation, troubleshooting stale hooks, and explaining how other tools can inspect graph data using Ruff-formatted code. + +**Checkpoint**: User Story 2 complete when all contributor-facing docs consistently reference Ruff commands and onboarding steps. + +--- + +## Phase 5: User Story 3 – Keep CI & historical compatibility (Priority: P3) + +**Goal**: CI enforces Ruff just like local hooks without breaking existing workflows or Python support. +**Independent Test**: GitHub Actions (or equivalent CI) run `pre-commit` Ruff hooks on Python 3.8+ and fail the build if Ruff reports issues. + +### Implementation + +- [x] T008 [US3] Add `.github/workflows/pre-commit.yml` that leverages `pre-commit/action@v3` to run the Ruff hooks on every push and pull request. +- [x] T009 [P] [US3] Insert a `poetry run pre-commit run --all-files --hook-stage manual ruff ruff-format` step near the start of `.github/workflows/pytest.yml` so test pipelines verify Ruff compliance using the existing tooling stack. + +**Checkpoint**: User Story 3 complete when CI pipelines fail on Ruff violations and still run on the documented Python versions without extra custom scripts. + +--- + +## Phase N: Polish & Cross-Cutting Concerns + +- [x] T010 Create `docs/release-notes.md` (and reference it from `README.md` or docs navigation) summarizing the Ruff migration and providing upgrade guidance for downstream consumers. + +--- + +## Dependencies & Execution Order + +- **Phase sequencing**: Complete Setup (T001) → Foundational (T002) → US1 → US2 → US3 → Polish. +- **User story dependencies**: + - US1 (P1) depends on T001–T002. + - US2 (P2) depends on US1, since documentation must describe the finalized tooling. + - US3 (P3) depends on US1 (CI must run the new hooks) but can proceed in parallel with US2 once Ruff is configured. 
+- **Cross-cutting**: Polish tasks run last to capture release/migration guidance once all stories stabilize. + +## Parallel Opportunities + +- Within **US1**, T004 modifies a separate configuration file and can run in parallel with T003. +- Within **US2**, tasks T005–T007 touch different docs and can be parallelized to speed authoring. +- In **US3**, T008 and T009 affect different workflow files; they can be developed concurrently once T003 completes. + +## Implementation Strategy + +1. **MVP (US1 only)**: Execute T001–T004 to switch local tooling to Ruff; verify `pre-commit run --all-files` succeeds. +2. **Incremental Delivery**: Layer US2 documentation updates (T005–T007), ensuring contributors understand the new workflow. +3. **Full Enforcement**: Finish with US3 CI tasks (T008–T009) so pipelines gate on Ruff. +4. **Polish**: Publish migration notes (T010), then run the full verification commands described in the plan. diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py index 46416b21..b991c392 100644 --- a/tests/test_multiprocessing.py +++ b/tests/test_multiprocessing.py @@ -3,6 +3,7 @@ from flowpipe.graph import Graph from flowpipe.node import Node + # A value lower than 1 does not make a difference since starting the different # processes eats up time SLEEP_TIME = 3 diff --git a/tests/test_plugs.py b/tests/test_plugs.py index ce727da5..b46ee633 100644 --- a/tests/test_plugs.py +++ b/tests/test_plugs.py @@ -326,11 +326,11 @@ def A(compound_in): with pytest.raises(TypeError): node.outputs["compound_out"][0].value = 0 - node.inputs["compound_in"][u"unicode"].value = "unicode" - node.outputs["compound_out"][u"unicode"].value = "unicode" + node.inputs["compound_in"]["unicode"].value = "unicode" + node.outputs["compound_out"]["unicode"].value = "unicode" - assert node.inputs["compound_in"][u"unicode"].value == "unicode" - assert node.outputs["compound_out"][u"unicode"].value == "unicode" + assert 
node.inputs["compound_in"]["unicode"].value == "unicode" + assert node.outputs["compound_out"]["unicode"].value == "unicode" def test_compound_input_plugs_are_accessible_by_index(clear_default_graph):