From fed4309320c72614f43ae4c903713f84c23f365a Mon Sep 17 00:00:00 2001 From: Jeck0v Date: Sun, 12 Oct 2025 17:14:14 +0200 Subject: [PATCH 1/2] :construction: Add Support Label + overlay + replicas from docker swarm --- docker-compose.yml | 281 ++---------- docs/FEATURES.md | 190 +++++++- examples/swarm-production.ath | 133 ++++++ presentation-swarm.yml | 341 +++++++++++++++ presentation.ath | 32 +- src/athena/dockerfile.rs | 3 +- src/athena/error.rs | 1 + src/athena/generator/compose.rs | 65 ++- src/athena/generator/defaults.rs | 90 +++- src/athena/parser/ast.rs | 110 ++++- src/athena/parser/grammar.pest | 42 +- src/athena/parser/mod.rs | 1 + src/athena/parser/optimized_parser.rs | 2 +- src/athena/parser/parser.rs | 191 ++++++-- tests/integration/mod.rs | 5 +- tests/integration/swarm_features_test.rs | 530 +++++++++++++++++++++++ 16 files changed, 1692 insertions(+), 325 deletions(-) create mode 100644 examples/swarm-production.ath create mode 100644 presentation-swarm.yml create mode 100644 tests/integration/swarm_features_test.rs diff --git a/docker-compose.yml b/docker-compose.yml index a4f8076..07ed4ac 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,277 +1,56 @@ -# Generated by Athena v0.1.0 from MODERN_ECOMMERCE deployment +# Generated by Athena v0.1.0 from test_no_conflicts deployment # Developed by UNFAIR Team: https://github.com/Jeck0v/Athena -# Project Version: 2.0.0 -# Generated: 2025-10-05 20:42:54 UTC +# Generated: 2025-10-12 15:10:37 UTC # Features: Intelligent defaults, optimized networking, enhanced health checks -# Services: 7 configured with intelligent defaults +# Services: 3 configured with intelligent defaults services: - monitoring: - image: prom/prometheus:latest - container_name: modern-ecommerce-monitoring + app2: + image: httpd:alpine + container_name: test-no-conflicts-app2 ports: - - 9090:9090 - volumes: - - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - 8081:8000 restart: unless-stopped - deploy: - 
resources: - limits: - cpus: '0.3' - memory: 512M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s networks: - - ecommerce_network + - test_no_conflicts_network pull_policy: missing labels: - athena.service: monitoring - athena.project: MODERN_ECOMMERCE + athena.project: test_no_conflicts + athena.service: app2 + athena.generated: 2025-10-12 athena.type: generic - athena.generated: 2025-10-05 - cache: - image: redis:7-alpine - container_name: modern-ecommerce-cache - volumes: - - redis_data:/data - healthcheck: - test: - - CMD-SHELL - - redis-cli ping || exit 1 - interval: 15s - timeout: 3s - retries: 3 - start_period: 20s - restart: always - deploy: - resources: - limits: - cpus: '0.3' - memory: 512M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s - networks: - - ecommerce_network - pull_policy: missing - labels: - athena.generated: 2025-10-05 - athena.type: cache - athena.project: MODERN_ECOMMERCE - athena.service: cache - - product_service: - build: - context: . 
- dockerfile: Dockerfile - args: - SEARCH_ENGINE: elasticsearch - CATALOG_VERSION: v1.5 - container_name: modern-ecommerce-product_service - environment: - - DATABASE_URL=${DATABASE_URL} - depends_on: - - database - healthcheck: - test: - - CMD-SHELL - - curl -f http://localhost:3000/health || exit 1 - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - restart: unless-stopped - deploy: - resources: - limits: - cpus: '0.3' - memory: 256M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s - networks: - - ecommerce_network - pull_policy: missing - labels: - athena.type: generic - athena.project: MODERN_ECOMMERCE - athena.service: product_service - athena.generated: 2025-10-05 - - nginx_reverse_proxy: + app1: image: nginx:alpine - container_name: modern-ecommerce-nginx_reverse_proxy + container_name: test-no-conflicts-app1 ports: - - 80:80 - - 443:443 - volumes: - - ./nginx/conf.d:/etc/nginx/conf.d:ro - depends_on: - - api_gateway - healthcheck: - test: - - CMD-SHELL - - curl -f http://localhost:80/health || exit 1 - interval: 20s - timeout: 5s - retries: 3 - start_period: 30s + - 8080:80 restart: always - deploy: - resources: - limits: - cpus: '0.2' - memory: 256M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s networks: - - ecommerce_network + - test_no_conflicts_network pull_policy: missing labels: - athena.project: MODERN_ECOMMERCE - athena.service: nginx_reverse_proxy - athena.generated: 2025-10-05 + athena.service: app1 + athena.project: test_no_conflicts athena.type: proxy + athena.generated: 2025-10-12 - auth_service: - build: - context: . 
- dockerfile: Dockerfile - args: - AUTH_PROVIDER: oauth2 - SESSION_TIMEOUT: 1h - container_name: modern-ecommerce-auth_service - environment: - - JWT_SECRET=${JWT_SECRET} - - DATABASE_URL=${DATABASE_URL} - depends_on: - - database - healthcheck: - test: - - CMD-SHELL - - curl -f http://localhost:3000/health || exit 1 - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - restart: unless-stopped - deploy: - resources: - limits: - cpus: '0.2' - memory: 256M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s - networks: - - ecommerce_network - pull_policy: missing - labels: - athena.service: auth_service - athena.generated: 2025-10-05 - athena.project: MODERN_ECOMMERCE - athena.type: generic - - api_gateway: - build: - context: . - dockerfile: Dockerfile - args: - JWT_EXPIRY: 24h - NODE_ENV: production - API_VERSION: v2.1 - container_name: modern-ecommerce-api_gateway - environment: - - JWT_SECRET=${JWT_SECRET} - - DATABASE_URL=${DATABASE_URL} - - REDIS_URL=${REDIS_URL} - depends_on: - - database - - cache - healthcheck: - test: - - CMD-SHELL - - curl -f http://localhost:3000/health || exit 1 - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - restart: unless-stopped - deploy: - resources: - limits: - cpus: '0.5' - memory: 512M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s - networks: - - ecommerce_network - pull_policy: missing - labels: - athena.service: api_gateway - athena.generated: 2025-10-05 - athena.project: MODERN_ECOMMERCE - athena.type: generic - - database: - image: postgres:15 - container_name: modern-ecommerce-database - environment: - - POSTGRES_USER=${POSTGRES_USER} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_DB=${POSTGRES_DB} - volumes: - - postgres_data:/var/lib/postgresql/data - healthcheck: - test: - - CMD-SHELL - - pg_isready -U ${POSTGRES_USER} || exit 1 - interval: 10s - timeout: 5s - retries: 5 - start_period: 60s + app3: + image: 
apache:latest + container_name: test-no-conflicts-app3 + ports: + - 9000:80 restart: always - deploy: - resources: - limits: - cpus: '1.0' - memory: 1024M - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s networks: - - ecommerce_network + - test_no_conflicts_network pull_policy: missing labels: - athena.project: MODERN_ECOMMERCE - athena.generated: 2025-10-05 - athena.service: database - athena.type: database + athena.project: test_no_conflicts + athena.service: app3 + athena.type: proxy + athena.generated: 2025-10-12 networks: - ecommerce_network: + test_no_conflicts_network: driver: bridge -volumes: - redis_data: - driver: local - postgres_data: - driver: local -name: MODERN_ECOMMERCE \ No newline at end of file +name: test_no_conflicts \ No newline at end of file diff --git a/docs/FEATURES.md b/docs/FEATURES.md index 5a00ac1..b995eb7 100644 --- a/docs/FEATURES.md +++ b/docs/FEATURES.md @@ -468,6 +468,194 @@ athena build examples/event-driven.ath athena validate examples/fullstack-web.ath ``` +## Docker Swarm Support (**NEW** 12/10/2025) + +Athena now provides comprehensive Docker Swarm support with native DSL directives for production-ready cluster deployments. 
+ +### Replica Management + +**Control service scaling with intelligent replica management:** + +```athena +SERVICE api_gateway +IMAGE-ID python:3.11-slim +REPLICAS 3 # Scale to 3 instances +UPDATE-CONFIG PARALLELISM 1 DELAY 10s FAILURE-ACTION ROLLBACK +END SERVICE +``` + +**Generated Swarm Configuration:** +```yaml +api_gateway: + image: python:3.11-slim + deploy: + replicas: 3 + update_config: + parallelism: 1 # Update one replica at a time + delay: 10s # Wait 10s between updates + failure_action: rollback # Rollback on failure +``` + +### Overlay Network Support + +**Production-ready overlay networks for multi-host communication:** + +```athena +ENVIRONMENT SECTION +NETWORK-NAME swarm_overlay DRIVER OVERLAY ATTACHABLE TRUE ENCRYPTED TRUE +``` + +**Generated Network Configuration:** +```yaml +networks: + swarm_overlay: + driver: overlay # Multi-host networking + attachable: true # Allow container attachment + encrypted: true # Encrypt network traffic +``` + +### Update Configuration Options + +**Comprehensive update control for zero-downtime deployments:** + +| Directive | Description | Example | +|-----------|-------------|---------| +| `PARALLELISM` | Replicas updated simultaneously | `PARALLELISM 2` | +| `DELAY` | Pause between update batches | `DELAY 30s` | +| `FAILURE-ACTION` | Action on update failure | `FAILURE-ACTION ROLLBACK` | +| `MONITOR` | Duration to monitor for failures | `MONITOR 60s` | +| `MAX-FAILURE-RATIO` | Maximum allowed failure ratio | `MAX-FAILURE-RATIO 0.3` | + +```athena +SERVICE microservice +IMAGE-ID node:18-alpine +REPLICAS 5 +UPDATE-CONFIG PARALLELISM 2 DELAY 15s FAILURE-ACTION PAUSE MONITOR 30s MAX-FAILURE-RATIO 0.2 +END SERVICE +``` + +### Swarm-Specific Labels + +**Enhanced labeling for service discovery and management:** + +```athena +SERVICE web_frontend +IMAGE-ID nginx:alpine +REPLICAS 2 +SWARM-LABELS environment="production" tier="frontend" version="v2.1" +END SERVICE +``` + +**Generated Labels:** +```yaml +web_frontend: + 
deploy: + replicas: 2 + labels: + environment: production + tier: frontend + version: v2.1 +``` + +### Complete Swarm Stack Example + +**Production-ready microservices with Swarm orchestration:** + +```athena +DEPLOYMENT-ID MICROSERVICES_SWARM +VERSION-ID 2.0.0 + +ENVIRONMENT SECTION +NETWORK-NAME overlay_network DRIVER OVERLAY ATTACHABLE TRUE ENCRYPTED TRUE + +SERVICES SECTION + +SERVICE api_gateway +BUILD-ARGS NODE_ENV="production" API_VERSION="v2.0" +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 1 DELAY 10s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="api" environment="production" +DEPENDS-ON user_service +DEPENDS-ON order_service +END SERVICE + +SERVICE user_service +IMAGE-ID python:3.11-slim +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 15s +SWARM-LABELS tier="backend" service="users" +DEPENDS-ON database +END SERVICE + +SERVICE order_service +IMAGE-ID java:17-jdk-slim +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 2 DELAY 20s FAILURE-ACTION PAUSE +SWARM-LABELS tier="backend" service="orders" +DEPENDS-ON database +END SERVICE + +SERVICE database +IMAGE-ID postgres:15 +REPLICAS 1 +SWARM-LABELS tier="data" critical="true" +RESOURCE-LIMITS CPU "2.0" MEMORY "2048M" +END SERVICE +``` + +### Deployment Commands + +**Deploy your Swarm stack with intelligent configurations:** + +```bash +# Generate Swarm-compatible compose file +athena build microservices.ath -o swarm-stack.yml + +# Deploy to Docker Swarm cluster +docker stack deploy -c swarm-stack.yml myapp + +# Scale services dynamically +docker service scale myapp_api_gateway=5 + +# Monitor service status +docker service ls +docker service ps myapp_api_gateway +``` + +### Mixed Mode Support + +**Seamlessly combine Docker Compose and Swarm features:** + +```athena +SERVICE development_service +IMAGE-ID alpine:latest +PORT-MAPPING 8080 TO 80 # Compose-style port mapping +END SERVICE + +SERVICE production_service +IMAGE-ID nginx:alpine +REPLICAS 3 # Swarm-specific scaling +UPDATE-CONFIG PARALLELISM 1 DELAY 10s +SWARM-LABELS 
tier="production" +END SERVICE +``` + +### Network Driver Options + +| Driver | Use Case | Generated Config | +|--------|----------|-----------------| +| `BRIDGE` | Single-host development | `driver: bridge` | +| `OVERLAY` | Multi-host production | `driver: overlay` | +| `HOST` | Direct host networking | `driver: host` | + +### Failure Actions + +| Action | Behavior | When to Use | +|--------|----------|-------------| +| `CONTINUE` | Continue despite failures | Non-critical updates | +| `PAUSE` | Stop updates on failure | Manual intervention needed | +| `ROLLBACK` | Revert to previous version | Automatic recovery | + ## Future Enhancements ### Planned Features @@ -478,7 +666,7 @@ athena validate examples/fullstack-web.ath - Log aggregation configuration **Security Enhancements:** -- Secret management integration +- Docker secrets integration - Security scanning in build process - Non-root user defaults diff --git a/examples/swarm-production.ath b/examples/swarm-production.ath new file mode 100644 index 0000000..b219b19 --- /dev/null +++ b/examples/swarm-production.ath @@ -0,0 +1,133 @@ +// Production-Ready Docker Swarm Stack - Complete Example +// Demonstrates all new Swarm features with best practices + +DEPLOYMENT-ID PRODUCTION_SWARM +VERSION-ID 3.0.0 + +ENVIRONMENT SECTION +// Production overlay network with security +NETWORK-NAME production_overlay DRIVER OVERLAY ATTACHABLE TRUE ENCRYPTED TRUE + +SERVICES SECTION + +// Load Balancer / Reverse Proxy - High Availability +SERVICE nginx_proxy +IMAGE-ID nginx:alpine +PORT-MAPPING 80 TO 80 +PORT-MAPPING 443 TO 443 +VOLUME-MAPPING "./nginx/conf.d" TO "/etc/nginx/conf.d" (ro) +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK MONITOR 60s +SWARM-LABELS tier="proxy" environment="production" critical="true" +DEPENDS-ON api_gateway +RESOURCE-LIMITS CPU "0.5" MEMORY "512M" +HEALTH-CHECK "curl -f http://localhost/health || exit 1" +END SERVICE + +// API Gateway - Scalable Entry Point +SERVICE 
api_gateway +BUILD-ARGS NODE_ENV="production" API_VERSION="v3.0" RATE_LIMIT="1000" +PORT-MAPPING 3000 TO 3000 +ENV-VARIABLE {{JWT_SECRET}} +ENV-VARIABLE {{API_RATE_LIMIT}} +REPLICAS 5 +UPDATE-CONFIG PARALLELISM 2 DELAY 15s FAILURE-ACTION PAUSE MAX-FAILURE-RATIO 0.1 +SWARM-LABELS tier="api" environment="production" scaling="auto" +DEPENDS-ON user_service +DEPENDS-ON order_service +DEPENDS-ON notification_service +RESOURCE-LIMITS CPU "1.0" MEMORY "1024M" +HEALTH-CHECK "curl -f http://localhost:3000/health || exit 1" +END SERVICE + +// User Management Service - Backend Microservice +SERVICE user_service +BUILD-ARGS PYTHON_VERSION="3.11" DJANGO_VERSION="4.2" +ENV-VARIABLE {{DATABASE_URL}} +ENV-VARIABLE {{REDIS_URL}} +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 1 DELAY 20s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="backend" service="users" environment="production" +DEPENDS-ON postgres_primary +DEPENDS-ON redis_cache +RESOURCE-LIMITS CPU "0.8" MEMORY "768M" +HEALTH-CHECK "python manage.py health_check" +END SERVICE + +// Order Processing Service - Critical Business Logic +SERVICE order_service +BUILD-ARGS JAVA_VERSION="17" SPRING_VERSION="3.0" +ENV-VARIABLE {{DATABASE_URL}} +ENV-VARIABLE {{PAYMENT_API_KEY}} +REPLICAS 4 +UPDATE-CONFIG PARALLELISM 2 DELAY 25s FAILURE-ACTION ROLLBACK MONITOR 120s MAX-FAILURE-RATIO 0.2 +SWARM-LABELS tier="backend" service="orders" environment="production" critical="true" +DEPENDS-ON postgres_primary +DEPENDS-ON rabbitmq +RESOURCE-LIMITS CPU "1.2" MEMORY "1536M" +HEALTH-CHECK "curl -f http://localhost:8080/actuator/health || exit 1" +END SERVICE + +// Notification Service - Event-Driven Communication +SERVICE notification_service +BUILD-ARGS NODE_VERSION="18" REDIS_VERSION="7" +ENV-VARIABLE {{SMTP_CONFIG}} +ENV-VARIABLE {{REDIS_URL}} +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 10s FAILURE-ACTION CONTINUE +SWARM-LABELS tier="backend" service="notifications" environment="production" +DEPENDS-ON redis_cache +DEPENDS-ON rabbitmq +RESOURCE-LIMITS 
CPU "0.5" MEMORY "512M" +HEALTH-CHECK "node health_check.js" +END SERVICE + +// Primary Database - Single Master with Replication +SERVICE postgres_primary +IMAGE-ID postgres:15-alpine +ENV-VARIABLE {{POSTGRES_USER}} +ENV-VARIABLE {{POSTGRES_PASSWORD}} +ENV-VARIABLE {{POSTGRES_DB}} +VOLUME-MAPPING "postgres_data" TO "/var/lib/postgresql/data" +REPLICAS 1 +UPDATE-CONFIG PARALLELISM 1 DELAY 60s FAILURE-ACTION PAUSE +SWARM-LABELS tier="data" role="primary" environment="production" critical="true" +RESOURCE-LIMITS CPU "2.0" MEMORY "4096M" +HEALTH-CHECK "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}" +END SERVICE + +// Redis Cache - Memory Store +SERVICE redis_cache +IMAGE-ID redis:7-alpine +VOLUME-MAPPING "redis_data" TO "/data" +REPLICAS 1 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="cache" environment="production" +RESOURCE-LIMITS CPU "1.0" MEMORY "2048M" +HEALTH-CHECK "redis-cli ping" +END SERVICE + +// Message Queue - Event Processing +SERVICE rabbitmq +IMAGE-ID rabbitmq:3.12-management-alpine +ENV-VARIABLE {{RABBITMQ_DEFAULT_USER}} +ENV-VARIABLE {{RABBITMQ_DEFAULT_PASS}} +VOLUME-MAPPING "rabbitmq_data" TO "/var/lib/rabbitmq" +REPLICAS 1 +UPDATE-CONFIG PARALLELISM 1 DELAY 45s FAILURE-ACTION PAUSE +SWARM-LABELS tier="messaging" environment="production" +RESOURCE-LIMITS CPU "0.8" MEMORY "1024M" +HEALTH-CHECK "rabbitmq-diagnostics ping" +END SERVICE + +// Monitoring Stack - Prometheus +SERVICE prometheus +IMAGE-ID prom/prometheus:latest +PORT-MAPPING 9090 TO 9090 +VOLUME-MAPPING "./prometheus/prometheus.yml" TO "/etc/prometheus/prometheus.yml" (ro) +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s +SWARM-LABELS tier="monitoring" environment="production" +RESOURCE-LIMITS CPU "1.0" MEMORY "2048M" +HEALTH-CHECK "curl -f http://localhost:9090/-/healthy || exit 1" +END SERVICE \ No newline at end of file diff --git a/presentation-swarm.yml b/presentation-swarm.yml new file mode 100644 index 0000000..7dd3f3f --- /dev/null +++ 
b/presentation-swarm.yml @@ -0,0 +1,341 @@ +# Generated by Athena v0.1.0 from MODERN_ECOMMERCE deployment +# Developed by UNFAIR Team: https://github.com/Jeck0v/Athena +# Project Version: 2.0.0 +# Generated: 2025-10-12 15:11:05 UTC +# Features: Intelligent defaults, optimized networking, enhanced health checks + +# Services: 7 configured with intelligent defaults + +services: + cache: + image: redis:7-alpine + container_name: modern-ecommerce-cache + volumes: + - redis_data:/data + healthcheck: + test: + - CMD-SHELL + - redis-cli ping || exit 1 + interval: 15s + timeout: 3s + retries: 3 + start_period: 20s + restart: always + deploy: + resources: + limits: + cpus: '0.3' + memory: 512M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 1 + update_config: + parallelism: 1 + delay: 30s + failure_action: rollback + labels: + environment: production + tier: cache + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.generated: 2025-10-12 + athena.type: cache + athena.service: cache + athena.project: MODERN_ECOMMERCE + + monitoring: + image: prom/prometheus:latest + container_name: modern-ecommerce-monitoring + ports: + - 9090:9090 + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.3' + memory: 512M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 2 + update_config: + parallelism: 1 + delay: 20s + failure_action: continue + labels: + tier: monitoring + environment: production + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.type: generic + athena.generated: 2025-10-12 + athena.service: monitoring + athena.project: MODERN_ECOMMERCE + + api_gateway: + build: + context: . 
+ dockerfile: Dockerfile + args: + JWT_EXPIRY: 24h + NODE_ENV: production + API_VERSION: v2.1 + container_name: modern-ecommerce-api_gateway + environment: + - JWT_SECRET=${JWT_SECRET} + - DATABASE_URL=${DATABASE_URL} + - REDIS_URL=${REDIS_URL} + depends_on: + - database + - cache + healthcheck: + test: + - CMD-SHELL + - curl -f http://localhost:3000/health || exit 1 + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.5' + memory: 512M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 3 + update_config: + parallelism: 1 + delay: 15s + failure_action: rollback + labels: + environment: production + tier: api + scaling: auto + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.project: MODERN_ECOMMERCE + athena.service: api_gateway + athena.type: generic + athena.generated: 2025-10-12 + + product_service: + build: + context: . + dockerfile: Dockerfile + args: + CATALOG_VERSION: v1.5 + SEARCH_ENGINE: elasticsearch + container_name: modern-ecommerce-product_service + environment: + - DATABASE_URL=${DATABASE_URL} + depends_on: + - database + healthcheck: + test: + - CMD-SHELL + - curl -f http://localhost:3000/health || exit 1 + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.3' + memory: 256M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 2 + update_config: + parallelism: 1 + delay: 20s + failure_action: pause + labels: + tier: backend + environment: production + service: catalog + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.type: generic + athena.service: product_service + athena.generated: 2025-10-12 + athena.project: MODERN_ECOMMERCE + + nginx_reverse_proxy: + image: nginx:alpine + container_name: modern-ecommerce-nginx_reverse_proxy + ports: + - 80:80 + 
- 443:443 + volumes: + - ./nginx/conf.d:/etc/nginx/conf.d:ro + depends_on: + - api_gateway + healthcheck: + test: + - CMD-SHELL + - curl -f http://localhost:80/health || exit 1 + interval: 20s + timeout: 5s + retries: 3 + start_period: 30s + restart: always + deploy: + resources: + limits: + cpus: '0.2' + memory: 256M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 2 + update_config: + parallelism: 1 + delay: 30s + failure_action: rollback + labels: + critical: 'true' + tier: proxy + environment: production + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.service: nginx_reverse_proxy + athena.type: proxy + athena.generated: 2025-10-12 + athena.project: MODERN_ECOMMERCE + + auth_service: + build: + context: . + dockerfile: Dockerfile + args: + AUTH_PROVIDER: oauth2 + SESSION_TIMEOUT: 1h + container_name: modern-ecommerce-auth_service + environment: + - JWT_SECRET=${JWT_SECRET} + - DATABASE_URL=${DATABASE_URL} + depends_on: + - database + healthcheck: + test: + - CMD-SHELL + - curl -f http://localhost:3000/health || exit 1 + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.2' + memory: 256M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 2 + update_config: + parallelism: 1 + delay: 10s + failure_action: rollback + labels: + critical: 'true' + service: auth + tier: backend + environment: production + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.type: generic + athena.service: auth_service + athena.project: MODERN_ECOMMERCE + athena.generated: 2025-10-12 + + database: + image: postgres:15 + container_name: modern-ecommerce-database + environment: + - POSTGRES_USER=${POSTGRES_USER} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - POSTGRES_DB=${POSTGRES_DB} + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: + - 
CMD-SHELL + - pg_isready -U ${POSTGRES_USER} || exit 1 + interval: 10s + timeout: 5s + retries: 5 + start_period: 60s + restart: always + deploy: + resources: + limits: + cpus: '1.0' + memory: 1024M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + replicas: 1 + update_config: + parallelism: 1 + delay: 60s + failure_action: pause + labels: + critical: 'true' + tier: data + environment: production + role: primary + networks: + - ecommerce_network + pull_policy: missing + labels: + athena.service: database + athena.type: database + athena.project: MODERN_ECOMMERCE + athena.generated: 2025-10-12 +networks: + ecommerce_network: + driver: overlay + attachable: true +volumes: + postgres_data: + driver: local + redis_data: + driver: local +name: MODERN_ECOMMERCE \ No newline at end of file diff --git a/presentation.ath b/presentation.ath index a519203..ebfaea6 100644 --- a/presentation.ath +++ b/presentation.ath @@ -3,9 +3,8 @@ DEPLOYMENT-ID MODERN_ECOMMERCE VERSION-ID 2.0.0 ENVIRONMENT SECTION -NETWORK-NAME ecommerce_network -// TODO: In Swarm mode, network will use overlay driver automatically -// For now: bridge driver (compose), overlay driver (swarm deploy) +NETWORK-NAME ecommerce_network DRIVER OVERLAY ATTACHABLE TRUE +// Production-ready overlay network for multi-host Docker Swarm deployment // Define persistent volumes for production VOLUME postgres_data VOLUME redis_data @@ -22,6 +21,9 @@ PORT-MAPPING 80 TO 80 PORT-MAPPING 443 TO 443 VOLUME-MAPPING "./nginx/conf.d" TO "/etc/nginx/conf.d" (ro) RESOURCE-LIMITS CPU "0.2" MEMORY "256M" +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="proxy" environment="production" critical="true" DEPENDS-ON api_gateway HEALTH-CHECK "curl -f http://localhost:80/health || exit 1" // Athena auto-applies: restart=always for proxy type @@ -35,11 +37,13 @@ ENV-VARIABLE {{JWT_SECRET}} ENV-VARIABLE {{DATABASE_URL}} ENV-VARIABLE {{REDIS_URL}} RESOURCE-LIMITS 
CPU "0.5" MEMORY "512M" +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 1 DELAY 15s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="api" environment="production" scaling="auto" DEPENDS-ON database DEPENDS-ON cache HEALTH-CHECK "curl -f http://localhost:3000/health || exit 1" // Athena auto-applies: restart=unless-stopped for webapp type -// TODO: Add replicas: 3 for load balancing when Athena supports it END SERVICE // Product catalog service - Stateless, horizontally scalable @@ -48,9 +52,11 @@ BUILD-ARGS CATALOG_VERSION="v1.5" SEARCH_ENGINE="elasticsearch" // No PORT-MAPPING for production - internal access only ENV-VARIABLE {{DATABASE_URL}} RESOURCE-LIMITS CPU "0.3" MEMORY "256M" +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 20s FAILURE-ACTION PAUSE +SWARM-LABELS tier="backend" service="catalog" environment="production" DEPENDS-ON database HEALTH-CHECK "curl -f http://localhost:3000/health || exit 1" -// TODO: Add replicas: 2 for high availability when Athena supports it END SERVICE // User authentication service - Stateless, scalable microservice @@ -60,9 +66,11 @@ BUILD-ARGS AUTH_PROVIDER="oauth2" SESSION_TIMEOUT="1h" ENV-VARIABLE {{JWT_SECRET}} ENV-VARIABLE {{DATABASE_URL}} RESOURCE-LIMITS CPU "0.2" MEMORY "256M" +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 10s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="backend" service="auth" environment="production" critical="true" DEPENDS-ON database HEALTH-CHECK "curl -f http://localhost:3000/health || exit 1" -// TODO: Add replicas: 2 for authentication redundancy when Athena supports it END SERVICE // Main database - PRODUCTION: No external ports, internal network only @@ -76,9 +84,11 @@ ENV-VARIABLE {{POSTGRES_DB}} // Using named volume for production persistence VOLUME-MAPPING "postgres_data" TO "/var/lib/postgresql/data" RESOURCE-LIMITS CPU "1.0" MEMORY "1024M" +REPLICAS 1 +UPDATE-CONFIG PARALLELISM 1 DELAY 60s FAILURE-ACTION PAUSE +SWARM-LABELS tier="data" role="primary" environment="production" critical="true" HEALTH-CHECK 
"pg_isready -U ${POSTGRES_USER} || exit 1" // Athena auto-applies: restart=always for database type -// TODO: Add replicas: 1 for database (single master) when Athena supports it END SERVICE // Redis cache - PRODUCTION: No external ports, internal network only @@ -89,9 +99,11 @@ Use 'docker exec' or port forwarding for debugging if needed Using named volume for production persistence */ VOLUME-MAPPING "redis_data" TO "/data" RESOURCE-LIMITS CPU "0.3" MEMORY "512M" +REPLICAS 1 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="cache" environment="production" HEALTH-CHECK "redis-cli ping || exit 1" // Athena auto-applies: restart=always for cache type -// TODO: Add replicas: 1 for cache (single instance) when Athena supports it END SERVICE // Monitoring service - Independent scraping configuration @@ -102,5 +114,7 @@ VOLUME-MAPPING "./prometheus/prometheus.yml" TO "/etc/prometheus/prometheus.yml" // No DEPENDS-ON - Prometheus handles service discovery and scraping independently // Configure prometheus.yml to scrape targets via service names RESOURCE-LIMITS CPU "0.3" MEMORY "512M" -// TODO: Add replicas: 2 for HA monitoring when Athena supports it +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 20s FAILURE-ACTION CONTINUE +SWARM-LABELS tier="monitoring" environment="production" END SERVICE diff --git a/src/athena/dockerfile.rs b/src/athena/dockerfile.rs index c6ee64c..048c9c8 100644 --- a/src/athena/dockerfile.rs +++ b/src/athena/dockerfile.rs @@ -14,6 +14,7 @@ pub struct DockerfileArg { pub struct DockerfileAnalysis { pub args: Vec, pub dockerfile_path: String, + #[allow(dead_code)] pub content: String, } @@ -177,7 +178,7 @@ pub fn validate_build_args_against_dockerfile( .collect(); // Check each BUILD-ARG against Dockerfile - for (build_arg_name, _build_arg_value) in build_args { + for build_arg_name in build_args.keys() { if !available_args.contains(build_arg_name) { // Find similar ARG names for suggestions let suggestions = 
find_similar_arg_names(build_arg_name, &available_args); diff --git a/src/athena/error.rs b/src/athena/error.rs index d95cca3..cff187d 100644 --- a/src/athena/error.rs +++ b/src/athena/error.rs @@ -4,6 +4,7 @@ use std::fmt; pub type AthenaResult = Result; #[derive(Error, Debug)] +#[allow(clippy::enum_variant_names)] pub enum AthenaError { #[error("{0}")] ParseError(EnhancedParseError), diff --git a/src/athena/generator/compose.rs b/src/athena/generator/compose.rs index f26910f..0a82938 100644 --- a/src/athena/generator/compose.rs +++ b/src/athena/generator/compose.rs @@ -50,6 +50,12 @@ pub struct ResourceSpec { #[derive(Debug, Serialize, Deserialize)] pub struct DockerNetwork { driver: String, + #[serde(skip_serializing_if = "Option::is_none")] + attachable: Option, + #[serde(skip_serializing_if = "Option::is_none")] + encrypted: Option, + #[serde(skip_serializing_if = "Option::is_none")] + ingress: Option, } #[derive(Debug, Serialize, Deserialize)] @@ -69,8 +75,8 @@ pub fn generate_docker_compose(athena_file: &AthenaFile) -> AthenaResult volumes: None, }; - // Create optimized network configuration - compose.networks = Some(create_optimized_networks(&network_name)); + // Create optimized network configuration with Swarm support + compose.networks = Some(create_optimized_networks(athena_file)); // Process volumes with enhanced configuration if let Some(env) = &athena_file.environment { @@ -100,15 +106,47 @@ pub fn generate_docker_compose(athena_file: &AthenaFile) -> AthenaResult Ok(add_enhanced_yaml_comments(formatted_yaml, athena_file)) } -/// Create optimized network configuration -fn create_optimized_networks(network_name: &str) -> HashMap { +/// Create optimized network configuration with Docker Swarm support +fn create_optimized_networks(athena_file: &AthenaFile) -> HashMap { let mut networks = HashMap::new(); - networks.insert( - network_name.to_string(), - DockerNetwork { - driver: "bridge".to_string(), - }, - ); + + if let Some(env) = 
&athena_file.environment { + // Use networks defined in environment section + for network_def in &env.networks { + let driver = match &network_def.driver { + Some(NetworkDriver::Bridge) => "bridge".to_string(), + Some(NetworkDriver::Overlay) => "overlay".to_string(), + Some(NetworkDriver::Host) => "host".to_string(), + Some(NetworkDriver::None) => "none".to_string(), + None => "bridge".to_string(), + }; + + networks.insert( + network_def.name.clone(), + DockerNetwork { + driver, + attachable: network_def.attachable, + encrypted: network_def.encrypted, + ingress: network_def.ingress, + }, + ); + } + } + + // If no networks defined, create default network + if networks.is_empty() { + let default_name = athena_file.get_network_name(); + networks.insert( + default_name, + DockerNetwork { + driver: "bridge".to_string(), + attachable: None, + encrypted: None, + ingress: None, + }, + ); + } + networks } @@ -222,13 +260,12 @@ fn detect_circular_dependencies_optimized(compose: &DockerCompose) -> AthenaResu let mut temp_visited = HashSet::new(); for service_name in compose.services.keys() { - if !visited.contains(service_name) { - if has_cycle_iterative(service_name, compose, &mut visited, &mut temp_visited)? { + if !visited.contains(service_name) + && has_cycle_iterative(service_name, compose, &mut visited, &mut temp_visited)? 
{ return Err(AthenaError::validation_error_enhanced( EnhancedValidationError::circular_dependency(service_name), )); } - } } Ok(()) @@ -292,7 +329,7 @@ fn detect_port_conflicts(compose: &DockerCompose) -> AthenaResult<()> { if let Some(host_port) = extract_host_port(port_mapping) { port_to_services .entry(host_port) - .or_insert_with(Vec::new) + .or_default() .push(service_name.clone()); } } diff --git a/src/athena/generator/defaults.rs b/src/athena/generator/defaults.rs index b77749f..cf4875d 100644 --- a/src/athena/generator/defaults.rs +++ b/src/athena/generator/defaults.rs @@ -106,6 +106,12 @@ pub struct EnhancedDeploy { pub resources: Option, #[serde(skip_serializing_if = "Option::is_none")] pub restart_policy: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub replicas: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub update_config: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub labels: Option>, } #[derive(Debug, Serialize, Deserialize)] @@ -132,6 +138,20 @@ pub struct EnhancedRestartPolicy { pub window: String, } +#[derive(Debug, Serialize, Deserialize)] +pub struct SwarmUpdateConfig { + #[serde(skip_serializing_if = "Option::is_none")] + pub parallelism: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub delay: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub failure_action: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub monitor: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub max_failure_ratio: Option, +} + /// Service type detection for intelligent defaults #[derive(Debug, PartialEq, Clone, Copy)] pub enum ServiceType { @@ -251,7 +271,7 @@ impl DefaultsEngine { }, healthcheck: Self::convert_healthcheck(&service.health_check, &defaults), restart: Self::convert_restart_policy(&service.restart, &defaults), - deploy: Self::convert_deploy(&service.resources), + deploy: Self::convert_deploy(&service.resources, &service.swarm_config), networks: 
vec![network_name.to_string()], pull_policy: Self::convert_pull_policy(&defaults.pull_policy), labels: Some(Self::generate_labels(project_name, &service.name, service_type)), @@ -366,22 +386,60 @@ impl DefaultsEngine { } } - fn convert_deploy(resources: &Option) -> Option { - resources.as_ref().map(|res| EnhancedDeploy { - resources: Some(EnhancedResources { - limits: Some(ResourceSpec { - cpus: Some(res.cpu.clone()), - memory: Some(res.memory.clone()), - }), - reservations: None, - }), - restart_policy: Some(EnhancedRestartPolicy { - condition: "on-failure".to_string(), - delay: "5s".to_string(), - max_attempts: 3, - window: "120s".to_string(), + fn convert_deploy( + resources: &Option, + swarm_config: &Option + ) -> Option { + if resources.is_none() && swarm_config.is_none() { + return None; + } + + let enhanced_resources = resources.as_ref().map(|res| EnhancedResources { + limits: Some(ResourceSpec { + cpus: Some(res.cpu.clone()), + memory: Some(res.memory.clone()), }), - }) + reservations: None, + }); + + let restart_policy = Some(EnhancedRestartPolicy { + condition: "on-failure".to_string(), + delay: "5s".to_string(), + max_attempts: 3, + window: "120s".to_string(), + }); + + let mut enhanced_deploy = EnhancedDeploy { + resources: enhanced_resources, + restart_policy, + replicas: None, + update_config: None, + labels: None, + }; + + // Add Swarm-specific configurations + if let Some(swarm) = swarm_config { + enhanced_deploy.replicas = swarm.replicas; + enhanced_deploy.labels = swarm.labels.clone(); + + if let Some(update_config) = &swarm.update_config { + enhanced_deploy.update_config = Some(SwarmUpdateConfig { + parallelism: update_config.parallelism, + delay: update_config.delay.clone(), + failure_action: update_config.failure_action.as_ref().map(|fa| { + match fa { + FailureAction::Continue => "continue".to_string(), + FailureAction::Pause => "pause".to_string(), + FailureAction::Rollback => "rollback".to_string(), + } + }), + monitor: 
update_config.monitor.clone(), + max_failure_ratio: update_config.max_failure_ratio, + }); + } + } + + Some(enhanced_deploy) } fn convert_pull_policy(pull_policy: &PullPolicy) -> String { diff --git a/src/athena/parser/ast.rs b/src/athena/parser/ast.rs index 1818249..f88daf7 100644 --- a/src/athena/parser/ast.rs +++ b/src/athena/parser/ast.rs @@ -16,11 +16,28 @@ pub struct DeploymentSection { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct EnvironmentSection { - pub network_name: Option, + pub networks: Vec, pub volumes: Vec, pub secrets: HashMap, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NetworkDefinition { + pub name: String, + pub driver: Option, + pub attachable: Option, + pub encrypted: Option, + pub ingress: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum NetworkDriver { + Bridge, + Overlay, + Host, + None, +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct VolumeDefinition { pub name: String, @@ -45,6 +62,7 @@ pub struct Service { pub restart: Option, pub resources: Option, pub build_args: Option>, + pub swarm_config: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -88,6 +106,35 @@ pub struct ResourceLimits { pub memory: String, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SwarmConfig { + pub replicas: Option, + pub update_config: Option, + pub labels: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UpdateConfig { + pub parallelism: Option, + pub delay: Option, + pub failure_action: Option, + pub monitor: Option, + pub max_failure_ratio: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum FailureAction { + Continue, + Pause, + Rollback, +} + +impl Default for AthenaFile { + fn default() -> Self { + Self::new() + } +} + impl AthenaFile { pub fn new() -> Self { Self { @@ -109,10 +156,19 @@ impl AthenaFile { pub fn get_network_name(&self) -> String { self.environment .as_ref() - .and_then(|e| e.network_name.as_ref()) 
- .map(|n| n.clone()) + .and_then(|e| { + e.networks.first().map(|net| net.name.clone()) + }) .unwrap_or_else(|| format!("{}_network", self.get_project_name().to_lowercase())) } + + #[allow(dead_code)] + pub fn get_networks(&self) -> Vec<&NetworkDefinition> { + self.environment + .as_ref() + .map(|e| e.networks.iter().collect()) + .unwrap_or_default() + } } impl Service { @@ -129,6 +185,54 @@ impl Service { restart: None, resources: None, build_args: None, + swarm_config: None, + } + } +} + +impl Default for SwarmConfig { + fn default() -> Self { + Self::new() + } +} + +impl SwarmConfig { + pub fn new() -> Self { + Self { + replicas: None, + update_config: None, + labels: None, + } + } +} + +impl Default for UpdateConfig { + fn default() -> Self { + Self::new() + } +} + +impl UpdateConfig { + pub fn new() -> Self { + Self { + parallelism: None, + delay: None, + failure_action: None, + monitor: None, + max_failure_ratio: None, + } + } +} + +impl NetworkDefinition { + #[allow(dead_code)] + pub fn new(name: String) -> Self { + Self { + name, + driver: None, + attachable: None, + encrypted: None, + ingress: None, } } } \ No newline at end of file diff --git a/src/athena/parser/grammar.pest b/src/athena/parser/grammar.pest index 0c019be..4b1d381 100644 --- a/src/athena/parser/grammar.pest +++ b/src/athena/parser/grammar.pest @@ -15,10 +15,20 @@ version_id = { "VERSION-ID" ~ version_string } // Environment section environment_section = { "ENVIRONMENT" ~ "SECTION" ~ environment_item* } environment_item = { network_name | volume_def | secret_def } -network_name = { "NETWORK-NAME" ~ identifier } +network_name = { "NETWORK-NAME" ~ identifier ~ network_options? } volume_def = { "VOLUME" ~ identifier ~ volume_options? 
} secret_def = { "SECRET" ~ identifier ~ string_value } +// Network options for Docker Swarm overlay support +network_options = { network_option+ } +network_option = { + ("DRIVER" ~ network_driver) | + ("ATTACHABLE" ~ boolean_value) | + ("ENCRYPTED" ~ boolean_value) | + ("INGRESS" ~ boolean_value) +} +network_driver = { "BRIDGE" | "OVERLAY" | "HOST" | "NONE" } + // Services section services_section = { "SERVICES" ~ "SECTION" ~ service* } @@ -36,7 +46,10 @@ service_item = { health_check | restart_policy | resource_limits | - build_args + build_args | + swarm_replicas | + swarm_update_config | + swarm_labels } // Service directives @@ -52,14 +65,37 @@ resource_limits = { "RESOURCE-LIMITS" ~ "CPU" ~ string_value ~ "MEMORY" ~ string build_args = { "BUILD-ARGS" ~ build_arg_pair+ } build_arg_pair = { identifier ~ "=" ~ string_value } +// Docker Swarm specific directives +swarm_replicas = { "REPLICAS" ~ number } +swarm_update_config = { "UPDATE-CONFIG" ~ update_config_options+ } +swarm_labels = { "SWARM-LABELS" ~ swarm_label_pair+ } + +// Update configuration options +update_config_options = { + ("PARALLELISM" ~ number) | + ("DELAY" ~ time_value) | + ("FAILURE-ACTION" ~ failure_action) | + ("MONITOR" ~ time_value) | + ("MAX-FAILURE-RATIO" ~ decimal_value) +} + +// Swarm label pairs +swarm_label_pair = { identifier ~ "=" ~ string_value } + +// Failure actions for update config +failure_action = { "CONTINUE" | "PAUSE" | "ROLLBACK" } + // Base types identifier = @{ (ASCII_ALPHA | "_") ~ (ASCII_ALPHANUMERIC | "_" | "-")* } string_value = @{ ("\"" ~ (!("\"") ~ ANY)* ~ "\"") | - (!(WHITESPACE | "END" | "SERVICE" | "TO" | "CPU" | "MEMORY") ~ ANY)+ + (!(WHITESPACE | "END" | "SERVICE" | "TO" | "CPU" | "MEMORY" | "PARALLELISM" | "DELAY" | "FAILURE-ACTION" | "MONITOR" | "MAX-FAILURE-RATIO" | "DRIVER" | "ATTACHABLE" | "ENCRYPTED" | "INGRESS") ~ ANY)+ } template_var = @{ "{{" ~ identifier ~ "}}" } number = @{ ASCII_DIGIT+ } +decimal_value = @{ ASCII_DIGIT+ ~ ("." ~ ASCII_DIGIT+)? 
} +time_value = @{ ASCII_DIGIT+ ~ ("s" | "m" | "h") } +boolean_value = { "TRUE" | "FALSE" } version_string = @{ ASCII_DIGIT+ ~ "." ~ ASCII_DIGIT+ ~ ("." ~ ASCII_DIGIT+)? } port_protocol = { "(" ~ ("tcp" | "udp") ~ ")" } volume_options = { "(" ~ volume_option ~ ("," ~ volume_option)* ~ ")" } diff --git a/src/athena/parser/mod.rs b/src/athena/parser/mod.rs index efbe475..b7d2d1a 100644 --- a/src/athena/parser/mod.rs +++ b/src/athena/parser/mod.rs @@ -1,4 +1,5 @@ pub mod ast; +#[allow(clippy::module_inception)] pub mod parser; pub mod optimized_parser; diff --git a/src/athena/parser/optimized_parser.rs b/src/athena/parser/optimized_parser.rs index bc7b2a3..71cb0ed 100644 --- a/src/athena/parser/optimized_parser.rs +++ b/src/athena/parser/optimized_parser.rs @@ -182,7 +182,7 @@ impl OptimizedParser { // Set default environment if missing if athena_file.environment.is_none() { athena_file.environment = Some(EnvironmentSection { - network_name: None, // Will use project name + networks: Vec::new(), // Will create default network volumes: Vec::new(), secrets: HashMap::new(), }); diff --git a/src/athena/parser/parser.rs b/src/athena/parser/parser.rs index bb38d1e..c19cfb8 100644 --- a/src/athena/parser/parser.rs +++ b/src/athena/parser/parser.rs @@ -96,42 +96,37 @@ fn parse_deployment_section(pair: pest::iterators::Pair) -> AthenaResult) -> AthenaResult { - let mut network_name = None; + let mut networks = Vec::new(); let mut volumes = Vec::new(); let mut secrets = HashMap::new(); for inner_pair in pair.into_inner() { - match inner_pair.as_rule() { - Rule::environment_item => { - for item_pair in inner_pair.into_inner() { - match item_pair.as_rule() { - Rule::network_name => { - if let Some(name_pair) = item_pair.into_inner().next() { - network_name = Some(name_pair.as_str().to_string()); - } - } - Rule::volume_def => { - volumes.push(parse_volume_definition(item_pair)?); - } - Rule::secret_def => { - let mut inner = item_pair.into_inner(); - if let (Some(key), 
Some(value)) = (inner.next(), inner.next()) { - secrets.insert( - key.as_str().to_string(), - clean_string_value(value.as_str()) - ); - } + if inner_pair.as_rule() == Rule::environment_item { + for item_pair in inner_pair.into_inner() { + match item_pair.as_rule() { + Rule::network_name => { + networks.push(parse_network_definition(item_pair)?); + } + Rule::volume_def => { + volumes.push(parse_volume_definition(item_pair)?); + } + Rule::secret_def => { + let mut inner = item_pair.into_inner(); + if let (Some(key), Some(value)) = (inner.next(), inner.next()) { + secrets.insert( + key.as_str().to_string(), + clean_string_value(value.as_str()) + ); } - _ => {} } + _ => {} } } - _ => {} } } Ok(EnvironmentSection { - network_name, + networks, volumes, secrets, }) @@ -164,6 +159,67 @@ fn parse_volume_definition(pair: pest::iterators::Pair) -> AthenaResult) -> AthenaResult { + let mut name = None; + let mut driver = None; + let mut attachable = None; + let mut encrypted = None; + let mut ingress = None; + + for inner_pair in pair.into_inner() { + match inner_pair.as_rule() { + Rule::identifier => { + name = Some(inner_pair.as_str().to_string()); + } + Rule::network_options => { + for option_pair in inner_pair.into_inner() { + if let Rule::network_option = option_pair.as_rule() { + let option_str = option_pair.as_str(); + for opt_inner in option_pair.into_inner() { + match opt_inner.as_rule() { + Rule::network_driver => { + driver = Some(match opt_inner.as_str() { + "BRIDGE" => NetworkDriver::Bridge, + "OVERLAY" => NetworkDriver::Overlay, + "HOST" => NetworkDriver::Host, + "NONE" => NetworkDriver::None, + _ => NetworkDriver::Bridge, + }); + } + Rule::boolean_value => { + let bool_val = opt_inner.as_str() == "TRUE"; + // Use the captured option_str to determine context + if option_str.contains("ATTACHABLE") { + attachable = Some(bool_val); + } else if option_str.contains("ENCRYPTED") { + encrypted = Some(bool_val); + } else if option_str.contains("INGRESS") { + ingress = 
Some(bool_val); + } + } + _ => {} + } + } + } + } + } + _ => {} + } + } + + let name = name.ok_or_else(|| + AthenaError::ParseError(EnhancedParseError::new("Missing network name".to_string())) + )?; + + Ok(NetworkDefinition { + name, + driver, + attachable, + encrypted, + ingress, + }) +} + fn parse_services_section(pair: pest::iterators::Pair) -> AthenaResult { let mut services = Vec::new(); @@ -243,6 +299,27 @@ fn parse_service_item(pair: pest::iterators::Pair, service: &mut Service) Rule::build_args => { service.build_args = Some(parse_build_args(inner_pair)?); } + Rule::swarm_replicas => { + if let Some(replicas_pair) = inner_pair.into_inner().next() { + let replicas = replicas_pair.as_str().parse::() + .map_err(|_| AthenaError::ParseError( + EnhancedParseError::new("Invalid replicas number".to_string()) + ))?; + + if service.swarm_config.is_none() { + service.swarm_config = Some(SwarmConfig::new()); + } + service.swarm_config.as_mut().unwrap().replicas = Some(replicas); + } + } + Rule::swarm_update_config => { + service.swarm_config.get_or_insert_with(SwarmConfig::new) + .update_config = Some(parse_update_config(inner_pair)?); + } + Rule::swarm_labels => { + service.swarm_config.get_or_insert_with(SwarmConfig::new) + .labels = Some(parse_swarm_labels(inner_pair)?); + } _ => {} } } @@ -393,6 +470,70 @@ fn parse_build_args(pair: pest::iterators::Pair) -> AthenaResult) -> AthenaResult { + let mut update_config = UpdateConfig::new(); + + for inner_pair in pair.into_inner() { + if let Rule::update_config_options = inner_pair.as_rule() { + let option_str = inner_pair.as_str(); + for value_pair in inner_pair.into_inner() { + if option_str.starts_with("PARALLELISM") && value_pair.as_rule() == Rule::number { + update_config.parallelism = Some(value_pair.as_str().parse::() + .map_err(|_| AthenaError::ParseError( + EnhancedParseError::new("Invalid parallelism number".to_string()) + ))?); + } else if option_str.starts_with("DELAY") && value_pair.as_rule() == 
Rule::time_value { + update_config.delay = Some(value_pair.as_str().to_string()); + } else if option_str.starts_with("FAILURE-ACTION") && value_pair.as_rule() == Rule::failure_action { + update_config.failure_action = Some(match value_pair.as_str() { + "CONTINUE" => FailureAction::Continue, + "PAUSE" => FailureAction::Pause, + "ROLLBACK" => FailureAction::Rollback, + _ => FailureAction::Pause, + }); + } else if option_str.starts_with("MONITOR") && value_pair.as_rule() == Rule::time_value { + update_config.monitor = Some(value_pair.as_str().to_string()); + } else if option_str.starts_with("MAX-FAILURE-RATIO") && value_pair.as_rule() == Rule::decimal_value { + update_config.max_failure_ratio = Some(value_pair.as_str().parse::() + .map_err(|_| AthenaError::ParseError( + EnhancedParseError::new("Invalid max failure ratio".to_string()) + ))?); + } + } + } + } + + Ok(update_config) +} + +fn parse_swarm_labels(pair: pest::iterators::Pair) -> AthenaResult> { + let mut labels = HashMap::new(); + + for inner_pair in pair.into_inner() { + if let Rule::swarm_label_pair = inner_pair.as_rule() { + let mut label_parts = inner_pair.into_inner(); + + let key = label_parts.next() + .ok_or_else(|| AthenaError::ParseError(EnhancedParseError::new("Missing label key".to_string())))? + .as_str().to_string(); + + let value = label_parts.next() + .ok_or_else(|| AthenaError::ParseError(EnhancedParseError::new("Missing label value".to_string())))? 
+ .as_str(); + + labels.insert(key, clean_string_value(value)); + } + } + + if labels.is_empty() { + return Err(AthenaError::ParseError(EnhancedParseError::new( + "SWARM-LABELS must contain at least one key=value pair".to_string() + ))); + } + + Ok(labels) +} + fn clean_string_value(input: &str) -> String { if input.starts_with('"') && input.ends_with('"') { input[1..input.len()-1].to_string() @@ -477,7 +618,7 @@ fn create_enhanced_parse_error( } else { ( extract_clean_message(&base_message), - generate_generic_suggestion(&positives) + generate_generic_suggestion(positives) ) } } diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 4dd0685..3eaf7d9 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -7,4 +7,7 @@ pub mod boilerplate; pub mod structural; // BUILD-ARGS feature tests -pub mod build_args_cli_tests; \ No newline at end of file +pub mod build_args_cli_tests; + +// Docker Swarm feature tests +pub mod swarm_features_test; \ No newline at end of file diff --git a/tests/integration/swarm_features_test.rs b/tests/integration/swarm_features_test.rs new file mode 100644 index 0000000..367da08 --- /dev/null +++ b/tests/integration/swarm_features_test.rs @@ -0,0 +1,530 @@ +use athena::athena::parser::parser::parse_athena_file; +use athena::athena::generator::compose::generate_docker_compose; + +#[test] +fn test_swarm_replicas_parsing() { + let input = r#" + DEPLOYMENT-ID SWARM_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + REPLICAS 3 + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + assert_eq!(athena_file.services.services.len(), 1); + + let service = &athena_file.services.services[0]; + assert_eq!(service.name, "web"); + assert!(service.swarm_config.is_some()); + + let swarm_config = service.swarm_config.as_ref().unwrap(); + assert_eq!(swarm_config.replicas, Some(3)); +} + +#[test] +fn test_swarm_update_config_parsing() { + 
let input = r#" + DEPLOYMENT-ID SWARM_UPDATE_TEST + + SERVICES SECTION + + SERVICE api + IMAGE-ID python:3.11 + REPLICAS 2 + UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + let service = &athena_file.services.services[0]; + let swarm_config = service.swarm_config.as_ref().unwrap(); + + assert_eq!(swarm_config.replicas, Some(2)); + assert!(swarm_config.update_config.is_some()); + + let update_config = swarm_config.update_config.as_ref().unwrap(); + assert_eq!(update_config.parallelism, Some(1)); + assert_eq!(update_config.delay, Some("30s".to_string())); + assert!(update_config.failure_action.is_some()); +} + +#[test] +fn test_swarm_labels_parsing() { + let input = r#" + DEPLOYMENT-ID SWARM_LABELS_TEST + + SERVICES SECTION + + SERVICE app + IMAGE-ID node:18 + SWARM-LABELS environment="production" tier="backend" + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + let service = &athena_file.services.services[0]; + let swarm_config = service.swarm_config.as_ref().unwrap(); + + assert!(swarm_config.labels.is_some()); + let labels = swarm_config.labels.as_ref().unwrap(); + assert_eq!(labels.get("environment"), Some(&"production".to_string())); + assert_eq!(labels.get("tier"), Some(&"backend".to_string())); + assert_eq!(labels.len(), 2); +} + +#[test] +fn test_overlay_network_parsing() { + let input = r#" + DEPLOYMENT-ID OVERLAY_TEST + + ENVIRONMENT SECTION + NETWORK-NAME overlay_net DRIVER OVERLAY ATTACHABLE TRUE ENCRYPTED FALSE + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + assert!(athena_file.environment.is_some()); + + let env = athena_file.environment.as_ref().unwrap(); + assert_eq!(env.networks.len(), 
1); + + let network = &env.networks[0]; + assert_eq!(network.name, "overlay_net"); + assert!(network.driver.is_some()); + assert_eq!(network.attachable, Some(true)); + assert_eq!(network.encrypted, Some(false)); +} + +#[test] +fn test_complete_swarm_compose_generation() { + let input = r#" + DEPLOYMENT-ID COMPLETE_SWARM + VERSION-ID 2.0.0 + + ENVIRONMENT SECTION + NETWORK-NAME swarm_overlay DRIVER OVERLAY ATTACHABLE TRUE + + SERVICES SECTION + + SERVICE frontend + BUILD-ARGS NODE_ENV="production" + PORT-MAPPING 80 TO 3000 + REPLICAS 2 + UPDATE-CONFIG PARALLELISM 1 DELAY 10s + SWARM-LABELS tier="frontend" env="prod" + DEPENDS-ON backend + END SERVICE + + SERVICE backend + IMAGE-ID python:3.11-slim + REPLICAS 3 + SWARM-LABELS tier="backend" env="prod" + RESOURCE-LIMITS CPU "0.5" MEMORY "512M" + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + let compose_result = generate_docker_compose(&athena_file); + assert!(compose_result.is_ok()); + + let yaml = compose_result.unwrap(); + + // Verify Swarm-specific configurations in generated YAML + assert!(yaml.contains("replicas: 2")); + assert!(yaml.contains("replicas: 3")); + assert!(yaml.contains("driver: overlay")); + assert!(yaml.contains("attachable: true")); + assert!(yaml.contains("parallelism: 1")); + assert!(yaml.contains("delay: 10s")); + assert!(yaml.contains("tier: frontend")); + assert!(yaml.contains("tier: backend")); + assert!(yaml.contains("env: prod")); +} + +#[test] +fn test_mixed_compose_and_swarm_features() { + let input = r#" + DEPLOYMENT-ID MIXED_TEST + + SERVICES SECTION + + SERVICE standard_service + IMAGE-ID alpine:latest + PORT-MAPPING 8080 TO 80 + END SERVICE + + SERVICE swarm_service + IMAGE-ID nginx:alpine + REPLICAS 5 + UPDATE-CONFIG PARALLELISM 2 DELAY 5s FAILURE-ACTION PAUSE + SWARM-LABELS scaling="auto" priority="high" + RESOURCE-LIMITS CPU "1.0" MEMORY "1024M" + END SERVICE + "#; + + let result = 
parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + assert_eq!(athena_file.services.services.len(), 2); + + let standard_service = &athena_file.services.services[0]; + assert!(standard_service.swarm_config.is_none()); + + let swarm_service = &athena_file.services.services[1]; + assert!(swarm_service.swarm_config.is_some()); + + let swarm_config = swarm_service.swarm_config.as_ref().unwrap(); + assert_eq!(swarm_config.replicas, Some(5)); + assert!(swarm_config.update_config.is_some()); + assert!(swarm_config.labels.is_some()); +} + +// ========== ERROR HANDLING TESTS ========== + +#[test] +fn test_invalid_replica_negative_number() { + let input = r#" + DEPLOYMENT-ID INVALID_REPLICAS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + REPLICAS -5 + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Invalid replicas number") || error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_replica_extremely_large_number() { + let input = r#" + DEPLOYMENT-ID LARGE_REPLICAS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + REPLICAS 999999999999999999999 + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Invalid replicas number") || error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_replica_zero() { + let input = r#" + DEPLOYMENT-ID ZERO_REPLICAS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + REPLICAS 0 + END SERVICE + "#; + + // Zero replicas should parse but might be logically invalid for deployment + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + let service = &athena_file.services.services[0]; + let swarm_config = 
service.swarm_config.as_ref().unwrap(); + assert_eq!(swarm_config.replicas, Some(0)); +} + +#[test] +fn test_invalid_replica_non_numeric() { + let input = r#" + DEPLOYMENT-ID NON_NUMERIC_REPLICAS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + REPLICAS abc + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Invalid replicas number") || error_msg.contains("Parse error")); +} + +#[test] +fn test_swarm_labels_without_quotes_should_work() { + // Actually, this should work - the parser is flexible + let input = r#" + DEPLOYMENT-ID FLEXIBLE_LABELS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + SWARM-LABELS environment=production tier=backend + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + let service = &athena_file.services.services[0]; + let swarm_config = service.swarm_config.as_ref().unwrap(); + let labels = swarm_config.labels.as_ref().unwrap(); + assert_eq!(labels.get("environment"), Some(&"production".to_string())); + assert_eq!(labels.get("tier"), Some(&"backend".to_string())); +} + +#[test] +fn test_invalid_swarm_labels_malformed_missing_value() { + let input = r#" + DEPLOYMENT-ID MALFORMED_LABELS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + SWARM-LABELS environment="production" tier= + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Parse error")); +} + +#[test] +fn test_empty_swarm_labels() { + let input = r#" + DEPLOYMENT-ID EMPTY_LABELS_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + SWARM-LABELS + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + 
assert!(error_msg.contains("must contain at least one key=value pair") || error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_update_config_negative_parallelism() { + let input = r#" + DEPLOYMENT-ID INVALID_UPDATE_CONFIG_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + UPDATE-CONFIG PARALLELISM -1 DELAY 10s + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Invalid parallelism number") || error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_update_config_invalid_delay_format() { + let input = r#" + DEPLOYMENT-ID INVALID_DELAY_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + UPDATE-CONFIG PARALLELISM 1 DELAY invalid_time + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_failure_action() { + let input = r#" + DEPLOYMENT-ID INVALID_FAILURE_ACTION_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + UPDATE-CONFIG PARALLELISM 1 DELAY 10s FAILURE-ACTION INVALID_ACTION + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_max_failure_ratio() { + let input = r#" + DEPLOYMENT-ID INVALID_RATIO_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + UPDATE-CONFIG PARALLELISM 1 MAX-FAILURE-RATIO 1.5 + END SERVICE + "#; + + // Ratio > 1.0 should parse but might be logically invalid + let result = parse_athena_file(input); + assert!(result.is_ok()); + + let athena_file = result.unwrap(); + let service = &athena_file.services.services[0]; + let swarm_config = service.swarm_config.as_ref().unwrap(); + let update_config = 
swarm_config.update_config.as_ref().unwrap(); + assert_eq!(update_config.max_failure_ratio, Some(1.5)); +} + +#[test] +fn test_invalid_network_driver() { + let input = r#" + DEPLOYMENT-ID INVALID_DRIVER_TEST + + ENVIRONMENT SECTION + NETWORK-NAME test_net DRIVER INVALID_DRIVER + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Parse error")); +} + +#[test] +fn test_invalid_boolean_values() { + let input = r#" + DEPLOYMENT-ID INVALID_BOOLEAN_TEST + + ENVIRONMENT SECTION + NETWORK-NAME test_net DRIVER OVERLAY ATTACHABLE MAYBE + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Parse error")); +} + +#[test] +fn test_swarm_config_without_service_name() { + let input = r#" + DEPLOYMENT-ID MISSING_SERVICE_NAME_TEST + + SERVICES SECTION + + SERVICE + REPLICAS 3 + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_err()); + + let error_msg = format!("{}", result.unwrap_err()); + assert!(error_msg.contains("Parse error") || error_msg.contains("Missing service name")); +} + +#[test] +fn test_conflicting_swarm_and_compose_features() { + // This should parse successfully but could show warnings or conflicts + let input = r#" + DEPLOYMENT-ID CONFLICT_TEST + + SERVICES SECTION + + SERVICE web + IMAGE-ID nginx:alpine + PORT-MAPPING 80 TO 80 + REPLICAS 3 + UPDATE-CONFIG PARALLELISM 1 DELAY 10s + RESTART-POLICY always + END SERVICE + "#; + + let result = parse_athena_file(input); + assert!(result.is_ok()); + + // Verify both Compose and Swarm features are present + let athena_file = result.unwrap(); + let service = &athena_file.services.services[0]; + assert!(!service.ports.is_empty()); 
// Compose feature + assert!(service.swarm_config.is_some()); // Swarm feature + assert!(service.restart.is_some()); // Compose feature +} \ No newline at end of file From 4c7a88004e17cbcc8d615228c71613e4c37fc15b Mon Sep 17 00:00:00 2001 From: Jeck0v Date: Sun, 12 Oct 2025 17:39:15 +0200 Subject: [PATCH 2/2] :goal_net: Add catch error for docker swarm feature --- docker-compose.yml | 32 ++--- docs/FEATURES.md | 52 ++++++++ docs/TESTING.md | 212 +++++++++++++++++++++++++----- src/athena/parser/parser.rs | 105 +++++++++++---- tests/fixtures/mixed_features.ath | 41 ++++++ tests/fixtures/swarm_advanced.ath | 52 ++++++++ tests/fixtures/swarm_basic.ath | 27 ++++ tests/fixtures/swarm_errors.ath | 20 +++ 8 files changed, 469 insertions(+), 72 deletions(-) create mode 100644 tests/fixtures/mixed_features.ath create mode 100644 tests/fixtures/swarm_advanced.ath create mode 100644 tests/fixtures/swarm_basic.ath create mode 100644 tests/fixtures/swarm_errors.ath diff --git a/docker-compose.yml b/docker-compose.yml index 07ed4ac..53c7c3d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,40 +1,40 @@ # Generated by Athena v0.1.0 from test_no_conflicts deployment # Developed by UNFAIR Team: https://github.com/Jeck0v/Athena -# Generated: 2025-10-12 15:10:37 UTC +# Generated: 2025-10-12 15:38:11 UTC # Features: Intelligent defaults, optimized networking, enhanced health checks # Services: 3 configured with intelligent defaults services: - app2: - image: httpd:alpine - container_name: test-no-conflicts-app2 + app1: + image: nginx:alpine + container_name: test-no-conflicts-app1 ports: - - 8081:8000 - restart: unless-stopped + - 8080:80 + restart: always networks: - test_no_conflicts_network pull_policy: missing labels: athena.project: test_no_conflicts - athena.service: app2 + athena.type: proxy + athena.service: app1 athena.generated: 2025-10-12 - athena.type: generic - app1: - image: nginx:alpine - container_name: test-no-conflicts-app1 + app2: + image: httpd:alpine + 
container_name: test-no-conflicts-app2 ports: - - 8080:80 - restart: always + - 8081:8000 + restart: unless-stopped networks: - test_no_conflicts_network pull_policy: missing labels: - athena.service: app1 athena.project: test_no_conflicts - athena.type: proxy athena.generated: 2025-10-12 + athena.type: generic + athena.service: app2 app3: image: apache:latest @@ -47,8 +47,8 @@ services: pull_policy: missing labels: athena.project: test_no_conflicts - athena.service: app3 athena.type: proxy + athena.service: app3 athena.generated: 2025-10-12 networks: test_no_conflicts_network: diff --git a/docs/FEATURES.md b/docs/FEATURES.md index b995eb7..d6d58ba 100644 --- a/docs/FEATURES.md +++ b/docs/FEATURES.md @@ -656,6 +656,58 @@ END SERVICE | `PAUSE` | Stop updates on failure | Manual intervention needed | | `ROLLBACK` | Revert to previous version | Automatic recovery | +### Advanced Features + +**Comprehensive Error Handling:** +Athena provides robust validation for all Swarm directives with detailed error messages: + +```bash +# Invalid replica numbers are caught +REPLICAS -5 # Error: Invalid replicas number +REPLICAS 999999999999999999999 # Error: Number too large +REPLICAS abc # Error: Non-numeric value + +# Malformed configurations are detected +SWARM-LABELS environment="production" tier= # Error: Missing value +SWARM-LABELS # Error: Empty labels +UPDATE-CONFIG PARALLELISM -1 # Error: Negative parallelism +``` + +**Flexible Label Syntax:** +```athena +# Both quoted and unquoted values are supported +SWARM-LABELS environment="production" tier=backend +SWARM-LABELS environment=production tier="backend" +``` + +**Zero Downtime Deployments:** +```athena +SERVICE critical_service +IMAGE-ID app:latest +REPLICAS 5 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK MONITOR 60s +SWARM-LABELS critical="true" environment="production" +END SERVICE +``` + +### Best Practices + +**Production Deployment:** +1. **Always use ROLLBACK** for critical services +2. 
**Set appropriate delays** (10-30s) between updates +3. **Use low parallelism** (1-2) for database services +4. **Label services** with environment and tier information +5. **Monitor deployments** with appropriate timeouts + +**Development vs Production:** +```athena +# Development: Fast updates, higher parallelism +UPDATE-CONFIG PARALLELISM 3 DELAY 5s FAILURE-ACTION CONTINUE + +# Production: Safe updates, lower parallelism +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK MONITOR 60s +``` + ## Future Enhancements ### Planned Features diff --git a/docs/TESTING.md b/docs/TESTING.md index e6f8b2e..9479e8f 100644 --- a/docs/TESTING.md +++ b/docs/TESTING.md @@ -14,30 +14,48 @@ Our tests focus on **functionality over format**: ``` tests/ +├── integration_tests.rs # Main integration test entry point ├── integration/ -│ ├── cli_commands_test.rs # Test all CLI commands -│ ├── docker_compose_generation_test.rs # Full generation test -│ ├── error_handling_test.rs # Error case testing -│ ├── boilerplate/ # Modular boilerplate tests -│ │ ├── mod.rs # Common utilities and shared functions -│ │ ├── fastapi_tests.rs # FastAPI project generation tests -│ │ ├── flask_tests.rs # Flask project generation tests -│ │ ├── go_tests.rs # Go project generation tests -│ │ └── common_tests.rs # Common init command tests -│ └── structural/ # Organized structural tests -│ ├── mod.rs # Common utilities and module declarations -│ ├── basic_structure.rs # Basic YAML structure validation -│ ├── service_configuration.rs # Service config (env vars, ports, volumes) -│ ├── networking.rs # Networks and service dependencies -│ ├── policies.rs # Restart policies and health checks -│ ├── formatting.rs # YAML validity and formatting tests -│ └── complex_scenarios.rs # Complex microservices scenarios +│ ├── mod.rs # Module declarations and utilities +│ ├── cli_commands_test.rs # Test all CLI commands and options +│ ├── docker_compose_generation_test.rs # Full Docker Compose generation tests +│ 
├── error_handling_test.rs # Error case testing and validation +│ ├── enhanced_error_handling_test.rs # Advanced error scenarios with suggestions +│ ├── build_args_cli_tests.rs # Dockerfile integration and BUILD-ARGS tests +│ ├── swarm_features_test.rs # Docker Swarm support and error handling +│ ├── boilerplate/ # Modular boilerplate tests by framework +│ │ ├── mod.rs # Common utilities and shared functions +│ │ ├── fastapi_tests.rs # FastAPI project generation tests +│ │ ├── flask_tests.rs # Flask project generation tests +│ │ ├── go_tests.rs # Go project generation tests (Gin, Echo, Fiber) +│ │ └── common_tests.rs # Common init command tests and validation +│ └── structural/ # Organized structural tests (lightweight) +│ ├── mod.rs # Common utilities and module declarations +│ ├── basic_structure.rs # Basic YAML structure validation +│ ├── service_configuration.rs # Service config (env vars, ports, volumes) +│ ├── networking.rs # Networks and service dependencies +│ ├── policies.rs # Restart policies and health checks +│ ├── formatting.rs # YAML validity and formatting tests +│ ├── comments.rs # Comment parsing and edge cases +│ └── complex_scenarios.rs # Complex microservices scenarios ├── fixtures/ -│ ├── valid_simple.ath # Simple valid .ath file +│ ├── valid_simple.ath # Simple valid .ath file (3 services) │ ├── valid_complex_microservices.ath # Complex microservices setup +│ ├── minimal_valid.ath # Minimal valid configuration │ ├── invalid_syntax.ath # File with syntax errors -│ ├── circular_dependencies.ath # Circular dependency test -│ └── minimal_valid.ath # Minimal valid configuration +│ ├── circular_dependencies.ath # Circular dependency test cases +│ ├── port_conflicts.ath # Port conflict scenarios +│ ├── comments_test.ath # Comment parsing test cases +│ ├── build_args_basic.ath # Basic BUILD-ARGS examples +│ ├── build_args_complex.ath # Complex BUILD-ARGS scenarios +│ ├── build_args_invalid.ath # Invalid BUILD-ARGS for error testing +│ ├── 
build_args_mixed_valid_invalid.ath # Mixed valid/invalid BUILD-ARGS +│ ├── build_args_multiple_services.ath # Multiple services with BUILD-ARGS +│ ├── build_args_with_image.ath # BUILD-ARGS with IMAGE-ID precedence +│ ├── swarm_basic.ath # Basic Docker Swarm features +│ ├── swarm_advanced.ath # Advanced Swarm scenarios +│ ├── swarm_errors.ath # Swarm error testing base +│ └── mixed_features.ath # Mixed Compose + Swarm features ``` ## Running Tests @@ -78,6 +96,15 @@ cargo test --test integration_tests docker_compose_generation_test # Error handling tests cargo test --test integration_tests error_handling_test +# Enhanced error handling tests +cargo test --test integration_tests enhanced_error_handling_test + +# Build args CLI tests +cargo test --test integration_tests build_args_cli_tests + +# Docker Swarm feature tests +cargo test --test integration_tests swarm_features_test + # Boilerplate generation tests cargo test --test integration_tests boilerplate @@ -141,7 +168,31 @@ cargo test --test integration_tests structural --verbose - Tests permission and access errors - Validates error message quality -### 4. Boilerplate Generation Tests (`boilerplate/`) +### 4. Enhanced Error Handling Tests (`enhanced_error_handling_test.rs`) +- Advanced error scenarios with intelligent suggestions +- Tests enhanced port conflict detection with multiple services +- Validates service reference error messages with suggestions +- Tests validation error improvements +- Comprehensive error message quality assurance + +### 5. Build Args CLI Tests (`build_args_cli_tests.rs`) +- Tests Dockerfile integration and validation +- BUILD-ARGS parsing and generation +- Dockerfile ARG validation against Athena BUILD-ARGS +- Error handling for missing Dockerfiles +- Intelligent similarity suggestions for mismatched ARG names +- Tests precedence of IMAGE-ID over BUILD-ARGS + +### 6. 
Docker Swarm Feature Tests (`swarm_features_test.rs`) +- Comprehensive Docker Swarm support testing +- REPLICAS directive validation and error handling +- UPDATE-CONFIG options testing (PARALLELISM, DELAY, FAILURE-ACTION) +- SWARM-LABELS parsing with flexible syntax support +- Overlay network configuration testing +- Complete integration tests with Swarm + Compose features +- 13 dedicated error handling tests for edge cases + +### 7. Boilerplate Generation Tests (`boilerplate/`) - **Modular organization** by framework for better maintainability - **FastAPI tests** (`fastapi_tests.rs`): Basic init, PostgreSQL/MongoDB options, Docker/no-Docker modes - **Flask tests** (`flask_tests.rs`): Basic init, MySQL integration @@ -149,7 +200,7 @@ cargo test --test integration_tests structural --verbose - **Common tests** (`common_tests.rs`): Error handling, help commands, project validation - Tests project structure generation, configuration files, and dependency setup -### 5. Structural Tests (`structural/`) +### 8. 
Structural Tests (`structural/`) - **Organized by functional categories** for better maintainability - **Lightweight YAML validation** without heavy snapshots - Tests **structure and logic** rather than exact formatting @@ -162,6 +213,7 @@ cargo test --test integration_tests structural --verbose - `networking.rs`: Network configuration and service dependencies - `policies.rs`: Restart policies and health check configurations - `formatting.rs`: YAML validity and readable output formatting +- `comments.rs`: Comment parsing, multi-line comments, and edge cases - `complex_scenarios.rs`: Complex microservices architecture tests ## Test Fixtures @@ -174,6 +226,24 @@ cargo test --test integration_tests structural --verbose ### Invalid Test Files - **`invalid_syntax.ath`**: Contains various syntax errors - **`circular_dependencies.ath`**: Services with circular dependencies +- **`port_conflicts.ath`**: Port conflict scenarios for error testing + +### BUILD-ARGS Test Files +- **`build_args_basic.ath`**: Basic BUILD-ARGS examples +- **`build_args_complex.ath`**: Complex BUILD-ARGS scenarios with multiple services +- **`build_args_invalid.ath`**: Invalid BUILD-ARGS for error testing +- **`build_args_mixed_valid_invalid.ath`**: Mixed valid/invalid BUILD-ARGS scenarios +- **`build_args_multiple_services.ath`**: Multiple services with BUILD-ARGS +- **`build_args_with_image.ath`**: BUILD-ARGS with IMAGE-ID precedence testing + +### Comment Test Files +- **`comments_test.ath`**: Comment parsing test cases including multi-line and edge cases + +### Docker Swarm Test Files +- **`swarm_basic.ath`**: Basic Docker Swarm features (REPLICAS, UPDATE-CONFIG, SWARM-LABELS) +- **`swarm_advanced.ath`**: Advanced Swarm scenarios with all options and complex architectures +- **`swarm_errors.ath`**: Base fixture for Swarm error testing scenarios +- **`mixed_features.ath`**: Mixed Docker Compose and Swarm features in same deployment ## Dependencies @@ -257,24 +327,28 @@ Modular boilerplate tests 
organized by framework, each verifying: ### Test Performance & Statistics **Current test suite:** -- **Total tests**: 69 integration tests +- **Total tests**: 117 integration tests - **CLI tests**: 13 tests (command parsing, help, validation) - **Docker Compose generation**: 11 tests (YAML generation, validation, port conflict detection) - **Error handling**: 21 tests (comprehensive error scenarios including port conflicts) +- **Enhanced error handling**: 6 tests (advanced error scenarios with suggestions) +- **Build args CLI**: 8 tests (Dockerfile integration and validation) +- **Swarm features**: 21 tests (Docker Swarm support with comprehensive error handling) - **Boilerplate generation**: 14 tests (modular by framework) -- **Structural tests**: 13 tests (organized in 6 categories) +- **Structural tests**: 23 tests (organized in 7 categories including comments) - **Execution time**: < 1 second for structural tests - **Test organization**: Modular structure for easy maintenance **Test breakdown by category:** **Structural tests:** -- `basic_structure.rs`: 2 tests -- `service_configuration.rs`: 4 tests -- `networking.rs`: 2 tests -- `policies.rs`: 2 tests -- `formatting.rs`: 2 tests -- `complex_scenarios.rs`: 1 test +- `basic_structure.rs`: 2 tests (YAML structure, service count validation) +- `service_configuration.rs`: 4 tests (env vars, ports, volumes, service settings) +- `networking.rs`: 2 tests (network configuration, service dependencies) +- `policies.rs`: 2 tests (restart policies, health check configurations) +- `formatting.rs`: 2 tests (YAML validity, readable output formatting) +- `comments.rs`: 11 tests (comment parsing, edge cases, multi-line comments) +- `complex_scenarios.rs`: 1 test (complex microservices architecture) **Boilerplate tests:** - `fastapi_tests.rs`: 6 tests (basic, PostgreSQL, MongoDB, no-Docker, custom directory, help) @@ -313,8 +387,8 @@ SERVICE service1 PORT-MAPPING 8080 TO 80 END SERVICE -SERVICE service2 -PORT-MAPPING 8080 TO 
8000 // ❌ Conflict on host port 8080 +SERVICE service2 +PORT-MAPPING 8080 TO 8000 // Conflict on host port 8080 END SERVICE // Valid scenario - should succeed @@ -323,16 +397,88 @@ PORT-MAPPING 8080 TO 80 END SERVICE SERVICE service2 -PORT-MAPPING 8081 TO 8000 // ✅ Different host port +PORT-MAPPING 8081 TO 8000 // Different host port END SERVICE ``` +## Docker Swarm Feature Tests (**NEW**) + +### Overview +As of October 2025, Athena includes comprehensive Docker Swarm support with extensive error handling tests covering all edge cases and invalid configurations. + +### Test Coverage (`swarm_features_test.rs`) + +#### Success Scenarios (8 tests): +- **`test_swarm_replicas_parsing`**: Validates REPLICAS directive parsing +- **`test_swarm_update_config_parsing`**: Tests UPDATE-CONFIG with all options +- **`test_swarm_labels_parsing`**: Verifies SWARM-LABELS functionality +- **`test_overlay_network_parsing`**: Tests overlay network configuration +- **`test_complete_swarm_compose_generation`**: Full integration test +- **`test_mixed_compose_and_swarm_features`**: Mixed mode compatibility +- **`test_swarm_labels_without_quotes_should_work`**: Flexible label parsing +- **`test_conflicting_swarm_and_compose_features`**: Feature coexistence + +#### Error Handling Tests (13 tests): +**Replica Validation:** +- **`test_invalid_replica_negative_number`**: Catches `REPLICAS -5` +- **`test_invalid_replica_extremely_large_number`**: Catches overflow numbers +- **`test_invalid_replica_non_numeric`**: Catches `REPLICAS abc` +- **`test_invalid_replica_zero`**: Allows zero replicas (edge case) + +**UPDATE-CONFIG Validation:** +- **`test_invalid_update_config_negative_parallelism`**: Catches negative values +- **`test_invalid_update_config_invalid_delay_format`**: Validates time formats +- **`test_invalid_failure_action`**: Catches invalid failure actions +- **`test_invalid_max_failure_ratio`**: Validates ratio bounds + +**SWARM-LABELS Validation:** +- 
**`test_invalid_swarm_labels_malformed_missing_value`**: Catches missing values +- **`test_empty_swarm_labels`**: Catches empty label directives + +**Network and Service Validation:** +- **`test_invalid_network_driver`**: Catches invalid network drivers +- **`test_invalid_boolean_values`**: Validates boolean parameters +- **`test_swarm_config_without_service_name`**: Catches missing service names + +### Key Features Tested +1. **Comprehensive Error Detection**: All invalid configurations are caught +2. **Detailed Error Messages**: Specific error messages with line/column info +3. **Edge Case Handling**: Zero replicas, large numbers, malformed syntax +4. **Flexible Parsing**: Supports both quoted and unquoted label values +5. **Integration Testing**: Full Docker Compose generation with Swarm features + +### Example Test Scenarios +```rust +// Replica validation tests +fn test_invalid_replica_negative_number() { + let input = r#" + SERVICE web + IMAGE-ID nginx:alpine + REPLICAS -5 // Should fail + END SERVICE + "#; + assert!(parse_athena_file(input).is_err()); +} + +// Label validation tests +fn test_invalid_swarm_labels_malformed() { + let input = r#" + SERVICE web + SWARM-LABELS environment="prod" tier= // Missing value + END SERVICE + "#; + assert!(parse_athena_file(input).is_err()); +} +``` + ### Coverage Goals The test suite aims for >80% coverage on critical code paths: - CLI argument parsing - .ath file parsing and validation - Docker Compose generation +- Docker Swarm configuration generation - Port conflict detection and validation +- Swarm-specific directive validation - Error handling and reporting - Project initialization diff --git a/src/athena/parser/parser.rs b/src/athena/parser/parser.rs index c19cfb8..ebafa2b 100644 --- a/src/athena/parser/parser.rs +++ b/src/athena/parser/parser.rs @@ -301,10 +301,25 @@ fn parse_service_item(pair: pest::iterators::Pair, service: &mut Service) } Rule::swarm_replicas => { if let Some(replicas_pair) = 
inner_pair.into_inner().next() { - let replicas = replicas_pair.as_str().parse::() - .map_err(|_| AthenaError::ParseError( - EnhancedParseError::new("Invalid replicas number".to_string()) - ))?; + let replicas_str = replicas_pair.as_str(); + let (line, column) = replicas_pair.line_col(); + + let replicas = replicas_str.parse::() + .map_err(|_| { + let suggestion = if replicas_str.parse::().is_ok() && replicas_str.starts_with('-') { + "Replicas must be a positive number. Use a value like: 1, 2, 3, 5, etc.".to_string() + } else if replicas_str.len() > 10 { + "Replicas number is too large. Use a reasonable value like: 1, 2, 3, 5, 10, etc.".to_string() + } else { + format!("'{}' is not a valid number. Use a positive integer like: 1, 2, 3, 5, 10, etc.", replicas_str) + }; + + AthenaError::ParseError( + EnhancedParseError::new("Invalid replicas number".to_string()) + .with_location(line, column) + .with_suggestion(suggestion) + ) + })?; if service.swarm_config.is_none() { service.swarm_config = Some(SwarmConfig::new()); @@ -478,26 +493,55 @@ fn parse_update_config(pair: pest::iterators::Pair) -> AthenaResult() - .map_err(|_| AthenaError::ParseError( - EnhancedParseError::new("Invalid parallelism number".to_string()) - ))?); + let parallelism_str = value_pair.as_str(); + let (line, column) = value_pair.line_col(); + + update_config.parallelism = Some(parallelism_str.parse::() + .map_err(|_| { + let suggestion = if parallelism_str.parse::().is_ok() && parallelism_str.starts_with('-') { + "Parallelism must be a positive number. Use a value like: 1, 2, 3, 4, etc.".to_string() + } else { + format!("'{}' is not a valid number. 
Use a positive integer like: 1, 2, 3, 4, etc.", parallelism_str) + }; + + AthenaError::ParseError( + EnhancedParseError::new("Invalid parallelism number".to_string()) + .with_location(line, column) + .with_suggestion(suggestion) + ) + })?); } else if option_str.starts_with("DELAY") && value_pair.as_rule() == Rule::time_value { update_config.delay = Some(value_pair.as_str().to_string()); } else if option_str.starts_with("FAILURE-ACTION") && value_pair.as_rule() == Rule::failure_action { - update_config.failure_action = Some(match value_pair.as_str() { + let action_str = value_pair.as_str(); + let (line, column) = value_pair.line_col(); + + update_config.failure_action = Some(match action_str { "CONTINUE" => FailureAction::Continue, "PAUSE" => FailureAction::Pause, "ROLLBACK" => FailureAction::Rollback, - _ => FailureAction::Pause, + _ => { + return Err(AthenaError::ParseError( + EnhancedParseError::new(format!("Invalid failure action: {}", action_str)) + .with_location(line, column) + .with_suggestion("Valid failure actions are: CONTINUE, PAUSE, ROLLBACK".to_string()) + )); + } }); } else if option_str.starts_with("MONITOR") && value_pair.as_rule() == Rule::time_value { update_config.monitor = Some(value_pair.as_str().to_string()); } else if option_str.starts_with("MAX-FAILURE-RATIO") && value_pair.as_rule() == Rule::decimal_value { - update_config.max_failure_ratio = Some(value_pair.as_str().parse::() - .map_err(|_| AthenaError::ParseError( - EnhancedParseError::new("Invalid max failure ratio".to_string()) - ))?); + let ratio_str = value_pair.as_str(); + let (line, column) = value_pair.line_col(); + + update_config.max_failure_ratio = Some(ratio_str.parse::() + .map_err(|_| { + AthenaError::ParseError( + EnhancedParseError::new("Invalid max failure ratio".to_string()) + .with_location(line, column) + .with_suggestion("Max failure ratio must be a number between 0.0 and 1.0, e.g., 0.1, 0.3, 0.5".to_string()) + ) + })?); } } } @@ -508,27 +552,42 @@ fn 
parse_update_config(pair: pest::iterators::Pair) -> AthenaResult) -> AthenaResult> { let mut labels = HashMap::new(); + let (main_line, main_column) = pair.line_col(); for inner_pair in pair.into_inner() { if let Rule::swarm_label_pair = inner_pair.as_rule() { + let (line, column) = inner_pair.line_col(); let mut label_parts = inner_pair.into_inner(); - let key = label_parts.next() - .ok_or_else(|| AthenaError::ParseError(EnhancedParseError::new("Missing label key".to_string())))? - .as_str().to_string(); + let key_pair = label_parts.next() + .ok_or_else(|| AthenaError::ParseError( + EnhancedParseError::new("Missing label key".to_string()) + .with_location(line, column) + .with_suggestion("Use format: SWARM-LABELS key=value, e.g., SWARM-LABELS environment=\"production\" tier=\"backend\"".to_string()) + ))?; + let key = key_pair.as_str().to_string(); - let value = label_parts.next() - .ok_or_else(|| AthenaError::ParseError(EnhancedParseError::new("Missing label value".to_string())))? - .as_str(); + let value_pair = label_parts.next() + .ok_or_else(|| { + let (key_line, key_column) = key_pair.line_col(); + AthenaError::ParseError( + EnhancedParseError::new(format!("Missing value for label key '{}'", key)) + .with_location(key_line, key_column) + .with_suggestion(format!("Complete the label: {}=\"value\", e.g., {}=\"production\"", key, key)) + ) + })?; + let value = value_pair.as_str(); labels.insert(key, clean_string_value(value)); } } if labels.is_empty() { - return Err(AthenaError::ParseError(EnhancedParseError::new( - "SWARM-LABELS must contain at least one key=value pair".to_string() - ))); + return Err(AthenaError::ParseError( + EnhancedParseError::new("SWARM-LABELS must contain at least one key=value pair".to_string()) + .with_location(main_line, main_column) + .with_suggestion("Add at least one label: SWARM-LABELS environment=\"production\" tier=\"backend\"".to_string()) + )); } Ok(labels) diff --git a/tests/fixtures/mixed_features.ath 
b/tests/fixtures/mixed_features.ath new file mode 100644 index 0000000..05aff60 --- /dev/null +++ b/tests/fixtures/mixed_features.ath @@ -0,0 +1,41 @@ +DEPLOYMENT-ID MIXED_FEATURES_TEST + +ENVIRONMENT SECTION +NETWORK-NAME mixed_network DRIVER OVERLAY ATTACHABLE TRUE + +SERVICES SECTION + +// Traditional Docker Compose service +SERVICE development_api +IMAGE-ID node:18 +PORT-MAPPING 3000 TO 3000 +ENV-VARIABLE NODE_ENV="development" +RESTART-POLICY unless-stopped +END SERVICE + +// Swarm-enabled production service +SERVICE production_api +BUILD-ARGS NODE_ENV="production" CLUSTER_MODE="true" +REPLICAS 5 +UPDATE-CONFIG PARALLELISM 2 DELAY 10s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="api" environment="production" scaling="auto" +RESOURCE-LIMITS CPU "1.0" MEMORY "1024M" +DEPENDS-ON cache +END SERVICE + +// Mixed features service (both Compose and Swarm) +SERVICE hybrid_service +IMAGE-ID redis:7-alpine +PORT-MAPPING 6379 TO 6379 +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s +SWARM-LABELS tier="cache" environment="production" +RESTART-POLICY always +HEALTH-CHECK "redis-cli ping" +END SERVICE + +SERVICE cache +IMAGE-ID redis:7-alpine +REPLICAS 2 +SWARM-LABELS tier="cache" role="secondary" +END SERVICE \ No newline at end of file diff --git a/tests/fixtures/swarm_advanced.ath b/tests/fixtures/swarm_advanced.ath new file mode 100644 index 0000000..0c792d8 --- /dev/null +++ b/tests/fixtures/swarm_advanced.ath @@ -0,0 +1,52 @@ +DEPLOYMENT-ID SWARM_ADVANCED_TEST +VERSION-ID 2.1.0 + +ENVIRONMENT SECTION +NETWORK-NAME production_overlay DRIVER OVERLAY ATTACHABLE TRUE ENCRYPTED TRUE + +SERVICES SECTION + +SERVICE load_balancer +IMAGE-ID nginx:alpine +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 30s FAILURE-ACTION ROLLBACK MONITOR 60s MAX-FAILURE-RATIO 0.2 +SWARM-LABELS tier="proxy" environment="production" critical="true" +PORT-MAPPING 80 TO 80 +PORT-MAPPING 443 TO 443 +DEPENDS-ON api_gateway +END SERVICE + +SERVICE api_gateway +BUILD-ARGS NODE_ENV="production" 
API_VERSION="v2.1" +REPLICAS 5 +UPDATE-CONFIG PARALLELISM 2 DELAY 15s FAILURE-ACTION ROLLBACK MONITOR 30s +SWARM-LABELS tier="api" environment="production" scaling="auto" +DEPENDS-ON user_service +DEPENDS-ON order_service +END SERVICE + +SERVICE user_service +IMAGE-ID python:3.11-slim +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 1 DELAY 20s FAILURE-ACTION PAUSE +SWARM-LABELS tier="backend" service="users" environment="production" +RESOURCE-LIMITS CPU "0.5" MEMORY "512M" +DEPENDS-ON database +END SERVICE + +SERVICE order_service +IMAGE-ID java:17-jdk-slim +REPLICAS 4 +UPDATE-CONFIG PARALLELISM 2 DELAY 25s FAILURE-ACTION CONTINUE +SWARM-LABELS tier="backend" service="orders" environment="production" +RESOURCE-LIMITS CPU "1.0" MEMORY "1024M" +DEPENDS-ON database +END SERVICE + +SERVICE database +IMAGE-ID postgres:15 +REPLICAS 1 +UPDATE-CONFIG PARALLELISM 1 DELAY 60s FAILURE-ACTION PAUSE +SWARM-LABELS tier="data" role="primary" critical="true" environment="production" +RESOURCE-LIMITS CPU "2.0" MEMORY "2048M" +END SERVICE \ No newline at end of file diff --git a/tests/fixtures/swarm_basic.ath b/tests/fixtures/swarm_basic.ath new file mode 100644 index 0000000..9460b9f --- /dev/null +++ b/tests/fixtures/swarm_basic.ath @@ -0,0 +1,27 @@ +DEPLOYMENT-ID SWARM_BASIC_TEST + +ENVIRONMENT SECTION +NETWORK-NAME swarm_network DRIVER OVERLAY ATTACHABLE TRUE + +SERVICES SECTION + +SERVICE web +IMAGE-ID nginx:alpine +REPLICAS 3 +UPDATE-CONFIG PARALLELISM 1 DELAY 10s FAILURE-ACTION ROLLBACK +SWARM-LABELS tier="frontend" environment="production" +END SERVICE + +SERVICE api +IMAGE-ID python:3.11 +REPLICAS 2 +UPDATE-CONFIG PARALLELISM 1 DELAY 15s FAILURE-ACTION PAUSE +SWARM-LABELS tier="backend" environment="production" +DEPENDS-ON database +END SERVICE + +SERVICE database +IMAGE-ID postgres:15 +REPLICAS 1 +SWARM-LABELS tier="data" critical="true" +END SERVICE \ No newline at end of file diff --git a/tests/fixtures/swarm_errors.ath b/tests/fixtures/swarm_errors.ath new file mode 100644 index 
0000000..3bc640b --- /dev/null +++ b/tests/fixtures/swarm_errors.ath @@ -0,0 +1,20 @@ +// Fixture for testing Swarm error scenarios +DEPLOYMENT-ID SWARM_ERROR_TEST + +SERVICES SECTION + +// This fixture contains various potential error scenarios for testing +SERVICE valid_service +IMAGE-ID nginx:alpine +REPLICAS 3 +SWARM-LABELS environment="production" tier="frontend" +END SERVICE + +// Test cases handled in separate test functions: +// - REPLICAS -5 (negative numbers) +// - REPLICAS 999999999999999 (too large) +// - REPLICAS abc (non-numeric) +// - SWARM-LABELS tier= (missing values) +// - UPDATE-CONFIG PARALLELISM -1 (negative) +// - UPDATE-CONFIG FAILURE-ACTION INVALID (bad action) +// These are generated programmatically in tests \ No newline at end of file