diff --git a/crates/bashkit/tests/skills_fixtures/azure_discover_rank.sh b/crates/bashkit/tests/skills_fixtures/azure_discover_rank.sh new file mode 100755 index 00000000..e2102eac --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/azure_discover_rank.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# discover_and_rank.sh +# Discovers available capacity for an Azure OpenAI model across all regions, +# cross-references with existing projects and subscription quota, and outputs a ranked table. +# +# Usage: ./discover_and_rank.sh [min-capacity] +# Example: ./discover_and_rank.sh o3-mini 2025-01-31 200 +# +# Output: Ranked table of regions with capacity, quota, project counts, and match status +# +# NOTE: Backslash line continuations removed for bashkit parser compatibility. +# Original at: microsoft/github-copilot-for-azure + +set -euo pipefail + +MODEL_NAME="${1:?Usage: $0 [min-capacity]}" +MODEL_VERSION="${2:?Usage: $0 [min-capacity]}" +MIN_CAPACITY="${3:-0}" + +SUB_ID=$(az account show --query id -o tsv) + +# Query model capacity across all regions (GlobalStandard SKU) +CAPACITY_JSON=$(az rest --method GET --url "https://management.azure.com/subscriptions/${SUB_ID}/providers/Microsoft.CognitiveServices/modelCapacities" --url-parameters api-version=2024-10-01 modelFormat=OpenAI modelName="$MODEL_NAME" modelVersion="$MODEL_VERSION" 2>/dev/null) + +# Query all AI Services projects +PROJECTS_JSON=$(az rest --method GET --url "https://management.azure.com/subscriptions/${SUB_ID}/providers/Microsoft.CognitiveServices/accounts" --url-parameters api-version=2024-10-01 --query "value[?kind=='AIServices'].{name:name, location:location}" 2>/dev/null) + +# Get unique regions from capacity results for quota checking +REGIONS=$(echo "$CAPACITY_JSON" | jq -r '.value[] | select(.properties.skuName=="GlobalStandard" and .properties.availableCapacity > 0) | .location' | sort -u) + +# Build quota map: check subscription quota per region +declare -A QUOTA_MAP +for region in $REGIONS; do + 
usage_json=$(az cognitiveservices usage list --location "$region" --subscription "$SUB_ID" -o json 2>/dev/null || echo "[]") + quota_avail=$(echo "$usage_json" | jq -r --arg name "OpenAI.GlobalStandard.$MODEL_NAME" '[.[] | select(.name.value == $name)] | if length > 0 then .[0].limit - .[0].currentValue else 0 end') + QUOTA_MAP[$region]="${quota_avail:-0}" +done + +# Export quota map as JSON for Python +QUOTA_JSON="{" +first=true +for region in "${!QUOTA_MAP[@]}"; do + if [ "$first" = true ]; then first=false; else QUOTA_JSON+=","; fi + QUOTA_JSON+="\"$region\":${QUOTA_MAP[$region]}" +done +QUOTA_JSON+="}" + +# Combine, rank, and output using inline Python (available on all Azure CLI installs) +python3 -c " +import json, sys + +capacity = json.loads('''${CAPACITY_JSON}''') +projects = json.loads('''${PROJECTS_JSON}''') +quota = json.loads('''${QUOTA_JSON}''') +min_cap = int('${MIN_CAPACITY}') + +# Build capacity map (GlobalStandard only) +cap_map = {} +for item in capacity.get('value', []): + props = item.get('properties', {}) + if props.get('skuName') == 'GlobalStandard' and props.get('availableCapacity', 0) > 0: + region = item.get('location', '') + cap_map[region] = max(cap_map.get(region, 0), props['availableCapacity']) + +# Build project count map +proj_map = {} +proj_sample = {} +for p in (projects if isinstance(projects, list) else []): + loc = p.get('location', '') + proj_map[loc] = proj_map.get(loc, 0) + 1 + if loc not in proj_sample: + proj_sample[loc] = p.get('name', '') + +# Combine and rank +results = [] +for region, cap in cap_map.items(): + meets = cap >= min_cap + q = quota.get(region, 0) + quota_ok = q > 0 + results.append({ + 'region': region, + 'available': cap, + 'meets': meets, + 'projects': proj_map.get(region, 0), + 'sample': proj_sample.get(region, '(none)'), + 'quota': q, + 'quota_ok': quota_ok + }) + +# Sort: meets target first, then quota available, then by project count, then by capacity +results.sort(key=lambda x: (-x['meets'], 
-x['quota_ok'], -x['projects'], -x['available'])) + +# Output +total = len(results) +matching = sum(1 for r in results if r['meets']) +with_quota = sum(1 for r in results if r['meets'] and r['quota_ok']) +with_projects = sum(1 for r in results if r['meets'] and r['projects'] > 0) + +print(f'Model: {\"${MODEL_NAME}\"} v{\"${MODEL_VERSION}\"} | SKU: GlobalStandard | Min Capacity: {min_cap}K TPM') +print(f'Regions with capacity: {total} | Meets target: {matching} | With quota: {with_quota} | With projects: {with_projects}') +print() +print(f'{\"Region\":<22} {\"Available\":<12} {\"Meets Target\":<14} {\"Quota\":<12} {\"Projects\":<10} {\"Sample Project\"}') +print('-' * 100) +for r in results: + mark = 'YES' if r['meets'] else 'no' + q_display = f'{r[\"quota\"]}K' if r['quota'] > 0 else '0 (none)' + print(f'{r[\"region\"]:<22} {r[\"available\"]}K{\"\":.<10} {mark:<14} {q_display:<12} {r[\"projects\"]:<10} {r[\"sample\"]}') +" diff --git a/crates/bashkit/tests/skills_fixtures/azure_generate_url.sh b/crates/bashkit/tests/skills_fixtures/azure_generate_url.sh new file mode 100644 index 00000000..fd90c896 --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/azure_generate_url.sh @@ -0,0 +1,89 @@ +#!/bin/bash +# Generate Azure AI Foundry portal URL for a model deployment +# This script creates a direct clickable link to view a deployment in the Azure AI Foundry portal + +set -e + +# Function to display usage +usage() { + cat << EOF +Usage: $0 --subscription SUBSCRIPTION_ID --resource-group RESOURCE_GROUP \\ + --foundry-resource FOUNDRY_RESOURCE --project PROJECT_NAME \\ + --deployment DEPLOYMENT_NAME + +Generate Azure AI Foundry deployment URL + +Required arguments: + --subscription Azure subscription ID (GUID) + --resource-group Resource group name + --foundry-resource Foundry resource (account) name + --project Project name + --deployment Deployment name + +Example: + $0 --subscription d5320f9a-73da-4a74-b639-83efebc7bb6f \\ + --resource-group bani-host \\ + 
--foundry-resource banide-host-resource \\ + --project banide-host \\ + --deployment text-embedding-ada-002 +EOF + exit 1 +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --subscription) + SUBSCRIPTION_ID="$2" + shift 2 + ;; + --resource-group) + RESOURCE_GROUP="$2" + shift 2 + ;; + --foundry-resource) + FOUNDRY_RESOURCE="$2" + shift 2 + ;; + --project) + PROJECT_NAME="$2" + shift 2 + ;; + --deployment) + DEPLOYMENT_NAME="$2" + shift 2 + ;; + -h|--help) + usage + ;; + *) + echo "Unknown option: $1" + usage + ;; + esac +done + +# Validate required arguments +if [ -z "$SUBSCRIPTION_ID" ] || [ -z "$RESOURCE_GROUP" ] || [ -z "$FOUNDRY_RESOURCE" ] || [ -z "$PROJECT_NAME" ] || [ -z "$DEPLOYMENT_NAME" ]; then + echo "Error: Missing required arguments" + usage +fi + +# Convert subscription GUID to bytes (big-endian/string order) and encode as base64url +# Remove hyphens from GUID +GUID_HEX=$(echo "$SUBSCRIPTION_ID" | tr -d '-') + +# Convert hex string to bytes and base64 encode +# Using xxd to convert hex to binary, then base64 encode +ENCODED_SUB=$(echo "$GUID_HEX" | xxd -r -p | base64 | tr '+' '-' | tr '/' '_' | tr -d '=') + +# Build the encoded resource path +# Format: {encoded-sub-id},{resource-group},,{foundry-resource},{project-name} +# Note: Two commas between resource-group and foundry-resource +ENCODED_PATH="${ENCODED_SUB},${RESOURCE_GROUP},,${FOUNDRY_RESOURCE},${PROJECT_NAME}" + +# Build the full URL +BASE_URL="https://ai.azure.com/nextgen/r/" +DEPLOYMENT_PATH="/build/models/deployments/${DEPLOYMENT_NAME}/details" + +echo "${BASE_URL}${ENCODED_PATH}${DEPLOYMENT_PATH}" diff --git a/crates/bashkit/tests/skills_fixtures/azure_query_capacity.sh b/crates/bashkit/tests/skills_fixtures/azure_query_capacity.sh new file mode 100755 index 00000000..d1f8bcdd --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/azure_query_capacity.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# query_capacity.sh +# Queries available capacity for an Azure OpenAI model. 
+# +# Usage: +# ./query_capacity.sh [model-version] [region] [sku] +# Examples: +# ./query_capacity.sh o3-mini # List versions +# ./query_capacity.sh o3-mini 2025-01-31 # All regions +# ./query_capacity.sh o3-mini 2025-01-31 eastus2 # Specific region +# ./query_capacity.sh o3-mini 2025-01-31 "" Standard # Different SKU + +set -euo pipefail + +MODEL_NAME="${1:?Usage: $0 [model-version] [region] [sku]}" +MODEL_VERSION="${2:-}" +REGION="${3:-}" +SKU="${4:-GlobalStandard}" + +SUB_ID=$(az account show --query id -o tsv) + +# If no version, list available versions +if [ -z "$MODEL_VERSION" ]; then + LOC="${REGION:-eastus}" + echo "Available versions for $MODEL_NAME:" + az cognitiveservices model list --location "$LOC" --query "[?model.name=='$MODEL_NAME'].{Version:model.version, Format:model.format}" --output table 2>/dev/null + exit 0 +fi + +# Build URL +if [ -n "$REGION" ]; then + URL="https://management.azure.com/subscriptions/${SUB_ID}/providers/Microsoft.CognitiveServices/locations/${REGION}/modelCapacities" +else + URL="https://management.azure.com/subscriptions/${SUB_ID}/providers/Microsoft.CognitiveServices/modelCapacities" +fi + +# Query capacity +CAPACITY_RESULT=$(az rest --method GET --url "$URL" --url-parameters api-version=2024-10-01 modelFormat=OpenAI modelName="$MODEL_NAME" modelVersion="$MODEL_VERSION" 2>/dev/null) + +# Get regions with capacity +REGIONS_WITH_CAP=$(echo "$CAPACITY_RESULT" | jq -r ".value[] | select(.properties.skuName==\"$SKU\" and .properties.availableCapacity > 0) | .location" 2>/dev/null | sort -u) + +if [ -z "$REGIONS_WITH_CAP" ]; then + echo "No capacity found for $MODEL_NAME v$MODEL_VERSION ($SKU)" + echo "Try a different SKU or version." 
+ exit 0 +fi + +echo "Capacity: $MODEL_NAME v$MODEL_VERSION ($SKU)" +echo "" +printf "%-22s %-12s %-15s %s\n" "Region" "Available" "Quota" "SKU" +printf -- '-%.0s' {1..60}; echo "" + +for region in $REGIONS_WITH_CAP; do + avail=$(echo "$CAPACITY_RESULT" | jq -r ".value[] | select(.location==\"$region\" and .properties.skuName==\"$SKU\") | .properties.availableCapacity" 2>/dev/null | head -1) + + # Check subscription quota + usage_json=$(az cognitiveservices usage list --location "$region" --subscription "$SUB_ID" -o json 2>/dev/null || echo "[]") + quota_avail=$(echo "$usage_json" | jq -r --arg name "OpenAI.$SKU.$MODEL_NAME" '[.[] | select(.name.value == $name)] | if length > 0 then .[0].limit - .[0].currentValue else 0 end' 2>/dev/null || echo "?") + + if [ "$quota_avail" = "0" ]; then + quota_display="0 (none)" + elif [ "$quota_avail" = "?" ]; then + quota_display="?" + else + quota_display="${quota_avail}K" + fi + + printf "%-22s %-12s %-15s %s\n" "$region" "${avail}K TPM" "$quota_display" "$SKU" +done diff --git a/crates/bashkit/tests/skills_fixtures/helm_validate_chart.sh b/crates/bashkit/tests/skills_fixtures/helm_validate_chart.sh new file mode 100755 index 00000000..b8d5b0f3 --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/helm_validate_chart.sh @@ -0,0 +1,244 @@ +#!/bin/bash +set -e + +CHART_DIR="${1:-.}" +RELEASE_NAME="test-release" + +echo "═══════════════════════════════════════════════════════" +echo " Helm Chart Validation" +echo "═══════════════════════════════════════════════════════" +echo "" + +# Colors +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +success() { + echo -e "${GREEN}✓${NC} $1" +} + +warning() { + echo -e "${YELLOW}⚠${NC} $1" +} + +error() { + echo -e "${RED}✗${NC} $1" +} + +# Check if Helm is installed +if ! command -v helm &> /dev/null; then + error "Helm is not installed" + exit 1 +fi + +echo "📦 Chart directory: $CHART_DIR" +echo "" + +# 1. 
Check chart structure +echo "1️⃣ Checking chart structure..." +if [ ! -f "$CHART_DIR/Chart.yaml" ]; then + error "Chart.yaml not found" + exit 1 +fi +success "Chart.yaml exists" + +if [ ! -f "$CHART_DIR/values.yaml" ]; then + error "values.yaml not found" + exit 1 +fi +success "values.yaml exists" + +if [ ! -d "$CHART_DIR/templates" ]; then + error "templates/ directory not found" + exit 1 +fi +success "templates/ directory exists" +echo "" + +# 2. Lint the chart +echo "2️⃣ Linting chart..." +if helm lint "$CHART_DIR"; then + success "Chart passed lint" +else + error "Chart failed lint" + exit 1 +fi +echo "" + +# 3. Check Chart.yaml +echo "3️⃣ Validating Chart.yaml..." +CHART_NAME=$(grep "^name:" "$CHART_DIR/Chart.yaml" | awk '{print $2}') +CHART_VERSION=$(grep "^version:" "$CHART_DIR/Chart.yaml" | awk '{print $2}') +APP_VERSION=$(grep "^appVersion:" "$CHART_DIR/Chart.yaml" | awk '{print $2}' | tr -d '"') + +if [ -z "$CHART_NAME" ]; then + error "Chart name not found" + exit 1 +fi +success "Chart name: $CHART_NAME" + +if [ -z "$CHART_VERSION" ]; then + error "Chart version not found" + exit 1 +fi +success "Chart version: $CHART_VERSION" + +if [ -z "$APP_VERSION" ]; then + warning "App version not specified" +else + success "App version: $APP_VERSION" +fi +echo "" + +# 4. Test template rendering +echo "4️⃣ Testing template rendering..." +if helm template "$RELEASE_NAME" "$CHART_DIR" > /dev/null 2>&1; then + success "Templates rendered successfully" +else + error "Template rendering failed" + helm template "$RELEASE_NAME" "$CHART_DIR" + exit 1 +fi +echo "" + +# 5. Dry-run installation +echo "5️⃣ Testing dry-run installation..." +if helm install "$RELEASE_NAME" "$CHART_DIR" --dry-run --debug > /dev/null 2>&1; then + success "Dry-run installation successful" +else + error "Dry-run installation failed" + exit 1 +fi +echo "" + +# 6. Check for required Kubernetes resources +echo "6️⃣ Checking generated resources..." 
+MANIFESTS=$(helm template "$RELEASE_NAME" "$CHART_DIR") + +if echo "$MANIFESTS" | grep -q "kind: Deployment"; then + success "Deployment found" +else + warning "No Deployment found" +fi + +if echo "$MANIFESTS" | grep -q "kind: Service"; then + success "Service found" +else + warning "No Service found" +fi + +if echo "$MANIFESTS" | grep -q "kind: ServiceAccount"; then + success "ServiceAccount found" +else + warning "No ServiceAccount found" +fi +echo "" + +# 7. Check for security best practices +echo "7️⃣ Checking security best practices..." +if echo "$MANIFESTS" | grep -q "runAsNonRoot: true"; then + success "Running as non-root user" +else + warning "Not explicitly running as non-root" +fi + +if echo "$MANIFESTS" | grep -q "readOnlyRootFilesystem: true"; then + success "Using read-only root filesystem" +else + warning "Not using read-only root filesystem" +fi + +if echo "$MANIFESTS" | grep -q "allowPrivilegeEscalation: false"; then + success "Privilege escalation disabled" +else + warning "Privilege escalation not explicitly disabled" +fi +echo "" + +# 8. Check for resource limits +echo "8️⃣ Checking resource configuration..." +if echo "$MANIFESTS" | grep -q "resources:"; then + if echo "$MANIFESTS" | grep -q "limits:"; then + success "Resource limits defined" + else + warning "No resource limits defined" + fi + if echo "$MANIFESTS" | grep -q "requests:"; then + success "Resource requests defined" + else + warning "No resource requests defined" + fi +else + warning "No resources defined" +fi +echo "" + +# 9. Check for health probes +echo "9️⃣ Checking health probes..." +if echo "$MANIFESTS" | grep -q "livenessProbe:"; then + success "Liveness probe configured" +else + warning "No liveness probe found" +fi + +if echo "$MANIFESTS" | grep -q "readinessProbe:"; then + success "Readiness probe configured" +else + warning "No readiness probe found" +fi +echo "" + +# 10. 
Check dependencies +if [ -f "$CHART_DIR/Chart.yaml" ] && grep -q "^dependencies:" "$CHART_DIR/Chart.yaml"; then + echo "🔟 Checking dependencies..." + if helm dependency list "$CHART_DIR" > /dev/null 2>&1; then + success "Dependencies valid" + + if [ -f "$CHART_DIR/Chart.lock" ]; then + success "Chart.lock file present" + else + warning "Chart.lock file missing (run 'helm dependency update')" + fi + else + error "Dependencies check failed" + fi + echo "" +fi + +# 11. Check for values schema +if [ -f "$CHART_DIR/values.schema.json" ]; then + echo "1️⃣1️⃣ Validating values schema..." + success "values.schema.json present" + + # Validate schema if jq is available + if command -v jq &> /dev/null; then + if jq empty "$CHART_DIR/values.schema.json" 2>/dev/null; then + success "values.schema.json is valid JSON" + else + error "values.schema.json contains invalid JSON" + exit 1 + fi + fi + echo "" +fi + +# Summary +echo "═══════════════════════════════════════════════════════" +echo " Validation Complete!" +echo "═══════════════════════════════════════════════════════" +echo "" +echo "Chart: $CHART_NAME" +echo "Version: $CHART_VERSION" +if [ -n "$APP_VERSION" ]; then + echo "App Version: $APP_VERSION" +fi +echo "" +success "All validations passed!" 
+echo "" +echo "Next steps:" +echo " • helm package $CHART_DIR" +echo " • helm install my-release $CHART_DIR" +echo " • helm test my-release" +echo "" diff --git a/crates/bashkit/tests/skills_fixtures/jwt_test_setup.sh b/crates/bashkit/tests/skills_fixtures/jwt_test_setup.sh new file mode 100755 index 00000000..4265f4b5 --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/jwt_test_setup.sh @@ -0,0 +1,289 @@ +#!/bin/bash + +# Spring Security JWT Testing Script +# This script sets up a test environment and validates JWT implementation + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Configuration +BASE_URL=${BASE_URL:-http://localhost:8080} +TEST_EMAIL=${TEST_EMAIL:-test@example.com} +TEST_PASSWORD=${TEST_PASSWORD:-TestPassword123!} + +echo -e "${GREEN}=== Spring Security JWT Test Suite ===${NC}" +echo + +# Function to print colored output +print_status() { + if [ $1 -eq 0 ]; then + echo -e "${GREEN}✅ $2${NC}" + else + echo -e "${RED}❌ $2${NC}" + fi +} + +print_warning() { + echo -e "${YELLOW}⚠️ $1${NC}" +} + +print_info() { + echo -e "${GREEN}ℹ️ $1${NC}" +} + +# Function to check if service is running +check_service() { + curl -s -f "$BASE_URL/actuator/health" > /dev/null 2>&1 +} + +# Function to create test user +create_test_user() { + echo "Creating test user..." 
+ response=$(curl -s -w "%{http_code}" -o /tmp/user_response.json \ + -X POST "$BASE_URL/api/register" \ + -H "Content-Type: application/json" \ + -d "{ + \"email\": \"$TEST_EMAIL\", + \"password\": \"$TEST_PASSWORD\", + \"firstName\": \"Test\", + \"lastName\": \"User\" + }") + + http_code=${response: -3} + + if [ "$http_code" = "201" ]; then + print_status 0 "Test user created successfully" + return 0 + elif [ "$http_code" = "409" ]; then + print_status 0 "Test user already exists" + return 0 + else + print_status 1 "Failed to create test user (HTTP $http_code)" + cat /tmp/user_response.json + return 1 + fi +} + +# Function to authenticate and get JWT +authenticate() { + echo "Authenticating user..." + response=$(curl -s -w "%{http_code}" -o /tmp/auth_response.json \ + -X POST "$BASE_URL/api/auth/login" \ + -H "Content-Type: application/json" \ + -d "{ + \"email\": \"$TEST_EMAIL\", + \"password\": \"$TEST_PASSWORD\" + }") + + http_code=${response: -3} + + if [ "$http_code" = "200" ]; then + ACCESS_TOKEN=$(jq -r '.accessToken' /tmp/auth_response.json) + REFRESH_TOKEN=$(jq -r '.refreshToken' /tmp/auth_response.json) + print_status 0 "Authentication successful" + print_info "Access token: ${ACCESS_TOKEN:0:20}..." 
+ return 0 + else + print_status 1 "Authentication failed (HTTP $http_code)" + cat /tmp/auth_response.json + return 1 + fi +} + +# Function to test protected endpoint +test_protected_endpoint() { + local endpoint=$1 + local expected_status=$2 + local description=$3 + + if [ -z "$ACCESS_TOKEN" ]; then + print_status 1 "No access token available" + return 1 + fi + + response=$(curl -s -w "%{http_code}" -o /tmp/endpoint_response.json \ + -H "Authorization: Bearer $ACCESS_TOKEN" \ + "$BASE_URL$endpoint") + + http_code=${response: -3} + + if [ "$http_code" = "$expected_status" ]; then + print_status 0 "$description" + return 0 + else + print_status 1 "$description (Expected $expected_status, got $http_code)" + cat /tmp/endpoint_response.json + return 1 + fi +} + +# Function to test JWT validation +test_jwt_validation() { + echo "Testing JWT validation..." + + # Test valid token + test_protected_endpoint "/api/users/me" 200 "Valid JWT access" + + # Test expired token + expired_token="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiZXhwIjoxNjE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c" + + response=$(curl -s -w "%{http_code}" -o /tmp/expired_response.json \ + -H "Authorization: Bearer $expired_token" \ + "$BASE_URL/api/users/me") + + if [ "${response: -3}" = "401" ]; then + print_status 0 "Expired token rejected" + else + print_status 1 "Expired token accepted" + fi + + # Test invalid token + invalid_token="invalid.token.format" + + response=$(curl -s -w "%{http_code}" -o /tmp/invalid_response.json \ + -H "Authorization: Bearer $invalid_token" \ + "$BASE_URL/api/users/me") + + if [ "${response: -3}" = "401" ]; then + print_status 0 "Invalid token rejected" + else + print_status 1 "Invalid token accepted" + fi + + # Test no token + response=$(curl -s -w "%{http_code}" -o /tmp/no_token_response.json \ + "$BASE_URL/api/users/me") + + if [ "${response: -3}" = "401" ]; then + print_status 0 "No token rejected" + else + print_status 1 "No token 
accepted" + fi +} + +# Function to test refresh token +test_refresh_token() { + echo "Testing refresh token..." + + if [ -z "$REFRESH_TOKEN" ]; then + print_status 1 "No refresh token available" + return 1 + fi + + # Use refresh token to get new access token + response=$(curl -s -w "%{http_code}" -o /tmp/refresh_response.json \ + -X POST "$BASE_URL/api/auth/refresh" \ + -H "Content-Type: application/json" \ + -d "{\"refreshToken\": \"$REFRESH_TOKEN\"}") + + http_code=${response: -3} + + if [ "$http_code" = "200" ]; then + NEW_ACCESS_TOKEN=$(jq -r '.accessToken' /tmp/refresh_response.json) + print_status 0 "Refresh token successful" + print_info "New access token: ${NEW_ACCESS_TOKEN:0:20}..." + + # Test new token + response=$(curl -s -w "%{http_code}" -o /tmp/new_token_test.json \ + -H "Authorization: Bearer $NEW_ACCESS_TOKEN" \ + "$BASE_URL/api/users/me") + + if [ "${response: -3}" = "200" ]; then + print_status 0 "New access token works" + else + print_status 1 "New access token failed" + fi + else + print_status 1 "Refresh token failed (HTTP $http_code)" + cat /tmp/refresh_response.json + fi +} + +# Function to test logout +test_logout() { + echo "Testing logout..." 
+ + if [ -z "$ACCESS_TOKEN" ]; then + print_status 1 "No access token available" + return 1 + fi + + # Logout + response=$(curl -s -w "%{http_code}" -o /tmp/logout_response.json \ + -X POST "$BASE_URL/api/auth/logout" \ + -H "Authorization: Bearer $ACCESS_TOKEN") + + http_code=${response: -3} + + if [ "$http_code" = "200" ]; then + print_status 0 "Logout successful" + + # Test token is no longer valid + response=$(curl -s -w "%{http_code}" -o /tmp/post_logout.json \ + -H "Authorization: Bearer $ACCESS_TOKEN" \ + "$BASE_URL/api/users/me") + + if [ "${response: -3}" = "401" ]; then + print_status 0 "Token invalidated after logout" + else + print_status 1 "Token still valid after logout" + fi + else + print_status 1 "Logout failed (HTTP $http_code)" + cat /tmp/logout_response.json + fi +} + +# Main test execution +main() { + echo "Starting JWT security tests..." + echo "Base URL: $BASE_URL" + echo "Test Email: $TEST_EMAIL" + echo + + # Check if service is running + if ! check_service; then + print_status 1 "Service is not running at $BASE_URL" + print_info "Please start the application before running tests" + exit 1 + fi + + print_status 0 "Service is running" + + # Run tests + echo + echo "=== Setup Phase ===" + create_test_user + authenticate + + echo + echo "=== Authentication Tests ===" + test_jwt_validation + test_refresh_token + test_logout + + echo + echo "=== Test Summary ===" + echo "All tests completed. Review the output above for any issues." + echo + echo "For detailed debugging:" + echo "1. Check application logs: tail -f logs/application.log" + echo "2. Use debug endpoint: curl -H \"X-Auth-Debug: true\" $BASE_URL/api/users/me" + echo "3. 
Verify JWT content at: https://jwt.io/" +} + +# Cleanup function +cleanup() { + rm -f /tmp/*.json +} + +# Set up cleanup +trap cleanup EXIT + +# Run main function +main "$@" \ No newline at end of file diff --git a/crates/bashkit/tests/skills_fixtures/stitch_download_asset.sh b/crates/bashkit/tests/skills_fixtures/stitch_download_asset.sh new file mode 100755 index 00000000..497d4e3a --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/stitch_download_asset.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Download Stitch screen asset with proper handling of Google Cloud Storage URLs +# Usage: ./download-stitch-asset.sh "https://storage.googleapis.com/..." "output-path.png" + +set -e + +if [ $# -ne 2 ]; then + echo "Usage: $0 " + echo "Example: $0 'https://storage.googleapis.com/stitch/screenshot.png' 'assets/screen.png'" + exit 1 +fi + +DOWNLOAD_URL="$1" +OUTPUT_PATH="$2" + +# Create directory if it doesn't exist +OUTPUT_DIR=$(dirname "$OUTPUT_PATH") +mkdir -p "$OUTPUT_DIR" + +echo "Downloading from: $DOWNLOAD_URL" +echo "Saving to: $OUTPUT_PATH" + +# Use curl with follow redirects and authentication handling +curl -L -o "$OUTPUT_PATH" "$DOWNLOAD_URL" + +if [ $? -eq 0 ]; then + echo "✓ Successfully downloaded to $OUTPUT_PATH" + + # Display file size for verification + if command -v stat &> /dev/null; then + FILE_SIZE=$(stat -f%z "$OUTPUT_PATH" 2>/dev/null || stat -c%s "$OUTPUT_PATH" 2>/dev/null) + echo " File size: $FILE_SIZE bytes" + fi +else + echo "✗ Download failed" + exit 1 +fi diff --git a/crates/bashkit/tests/skills_fixtures/stitch_fetch.sh b/crates/bashkit/tests/skills_fixtures/stitch_fetch.sh new file mode 100755 index 00000000..44721444 --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/stitch_fetch.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +URL=$1 +OUTPUT=$2 +if [ -z "$URL" ] || [ -z "$OUTPUT" ]; then + echo "Usage: $0 " + exit 1 +fi +echo "Initiating high-reliability fetch for Stitch HTML..." +curl -L -f -sS --connect-timeout 10 --compressed "$URL" -o "$OUTPUT" +if [ $? -eq 0 ]; then + echo "✅ Successfully retrieved HTML at: $OUTPUT" + exit 0 +else + echo "❌ Error: Failed to retrieve content. Check TLS/SNI or URL expiration." + exit 1 +fi diff --git a/crates/bashkit/tests/skills_fixtures/stitch_verify_setup.sh b/crates/bashkit/tests/skills_fixtures/stitch_verify_setup.sh new file mode 100644 index 00000000..6bb2dd1e --- /dev/null +++ b/crates/bashkit/tests/skills_fixtures/stitch_verify_setup.sh @@ -0,0 +1,134 @@ +#!/usr/bin/env bash +# shadcn/ui Setup Verification Script +# Validates that a project is correctly configured for shadcn/ui + +set -e + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo "🔍 Verifying shadcn/ui setup..." 
+echo "" + +# Check if components.json exists +if [ -f "components.json" ]; then + echo -e "${GREEN}✓${NC} components.json found" +else + echo -e "${RED}✗${NC} components.json not found" + echo -e " ${YELLOW}Run:${NC} npx shadcn@latest init" + exit 1 +fi + +# Check if tailwind.config exists +if [ -f "tailwind.config.js" ] || [ -f "tailwind.config.ts" ]; then + echo -e "${GREEN}✓${NC} Tailwind config found" +else + echo -e "${RED}✗${NC} tailwind.config.js not found" + echo -e " ${YELLOW}Install Tailwind:${NC} npm install -D tailwindcss postcss autoprefixer" + exit 1 +fi + +# Check if tsconfig.json has path aliases +if [ -f "tsconfig.json" ]; then + if grep -q '"@/\*"' tsconfig.json; then + echo -e "${GREEN}✓${NC} Path aliases configured in tsconfig.json" + else + echo -e "${YELLOW}⚠${NC} Path aliases not found in tsconfig.json" + echo " Add to compilerOptions.paths:" + echo ' "@/*": ["./src/*"]' + fi +else + echo -e "${YELLOW}⚠${NC} tsconfig.json not found (TypeScript not configured)" +fi + +# Check if globals.css or equivalent exists +if [ -f "src/index.css" ] || [ -f "src/globals.css" ] || [ -f "app/globals.css" ]; then + echo -e "${GREEN}✓${NC} Global CSS file found" + + # Check for Tailwind directives + CSS_FILE=$(find . 
-name "globals.css" -o -name "index.css" | head -n 1)
+    if grep -q "@tailwind base" "$CSS_FILE"; then
+        echo -e "${GREEN}✓${NC} Tailwind directives present"
+    else
+        echo -e "${RED}✗${NC} Tailwind directives missing"
+        echo "  Add to your CSS file:"
+        echo "  @tailwind base;"
+        echo "  @tailwind components;"
+        echo "  @tailwind utilities;"
+    fi
+
+    # Check for CSS variables
+    if grep -q "^:root" "$CSS_FILE" || grep -q "@layer base" "$CSS_FILE"; then
+        echo -e "${GREEN}✓${NC} CSS variables defined"
+    else
+        echo -e "${YELLOW}⚠${NC} CSS variables not found"
+        echo "  shadcn/ui requires CSS variables for theming"
+    fi
+else
+    echo -e "${RED}✗${NC} Global CSS file not found"
+fi
+
+# Check if components/ui directory exists
+if [ -d "src/components/ui" ] || [ -d "components/ui" ]; then
+    echo -e "${GREEN}✓${NC} components/ui directory exists"
+
+    # Count components
+    COMPONENT_COUNT=$(find . -path "*/components/ui/*.tsx" -o -path "*/components/ui/*.jsx" | wc -l)
+    echo -e "  ${COMPONENT_COUNT} components installed"
+else
+    echo -e "${YELLOW}⚠${NC} components/ui directory not found"
+    echo "  Add your first component: npx shadcn@latest add button"
+fi
+
+# Check if lib/utils exists
+if [ -f "src/lib/utils.ts" ] || [ -f "lib/utils.ts" ]; then
+    echo -e "${GREEN}✓${NC} lib/utils.ts exists"
+
+    # Check for cn function
+    UTILS_FILE=$(find . -name "utils.ts" | grep "lib" | head -n 1)
+    if grep -q "export function cn" "$UTILS_FILE"; then
+        echo -e "${GREEN}✓${NC} cn() utility function present"
+    else
+        echo -e "${RED}✗${NC} cn() utility function missing"
+    fi
+else
+    echo -e "${RED}✗${NC} lib/utils.ts not found"
+fi
+
+# Check package.json dependencies
+if [ -f "package.json" ]; then
+    echo ""
+    echo "📦 Checking dependencies..."
+
+    # Required dependencies
+    REQUIRED_DEPS=("react" "tailwindcss")
+    RECOMMENDED_DEPS=("class-variance-authority" "clsx" "tailwind-merge" "tailwindcss-animate")
+
+    for dep in "${REQUIRED_DEPS[@]}"; do
+        if grep -q "\"$dep\"" package.json; then
+            echo -e "${GREEN}✓${NC} $dep installed"
+        else
+            echo -e "${RED}✗${NC} $dep not installed"
+        fi
+    done
+
+    echo ""
+    echo "Recommended dependencies:"
+    for dep in "${RECOMMENDED_DEPS[@]}"; do
+        if grep -q "\"$dep\"" package.json; then
+            echo -e "${GREEN}✓${NC} $dep installed"
+        else
+            echo -e "${YELLOW}⚠${NC} $dep not installed (recommended)"
+        fi
+    done
+fi
+
+echo ""
+echo -e "${GREEN}✓${NC} Setup verification complete!"
+echo ""
+echo "Next steps:"
+echo "  1. Add components: npx shadcn@latest add [component]"
+echo "  2. View catalog: npx shadcn@latest add --help"
+echo "  3. Browse docs: https://ui.shadcn.com"
diff --git a/crates/bashkit/tests/skills_fixtures/superpowers_find_polluter.sh b/crates/bashkit/tests/skills_fixtures/superpowers_find_polluter.sh
new file mode 100755
index 00000000..1d71c560
--- /dev/null
+++ b/crates/bashkit/tests/skills_fixtures/superpowers_find_polluter.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+# Bisection script to find which test creates unwanted files/state
+# Usage: ./find-polluter.sh <pollution-check> <test-pattern>
+# Example: ./find-polluter.sh '.git' 'src/**/*.test.ts'
+
+set -e
+
+if [ $# -ne 2 ]; then
+    echo "Usage: $0 <pollution-check> <test-pattern>"
+    echo "Example: $0 '.git' 'src/**/*.test.ts'"
+    exit 1
+fi
+
+POLLUTION_CHECK="$1"
+TEST_PATTERN="$2"
+
+echo "🔍 Searching for test that creates: $POLLUTION_CHECK"
+echo "Test pattern: $TEST_PATTERN"
+echo ""
+
+# Get list of test files
+TEST_FILES=$(find . -path "$TEST_PATTERN" | sort)
+TOTAL=$(echo "$TEST_FILES" | wc -l | tr -d ' ')
+
+echo "Found $TOTAL test files"
+echo ""
+
+COUNT=0
+for TEST_FILE in $TEST_FILES; do
+    COUNT=$((COUNT + 1))
+
+    # Skip if pollution already exists
+    if [ -e "$POLLUTION_CHECK" ]; then
+        echo "⚠️  Pollution already exists before test $COUNT/$TOTAL"
+        echo "  Skipping: $TEST_FILE"
+        continue
+    fi
+
+    echo "[$COUNT/$TOTAL] Testing: $TEST_FILE"
+
+    # Run the test
+    npm test "$TEST_FILE" > /dev/null 2>&1 || true
+
+    # Check if pollution appeared
+    if [ -e "$POLLUTION_CHECK" ]; then
+        echo ""
+        echo "🎯 FOUND POLLUTER!"
+        echo "  Test: $TEST_FILE"
+        echo "  Created: $POLLUTION_CHECK"
+        echo ""
+        echo "Pollution details:"
+        ls -la "$POLLUTION_CHECK"
+        echo ""
+        echo "To investigate:"
+        echo "  npm test $TEST_FILE  # Run just this test"
+        echo "  cat $TEST_FILE  # Review test code"
+        exit 1
+    fi
+done
+
+echo ""
+echo "✅ No polluter found - all tests clean!"
+exit 0
diff --git a/crates/bashkit/tests/skills_fixtures/vercel_deploy.sh b/crates/bashkit/tests/skills_fixtures/vercel_deploy.sh
new file mode 100755
index 00000000..de46e712
--- /dev/null
+++ b/crates/bashkit/tests/skills_fixtures/vercel_deploy.sh
@@ -0,0 +1,249 @@
+#!/bin/bash
+
+# Vercel Deployment Script (via claimable deploy endpoint)
+# Usage: ./deploy.sh [project-path]
+# Returns: JSON with previewUrl, claimUrl, deploymentId, projectId
+
+set -e
+
+DEPLOY_ENDPOINT="https://claude-skills-deploy.vercel.com/api/deploy"
+
+# Detect framework from package.json
+detect_framework() {
+    local pkg_json="$1"
+
+    if [ !
-f "$pkg_json" ]; then
+        echo "null"
+        return
+    fi
+
+    local content=$(cat "$pkg_json")
+
+    # Helper to check if a package exists in dependencies or devDependencies
+    has_dep() {
+        echo "$content" | grep -q "\"$1\""
+    }
+
+    # Order matters - check more specific frameworks first
+
+    # Blitz
+    if has_dep "blitz"; then echo "blitzjs"; return; fi
+
+    # Next.js
+    if has_dep "next"; then echo "nextjs"; return; fi
+
+    # Gatsby
+    if has_dep "gatsby"; then echo "gatsby"; return; fi
+
+    # Remix
+    if has_dep "@remix-run/"; then echo "remix"; return; fi
+
+    # React Router (v7 framework mode)
+    if has_dep "@react-router/"; then echo "react-router"; return; fi
+
+    # TanStack Start
+    if has_dep "@tanstack/start"; then echo "tanstack-start"; return; fi
+
+    # Astro
+    if has_dep "astro"; then echo "astro"; return; fi
+
+    # Hydrogen (Shopify)
+    if has_dep "@shopify/hydrogen"; then echo "hydrogen"; return; fi
+
+    # SvelteKit
+    if has_dep "@sveltejs/kit"; then echo "sveltekit-1"; return; fi
+
+    # Svelte (standalone)
+    if has_dep "svelte"; then echo "svelte"; return; fi
+
+    # Nuxt
+    if has_dep "nuxt"; then echo "nuxtjs"; return; fi
+
+    # Vue with Vitepress
+    if has_dep "vitepress"; then echo "vitepress"; return; fi
+
+    # Vue with Vuepress
+    if has_dep "vuepress"; then echo "vuepress"; return; fi
+
+    # Gridsome
+    if has_dep "gridsome"; then echo "gridsome"; return; fi
+
+    # SolidStart
+    if has_dep "@solidjs/start"; then echo "solidstart-1"; return; fi
+
+    # Docusaurus
+    if has_dep "@docusaurus/core"; then echo "docusaurus-2"; return; fi
+
+    # RedwoodJS
+    if has_dep "@redwoodjs/"; then echo "redwoodjs"; return; fi
+
+    # Hexo
+    if has_dep "hexo"; then echo "hexo"; return; fi
+
+    # Eleventy
+    if has_dep "@11ty/eleventy"; then echo "eleventy"; return; fi
+
+    # Angular / Ionic Angular
+    if has_dep "@ionic/angular"; then echo "ionic-angular"; return; fi
+    if has_dep "@angular/core"; then echo "angular"; return; fi
+
+    # Ionic React
+    if has_dep "@ionic/react"; then echo "ionic-react"; return; fi
+
+    # Create React App
+    if has_dep "react-scripts"; then echo "create-react-app"; return; fi
+
+    # Ember
+    if has_dep "ember-cli" || has_dep "ember-source"; then echo "ember"; return; fi
+
+    # Dojo
+    if has_dep "@dojo/framework"; then echo "dojo"; return; fi
+
+    # Polymer
+    if has_dep "@polymer/"; then echo "polymer"; return; fi
+
+    # Preact
+    if has_dep "preact"; then echo "preact"; return; fi
+
+    # Stencil
+    if has_dep "@stencil/core"; then echo "stencil"; return; fi
+
+    # UmiJS
+    if has_dep "umi"; then echo "umijs"; return; fi
+
+    # Sapper (legacy Svelte)
+    if has_dep "sapper"; then echo "sapper"; return; fi
+
+    # Saber
+    if has_dep "saber"; then echo "saber"; return; fi
+
+    # Sanity
+    if has_dep "sanity"; then echo "sanity-v3"; return; fi
+    if has_dep "@sanity/"; then echo "sanity"; return; fi
+
+    # Storybook
+    if has_dep "@storybook/"; then echo "storybook"; return; fi
+
+    # NestJS
+    if has_dep "@nestjs/core"; then echo "nestjs"; return; fi
+
+    # Elysia
+    if has_dep "elysia"; then echo "elysia"; return; fi
+
+    # Hono
+    if has_dep "hono"; then echo "hono"; return; fi
+
+    # Fastify
+    if has_dep "fastify"; then echo "fastify"; return; fi
+
+    # h3
+    if has_dep "h3"; then echo "h3"; return; fi
+
+    # Nitro
+    if has_dep "nitropack"; then echo "nitro"; return; fi
+
+    # Express
+    if has_dep "express"; then echo "express"; return; fi
+
+    # Vite (generic - check last among JS frameworks)
+    if has_dep "vite"; then echo "vite"; return; fi
+
+    # Parcel
+    if has_dep "parcel"; then echo "parcel"; return; fi
+
+    # No framework detected
+    echo "null"
+}
+
+# Parse arguments
+INPUT_PATH="${1:-.}"
+
+# Create temp directory for packaging
+TEMP_DIR=$(mktemp -d)
+TARBALL="$TEMP_DIR/project.tgz"
+CLEANUP_TEMP=true
+
+cleanup() {
+    if [ "$CLEANUP_TEMP" = true ]; then
+        rm -rf "$TEMP_DIR"
+    fi
+}
+trap cleanup EXIT
+
+echo "Preparing deployment..." >&2
+
+# Check if input is a .tgz file or a directory
+FRAMEWORK="null"
+
+if [ -f "$INPUT_PATH" ] && [[ "$INPUT_PATH" == *.tgz ]]; then
+    # Input is already a tarball, use it directly
+    echo "Using provided tarball..." >&2
+    TARBALL="$INPUT_PATH"
+    CLEANUP_TEMP=false
+    # Can't detect framework from tarball, leave as null
+elif [ -d "$INPUT_PATH" ]; then
+    # Input is a directory, need to tar it
+    PROJECT_PATH=$(cd "$INPUT_PATH" && pwd)
+
+    # Detect framework from package.json
+    FRAMEWORK=$(detect_framework "$PROJECT_PATH/package.json")
+
+    # Check if this is a static HTML project (no package.json)
+    if [ ! -f "$PROJECT_PATH/package.json" ]; then
+        # Find HTML files in root
+        HTML_FILES=$(find "$PROJECT_PATH" -maxdepth 1 -name "*.html" -type f)
+        HTML_COUNT=$(echo "$HTML_FILES" | grep -c . || echo 0)
+
+        # If there's exactly one HTML file and it's not index.html, rename it
+        if [ "$HTML_COUNT" -eq 1 ]; then
+            HTML_FILE=$(echo "$HTML_FILES" | head -1)
+            BASENAME=$(basename "$HTML_FILE")
+            if [ "$BASENAME" != "index.html" ]; then
+                echo "Renaming $BASENAME to index.html..." >&2
+                mv "$HTML_FILE" "$PROJECT_PATH/index.html"
+            fi
+        fi
+    fi
+
+    # Create tarball of the project (excluding node_modules and .git)
+    echo "Creating deployment package..." >&2
+    tar -czf "$TARBALL" -C "$PROJECT_PATH" --exclude='node_modules' --exclude='.git' .
+else
+    echo "Error: Input must be a directory or a .tgz file" >&2
+    exit 1
+fi
+
+if [ "$FRAMEWORK" != "null" ]; then
+    echo "Detected framework: $FRAMEWORK" >&2
+fi
+
+# Deploy
+echo "Deploying..." >&2
+RESPONSE=$(curl -s -X POST "$DEPLOY_ENDPOINT" -F "file=@$TARBALL" -F "framework=$FRAMEWORK")
+
+# Check for error in response
+if echo "$RESPONSE" | grep -q '"error"'; then
+    ERROR_MSG=$(echo "$RESPONSE" | grep -o '"error":"[^"]*"' | cut -d'"' -f4)
+    echo "Error: $ERROR_MSG" >&2
+    exit 1
+fi
+
+# Extract URLs from response
+PREVIEW_URL=$(echo "$RESPONSE" | grep -o '"previewUrl":"[^"]*"' | cut -d'"' -f4)
+CLAIM_URL=$(echo "$RESPONSE" | grep -o '"claimUrl":"[^"]*"' | cut -d'"' -f4)
+
+if [ -z "$PREVIEW_URL" ]; then
+    echo "Error: Could not extract preview URL from response" >&2
+    echo "$RESPONSE" >&2
+    exit 1
+fi
+
+echo "" >&2
+echo "Deployment successful!" >&2
+echo "" >&2
+echo "Preview URL: $PREVIEW_URL" >&2
+echo "Claim URL: $CLAIM_URL" >&2
+echo "" >&2
+
+# Output JSON for programmatic use
+echo "$RESPONSE"
diff --git a/crates/bashkit/tests/skills_tests.rs b/crates/bashkit/tests/skills_tests.rs
new file mode 100644
index 00000000..92cb2e6c
--- /dev/null
+++ b/crates/bashkit/tests/skills_tests.rs
@@ -0,0 +1,634 @@
+//! Integration tests for real-world skills.sh scripts.
+//!
+//! These tests verify bashkit can parse and execute actual bash scripts
+//! extracted from the top skills on skills.sh. External binaries
+//! (az, helm, npm, curl) are stubbed via custom builtins so we test bash
+//! feature coverage without requiring real infrastructure.
+//!
+//! Fixtures live in `tests/skills_fixtures/*.sh` — copies from these repos:
+//!
+//! | Fixture                      | Source repo                          |
+//! |------------------------------|--------------------------------------|
+//! | azure_generate_url.sh        | (deploy-model/scripts/)              |
+//! | azure_discover_rank.sh       | (capacity/scripts/)                  |
+//! | azure_query_capacity.sh      | (capacity/scripts/)                  |
+//! | vercel_deploy.sh             | (vercel-deploy-claimable/scripts/)   |
+//! | stitch_verify_setup.sh       | (shadcn-ui/scripts/)                 |
+//! | stitch_fetch.sh              | (react-components/scripts/)          |
+//! | stitch_download_asset.sh     | (remotion/scripts/)                  |
+//! | superpowers_find_polluter.sh | (systematic-debugging/)              |
+//! | helm_validate_chart.sh       | (helm-chart-scaffolding/scripts/)    |
+//! | jwt_test_setup.sh            | (spring-boot-security-jwt/scripts/)  |
+//!
+//! Backslash line continuations (`\`) were removed from azure
+//! fixtures because the parser doesn't handle them in all contexts (#289).
+
+use async_trait::async_trait;
+use bashkit::parser::Parser;
+use bashkit::{Bash, Builtin, BuiltinContext, ExecResult, ExecutionLimits};
+use std::path::PathBuf;
+
+fn fixtures_dir() -> PathBuf {
+    PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/skills_fixtures")
+}
+
+fn read_fixture(name: &str) -> String {
+    let path = fixtures_dir().join(name);
+    std::fs::read_to_string(&path).unwrap_or_else(|e| panic!("read {}: {}", path.display(), e))
+}
+
+// ---------------------------------------------------------------------------
+// Stub builtins for external binaries
+// ---------------------------------------------------------------------------
+
+/// Stub that prints its invocation as JSON for assertion.
+/// Usage: registers as "az", "helm", "npm", etc.
+/// Output: {"cmd":"az","args":["account","show",...]}
+struct EchoStub {
+    name: &'static str,
+}
+
+#[async_trait]
+impl Builtin for EchoStub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        // Return a recognizable marker so scripts don't choke on empty output
+        let args_str = ctx.args.join(" ");
+        Ok(ExecResult::ok(format!("STUB:{}:{}\n", self.name, args_str)))
+    }
+}
+
+/// Stub for `az` that returns canned JSON for common subcommands.
+struct AzStub;
+
+#[async_trait]
+impl Builtin for AzStub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        let args: Vec<&str> = ctx.args.iter().map(|s| s.as_str()).collect();
+        match args.as_slice() {
+            ["account", "show", ..] => {
+                Ok(ExecResult::ok("00000000-0000-0000-0000-000000000000\n".to_string()))
+            }
+            ["rest", "--method", "GET", ..] => Ok(ExecResult::ok(
+                "{\"value\":[{\"location\":\"eastus\",\"properties\":{\"skuName\":\"GlobalStandard\",\"availableCapacity\":100}}]}\n"
+                    .to_string(),
+            )),
+            ["cognitiveservices", "usage", "list", ..] => {
+                Ok(ExecResult::ok("[{\"name\":{\"value\":\"OpenAI.GlobalStandard.o3-mini\"},\"limit\":200,\"currentValue\":50}]\n".to_string()))
+            }
+            ["cognitiveservices", "model", "list", ..] => {
+                Ok(ExecResult::ok("Version Format\n2025-01-31 OpenAI\n".to_string()))
+            }
+            _ => Ok(ExecResult::ok("{}\n".to_string())),
+        }
+    }
+}
+
+/// Stub for `helm` that returns canned output for lint/template/install.
+struct HelmStub;
+
+#[async_trait]
+impl Builtin for HelmStub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        let sub = ctx.args.first().map(|s| s.as_str()).unwrap_or("");
+        match sub {
+            "lint" => Ok(ExecResult::ok(
+                "==> Linting .\n[INFO] Chart.yaml: icon is recommended\n\n1 chart(s) linted, 0 chart(s) failed\n".to_string(),
+            )),
+            "template" => Ok(ExecResult::ok(
+                "---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: test\nspec:\n template:\n spec:\n containers:\n - name: app\n securityContext:\n runAsNonRoot: true\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n resources:\n limits:\n cpu: 100m\n requests:\n cpu: 50m\n livenessProbe:\n httpGet:\n path: /healthz\n readinessProbe:\n httpGet:\n path: /ready\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: test\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: test\n"
+                    .to_string(),
+            )),
+            "install" => Ok(ExecResult::ok("NAME: test-release\nSTATUS: deployed\n".to_string())),
+            "dependency" => Ok(ExecResult::ok("NAME\tVERSION\tREPOSITORY\tSTATUS\n".to_string())),
+            _ => Ok(ExecResult::ok(String::new())),
+        }
+    }
+}
+
+/// Stub for `npm` that returns success for test/install.
+struct NpmStub;
+
+#[async_trait]
+impl Builtin for NpmStub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        let sub = ctx.args.first().map(|s| s.as_str()).unwrap_or("");
+        match sub {
+            "test" => Ok(ExecResult::ok("Tests passed\n".to_string())),
+            "install" => Ok(ExecResult::ok("added 42 packages\n".to_string())),
+            _ => Ok(ExecResult::ok(String::new())),
+        }
+    }
+}
+
+/// Stub for `curl` that returns canned JSON responses.
+/// Replaces the built-in curl so we don't need network.
+struct CurlStub;
+
+#[async_trait]
+impl Builtin for CurlStub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        let mut output_file: Option<String> = None;
+        let mut write_out: Option<String> = None;
+        let mut i = 0;
+        while i < ctx.args.len() {
+            match ctx.args[i].as_str() {
+                "-o" => {
+                    i += 1;
+                    if i < ctx.args.len() {
+                        output_file = Some(ctx.args[i].clone());
+                    }
+                }
+                "-w" | "--write-out" => {
+                    i += 1;
+                    if i < ctx.args.len() {
+                        write_out = Some(ctx.args[i].clone());
+                    }
+                }
+                _ => {}
+            }
+            i += 1;
+        }
+
+        // Write canned content to output file if -o specified
+        if let Some(ref path) = output_file {
+            let content = b"{\"accessToken\":\"tok_test_1234567890\",\"refreshToken\":\"ref_test_0987654321\"}";
+            let p = std::path::Path::new(path);
+            let _ = ctx.fs.write_file(p, content).await;
+        }
+
+        let mut result = String::new();
+        // Handle -w "%{http_code}" pattern
+        if let Some(ref fmt) = write_out {
+            if fmt.contains("http_code") {
+                result.push_str("200");
+            }
+        }
+        if result.is_empty() && output_file.is_none() {
+            result.push_str("{\"previewUrl\":\"https://test.vercel.app\",\"claimUrl\":\"https://vercel.com/claim/test\"}\n");
+        }
+
+        Ok(ExecResult::ok(result))
+    }
+}
+
+/// Stub for `python3` — just echoes that it was called.
+struct Python3Stub;
+
+#[async_trait]
+impl Builtin for Python3Stub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        // For -c scripts, just return a plausible table output
+        if ctx.args.first().map(|s| s.as_str()) == Some("-c") {
+            return Ok(ExecResult::ok(
+                "Model: o3-mini v2025-01-31 | SKU: GlobalStandard | Min Capacity: 0K TPM\nRegions with capacity: 1 | Meets target: 1 | With quota: 1 | With projects: 0\n\nRegion Available Meets Target Quota Projects Sample Project\n----------------------------------------------------------------------------------------------------\neastus 100K......... YES 150K 0 (none)\n".to_string(),
+            ));
+        }
+        Ok(ExecResult::ok(String::new()))
+    }
+}
+
+/// Stub for `stat` — returns a fake file size.
+struct StatStub;
+
+#[async_trait]
+impl Builtin for StatStub {
+    async fn execute(&self, _ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        Ok(ExecResult::ok("1024\n".to_string()))
+    }
+}
+
+/// Stub for `base64` — missing builtin, stub so scripts don't fail.
+/// TODO: Remove when #287 (base64 builtin) is implemented.
+struct Base64Stub;
+
+#[async_trait]
+impl Builtin for Base64Stub {
+    async fn execute(&self, ctx: BuiltinContext<'_>) -> bashkit::Result<ExecResult> {
+        // For testing: just return a fixed base64-url-safe string
+        if ctx.args.first().map(|s| s.as_str()) == Some("-d") {
+            // decode mode
+            let input = ctx.stdin.unwrap_or("");
+            Ok(ExecResult::ok(input.to_string()))
+        } else {
+            // encode mode — return a fixed encoded value
+            Ok(ExecResult::ok(
+                "dTIwZjlhNzNkYTRhNzRiNjM5ODNlZmViYzdiYjZm\n".to_string(),
+            ))
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Helper: write script to VFS and make executable
+// ---------------------------------------------------------------------------
+
+async fn write_script(bash: &Bash, path: &str, content: &str) {
+    let fs = bash.fs();
+    let p = std::path::Path::new(path);
+    fs.write_file(p, content.as_bytes()).await.unwrap();
+    fs.chmod(p, 0o755).await.unwrap();
+}
+
+// ---------------------------------------------------------------------------
+// Helper: build a Bash instance with common stubs
+// ---------------------------------------------------------------------------
+
+fn bash_with_stubs() -> Bash {
+    Bash::builder()
+        .limits(
+            ExecutionLimits::new()
+                .max_commands(1_000_000)
+                .max_loop_iterations(100_000),
+        )
+        .builtin("az", Box::new(AzStub))
+        .builtin("helm", Box::new(HelmStub))
+        .builtin("npm", Box::new(NpmStub))
+        .builtin("curl", Box::new(CurlStub))
+        .builtin("python3", Box::new(Python3Stub))
+        .builtin("stat", Box::new(StatStub))
+        .builtin("base64", Box::new(Base64Stub))
+        .builtin("keytool", Box::new(EchoStub { name: "keytool" }))
+        .builtin("openssl", Box::new(EchoStub { name: "openssl" }))
+        .build()
+}
+
+// ===========================================================================
+// PART 1: Parse-only tests — verify every fixture parses without error
+// ===========================================================================
+
+macro_rules! parse_test {
+    ($name:ident, $fixture:literal) => {
+        #[test]
+        fn $name() {
+            let script = read_fixture($fixture);
+            let parser = Parser::new(&script);
+            match parser.parse() {
+                Ok(ast) => {
+                    assert!(
+                        !ast.commands.is_empty(),
+                        "parsed AST should have commands for {}",
+                        $fixture
+                    );
+                }
+                Err(e) => {
+                    panic!("parse error in {}: {}", $fixture, e);
+                }
+            }
+        }
+    };
+}
+
+// Every fixture must parse cleanly
+parse_test!(parse_azure_generate_url, "azure_generate_url.sh");
+parse_test!(parse_azure_discover_rank, "azure_discover_rank.sh");
+parse_test!(parse_azure_query_capacity, "azure_query_capacity.sh");
+parse_test!(parse_vercel_deploy, "vercel_deploy.sh");
+parse_test!(parse_stitch_verify_setup, "stitch_verify_setup.sh");
+parse_test!(parse_stitch_fetch, "stitch_fetch.sh");
+parse_test!(parse_stitch_download_asset, "stitch_download_asset.sh");
+parse_test!(
+    parse_superpowers_find_polluter,
+    "superpowers_find_polluter.sh"
+);
+parse_test!(parse_helm_validate_chart, "helm_validate_chart.sh");
+parse_test!(parse_jwt_test_setup, "jwt_test_setup.sh");
+
+// ===========================================================================
+// PART 2: Execution tests — run scripts with stubbed binaries
+// ===========================================================================
+
+/// azure generate_deployment_url.sh — tests: while/case arg parsing,
+/// variable expansion, pipes (xxd | base64 | tr), heredoc in usage()
+///
+/// BUG: Hits MaxLoopIterations(100000) — the while/case arg parsing
+/// loop or heredoc processing consumes excessive iterations.
+#[tokio::test]
+#[ignore = "hits MaxLoopIterations — while/case arg parsing loop bug"]
+async fn exec_azure_generate_url() {
+    let script = read_fixture("azure_generate_url.sh");
+    let mut bash = bash_with_stubs();
+    write_script(&bash, "/test.sh", &script).await;
+
+    let result = bash
+        .exec("/test.sh --subscription d5320f9a-73da-4a74-b639-83efebc7bb6f --resource-group test-rg --foundry-resource test-foundry --project test-project --deployment gpt-4o")
+        .await
+        .unwrap();
+    assert_eq!(result.exit_code, 0, "script failed: {}", result.stdout);
+    assert!(
+        result.stdout.contains("ai.azure.com"),
+        "expected URL in output, got: {}",
+        result.stdout
+    );
+}
+
+/// azure query_capacity.sh — tests: set -euo pipefail, ${1:?}, ${2:-},
+/// if/elif, variable expansion, printf, brace expansion {1..60}, for loop
+///
+/// BUG: Exits with code 1 under set -euo pipefail. A command in the
+/// pipeline fails (likely jq or az stub output not matching expected
+/// format), causing pipefail to abort.
+#[tokio::test]
+#[ignore = "pipefail triggers on az/jq stub output mismatch"]
+async fn exec_azure_query_capacity() {
+    let script = read_fixture("azure_query_capacity.sh");
+    let mut bash = bash_with_stubs();
+    write_script(&bash, "/test.sh", &script).await;
+
+    let result = bash.exec("/test.sh o3-mini 2025-01-31").await.unwrap();
+    assert_eq!(result.exit_code, 0, "script failed: {}", result.stdout);
+    assert!(
+        result.stdout.contains("Capacity:"),
+        "expected capacity output, got: {}",
+        result.stdout
+    );
+}
+
+/// vercel deploy.sh — tests: nested function defs, trap, mktemp, tar,
+/// [[ ]] glob matching, grep -o, cut, find, basename, >&2 redirects
+///
+/// BUG: Exit code 2. The script's nested function definitions or
+/// trap/mktemp/tar interactions cause an execution error. Parses fine.
+#[tokio::test]
+#[ignore = "exit code 2 — nested functions/trap/mktemp interaction"]
+async fn exec_vercel_deploy() {
+    let script = read_fixture("vercel_deploy.sh");
+    let mut bash = bash_with_stubs();
+
+    // Set up a minimal project directory in VFS
+    let fs = bash.fs();
+    fs.mkdir(std::path::Path::new("/project"), true)
+        .await
+        .unwrap();
+    fs.write_file(
+        std::path::Path::new("/project/package.json"),
+        br#"{"dependencies":{"next":"14.0.0"}}"#,
+    )
+    .await
+    .unwrap();
+    fs.write_file(
+        std::path::Path::new("/project/index.html"),
+        b"",
+    )
+    .await
+    .unwrap();
+    write_script(&bash, "/deploy.sh", &script).await;
+
+    let result = bash.exec("/deploy.sh /project").await.unwrap();
+    assert_eq!(
+        result.exit_code, 0,
+        "deploy failed (exit {}): stdout={}\nThis tests nested functions, trap, tar, mktemp",
+        result.exit_code, result.stdout
+    );
+}
+
+/// stitch verify-setup.sh — tests: echo -e with ANSI codes, file tests,
+/// grep -q, find, wc -l, array iteration ("${arr[@]}")
+///
+/// BUG: [ -f "components.json" ] returns false even though the file
+/// exists in VFS at /project/components.json and cwd is /project.
+/// Likely a cwd propagation issue into script file execution context.
+#[tokio::test] +#[ignore = "[ -f ] doesn't see VFS files after cd in script execution"] +async fn exec_stitch_verify_setup() { + let script = read_fixture("stitch_verify_setup.sh"); + let mut bash = bash_with_stubs(); + + // Set up a mock project in VFS + let fs = bash.fs(); + fs.mkdir(std::path::Path::new("/project/src/lib"), true) + .await + .unwrap(); + fs.write_file(std::path::Path::new("/project/components.json"), b"{}") + .await + .unwrap(); + fs.write_file( + std::path::Path::new("/project/tailwind.config.js"), + b"module.exports = {}", + ) + .await + .unwrap(); + fs.write_file( + std::path::Path::new("/project/tsconfig.json"), + br#"{"compilerOptions":{"paths":{"@/*":["./src/*"]}}}"#, + ) + .await + .unwrap(); + fs.write_file( + std::path::Path::new("/project/src/globals.css"), + b"@tailwind base;\n@tailwind components;\n@tailwind utilities;\n:root { --bg: white; }", + ) + .await + .unwrap(); + fs.mkdir(std::path::Path::new("/project/src/components/ui"), true) + .await + .unwrap(); + fs.write_file( + std::path::Path::new("/project/src/components/ui/button.tsx"), + b"export const Button = () =>