diff --git a/.github/workflows/claude-token-optimizer.lock.yml b/.github/workflows/claude-token-optimizer.lock.yml
index 7c64b6bdb1..dca86bd705 100644
--- a/.github/workflows/claude-token-optimizer.lock.yml
+++ b/.github/workflows/claude-token-optimizer.lock.yml
@@ -26,7 +26,7 @@
# Imports:
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"e6c437005ee8faa93a5315758680e2d1ca9479e441310449bc624080d2c9de8f","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"c83cbc1cff24dbfa71c9758e4780a18e718794d2c3a21faef06342d049bf88d9","strict":true,"agent_id":"copilot"}
name: "Claude Token Optimizer"
"on":
@@ -144,14 +144,14 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_9c1ef0d2e717c91b_EOF'
+ cat << 'GH_AW_PROMPT_a17ce043f710c3ae_EOF'
- GH_AW_PROMPT_9c1ef0d2e717c91b_EOF
+ GH_AW_PROMPT_a17ce043f710c3ae_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_9c1ef0d2e717c91b_EOF'
+ cat << 'GH_AW_PROMPT_a17ce043f710c3ae_EOF'
Tools: create_issue, missing_tool, missing_data, noop
@@ -183,13 +183,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_9c1ef0d2e717c91b_EOF
+ GH_AW_PROMPT_a17ce043f710c3ae_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_9c1ef0d2e717c91b_EOF'
+ cat << 'GH_AW_PROMPT_a17ce043f710c3ae_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/claude-token-optimizer.md}}
- GH_AW_PROMPT_9c1ef0d2e717c91b_EOF
+ GH_AW_PROMPT_a17ce043f710c3ae_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -314,10 +314,20 @@ jobs:
run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh
env:
GH_TOKEN: ${{ github.token }}
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Find and download artifacts from the most expensive Claude workflow
- run: "set -euo pipefail\nmkdir -p /tmp/token-optimizer-claude\n\necho \"📥 Loading Claude workflow runs from last 24 hours...\"\n./gh-aw logs \\\n --engine claude \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-optimizer-claude/claude-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-optimizer-claude/claude-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-optimizer-claude/claude-runs.json 2>/dev/null || echo 0)\necho \"Found ${RUN_COUNT} Claude runs\"\n\nif [ \"$RUN_COUNT\" -eq 0 ]; then\n echo \"No Claude runs found, nothing to optimize\"\n exit 0\nfi\n\n# Find the most expensive workflow (by total tokens across all its runs)\necho \"🔍 Identifying most expensive workflow...\"\njq -r '\n group_by(.workflowName) |\n map({\n workflow: .[0].workflowName,\n total_tokens: (map(.tokenUsage) | add),\n total_cost: (map(.estimatedCost) | add),\n run_count: length,\n avg_tokens: ((map(.tokenUsage) | add) / length),\n run_ids: map(.databaseId),\n latest_run_id: (sort_by(.createdAt) | last | .databaseId),\n latest_run_url: (sort_by(.createdAt) | last | .url)\n }) |\n sort_by(.total_tokens) | reverse | .[0]\n' /tmp/token-optimizer-claude/claude-runs.json > /tmp/token-optimizer-claude/top-workflow.json\n\nWORKFLOW_NAME=$(jq -r '.workflow' /tmp/token-optimizer-claude/top-workflow.json)\nLATEST_RUN_ID=$(jq -r '.latest_run_id' /tmp/token-optimizer-claude/top-workflow.json)\necho \"Most expensive workflow: $WORKFLOW_NAME (run: $LATEST_RUN_ID)\"\necho \"WORKFLOW_NAME=$WORKFLOW_NAME\" >> \"$GITHUB_ENV\"\n\n# Download the firewall-audit-logs artifact from the latest run\nARTIFACT_DIR=\"/tmp/token-optimizer-claude/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading firewall-audit-logs from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$ARTIFACT_DIR\" \\\n 2>/dev/null || true\n\n# Also download agent artifacts\necho \"📥 Downloading agent artifacts from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"agent\" \\\n --dir \"$ARTIFACT_DIR/agent\" \\\n 2>/dev/null || true\n\n# Find token-usage.jsonl\nUSAGE_FILE=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | head -1)\nif [ -n \"$USAGE_FILE\" ]; then\n echo \"Found token-usage.jsonl: $USAGE_FILE\"\n cp \"$USAGE_FILE\" /tmp/token-optimizer-claude/token-usage.jsonl\n echo \"Records: $(wc -l < /tmp/token-optimizer-claude/token-usage.jsonl)\"\n\n # Pre-compute Anthropic-specific metrics\n echo \"📊 Computing Anthropic cache efficiency metrics...\"\n awk '\n BEGIN { ti=0; to=0; cr=0; cw=0; tr=0 }\n {\n if (match($0, /\"input_tokens\" *: *([0-9]+)/, m)) ti += m[1]+0\n if (match($0, /\"output_tokens\" *: *([0-9]+)/, m)) to += m[1]+0\n if (match($0, /\"cache_read_tokens\" *: *([0-9]+)/, m)) cr += m[1]+0\n if (match($0, /\"cache_write_tokens\" *: *([0-9]+)/, m)) cw += m[1]+0\n tr += 1\n }\n END {\n total = ti + to + cr + cw\n if (tr == 0) exit\n printf \"Requests: %d\\n\", tr\n printf \"Input tokens: %d\\n\", ti\n printf \"Output tokens: %d\\n\", to\n printf \"Cache read tokens: %d\\n\", cr\n printf \"Cache write tokens: %d\\n\", cw\n printf \"Total tokens: %d\\n\", total\n if (ti + cr > 0) printf \"Cache hit rate: %.1f%%\\n\", (cr / (ti + cr)) * 100\n if (ti + cw > 0) printf \"Cache write rate: %.1f%%\\n\", (cw / (ti + cw)) * 100\n if (cw > 0) printf \"Cache read/write ratio: %.2f\\n\", (cr / cw)\n }' /tmp/token-optimizer-claude/token-usage.jsonl > /tmp/token-optimizer-claude/cache-metrics.txt\n cat /tmp/token-optimizer-claude/cache-metrics.txt\nelse\n echo \"No token-usage.jsonl found in artifacts\"\n touch /tmp/token-optimizer-claude/token-usage.jsonl\n touch /tmp/token-optimizer-claude/cache-metrics.txt\nfi\n\n# Find the workflow markdown source\nWORKFLOW_MD_NAME=$(echo \"$WORKFLOW_NAME\" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')\nWORKFLOW_MD=\".github/workflows/${WORKFLOW_MD_NAME}.md\"\nif [ -f \"$WORKFLOW_MD\" ]; then\n echo \"Found workflow source: $WORKFLOW_MD\"\n cp \"$WORKFLOW_MD\" /tmp/token-optimizer-claude/workflow-source.md\nelse\n find .github/workflows -name \"*.md\" -exec grep -l \"^name: $WORKFLOW_NAME\" {} \\; 2>/dev/null | head -1 | while read -r f; do\n echo \"Found: $f\"\n cp \"$f\" /tmp/token-optimizer-claude/workflow-source.md\n done\nfi\n\n# Extract declared tools from workflow source\nif [ -f /tmp/token-optimizer-claude/workflow-source.md ]; then\n sed -n '/^---$/,/^---$/p' /tmp/token-optimizer-claude/workflow-source.md | \\\n grep -A20 \"^tools:\" | head -30 > /tmp/token-optimizer-claude/declared-tools.txt || true\nfi\n"
+ run: "set -euo pipefail\nmkdir -p /tmp/token-optimizer-claude\n\necho \"📥 Loading Claude workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine claude \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-optimizer-claude/claude-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-optimizer-claude/claude-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-optimizer-claude/claude-runs.json 2>/dev/null || echo 0)\necho \"Found ${RUN_COUNT} Claude runs\"\n\nif [ \"$RUN_COUNT\" -eq 0 ]; then\n echo \"No Claude runs found, nothing to optimize\"\n exit 0\nfi\n\n# Find the most expensive workflow (by total tokens across all its runs)\necho \"🔍 Identifying most expensive workflow...\"\njq -r '\n group_by(.workflowName) |\n map({\n workflow: .[0].workflowName,\n total_tokens: (map(.tokenUsage) | add),\n total_cost: (map(.estimatedCost) | add),\n run_count: length,\n avg_tokens: ((map(.tokenUsage) | add) / length),\n run_ids: map(.databaseId),\n latest_run_id: (sort_by(.createdAt) | last | .databaseId),\n latest_run_url: (sort_by(.createdAt) | last | .url)\n }) |\n sort_by(.total_tokens) | reverse | .[0]\n' /tmp/token-optimizer-claude/claude-runs.json > /tmp/token-optimizer-claude/top-workflow.json\n\nWORKFLOW_NAME=$(jq -r '.workflow' /tmp/token-optimizer-claude/top-workflow.json)\nLATEST_RUN_ID=$(jq -r '.latest_run_id' /tmp/token-optimizer-claude/top-workflow.json)\necho \"Most expensive workflow: $WORKFLOW_NAME (run: $LATEST_RUN_ID)\"\necho \"WORKFLOW_NAME=$WORKFLOW_NAME\" >> \"$GITHUB_ENV\"\n\n# Download the firewall-audit-logs artifact from the latest run\nARTIFACT_DIR=\"/tmp/token-optimizer-claude/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading firewall-audit-logs from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$ARTIFACT_DIR\" \\\n 2>/dev/null || true\n\n# Also download agent artifacts\necho \"📥 Downloading agent artifacts from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"agent\" \\\n --dir \"$ARTIFACT_DIR/agent\" \\\n 2>/dev/null || true\n\n# Find token-usage.jsonl\nUSAGE_FILE=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | head -1)\nif [ -n \"$USAGE_FILE\" ]; then\n echo \"Found token-usage.jsonl: $USAGE_FILE\"\n cp \"$USAGE_FILE\" /tmp/token-optimizer-claude/token-usage.jsonl\n echo \"Records: $(wc -l < /tmp/token-optimizer-claude/token-usage.jsonl)\"\n\n # Pre-compute Anthropic-specific metrics\n echo \"📊 Computing Anthropic cache efficiency metrics...\"\n awk '\n BEGIN { ti=0; to=0; cr=0; cw=0; tr=0 }\n {\n if (match($0, /\"input_tokens\" *: *([0-9]+)/, m)) ti += m[1]+0\n if (match($0, /\"output_tokens\" *: *([0-9]+)/, m)) to += m[1]+0\n if (match($0, /\"cache_read_tokens\" *: *([0-9]+)/, m)) cr += m[1]+0\n if (match($0, /\"cache_write_tokens\" *: *([0-9]+)/, m)) cw += m[1]+0\n tr += 1\n }\n END {\n total = ti + to + cr + cw\n if (tr == 0) exit\n printf \"Requests: %d\\n\", tr\n printf \"Input tokens: %d\\n\", ti\n printf \"Output tokens: %d\\n\", to\n printf \"Cache read tokens: %d\\n\", cr\n printf \"Cache write tokens: %d\\n\", cw\n printf \"Total tokens: %d\\n\", total\n if (ti + cr > 0) printf \"Cache hit rate: %.1f%%\\n\", (cr / (ti + cr)) * 100\n if (ti + cw > 0) printf \"Cache write rate: %.1f%%\\n\", (cw / (ti + cw)) * 100\n if (cw > 0) printf \"Cache read/write ratio: %.2f\\n\", (cr / cw)\n }' /tmp/token-optimizer-claude/token-usage.jsonl > /tmp/token-optimizer-claude/cache-metrics.txt\n cat /tmp/token-optimizer-claude/cache-metrics.txt\nelse\n echo \"No token-usage.jsonl found in artifacts\"\n touch /tmp/token-optimizer-claude/token-usage.jsonl\n touch /tmp/token-optimizer-claude/cache-metrics.txt\nfi\n\n# Find the workflow markdown source\nWORKFLOW_MD_NAME=$(echo \"$WORKFLOW_NAME\" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')\nWORKFLOW_MD=\".github/workflows/${WORKFLOW_MD_NAME}.md\"\nif [ -f \"$WORKFLOW_MD\" ]; then\n echo \"Found workflow source: $WORKFLOW_MD\"\n cp \"$WORKFLOW_MD\" /tmp/token-optimizer-claude/workflow-source.md\nelse\n find .github/workflows -name \"*.md\" -exec grep -l \"^name: $WORKFLOW_NAME\" {} \\; 2>/dev/null | head -1 | while read -r f; do\n echo \"Found: $f\"\n cp \"$f\" /tmp/token-optimizer-claude/workflow-source.md\n done\nfi\n\n# Extract declared tools from workflow source\nif [ -f /tmp/token-optimizer-claude/workflow-source.md ]; then\n sed -n '/^---$/,/^---$/p' /tmp/token-optimizer-claude/workflow-source.md | \\\n grep -A20 \"^tools:\" | head -30 > /tmp/token-optimizer-claude/declared-tools.txt || true\nfi\n"
- name: Configure Git credentials
env:
@@ -368,12 +378,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_1379b89db1ebfd9b_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_4c4cf3ee925b78e9_EOF'
{"create_issue":{"close_older_issues":true,"expires":168,"labels":["automated-analysis","token-optimization","claude","cost-reduction"],"max":1,"title_prefix":"⚡ Claude Token Optimization: "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}}
- GH_AW_SAFE_OUTPUTS_CONFIG_1379b89db1ebfd9b_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_4c4cf3ee925b78e9_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_71880ad1fad56a41_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_23157a90a325d224_EOF'
{
"description_suffixes": {
"create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"⚡ Claude Token Optimization: \". Labels [\"automated-analysis\" \"token-optimization\" \"claude\" \"cost-reduction\"] will be automatically added."
@@ -381,8 +391,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_71880ad1fad56a41_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_ede96a40bce758f0_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_23157a90a325d224_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_33654cf1e6094598_EOF'
{
"create_issue": {
"defaultMax": 1,
@@ -475,7 +485,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_ede96a40bce758f0_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_33654cf1e6094598_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -545,7 +555,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_c3083a2d7285b52c_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_aa127544f74137f8_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
@@ -586,7 +596,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_c3083a2d7285b52c_EOF
+ GH_AW_MCP_CONFIG_aa127544f74137f8_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/claude-token-optimizer.md b/.github/workflows/claude-token-optimizer.md
index 6fb8570ba4..cf0bab1cf0 100644
--- a/.github/workflows/claude-token-optimizer.md
+++ b/.github/workflows/claude-token-optimizer.md
@@ -44,6 +44,16 @@ network: defaults
timeout-minutes: 30
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Find and download artifacts from the most expensive Claude workflow
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -52,7 +62,7 @@ steps:
mkdir -p /tmp/token-optimizer-claude
echo "📥 Loading Claude workflow runs from last 24 hours..."
- ./gh-aw logs \
+ gh aw logs \
--engine claude \
--start-date -1d \
--json \
diff --git a/.github/workflows/claude-token-usage-analyzer.lock.yml b/.github/workflows/claude-token-usage-analyzer.lock.yml
index a0eee0484e..dc7173a33c 100644
--- a/.github/workflows/claude-token-usage-analyzer.lock.yml
+++ b/.github/workflows/claude-token-usage-analyzer.lock.yml
@@ -26,7 +26,7 @@
# Imports:
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"319395ee643167566941157e1c24260bc3618407883eceeb68c9959c28b7fad5","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"3e1a8c2ef3e9e0df9edb0b63b53c86db6e4a7f5d018a85974579ae4733ab5041","strict":true,"agent_id":"copilot"}
name: "Claude Token Usage Analyzer"
"on":
@@ -132,14 +132,14 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_56175edcaa9806cd_EOF'
+ cat << 'GH_AW_PROMPT_412baf5b8763ffab_EOF'
- GH_AW_PROMPT_56175edcaa9806cd_EOF
+ GH_AW_PROMPT_412baf5b8763ffab_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_56175edcaa9806cd_EOF'
+ cat << 'GH_AW_PROMPT_412baf5b8763ffab_EOF'
Tools: create_issue, missing_tool, missing_data, noop
@@ -171,13 +171,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_56175edcaa9806cd_EOF
+ GH_AW_PROMPT_412baf5b8763ffab_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_56175edcaa9806cd_EOF'
+ cat << 'GH_AW_PROMPT_412baf5b8763ffab_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/claude-token-usage-analyzer.md}}
- GH_AW_PROMPT_56175edcaa9806cd_EOF
+ GH_AW_PROMPT_412baf5b8763ffab_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -297,10 +297,20 @@ jobs:
run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh
env:
GH_TOKEN: ${{ github.token }}
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Download Claude workflow runs (last 24h)
- run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer-claude\n\necho \"📥 Downloading Claude workflow runs from last 24 hours...\"\n./gh-aw logs \\\n --engine claude \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer-claude/claude-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Claude workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\nARTIFACT_DIR=\"/tmp/token-analyzer-claude/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[].databaseId' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null | head -50 | while read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer-claude/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" | while read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. + {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n"
+ run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer-claude\n\necho \"📥 Downloading Claude workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine claude \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer-claude/claude-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Claude workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\nARTIFACT_DIR=\"/tmp/token-analyzer-claude/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[].databaseId' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null | head -50 | while read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer-claude/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" | while read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. + {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n"
- name: Configure Git credentials
env:
@@ -351,12 +361,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_322a980fcfb104c5_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_f14cbde2b02cb022_EOF'
{"create_issue":{"close_older_issues":true,"expires":48,"labels":["automated-analysis","token-usage","claude"],"max":1,"title_prefix":"📊 Claude Token Usage Report: "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}}
- GH_AW_SAFE_OUTPUTS_CONFIG_322a980fcfb104c5_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_f14cbde2b02cb022_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_eb44a1156459ab22_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_555736a39a322223_EOF'
{
"description_suffixes": {
"create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"📊 Claude Token Usage Report: \". Labels [\"automated-analysis\" \"token-usage\" \"claude\"] will be automatically added."
@@ -364,8 +374,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_eb44a1156459ab22_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_e0665b90065f7ec8_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_555736a39a322223_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_dfb0f30952b3934c_EOF'
{
"create_issue": {
"defaultMax": 1,
@@ -458,7 +468,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_e0665b90065f7ec8_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_dfb0f30952b3934c_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -528,7 +538,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_054b98fc8152da10_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_6a7305353a2989be_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
@@ -569,7 +579,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_054b98fc8152da10_EOF
+ GH_AW_MCP_CONFIG_6a7305353a2989be_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/claude-token-usage-analyzer.md b/.github/workflows/claude-token-usage-analyzer.md
index 339b261fcc..3fbf3da7d0 100644
--- a/.github/workflows/claude-token-usage-analyzer.md
+++ b/.github/workflows/claude-token-usage-analyzer.md
@@ -36,6 +36,16 @@ network: defaults
timeout-minutes: 30
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Download Claude workflow runs (last 24h)
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -44,7 +54,7 @@ steps:
mkdir -p /tmp/token-analyzer-claude
echo "📥 Downloading Claude workflow runs from last 24 hours..."
- ./gh-aw logs \
+ gh aw logs \
--engine claude \
--start-date -1d \
--json \
diff --git a/.github/workflows/copilot-token-optimizer.lock.yml b/.github/workflows/copilot-token-optimizer.lock.yml
index 3ea44a51fe..ca132f95b7 100644
--- a/.github/workflows/copilot-token-optimizer.lock.yml
+++ b/.github/workflows/copilot-token-optimizer.lock.yml
@@ -26,7 +26,7 @@
# Imports:
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"96191f76aa0a786c0af4277cc72d2c7a022760effb3e32c8ea9529d446d13ae2","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"32da6f54a544abc78649fa29cb833a2e9c73ae28431e2a0384d7c8b74696653b","strict":true,"agent_id":"copilot"}
name: "Copilot Token Optimizer"
"on":
@@ -144,14 +144,14 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_0a9f7546e40b4ae5_EOF'
+ cat << 'GH_AW_PROMPT_2c9b7bbaa51e30c1_EOF'
- GH_AW_PROMPT_0a9f7546e40b4ae5_EOF
+ GH_AW_PROMPT_2c9b7bbaa51e30c1_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_0a9f7546e40b4ae5_EOF'
+ cat << 'GH_AW_PROMPT_2c9b7bbaa51e30c1_EOF'
Tools: create_issue, missing_tool, missing_data, noop
@@ -183,13 +183,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_0a9f7546e40b4ae5_EOF
+ GH_AW_PROMPT_2c9b7bbaa51e30c1_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_0a9f7546e40b4ae5_EOF'
+ cat << 'GH_AW_PROMPT_2c9b7bbaa51e30c1_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/copilot-token-optimizer.md}}
- GH_AW_PROMPT_0a9f7546e40b4ae5_EOF
+ GH_AW_PROMPT_2c9b7bbaa51e30c1_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -314,10 +314,20 @@ jobs:
run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh
env:
GH_TOKEN: ${{ github.token }}
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Find and download artifacts from the most expensive Copilot workflow
- run: "set -euo pipefail\nmkdir -p /tmp/token-optimizer\n\necho \"📥 Loading Copilot workflow runs from last 24 hours...\"\n./gh-aw logs \\\n --engine copilot \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-optimizer/copilot-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-optimizer/copilot-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-optimizer/copilot-runs.json 2>/dev/null || echo 0)\necho \"Found ${RUN_COUNT} Copilot runs\"\n\nif [ \"$RUN_COUNT\" -eq 0 ]; then\n echo \"No Copilot runs found, nothing to optimize\"\n exit 0\nfi\n\n# Find the most expensive workflow (by total tokens across all its runs)\necho \"🔍 Identifying most expensive workflow...\"\njq -r '\n group_by(.workflowName) |\n map({\n workflow: .[0].workflowName,\n total_tokens: (map(.tokenUsage) | add),\n total_cost: (map(.estimatedCost) | add),\n run_count: length,\n avg_tokens: ((map(.tokenUsage) | add) / length),\n run_ids: map(.databaseId),\n latest_run_id: (sort_by(.createdAt) | last | .databaseId),\n latest_run_url: (sort_by(.createdAt) | last | .url)\n }) |\n sort_by(.total_tokens) | reverse | .[0]\n' /tmp/token-optimizer/copilot-runs.json > /tmp/token-optimizer/top-workflow.json\n\nWORKFLOW_NAME=$(jq -r '.workflow' /tmp/token-optimizer/top-workflow.json)\nLATEST_RUN_ID=$(jq -r '.latest_run_id' /tmp/token-optimizer/top-workflow.json)\necho \"Most expensive workflow: $WORKFLOW_NAME (run: $LATEST_RUN_ID)\"\necho \"WORKFLOW_NAME=$WORKFLOW_NAME\" >> \"$GITHUB_ENV\"\n\n# Download the firewall-audit-logs artifact from the latest run of that workflow\nARTIFACT_DIR=\"/tmp/token-optimizer/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading firewall-audit-logs from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$ARTIFACT_DIR\" \\\n 2>/dev/null || true\n\n# Also download agent artifacts (contains prompt and tool usage logs)\necho \"📥 Downloading agent artifacts from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"agent\" \\\n --dir \"$ARTIFACT_DIR/agent\" \\\n 2>/dev/null || true\n\n# Find token-usage.jsonl\nUSAGE_FILE=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | head -1)\nif [ -n \"$USAGE_FILE\" ]; then\n echo \"Found token-usage.jsonl: $USAGE_FILE\"\n cp \"$USAGE_FILE\" /tmp/token-optimizer/token-usage.jsonl\n wc -l < /tmp/token-optimizer/token-usage.jsonl\nelse\n echo \"No token-usage.jsonl found in artifacts\"\n touch /tmp/token-optimizer/token-usage.jsonl\nfi\n\n# Find the workflow markdown source\nWORKFLOW_MD_NAME=$(echo \"$WORKFLOW_NAME\" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')\nWORKFLOW_MD=\".github/workflows/${WORKFLOW_MD_NAME}.md\"\nif [ -f \"$WORKFLOW_MD\" ]; then\n echo \"Found workflow source: $WORKFLOW_MD\"\n cp \"$WORKFLOW_MD\" /tmp/token-optimizer/workflow-source.md\nelse\n echo \"Workflow source not found at $WORKFLOW_MD, searching...\"\n find .github/workflows -name \"*.md\" -exec grep -l \"^name: $WORKFLOW_NAME\" {} \\; 2>/dev/null | head -1 | while read -r f; do\n echo \"Found: $f\"\n cp \"$f\" /tmp/token-optimizer/workflow-source.md\n done\nfi\n\n# Extract declared tools from workflow source (if available)\nif [ -f /tmp/token-optimizer/workflow-source.md ]; then\n echo \"📋 Extracting declared tools from workflow source...\"\n # Extract tools section from frontmatter\n sed -n '/^---$/,/^---$/p' /tmp/token-optimizer/workflow-source.md | \\\n grep -A20 \"^tools:\" | head -30 > /tmp/token-optimizer/declared-tools.txt || true\n cat /tmp/token-optimizer/declared-tools.txt\nfi\n"
+ run: "set -euo pipefail\nmkdir -p /tmp/token-optimizer\n\necho \"📥 Loading Copilot workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine copilot \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-optimizer/copilot-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-optimizer/copilot-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-optimizer/copilot-runs.json 2>/dev/null || echo 0)\necho \"Found ${RUN_COUNT} Copilot runs\"\n\nif [ \"$RUN_COUNT\" -eq 0 ]; then\n echo \"No Copilot runs found, nothing to optimize\"\n exit 0\nfi\n\n# Find the most expensive workflow (by total tokens across all its runs)\necho \"🔍 Identifying most expensive workflow...\"\njq -r '\n group_by(.workflowName) |\n map({\n workflow: .[0].workflowName,\n total_tokens: (map(.tokenUsage) | add),\n total_cost: (map(.estimatedCost) | add),\n run_count: length,\n avg_tokens: ((map(.tokenUsage) | add) / length),\n run_ids: map(.databaseId),\n latest_run_id: (sort_by(.createdAt) | last | .databaseId),\n latest_run_url: (sort_by(.createdAt) | last | .url)\n }) |\n sort_by(.total_tokens) | reverse | .[0]\n' /tmp/token-optimizer/copilot-runs.json > /tmp/token-optimizer/top-workflow.json\n\nWORKFLOW_NAME=$(jq -r '.workflow' /tmp/token-optimizer/top-workflow.json)\nLATEST_RUN_ID=$(jq -r '.latest_run_id' /tmp/token-optimizer/top-workflow.json)\necho \"Most expensive workflow: $WORKFLOW_NAME (run: $LATEST_RUN_ID)\"\necho \"WORKFLOW_NAME=$WORKFLOW_NAME\" >> \"$GITHUB_ENV\"\n\n# Download the firewall-audit-logs artifact from the latest run of that workflow\nARTIFACT_DIR=\"/tmp/token-optimizer/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading firewall-audit-logs from run $LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$ARTIFACT_DIR\" \\\n 2>/dev/null || true\n\n# Also download agent artifacts (contains prompt and tool usage logs)\necho \"📥 Downloading agent artifacts from run 
$LATEST_RUN_ID...\"\ngh run download \"$LATEST_RUN_ID\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"agent\" \\\n --dir \"$ARTIFACT_DIR/agent\" \\\n 2>/dev/null || true\n\n# Find token-usage.jsonl\nUSAGE_FILE=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | head -1)\nif [ -n \"$USAGE_FILE\" ]; then\n echo \"Found token-usage.jsonl: $USAGE_FILE\"\n cp \"$USAGE_FILE\" /tmp/token-optimizer/token-usage.jsonl\n wc -l < /tmp/token-optimizer/token-usage.jsonl\nelse\n echo \"No token-usage.jsonl found in artifacts\"\n touch /tmp/token-optimizer/token-usage.jsonl\nfi\n\n# Find the workflow markdown source\nWORKFLOW_MD_NAME=$(echo \"$WORKFLOW_NAME\" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')\nWORKFLOW_MD=\".github/workflows/${WORKFLOW_MD_NAME}.md\"\nif [ -f \"$WORKFLOW_MD\" ]; then\n echo \"Found workflow source: $WORKFLOW_MD\"\n cp \"$WORKFLOW_MD\" /tmp/token-optimizer/workflow-source.md\nelse\n echo \"Workflow source not found at $WORKFLOW_MD, searching...\"\n find .github/workflows -name \"*.md\" -exec grep -l \"^name: $WORKFLOW_NAME\" {} \\; 2>/dev/null | head -1 | while read -r f; do\n echo \"Found: $f\"\n cp \"$f\" /tmp/token-optimizer/workflow-source.md\n done\nfi\n\n# Extract declared tools from workflow source (if available)\nif [ -f /tmp/token-optimizer/workflow-source.md ]; then\n echo \"📋 Extracting declared tools from workflow source...\"\n # Extract tools section from frontmatter\n sed -n '/^---$/,/^---$/p' /tmp/token-optimizer/workflow-source.md | \\\n grep -A20 \"^tools:\" | head -30 > /tmp/token-optimizer/declared-tools.txt || true\n cat /tmp/token-optimizer/declared-tools.txt\nfi\n"
- name: Configure Git credentials
env:
@@ -368,12 +378,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_0707fe296b3bb4c0_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_e7194fc724a1e551_EOF'
{"create_issue":{"close_older_issues":true,"expires":168,"labels":["automated-analysis","token-optimization","copilot","cost-reduction"],"max":1,"title_prefix":"⚡ Copilot Token Optimization: "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}}
- GH_AW_SAFE_OUTPUTS_CONFIG_0707fe296b3bb4c0_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_e7194fc724a1e551_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_49f16014f875196e_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_39bb3f93a4eeb96c_EOF'
{
"description_suffixes": {
"create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"⚡ Copilot Token Optimization: \". Labels [\"automated-analysis\" \"token-optimization\" \"copilot\" \"cost-reduction\"] will be automatically added."
@@ -381,8 +391,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_49f16014f875196e_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_61d48624f7229232_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_39bb3f93a4eeb96c_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_b3c4cfffbfd1bf0b_EOF'
{
"create_issue": {
"defaultMax": 1,
@@ -475,7 +485,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_61d48624f7229232_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_b3c4cfffbfd1bf0b_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -545,7 +555,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_1a30785b4c17e062_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_4cdbc74765fcc5fe_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
@@ -586,7 +596,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_1a30785b4c17e062_EOF
+ GH_AW_MCP_CONFIG_4cdbc74765fcc5fe_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/copilot-token-optimizer.md b/.github/workflows/copilot-token-optimizer.md
index d1987d66e1..7efaf75f24 100644
--- a/.github/workflows/copilot-token-optimizer.md
+++ b/.github/workflows/copilot-token-optimizer.md
@@ -44,6 +44,16 @@ network: defaults
timeout-minutes: 30
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Find and download artifacts from the most expensive Copilot workflow
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -52,7 +62,7 @@ steps:
mkdir -p /tmp/token-optimizer
echo "📥 Loading Copilot workflow runs from last 24 hours..."
- ./gh-aw logs \
+ gh aw logs \
--engine copilot \
--start-date -1d \
--json \
diff --git a/.github/workflows/copilot-token-usage-analyzer.lock.yml b/.github/workflows/copilot-token-usage-analyzer.lock.yml
index aff2f738f0..4904d5b8ab 100644
--- a/.github/workflows/copilot-token-usage-analyzer.lock.yml
+++ b/.github/workflows/copilot-token-usage-analyzer.lock.yml
@@ -26,7 +26,7 @@
# Imports:
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"46171278f3dc67f1437018cfe94fdfc9f8a0b9f2fcc2483c1942df6deee7630b","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"f421662fd7913377179bd64aea41e05f2d2765cef7b22e69444578e387a93601","strict":true,"agent_id":"copilot"}
name: "Copilot Token Usage Analyzer"
"on":
@@ -132,14 +132,14 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_d08cedfdee49253a_EOF'
+ cat << 'GH_AW_PROMPT_dbcef2ee4ad58e02_EOF'
- GH_AW_PROMPT_d08cedfdee49253a_EOF
+ GH_AW_PROMPT_dbcef2ee4ad58e02_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_d08cedfdee49253a_EOF'
+ cat << 'GH_AW_PROMPT_dbcef2ee4ad58e02_EOF'
Tools: create_issue, missing_tool, missing_data, noop
@@ -171,13 +171,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_d08cedfdee49253a_EOF
+ GH_AW_PROMPT_dbcef2ee4ad58e02_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_d08cedfdee49253a_EOF'
+ cat << 'GH_AW_PROMPT_dbcef2ee4ad58e02_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/copilot-token-usage-analyzer.md}}
- GH_AW_PROMPT_d08cedfdee49253a_EOF
+ GH_AW_PROMPT_dbcef2ee4ad58e02_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -297,10 +297,20 @@ jobs:
run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh
env:
GH_TOKEN: ${{ github.token }}
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Download Copilot workflow runs (last 24h)
- run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer\n\necho \"📥 Downloading Copilot workflow runs from last 24 hours...\"\n./gh-aw logs \\\n --engine copilot \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer/copilot-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Copilot workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\n# We look for the firewall-audit-logs artifact which contains token-usage.jsonl\nARTIFACT_DIR=\"/tmp/token-analyzer/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[].databaseId' /tmp/token-analyzer/copilot-runs.json 2>/dev/null | head -50 | while read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files into a single aggregate file annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" | while read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. + {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n"
+ run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer\n\necho \"📥 Downloading Copilot workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine copilot \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer/copilot-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Copilot workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\n# We look for the firewall-audit-logs artifact which contains token-usage.jsonl\nARTIFACT_DIR=\"/tmp/token-analyzer/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[].databaseId' /tmp/token-analyzer/copilot-runs.json 2>/dev/null | head -50 | while read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files into a single aggregate file annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" | while read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. + {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n"
- name: Configure Git credentials
env:
@@ -351,12 +361,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_293a235c2f9a5fa9_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_a06ad8de6b425354_EOF'
{"create_issue":{"close_older_issues":true,"expires":48,"labels":["automated-analysis","token-usage","copilot"],"max":1,"title_prefix":"📊 Copilot Token Usage Report: "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}}
- GH_AW_SAFE_OUTPUTS_CONFIG_293a235c2f9a5fa9_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_a06ad8de6b425354_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_d3dffd4e21f58665_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_d7d241b36709f602_EOF'
{
"description_suffixes": {
"create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"📊 Copilot Token Usage Report: \". Labels [\"automated-analysis\" \"token-usage\" \"copilot\"] will be automatically added."
@@ -364,8 +374,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_d3dffd4e21f58665_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_6b22248bd524230b_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_d7d241b36709f602_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_0ef0226af1b9fefa_EOF'
{
"create_issue": {
"defaultMax": 1,
@@ -458,7 +468,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_6b22248bd524230b_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_0ef0226af1b9fefa_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -528,7 +538,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_e4a79d5a48d4eb0e_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_bfbf4afe334a19cb_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
@@ -569,7 +579,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_e4a79d5a48d4eb0e_EOF
+ GH_AW_MCP_CONFIG_bfbf4afe334a19cb_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/copilot-token-usage-analyzer.md b/.github/workflows/copilot-token-usage-analyzer.md
index 3819ad6c6a..d5ba1d024b 100644
--- a/.github/workflows/copilot-token-usage-analyzer.md
+++ b/.github/workflows/copilot-token-usage-analyzer.md
@@ -36,6 +36,16 @@ network: defaults
timeout-minutes: 30
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Download Copilot workflow runs (last 24h)
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -44,7 +54,7 @@ steps:
mkdir -p /tmp/token-analyzer
echo "📥 Downloading Copilot workflow runs from last 24 hours..."
- ./gh-aw logs \
+ gh aw logs \
--engine copilot \
--start-date -1d \
--json \
diff --git a/.github/workflows/daily-copilot-token-report.lock.yml b/.github/workflows/daily-copilot-token-report.lock.yml
index 537fe4d052..cdd7c52f4b 100644
--- a/.github/workflows/daily-copilot-token-report.lock.yml
+++ b/.github/workflows/daily-copilot-token-report.lock.yml
@@ -30,7 +30,7 @@
# - shared/repo-memory-standard.md
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"672d54e290b52292a8ef6f4aaa48d273d4e8b8f64d1605917e803d7a7f77eca0","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"462b30b2d2fb842e2cb3e4ab1920fe0a757bb4695cac2916fcc08301cba7eb44","strict":true,"agent_id":"copilot"}
name: "Daily Copilot Token Consumption Report"
"on":
@@ -137,16 +137,16 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_4f327487775628ba_EOF'
+ cat << 'GH_AW_PROMPT_b1eec491655520d1_EOF'
- GH_AW_PROMPT_4f327487775628ba_EOF
+ GH_AW_PROMPT_b1eec491655520d1_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/cache_memory_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/repo_memory_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_4f327487775628ba_EOF'
+ cat << 'GH_AW_PROMPT_b1eec491655520d1_EOF'
Tools: create_discussion, upload_asset, missing_tool, missing_data, noop
@@ -180,14 +180,14 @@ jobs:
{{/if}}
- GH_AW_PROMPT_4f327487775628ba_EOF
+ GH_AW_PROMPT_b1eec491655520d1_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_4f327487775628ba_EOF'
+ cat << 'GH_AW_PROMPT_b1eec491655520d1_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/shared/python-dataviz.md}}
{{#runtime-import .github/workflows/daily-copilot-token-report.md}}
- GH_AW_PROMPT_4f327487775628ba_EOF
+ GH_AW_PROMPT_b1eec491655520d1_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -381,10 +381,20 @@ jobs:
/tmp/gh-aw/python/*.py
/tmp/gh-aw/python/data/*
retention-days: 30
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Pre-download workflow logs
- run: "# Download logs for copilot workflows from last 30 days with JSON output\n./gh-aw logs --engine copilot --start-date -30d --json -c 500 > /tmp/gh-aw/copilot-logs.json\n\n# Verify the download\nif [ -f /tmp/gh-aw/copilot-logs.json ]; then\n echo \"✅ Logs downloaded successfully\"\n echo \"Total runs: $(jq '. | length' /tmp/gh-aw/copilot-logs.json || echo '0')\"\nelse\n echo \"❌ Failed to download logs\"\n exit 1\nfi\n"
+ run: "# Download logs for copilot workflows from last 30 days with JSON output\ngh aw logs --engine copilot --start-date -30d --json -c 500 > /tmp/gh-aw/copilot-logs.json\n\n# Verify the download\nif [ -f /tmp/gh-aw/copilot-logs.json ]; then\n echo \"✅ Logs downloaded successfully\"\n echo \"Total runs: $(jq '. | length' /tmp/gh-aw/copilot-logs.json || echo '0')\"\nelse\n echo \"❌ Failed to download logs\"\n exit 1\nfi\n"
# Cache memory file share configuration from frontmatter processed below
- name: Create cache-memory directory
@@ -460,12 +470,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_48e47ffc2bebe0af_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_7d7870a2571b988b_EOF'
{"create_discussion":{"category":"audits","close_older_discussions":true,"expires":72,"fallback_to_issue":true,"max":1,"title_prefix":"[daily-copilot-token-report] "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"push_repo_memory":{"memories":[{"dir":"/tmp/gh-aw/repo-memory/default","id":"default","max_file_count":100,"max_file_size":102400,"max_patch_size":10240}]},"upload_asset":{"allowed-exts":[".png",".jpg",".jpeg"],"branch":"assets/${{ github.workflow }}","max-size":10240}}
- GH_AW_SAFE_OUTPUTS_CONFIG_48e47ffc2bebe0af_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_7d7870a2571b988b_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_d1b630d880dee39e_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_d85eaa0ff25eb41a_EOF'
{
"description_suffixes": {
"create_discussion": " CONSTRAINTS: Maximum 1 discussion(s) can be created. Title will be prefixed with \"[daily-copilot-token-report] \". Discussions will be created in category \"audits\".",
@@ -474,8 +484,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_d1b630d880dee39e_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_7d4b330503ebddf5_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_d85eaa0ff25eb41a_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_03aad05593408c0b_EOF'
{
"create_discussion": {
"defaultMax": 1,
@@ -570,7 +580,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_7d4b330503ebddf5_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_03aad05593408c0b_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -643,7 +653,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_0ae19f90feb05c7f_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_fcc05da5cd1839db_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
@@ -684,7 +694,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_0ae19f90feb05c7f_EOF
+ GH_AW_MCP_CONFIG_fcc05da5cd1839db_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/daily-copilot-token-report.md b/.github/workflows/daily-copilot-token-report.md
index ba709eb3fe..8c30d53a77 100644
--- a/.github/workflows/daily-copilot-token-report.md
+++ b/.github/workflows/daily-copilot-token-report.md
@@ -15,12 +15,22 @@ tools:
bash:
- "*"
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Pre-download workflow logs
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Download logs for copilot workflows from last 30 days with JSON output
- ./gh-aw logs --engine copilot --start-date -30d --json -c 500 > /tmp/gh-aw/copilot-logs.json
+ gh aw logs --engine copilot --start-date -30d --json -c 500 > /tmp/gh-aw/copilot-logs.json
# Verify the download
if [ -f /tmp/gh-aw/copilot-logs.json ]; then
diff --git a/.github/workflows/daily-integrity-analysis.lock.yml b/.github/workflows/daily-integrity-analysis.lock.yml
index 7dd5398605..a0877808bb 100644
--- a/.github/workflows/daily-integrity-analysis.lock.yml
+++ b/.github/workflows/daily-integrity-analysis.lock.yml
@@ -28,7 +28,7 @@
# - shared/python-dataviz.md
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"b5c6add452b389f977d423ae899ddebffc8cd45906a51ef9f5053fe4583a6c37","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"742c1f18b689e7085927e3d4920da0eaebaf97af592e3a6664b4046543694adb","strict":true,"agent_id":"copilot"}
name: "Daily DIFC Integrity-Filtered Events Analyzer"
"on":
@@ -140,16 +140,16 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_9d4f180d5e7b4ad4_EOF'
+ cat << 'GH_AW_PROMPT_16ee1e8441be5194_EOF'
- GH_AW_PROMPT_9d4f180d5e7b4ad4_EOF
+ GH_AW_PROMPT_16ee1e8441be5194_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/agentic_workflows_guide.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/cache_memory_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_9d4f180d5e7b4ad4_EOF'
+ cat << 'GH_AW_PROMPT_16ee1e8441be5194_EOF'
Tools: create_discussion, upload_asset, missing_tool, missing_data, noop
@@ -183,14 +183,14 @@ jobs:
{{/if}}
- GH_AW_PROMPT_9d4f180d5e7b4ad4_EOF
+ GH_AW_PROMPT_16ee1e8441be5194_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_9d4f180d5e7b4ad4_EOF'
+ cat << 'GH_AW_PROMPT_16ee1e8441be5194_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/shared/python-dataviz.md}}
{{#runtime-import .github/workflows/daily-integrity-analysis.md}}
- GH_AW_PROMPT_9d4f180d5e7b4ad4_EOF
+ GH_AW_PROMPT_16ee1e8441be5194_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -373,10 +373,20 @@ jobs:
/tmp/gh-aw/python/*.py
/tmp/gh-aw/python/data/*
retention-days: 30
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Download integrity-filtered logs
- run: "mkdir -p /tmp/gh-aw/integrity\n# Download logs filtered to only runs with DIFC integrity-filtered events\n./gh-aw logs --filtered-integrity --start-date -7d --json -c 200 \\\n > /tmp/gh-aw/integrity/filtered-logs.json\n\nif [ -f /tmp/gh-aw/integrity/filtered-logs.json ]; then\n count=$(jq '. | length' /tmp/gh-aw/integrity/filtered-logs.json 2>/dev/null || echo 0)\n echo \"✅ Downloaded $count runs with integrity-filtered events\"\nelse\n echo \"⚠️ No logs file produced; continuing with empty dataset\"\n echo \"[]\" > /tmp/gh-aw/integrity/filtered-logs.json\nfi\n"
+ run: "mkdir -p /tmp/gh-aw/integrity\n# Download logs filtered to only runs with DIFC integrity-filtered events\ngh aw logs --filtered-integrity --start-date -7d --json -c 200 \\\n > /tmp/gh-aw/integrity/filtered-logs.json\n\nif [ -f /tmp/gh-aw/integrity/filtered-logs.json ]; then\n count=$(jq '. | length' /tmp/gh-aw/integrity/filtered-logs.json 2>/dev/null || echo 0)\n echo \"✅ Downloaded $count runs with integrity-filtered events\"\nelse\n echo \"⚠️ No logs file produced; continuing with empty dataset\"\n echo \"[]\" > /tmp/gh-aw/integrity/filtered-logs.json\nfi\n"
# Cache memory file share configuration from frontmatter processed below
- name: Create cache-memory directory
@@ -466,12 +476,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_01e469f28ff09895_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_957be5efcca6e31a_EOF'
{"create_discussion":{"category":"audits","close_older_discussions":true,"expires":72,"fallback_to_issue":true,"max":1,"title_prefix":"[integrity] "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"upload_asset":{"allowed-exts":[".png",".jpg",".jpeg"],"branch":"assets/${{ github.workflow }}","max-size":10240}}
- GH_AW_SAFE_OUTPUTS_CONFIG_01e469f28ff09895_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_957be5efcca6e31a_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_e7ca18548e543b5c_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_c7b098070178dca8_EOF'
{
"description_suffixes": {
"create_discussion": " CONSTRAINTS: Maximum 1 discussion(s) can be created. Title will be prefixed with \"[integrity] \". Discussions will be created in category \"audits\".",
@@ -480,8 +490,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_e7ca18548e543b5c_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_eae02aeebbe3850a_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_c7b098070178dca8_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_578fcb347771a5ba_EOF'
{
"create_discussion": {
"defaultMax": 1,
@@ -576,7 +586,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_eae02aeebbe3850a_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_578fcb347771a5ba_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -650,7 +660,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_111bcd5f25852278_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_a90c815e7f132927_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"agenticworkflows": {
@@ -710,7 +720,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_111bcd5f25852278_EOF
+ GH_AW_MCP_CONFIG_a90c815e7f132927_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/daily-integrity-analysis.md b/.github/workflows/daily-integrity-analysis.md
index 7e116f88e1..1a7d32b7c4 100644
--- a/.github/workflows/daily-integrity-analysis.md
+++ b/.github/workflows/daily-integrity-analysis.md
@@ -16,13 +16,23 @@ tracker-id: daily-integrity-analysis
engine: copilot
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Download integrity-filtered logs
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
mkdir -p /tmp/gh-aw/integrity
# Download logs filtered to only runs with DIFC integrity-filtered events
- ./gh-aw logs --filtered-integrity --start-date -7d --json -c 200 \
+ gh aw logs --filtered-integrity --start-date -7d --json -c 200 \
> /tmp/gh-aw/integrity/filtered-logs.json
if [ -f /tmp/gh-aw/integrity/filtered-logs.json ]; then
diff --git a/.github/workflows/daily-syntax-error-quality.lock.yml b/.github/workflows/daily-syntax-error-quality.lock.yml
index fbba38a298..3289497025 100644
--- a/.github/workflows/daily-syntax-error-quality.lock.yml
+++ b/.github/workflows/daily-syntax-error-quality.lock.yml
@@ -26,7 +26,7 @@
# Imports:
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"d46ca0b67a726081853ef7d2b9e15038063a2772aca1472e1284609871a6b705","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"474cbbcb1215a205651a5382f361c8c135e886ef171fdac0a000ddfa405f51c9","strict":true,"agent_id":"copilot"}
name: "Daily Syntax Error Quality Check"
"on":
@@ -132,14 +132,14 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_0dcc6139e05b0737_EOF'
+ cat << 'GH_AW_PROMPT_d2c6ca204f616938_EOF'
- GH_AW_PROMPT_0dcc6139e05b0737_EOF
+ GH_AW_PROMPT_d2c6ca204f616938_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_0dcc6139e05b0737_EOF'
+ cat << 'GH_AW_PROMPT_d2c6ca204f616938_EOF'
Tools: create_issue, missing_tool, missing_data, noop
@@ -171,13 +171,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_0dcc6139e05b0737_EOF
+ GH_AW_PROMPT_d2c6ca204f616938_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_0dcc6139e05b0737_EOF'
+ cat << 'GH_AW_PROMPT_d2c6ca204f616938_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/daily-syntax-error-quality.md}}
- GH_AW_PROMPT_0dcc6139e05b0737_EOF
+ GH_AW_PROMPT_d2c6ca204f616938_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -297,18 +297,16 @@ jobs:
run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh
env:
GH_TOKEN: ${{ github.token }}
- - name: Setup Go
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
- with:
- cache: true
- go-version-file: go.mod
- - name: Build gh-aw
- run: |
- make build
- - name: Verify gh-aw installation
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
run: |-
- ./gh-aw --version
- echo "gh-aw binary is ready at ./gh-aw"
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Configure Git credentials
env:
@@ -359,12 +357,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_5f03b6d985da1113_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_c8a7fecb4eb398bb_EOF'
{"create_issue":{"close_older_issues":true,"expires":72,"labels":["dx","error-messages","automated-analysis"],"max":1,"title_prefix":"[syntax-error-quality] "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}}
- GH_AW_SAFE_OUTPUTS_CONFIG_5f03b6d985da1113_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_c8a7fecb4eb398bb_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_c7996a926c12dfe4_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_6ac716c7b9859829_EOF'
{
"description_suffixes": {
"create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"[syntax-error-quality] \". Labels [\"dx\" \"error-messages\" \"automated-analysis\"] will be automatically added."
@@ -372,8 +370,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_c7996a926c12dfe4_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_05a882d6dae3547d_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_6ac716c7b9859829_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_fecfe109563c4de0_EOF'
{
"create_issue": {
"defaultMax": 1,
@@ -466,7 +464,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_05a882d6dae3547d_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_fecfe109563c4de0_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -536,7 +534,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_96209b8e9e2ddb9a_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_427d7ff0f3021689_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"github": {
@@ -577,7 +575,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_96209b8e9e2ddb9a_EOF
+ GH_AW_MCP_CONFIG_427d7ff0f3021689_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
@@ -591,7 +589,6 @@ jobs:
# Copilot CLI tool arguments (sorted):
# --allow-tool github
# --allow-tool safeoutputs
- # --allow-tool shell(./gh-aw compile)
# --allow-tool shell(cat .github/workflows/*.md)
# --allow-tool shell(cat /tmp/*.md)
# --allow-tool shell(cat)
@@ -599,6 +596,7 @@ jobs:
# --allow-tool shell(date)
# --allow-tool shell(echo)
# --allow-tool shell(find .github/workflows -name '*.md' -type f ! -name 'daily-*.md' ! -name '*-test.md')
+ # --allow-tool shell(gh aw compile *)
# --allow-tool shell(grep)
# --allow-tool shell(head -n * .github/workflows/*.md)
# --allow-tool shell(head)
@@ -616,7 +614,7 @@ jobs:
touch /tmp/gh-aw/agent-step-summary.md
# shellcheck disable=SC1003
sudo -E awf --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --exclude-env GITHUB_MCP_SERVER_TOKEN --exclude-env MCP_GATEWAY_API_KEY --allow-domains api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --image-tag 0.25.13 --skip-pull --enable-api-proxy \
- -- /bin/bash -c '/usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --allow-tool github --allow-tool safeoutputs --allow-tool '\''shell(./gh-aw compile)'\'' --allow-tool '\''shell(cat .github/workflows/*.md)'\'' --allow-tool '\''shell(cat /tmp/*.md)'\'' --allow-tool '\''shell(cat)'\'' --allow-tool '\''shell(cp .github/workflows/*.md /tmp/*.md)'\'' --allow-tool '\''shell(date)'\'' --allow-tool '\''shell(echo)'\'' --allow-tool '\''shell(find .github/workflows -name '\''\'\'''\''*.md'\''\'\'''\'' -type f ! -name '\''\'\'''\''daily-*.md'\''\'\'''\'' ! -name '\''\'\'''\''*-test.md'\''\'\'''\'')'\'' --allow-tool '\''shell(grep)'\'' --allow-tool '\''shell(head -n * .github/workflows/*.md)'\'' --allow-tool '\''shell(head)'\'' --allow-tool '\''shell(ls)'\'' --allow-tool '\''shell(pwd)'\'' --allow-tool '\''shell(sort)'\'' --allow-tool '\''shell(tail)'\'' --allow-tool '\''shell(uniq)'\'' --allow-tool '\''shell(wc)'\'' --allow-tool '\''shell(yq)'\'' --allow-tool write --allow-all-paths --add-dir "${GITHUB_WORKSPACE}" --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
+ -- /bin/bash -c '/usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --allow-tool github --allow-tool safeoutputs --allow-tool '\''shell(cat .github/workflows/*.md)'\'' --allow-tool '\''shell(cat /tmp/*.md)'\'' --allow-tool '\''shell(cat)'\'' --allow-tool '\''shell(cp .github/workflows/*.md /tmp/*.md)'\'' --allow-tool '\''shell(date)'\'' --allow-tool '\''shell(echo)'\'' --allow-tool '\''shell(find .github/workflows -name '\''\'\'''\''*.md'\''\'\'''\'' -type f ! -name '\''\'\'''\''daily-*.md'\''\'\'''\'' ! -name '\''\'\'''\''*-test.md'\''\'\'''\'')'\'' --allow-tool '\''shell(gh aw compile *)'\'' --allow-tool '\''shell(grep)'\'' --allow-tool '\''shell(head -n * .github/workflows/*.md)'\'' --allow-tool '\''shell(head)'\'' --allow-tool '\''shell(ls)'\'' --allow-tool '\''shell(pwd)'\'' --allow-tool '\''shell(sort)'\'' --allow-tool '\''shell(tail)'\'' --allow-tool '\''shell(uniq)'\'' --allow-tool '\''shell(wc)'\'' --allow-tool '\''shell(yq)'\'' --allow-tool write --allow-all-paths --add-dir "${GITHUB_WORKSPACE}" --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
env:
COPILOT_AGENT_RUNNER_TYPE: STANDALONE
COPILOT_GITHUB_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/daily-syntax-error-quality.md b/.github/workflows/daily-syntax-error-quality.md
index 77a1368df7..8e5ff198a9 100644
--- a/.github/workflows/daily-syntax-error-quality.md
+++ b/.github/workflows/daily-syntax-error-quality.md
@@ -16,7 +16,7 @@ tools:
- default
bash:
- "find .github/workflows -name '*.md' -type f ! -name 'daily-*.md' ! -name '*-test.md'"
- - "./gh-aw compile"
+ - "gh aw compile *"
- "cat .github/workflows/*.md"
- "head -n * .github/workflows/*.md"
- "cp .github/workflows/*.md /tmp/*.md"
@@ -31,20 +31,16 @@ safe-outputs:
timeout-minutes: 20
strict: true
steps:
- - name: Setup Go
- uses: actions/setup-go@v6.4.0
- with:
- go-version-file: go.mod
- cache: true
-
- - name: Build gh-aw
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
- make build
-
- - name: Verify gh-aw installation
- run: |
- ./gh-aw --version
- echo "gh-aw binary is ready at ./gh-aw"
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
imports:
- shared/reporting.md
features:
@@ -70,7 +66,7 @@ Test the quality of compiler error messages by:
- **Repository**: ${{ github.repository }}
- **Workspace**: ${{ github.workspace }}
-- **Compiler**: ./gh-aw
+- **Compiler**: gh aw
## Phase 1: Select Test Workflows
@@ -182,7 +178,7 @@ For each test case:
1. **Attempt to compile** the modified workflow:
```bash
cd /tmp/syntax-error-tests
- ./gh-aw compile test-1.md 2>&1 | tee test-1-output.txt
+ gh aw compile test-1.md 2>&1 | tee test-1-output.txt
```
2. **Capture the full output** including:
diff --git a/.github/workflows/portfolio-analyst.lock.yml b/.github/workflows/portfolio-analyst.lock.yml
index c251b17ac1..b0b6e2e515 100644
--- a/.github/workflows/portfolio-analyst.lock.yml
+++ b/.github/workflows/portfolio-analyst.lock.yml
@@ -29,7 +29,7 @@
# - shared/reporting.md
# - shared/trending-charts-simple.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"9f1e7fa8b83dd91aefa585f492548276a6a14a828a71b90d6480f5d716a8858e","strict":true,"agent_id":"copilot"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"96f96cc39cacd29bba2b533a731b98b8f753665731894d429968bc6976d10354","strict":true,"agent_id":"copilot"}
name: "Automated Portfolio Analyst"
"on":
@@ -141,16 +141,16 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_67651ce27540d996_EOF'
+ cat << 'GH_AW_PROMPT_fdfde4a7c37fbe35_EOF'
- GH_AW_PROMPT_67651ce27540d996_EOF
+ GH_AW_PROMPT_fdfde4a7c37fbe35_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/agentic_workflows_guide.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/cache_memory_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_67651ce27540d996_EOF'
+ cat << 'GH_AW_PROMPT_fdfde4a7c37fbe35_EOF'
Tools: create_discussion, upload_asset, missing_tool, missing_data, noop
@@ -184,15 +184,15 @@ jobs:
{{/if}}
- GH_AW_PROMPT_67651ce27540d996_EOF
+ GH_AW_PROMPT_fdfde4a7c37fbe35_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_67651ce27540d996_EOF'
+ cat << 'GH_AW_PROMPT_fdfde4a7c37fbe35_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/shared/jqschema.md}}
{{#runtime-import .github/workflows/shared/trending-charts-simple.md}}
{{#runtime-import .github/workflows/portfolio-analyst.md}}
- GH_AW_PROMPT_67651ce27540d996_EOF
+ GH_AW_PROMPT_fdfde4a7c37fbe35_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -373,12 +373,22 @@ jobs:
/tmp/gh-aw/python/*.py
/tmp/gh-aw/python/data/*
retention-days: 30
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
name: Download logs from last 30 days
run: |-
mkdir -p /tmp/portfolio-logs
- ./gh-aw logs --start-date -30d -c 5000 -o /tmp/portfolio-logs --json > /tmp/portfolio-logs/summary.json
+ gh aw logs --start-date -30d -c 5000 -o /tmp/portfolio-logs --json > /tmp/portfolio-logs/summary.json
# Cache memory file share configuration from frontmatter processed below
- name: Create cache-memory directory
@@ -468,12 +478,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_c146f3b0f8cc5c7f_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_f762f8a8df5b4160_EOF'
{"create_discussion":{"category":"audits","close_older_discussions":true,"expires":24,"fallback_to_issue":true,"max":1,"title_prefix":"[portfolio] "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"upload_asset":{"allowed-exts":[".png",".jpg",".jpeg"],"branch":"assets/${{ github.workflow }}","max-size":10240}}
- GH_AW_SAFE_OUTPUTS_CONFIG_c146f3b0f8cc5c7f_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_f762f8a8df5b4160_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_ca789b65ceec8a9e_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_5a6b5c2f79a090c6_EOF'
{
"description_suffixes": {
"create_discussion": " CONSTRAINTS: Maximum 1 discussion(s) can be created. Title will be prefixed with \"[portfolio] \". Discussions will be created in category \"audits\".",
@@ -482,8 +492,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_ca789b65ceec8a9e_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_4abb4c60fd86b2e1_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_5a6b5c2f79a090c6_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_420e141b0eff5544_EOF'
{
"create_discussion": {
"defaultMax": 1,
@@ -578,7 +588,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_4abb4c60fd86b2e1_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_420e141b0eff5544_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -652,7 +662,7 @@ jobs:
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
mkdir -p /home/runner/.copilot
- cat << GH_AW_MCP_CONFIG_35aecd43eb78f4a2_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_90c67d3c6678dbe8_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"agenticworkflows": {
@@ -712,7 +722,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_35aecd43eb78f4a2_EOF
+ GH_AW_MCP_CONFIG_90c67d3c6678dbe8_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/portfolio-analyst.md b/.github/workflows/portfolio-analyst.md
index 67abbff73d..6354be0f36 100644
--- a/.github/workflows/portfolio-analyst.md
+++ b/.github/workflows/portfolio-analyst.md
@@ -18,12 +18,22 @@ tools:
toolsets: [default]
bash: ["*"]
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Download logs from last 30 days
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
mkdir -p /tmp/portfolio-logs
- ./gh-aw logs --start-date -30d -c 5000 -o /tmp/portfolio-logs --json > /tmp/portfolio-logs/summary.json
+ gh aw logs --start-date -30d -c 5000 -o /tmp/portfolio-logs --json > /tmp/portfolio-logs/summary.json
safe-outputs:
upload-asset:
timeout-minutes: 20
diff --git a/.github/workflows/static-analysis-report.lock.yml b/.github/workflows/static-analysis-report.lock.yml
index 592a2cd8ef..8975fd3e94 100644
--- a/.github/workflows/static-analysis-report.lock.yml
+++ b/.github/workflows/static-analysis-report.lock.yml
@@ -26,7 +26,7 @@
# Imports:
# - shared/reporting.md
#
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"6e704ad580181ed88b36b117249a9b0079ae21982ff1e28edffa187b14b3262c","strict":true,"agent_id":"claude"}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"4d3b34422436feaf3ecc878115b3dffd83dc2daeec29a2636d73b77498076454","strict":true,"agent_id":"claude"}
name: "Static Analysis Report"
"on":
@@ -138,16 +138,16 @@ jobs:
run: |
bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh
{
- cat << 'GH_AW_PROMPT_6e5ccf7f66d1a4f3_EOF'
+ cat << 'GH_AW_PROMPT_a7a2b1849ed34e29_EOF'
- GH_AW_PROMPT_6e5ccf7f66d1a4f3_EOF
+ GH_AW_PROMPT_a7a2b1849ed34e29_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/agentic_workflows_guide.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/cache_memory_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_6e5ccf7f66d1a4f3_EOF'
+ cat << 'GH_AW_PROMPT_a7a2b1849ed34e29_EOF'
Tools: create_discussion, missing_tool, missing_data, noop
@@ -179,13 +179,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_6e5ccf7f66d1a4f3_EOF
+ GH_AW_PROMPT_a7a2b1849ed34e29_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_6e5ccf7f66d1a4f3_EOF'
+ cat << 'GH_AW_PROMPT_a7a2b1849ed34e29_EOF'
{{#runtime-import .github/workflows/shared/reporting.md}}
{{#runtime-import .github/workflows/static-analysis-report.md}}
- GH_AW_PROMPT_6e5ccf7f66d1a4f3_EOF
+ GH_AW_PROMPT_a7a2b1849ed34e29_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
@@ -339,12 +339,22 @@ jobs:
run: bash ${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh
env:
GH_TOKEN: ${{ github.token }}
+ - env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: Install gh-aw CLI
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Pull static analysis Docker images
run: "set -e\necho \"Pulling Docker images for static analysis tools...\"\n\n# Pull zizmor Docker image\necho \"Pulling zizmor image...\"\ndocker pull ghcr.io/zizmorcore/zizmor:latest\n\n# Pull poutine Docker image\necho \"Pulling poutine image...\"\ndocker pull ghcr.io/boostsecurityio/poutine:latest\n\necho \"All static analysis Docker images pulled successfully\"\n"
- name: Verify static analysis tools
run: "set -e\necho \"Verifying static analysis tools are available...\"\n\n# Verify zizmor\necho \"Testing zizmor...\"\ndocker run --rm ghcr.io/zizmorcore/zizmor:latest --version || echo \"Warning: zizmor version check failed\"\n\n# Verify poutine\necho \"Testing poutine...\"\ndocker run --rm ghcr.io/boostsecurityio/poutine:latest --version || echo \"Warning: poutine version check failed\"\n\necho \"Static analysis tools verification complete\"\n"
- name: Run compile with security tools
- run: "set -e\necho \"Running gh aw compile with security tools to download Docker images...\"\n\n# Run compile with all security scanner flags to download Docker images\n# Store the output in a file for inspection\n./gh-aw compile --zizmor --poutine --actionlint 2>&1 | tee /tmp/gh-aw/compile-output.txt\n\necho \"Compile with security tools completed\"\necho \"Output saved to /tmp/gh-aw/compile-output.txt\""
+ run: "set -e\necho \"Running gh aw compile with security tools to download Docker images...\"\n\n# Run compile with all security scanner flags to download Docker images\n# Store the output in a file for inspection\ngh aw compile --zizmor --poutine --actionlint 2>&1 | tee /tmp/gh-aw/compile-output.txt\n\necho \"Compile with security tools completed\"\necho \"Output saved to /tmp/gh-aw/compile-output.txt\""
# Cache memory file share configuration from frontmatter processed below
- name: Create cache-memory directory
@@ -437,12 +447,12 @@ jobs:
mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_1b8734fcff99da3d_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_6d5779da1ff5b48a_EOF'
{"create_discussion":{"category":"security","close_older_discussions":true,"expires":24,"fallback_to_issue":true,"max":1},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}}
- GH_AW_SAFE_OUTPUTS_CONFIG_1b8734fcff99da3d_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_6d5779da1ff5b48a_EOF
- name: Write Safe Outputs Tools
run: |
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_617cf60cfa47f987_EOF'
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_3b863b81f42bb639_EOF'
{
"description_suffixes": {
"create_discussion": " CONSTRAINTS: Maximum 1 discussion(s) can be created. Discussions will be created in category \"security\"."
@@ -450,8 +460,8 @@ jobs:
"repo_params": {},
"dynamic_tools": []
}
- GH_AW_SAFE_OUTPUTS_TOOLS_META_617cf60cfa47f987_EOF
- cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_06cd6c1994b8e950_EOF'
+ GH_AW_SAFE_OUTPUTS_TOOLS_META_3b863b81f42bb639_EOF
+ cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_4ba8849c6dc59d40_EOF'
{
"create_discussion": {
"defaultMax": 1,
@@ -537,7 +547,7 @@ jobs:
}
}
}
- GH_AW_SAFE_OUTPUTS_VALIDATION_06cd6c1994b8e950_EOF
+ GH_AW_SAFE_OUTPUTS_VALIDATION_4ba8849c6dc59d40_EOF
node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs
- name: Generate Safe Outputs MCP Server Config
id: safe-outputs-config
@@ -607,7 +617,7 @@ jobs:
export GH_AW_ENGINE="claude"
export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12'
- cat << GH_AW_MCP_CONFIG_aa8130bb400806ad_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
+ cat << GH_AW_MCP_CONFIG_d7f650b7f588b3cb_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh
{
"mcpServers": {
"agenticworkflows": {
@@ -665,7 +675,7 @@ jobs:
"payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}"
}
}
- GH_AW_MCP_CONFIG_aa8130bb400806ad_EOF
+ GH_AW_MCP_CONFIG_d7f650b7f588b3cb_EOF
- name: Download activation artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
with:
diff --git a/.github/workflows/static-analysis-report.md b/.github/workflows/static-analysis-report.md
index 7083dbb7a1..0d7917f9b3 100644
--- a/.github/workflows/static-analysis-report.md
+++ b/.github/workflows/static-analysis-report.md
@@ -28,6 +28,16 @@ strict: true
imports:
- shared/reporting.md
steps:
+ - name: Install gh-aw CLI
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ if gh extension list | grep -q "github/gh-aw"; then
+ gh extension upgrade gh-aw || true
+ else
+ gh extension install github/gh-aw
+ fi
+ gh aw --version
- name: Pull static analysis Docker images
run: |
set -e
@@ -63,7 +73,7 @@ steps:
# Run compile with all security scanner flags to download Docker images
# Store the output in a file for inspection
- ./gh-aw compile --zizmor --poutine --actionlint 2>&1 | tee /tmp/gh-aw/compile-output.txt
+ gh aw compile --zizmor --poutine --actionlint 2>&1 | tee /tmp/gh-aw/compile-output.txt
echo "Compile with security tools completed"
echo "Output saved to /tmp/gh-aw/compile-output.txt"