diff --git a/.github/workflows/claude-token-usage-analyzer.lock.yml b/.github/workflows/claude-token-usage-analyzer.lock.yml index dc7173a33c..1178ad8eef 100644 --- a/.github/workflows/claude-token-usage-analyzer.lock.yml +++ b/.github/workflows/claude-token-usage-analyzer.lock.yml @@ -26,7 +26,7 @@ # Imports: # - shared/reporting.md # -# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"3e1a8c2ef3e9e0df9edb0b63b53c86db6e4a7f5d018a85974579ae4733ab5041","strict":true,"agent_id":"copilot"} +# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"5c042e9e442b822988d57ea30644cddb18c6992ee6beb461b84ac594f80f47e6","strict":true,"agent_id":"copilot"} name: "Claude Token Usage Analyzer" "on": @@ -132,14 +132,14 @@ jobs: run: | bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh { - cat << 'GH_AW_PROMPT_412baf5b8763ffab_EOF' + cat << 'GH_AW_PROMPT_0ecfe1847da03198_EOF' - GH_AW_PROMPT_412baf5b8763ffab_EOF + GH_AW_PROMPT_0ecfe1847da03198_EOF cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md" cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md" cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md" cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md" - cat << 'GH_AW_PROMPT_412baf5b8763ffab_EOF' + cat << 'GH_AW_PROMPT_0ecfe1847da03198_EOF' Tools: create_issue, missing_tool, missing_data, noop @@ -171,13 +171,13 @@ jobs: {{/if}} - GH_AW_PROMPT_412baf5b8763ffab_EOF + GH_AW_PROMPT_0ecfe1847da03198_EOF cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md" - cat << 'GH_AW_PROMPT_412baf5b8763ffab_EOF' + cat << 'GH_AW_PROMPT_0ecfe1847da03198_EOF' {{#runtime-import .github/workflows/shared/reporting.md}} {{#runtime-import .github/workflows/claude-token-usage-analyzer.md}} - GH_AW_PROMPT_412baf5b8763ffab_EOF + GH_AW_PROMPT_0ecfe1847da03198_EOF } > "$GH_AW_PROMPT" - name: Interpolate variables and render templates uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 @@ -310,7 +310,7 @@ jobs: - env: GH_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} name: Download Claude workflow runs (last 24h) - run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer-claude\n\necho \"📥 Downloading Claude workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine claude \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer-claude/claude-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Claude workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\nARTIFACT_DIR=\"/tmp/token-analyzer-claude/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[].databaseId' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null | head -50 | while read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer-claude/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" | while read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. 
+ {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n" + run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer-claude\n\necho \"📥 Downloading Claude workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine claude \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer-claude/claude-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Claude workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\nARTIFACT_DIR=\"/tmp/token-analyzer-claude/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[0:50][]?.databaseId' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null > /tmp/token-analyzer-claude/run-ids.txt || true\nwhile read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone < /tmp/token-analyzer-claude/run-ids.txt\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer-claude/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" > /tmp/token-analyzer-claude/jsonl-files.txt 2>/dev/null || true\nwhile read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. 
+ {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone < /tmp/token-analyzer-claude/jsonl-files.txt\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n" - name: Configure Git credentials env: @@ -361,12 +361,12 @@ jobs: mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs mkdir -p /tmp/gh-aw/safeoutputs mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs - cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_f14cbde2b02cb022_EOF' + cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_c33e76cf19082c1c_EOF' {"create_issue":{"close_older_issues":true,"expires":48,"labels":["automated-analysis","token-usage","claude"],"max":1,"title_prefix":"📊 Claude Token Usage Report: "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}} - GH_AW_SAFE_OUTPUTS_CONFIG_f14cbde2b02cb022_EOF + GH_AW_SAFE_OUTPUTS_CONFIG_c33e76cf19082c1c_EOF - name: Write Safe Outputs Tools run: | - cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_555736a39a322223_EOF' + cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_71ab35b782bdd9cc_EOF' { "description_suffixes": { "create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"📊 Claude Token Usage Report: \". Labels [\"automated-analysis\" \"token-usage\" \"claude\"] will be automatically added." 
@@ -374,8 +374,8 @@ jobs: "repo_params": {}, "dynamic_tools": [] } - GH_AW_SAFE_OUTPUTS_TOOLS_META_555736a39a322223_EOF - cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_dfb0f30952b3934c_EOF' + GH_AW_SAFE_OUTPUTS_TOOLS_META_71ab35b782bdd9cc_EOF + cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_2c61931508ce5446_EOF' { "create_issue": { "defaultMax": 1, @@ -468,7 +468,7 @@ jobs: } } } - GH_AW_SAFE_OUTPUTS_VALIDATION_dfb0f30952b3934c_EOF + GH_AW_SAFE_OUTPUTS_VALIDATION_2c61931508ce5446_EOF node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs - name: Generate Safe Outputs MCP Server Config id: safe-outputs-config @@ -538,7 +538,7 @@ jobs: export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12' mkdir -p 
/home/runner/.copilot - cat << GH_AW_MCP_CONFIG_6a7305353a2989be_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh + cat << GH_AW_MCP_CONFIG_62f3de0818863ac6_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh { "mcpServers": { "github": { @@ -579,7 +579,7 @@ jobs: "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}" } } - GH_AW_MCP_CONFIG_6a7305353a2989be_EOF + GH_AW_MCP_CONFIG_62f3de0818863ac6_EOF - name: Download activation artifact uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: diff --git a/.github/workflows/claude-token-usage-analyzer.md b/.github/workflows/claude-token-usage-analyzer.md index 3fbf3da7d0..13d01879d0 100644 --- a/.github/workflows/claude-token-usage-analyzer.md +++ b/.github/workflows/claude-token-usage-analyzer.md @@ -69,7 +69,8 @@ steps: mkdir -p "$ARTIFACT_DIR" echo "📥 Downloading token-usage.jsonl artifacts..." - jq -r '.[].databaseId' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null | head -50 | while read -r run_id; do + jq -r '.[0:50][]?.databaseId' /tmp/token-analyzer-claude/claude-runs.json 2>/dev/null > /tmp/token-analyzer-claude/run-ids.txt || true + while read -r run_id; do run_dir="$ARTIFACT_DIR/$run_id" mkdir -p "$run_dir" gh run download "$run_id" \ @@ -77,7 +78,7 @@ steps: --name "firewall-audit-logs" \ --dir "$run_dir" \ 2>/dev/null || true - done + done < /tmp/token-analyzer-claude/run-ids.txt # Count how many token-usage.jsonl files we got JSONL_COUNT=$(find "$ARTIFACT_DIR" -name "token-usage.jsonl" 2>/dev/null | wc -l) @@ -86,14 +87,15 @@ steps: # Merge all token-usage.jsonl files annotated with run_id MERGED_FILE="/tmp/token-analyzer-claude/token-usage-merged.jsonl" > "$MERGED_FILE" - find "$ARTIFACT_DIR" -name "token-usage.jsonl" | while read -r f; do + find "$ARTIFACT_DIR" -name "token-usage.jsonl" > /tmp/token-analyzer-claude/jsonl-files.txt 2>/dev/null || true + while read -r f; do run_id=$(echo "$f" | grep -oP '(?<=/artifacts/)\d+(?=/)' || true) while IFS= read -r line; do if [ 
-n "$line" ]; then echo "${line}" | jq --arg run_id "$run_id" '. + {run_id: $run_id}' >> "$MERGED_FILE" 2>/dev/null || true fi done < "$f" - done + done < /tmp/token-analyzer-claude/jsonl-files.txt RECORD_COUNT=$(wc -l < "$MERGED_FILE" 2>/dev/null || echo 0) echo "✅ Merged ${RECORD_COUNT} token usage records" diff --git a/.github/workflows/copilot-token-usage-analyzer.lock.yml b/.github/workflows/copilot-token-usage-analyzer.lock.yml index 4904d5b8ab..2f00dbb165 100644 --- a/.github/workflows/copilot-token-usage-analyzer.lock.yml +++ b/.github/workflows/copilot-token-usage-analyzer.lock.yml @@ -26,7 +26,7 @@ # Imports: # - shared/reporting.md # -# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"f421662fd7913377179bd64aea41e05f2d2765cef7b22e69444578e387a93601","strict":true,"agent_id":"copilot"} +# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"b1ce75410b09f89b58a1f4df88bc49635b77fb7d802528b5869f504e3085b94b","strict":true,"agent_id":"copilot"} name: "Copilot Token Usage Analyzer" "on": @@ -132,14 +132,14 @@ jobs: run: | bash ${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh { - cat << 'GH_AW_PROMPT_dbcef2ee4ad58e02_EOF' + cat << 'GH_AW_PROMPT_77b51c2908cd9c50_EOF' - GH_AW_PROMPT_dbcef2ee4ad58e02_EOF + GH_AW_PROMPT_77b51c2908cd9c50_EOF cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md" cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md" cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md" cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md" - cat << 'GH_AW_PROMPT_dbcef2ee4ad58e02_EOF' + cat << 'GH_AW_PROMPT_77b51c2908cd9c50_EOF' Tools: create_issue, missing_tool, missing_data, noop @@ -171,13 +171,13 @@ jobs: {{/if}} - GH_AW_PROMPT_dbcef2ee4ad58e02_EOF + GH_AW_PROMPT_77b51c2908cd9c50_EOF cat "${RUNNER_TEMP}/gh-aw/prompts/github_mcp_tools_with_safeoutputs_prompt.md" - cat << 'GH_AW_PROMPT_dbcef2ee4ad58e02_EOF' + cat << 'GH_AW_PROMPT_77b51c2908cd9c50_EOF' {{#runtime-import .github/workflows/shared/reporting.md}} {{#runtime-import 
.github/workflows/copilot-token-usage-analyzer.md}} - GH_AW_PROMPT_dbcef2ee4ad58e02_EOF + GH_AW_PROMPT_77b51c2908cd9c50_EOF } > "$GH_AW_PROMPT" - name: Interpolate variables and render templates uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 @@ -310,7 +310,7 @@ jobs: - env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} name: Download Copilot workflow runs (last 24h) - run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer\n\necho \"📥 Downloading Copilot workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine copilot \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer/copilot-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Copilot workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\n# We look for the firewall-audit-logs artifact which contains token-usage.jsonl\nARTIFACT_DIR=\"/tmp/token-analyzer/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[].databaseId' /tmp/token-analyzer/copilot-runs.json 2>/dev/null | head -50 | while read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files into a single aggregate file annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" | while read -r f; do\n run_id=$(echo \"$f\" | grep -oP '(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read 
-r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. + {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n" + run: "set -euo pipefail\nmkdir -p /tmp/token-analyzer\n\necho \"📥 Downloading Copilot workflow runs from last 24 hours...\"\ngh aw logs \\\n --engine copilot \\\n --start-date -1d \\\n --json \\\n -c 300 \\\n > /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo \"[]\" > /tmp/token-analyzer/copilot-runs.json\n\nRUN_COUNT=$(jq '. | length' /tmp/token-analyzer/copilot-runs.json 2>/dev/null || echo 0)\necho \"✅ Found ${RUN_COUNT} Copilot workflow runs\"\n\n# Download token-usage.jsonl artifacts for per-model breakdown\n# We look for the firewall-audit-logs artifact which contains token-usage.jsonl\nARTIFACT_DIR=\"/tmp/token-analyzer/artifacts\"\nmkdir -p \"$ARTIFACT_DIR\"\n\necho \"📥 Downloading token-usage.jsonl artifacts...\"\njq -r '.[0:50][]?.databaseId' /tmp/token-analyzer/copilot-runs.json 2>/dev/null > /tmp/token-analyzer/run-ids.txt || true\nwhile read -r run_id; do\n run_dir=\"$ARTIFACT_DIR/$run_id\"\n mkdir -p \"$run_dir\"\n gh run download \"$run_id\" \\\n --repo \"$GITHUB_REPOSITORY\" \\\n --name \"firewall-audit-logs\" \\\n --dir \"$run_dir\" \\\n 2>/dev/null || true\ndone < /tmp/token-analyzer/run-ids.txt\n\n# Count how many token-usage.jsonl files we got\nJSONL_COUNT=$(find \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" 2>/dev/null | wc -l)\necho \"✅ Downloaded ${JSONL_COUNT} token-usage.jsonl artifacts\"\n\n# Merge all token-usage.jsonl files into a single aggregate file annotated with run_id\nMERGED_FILE=\"/tmp/token-analyzer/token-usage-merged.jsonl\"\n> \"$MERGED_FILE\"\nfind \"$ARTIFACT_DIR\" -name \"token-usage.jsonl\" > /tmp/token-analyzer/jsonl-files.txt 2>/dev/null || true\nwhile read -r f; do\n run_id=$(echo \"$f\" | grep -oP 
'(?<=/artifacts/)\\d+(?=/)' || true)\n while IFS= read -r line; do\n if [ -n \"$line\" ]; then\n echo \"${line}\" | jq --arg run_id \"$run_id\" '. + {run_id: $run_id}' >> \"$MERGED_FILE\" 2>/dev/null || true\n fi\n done < \"$f\"\ndone < /tmp/token-analyzer/jsonl-files.txt\n\nRECORD_COUNT=$(wc -l < \"$MERGED_FILE\" 2>/dev/null || echo 0)\necho \"✅ Merged ${RECORD_COUNT} token usage records\"\n" - name: Configure Git credentials env: @@ -361,12 +361,12 @@ jobs: mkdir -p ${RUNNER_TEMP}/gh-aw/safeoutputs mkdir -p /tmp/gh-aw/safeoutputs mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs - cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_a06ad8de6b425354_EOF' + cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_0237bbe3896dae81_EOF' {"create_issue":{"close_older_issues":true,"expires":48,"labels":["automated-analysis","token-usage","copilot"],"max":1,"title_prefix":"📊 Copilot Token Usage Report: "},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"}} - GH_AW_SAFE_OUTPUTS_CONFIG_a06ad8de6b425354_EOF + GH_AW_SAFE_OUTPUTS_CONFIG_0237bbe3896dae81_EOF - name: Write Safe Outputs Tools run: | - cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_d7d241b36709f602_EOF' + cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/tools_meta.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_META_21502b3c7d44f092_EOF' { "description_suffixes": { "create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"📊 Copilot Token Usage Report: \". Labels [\"automated-analysis\" \"token-usage\" \"copilot\"] will be automatically added." 
@@ -374,8 +374,8 @@ jobs: "repo_params": {}, "dynamic_tools": [] } - GH_AW_SAFE_OUTPUTS_TOOLS_META_d7d241b36709f602_EOF - cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_0ef0226af1b9fefa_EOF' + GH_AW_SAFE_OUTPUTS_TOOLS_META_21502b3c7d44f092_EOF + cat > ${RUNNER_TEMP}/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_5df732c7ff691456_EOF' { "create_issue": { "defaultMax": 1, @@ -468,7 +468,7 @@ jobs: } } } - GH_AW_SAFE_OUTPUTS_VALIDATION_0ef0226af1b9fefa_EOF + GH_AW_SAFE_OUTPUTS_VALIDATION_5df732c7ff691456_EOF node ${RUNNER_TEMP}/gh-aw/actions/generate_safe_outputs_tools.cjs - name: Generate Safe Outputs MCP Server Config id: safe-outputs-config @@ -538,7 +538,7 @@ jobs: export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.2.12' mkdir -p 
/home/runner/.copilot - cat << GH_AW_MCP_CONFIG_bfbf4afe334a19cb_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh + cat << GH_AW_MCP_CONFIG_85f5bf5a0a3b4a96_EOF | bash ${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.sh { "mcpServers": { "github": { @@ -579,7 +579,7 @@ jobs: "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}" } } - GH_AW_MCP_CONFIG_bfbf4afe334a19cb_EOF + GH_AW_MCP_CONFIG_85f5bf5a0a3b4a96_EOF - name: Download activation artifact uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: diff --git a/.github/workflows/copilot-token-usage-analyzer.md b/.github/workflows/copilot-token-usage-analyzer.md index d5ba1d024b..33793fd52c 100644 --- a/.github/workflows/copilot-token-usage-analyzer.md +++ b/.github/workflows/copilot-token-usage-analyzer.md @@ -70,7 +70,8 @@ steps: mkdir -p "$ARTIFACT_DIR" echo "📥 Downloading token-usage.jsonl artifacts..." - jq -r '.[].databaseId' /tmp/token-analyzer/copilot-runs.json 2>/dev/null | head -50 | while read -r run_id; do + jq -r '.[0:50][]?.databaseId' /tmp/token-analyzer/copilot-runs.json 2>/dev/null > /tmp/token-analyzer/run-ids.txt || true + while read -r run_id; do run_dir="$ARTIFACT_DIR/$run_id" mkdir -p "$run_dir" gh run download "$run_id" \ @@ -78,7 +79,7 @@ steps: --name "firewall-audit-logs" \ --dir "$run_dir" \ 2>/dev/null || true - done + done < /tmp/token-analyzer/run-ids.txt # Count how many token-usage.jsonl files we got JSONL_COUNT=$(find "$ARTIFACT_DIR" -name "token-usage.jsonl" 2>/dev/null | wc -l) @@ -87,14 +88,15 @@ steps: # Merge all token-usage.jsonl files into a single aggregate file annotated with run_id MERGED_FILE="/tmp/token-analyzer/token-usage-merged.jsonl" > "$MERGED_FILE" - find "$ARTIFACT_DIR" -name "token-usage.jsonl" | while read -r f; do + find "$ARTIFACT_DIR" -name "token-usage.jsonl" > /tmp/token-analyzer/jsonl-files.txt 2>/dev/null || true + while read -r f; do run_id=$(echo "$f" | grep -oP '(?<=/artifacts/)\d+(?=/)' || true) while IFS= read -r 
line; do if [ -n "$line" ]; then echo "${line}" | jq --arg run_id "$run_id" '. + {run_id: $run_id}' >> "$MERGED_FILE" 2>/dev/null || true fi done < "$f" - done + done < /tmp/token-analyzer/jsonl-files.txt RECORD_COUNT=$(wc -l < "$MERGED_FILE" 2>/dev/null || echo 0) echo "✅ Merged ${RECORD_COUNT} token usage records"