Skip to content

Commit 4f398fd

Browse files
authored
Add gh_report.py: stats and logs for integration tests (#3115)
## Changes - New script: gh_parse.py that parses artifacts output by integration tests and prints a report: #3115 (comment) Can also print output for failed tests (--output) and filter by test name or env name. - New script: gh_report.py that downloads integration tests artifacts for current PR/branch or specified commit and calls gh_parse.py to print a report. ## Why Faster debugging compared to clicking via GitHub Actions UI. ## Tests Manually tested. ### Getting stats: ``` ~/work/cli-main % ./tools/gh_report.py --commit 1a96017 ``` ### Getting output: ``` ~/work/cli-main % ./tools/gh_report.py --commit 1a96017 --filter TestAbortBind --output ```
1 parent 40e78e8 commit 4f398fd

File tree

3 files changed

+471
-0
lines changed

3 files changed

+471
-0
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,3 +33,6 @@ test-output.json
3333
# Built by make for 'make fmt' and yamlcheck.py in acceptance tests
3434
tools/yamlfmt
3535
tools/yamlfmt.exe
36+
37+
# Cache for tools/gh_report.py
38+
.gh-logs

tools/gh_parse.py

Lines changed: 288 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,288 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Analyze downloaded GH logs and print a report. Use gh_report.py instead of this script directly.
4+
"""
5+
6+
import sys
7+
import json
8+
import argparse
9+
import re
10+
from collections import Counter
11+
from pathlib import Path
12+
13+
# Status labels shown in the report tables.
# \u200c is a zero-width non-joiner (invisible, but len() counts it as one
# character). It is added so that len() of each label matches its rendered
# width: ❌, ✅, 🔄 etc. each take the space of 2 characters on screen.
FLAKY = "🔄\u200cflaky"
FAIL = "❌\u200cFAIL"
PASS = "✅\u200cpass"
SKIP = "🙈\u200cskip"

# This happens when Eventually is used - there is output for the test but no result.
MISSING = "🤯\u200cMISS"
PANIC = "💥\u200cPANIC"

# Actions that warrant showing a test in the detailed (per-test) report.
INTERESTING_ACTIONS = (FAIL, FLAKY, PANIC, MISSING)
ACTIONS_WITH_ICON = INTERESTING_ACTIONS + (PASS, SKIP)

# Map raw `go test -json` "Action" values to the decorated labels above.
ACTION_MESSAGES = {
    "fail": FAIL,
    "pass": PASS,
    "skip": SKIP,
}
def cleanup_env(name):
    """
    Shorten a raw artifact directory name to "<cloud> <os>".

    Returns "" for directory names that are not test-output artifacts.

    >>> cleanup_env("test-output-aws-prod-is-linux-ubuntu-latest")
    'aws linux'

    >>> cleanup_env("test-output-gcp-prod-is-windows-server-latest")
    'gcp windows'

    >>> cleanup_env("test-output-azure-prod-ucws-is-linux-ubuntu-latest")
    'azure-ucws linux'
    """
    prefix = "test-output-"
    if not name.startswith(prefix):
        return ""
    result = name.removeprefix(prefix)
    # Apply the substitutions in order; "-prod-ucws-is-" must come before
    # "-prod-is-" so the UC-workspace variant keeps its "-ucws" tag.
    replacements = (
        ("-prod-ucws-is-", "-ucws-"),
        ("-prod-is-", "-"),
        ("-linux-ubuntu-latest", " linux"),
        ("-windows-server-latest", " windows"),
    )
    for old, new in replacements:
        result = result.replace(old, new)
    return result
def iter_path(filename):
    """Yield every file under *filename*.

    If *filename* is a file, yield just that path; otherwise recursively
    yield all files below the directory.

    Fixes two issues with the original:
    - the file case yielded the raw argument (possibly a str) while the
      directory case yielded Path objects; now every yield is a Path;
    - `Path.walk()` requires Python 3.12+, whereas `Path.rglob` is
      available everywhere and likewise ignores unreadable directories.
    """
    p = Path(filename)
    if p.is_file():
        yield p
        return
    for q in p.rglob("*"):
        if q.is_file():
            yield q
def iter_paths(paths):
    """Flatten several file-or-directory arguments into one stream of files."""
    for entry in paths:
        yield from iter_path(entry)
def parse_file(path, filter):
    """Parse one `go test -json` log file.

    Args:
        path (Path): file containing one JSON object per line.
        filter (str | None): if set, only tests whose name contains this
            substring are included. (Name shadows the builtin; kept for
            interface compatibility with callers.)

    Returns:
        (results, outputs):
            results: test name -> one of FAIL/PASS/SKIP/FLAKY/PANIC/MISSING.
            outputs: test name -> list of stripped output lines.
    """
    results = {}
    outputs = {}
    for line in path.open():
        if not line.strip():
            continue
        try:
            data = json.loads(line)
        except Exception as ex:
            # Stop at the first malformed line; logs truncated mid-write
            # would otherwise produce a stream of errors.
            print(f"(unknown): {ex}\n{line!r}\n")
            break
        testname = data.get("Test")
        if not testname:
            continue
        if filter and filter not in testname:
            continue
        action = data.get("Action")

        action = ACTION_MESSAGES.get(action, action)

        if action in (FAIL, PASS, SKIP):
            prev = results.get(testname)
            if prev == FAIL and action == PASS:
                # A fail followed by a pass within the same file means the
                # test was retried and eventually succeeded.
                results[testname] = FLAKY
            else:
                results[testname] = action

        out = data.get("Output")
        if out:
            outputs.setdefault(testname, []).append(out.rstrip())

    # Tests that produced output but no final result (e.g. when Eventually
    # is used, or the process panicked before reporting).
    for testname, lines in outputs.items():
        if testname in results:
            continue
        if "panic: " in str(lines):
            results.setdefault(testname, PANIC)
        else:
            # Bug fix: the original used `MISS`, an undefined name that
            # raised NameError on this path; the module constant is MISSING.
            results.setdefault(testname, MISSING)

    return results, outputs
def print_report(filenames, filter, filter_env, show_output, markdown=False):
    """Aggregate parsed test results across all artifact files and print:
    a per-env summary table, a per-test table of "interesting" results
    (fail/flaky/panic/missing), and optionally the captured output for
    each interesting test.

    Args:
        filenames: files or directories to scan (expanded via iter_paths).
        filter: substring filter on test names (passed to parse_file).
        filter_env: substring filter on env names.
        show_output: when True, print captured output for interesting tests.
        markdown: when True, emit GitHub-flavored markdown tables/fences.
    """
    outputs = {}  # testname -> env -> [output]
    per_test_per_env_stats = {}  # testname -> env -> action -> count
    all_testnames = set()
    all_envs = set()
    count_files = 0
    count_results = 0
    for filename in iter_paths(filenames):
        p = Path(filename)
        # The artifact directory name (the file's parent) encodes the env.
        env = cleanup_env(p.parent.name)
        if not env:
            # NOTE(review): f-string has no placeholder; this was likely
            # meant to include the filename — confirm with the author.
            print(f"Ignoring (unknown): cannot extract env")
            continue
        if filter_env and filter_env not in env:
            continue
        all_envs.add(env)
        test_results, test_outputs = parse_file(p, filter)
        count_files += 1
        count_results += len(test_results)
        for testname, action in test_results.items():
            per_test_per_env_stats.setdefault(testname, {}).setdefault(env, Counter())[action] += 1
        for testname, output in test_outputs.items():
            outputs.setdefault(testname, {}).setdefault(env, []).extend(output)
        all_testnames.update(test_results)

    print(f"Parsed {count_files} files: {count_results} results", file=sys.stderr, flush=True)

    # Check for missing tests: a top-level test seen in some env but absent
    # from another is recorded there as MISSING.
    for testname in all_testnames:
        # It is possible for a test to be missing if its parent is skipped; ignore test cases with a parent.
        # For acceptance tests, ignore tests with subtests produced via EnvMatrix
        if testname.startswith("TestAccept/") and "=" in testname:
            continue
        # For non-acceptance tests ignore all subtests.
        if not testname.startswith("TestAccept/") and "/" in testname:
            continue
        test_results = per_test_per_env_stats.get(testname, {})
        for e in all_envs:
            if e not in test_results:
                test_results.setdefault(e, Counter())[MISSING] += 1

    per_env_stats = {}  # env -> action -> count
    for testname, items in per_test_per_env_stats.items():
        for env, stats in items.items():
            per_env_stats.setdefault(env, Counter()).update(stats)

    # Per-env summary table: one row per env, with a status icon chosen by
    # severity order (ACTIONS_WITH_ICON lists the worst actions first).
    table = []
    for env, stats in sorted(per_env_stats.items()):
        status = "??"
        for action in ACTIONS_WITH_ICON:
            if action in stats:
                status = action[:2]
                break

        table.append(
            {
                " ": status,
                "Env": env,
                **stats,
            }
        )
    print_table(table, markdown=markdown)

    # Envs that have at least one interesting result; only these get pass/skip
    # cells filled in below, to keep the per-test table narrow.
    interesting_envs = set()
    for env, stats in per_env_stats.items():
        for act in INTERESTING_ACTIONS:
            if act in stats:
                interesting_envs.add(env)
                break

    simplified_results = {}  # testname -> env -> action
    for testname, items in sorted(per_test_per_env_stats.items()):
        per_testname_result = simplified_results.setdefault(testname, {})
        # first select tests with interesting actions (anything but pass or skip)
        for env, counts in items.items():
            for action in INTERESTING_ACTIONS:
                if action in counts:
                    per_testname_result.setdefault(env, action)
                    break

        # Once we know test is interesting, complete the row
        if per_testname_result:
            for env, counts in items.items():
                if env not in interesting_envs:
                    continue
                for action in (PASS, SKIP):
                    if action in counts:
                        per_testname_result.setdefault(env, action)
                        break

        # Drop tests that had nothing interesting in any env.
        if not per_testname_result:
            per_testname_result = simplified_results.pop(testname)

    table = []
    for testname, items in simplified_results.items():
        table.append(
            {
                "Test Name": testname,
                **items,
            }
        )
    print_table(table, markdown=markdown)

    if show_output:
        # Print captured output for every interesting (test, env) pair.
        for testname, stats in simplified_results.items():
            for env, action in stats.items():
                if action not in INTERESTING_ACTIONS:
                    continue
                out = "\n".join(outputs.get(testname, {}).get(env, []))
                if markdown:
                    print(f"### {env} {testname} {action}\n```\n{out}\n```")
                else:
                    print(f"### {env} {testname} {action}\n{out}")
                if out:
                    print()
def print_table(table, columns=None, markdown=False):
    """
    Pretty-print a list-of-dicts as an aligned text table.

    Args:
        table (list[dict]): the data rows
        columns (list[str]): header names & column order; when omitted,
            derived as the sorted union of all row keys
        markdown (bool): whether to output in markdown format

    Prints nothing for an empty table; otherwise the table is followed by
    a blank line.
    """
    if not table:
        return

    if columns is None:
        # Collect every key seen in any row, de-duplicated, then sorted.
        columns = []
        seen = set()
        for row in table:
            for key in row:
                if key in seen:
                    continue
                seen.add(key)
                columns.append(key)
        columns.sort()

    # Each column is as wide as its widest cell (or its header).
    widths = [len(col) for col in columns]
    for row in table:
        for i, col in enumerate(columns):
            widths[i] = max(widths[i], len(str(row.get(col, ""))))

    if markdown:
        # Header
        print("| " + " | ".join(str(col).ljust(w) for col, w in zip(columns, widths)) + " |")
        # Separator
        print("| " + " | ".join("-" * w for w in widths) + " |")
        # Data rows
        for row in table:
            print("| " + " | ".join(str(row.get(col, "")).ljust(w) for col, w in zip(columns, widths)) + " |")
    else:
        # A named function instead of a lambda assignment (PEP 8 E731).
        def fmt(cells):
            return " ".join(str(cell).ljust(w) for cell, w in zip(cells, widths))

        print(fmt(columns))
        # Fix: the original enumerated rows but never used the index.
        for row in table:
            print(fmt([row.get(col, "") for col in columns]))

    print()
def main():
    """Command-line entry point: parse CLI options and delegate to print_report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("filenames", nargs="+", help="Filenames or directories to parse")
    # Substring-match filters.
    for flag, help_text in (
        ("--filter", "Filter results by test name (substring match)"),
        ("--filter-env", "Filter results by env name (substring match)"),
    ):
        parser.add_argument(flag, help=help_text)
    # Boolean switches.
    for flag, help_text in (
        ("--output", "Show output for failed tests"),
        ("--markdown", "Output in GitHub-flavored markdown format"),
    ):
        parser.add_argument(flag, help=help_text, action="store_true")
    args = parser.parse_args()
    print_report(
        args.filenames,
        filter=args.filter,
        filter_env=args.filter_env,
        show_output=args.output,
        markdown=args.markdown,
    )


if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)