diff --git a/.gitignore b/.gitignore index 43a5546..2829503 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ *.pyc config.yaml kb.json +*.swp +.cache/ +.pytest_cache/ diff --git a/README.md b/README.md index 8d16aca..ebdadea 100644 --- a/README.md +++ b/README.md @@ -1,100 +1,40 @@ -A set of scripts for easier parsing and batch processing of the Jenkins test reports. - -Usage Examples: - -Parse all tiers for the last completed builds of the job -``` -In [1]: import claims - -In [2]: claims.config -Out[2]: -{'bld': 'lastCompletedBuild', - 'job': 'automation-6.2-tier{0}-rhel{1}', - 'pwd': 'nbusr123', - 'url': 'https://jenkins.server.com', - 'usr': 'uradnik1'} - -In [5]: reports -Out[5]: -['t1': - [ - 'el6': [ - { - u'className': u'tests.foreman.cli.test_syncplan.SyncPlanTestCase', - u'errorDetails': None, - u'errorStackTrace': None, - u'name': u'test_negative_synchronize_custom_product_past_sync_date', - u'status': u'PASSED', - u'testActions': [], - 'url': u'https://jenkins.server.com/job/automation-6.2-tier4-rhel7/lastCompletedBuild/testReport/junit/tests.foreman.cli.test_syncplan/SyncPlanTestCase/test_negative_synchronize_custom_product_past_sync_date' - } - ], - 'el7': [ - {...} - ] - ], -'t2': [...], -... 
-] -``` - -Get a flat list of all failed tests: -``` -In [6]: failures = [] - -In [7]: for i in reports.keys(): - ...: for j in reports[i].keys(): - ...: failures += claims.parse_fails(reports[i][j]) - ...: - -In [8]: len(failures) -Out[8]: 324 -In [9]: failures -Out[9]: -[ - {u'className': u'tests.foreman.cli.test_syncplan.SyncPlanTestCase', - u'errorDetails': u'AssertionError: Repository contains invalid number of content entities', - u'errorStackTrace': u'self = to_time: + # break + return out diff --git a/claims/case.py b/claims/case.py new file mode 100644 index 0000000..214f562 --- /dev/null +++ b/claims/case.py @@ -0,0 +1,148 @@ +import collections +import re +import logging +import datetime +import requests + +from .config import config + + +class Case(collections.UserDict): + """ + Result of one test case + """ + + LOG_DATE_REGEXP = re.compile('^([0-9]{4}-[01][0-9]-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}) -') + LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S' + + def __init__(self, data): + self.data = data + + def __contains__(self, name): + return name in self.data or name in ('testName', 'testId', 'start', 'end', 'production.log') + + def __getitem__(self, name): + if name == 'testName': + self['testName'] = "%s.%s" % (self['className'], self['name']) + elif name == 'testId': + self['testId'] = "%s (tier%s, el%s)" % (self['testName'], self['tier'], self['distro']) + if name in ('start', 'end') and \ + ('start' not in self.data or 'end' not in self.data): + self.load_timings() + if name == 'production.log': + self['production.log'] = "\n".join( + ["\n".join(i['data']) for i in + self.data['OBJECT:production.log'].from_to( + self['start'], self['end'])]) + return self.data[name] + + def matches_to_rule(self, rule, indentation=0): + """ + Returns True if result matches to rule, otherwise returns False + """ + logging.debug("%srule_matches(%s, %s, %s)" % (" "*indentation, self['name'], rule, indentation)) + if 'field' in rule and 'pattern' in rule: + # This is simple 
rule, we can just check regexp against given field and we are done + try: + data = self[rule['field']] + if data is None: + data = '' + out = re.search(rule['pattern'], data) is not None + logging.debug("%s=> %s" % (" "*indentation, out)) + return out + except KeyError: + logging.debug("%s=> Failed to get field %s from case" % (" "*indentation, rule['field'])) + return None + elif 'AND' in rule: + # We need to check if all sub-rules in list of rules rule['AND'] matches + out = None + for r in rule['AND']: + r_out = self.matches_to_rule(r, indentation+4) + out = r_out if out is None else out and r_out + if not out: + break + return out + elif 'OR' in rule: + # We need to check if at least one sub-rule in list of rules rule['OR'] matches + for r in rule['OR']: + if self.matches_to_rule(r, indentation+4): + return True + return False + else: + raise Exception('Rule %s not formatted correctly' % rule) + + def push_claim(self, reason, sticky=False, propagate=False): + '''Claims a given test with a given reason + + :param reason: string with a comment added to a claim (ideally this is a link to a bug or issue) + + :param sticky: whether to make the claim sticky (False by default) + + :param propagate: should jenkins auto-claim next time if same test fails again? 
(False by default) + ''' + logging.info('claiming {0}::{1} with reason: {2}'.format(self["className"], self["name"], reason)) + + if config['headers'] is None: + config.init_headers() + + claim_req = requests.post( + u'{0}/claim/claim'.format(self['url']), + auth=requests.auth.HTTPBasicAuth( + config['usr'], + config['pwd'] + ), + data={u'json': u'{{"assignee": "", "reason": "{0}", "sticky": {1}, "propagateToFollowingBuilds": {2}}}'.format(reason, sticky, propagate)}, + headers=config['headers'], + allow_redirects=False, + verify=False + ) + + if claim_req.status_code != 302: + raise requests.HTTPError( + 'Failed to claim: {0}'.format(claim_req)) + + self['testActions'][0]['reason'] = reason + return(claim_req) + + def load_timings(self): + if self['stdout'] is None: + return + log = self['stdout'].split("\n") + log_size = len(log) + log_used = 0 + start = None + end = None + counter = 0 + while start is None: + match = self.LOG_DATE_REGEXP.match(log[counter]) + if match: + start = datetime.datetime.strptime(match.group(1), + self.LOG_DATE_FORMAT) + break + counter += 1 + log_used += counter + counter = -1 + while end is None: + match = self.LOG_DATE_REGEXP.match(log[counter]) + if match: + end = datetime.datetime.strptime(match.group(1), + self.LOG_DATE_FORMAT) + break + counter -= 1 + log_used -= counter + assert log_used <= log_size, \ + "Make sure detected start date is not below end date and vice versa" + self['start'] = start + self['end'] = end + + +def claim_by_rules(report, rules, dryrun=False): + claimed = [] + for rule in rules: + for case in [i for i in report if i['status'] in config.FAIL_STATUSES and not i['testActions'][0].get('reason')]: + if case.matches_to_rule(rule): + logging.debug(u"{0}::{1} matching pattern for '{2}' on {3}".format(case['className'], case['name'], rule['reason'], case['url'])) + if not dryrun: + case.push_claim(rule['reason']) + claimed.append((case, rule)) + return claimed diff --git a/claims/cmd.py b/claims/cmd.py new file 
mode 100755 index 0000000..4a60859 --- /dev/null +++ b/claims/cmd.py @@ -0,0 +1,492 @@ +#!/usr/bin/env python3 +# -*- coding: UTF-8 -*- + +import os.path +import sys +import logging +import argparse +import re +import tabulate +import csv +import collections +import statistics +import shutil + +import claims +from .config import config + +logging.basicConfig(level=logging.INFO) + + +class ClaimsCli(object): + + LATEST = 'latest' + + def __init__(self): + self.job_group = self.LATEST + self.job_group_old = None + self.grep_results = None + self.grep_rules = None + self._results = None + self._rules = None + + @property + def results(self): + if not self._results: + self._results = {} + if self.job_group not in self._results: + self._results[self.job_group] = claims.Report(self.job_group) + if self.grep_results: + self._results[self.job_group] \ + = [r for r in self._results[self.job_group] + if re.search(self.grep_results, "%s.%s" % (r['className'], r['name']))] + return self._results[self.job_group] + + @property + def rules(self): + if not self._rules: + self._rules = claims.Ruleset() + if self.grep_rules: + self._rules = [r for r in self._rules + if re.search(self.grep_rules, r['reason'])] + return self._rules + + def _table(self, data, headers=[], tablefmt=None, floatfmt='%.01f'): + if self.output == 'csv': + writer = csv.writer(sys.stdout) + if headers: + writer.writerow(headers) + for row in data: + writer.writerow(row) + else: + print(tabulate.tabulate( + data, + headers=headers, + floatfmt=floatfmt, + tablefmt=self.output)) + + def clean_cache(self): + d = os.path.join(config.CACHEDIR, self.job_group) + try: + shutil.rmtree(d) + logging.info("Removed %s" % d) + except FileNotFoundError: + pass + + def show_failed(self): + self._table( + [[r['testId']] for r in self.results + if r['status'] in config.FAIL_STATUSES], + headers=['failed test name'], tablefmt=self.output) + + def show_claimed(self): + self._table( + [[r['testId'], 
r['testActions'][0].get('reason')] for r in self.results + if r['status'] in config.FAIL_STATUSES and r['testActions'][0].get('reason')], + headers=['claimed test name', 'claim reason'], tablefmt=self.output) + + def show_unclaimed(self): + self._table( + [[r['testId']] for r in self.results + if r['status'] in config.FAIL_STATUSES and not r['testActions'][0].get('reason')], + headers=['unclaimed test name'], tablefmt=self.output) + + def show_claimable(self): + claimable = claims.claim_by_rules(self.results, self.rules, dryrun=True) + self._table( + [[i[0]['testId'], i[1]['reason']] for i in claimable], + headers=['claimable test name', 'claimable with reason'], + tablefmt=self.output) + + def show(self, test_class, test_name): + MAXWIDTH = 100 + FIELDS_EXTRA = ['start', 'end', 'production.log'] + FIELDS_SKIP = ['OBJECT:production.log'] + for r in self.results: + if r['className'] == test_class and r['name'] == test_name: + for k in sorted(r.keys()) + FIELDS_EXTRA: + if k in FIELDS_SKIP: + continue + v = r[k] + print("%s:" % k) + if isinstance(v, str): + for row in v.split("\n"): + if k == 'url': + print(" "*len(k), row) + if k == 'production.log' and len(row) == 0: + continue + width = len(row) + printed = MAXWIDTH + print(" "*len(k), row[0:MAXWIDTH]) + while printed < width: + printed_new = printed+MAXWIDTH-4 + print(" "*(len(k)+4), row[printed:printed_new]) + printed += len(row[printed:printed_new]) + break + + def claim(self): + claimed = claims.claim_by_rules(self.results, self.rules, dryrun=False) + self._table( + [[i[0]['testId'], i[1]['reason']] for i in claimed], + headers=['claimed test name', 'claimed with reason'], + tablefmt=self.output) + + def stats(self): + def _perc(perc_from, perc_sum): + """Just a shortcur to safely count percentage""" + try: + return float(perc_from)/perc_sum*100 + except ZeroDivisionError: + return None + + stat_all = len(self.results) + reports_fails = [i for i in self.results + if i['status'] in config.FAIL_STATUSES] + 
stat_failed = len(reports_fails) + reports_claimed = [i for i in reports_fails + if i['testActions'][0].get('reason')] + stat_claimed = len(reports_claimed) + + stats_all = ['TOTAL', stat_all, stat_failed, _perc(stat_failed, + stat_all), stat_claimed, _perc(stat_claimed, stat_failed)] + + stats = [] + builds = config.get_builds(self.job_group).values() + for t in [i['tier'] for i in builds]: + filtered = [r for r in self.results if r['tier'] == t] + stat_all_tiered = len(filtered) + reports_fails_tiered = [i for i in filtered + if i['status'] in config.FAIL_STATUSES] + stat_failed_tiered = len(reports_fails_tiered) + reports_claimed_tiered = [i for i in reports_fails_tiered + if i['testActions'][0].get('reason')] + stat_claimed_tiered = len(reports_claimed_tiered) + stats.append(["t%s" % t, stat_all_tiered, stat_failed_tiered, + _perc(stat_failed_tiered, stat_all_tiered), + stat_claimed_tiered, _perc(stat_claimed_tiered, + stat_failed_tiered)]) + + print("\nOverall stats") + self._table( + stats + [stats_all], + headers=['tier', 'all reports', 'failures', 'failures [%]', + 'claimed failures', 'claimed failures [%]'], + floatfmt=".01f", + tablefmt=self.output) + + reports_per_method = {} + for report in self.results: + method = report['className'].split('.')[2] + if method not in reports_per_method: + reports_per_method[method] = {'all': 0, 'failed': 0, 'claimed': 0} + reports_per_method[method]['all'] += 1 + if report in reports_fails: + reports_per_method[method]['failed'] += 1 + if report in reports_claimed: + reports_per_method[method]['claimed'] += 1 + + print("\nHow many failures are there per endpoint") + self._table( + sorted([(c, r['all'], r['failed'], _perc(r['failed'], r['all']), + r['claimed'], _perc(r['claimed'], r['failed'])) + for c, r in reports_per_method.items()], + key=lambda x: x[3], reverse=True) + [stats_all], + headers=['method', 'all reports', 'failures', 'failures [%]', + 'claimed failures', 'claimed failures [%]'], + floatfmt=".1f", + 
tablefmt=self.output) + + rules_reasons = [r['reason'] for r in self.rules] + reports_per_reason = {'UNKNOWN': stat_failed-stat_claimed} + reports_per_reason.update({r: 0 for r in rules_reasons}) + for report in reports_claimed: + reason = report['testActions'][0]['reason'] + if reason not in reports_per_reason: + reports_per_reason[reason] = 0 + reports_per_reason[reason] += 1 + + print("\nHow various reasons for claims are used") + reports_per_reason = sorted(reports_per_reason.items(), + key=lambda x: x[1], reverse=True) + reports_per_reason = [(r, c, r in rules_reasons) for r, c in + reports_per_reason] + self._table( + reports_per_reason, + headers=['claim reason', 'claimed times', 'claiming automated?'], + tablefmt=self.output) + + reports_per_class = {} + for report in self.results: + class_name = report['className'] + if class_name not in reports_per_class: + reports_per_class[class_name] = {'all': 0, 'failed': 0} + reports_per_class[class_name]['all'] += 1 + if report in reports_fails: + reports_per_class[class_name]['failed'] += 1 + + print("\nHow many failures are there per class") + self._table( + sorted([(c, r['all'], r['failed'], _perc(r['failed'], r['all'])) + for c, r in reports_per_class.items()], + key=lambda x: x[3], reverse=True), + headers=['class name', 'number of reports', 'number of failures', + 'failures ratio'], + floatfmt=".1f", + tablefmt=self.output) + + def _sanitize_state(self, state): + if state == 'REGRESSION': + state = 'FAILED' + if state == 'FIXED': + state = 'PASSED' + if state == 'PASSED': + return 0 + if state == 'FAILED': + return 1 + raise KeyError("Do not know how to handle state %s" % state) + + def history(self): + + matrix = collections.OrderedDict() + + # Load tests results + job_groups = config['job_groups'].keys() + for job_group in job_groups: + logging.info('Loading job group %s' % job_group) + self.job_group = job_group + report = self.results + for r in report: + t = r['testId'] + if t not in matrix: + matrix[t] = 
dict.fromkeys(job_groups) + try: + state = self._sanitize_state(r['status']) + except KeyError: + continue # e.g. state "SKIPPED" + matrix[t][job_group] = state + + # Count statistical measure of the results + for k, v in matrix.items(): + try: + stdev = statistics.pstdev([i for i in v.values() if i is not None]) + except statistics.StatisticsError: + stdev = None + v['stdev'] = stdev + + print("Legend:\n" + " 0 ... PASSED or FIXED\n" + " 1 ... FAILED or REGRESSION\n" + " Population standard deviation, 0 is best (stable)," + " 0.5 is worst (unstable)") + headers = ['test'] + list(job_groups) + ['pstdev (all)'] + matrix_flat = [] + for k, v in matrix.items(): + v_list = [] + for job_group in job_groups: + if job_group in v: + v_list.append(v[job_group]) + else: + v_list.append(None) + matrix_flat.append([k]+v_list+[v['stdev']]) + self._table( + matrix_flat, + headers=headers, + floatfmt=".3f" + ) + + def diff(self): + assert self.job_group_old, 'When using --diff, also specify --job-group-old' + + matrix = collections.OrderedDict() + + # Load tests results + state_good = 'GOOD' + state_bad = 'BAD' + states = { + 0: state_good, + 1: state_bad, + } + job_groups = (self.job_group_old, self.job_group) + for job_group in job_groups: + logging.info('Loading job group %s' % job_group) + self.job_group = job_group + for r in self.results: + t = r['testId'] + if t not in matrix: + matrix[t] = dict.fromkeys(job_groups) + try: + state = states[self._sanitize_state(r['status'])] + except KeyError: + state = r['status'] + matrix[t][job_group] = state + + good = collections.OrderedDict() + bad = collections.OrderedDict() + stable = 0 + + # Find tests that got better and tests that got worse + for test, jgs in matrix.items(): + if jgs[self.job_group_old] != jgs[self.job_group]: + if jgs[self.job_group_old] == state_good: + bad[test] = (jgs[self.job_group_old], jgs[self.job_group]) + if jgs[self.job_group] == state_good: + good[test] = (jgs[self.job_group_old], jgs[self.job_group])
+ else: + stable += 1 + + # Print diff findings + print("\nBad tests (%s)" % len(bad)) + self._table( + [[k, "%s -> %s" % v] for k,v in bad.items()], + headers=['test', 'state change'], + tablefmt=self.output) + print("\nGood tests (%s)" % len(good)) + self._table( + [[k, "%s -> %s" % v] for k,v in good.items()], + headers=['test', 'state change'], + tablefmt=self.output) + print("\nRest of the tests stayed same (%s)" % stable) + + + def timegraph(self): + for n, b in config.get_builds(self.job_group).items(): + f = "/tmp/timegraph-%s-build%s.svg" % (n, b['build']) + claims.timegraph.draw(self.results, f, b['tier']) + logging.info("Generated %s" % f) + + def handle_args(self): + parser = argparse.ArgumentParser( + description='Manipulate Jenkins claims with grace') + + # Actions + parser.add_argument('--clean-cache', action='store_true', + help='Cleans cache for job group provided by' + ' "--job-group" option (default: latest)') + parser.add_argument('--show-failed', action='store_true', + help='Show all failed tests') + parser.add_argument('--show-claimed', action='store_true', + help='Show claimed tests') + parser.add_argument('--show-unclaimed', action='store_true', + help='Show failed and not yet claimed tests') + parser.add_argument('--show-claimable', action='store_true', + help='Show failed, not yet claimed but' + ' claimable tests') + parser.add_argument('--show', action='store', + help='Show detailed info about given test case') + parser.add_argument('--claim', action='store_true', + help='Claim claimable tests') + parser.add_argument('--stats', action='store_true', + help='Show stats for selected job group') + parser.add_argument('--history', action='store_true', + help='Show how tests results and duration evolved') + parser.add_argument('--diff', action='store_true', + help='Show which test result changed between two' + ' job groups. 
You will need --job-group' + ' and --job-group-old options set') + parser.add_argument('--timegraph', action='store_true', + help='Generate time graph') + + # Modifiers + parser.add_argument('--job-group', action='store', + help='Specify group of jobs to perform the action' + ' with (default: latest)') + parser.add_argument('--job-group-old', action='store', + help='Only used with --diff') + parser.add_argument('--grep-results', action='store', metavar='REGEXP', + help='Only work with tests, whose' + ' "className+name" matches the regexp') + parser.add_argument('--grep-rules', action='store', metavar='REGEXP', + help='Only work with rules, whose reason matches' + ' the regexp') + parser.add_argument('--output', action='store', default='simple', + choices=['simple', 'csv', 'html'], + help='Format tables as plain, csv or html' + ' (default: simple)') + parser.add_argument('-d', '--debug', action='store_true', + help='Show also debug messages') + + args = parser.parse_args() + + # Handle "--debug" + if args.debug: + logging.getLogger().setLevel(logging.DEBUG) + logging.debug("Debug mode enabled") + + # Handle "--job-group something" + if args.job_group: + self.job_group = args.job_group + logging.debug("Job group we are going to work with is %s" + % self.job_group) + if args.job_group_old: + self.job_group_old = args.job_group_old + logging.debug("Old job group we are going to work with is %s" + % self.job_group_old) + + # Handle "--grep-results something" + if args.grep_results: + self.grep_results = args.grep_results + logging.debug("Going to consider only results matching %s" + % self.grep_results) + + # Handle "--grep-rules something" + if args.grep_rules: + self.grep_rules = args.grep_rules + logging.debug("Going to consider only rules matching %s" + % self.grep_rules) + + # Handle "--output something" + self.output = args.output + logging.debug("Using output type %s" % self.output) + + # Actions + + # Clean cache + if args.clean_cache: + self.clean_cache() + + # 
Show failed + if args.show_failed: + self.show_failed() + + # Show claimed + elif args.show_claimed: + self.show_claimed() + + # Show unclaimed + elif args.show_unclaimed: + self.show_unclaimed() + + # Show claimable + elif args.show_claimable: + self.show_claimable() + + # Show test details + elif args.show: + # To be sure we will not be missing the test because of filtering, + # erase grep_result filter first + self.grep_results = None + class_name = '.'.join(args.show.split('.')[:-1]) + name = args.show.split('.')[-1] + self.show(class_name, name) + + # Do a claim work + elif args.claim: + self.claim() + + # Show statistics + elif args.stats: + self.stats() + + # Show tests history + elif args.history: + self.history() + + # Show tests diff across two job groups + elif args.diff: + self.diff() + + # Generate time graphs per tier + elif args.timegraph: + self.timegraph() + + return 0 diff --git a/claims/config.py b/claims/config.py new file mode 100644 index 0000000..61fadfd --- /dev/null +++ b/claims/config.py @@ -0,0 +1,44 @@ +import collections +import yaml +import json +import logging + +from .utils import request_get + + +class Config(collections.UserDict): + + FAIL_STATUSES = ("FAILED", "ERROR", "REGRESSION") + LATEST = 'latest' # how do we call latest job group in the config? 
+ CACHEDIR = '.cache/' # where is the cache stored + + def __init__(self): + with open("config.yaml", "r") as file: + self.data = yaml.safe_load(file) + + # Additional params when talking to Jenkins + self['headers'] = None + self['pull_params'] = { + u'tree': u'suites[cases[className,duration,name,status,stdout,errorDetails,errorStackTrace,testActions[reason]]]{0}' + } + + def get_builds(self, job_group=''): + if job_group == '': + job_group = self.LATEST + out = collections.OrderedDict() + for job in self.data['job_groups'][job_group]['jobs']: + key = self.data['job_groups'][job_group]['template'].format(**job) + out[key] = job + return out + + def init_headers(self): + url = '{0}/crumbIssuer/api/json'.format(self['url']) + crumb_data = request_get(url, self['usr'], self['pwd'], + params=None, expected_codes=[200], cached=False) + crumb = json.loads(crumb_data) + self['headers'] = {crumb['crumbRequestField']: crumb['crumb']} + + +logging.basicConfig(level=logging.INFO) + +config = Config() diff --git a/claims/report.py b/claims/report.py new file mode 100644 index 0000000..e3e9f19 --- /dev/null +++ b/claims/report.py @@ -0,0 +1,66 @@ +import os +import collections +import pickle +import json + +from .config import config +from .build_logs import ProductionLog +from .utils import request_get +from .case import Case + + +class Report(collections.UserList): + """ + Report is a list of Cases (i.e.
test results) + """ + + def __init__(self, job_group=''): + # If job group is not specified, we want latest one + if job_group == '': + job_group = config.LATEST + self.job_group = job_group + self._cache = os.path.join(config.CACHEDIR, self.job_group, 'main.pickle') + + # Attempt to load data from cache + if os.path.isfile(self._cache): + self.data = pickle.load(open(self._cache, 'rb')) + return + + # Load the actual data + self.data = [] + for name, meta in config.get_builds(self.job_group).items(): + build = meta['build'] + rhel = meta['rhel'] + tier = meta['tier'] + production_log = ProductionLog(self.job_group, name, build) + for report in self.pull_reports(name, build): + report['tier'] = tier + report['distro'] = rhel + report['OBJECT:production.log'] = production_log + self.data.append(Case(report)) + + # Dump parsed data into cache + pickle.dump(self.data, open(self._cache, 'wb')) + + def pull_reports(self, job, build): + """ + Fetches the test report for a given job and build + """ + build_url = '{0}/job/{1}/{2}'.format( + config['url'], job, build) + build_data = request_get( + build_url+'/testReport/api/json', + user=config['usr'], + password=config['pwd'], + params=config['pull_params'], + expected_codes=[200, 404], + cached=os.path.join(config.CACHEDIR, self.job_group, job, 'main.json')) + cases = json.loads(build_data)['suites'][0]['cases'] + + # Enrich individual reports with URL + for c in cases: + className = c['className'].split('.')[-1] + testPath = '.'.join(c['className'].split('.')[:-1]) + c['url'] = u'{0}/testReport/junit/{1}/{2}/{3}'.format(build_url, testPath, className, c['name']) + + return(cases) diff --git a/claims/ruleset.py b/claims/ruleset.py new file mode 100644 index 0000000..974c524 --- /dev/null +++ b/claims/ruleset.py @@ -0,0 +1,9 @@ +import collections +import json + + +class Ruleset(collections.UserList): + + def __init__(self): + with open('kb.json', 'r') as fp: + self.data = json.loads(fp.read()) diff --git 
a/claims/timegraph.py b/claims/timegraph.py new file mode 100644 index 0000000..275dc71 --- /dev/null +++ b/claims/timegraph.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 +# -*- coding: UTF-8 -*- + +import logging +import datetime +import svgwrite + + +STATUS_COLOR = { +'FAILED': 'red', +'FIXED': 'blue', +'PASSED': 'green', +'REGRESSION': 'purple', +'SKIPPED': 'fuchsia', +} +LANE_HEIGHT = 10 +LANES_START = LANE_HEIGHT # we will place a timeline into the first lane +HOUR = 3600 +X_CONTRACTION = 0.1 + + +def overlaps(a, b): + """ + Return true if two intervals overlap: + overlaps((1, 3), (2, 10)) => True + overlaps((1, 3), (5, 10)) => False + """ + if b[0] <= a[0] <= b[1] or b[0] <= a[1] <= b[1]: + return True + else: + return False + + +def scale(a): + return (a[0] * X_CONTRACTION, a[1]) + + +def draw(reports, filename, tier): + reports = [i for i in reports if i['tier'] == tier] + + # Load all the reports and sort them in lanes + ###counter = 0 + lanes = [] + start = None + end = None + for r in reports: + # Get start and end time. 
If unknown, skip the result + try: + r_start = r['start'].timestamp() + r_end = r['end'].timestamp() + except KeyError: + logging.info("No start time for %s::%s" % (r['className'], r['name'])) + continue + # Find overal widtht of time line + if start is None or r_start < start: + logging.debug("Test %s started before current minimum of %s" % (r['name'], start)) + start = r_start + if end is None or r_end > end: + end = r_end + r['interval'] = (r_start, r_end) + # Check if there is a free lane for us, if not, create a new one + lane_found = False + for lane in lanes: + lane_found = True + for interval in lane: + if overlaps(r['interval'], interval['interval']): + lane_found = False + break + if lane_found: + break + if not lane_found: + logging.debug("Adding lane %s" % (len(lanes)+1)) + lane = [] + lanes.append(lane) + lane.append(r) + ###counter += 1 + ###if counter > 10: break + + # Create a drawing with timeline + dwg = svgwrite.Drawing(filename, + size=scale((end-start, LANE_HEIGHT*(len(lanes)+1)))) + dwg.add(dwg.line( + scale((0, LANE_HEIGHT)), + scale((end-start, LANE_HEIGHT)), + style="stroke: black; stroke-width: 1;" + )) + start_full_hour = int(start / HOUR) * HOUR + timeline = start_full_hour - start + while start + timeline <= end: + if timeline >= 0: + dwg.add(dwg.line( + scale((timeline, LANE_HEIGHT)), + scale((timeline, 2*LANE_HEIGHT/3)), + style="stroke: black; stroke-width: 1;" + )) + dwg.add(dwg.text( + datetime.datetime.fromtimestamp(start+timeline) \ + .strftime('%Y-%m-%d %H:%M:%S'), + insert=scale((timeline, 2*LANE_HEIGHT/3)), + style="fill: black; font-size: 3pt;" + )) + timeline += HOUR/4 + + # Draw tests + for lane_no in range(len(lanes)): + for r in lanes[lane_no]: + logging.debug("In lane %s adding %s::%s %s" \ + % (lane_no, r['className'], r['name'], r['interval'])) + s, e = r['interval'] + dwg.add(dwg.rect( + insert=scale((s - start, LANES_START + LANE_HEIGHT*lane_no + LANE_HEIGHT/2)), + size=scale((e - s, LANE_HEIGHT/2)), + style="fill: 
%s; stroke: %s; stroke-width: 0;" \ + % (STATUS_COLOR[r['status']], STATUS_COLOR[r['status']]) + )) + dwg.add(dwg.text( + "%s::%s" % (r['className'], r['name']), + insert=scale((s - start, LANES_START + LANE_HEIGHT*lane_no + LANE_HEIGHT/2)), + transform="rotate(-30, %s, %s)" \ + % scale((s - start, LANES_START + LANE_HEIGHT*lane_no + LANE_HEIGHT/2)), + style="fill: gray; font-size: 2pt;" + )) + dwg.save() diff --git a/claims/utils.py b/claims/utils.py new file mode 100755 index 0000000..a01fd6a --- /dev/null +++ b/claims/utils.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +from __future__ import division +import os +import logging +import urllib3 +import requests + + +def request_get(url, user, password, params=None, expected_codes=[200], cached=True, stream=False): + # If available, read it from cache + if cached and not stream and os.path.isfile(cached): + with open(cached, 'r') as fp: + return fp.read() + + # Get the response from the server + urllib3.disable_warnings() + response = requests.get( + url, + auth=requests.auth.HTTPBasicAuth( + user, password), + params=params, + verify=False + ) + + # Check we got expected exit code + if response.status_code not in expected_codes: + raise requests.HTTPError("Failed to get %s with %s" % (url, response.status_code)) + + # If we were streaming file + if stream: + with open(cached, 'w+b') as fp: + for chunk in response.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + fp.write(chunk) + fp.close() + return + + # In some cases 404 just means "we have nothing" + if response.status_code == 404: + return '' + + # If cache was configured, dump data in there + if cached: + os.makedirs(os.path.dirname(cached), exist_ok=True) + with open(cached, 'w') as fp: + fp.write(response.text) + + return response.text diff --git a/config.yaml.sample b/config.yaml.sample index c9b400a..6de2875 100644 --- a/config.yaml.sample +++ b/config.yaml.sample @@ -1,5 +1,19 @@ usr: jenkins_username pwd: jenkins_password 
url: https://jenkins.url -job: automation-6.2-tier{0}-rhel{1} -bld: lastCompletedBuild +job_groups: + latest: + template: automation-6.4-tier{tier}-rhel{rhel} + jobs: + - build: lastCompletedBuild + rhel: 7 + tier: 1 + - build: lastCompletedBuild + rhel: 7 + tier: 2 + - build: lastCompletedBuild + rhel: 7 + tier: 3 + - build: lastCompletedBuild + rhel: 7 + tier: 4 diff --git a/kb.json.sample b/kb.json.sample index c1e115c..526ea1a 100644 --- a/kb.json.sample +++ b/kb.json.sample @@ -1,5 +1,6 @@ [ { + "field": "errorDetails", "pattern": "^[rR]egex\s[eE]xpre[s]{2}ion", "reason": "my poor excuse for a failed test" } diff --git a/requirements.txt b/requirements.txt index 6c9fdba..a652fac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ PyYAML requests +tabulate +svgwrite diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000..9dec13d --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python3 +# -*- coding: UTF-8 -*- + +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) diff --git a/test/test_claims.py b/test/test_claims.py new file mode 100755 index 0000000..05d2d8c --- /dev/null +++ b/test/test_claims.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +import claims + + +def test_rule_matches(): + checkme = { + 'name': 'test', + 'greeting': 'Hello world', + 'area': 'IT Crowd', + } + result = claims.Case(checkme) + + assert result.matches_to_rule({'field': 'greeting', 'pattern': 'Hel+o'}) == True + assert result.matches_to_rule({'field': 'greeting', 'pattern': 'This is not there'}) == False + assert result.matches_to_rule({'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}]}) == True + assert result.matches_to_rule({'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}]}) == True + assert result.matches_to_rule({'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern':
'world'}, {'field': 'area', 'pattern': 'IT'}]}) == True + assert result.matches_to_rule({'AND': [{'field': 'greeting', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}, {'field': 'area', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'AND': [{'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}]}]}) == True + assert result.matches_to_rule({'AND': [{'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}]}]}) == True + assert result.matches_to_rule({'AND': [{'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}, {'field': 'area', 'pattern': 'IT'}]}]}) == True + assert result.matches_to_rule({'AND': [{'AND': [{'field': 'greeting', 'pattern': 'This is not there'}]}]}) == False + assert result.matches_to_rule({'AND': [{'AND': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}]}]}) == False + assert result.matches_to_rule({'AND': [{'AND': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}, {'field': 'area', 'pattern': 'IT'}]}]}) == False + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'Hel+o'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'This is not there'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 
'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}, {'field': 'area', 'pattern': 'IT'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}, {'field': 'area', 'pattern': 'IT'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'area', 'pattern': 'IT'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}, {'field': 'area', 'pattern': 'This is not there'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'area', 'pattern': 'This is not there'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'area', 'pattern': 'IT'}]}) == True + assert result.matches_to_rule({'OR': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'area', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'OR': [{'AND': [{'field': 'greeting', 'pattern': 'Hel+o'}, {'field': 'greeting', 'pattern': 'world'}]}, {'AND': [{'field': 'area', 'pattern': 'IT'}]}]}) == True + assert result.matches_to_rule({'OR': [{'AND': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}]}, {'AND': [{'field': 'area', 'pattern': 'IT'}]}]}) == True + assert result.matches_to_rule({'OR': [{'AND': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 
'world'}]}, {'AND': [{'field': 'area', 'pattern': 'This is not there'}]}]}) == False + assert result.matches_to_rule({'OR': [{'AND': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}]}, {'field': 'area', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'AND': [{'OR': [{'field': 'greeting', 'pattern': 'Hel*o'}, {'field': 'greeting', 'pattern': 'world'}]}, {'field': 'area', 'pattern': 'This is not there'}]}) == False + assert result.matches_to_rule({'AND': [{'OR': [{'field': 'greeting', 'pattern': 'Hel*o'}, {'field': 'greeting', 'pattern': 'world'}]}, {'field': 'area', 'pattern': 'IT'}]}) == True + assert result.matches_to_rule({'AND': [{'OR': [{'field': 'greeting', 'pattern': 'This is not there'}, {'field': 'greeting', 'pattern': 'world'}]}, {'field': 'area', 'pattern': 'IT'}]}) == True diff --git a/test/test_claimscmd.py b/test/test_claimscmd.py new file mode 100644 index 0000000..11f765b --- /dev/null +++ b/test/test_claimscmd.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# -*- coding: UTF-8 -*- + +import sys +import pytest + +import io +from contextlib import redirect_stdout + +import claims + + +class TestClaimsCli(object): + + def test_help(self): + sys.argv = ['./something.py', '--help'] + f = io.StringIO() + with pytest.raises(SystemExit) as e: + with redirect_stdout(f): + claims.ClaimsCli().handle_args() + assert e.value.code == 0 + assert 'Manipulate Jenkins claims with grace' in f.getvalue() + assert 'optional arguments:' in f.getvalue() diff --git a/test/test_requests.py b/test/test_requests.py new file mode 100644 index 0000000..d30bc56 --- /dev/null +++ b/test/test_requests.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +import requests +import tempfile +import pytest + +import claims + + +class TestClaimsRequestWrapper(): + + def test_get_sanity(self): + a = claims.utils.request_get('http://inecas.fedorapeople.org/fakerepos/zoo3/repodata/repomd.xml', 
cached=False) + b = claims.utils.request_get('http://inecas.fedorapeople.org/fakerepos/zoo3/repodata/repomd.xml', params=None, expected_codes=[200], cached=False) + assert a == b + with pytest.raises(requests.HTTPError) as e: + claims.utils.request_get('http://inecas.fedorapeople.org/fakerepos/zoo3/repodata/repomd.xml', params=None, expected_codes=[404], cached=False) + + def test_get_caching(self): + fp, fname = tempfile.mkstemp() + a = claims.utils.request_get('http://inecas.fedorapeople.org/fakerepos/zoo3/repodata/repomd.xml', cached=fname) + b = claims.utils.request_get('http://inecas.fedorapeople.org/fakerepos/zoo3/repodata/repomd.xml', cached=fname) + assert a == b diff --git a/test/test_timegraph.py b/test/test_timegraph.py new file mode 100644 index 0000000..997b181 --- /dev/null +++ b/test/test_timegraph.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +import pytest + +import claims.timegraph + + +class TestTimegraph(): + + def test_overlaps(self): + assert claims.timegraph.overlaps((1, 3), (2, 10)) == True + assert claims.timegraph.overlaps((1, 3), (5, 10)) == False diff --git a/unclaimed.py b/unclaimed.py deleted file mode 100755 index f68880f..0000000 --- a/unclaimed.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -import claims - -reports = claims.fetch_all_reports() -reports = claims.flatten_reports(reports) -reports = claims.filter_fails(reports) -reports = claims.filter_not_claimed(reports) - -for r in reports: - print(u'{0} {1} {2}'.format(r['distro'], r['className'], r['name']))