-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathkbench.py
More file actions
88 lines (72 loc) · 2.8 KB
/
kbench.py
File metadata and controls
88 lines (72 loc) · 2.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import os
import json
import signal
import argparse
import coloredlogs
import logging as log
from threading import Event
from datetime import datetime
from core.benchmark import BenchmarkRegistry
from core.workload import WorkloadRegistry
from core.metric import MetricRegistry
from core.config import SysInfo, RunConfig
# Default to INFO-level colored console logging; upgraded to DEBUG below
# when the user passes -v.
coloredlogs.install(level="INFO")

# Command-line interface: the action is mandatory; config and verbosity
# are optional.
parser = argparse.ArgumentParser()
required_group = parser.add_argument_group("required arguments")
required_group.add_argument(
    "-a",
    "--action",
    choices=["build", "run", "clean", "monitor"],
    help="action",
    required=True,
)
parser.add_argument(
    "-c", "--config", type=str, help="configuration file for benchmark run"
)
parser.add_argument("-v", "--verbose", action="store_true", help="increase verbosity")
args = parser.parse_args()

# Re-install the log handler at DEBUG level if verbosity was requested.
if args.verbose:
    coloredlogs.install(level="DEBUG")

# Collect static system information once, up front.
SysInfo.init()
# Event used to tell the "monitor" sampling loop to stop on Ctrl-C.
monitorEvent = Event()


def handler(signum, frame):
    """Handle SIGINT.

    In monitor mode, Ctrl-C requests a graceful shutdown by setting the
    event the sampling loop waits on; in every other mode the process
    terminates immediately.
    """
    if args.action == "monitor":
        monitorEvent.set()
    else:
        # Raise SystemExit instead of calling the exit() builtin: exit()
        # is injected by the site module and is not guaranteed to exist
        # (e.g. under `python -S`). -1 is kept so the observed process
        # exit status (255 on POSIX) is unchanged.
        raise SystemExit(-1)


signal.signal(signal.SIGINT, handler)
try:
config = RunConfig(args)
t = datetime.now()
configName = os.path.splitext(os.path.basename(config.configPath))[0]
resultFilename = f"{configName}-{t.strftime('%d%m%Y-%H%M')}.json"
match config.action:
case "build":
# Load all benchmarks and register them
BenchmarkRegistry.loadBenchmarks("./benchmarks")
WorkloadRegistry.loadWorkloads("./workloads")
BenchmarkRegistry.buildBenchmarks()
case "run":
# Load all benchmarks and register them
BenchmarkRegistry.loadBenchmarks("./benchmarks")
WorkloadRegistry.loadWorkloads("./workloads")
# Load specified metrics
MetricRegistry.loadMetrics(config.metricsPath)
BenchmarkRegistry.buildBenchmarks(WorkloadRegistry.targetBenchmarks)
results = WorkloadRegistry.runWorkloads()
# Add metadata
results["uname"] = os.uname().version
with open(os.path.join("./results", resultFilename), "w") as file:
json.dump(results, file)
log.info("Wrote benchmarking results to '%s'", resultFilename)
case "monitor":
MetricRegistry.loadMetrics(config.metricsPath)
MetricRegistry.startSamplingThreads();
monitorEvent.wait()
MetricRegistry.stopSamplingThreads();
results = MetricRegistry.fetchResults()
# Add metadata
results["uname"] = os.uname().version
with open(os.path.join("./results", resultFilename), "w") as file:
json.dump(results, file)
log.info("Wrote monitoring results to '%s'", resultFilename)
except Exception as e:
log.exception(e, exc_info=True)
exit(-1)