diff --git a/bench_fio/benchlib/argparsing.py b/bench_fio/benchlib/argparsing.py index 6864b513f1..0f3c34565d 100644 --- a/bench_fio/benchlib/argparsing.py +++ b/bench_fio/benchlib/argparsing.py @@ -38,6 +38,11 @@ def get_arguments(settings): nargs="+", type=str, ) + ag.add_argument( + "--template", + help="Benchmark template to use", + default="benchmark.ini", + ) ag.add_argument( "-t", "--type", @@ -92,7 +97,7 @@ def get_arguments(settings): ag.add_argument( "--runtime", - help=f"Override the default test runtime per benchmark" + help=f"Override the default test runtime per benchmark (seconds) " f"(default: {settings['runtime']})", type=int, default=settings["runtime"], @@ -230,7 +235,7 @@ def get_arguments(settings): default=settings["invalidate"], ) ag.add_argument( - "--quiet", help="The progresbar will be supressed.", action="store_true" + "--quiet", help="The progress bar will be suppressed.", action="store_true" ) ag.add_argument( "--loginterval", diff --git a/bench_fio/benchlib/defaultsettings.py b/bench_fio/benchlib/defaultsettings.py index a93d77242a..ae57bf64cc 100644 --- a/bench_fio/benchlib/defaultsettings.py +++ b/bench_fio/benchlib/defaultsettings.py @@ -8,6 +8,7 @@ def get_default_settings(): settings = {} settings["benchmarks"] = None settings["target"] = [] + settings["template"] = "benchmark.ini" settings["type"] = None settings["engine"] = "libaio" settings["mode"] = ["randread"] diff --git a/bench_fio/benchlib/display.py b/bench_fio/benchlib/display.py index d1ce33c76d..3323dd2400 100644 --- a/bench_fio/benchlib/display.py +++ b/bench_fio/benchlib/display.py @@ -33,7 +33,7 @@ def calculate_duration(settings, tests): number_of_tests = number_of_tests/len(settings["target"]) time_per_test = settings["runtime"] if time_per_test: - duration_in_seconds = number_of_tests * time_per_test + duration_in_seconds = int(number_of_tests * time_per_test) duration = str(datetime.timedelta(seconds=duration_in_seconds)) else: duration = None @@ -56,7 +56,7 @@ 
def print_options(settings, table): data = parse_settings_for_display(settings) for item in settings.keys(): if item not in settings["filter_items"]: # filter items are internal options that aren't relevant - if item not in descriptions.keys(): + if item not in descriptions.keys(): customitem = item + "*" # These are custom fio options so we mark them as such #print(f"{customitem:<{fl}}: {data[item]:<}") table.add_row(customitem, data[item]) @@ -70,7 +70,7 @@ def print_options(settings, table): def display_header(settings, tests): duration = calculate_duration(settings, tests) table = Table(title="Bench-fio",title_style=Style(bgcolor="dodger_blue2",bold=True)) table.add_column(no_wrap=True, header="Setting") @@ -79,4 +79,4 @@ def display_header(settings, tests): table.add_row("Estimated Duration",duration) print_options(settings, table) console = Console() - console.print(table) \ No newline at end of file + console.print(table) diff --git a/bench_fio/benchlib/generatefio.py b/bench_fio/benchlib/generatefio.py index 3801ccb636..730b8dc69f 100644 --- a/bench_fio/benchlib/generatefio.py +++ b/bench_fio/benchlib/generatefio.py @@ -28,7 +28,7 @@ def filter_options(settings, config, mapping, benchmark, output_directory): if isinstance(value, bool): value = boolean[str(value)] if key == "type": # we check if we target a file directory or block device - devicetype = checks.check_target_type(benchmark["target_base"], settings) + devicetype = checks.check_target_type(benchmark["target"], settings) config["FIOJOB"][devicetype] = benchmark["target"] if ( value diff --git a/bench_fio/benchlib/runfio.py b/bench_fio/benchlib/runfio.py index a6ccb03f29..be6d14511b 100644 --- a/bench_fio/benchlib/runfio.py +++ b/bench_fio/benchlib/runfio.py @@ -54,8 +54,8 @@ def run_fio(settings, benchmark): # passed to fio's filename but should be removed when checking the # existance of the path, or when writing a job file or log file in # the filesystem. 
- benchmark.update({"target_base": benchmark['target'].replace("\\", "")}) - tmpjobfile = f"/tmp/{os.path.basename(benchmark['target_base'])}-tmpjobfile.fio" + benchmark.update({"target": benchmark['target'].replace("\\", "")}) + tmpjobfile = f"/tmp/{os.path.basename(benchmark['target'])}-tmpjobfile.fio" output_directory = supporting.generate_output_directory(settings, benchmark) output_file = f"{output_directory}/{benchmark['mode']}-{benchmark['iodepth']}-{benchmark['numjobs']}.json" generatefio.generate_fio_job_file(settings, benchmark, output_directory, tmpjobfile) diff --git a/bench_fio/benchlib/supporting.py b/bench_fio/benchlib/supporting.py index 3b86efe724..e171efc0f7 100644 --- a/bench_fio/benchlib/supporting.py +++ b/bench_fio/benchlib/supporting.py @@ -42,11 +42,11 @@ def generate_output_directory(settings, benchmark): settings["output"] = os.path.expanduser(settings["output"]) if benchmark["mode"] in settings["mixed"]: directory = ( - f"{settings['output']}/{os.path.basename(benchmark['target_base'])}/" + f"{settings['output']}/{os.path.basename(benchmark['target'])}/" f"{benchmark['mode']}{benchmark['rwmixread']}/{benchmark['block_size']}" ) else: - directory = f"{settings['output']}/{os.path.basename(benchmark['target_base'])}/{benchmark['block_size']}" + directory = f"{settings['output']}/{os.path.basename(benchmark['target'])}/{benchmark['block_size']}" if "run" in benchmark.keys(): directory = directory + f"/run-{benchmark['run']}" diff --git a/bin/bench-fio b/bin/bench-fio deleted file mode 100755 index eb8ac51d33..0000000000 --- a/bin/bench-fio +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python3 - -import bench_fio - -if __name__ == '__main__': - bench_fio.main() diff --git a/bin/fio-plot b/bin/fio-plot deleted file mode 100755 index 88ca9688b2..0000000000 --- a/bin/fio-plot +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python3 - -import fio_plot - -if __name__ == '__main__': - fio_plot.main() diff --git a/pyproject.toml b/pyproject.toml new 
file mode 100644 index 0000000000..e68ba42c93 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,22 @@ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta" + +[project] +version = "1.1.16" +name = "fio-plot" + +dependencies = [ + "numpy", + "matplotlib", + "pillow", + "pyan3", + "rich", +] + +[project.scripts] +fio-plot = "fio_plot:main" +bench-fio = "bench_fio:main" + +[tool.setuptools.packages.find] +exclude = ["bench_fio.scripts"] diff --git a/requirements.txt b/requirements.txt index e9b578ccfa..b5d5488026 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ numpy Pillow -pyparsing matplotlib<=3.8 pyan3 rich diff --git a/setup.py b/setup.py deleted file mode 100644 index ae5bf380ef..0000000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="fio-plot", - version="1.1.16", - author="louwrentius", - description="Create charts from FIO storage benchmark tool output", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/louwrentius/fio-plot/", - packages=setuptools.find_packages(), - install_requires=["numpy", "matplotlib", "Pillow", "pyan3", "pyparsing", "rich"], - include_package_data=True, - package_data={"bench_fio": ["templates/*.fio", "scripts/*.sh"]}, - entry_points={ - "console_scripts": ["fio-plot = fio_plot:main", "bench-fio = bench_fio:main"], - }, - scripts=["bin/fio-plot", "bin/bench-fio"], -) diff --git a/tests/bench_fio_test.py b/tests/bench_fio_test.py index a9bb8cbd86..657f83c4ed 100644 --- a/tests/bench_fio_test.py +++ b/tests/bench_fio_test.py @@ -18,12 +18,12 @@ def setUp(self): self.settings["output"] = "output_directory" def test_generate_benchmarks(self): - self.assertEqual(len(supporting.generate_test_list(self.settings)), 98) + self.assertEqual(len(supporting.generate_test_list(self.settings)), 49) def 
test_generate_benchmarks_big(self): self.settings["target"] = ["filea", "fileb", "filec", "filed"] self.settings["block_size"] = ["4k", "8k", "16k", "32k"] - self.assertEqual(len(supporting.generate_test_list(self.settings)), 1568) + self.assertEqual(len(supporting.generate_test_list(self.settings)), 784) def test_are_loop_items_lists(self): for item in self.settings["loop_items"]: @@ -32,7 +32,7 @@ def test_are_loop_items_lists(self): def test_calculate_duration(self): self.assertEqual( - display.calculate_duration(self.settings, self.tests), "1:38:00" + display.calculate_duration(self.settings, self.tests), "0:49:00" ) def test_generate_output_directory_regular(self): diff --git a/tests/test_3d.py b/tests/test_3d.py index 6016f9698a..24d56b50b5 100644 --- a/tests/test_3d.py +++ b/tests/test_3d.py @@ -9,6 +9,8 @@ def test_correct_bars_drawn(self): "rw": "read", "source": "test", "title": "test", + "title_fontsize": 12, + "subtitle_fontsize": 20, "subtitle": "", "filter": ["read", "write"], # intentionally using prime numbers @@ -16,10 +18,12 @@ def test_correct_bars_drawn(self): "numjobs": [5, 11], "maxjobs": 32, "maxdepth": 32, - "max": None, + "max_z": None, "dpi": 200, "disable_fio_version": 2.0, - "output_filename": "/tmp/test.png" + "output_filename": "/tmp/test.png", + "truncate_xaxis": False, + "source_fontsize": 12, } dataset = [{"data": []}]