JSON parse fail with perf stat wrapper #109

@tcherrou

Description

Parsing the perf stat JSON output fails when using the perf stat command wrapper (PerfStatWrap).
Error thrown:

Traceback (most recent call last):
  File "/home/taoufik/Documents/PAAE/benchkit/examples/par-dpll/minimal_bench.py", line 152, in <module>
    test_campaign.run()
  File "/home/taoufik/Documents/PAAE/benchkit/benchkit/campaign.py", line 197, in run
    self.campaign_run(other_campaigns_seconds=0, barrier=None)
  File "/home/taoufik/Documents/PAAE/benchkit/benchkit/campaign.py", line 187, in campaign_run
    self._benchmark.run(
  File "/home/taoufik/Documents/PAAE/benchkit/benchkit/benchmark.py", line 538, in run
    expected_total_seconds = self.expected_total_duration_seconds()
  File "/home/taoufik/Documents/PAAE/benchkit/benchkit/benchmark.py", line 428, in expected_total_duration_seconds
    result = self.total_nb_runs() * bds
  File "/home/taoufik/Documents/PAAE/benchkit/benchkit/benchmark.py", line 396, in total_nb_runs
    build_variables, run_variables, _, _ = self._group_record_parameters(
  File "/home/taoufik/Documents/PAAE/benchkit/benchkit/benchmark.py", line 898, in _group_record_parameters
    build_variables = {
TypeError: 'NoneType' object is not iterable
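
For context, the last frame builds a dict comprehension over the value returned by the benchmark's variable-name hooks; iterating over None raises exactly this error. A minimal standalone sketch of the mechanism (illustrative names, not benchkit code):

# A dict comprehension over None raises the same TypeError as in the
# traceback above.
var_names = None  # what a method body consisting only of `pass` returns
build_variables = {name: 0 for name in var_names}
# TypeError: 'NoneType' object is not iterable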

Minimal reproducible example:

import pathlib
from typing import Any, Dict, Iterable, List

from benchkit.benchmark import (
    Benchmark,
    CommandAttachment,
    CommandWrapper,
    PostRunHook,
    PreRunHook,
    SharedLib,
)
from benchkit.campaign import CampaignCartesianProduct
from benchkit.commandwrappers.perf import PerfStatWrap
from benchkit.dependencies.packages import PackageDependency
from benchkit.platforms import Platform, get_current_platform
from benchkit.utils.types import PathType



class Test(Benchmark):

    def __init__(
        self,
        src_dir: PathType,
        command_wrappers: Iterable[CommandWrapper] = (),
        command_attachments: Iterable[CommandAttachment] = (),
        shared_libs: Iterable[SharedLib] = (),
        pre_run_hooks: Iterable[PreRunHook] = (),
        post_run_hooks: Iterable[PostRunHook] = (),
        platform: Platform | None = None,
    ) -> None:
        super().__init__(
            command_wrappers=command_wrappers,
            command_attachments=command_attachments,
            shared_libs=shared_libs,
            pre_run_hooks=pre_run_hooks,
            post_run_hooks=post_run_hooks,
        )
        if platform is not None:
            self.platform = platform  # TODO Warning! overriding upper class platform
        else:
            self.platform = get_current_platform()
        bench_src_path = pathlib.Path(src_dir)
        if not self.platform.comm.isdir(bench_src_path):
            raise ValueError(
                f"Invalid source path: {bench_src_path}\n"
                "src_dir argument can be defined manually."
            )
        self._bench_src_path = bench_src_path
        self._build_dir = bench_src_path

        
    @property
    def bench_src_path(self) -> pathlib.Path:
        return self._bench_src_path

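    # NOTE: the two methods below fall through to `return None` despite their
    # List[str] annotation; the framework later iterates over that value.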
    @staticmethod
    def get_build_var_names() -> List[str]:
        pass
        # return ["implementation"]

    @staticmethod
    def get_run_var_names() -> List[str]:
        # return ["instance","implementation"]
        pass

    @staticmethod
    def get_tilt_var_names() -> List[str]:
        return []

    def dependencies(self) -> List[PackageDependency]:
        return super().dependencies() + []

    def build_tilt(self, **kwargs) -> None:
        pass

    def prebuild_bench(
        self,
        **_kwargs,
    ) -> None:
        pass

    def build_bench(
        self,
        implementation: str,
        **kwargs,
    ) -> None:
        pass

    def clean_bench(self) -> None:
        pass

    def single_run(
        self,
        benchmark_duration_seconds: int,
        **kwargs,
    ) -> str:
        environment = {}
        run_command = ["sleep", "1"]
        # The registered command wrappers (here: PerfStatWrap) extend the
        # command line and environment before the benchmark runs.
        wrapped_run_command, wrapped_environment = self._wrap_command(
            run_command=run_command,
            environment=environment,
            **kwargs,
        )

        output = self.run_bench_command(
            run_command=run_command,
            wrapped_run_command=wrapped_run_command,
            current_dir=self._build_dir,
            environment=environment,
            wrapped_environment=wrapped_environment,
            print_output=False,
        )
        return output

    def parse_output_to_results(  # pylint: disable=arguments-differ
        self,
        command_output: str,
        run_variables: Dict[str, Any],
        **_kwargs,
    ) -> Dict[str, Any]:
        result_dict = {}
        # Parsing of the perf stat summary would go here; the campaign fails
        # before reaching this point (see traceback).
        return result_dict
 
test_campaign = CampaignCartesianProduct(
    name="test",
    nb_runs=1,
    benchmark=Test(
        src_dir="./",
        command_wrappers=[PerfStatWrap()],
    ),
    variables={},
    constants={},
    debug=False,
    gdb=False,
    enable_data_dir=True,
    continuing=False,
    benchmark_duration_seconds=10,
)

test_campaign.run()
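
Note that the traceback stops in _group_record_parameters, before any perf stat JSON is parsed, so this error is hit first. A minimal workaround sketch, assuming the TypeError comes from the var-name methods returning None (replacing the two methods in the Test class above):

    @staticmethod
    def get_build_var_names() -> List[str]:
        return []  # or ["implementation"], with matching campaign variables

    @staticmethod
    def get_run_var_names() -> List[str]:
        return []  # or ["instance", "implementation"]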

Metadata

Labels: invalid (This doesn't seem right), wontfix (This will not be worked on)
