1 change: 1 addition & 0 deletions rebench/configurator.py
@@ -294,6 +294,7 @@ def get_executor(self, executor_name, run_details, variables, action):
executor = Executor.compile(
executor_name, self._executors[executor_name],
run_details, variables, self.build_commands, action)

return executor

def get_suite(self, suite_name):
55 changes: 37 additions & 18 deletions rebench/model/executor.py
@@ -18,6 +18,7 @@
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import subprocess

from .build_cmd import BuildCommand
from .exp_run_details import ExpRunDetails
@@ -26,59 +27,77 @@
from ..configuration_error import ConfigurationError


class Executor(object):

class Executor:
@classmethod
def compile(cls, executor_name, executor, run_details, variables, build_commands, action):
path = executor.get('path')
if path and not path.startswith('~'):
path = os.path.abspath(path)
executable = executor.get('executable')
args = executor.get('args')
version_command = executor.get('version_command')
version_string = executor.get('version_string')
version_git = executor.get('version_git')

build = BuildCommand.create_commands(executor.get('build'), build_commands, path)

description = executor.get('description')
desc = executor.get('desc')
env = executor.get('env')

profiler = Profiler.compile(executor.get('profiler'))

run_details = ExpRunDetails.compile(executor, run_details)
variables = ExpVariables.compile(executor, variables)

if action == "profile" and len(profiler) == 0:
raise ConfigurationError("Executor " + executor_name + " is configured for profiling, "
+ "but no profiler details are given.")
raise ConfigurationError(f"Executor {executor_name} is configured for profiling, "
"but no profiler details are given.")

return Executor(executor_name, path, executable, args, build, description or desc,
profiler, run_details, variables, action, env)
return Executor(executor_name, path, executable, args,
version_command, version_string, version_git, build,
description or desc, profiler, run_details, variables, action, env)

def __init__(self, name, path, executable, args, build, description,
def __init__(self, name, path, executable, args,
version_command, version_string, version_git, build, description,
profiler, run_details, variables, action, env):
"""Specializing the executor details in the run definitions with the settings from
the executor definitions
the executor definitions
"""
self.name = name
self.path = path
self.executable = executable
self.args = args

self.version_command = version_command
self.version_string = version_string
self.version_git = version_git
self.build = build
self.description = description
self.profiler = profiler

self.run_details = run_details
self.variables = variables
self.env = env

self.action = action

def get_version(self):
if self.version_command:
try:
result = subprocess.run(self.version_command, shell=True,
check=True, capture_output=True, text=True)
return result.stdout.strip()
except subprocess.CalledProcessError as e:
return e.stderr.strip()
elif self.version_string:
return self.version_string
elif self.version_git:
try:
result = subprocess.run(self.version_git, shell=True,
check=True, capture_output=True, text=True)
return result.stdout.strip()
except subprocess.CalledProcessError as e:
return e.stderr.strip()
else:
return None

def as_dict(self):
result = {
'name': self.name,
'desc': self.description
}
result = {'name': self.name, 'desc': self.description}
if self.build:
result['build'] = [b.as_dict() for b in self.build]
return result
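
Not part of the diff: a standalone sketch of the resolution order implemented by the new `get_version()` above. `resolve_version` and `_run` are hypothetical helper names used only for illustration; the point is that `version_command` takes precedence over `version_string`, which takes precedence over `version_git`, and that a failing command yields its trimmed stderr instead of raising.

import subprocess

def _run(cmd):
    # Run a shell command; return trimmed stdout, or trimmed stderr on failure.
    try:
        result = subprocess.run(cmd, shell=True, check=True,
                                capture_output=True, text=True)
        return result.stdout.strip()
    except subprocess.CalledProcessError as e:
        return e.stderr.strip()

def resolve_version(version_command=None, version_string=None, version_git=None):
    # Mirrors Executor.get_version(): command > explicit string > git command.
    if version_command:
        return _run(version_command)
    if version_string:
        return version_string
    if version_git:
        return _run(version_git)
    return None

if __name__ == "__main__":
    print(resolve_version(version_command="python --version"))  # e.g. "Python 3.12.3"
    print(resolve_version(version_string="7.42"))                # "7.42"
    print(resolve_version(version_git="git rev-parse HEAD"))     # current commit hash
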
12 changes: 12 additions & 0 deletions rebench/rebench-schema.yml
@@ -277,6 +277,18 @@ schema;executor_type:
type: str
desc: Argument given to `perf` when processing the recording
default: report -g graph --no-children --stdio
version_command:
type: str
required: false
desc: Command to retrieve the version of the executable.
version_string:
type: str
required: false
desc: Explicit version string provided by the user.
version_git:
type: str
required: false
desc: Command to retrieve the Git version of the executable.

schema;exp_suite_type:
desc: A list of suites
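
Not part of the diff: a minimal executor entry using the new schema keys. The executor name `MyVM`, the binary, and the commands are placeholders; in practice only one of the three keys is needed, since `get_version()` checks `version_command` first, then `version_string`, then `version_git` (the `small_with_version.conf` fixture below sets all three).

executors:
  MyVM:
    path: .
    executable: my-vm
    version_command: "my-vm --version"    # ask the binary itself
    # version_string: "1.2.3"             # or record an explicit version
    # version_git: "git rev-parse HEAD"   # or record the current commit
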
4 changes: 2 additions & 2 deletions rebench/tests/bugs/issue_4_run_equality_and_params_test.py
@@ -36,7 +36,7 @@ def setUp(self):
@staticmethod
def _create_template_run_id():
executor = Executor('MyVM', 'foo_bar_path', 'foo_bar_bin',
None, None, None, None, None, None, "benchmark", {})
None, None, None, None, None, None, None, None, None, "benchmark", {})
suite = BenchmarkSuite("MySuite", executor, '', '%(benchmark)s %(cores)s %(input)s',
None, None, [], None, None, None)
benchmark = Benchmark("TestBench", "TestBench", None, suite, None,
@@ -46,7 +46,7 @@ def _create_hardcoded_run_id():
@staticmethod
def _create_hardcoded_run_id():
executor = Executor('MyVM', 'foo_bar_path', 'foo_bar_bin',
None, None, None, None, None, None, "benchmark", {})
None, None, None, None, None, None, None, None, None, "benchmark", {})
suite = BenchmarkSuite('MySuite', executor, '', '%(benchmark)s %(cores)s 2 3',
None, None, [], None, None, None)
benchmark = Benchmark("TestBench", "TestBench", None, suite,
106 changes: 100 additions & 6 deletions rebench/tests/executor_test.py
@@ -19,17 +19,18 @@
# IN THE SOFTWARE.
import unittest
import os
import subprocess

from ..model.executor import Executor as RebenchExecutor
from .persistence import TestPersistence
from .rebench_test_case import ReBenchTestCase
from ..rebench import ReBench
from ..executor import Executor, BatchScheduler, RandomScheduler, RoundRobinScheduler
from ..configurator import Configurator, load_config
from ..rebench import ReBench
from ..executor import Executor, BatchScheduler, RandomScheduler, RoundRobinScheduler
from ..configurator import Configurator, load_config
from ..model.measurement import Measurement
from ..persistence import DataStore
from ..persistence import DataStore
from ..ui import UIError
from ..reporter import Reporter

from ..reporter import Reporter


class ExecutorTest(ReBenchTestCase):
@@ -219,6 +220,99 @@ def test_determine_exp_name_and_filters_only_others(self):
self.assertEqual(exp_name, None)
self.assertEqual(exp_filter, ['e:bar', 's:b'])

def test_version_command(self):
executor = RebenchExecutor(
"TestExecutor", None, None, None, "python --version",
None, None, None, None, None, None, None, None, None
)

try:
result = subprocess.run(
executor.version_command, shell=True, check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
version_output = result.stdout.strip()
except subprocess.CalledProcessError as e:
version_output = e.stderr.strip()
self.assertTrue("Python" in version_output)

def test_version_command_in_config(self):
cnf = Configurator(load_config(self._path + '/small_with_version.conf'),
DataStore(self.ui),
self.ui, None, data_file=self._tmp_file)
runs = cnf.get_runs()
executor = list(runs)[0].benchmark.suite.executor

self.assertEqual(executor.version_command, "python --version")

try:
result = subprocess.run(
executor.version_command, shell=True, check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
version_output = result.stdout.strip()
except subprocess.CalledProcessError as e:
version_output = e.stderr.strip()

self.assertTrue("Python" in version_output)

def test_version_string(self):
executor = RebenchExecutor(
"TestExecutor", None, None, None, None, "7.42",
None, None, None, None, None, None, None, None
)

version_output = executor.version_string
self.assertTrue("7.42" in version_output)

def test_version_string_in_config(self):
cnf = Configurator(load_config(self._path + '/small_with_version.conf'),
DataStore(self.ui),
self.ui, None, data_file=self._tmp_file)
runs = cnf.get_runs()
executor = list(runs)[0].benchmark.suite.executor

self.assertEqual(executor.version_string, "7.42")

version_output = executor.version_string
self.assertTrue("7.42" in version_output)

def test_version_git(self):
executor = RebenchExecutor(
"TestExecutor", None, None, None, None, None,
"git rev-parse HEAD", None, None, None, None, None, None, None
)

try:
result = subprocess.run(
executor.version_git, shell=True, check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
version_output = result.stdout.strip()
except subprocess.CalledProcessError as e:
version_output = e.stderr.strip()
self.assertTrue(len(version_output) > 0)

def test_version_git_in_config(self):
cnf = Configurator(load_config(self._path + '/small_with_version.conf'),
DataStore(self.ui),
self.ui, None, data_file=self._tmp_file)
runs = cnf.get_runs()
executor = list(runs)[0].benchmark.suite.executor

self.assertEqual(executor.version_git, "git rev-parse HEAD")

try:
result = subprocess.run(
executor.version_git, shell=True, check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
version_output = result.stdout.strip()
except subprocess.CalledProcessError as e:
version_output = e.stderr.strip()

self.assertTrue(len(version_output) > 0)


class _TestReporter(Reporter):
__test__ = False # This is not a test class
2 changes: 1 addition & 1 deletion rebench/tests/persistency.conf
@@ -3,7 +3,7 @@

# this run definition will be chosen if no parameters are given to rebench.py
default_experiment: Test
default_data_file: 'persistency.data'
default_data_file: 'persistency.data'

reporting:
codespeed:
4 changes: 3 additions & 1 deletion rebench/tests/persistency_test.py
@@ -45,7 +45,9 @@ class PersistencyTest(ReBenchTestCase):
def test_de_serialization(self):
data_store = DataStore(self.ui)
executor = ExecutorConf("MyVM", '', '',
None, None, None, None, None, None, "benchmark", {})
None, None, None, None,
None, None, None, None,
None, "benchmark", {})
suite = BenchmarkSuite("MySuite", executor, '', '', None, None,
None, None, None, None)
benchmark = Benchmark("Test Bench [>", "Test Bench [>", None,
34 changes: 34 additions & 0 deletions rebench/tests/small_with_version.conf
@@ -0,0 +1,34 @@
# Config file for ReBench
# Config format is YAML (see http://yaml.org/ for detailed spec)

# this run definition will be chosen if no parameters are given to rebench.py
default_experiment: Test
default_data_file: 'small.data'

# general configuration for runs
runs:
invocations: 10
retries_after_failure: 3

benchmark_suites:
Suite:
gauge_adapter: TestExecutor
command: TestBenchMarks ~/suiteFolder/%(benchmark)s
benchmarks:
- Bench1
- Bench2

executors:
TestRunner1:
path: ~/PycharmProjects/ReBench/rebench/tests
executable: test-vm1.py
version_command: "python --version"
version_string: "7.42"
version_git: "git rev-parse HEAD"

experiments:
Test:
suites:
- Suite
executions:
- TestRunner1