pyperf/_inprocess_manager.py (new file: 33 additions, 0 deletions)
@@ -0,0 +1,33 @@
import pyperf
from pyperf._manager import Manager
from pyperf._worker import WorkerProcessTask


class InProcessManager(Manager):
    def __init__(self, runner, task):
        super().__init__(runner)
        self._task_func = task.task_func
        self._task_name = task.name
        self._func_metadata = {
            k: v for k, v in task.metadata.items() if k not in ("name",)
        }
        self._inner_loops = task.inner_loops

    def spawn_worker(self, calibrate_loops, calibrate_warmups):
        args = self.args
        args.calibrate_loops = int(calibrate_loops == 1)
        args.recalibrate_loops = int(calibrate_loops > 1)
        args.calibrate_warmups = int(calibrate_warmups == 1)
        args.recalibrate_warmups = int(calibrate_warmups > 1)

        task = WorkerProcessTask(
            self.runner,
            self._task_name,
            self._task_func,
            self._func_metadata,
        )
        task.inner_loops = self._inner_loops
        run = task.create_run()

        bench = pyperf.Benchmark((run,))
        return pyperf.BenchmarkSuite([bench])
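
The override is the whole trick: despite its inherited name, `spawn_worker` never forks a subprocess; it runs the `WorkerProcessTask` inline and wraps the resulting run in a one-benchmark `BenchmarkSuite`. The calibration state is encoded in the existing worker flags: `calibrate_loops == 1` marks the first (calibrating) call and `calibrate_loops > 1` a recalibration, and likewise for warmups. A minimal sketch of a benchmark script using this mode (hypothetical file and benchmark names, assuming the `--in-process` flag added by this PR):

# bench_sample.py (hypothetical) -- run as: python bench_sample.py --in-process
import pyperf

runner = pyperf.Runner()
# bench_func(name, func, *args) times func(*args); with --in-process the
# measurement runs in this interpreter, with no worker subprocesses.
runner.bench_func("sum_1k", sum, range(1000))
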
pyperf/_runner.py (23 additions, 2 deletions)
@@ -190,6 +190,13 @@ def __init__(self, values=None, processes=None,
                             type=strictly_positive)
         parser.add_argument('--worker', action='store_true',
                             help='Worker process, run the benchmark.')
+        parser.add_argument('--in-process', action='store_true',
+                            dest='in_process',
+                            help='Run benchmark in the current process '
+                                 'without spawning worker subprocesses. '
+                                 'Only used for environments that '
+                                 'do not support subprocesses, '
+                                 'like WebAssembly.')
         parser.add_argument('--worker-task', type=positive_or_nul, metavar='TASK_ID',
                             help='Identifier of the worker task: '
                                  'only execute the benchmark function TASK_ID')
@@ -266,8 +273,9 @@ def _multiline_output(self):
         return self.args.verbose or multiline_output(self.args)

     def _only_in_worker(self, option):
-        if not self.args.worker:
-            raise CLIError("option %s requires --worker" % option)
+        if not self.args.worker and not self.args.in_process:
+            raise CLIError("option %s requires --worker or --in-process"
+                           % option)

     def _process_args_impl(self):
         args = self.args
@@ -461,6 +469,8 @@ def _main(self, task):
         try:
             if args.worker:
                 bench = self._worker(task)
+            elif args.in_process:
+                bench = self._in_process(task)
             elif args.compare_to:
                 self._compare_to()
                 bench = None
@@ -684,6 +694,17 @@ def _manager(self):
         self._display_result(bench)
         return bench

+    def _in_process(self, task):
+        from pyperf._inprocess_manager import InProcessManager
+
+        if self.args.verbose and self._worker_task > 0:
+            print()
+        bench = InProcessManager(self, task).create_bench()
+        if not self.args.quiet:
+            print()
+        self._display_result(bench)
+        return bench
+
     def _compare_to(self):
         # Use lazy import to limit imports on 'import pyperf'
         from pyperf._compare import timeit_compare_benchs
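
Since `parse_args()` accepts an explicit argument list, the in-process path can also be driven programmatically, which is what the tests below do; a rough sketch (hypothetical benchmark name, following the tests' pattern):

import pyperf

runner = pyperf.Runner()
# One process, three values, a fixed single loop, no warmups.
runner.parse_args(["--in-process", "-p1", "-n3", "-l1", "-w0"])

def time_func(loops):
    # A time function returns the elapsed time for `loops` iterations;
    # a constant time per loop keeps this sketch deterministic.
    return loops * 1e-6

bench = runner.bench_time_func("noop", time_func)
print(bench.get_nvalue())  # 3 values, one per -n
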
pyperf/tests/test_inprocess.py (new file: 136 additions, 0 deletions)
@@ -0,0 +1,136 @@
import os.path
import unittest
from unittest import mock

import pyperf
from pyperf import tests


def check_args(loops, a, b):
    if a != 1:
        raise ValueError
    if b != 2:
        raise ValueError
    return loops


class TestInProcess(unittest.TestCase):
    def create_runner(self, args, **kwargs):
        pyperf.Runner._created.clear()
        runner = pyperf.Runner(**kwargs)
        runner._cpu_affinity = lambda: None
        runner.parse_args(args)
        return runner

    def fake_timer(self):
        t = self._timer_value
        self._timer_value += 1.0
        return t

    def exec_in_process(self, *extra_args, name="bench", time_func=None, **kwargs):
        self._timer_value = 0.0

        def fake_get_clock_info(clock):
            class ClockInfo:
                implementation = "fake_clock"
                resolution = 1.0

            return ClockInfo()

        args = ["--in-process", "-p1", "-n3", "-l1", "-w1"] + list(extra_args)
        runner = self.create_runner(args, **kwargs)

        with mock.patch("time.perf_counter", self.fake_timer):
            with mock.patch("time.get_clock_info", fake_get_clock_info):
                with tests.capture_stdout() as stdout:
                    with tests.capture_stderr() as stderr:
                        if time_func:
                            bench = runner.bench_time_func(name, time_func)
                        else:
                            bench = runner.bench_func(name, check_args, None, 1, 2)

        stdout = stdout.getvalue()
        stderr = stderr.getvalue()
        return bench, stdout, stderr

    def test_bench_func(self):
        bench, stdout, _ = self.exec_in_process()
        self.assertIsInstance(bench, pyperf.Benchmark)
        self.assertEqual(bench.get_name(), "bench")

    def test_bench_time_func(self):
        def time_func(loops):
            return 1.0

        bench, stdout, _ = self.exec_in_process(time_func=time_func)
        self.assertIsInstance(bench, pyperf.Benchmark)
        self.assertEqual(bench.get_name(), "bench")
        self.assertEqual(bench.get_nvalue(), 3)

    def test_values_count(self):
        bench, _, _ = self.exec_in_process("-n5")
        self.assertEqual(bench.get_nvalue(), 5)

    def test_json_output(self):
        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, "test.json")
            bench, _, _ = self.exec_in_process("--output", filename)
            loaded = pyperf.Benchmark.load(filename)
            self.assertEqual(loaded.get_name(), bench.get_name())
            self.assertEqual(loaded.get_nvalue(), bench.get_nvalue())

    def test_calibrate_loops(self):
        def time_func(loops):
            return loops * 1e-6

        bench, stdout, _ = self.exec_in_process(
            "-p1", "-w0", "-n2", "--min-time=0.001", time_func=time_func
        )
        self.assertIsInstance(bench, pyperf.Benchmark)

    def test_two_benchmarks(self):
        self._timer_value = 0.0

        def fake_get_clock_info(clock):
            class ClockInfo:
                implementation = "fake_clock"
                resolution = 1.0

            return ClockInfo()

        args = ["--in-process", "-p1", "-l1", "-w0", "-n3"]
        runner = self.create_runner(args)

        def time_func1(loops):
            return 1.0

        def time_func2(loops):
            return 2.0

        with mock.patch("time.perf_counter", self.fake_timer):
            with mock.patch("time.get_clock_info", fake_get_clock_info):
                with tests.capture_stdout():
                    bench1 = runner.bench_time_func("bench1", time_func1)
                    bench2 = runner.bench_time_func("bench2", time_func2)

        self.assertEqual(bench1.get_name(), "bench1")
        self.assertEqual(bench1.get_values(), (1.0, 1.0, 1.0))
        self.assertEqual(bench2.get_name(), "bench2")
        self.assertEqual(bench2.get_values(), (2.0, 2.0, 2.0))

    def test_show_name(self):
        bench, stdout, _ = self.exec_in_process(name="NAME")
        self.assertIn("NAME:", stdout)

    def test_show_name_false(self):
        bench, stdout, _ = self.exec_in_process(name="NAME", show_name=False)
        self.assertNotIn("NAME:", stdout)

    def test_no_subprocess_spawned(self):
        with mock.patch("pyperf._manager.Manager.spawn_worker") as mock_spawn:
            bench, _, _ = self.exec_in_process()
            mock_spawn.assert_not_called()


if __name__ == "__main__":
    unittest.main()
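
What makes these tests deterministic is the fake-timer pattern: every `time.perf_counter()` call advances a counter by exactly 1.0 s, so each measured interval is exactly 1.0 regardless of real elapsed time, while the `bench_time_func` cases take their values straight from the stub time functions. A standalone illustration of the pattern (not part of the PR):

import time
from unittest import mock


class FakeTimer:
    """Monotonic stub: each call advances by exactly 1.0 second."""

    def __init__(self):
        self.value = 0.0

    def __call__(self):
        t = self.value
        self.value += 1.0
        return t


with mock.patch("time.perf_counter", FakeTimer()):
    t0 = time.perf_counter()
    t1 = time.perf_counter()

assert t1 - t0 == 1.0  # deterministic, independent of wall-clock time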