diff --git a/.github/workflows/run-tests.yaml b/.github/workflows/run-tests.yaml index c84e6f70..dfbc3630 100644 --- a/.github/workflows/run-tests.yaml +++ b/.github/workflows/run-tests.yaml @@ -31,7 +31,7 @@ jobs: container: image: silkeh/clang:19 options: --user root - timeout-minutes: 20 + timeout-minutes: 30 steps: - name: Install deps run: | diff --git a/runtime/CMakeLists.txt b/runtime/CMakeLists.txt index 565ccc47..c152d769 100644 --- a/runtime/CMakeLists.txt +++ b/runtime/CMakeLists.txt @@ -3,7 +3,6 @@ set (SOURCE_FILES lib.cpp lin_check.cpp logger.cpp - pretty_printer.cpp verifying.cpp generators.cpp minimization.cpp diff --git a/runtime/include/custom_round.h b/runtime/include/custom_round.h new file mode 100644 index 00000000..23bbaf58 --- /dev/null +++ b/runtime/include/custom_round.h @@ -0,0 +1,12 @@ +#pragma once + +#include <vector> + +#include "lib.h" + +struct CustomRound { + CustomRound(std::vector<std::vector<TaskBuilder>> threads_) + : threads(threads_) {} + + std::vector<std::vector<TaskBuilder>> threads; +}; \ No newline at end of file diff --git a/runtime/include/latomic.h b/runtime/include/latomic.h new file mode 100644 index 00000000..fbfae64a --- /dev/null +++ b/runtime/include/latomic.h @@ -0,0 +1,384 @@ +#pragma once + +#include <atomic> + +#include "lib.h" +#include "wmm/wmm.h" + +namespace ltest { + +using namespace wmm; + +// This class is intended to be the entry point +// for the weak memory logic later. 
+template <typename T> +class latomic { + std::atomic<T> atomicValue; + int locationId; + ExecutionGraph& wmmGraph = ExecutionGraph::getInstance(); + + public: +#if __cplusplus >= 201703L // C++17 + static constexpr bool is_always_lock_free = + std::atomic<T>::is_always_lock_free; +#endif + + // Constructors + constexpr latomic() noexcept : latomic(T{}) {} + constexpr latomic(T desired) noexcept : atomicValue(desired) { + if (wmm_enabled) { + locationId = wmmGraph.RegisterLocation(desired); + } + } + latomic(const latomic&) = delete; + latomic& operator=(const latomic&) = delete; + latomic& operator=(const latomic&) volatile = delete; + + // operator= + T operator=(T desired) noexcept { + store(desired); + return desired; + } + + T operator=(T desired) volatile noexcept { + store(desired); + return desired; + } + + // is_lock_free + bool is_lock_free() const noexcept { return atomicValue.is_lock_free(); } + + bool is_lock_free() const volatile noexcept { + return atomicValue.is_lock_free(); + } + + // store + void store(T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + atomicValue.store(desired, order); + + if (wmm_enabled && this_coro) { + // std::cout << "Store: coro id=" << this_coro->GetId() << ", thread=" << + // this_thread_id + // << ", name=" << this_coro->GetName() << std::endl; + wmmGraph.Store(locationId, this_thread_id, WmmUtils::OrderFromStd(order), + desired); + } + } + + void store(T desired, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + store(desired, order); + } + + // load + T load(std::memory_order order = std::memory_order_seq_cst) const noexcept { + if (wmm_enabled && this_coro) { + // std::cout << "Load: coro id=" << this_coro->GetId() << ", thread=" << + // this_thread_id + // << ", name=" << this_coro->GetName() << std::endl; + T value = wmmGraph.Load(locationId, this_thread_id, + WmmUtils::OrderFromStd(order)); + return value; + } + + return atomicValue.load(order); + } + + T load(std::memory_order 
order = std::memory_order_seq_cst) const + volatile noexcept { + return load(order); + } + + // operator T() + operator T() const noexcept { return load(); } + + operator T() const volatile noexcept { return load(); } + + // TODO: add wmm support for exchange + // exchange + T exchange(T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.exchange(desired, order); + } + + T exchange(T desired, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.exchange(desired, order); + } + + // TODO: for CASes there more complex rule how to get 'failure' order from + // 'success', implement them instead of blind defaults compare_exchange_weak + bool compare_exchange_weak(T& expected, T desired, std::memory_order success, + std::memory_order failure) noexcept { + // we want to prevent actual atomics from overriding 'expected' value on rmw + // failure + T myExpected = expected; + bool value = atomicValue.compare_exchange_weak(myExpected, desired, success, + failure); + + if (wmm_enabled && this_coro) { + // std::cout << "Compare exchange weak: coro id=" << this_coro->GetId() << + // ", thread=" << this_thread_id + // << ", name=" << this_coro->GetName() << std::endl; + auto [rmwSuccess, readValue] = wmmGraph.ReadModifyWrite( + locationId, this_thread_id, &expected, desired, + WmmUtils::OrderFromStd(success), WmmUtils::OrderFromStd(failure)); + value = rmwSuccess; + } else { + // update expected only if we are not in a coroutine + expected = myExpected; + } + + return value; + } + + bool compare_exchange_weak(T& expected, T desired, std::memory_order success, + std::memory_order failure) volatile noexcept { + return compare_exchange_weak(expected, desired, success, failure); + } + + bool compare_exchange_weak( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return compare_exchange_weak(expected, desired, order, + std::memory_order_seq_cst); + } + + 
bool compare_exchange_weak( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) volatile noexcept { + return compare_exchange_weak(expected, desired, order, + std::memory_order_seq_cst); + } + + // compare_exchange_strong + bool compare_exchange_strong(T& expected, T desired, + std::memory_order success, + std::memory_order failure) noexcept { + // we want to prevent actual atomics from overriding 'expected' value on rmw + // failure + T myExpected = expected; + bool value = atomicValue.compare_exchange_strong(myExpected, desired, + success, failure); + + if (wmm_enabled && this_coro) { + auto [rmwSuccess, readValue] = wmmGraph.ReadModifyWrite( + locationId, this_thread_id, &expected, desired, + WmmUtils::OrderFromStd(success), WmmUtils::OrderFromStd(failure)); + value = rmwSuccess; + } else { + // update expected only if we are not in a coroutine + expected = myExpected; + } + + return value; + } + + bool compare_exchange_strong(T& expected, T desired, + std::memory_order success, + std::memory_order failure) volatile noexcept { + return compare_exchange_strong(expected, desired, success, failure); + } + + bool compare_exchange_strong( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return compare_exchange_strong(expected, desired, order, + std::memory_order_seq_cst); + } + + bool compare_exchange_strong( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) volatile noexcept { + return compare_exchange_strong(expected, desired, order, + std::memory_order_seq_cst); + } + + // TODO: operations below need to be implemented in WMM graph as well + +// wait +#if __cplusplus >= 202002L // C++20 + void wait(T old, std::memory_order order = + std::memory_order_seq_cst) const noexcept { + atomicValue.wait(old, order); + } + + void wait(T old, std::memory_order order = std::memory_order_seq_cst) const + volatile noexcept { + atomicValue.wait(old, order); + } + + // 
notify_one + void notify_one() noexcept { atomicValue.notify_one(); } + + void notify_one() volatile noexcept { atomicValue.notify_one(); } + + // notify all + void notify_all() noexcept { atomicValue.notify_all(); } + + void notify_all() volatile noexcept { atomicValue.notify_all(); } +#endif + + // fetch_add + T fetch_add(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_add(arg, order); + } + + T fetch_add(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_add(arg, order); + } + + // TODO: fix ambiguity with specialization for T* + // T* fetch_add(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) noexcept { + // return atomicValue.fetch_add(arg, order); + // } + + // T* fetch_add(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) volatile noexcept { + // return atomicValue.fetch_add(arg, order); + // } + + // fetch_sub + T fetch_sub(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_sub(arg, order); + } + + T fetch_sub(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_sub(arg, order); + } + + // TODO: fix ambiguity with specialization for T* + // T* fetch_sub(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) noexcept { + // return atomicValue.fetch_sub(arg, order); + // } + + // T* fetch_sub(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) volatile noexcept { + // return atomicValue.fetch_sub(arg, order); + // } + + // operator+= + T operator+=(T arg) noexcept { return atomicValue.operator+=(arg); } + + T operator+=(T arg) volatile noexcept { return atomicValue.operator+=(arg); } + + // TODO: fix ambiguity with specialization for T* + // T* operator+=(std::ptrdiff_t arg) noexcept { + // return atomicValue.operator+=(arg); + // } + + // T* 
operator+=(std::ptrdiff_t arg) volatile noexcept { + // return atomicValue.operator+=(arg); + // } + + // operator-= + T operator-=(T arg) noexcept { return atomicValue.operator-=(arg); } + + T operator-=(T arg) volatile noexcept { return atomicValue.operator-=(arg); } + + // TODO: fix ambiguity with specialization for T* + // T* operator-=(std::ptrdiff_t arg) noexcept { + // return atomicValue.operator-=(arg); + // } + + // T* operator-=(std::ptrdiff_t arg) volatile noexcept { + // return atomicValue.operator-=(arg); + // } + + // fetch_max + T fetch_max(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_max(arg, order); + } + + T fetch_max(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_max(arg, order); + } + + // fetch_min + T fetch_min(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_min(arg, order); + } + + T fetch_min(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_min(arg, order); + } + + // operator++ + T operator++() noexcept { return atomicValue.operator++(); } + + T operator++() volatile noexcept { return atomicValue.operator++(); } + + T operator++(int) noexcept { return atomicValue.operator++(0); } + + T operator++(int) volatile noexcept { return atomicValue.operator++(0); } + + // operator-- + T operator--() noexcept { return atomicValue.operator--(); } + + T operator--() volatile noexcept { return atomicValue.operator--(); } + + T operator--(int) noexcept { return atomicValue.operator--(0); } + + T operator--(int) volatile noexcept { return atomicValue.operator--(0); } + + // fetch_and + T fetch_and(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_and(arg, order); + } + + T fetch_and(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return 
atomicValue.fetch_and(arg, order); + } + + // fetch_or + T fetch_or(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_or(arg, order); + } + + T fetch_or(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_or(arg, order); + } + + // fetch_xor + T fetch_xor(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_xor(arg, order); + } + + T fetch_xor(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_xor(arg, order); + } + + // operator&= + T operator&=(T arg) noexcept { return atomicValue.operator&=(arg); } + + T operator&=(T arg) volatile noexcept { return atomicValue.operator&=(arg); } + + // operator|= + T operator|=(T arg) noexcept { return atomicValue.operator|=(arg); } + + T operator|=(T arg) volatile noexcept { return atomicValue.operator|=(arg); } + + // operator^= + T operator^=(T arg) noexcept { return atomicValue.operator^=(arg); } + + T operator^=(T arg) volatile noexcept { return atomicValue.operator^=(arg); } +}; + +} // namespace ltest \ No newline at end of file diff --git a/runtime/include/lib.h b/runtime/include/lib.h index c3d3b99a..72f53e8a 100644 --- a/runtime/include/lib.h +++ b/runtime/include/lib.h @@ -20,6 +20,7 @@ struct CoroutineStatus; // Current executing coroutine. extern std::shared_ptr this_coro; +extern int this_thread_id; // Scheduler context extern boost::context::fiber_context sched_ctx; @@ -45,7 +46,7 @@ struct CoroBase : public std::enable_shared_from_this { virtual std::shared_ptr Restart(void* this_ptr) = 0; // Resume the coroutine to the next yield. - void Resume(); + void Resume(int resumed_thread_id); // Check if the coroutine is returned. bool IsReturned() const; @@ -70,10 +71,10 @@ struct CoroBase : public std::enable_shared_from_this { std::shared_ptr GetPtr(); // Try to terminate the coroutine. 
- void TryTerminate(); + void TryTerminate(int running_thread_id); // Terminate the coroutine. - void Terminate(); + void Terminate(int running_thread_id); void SetBlocked(const BlockState& state) { fstate = state; diff --git a/runtime/include/pct_strategy.h b/runtime/include/pct_strategy.h index 02569b88..06e6a7ff 100644 --- a/runtime/include/pct_strategy.h +++ b/runtime/include/pct_strategy.h @@ -166,19 +166,7 @@ struct PctStrategy : public BaseStrategyWithThreads { } void StartNextRound() override { - this->new_task_id = 0; - // log() << "depth: " << current_depth << "\n"; - // Reconstruct target as we start from the beginning. - this->TerminateTasks(); - for (auto& thread : this->threads) { - // We don't have to keep references alive - while (thread.size() > 0) { - thread.pop_back(); - } - thread = StableVector(); - } - // this->state.Reset(); - + BaseStrategyWithThreads::StartNextRound(); UpdateStatistics(); } diff --git a/runtime/include/pick_strategy.h b/runtime/include/pick_strategy.h index 8a21045a..d3a8aba5 100644 --- a/runtime/include/pick_strategy.h +++ b/runtime/include/pick_strategy.h @@ -34,18 +34,6 @@ struct PickStrategy : public BaseStrategyWithThreads { is_new, current_thread}; } - void StartNextRound() override { - this->new_task_id = 0; - - this->TerminateTasks(); - for (auto& thread : this->threads) { - // We don't have to keep references alive - while (thread.size() > 0) { - thread.pop_back(); - } - } - } - ~PickStrategy() { this->TerminateTasks(); } protected: diff --git a/runtime/include/pretty_print.h b/runtime/include/pretty_print.h index 9504fa59..baf9fc8c 100644 --- a/runtime/include/pretty_print.h +++ b/runtime/include/pretty_print.h @@ -14,7 +14,7 @@ using std::to_string; using FullHistoryWithThreads = std::vector, CoroutineStatus>>>; struct PrettyPrinter { - PrettyPrinter(size_t threads_num); + explicit PrettyPrinter() = default; /* Prints like this: @@ -31,7 +31,7 @@ struct PrettyPrinter { */ template void PrettyPrint(const 
std::vector>& result, - Out_t& out) { + int threads_num, Out_t& out) { auto get_thread_num = [](const std::variant& v) { // Crutch. if (v.index() == 0) { @@ -42,7 +42,7 @@ struct PrettyPrinter { int cell_width = 50; // Up it if necessary. Enough for now. - auto print_separator = [&out, this, cell_width]() { + auto print_separator = [threads_num, &out, this, cell_width]() { out << "*"; for (int i = 0; i < threads_num; ++i) { for (int j = 0; j < cell_width; ++j) { @@ -121,10 +121,11 @@ struct PrettyPrinter { // Helps to debug full histories. template - void PrettyPrint(FullHistoryWithThreads& result, Out_t& out) { + void PrettyPrint(FullHistoryWithThreads& result, int threads_num, + Out_t& out) { int cell_width = 20; // Up it if necessary. Enough for now. - auto print_separator = [&out, this, cell_width]() { + auto print_separator = [threads_num, &out, this, cell_width]() { out << "*"; for (int i = 0; i < threads_num; ++i) { for (int j = 0; j < cell_width; ++j) { @@ -178,7 +179,7 @@ struct PrettyPrinter { index[base] = sz; } int length = std::to_string(index[base]).size(); - std::cout << index[base]; + out << index[base]; assert(spaces - length >= 0); print_spaces(7 - length); out << "|"; @@ -247,5 +248,4 @@ struct PrettyPrinter { out << msg; } }; - size_t threads_num; }; diff --git a/runtime/include/random_strategy.h b/runtime/include/random_strategy.h index 9f144f7f..f2f52d77 100644 --- a/runtime/include/random_strategy.h +++ b/runtime/include/random_strategy.h @@ -80,6 +80,14 @@ struct RandomStrategy : PickStrategy { return std::nullopt; } + void SetCustomRound(CustomRound &custom_round) override { + PickStrategy::SetCustomRound(custom_round); + + size_t custom_threads_count = custom_round.threads.size(); + weights.resize(custom_threads_count, 1); + pick_weights.clear(); + } + private: std::vector weights; std::vector pick_weights; diff --git a/runtime/include/scheduler.h b/runtime/include/scheduler.h index 86420377..9275a61a 100644 --- 
a/runtime/include/scheduler.h +++ b/runtime/include/scheduler.h @@ -9,6 +9,7 @@ #include #include +#include "custom_round.h" #include "lib.h" #include "lincheck.h" #include "logger.h" @@ -17,6 +18,9 @@ #include "pretty_print.h" #include "scheduler_fwd.h" #include "stable_vector.h" +#include "wmm/wmm.h" + +using namespace ltest::wmm; struct TaskWithMetaData { Task& task; @@ -35,6 +39,7 @@ concept StrategyTaskVerifier = requires(T a) { } -> std::same_as; { a.OnFinished(std::declval(), size_t()) } -> std::same_as; { a.ReleaseTask(size_t()) } -> std::same_as>; + { a.Reset() } -> std::same_as; }; // Strategy is the general strategy interface which decides which task @@ -78,6 +83,11 @@ struct Strategy { // Resets the state of all created tasks in the strategy. virtual void ResetCurrentRound() = 0; + // Sets custom round provided by the user for execution. + // The round should be executed via `Scheduler::ExploreRound` method + // instead of `Scheduler::GenerateAndRunRound`. + virtual void SetCustomRound(CustomRound& custom_round) = 0; + // Returns the number of non-removed tasks virtual int GetValidTasksCount() const = 0; @@ -99,6 +109,12 @@ struct Strategy { // than `round_schedule[thread]` or the same index if the task is not finished virtual int GetNextTaskInThread(int thread_index) const = 0; + void ResetWmmGraph(int threads_count) { + if (wmm_enabled) { + wmm_graph.Reset(threads_count); + } + } + // id of next generated task int new_task_id = 0; // stores task ids that are removed during the round minimization @@ -106,16 +122,18 @@ struct Strategy { // when generated round is explored this vector stores indexes of tasks // that will be invoked next in each thread std::vector round_schedule; + ExecutionGraph& wmm_graph = ExecutionGraph::getInstance(); }; template struct BaseStrategyWithThreads : public Strategy { BaseStrategyWithThreads(size_t threads_count, std::vector constructors) - : state(std::make_unique()), - threads_count(threads_count), - 
constructors(std::move(constructors)) { + : threads_count(threads_count), constructors(std::move(constructors)) { + // must be called before instantiating `TargetObj` + ResetWmmGraph(this->threads_count); round_schedule.resize(threads_count, -1); + state = std::make_unique(); constructors_distribution = std::uniform_int_distribution( @@ -153,6 +171,32 @@ struct BaseStrategyWithThreads : public Strategy { return threads; } + void StartNextRound() override { + this->new_task_id = 0; + // also resets the state + this->TerminateTasks(); // TODO: what about different threads count for + // wmm_graph? + + // this could happen if we run custom scenarios + // (which could have arbitrary number of threads) + if (this->threads.size() != this->threads_count) { + this->threads.clear(); + for (size_t i = 0; i < this->threads_count; ++i) { + this->threads.emplace_back(); + } + } else { + // more optimal allocations-wise implementation + for (auto& thread : this->threads) { + // We don't have to keep references alive + while (thread.size() > 0) { + thread.pop_back(); + } + } + } + + this->round_schedule.resize(this->threads_count, -1); + } + void ResetCurrentRound() override { TerminateTasks(); for (auto& thread : threads) { @@ -165,6 +209,29 @@ struct BaseStrategyWithThreads : public Strategy { } } + void SetCustomRound(CustomRound& custom_round) override { + size_t custom_threads_count = custom_round.threads.size(); + + // custom round threads count might differ from the generated rounds + this->threads.resize(custom_threads_count); + this->round_schedule.resize(custom_threads_count, -1); + this->sched_checker.Reset(); + ResetWmmGraph(custom_threads_count); + this->state = std::make_unique(); + + for (size_t current_thread = 0; current_thread < custom_threads_count; + ++current_thread) { + auto& builders = custom_round.threads[current_thread]; + StableVector thread_tasks; + for (auto& task_builder : builders) { + auto task = + task_builder.Build(state.get(), current_thread, 
new_task_id++); + thread_tasks.emplace_back(task); + } + this->threads[current_thread] = std::move(thread_tasks); + } + } + int GetValidTasksCount() const override { int non_removed_tasks = 0; for (auto& thread : threads) { @@ -274,7 +341,7 @@ struct BaseStrategyWithThreads : public Strategy { auto& task = thread[task_index]; has_nonterminated_threads = true; // do a single step in this task - task->Resume(); + task->Resume(thread_index); if (task->IsReturned()) { OnVerifierTaskFinish(task, thread_index); debug(stderr, "Terminated: %ld\n", thread_index); @@ -282,6 +349,11 @@ struct BaseStrategyWithThreads : public Strategy { } } } + // must appear before state reset, so that constructors of atomics in + // data structure under test register themselves in the new execution graph + // TODO: for custom scenarios threads number might differ, check for places + // where `threads.size()` cannot be used + ResetWmmGraph(threads.size()); state.reset(new TargetObj{}); } @@ -320,11 +392,13 @@ struct StrategyScheduler : public SchedulerWithReplay { // max_switches represents the maximal count of switches. 
After this count // scheduler will end execution of the Run function StrategyScheduler(Strategy& sched_class, ModelChecker& checker, + std::vector custom_rounds, PrettyPrinter& pretty_printer, size_t max_tasks, size_t max_rounds, bool minimize, size_t exploration_runs, size_t minimization_runs) : strategy(sched_class), checker(checker), + custom_rounds(std::move(custom_rounds)), pretty_printer(pretty_printer), max_tasks(max_tasks), max_rounds(max_rounds), @@ -336,28 +410,40 @@ struct StrategyScheduler : public SchedulerWithReplay { // history is a history with all events, where each element in the vector is a // Resume operation on the corresponding task Scheduler::Result Run() override { - for (size_t i = 0; i < max_rounds; ++i) { - log() << "run round: " << i << "\n"; - auto histories = RunRound(); + for (size_t j = 0; j < custom_rounds.size() + max_rounds; ++j) { + bool is_running_custom_scenarios = (j < custom_rounds.size()); + Result histories; + + if (is_running_custom_scenarios) { + log() << "explore custom round: " << j << "\n"; + strategy.SetCustomRound(custom_rounds[j]); + histories = ExploreRound(exploration_runs, true); + } else { + size_t i = j - custom_rounds.size(); + log() << "run round: " << i << "\n"; + debug(stderr, "run round: %zu\n", i); + histories = RunRound(); + } if (histories.has_value()) { auto& [full_history, sequential_history, reason] = histories.value(); + int threads_num = GetStartegyThreadsCount(); if (should_minimize_history) { log() << "Full nonlinear scenario: \n"; - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, threads_num, log()); log() << "Minimizing same interleaving...\n"; Minimize(histories.value(), SameInterleavingMinimizor()); log() << "Minimized to:\n"; - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, threads_num, log()); log() << "Minimizing with rescheduling (exploration runs: " << exploration_runs << 
")...\n"; Minimize(histories.value(), StrategyExplorationMinimizor(exploration_runs)); log() << "Minimized to:\n"; - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, threads_num, log()); log() << "Minimizing with smart minimizor (exploration runs: " << exploration_runs @@ -377,6 +463,10 @@ struct StrategyScheduler : public SchedulerWithReplay { return std::nullopt; } + int GetStartegyThreadsCount() const override { + return strategy.GetThreadsCount(); + } + protected: // Runs a round with some interleaving while generating it Result RunRound() override { @@ -401,7 +491,7 @@ struct StrategyScheduler : public SchedulerWithReplay { } full_history.emplace_back(next_task); - next_task->Resume(); + next_task->Resume(thread_id); if (next_task->IsReturned()) { finished_tasks++; strategy.OnVerifierTaskFinish(next_task, thread_id); @@ -412,7 +502,8 @@ struct StrategyScheduler : public SchedulerWithReplay { } } - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, GetStartegyThreadsCount(), + log()); if (deadlock_detected) { return NonLinearizableHistory(full_history, sequential_history, @@ -429,7 +520,7 @@ struct StrategyScheduler : public SchedulerWithReplay { } // Runs different interleavings of the current round - Result ExploreRound(int runs) override { + Result ExploreRound(int runs, bool log_each_interleaving) override { for (int i = 0; i < runs; ++i) { // log() << "Run " << i + 1 << "/" << runs << "\n"; strategy.ResetCurrentRound(); @@ -452,7 +543,7 @@ struct StrategyScheduler : public SchedulerWithReplay { } full_history.emplace_back(next_task); - next_task->Resume(); + next_task->Resume(thread_id); if (next_task->IsReturned()) { tasks_to_run--; strategy.OnVerifierTaskFinish(next_task, thread_id); @@ -463,6 +554,12 @@ struct StrategyScheduler : public SchedulerWithReplay { } } + if (log_each_interleaving) { + pretty_printer.PrettyPrint(sequential_history, + 
GetStartegyThreadsCount(), log()); + log() << "\n"; + } + if (deadlock_detected) { return NonLinearizableHistory(full_history, sequential_history, NonLinearizableHistory::Reason::DEADLOCK); @@ -470,7 +567,8 @@ struct StrategyScheduler : public SchedulerWithReplay { if (!checker.Check(sequential_history)) { // log() << "New nonlinearized scenario:\n"; - // pretty_printer.PrettyPrint(sequential_history, log()); + // pretty_printer.PrettyPrint(sequential_history, + // GetStartegyThreadsCount(), log()); return NonLinearizableHistory( full_history, sequential_history, NonLinearizableHistory::Reason::NON_LINEARIZABLE_HISTORY); @@ -519,10 +617,10 @@ struct StrategyScheduler : public SchedulerWithReplay { // if this is the last time this task appears in `tasks_ordering`, then // complete it fully. if (resumes_count[next_task_id] == 0) { - next_task->Terminate(); + next_task->Terminate(thread_id); } else { resumes_count[next_task_id]--; - next_task->Resume(); + next_task->Resume(thread_id); } if (next_task->IsReturned()) { @@ -554,6 +652,7 @@ struct StrategyScheduler : public SchedulerWithReplay { private: Strategy& strategy; ModelChecker& checker; + std::vector custom_rounds; PrettyPrinter& pretty_printer; size_t max_tasks; size_t max_rounds; @@ -614,6 +713,8 @@ struct TLAScheduler : Scheduler { struct Frame { // Pointer to the in task thread. Task* task{}; + // Id of the thread in which the task is running. + int thread_id{}; // Is true if the task was created at this step. 
bool is_new{}; }; @@ -624,11 +725,11 @@ struct TLAScheduler : Scheduler { // cancel() func takes care for graceful shutdown void TerminateTasks() { cancel(); - for (size_t i = 0; i < threads.size(); ++i) { - for (size_t j = 0; j < threads[i].tasks.size(); ++j) { - auto& task = threads[i].tasks[j]; + for (size_t thread_id = 0; thread_id < threads.size(); ++thread_id) { + for (size_t j = 0; j < threads[thread_id].tasks.size(); ++j) { + auto& task = threads[thread_id].tasks[j]; if (!task->IsReturned()) { - task->Terminate(); + task->Terminate(thread_id); } } } @@ -651,7 +752,7 @@ struct TLAScheduler : Scheduler { } else { // It was a not new task, hence, we recreated in early. } - (*task)->Resume(); + (*task)->Resume(frame.thread_id); } coroutine_status.reset(); } @@ -698,6 +799,7 @@ struct TLAScheduler : Scheduler { } auto& task = thread.tasks.back(); frame.task = &task; + frame.thread_id = thread_id; thread_id_history.push_back(thread_id); if (is_new) { @@ -705,7 +807,7 @@ struct TLAScheduler : Scheduler { } assert(!task->IsBlocked()); - task->Resume(); + task->Resume(thread_id); UpdateFullHistory(thread_id, task, is_new); bool is_finished = task->IsReturned(); if (is_finished) { @@ -724,7 +826,8 @@ struct TLAScheduler : Scheduler { } } else { log() << "run round: " << finished_rounds << "\n"; - pretty_printer.PrettyPrint(full_history, log()); + pretty_printer.PrettyPrint(full_history, GetStartegyThreadsCount(), + log()); log() << "===============================================\n\n"; log().flush(); // Stop, check if the the generated history is linearizable. 
@@ -813,7 +916,7 @@ struct TLAScheduler : Scheduler { } frame.is_new = true; auto size_before = tasks.size(); - tasks.emplace_back(cons.Build(&state, i, -1/* TODO: fix task id for tla, because it is Scheduler and not Strategy class for some reason */)); + tasks.emplace_back(cons.Build(state.get(), i, -1/* TODO: fix task id for tla, because it is Scheduler and not Strategy class for some reason */)); started_tasks++; auto [is_over, res] = ResumeTask(frame, step, switches, thread, true); if (is_over || res.has_value()) { @@ -834,6 +937,8 @@ struct TLAScheduler : Scheduler { return {false, {}}; } + int GetStartegyThreadsCount() const override { return threads.size(); } + PrettyPrinter& pretty_printer; size_t max_tasks; size_t max_rounds; diff --git a/runtime/include/scheduler_fwd.h b/runtime/include/scheduler_fwd.h index 5e0900f1..0b51f7f2 100644 --- a/runtime/include/scheduler_fwd.h +++ b/runtime/include/scheduler_fwd.h @@ -23,6 +23,8 @@ struct Scheduler { virtual Result Run() = 0; + virtual int GetStartegyThreadsCount() const = 0; + virtual ~Scheduler() = default; }; @@ -35,7 +37,7 @@ struct SchedulerWithReplay : Scheduler { virtual Result RunRound() = 0; - virtual Result ExploreRound(int runs) = 0; + virtual Result ExploreRound(int runs, bool log_each_interleaving = false) = 0; virtual Result ReplayRound(const std::vector& tasks_ordering) = 0; diff --git a/runtime/include/strategy_verifier.h b/runtime/include/strategy_verifier.h index 75edf5ae..22f2ef2c 100644 --- a/runtime/include/strategy_verifier.h +++ b/runtime/include/strategy_verifier.h @@ -9,4 +9,6 @@ struct DefaultStrategyTaskVerifier { inline std::optional ReleaseTask(size_t thread_id) { return std::nullopt; } + + inline void Reset() {} }; diff --git a/runtime/include/verifying.h b/runtime/include/verifying.h index 10a9c3d4..1491d5e7 100644 --- a/runtime/include/verifying.h +++ b/runtime/include/verifying.h @@ -6,6 +6,8 @@ #include #include "blocking_primitives.h" +#include "custom_round.h" +#include 
"latomic.h" #include "lib.h" #include "lincheck_recursive.h" #include "logger.h" @@ -48,6 +50,7 @@ struct Opts { bool minimize; size_t exploration_runs; size_t minimization_runs; + bool wmm_enabled; size_t depth; bool forbid_all_same; bool verbose; @@ -68,6 +71,7 @@ struct DefaultOptions { const char *weights; size_t minimization_runs; size_t exploration_runs; + bool wmm_enabled; }; void SetOpts(const DefaultOptions &def); @@ -112,11 +116,14 @@ std::unique_ptr MakeStrategy(Opts &opts, std::vector l) { template struct StrategySchedulerWrapper : StrategyScheduler { StrategySchedulerWrapper(std::unique_ptr strategy, - ModelChecker &checker, PrettyPrinter &pretty_printer, - size_t max_tasks, size_t max_rounds, bool minimize, + ModelChecker &checker, + std::vector custom_rounds, + PrettyPrinter &pretty_printer, size_t max_tasks, + size_t max_rounds, bool minimize, size_t exploration_runs, size_t minimization_runs) : strategy(std::move(strategy)), - StrategyScheduler(*strategy.get(), checker, pretty_printer, + StrategyScheduler(*strategy.get(), checker, + std::move(custom_rounds), pretty_printer, max_tasks, max_rounds, minimize, exploration_runs, minimization_runs) {}; @@ -127,6 +134,7 @@ struct StrategySchedulerWrapper : StrategyScheduler { template std::unique_ptr MakeScheduler(ModelChecker &checker, Opts &opts, const std::vector &l, + std::vector custom_rounds, PrettyPrinter &pretty_printer, const std::function &cancel) { std::cout << "strategy = "; @@ -136,8 +144,9 @@ std::unique_ptr MakeScheduler(ModelChecker &checker, Opts &opts, case RND: { auto strategy = MakeStrategy(opts, std::move(l)); auto scheduler = std::make_unique>( - std::move(strategy), checker, pretty_printer, opts.tasks, opts.rounds, - opts.minimize, opts.exploration_runs, opts.minimization_runs); + std::move(strategy), checker, std::move(custom_rounds), + pretty_printer, opts.tasks, opts.rounds, opts.minimize, + opts.exploration_runs, opts.minimization_runs); return scheduler; } case TLA: { @@ 
-159,12 +168,14 @@ inline int TrapRun(std::unique_ptr &&scheduler, if (result.has_value()) { if (result->reason == Scheduler::NonLinearizableHistory::Reason::DEADLOCK) { std::cout << "deadlock detected:\n"; - pretty_printer.PrettyPrint(result->seq, std::cout); + pretty_printer.PrettyPrint( + result->seq, scheduler->GetStartegyThreadsCount(), std::cout); return 4; // see https://tldp.org/LDP/abs/html/exitcodes.html } else if (result->reason == Scheduler::NonLinearizableHistory::Reason:: NON_LINEARIZABLE_HISTORY) { std::cout << "non linearized:\n"; - pretty_printer.PrettyPrint(result->seq, std::cout); + pretty_printer.PrettyPrint( + result->seq, scheduler->GetStartegyThreadsCount(), std::cout); return 3; } else { std::abort(); @@ -177,7 +188,7 @@ inline int TrapRun(std::unique_ptr &&scheduler, template -int Run(int argc, char *argv[]) { +int Run(int argc, char *argv[], std::vector custom_rounds = {}) { if constexpr (!std::is_same_v) { SetOpts(Spec::options_override_t::GetOptions()); @@ -196,9 +207,10 @@ int Run(int argc, char *argv[]) { std::cout << "exploration runs = " << opts.exploration_runs << "\n"; std::cout << "minimization runs = " << opts.minimization_runs << "\n"; } + std::cout << "wmm enabled = " << std::boolalpha << opts.wmm_enabled << "\n"; std::cout << "targets = " << task_builders.size() << "\n"; - PrettyPrinter pretty_printer{opts.threads}; + PrettyPrinter pretty_printer; using lchecker_t = LinearizabilityCheckerRecursive( - checker, opts, std::move(task_builders), pretty_printer, - &Spec::cancel_t::Cancel); + checker, opts, std::move(task_builders), std::move(custom_rounds), + pretty_printer, &Spec::cancel_t::Cancel); std::cout << "\n\n"; std::cout.flush(); return TrapRun(std::move(scheduler), pretty_printer); @@ -217,12 +229,28 @@ int Run(int argc, char *argv[]) { } // namespace ltest -#define LTEST_ENTRYPOINT_CONSTRAINT(spec_obj_t, strategy_verifier) \ - int main(int argc, char *argv[]) { \ - return ltest::Run(argc, argv); \ +// `...` is used 
instead of named argument in order to allow +// user to specify custom rounds without wrapping them into +// parenthesis `()` manually +#define LTEST_ENTRYPOINT(spec_obj_t, ...) \ + int main(int argc, char *argv[]) { \ + std::vector custom_rounds; \ + __VA_OPT__(std::vector>> builders = \ + {__VA_ARGS__}; \ + for (auto &v : builders) { \ + custom_rounds.emplace_back(std::move(v)); \ + }) \ + return ltest::Run(argc, argv, std::move(custom_rounds)); \ } -#define LTEST_ENTRYPOINT(spec_obj_t) \ - int main(int argc, char *argv[]) { \ - return ltest::Run(argc, argv); \ - }\ +#define LTEST_ENTRYPOINT_CONSTRAINT(spec_obj_t, strategy_verifier, ...) \ + int main(int argc, char *argv[]) { \ + std::vector custom_rounds; \ + __VA_OPT__(std::vector>> builders = \ + {__VA_ARGS__}; \ + for (auto &v : builders) { \ + custom_rounds.emplace_back(std::move(v)); \ + }) \ + return ltest::Run( \ + argc, argv, std::move(custom_rounds)); \ + } \ No newline at end of file diff --git a/runtime/include/verifying_macro.h b/runtime/include/verifying_macro.h index 466f2d7b..a9f21f7b 100644 --- a/runtime/include/verifying_macro.h +++ b/runtime/include/verifying_macro.h @@ -78,6 +78,26 @@ struct TargetMethod { } }; +template +struct MethodInvocation { + using Method = std::function; + + inline static TaskBuilder GetTaskBuilder(std::string_view method_name, + std::tuple params, + Method method) { + auto builder = + [method_name, params = std::move(params), method = std::move(method)]( + void *this_ptr, size_t unused_thread_num, int task_id) -> Task { + auto args = std::shared_ptr(new std::tuple(params)); + auto coro = Coro::New(method, this_ptr, args, + <est::toStringArgs, + method_name, task_id); + return coro; + }; + return TaskBuilder(std::string(method_name), builder); + } +}; + template struct TargetMethod { using Method = std::function; @@ -102,6 +122,31 @@ struct TargetMethod { } }; +template +struct MethodInvocation { + using Method = std::function; + + inline static TaskBuilder 
GetTaskBuilder(std::string_view method_name, + std::tuple params, + Method method) { + auto builder = + [method_name, params = std::move(params), method = std::move(method)]( + void *this_ptr, size_t unused_thread_num, int task_id) -> Task { + // auto wrapper = Wrapper{method}; + auto wrapper = [f = std::move(method)](void *this_ptr, Args &&...args) { + f(reinterpret_cast(this_ptr), std::forward(args)...); + return void_v; + }; + auto args = std::shared_ptr(new std::tuple(params)); + auto coro = Coro::New(wrapper, this_ptr, args, + <est::toStringArgs, + method_name, task_id); + return coro; + }; + return TaskBuilder(std::string(method_name), builder); + } +}; + } // namespace ltest #define declare_task_name(symbol) \ @@ -111,3 +156,11 @@ struct TargetMethod { declare_task_name(symbol); \ ltest::TargetMethod \ symbol##_ltest_method_cls{symbol##_task_name, gen, &cls::symbol}; + +#define stringify_detail(x) #x +#define stringify(x) stringify_detail(x) +#define method_invocation(params, ret, cls, symbol, ...) \ + ltest::MethodInvocation< \ + ret, cls __VA_OPT__(, ) __VA_ARGS__>::GetTaskBuilder(stringify(symbol), \ + params, \ + &cls::symbol) \ No newline at end of file diff --git a/runtime/include/wmm/common.h b/runtime/include/wmm/common.h new file mode 100644 index 00000000..e688bfa4 --- /dev/null +++ b/runtime/include/wmm/common.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include + +namespace ltest::wmm { + +using EventId = int; +using EdgeId = int; + +enum class MemoryOrder { Relaxed, Acquire, Release, AcqRel, SeqCst }; + +enum class EventType { DUMMY, READ, WRITE, RMW }; + +enum class EdgeType { + PO, // program order / sequenced before + SC, // seq-cst edge + RF, // reads-from + // TODO: do we need it? since we have hb-clocks already + // HB, // happens-before + MO, // modification order + // TODO: do we need it explicitly? 
hb-clocks can give the same basically + // SW, // synchronized-with +}; + +struct WmmUtils { + inline static MemoryOrder OrderFromStd(std::memory_order order) { + switch (order) { + case std::memory_order_relaxed: + return MemoryOrder::Relaxed; + case std::memory_order_acquire: + return MemoryOrder::Acquire; + case std::memory_order_release: + return MemoryOrder::Release; + case std::memory_order_acq_rel: + return MemoryOrder::AcqRel; + case std::memory_order_seq_cst: + return MemoryOrder::SeqCst; + default: + throw std::invalid_argument("Unsupported memory order"); + } + } + + inline static std::string OrderToString(MemoryOrder order) { + switch (order) { + case MemoryOrder::Relaxed: + return "Relaxed"; + case MemoryOrder::Acquire: + return "Acquire"; + case MemoryOrder::Release: + return "Release"; + case MemoryOrder::AcqRel: + return "AcqRel"; + case MemoryOrder::SeqCst: + return "SeqCst"; + } + } + + inline static std::string EventTypeToString(EventType type) { + switch (type) { + case EventType::DUMMY: + return "D"; + case EventType::READ: + return "R"; + case EventType::WRITE: + return "W"; + case EventType::RMW: + return "RMW"; + } + } + + inline static std::string EdgeTypeToString(EdgeType type) { + switch (type) { + case EdgeType::PO: + return "po"; + case EdgeType::SC: + return "sc"; + case EdgeType::RF: + return "rf"; + // case EdgeType::HB: return "hb"; + // case EdgeType::SW: return "sw"; + case EdgeType::MO: + return "mo"; + } + } + + // thread id to which all initalization events (constructors of atomics) will + // belong to + inline static int INIT_THREAD_ID = 0; +}; + +} // namespace ltest::wmm diff --git a/runtime/include/wmm/edge.h b/runtime/include/wmm/edge.h new file mode 100644 index 00000000..976902a8 --- /dev/null +++ b/runtime/include/wmm/edge.h @@ -0,0 +1,14 @@ +#pragma once + +#include "common.h" + +namespace ltest::wmm { + +struct Edge { + EdgeId id; + EdgeType type; + EventId from; + EventId to; +}; + +} // namespace ltest::wmm \ No 
newline at end of file diff --git a/runtime/include/wmm/event.h b/runtime/include/wmm/event.h new file mode 100644 index 00000000..867edef0 --- /dev/null +++ b/runtime/include/wmm/event.h @@ -0,0 +1,338 @@ +#pragma once + +#include "common.h" +#include "hbclock.h" + +namespace ltest::wmm { + +struct Event { + protected: + Event(EventId id, EventType type, int nThreads, int location, int threadId, + MemoryOrder order) + : id(id), + type(type), + location(location), + threadId(threadId), + order(order), + clock(nThreads) {} + + public: + EventId id; + EventType type; + int location; + int threadId; + MemoryOrder order; + HBClock clock; + std::vector edges; // outgoing edges (e.g. `edge.to == this`) + + virtual ~Event() = default; + + virtual std::string AsString() const { + std::stringstream ss; + + ss << id << ":" << WmmUtils::EventTypeToString(type) << ":T" << threadId + << ":L" << location << ":" << WmmUtils::OrderToString(order) << ":" + << clock.AsString(); + + return ss.str(); + } + + virtual void SetReadFromEvent(Event* event) { + assert(false && "'SetReadFromEvent' can only be called on read/rmw events"); + } + + virtual Event* GetReadFromEvent() const { + assert(false && "'GetReadFromEvent' can only be called on read/rmw events"); + return nullptr; + } + + bool HappensBefore(Event* other) const { + return clock.IsSubsetOf(other->clock); + } + + bool IsDummy() const { return type == EventType::DUMMY; } + + bool IsWrite() const { return type == EventType::WRITE; } + + bool IsRead() const { return type == EventType::READ; } + + bool IsRMW() const { return type == EventType::RMW; } + + bool IsWriteOrRMW() const { return IsWrite() || IsRMW(); } + + bool IsReadOrRMW() const { return IsRead() || IsRMW(); } + + virtual bool IsSeqCst() const { return order == MemoryOrder::SeqCst; } + + virtual bool IsAcqRel() const { return order == MemoryOrder::AcqRel; } + + virtual bool IsAcquire() const { return order == MemoryOrder::Acquire; } + + virtual bool IsRelease() const { 
return order == MemoryOrder::Release; } + + virtual bool IsRelaxed() const { return order == MemoryOrder::Relaxed; } + + virtual bool IsAtLeastAcquire() const { + return order >= MemoryOrder::Acquire && order != MemoryOrder::Release; + } + + virtual bool IsAtLeastRelease() const { + return order >= MemoryOrder::Release; + } + + // Returns true if this event is a RMW and it is resolved to a READ state + virtual bool IsReadRWM() const { return false; } + + // Returns true if this event is a RMW and it is resolved to a MODIFY state + virtual bool IsModifyRMW() const { return false; } + + template + static T GetWrittenValue(Event* event); + + template + static T GetReadValue(Event* event); +}; + +struct DummyEvent : Event { + DummyEvent(EventId id, int nThreads, int threadId) + : Event(id, EventType::DUMMY, nThreads, -1 /* non-existing location */, + threadId, MemoryOrder::Relaxed) {} +}; + +template +struct WriteEvent : Event { + WriteEvent(EventId id, int nThreads, int location, int threadId, + MemoryOrder order, T value) + : Event(id, EventType::WRITE, nThreads, location, threadId, order), + value(std::move(value)) {} // , moBefore(-1) + + virtual std::string AsString() const { + std::stringstream ss; + + ss << id << ":" << WmmUtils::EventTypeToString(type) << "(" << value << ")" + << ":T" << threadId << ":L" << location << ":" + << WmmUtils::OrderToString(order) << ":" << clock.AsString(); + + return ss.str(); + } + + T GetWrittenValue() const { return value; } + + T value; +}; + +template +struct ReadEvent : Event { + ReadEvent(EventId id, int nThreads, int location, int threadId, + MemoryOrder order) + : Event(id, EventType::READ, nThreads, location, threadId, order), + readFrom(nullptr) {} + + virtual void SetReadFromEvent(Event* event) override { + assert((event == nullptr || event->IsWriteOrRMW()) && + "Read event must read from write/rmw event"); + readFrom = event; + } + + virtual Event* GetReadFromEvent() const override { return readFrom; } + + T 
GetReadValue() const { + assert(readFrom != nullptr && + "Read event hasn't set its 'readFrom' write-event"); + return Event::GetWrittenValue(readFrom); + } + + // points to write-event which we read from + Event* readFrom; +}; + +enum RMWState { READ, MODIFY, UNSET }; + +template +struct RMWEvent : Event { + RMWEvent(EventId id, int nThreads, int location, int threadId, T* expected, + T desired, MemoryOrder successOrder, MemoryOrder failureOrder) + : Event(id, EventType::RMW, nThreads, location, threadId, successOrder), + // TODO: expcted might be uninitialized, so we should not dereference it + // blindly, fix it + initialExpectedValue(*expected), + cachedExpectedValue(*expected), + expected(expected), + desired(desired), + failureOrder(failureOrder), + readFrom(nullptr) {} + + virtual std::string AsString() const override { + std::stringstream ss; + + ss << id << ":" << WmmUtils::EventTypeToString(type) + << "(written_expected=" << cachedExpectedValue + << ", init_expected=" << initialExpectedValue << ", desired=" << desired + << ", " << StateAsString(state) << ")" + << ":T" << threadId << ":L" << location << ":" + << "succ=" << WmmUtils::OrderToString(order) + << ", fail=" << WmmUtils::OrderToString(failureOrder) << ":" + << clock.AsString(); + + return ss.str(); + } + + /* + All methods below will use a correct memory order of this RMW event, + meaning that for 'state == READ' it will use 'failureOrder', and for + 'state == MODIFY' it will use 'order' (successOrder). + */ + virtual bool IsSeqCst() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::SeqCst; + } + + virtual bool IsAcqRel() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? 
this->order : failureOrder; + return order == MemoryOrder::AcqRel; + } + + virtual bool IsAcquire() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::Acquire; + } + + virtual bool IsRelease() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::Release; + } + + virtual bool IsRelaxed() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::Relaxed; + } + + virtual bool IsAtLeastAcquire() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order >= MemoryOrder::Acquire && order != MemoryOrder::Release; + } + + virtual bool IsAtLeastRelease() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? 
this->order : failureOrder; + return order >= MemoryOrder::Release; + } + + virtual bool IsReadRWM() const override { return state == RMWState::READ; } + + virtual bool IsModifyRMW() const override { + return state == RMWState::MODIFY; + } + + virtual void SetReadFromEvent(Event* event) override { + assert((event == nullptr || event->IsWriteOrRMW()) && + "Read event must read from write/rmw event"); + readFrom = event; + + if (readFrom == nullptr) { + state = RMWState::UNSET; + *expected = initialExpectedValue; // reset expected value + cachedExpectedValue = initialExpectedValue; + } else { + T readValue = Event::GetWrittenValue(readFrom); + assert(*expected == initialExpectedValue && + "Expected value must be equal to initial expected value on RMW " + "resolving"); + if (readValue == initialExpectedValue) { + // in case of MODIFY we do not change expected value + state = RMWState::MODIFY; + } else { + // in case of READ we set expected to the actually read value + state = RMWState::READ; + *expected = readValue; + cachedExpectedValue = readValue; + } + } + } + + virtual Event* GetReadFromEvent() const override { return readFrom; } + + T GetReadValue() const { + assert(readFrom != nullptr && + "RMW event hasn't set its 'readFrom' write-event"); + return Event::GetWrittenValue(readFrom); + } + + T GetWrittenValue() const { + assert(state != RMWState::UNSET && + "RMW event must have a resolved state (not UNSET)"); + if (state == RMWState::MODIFY) { + // in case of MODIFY we return desired value + return desired; + } else { + // in case of READ we assume that RMW writes the value that it reads from + // its own 'readFrom' event (which is saved to the *expected pointer) + return *expected; + } + } + + RMWState state = UNSET; // current state of the RMW operation + T initialExpectedValue; + // TODO: I assume, that boost stack manipulation causes a failure when + // dereferencing of '*expected' from different thread, and it causes sanitizer + // to fails. 
Requires inverstigation to debug it. + T cachedExpectedValue; + T* expected; + T desired; + MemoryOrder failureOrder; + // points to write/rmw-event which we read from + Event* readFrom; + + private: + inline static std::string StateAsString(RMWState state) { + switch (state) { + case RMWState::READ: + return "READ"; + case RMWState::MODIFY: + return "MODIFY"; + case RMWState::UNSET: + return "UNSET"; + } + } +}; + +template +T Event::GetWrittenValue(Event* event) { + assert(event != nullptr && "Event must not be null"); + assert(event->IsWriteOrRMW() && "Only write/rmw events can write values"); + + if (event->IsWrite()) { + auto writeEvent = static_cast*>(event); + return writeEvent->GetWrittenValue(); + } else { + auto rmwEvent = static_cast*>(event); + return rmwEvent->GetWrittenValue(); + } +} + +template +T Event::GetReadValue(Event* event) { + assert(event->IsReadOrRMW() && "Event must be a read/rmw event"); + if (event->IsRead()) { + ReadEvent* readEvent = static_cast*>(event); + return readEvent->GetReadValue(); + } else { + RMWEvent* rmwEvent = static_cast*>(event); + return rmwEvent->GetReadValue(); + } +} + +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm/graph.h b/runtime/include/wmm/graph.h new file mode 100644 index 00000000..41603d60 --- /dev/null +++ b/runtime/include/wmm/graph.h @@ -0,0 +1,699 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "../logger.h" +#include "common.h" +#include "edge.h" +#include "event.h" + +namespace ltest::wmm { + +class Graph { + public: + Graph() {} + ~Graph() { Clean(); } + + void Reset(int nThreads) { + Clean(); + InitThreads(nThreads); + } + + // TODO: add `ExecutionPolicy` or other way of specifying how to create edges + // (Random, BoundedModelChecker, etc.) 
+ template + T AddReadEvent(int location, int threadId, MemoryOrder order) { + EventId eventId = events.size(); + auto event = new ReadEvent(eventId, nThreads, location, threadId, order); + + // establish po-edge + CreatePoEdgeToEvent(event); // prevInThread --po--> event + + auto shuffledEvents = GetShuffledReadFromCandidates(event); + for (auto readFromEvent : shuffledEvents) { + // try reading from `readFromEvent` + if (TryCreateRfEdge(readFromEvent, event)) { + log() << "Read event " << event->AsString() << " now reads from " + << readFromEvent->AsString() << "\n"; + break; + } + } + + assert(event->readFrom != nullptr && + "Read event must have appropriate write event to read from"); + assert((event->readFrom->IsWrite() || event->readFrom->IsModifyRMW()) && + "Read event must read from write or modifying rmw event"); + return Event::GetReadValue(event); + } + + template + void AddWriteEvent(int location, int threadId, MemoryOrder order, T value) { + EventId eventId = events.size(); + auto event = + new WriteEvent(eventId, nThreads, location, threadId, order, value); + + // establish po-edge + CreatePoEdgeToEvent(event); // prevInThread --po--> event + + if (order == MemoryOrder::SeqCst) { + // establish sc-edge between last sc-write and event + CreateScEdgeToEvent(event); // prevScCstWrite --sc--> event + + // Seq-Cst / MO Consistency (if locations match) + CreateSeqCstConsistencyEdges(event); // prevScCstWrite --mo--> event + + // update last seq_cst write + lastSeqCstWriteEvents[event->location] = event->id; + } + + // Read-Write Coherence + CreateReadWriteCoherenceEdges(event); + + // Write-Write Coherence + CreateWriteWriteCoherenceEdges(event); + } + + template + std::pair AddRMWEvent(int location, int threadId, T* expected, + T desired, MemoryOrder successOrder, + MemoryOrder failureOrder) { + EventId eventId = events.size(); + auto event = new RMWEvent(eventId, nThreads, location, threadId, + expected, desired, successOrder, failureOrder); + + // 
establish po-edge + CreatePoEdgeToEvent(event); // prevInThread --po--> event + + auto shuffledEvents = GetShuffledReadFromCandidates(event); + for (auto readFromEvent : shuffledEvents) { + // try reading from `readFromEvent` + if (TryCreateRfEdge(readFromEvent, event)) { + log() << "RMW event " << event->AsString() << " now reads from " + << readFromEvent->AsString() << "\n"; + break; + } + } + + assert(event->readFrom != nullptr && + "RMW event must have appropriate write event to read from"); + assert((event->readFrom->IsWrite() || event->readFrom->IsModifyRMW()) && + "RMW event must read from write or modifying rmw event"); + return { + event->IsModifyRMW(), // true if RMW is resolved to MODIFY state, false + // if rmw failed and resolve to READ state + Event::GetReadValue(event)}; + } + + template + void Print(Out& os) const { + os << "Graph edges:" << "\n"; + if (edges.empty()) + os << ""; + else { + for (const auto& edge : edges) { + os << events[edge.from]->AsString() << " ->" + << WmmUtils::EdgeTypeToString(edge.type) << " " + << events[edge.to]->AsString() << "\n"; + } + } + os << "\n"; + } + + private: + std::vector GetShuffledReadFromCandidates(Event* event) { + // Shuffle events to randomize the order of read-from edges + // and allow for more non-sc behaviours + auto filteredEventsView = + events | std::views::filter([event](Event* e) { + return ((e->IsWrite() || e->IsModifyRMW()) && + e->location == event->location && e != event); + }); + std::vector shuffledEvents(filteredEventsView.begin(), + filteredEventsView.end()); + std::ranges::shuffle(shuffledEvents, gen); + + return shuffledEvents; + } + + // Tries to create a read-from edge between `write` and `read` events (write + // --rf--> read). Returns `true` if edge was created, `false` otherwise. 
+ bool TryCreateRfEdge(Event* write, Event* read) { + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and Read events must be of correct type"); + assert(write->location == read->location && + "Write and Read events must be of the same location"); + + StartSnapshot(); + + // establish rf-edge + AddEdge(EdgeType::RF, write->id, read->id); + // remember the event we read from + read->SetReadFromEvent(write); + + // update the clock if synchronized-with relation has appeared + // and save the old clock in case snapshot is discarded + HBClock oldClock; + bool isClockUpdated = false; + + // update the last seq-cst write event in case of successful RMW event + // and save the old value in case snapshot is discarded + EventId oldLastSeqCstWriteEvent = lastSeqCstWriteEvents[read->location]; + + // Applying rel-acq semantics via establishing synchronized-with (SW) + // relation + if (read->IsAtLeastAcquire() && write->IsAtLeastRelease() && + // TODO: maybe somehow get rid of the sc-edges instead? 
+ !(read->IsSeqCst() && + write->IsSeqCst()) // this case no need to consider, because we have + // sc edges instead + ) { + isClockUpdated = true; + // save the old clock + oldClock = read->clock; + // instantitate a SW (synchronized-with) relation + read->clock.UniteWith(write->clock); + } + + if (read->IsRead() || read->IsReadRWM()) { + // in case of 'read' event has actually type RMW but is resolved to READ + // state (basically the comparison of read value with `expected` one + // failed), then we treat it as a regular READ event + if (read->IsSeqCst()) { + // establish sc-edge + CreateScEdgeToEvent(read); // prevScCstWrite --sc--> event + // Seq-Cst Write-Read Coherence + CreateSeqCstReadWriteCoherenceEdges(write, read); + } + + // Write-Read Coherence + CreateWriteReadCoherenceEdges(write, read); + + // Read-Read Coherence + CreateReadReadCoherenceEdges(write, read); + } else { + // otherwise, if the RWM event is resolved to MODIFY state + // then we treat that as RMW event and apply rules from reads and writes + // as well + assert(read->IsModifyRMW() && + "Read event must be a successful RMW event in this code branch"); + + // Apply RMW-specific rules + // RMW / MO Consistency + CreateRmwConsistencyEdges(write, read); + + // RMW Atomicity + CreateRmwAtomicityEdges(write, read); + + // Applying rules both from READs and WRITEs + if (read->IsSeqCst()) { + // establish sc-edge + CreateScEdgeToEvent(read); // prevScCstWrite --sc--> event + // Seq-Cst Write-Read Coherence + CreateSeqCstReadWriteCoherenceEdges(write, read); + // Seq-Cst / MO Consistency (if locations match) + CreateSeqCstConsistencyEdges(read); // prevScCstWrite --mo--> event + + // update last seq_cst write (will be restored in case of snapshot + // discard) + lastSeqCstWriteEvents[read->location] = read->id; + } + + // Write-Read Coherence + CreateWriteReadCoherenceEdges(write, read); + + // Read-Read Coherence + CreateReadReadCoherenceEdges(write, read); + + // Read-Write Coherence + 
CreateReadWriteCoherenceEdges(read); + + // Write-Write Coherence + CreateWriteWriteCoherenceEdges(read); + } + + bool isConsistent = IsConsistent(); + if (isConsistent) { + log() << "Consistent graph:" << "\n"; + Print(log()); + // preserve added edges and other modifications + ApplySnapshot(); + } else { + log() << "Not consistent graph:" << "\n"; + Print(log()); + // removes all added edges + log() << "Discarding snapshot" << "\n"; + DiscardSnapshot(); + // remove rf-edge + read->SetReadFromEvent(nullptr); + // restore old clock if necessary + if (isClockUpdated) { + read->clock = oldClock; + } + // restore last seq-cst write event + lastSeqCstWriteEvents[read->location] = oldLastSeqCstWriteEvent; + Print(log()); + } + + return isConsistent; + } + + // ===== Methods to create general graph edges ===== + + // Creates a po-edge between last event in the same thread as `event` + void CreatePoEdgeToEvent(Event* event) { + int threadId = event->threadId; + EventId eventId = event->id; + + // connect prev event in the same thread with new event via PO edge + auto lastEventInSameThread = events[eventsPerThread[threadId].back()]; + AddEdge(EdgeType::PO, lastEventInSameThread->id, eventId); + + // update last event in thread + eventsPerThread[threadId].push_back(eventId); + + // insert in all-events vector + events.push_back(event); + + // set correct hb-clocks for new event + event->clock = lastEventInSameThread->clock; + event->clock.Increment(threadId); + } + + // Adds an sc-edge between prev sc-write (to the same location as `event`) and + // `event` + void CreateScEdgeToEvent(Event* event) { + assert(event->IsSeqCst() && "Event must be an SC access"); + + // last seq_cst write should appear before us + EventId lastSeqCstWriteEvent = GetLastSeqCstWriteEventId(event->location); + if (lastSeqCstWriteEvent != -1) { + auto lastSeqCstWrite = events[lastSeqCstWriteEvent]; + assert((lastSeqCstWrite->IsWrite() || lastSeqCstWrite->IsModifyRMW()) && + "Prev scq-cst event must 
be a write or modifying rmw"); + AddEdge(EdgeType::SC, lastSeqCstWrite->id, event->id); + + // unite current hb-clock with last seq-cst write + event->clock.UniteWith(lastSeqCstWrite->clock); + } + } + + // ===== Methods to create mo edges ===== + + // Applies Seq-Cst Write-Read Coherence rules: establishes mo-edge between + // last sc-write to the same location and `write` event + // W'_x --sc--> R_x W'_x --sc--> R_x + // ^ \ ^ + // | \ | + // rf => \ rf + // | \ | + // W_x --mo--> W_x + void CreateSeqCstReadWriteCoherenceEdges(Event* write, Event* read) { + // TODO: in all such assertions do I need to check for correct RMW type? + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and read events must be of correct type"); + assert(write->location == read->location && + "Write and read events must be of the same location"); + assert(read->GetReadFromEvent() == write && + "Read event must read-from the write event"); + + EventId lastSeqCstWriteEvent = GetLastSeqCstWriteEventId(read->location); + // no such event, no need to create mo edge + if (lastSeqCstWriteEvent == -1) return; + + // create mo-edge between last sc-write and `write` that `read` event + // reads-from + auto lastSeqCstWrite = events[lastSeqCstWriteEvent]; + assert((lastSeqCstWrite->IsWrite() || lastSeqCstWrite->IsModifyRMW()) && + "Last sc-write event must be a write or modifying rmw"); + assert(lastSeqCstWrite->location == read->location && + "Last sc-write event must have the same location as read event"); + + if (lastSeqCstWrite->id == write->id) + return; // no need to create edge to itself + + // TODO: check that sc-edge between lastSeqCstWrite and read exists + AddEdge(EdgeType::MO, lastSeqCstWrite->id, write->id); + } + + // Applies Write-Read Coherence rules: establishes mo-edges between + // stores (which happened-before `read`) and `write` + // W'_x --hb--> R_x W'_x --hb--> R_x + // ^ \ ^ + // | \ | + // rf => \ rf + // | \ | + // W_x --mo--> W_x + void 
CreateWriteReadCoherenceEdges(Event* write, Event* read) { + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and read events must be of correct type"); + assert(write->location == read->location && + "Write and read events must be of the same location"); + assert(read->GetReadFromEvent() == write && + "Read event must read-from the write event"); + + IterateThroughMostRecentEventsByPredicate( + [write, read](Event* otherEvent) -> bool { + return (read->id != otherEvent->id && + write->id != otherEvent->id && // not the same events + (otherEvent->IsWrite() || otherEvent->IsModifyRMW()) && + otherEvent->location == read->location && + otherEvent->HappensBefore(read)); + }, + [this, write](Event* otherEvent) -> void { + // establish mo edge + AddEdge(EdgeType::MO, otherEvent->id, write->id); + }); + } + + // Applies Read-Read Coherence rules: establishes mo-edges between + // `write` and write events from which other read events + // (which happened-before `read`) read-from. 
+ // W'_x --rf--> R'_x W'_x --rf--> R'_x + // | | | + // hb => mo hb + // | | | + // v v v + // W_x --rf--> R_x W_x --rf--> R_x + void CreateReadReadCoherenceEdges(Event* write, Event* read) { + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and read events must be of correct type"); + assert(write->location == read->location && + "Write and read events must be of the same location"); + assert(read->GetReadFromEvent() == write && + "Read event must read-from the write event"); + + IterateThroughMostRecentEventsByPredicate( + [write, read](Event* otherEvent) -> bool { + return (read->id != otherEvent->id && + write->id != otherEvent->id && // not the same events + otherEvent->IsReadOrRMW() && + otherEvent->location == read->location && + otherEvent->HappensBefore(read) && + otherEvent->GetReadFromEvent() != nullptr && + otherEvent->GetReadFromEvent() != + write // R'_x does not read-from `write` + ); + }, + [this, write](Event* otherRead) -> void { + auto otherWrite = otherRead->GetReadFromEvent(); + // establish mo-edge + AddEdge(EdgeType::MO, otherWrite->id, write->id); + }); + } + + // TODO: instead of sc-edges, add reads-from from "Repairing Sequential + // Consistency in C/C++11"? 
Applies Seq-Cst / MO Consistency rules: + // establishes mo-edge between last sc-write and current sc-write-event if + // their locations match W'_x --sc--> W_x => W'_x --mo--> W_x + void CreateSeqCstConsistencyEdges(Event* event) { + assert(event->IsWriteOrRMW() && event->IsSeqCst() && + "Event must be a write/rmw with seq-cst order"); + + EventId lastSeqCstWriteEvent = GetLastSeqCstWriteEventId(event->location); + if (lastSeqCstWriteEvent == -1) return; + auto lastSeqCstWrite = events[lastSeqCstWriteEvent]; + assert((lastSeqCstWrite->IsWrite() || lastSeqCstWrite->IsModifyRMW()) && + "Last sc-write event must be a write or modifying rmw"); + assert(lastSeqCstWrite->location == event->location && + "Last sc-write event must have the same location as event"); + // TODO: check that sc-edge between lastSeqCstWrite and read exists + AddEdge(EdgeType::MO, lastSeqCstWrite->id, event->id); + } + + // Applies Write-Write Coherence rules: establishes mo-edges between + // other writes that happened before `event` only for the same location + // W'_x --hb--> W_x => W'_x --mo--> W_x + void CreateWriteWriteCoherenceEdges(Event* event) { + assert(event->IsWriteOrRMW()); + + IterateThroughMostRecentEventsByPredicate( + [event](Event* otherEvent) -> bool { + return (event->id != otherEvent->id && // not the same event + (otherEvent->IsWrite() || otherEvent->IsModifyRMW()) && + otherEvent->location == event->location && // same location + otherEvent->HappensBefore(event)); + }, + [this, event](Event* otherEvent) -> void { + // establish mo edge + AddEdge(EdgeType::MO, otherEvent->id, event->id); + }); + } + + // Applies Read-Write Coherence rules: establishes mo-edges between + // stores that are read-from by reads which happened-before our write `event` + // W'_x --rf--> R'_x W'_x --rf--> R'_x + // | \ | + // hb => \ hb + // | \ | + // v \ v + // W_x --mo--> W_x + void CreateReadWriteCoherenceEdges(Event* event) { + assert(event->IsWriteOrRMW()); + + 
IterateThroughMostRecentEventsByPredicate( + [event](Event* otherEvent) -> bool { + return (event->id != otherEvent->id && // not the same event + otherEvent->IsReadOrRMW() && + otherEvent->location == event->location && // same location + otherEvent->HappensBefore(event)); + }, + [this, event](Event* otherEvent) -> void { + assert(otherEvent->GetReadFromEvent() != nullptr && + "Read event must have read-from event"); + auto writeEvent = otherEvent->GetReadFromEvent(); + // establish mo edge + AddEdge(EdgeType::MO, writeEvent->id, event->id); + }); + } + + // Applies RMW Consitency rules: establishes mo-edge between + // store that is read-from by the current RMW event and the RMW event itself. + // W_x --rf--> RMW_x => W_x --mo--> RMW_x + void CreateRmwConsistencyEdges(Event* write, Event* rmw) { + assert(write->IsWriteOrRMW() && rmw->IsModifyRMW() && + "Write and RMW events must be of correct type"); + assert(write->location == rmw->location && + "Write and RMW events must be of the same location"); + assert(rmw->GetReadFromEvent() == write && + "RMW event must read-from the Write event"); + + // establish mo-edge + AddEdge(EdgeType::MO, write->id, rmw->id); + } + + // Applies RMW Atomicity rules: establishes mo-edges between + // RMW event and all writes that are mo-after `write` event + // (from which RMW reads-from) + // RMW_x <--rf-- W_x RMW_x <--rf-- W_x + // | \ | + // mo => \ mo + // | \ | + // v \ v + // W'_x --mo--> W'_x + void CreateRmwAtomicityEdges(Event* write, Event* rmw) { + assert(write->IsWriteOrRMW() && rmw->IsModifyRMW() && + "Write and RMW events must be of correct type"); + assert(write->location == rmw->location && + "Write and RMW events must be of the same location"); + assert(rmw->GetReadFromEvent() == write && + "RMW event must read-from the Write event"); + + for (EdgeId edgeId : write->edges) { + Edge& edge = edges[edgeId]; + if (edge.type == EdgeType::MO && edge.to != rmw->id) { + // establish mo-edge + AddEdge(EdgeType::MO, rmw->id, 
edge.to); + } + } + } + + // ===== Helper methods ===== + + template + requires requires(Predicate p, Event* event) { + { p(event) } -> std::same_as; + } && requires(Callback cb, Event* event) { + { cb(event) } -> std::same_as; + } + void IterateThroughMostRecentEventsByPredicate(Predicate&& predicate, + Callback&& callback) { + // iterate through each thread and find last write-event that hb `event` + for (int t = 0; t < nThreads; ++t) { + // iterate from most recent to earliest events in thread `t` + const auto& threadEvents = eventsPerThread[t]; + for (auto it = threadEvents.rbegin(); it != threadEvents.rend(); ++it) { + Event* otherEvent = events[*it]; + if (!std::forward(predicate)(otherEvent)) continue; + // invoke `callback` on the most recent event in the thread `t` + std::forward(callback)(otherEvent); + break; // no need to invoke `callback` with earlier events from this + // thread + } + } + } + + EventId GetLastSeqCstWriteEventId(int location) const { + if (lastSeqCstWriteEvents.contains(location)) { + return lastSeqCstWriteEvents.at(location); + } + return -1; + } + + void AddEdge(EdgeType type, EventId from, EventId to) { + // for mo edges we might add duplicates, so we need to check that such + // mo-edge does not exist + if (type == EdgeType::MO && ExistsEdge(type, from, to)) { + // log() << "Edge already exists: " << events[from]->AsString() << " + // --" << WmmUtils::EdgeTypeToString(type) << "--> " + // << events[to]->AsString() << "\n"; + return; + } + + auto& from_edges = events[from]->edges; + EdgeId eId = edges.size(); + Edge e = {eId, type, from, to}; + edges.push_back(e); + from_edges.push_back(eId); + + if (inSnapshotMode) { + snapshotEdges.insert(eId); + } + } + + bool ExistsEdge(EdgeType type, EventId from, EventId to) const { + const auto& from_edges = events[from]->edges; + auto it = + std::ranges::find_if(from_edges, [this, from, to, type](EdgeId eId) { + auto& edge = edges[eId]; + return edge.from == from && edge.to == to && 
edge.type == type; + }); + return it != from_edges.end(); + } + + // Check execution graph for consistency createria: + // * modification order is acyclic + bool IsConsistent() { + // TODO: should consistency criteria be taken from paper "Repairing + // Sequential Consistency in C/C++11"? + enum { NOT_VISITED = 0, IN_STACK = 1, VISITED = 2 }; + std::vector colors(events.size(), + NOT_VISITED); // each event is colored 0 (not + // visited), 1 (entered), 2 (visited) + std::vector> stack; + + for (auto e : events) { + assert(colors[e->id] != IN_STACK && + "Should not be possible, invalid cycle detection"); + if (colors[e->id] == VISITED) continue; + stack.push_back({e, false}); + + while (!stack.empty()) { + auto [event, considred] = stack.back(); + EventId eventId = event->id; + + stack.pop_back(); + if (considred) { + colors[eventId] = VISITED; + continue; + } + stack.push_back( + {event, + true}); // next time we take it out, we do not traverse its edges + + for (auto edgeId : event->edges) { + Edge& edge = edges[edgeId]; + if (edge.type != EdgeType::MO) continue; + if (colors[edge.to] == NOT_VISITED) { + stack.push_back({events[edge.to], false}); + colors[edge.to] = IN_STACK; + } else if (colors[edge.to] == IN_STACK) { + // cycle detected + return false; + } + } + } + } + + return true; + } + + void StartSnapshot() { + assert(!inSnapshotMode && "Snapshot started twice"); + inSnapshotMode = true; + } + + void ApplySnapshot() { + assert(inSnapshotMode && "Applying snapshot not in snapshot mode"); + inSnapshotMode = false; + snapshotEdges.clear(); + } + + void DiscardSnapshot() { + assert(inSnapshotMode && "Discarding snapshot not in snapshot mode"); + // clearing all added edges from the graph + // TODO: make sure below 'note' is true + // Note: all appended edges will be in the suffixes of all edges arrays + // 1. removing from edges vector + while (!edges.empty() && snapshotEdges.contains(edges.back().id)) { + edges.pop_back(); + } + + // 2. 
removing from events edges + for (auto event : events) { + auto& eventEdges = event->edges; + while (!eventEdges.empty() && snapshotEdges.contains(eventEdges.back())) { + eventEdges.pop_back(); + } + } + + // reset snapshot state + inSnapshotMode = false; + snapshotEdges.clear(); + } + + void Clean() { + edges.clear(); + for (auto event : events) { + delete event; + } + events.clear(); + eventsPerThread.clear(); + lastSeqCstWriteEvents.clear(); + } + + void InitThreads(int nThreads) { + this->nThreads = nThreads; + eventsPerThread.resize(nThreads); + + // insert dummy events (with all-zero hbClocks), + // which will be the first event in each thread + for (int t = 0; t < nThreads; ++t) { + // TODO: DummyEvents are all ?seq-cst? (now rlx) writes, do I need to add + // proper ?sc?-egdes between them? For now I don't + int eventId = events.size(); + auto dummyEvent = new DummyEvent(eventId, nThreads, t); + events.push_back(dummyEvent); + eventsPerThread[t].push_back(eventId); + } + } + + std::vector edges; + std::vector events; + std::vector> eventsPerThread; + std::map lastSeqCstWriteEvents; + std::unordered_set + snapshotEdges; // edges that are part of the snapshot (which case be + // discarded or applied, which is usefull when adding + // rf-edge) + std::mt19937 gen{std::random_device{}()}; // random number generator for + // randomized rf-edge selection + bool inSnapshotMode = false; + int nThreads = 0; +}; + +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm/hbclock.h b/runtime/include/wmm/hbclock.h new file mode 100644 index 00000000..d1d6e013 --- /dev/null +++ b/runtime/include/wmm/hbclock.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace ltest::wmm { +struct HBClock { + HBClock() = default; + HBClock(int nThreads) : times(nThreads, 0) {} + + std::string AsString() const { + std::stringstream ss; + + ss << "["; + for (size_t i = 0; i < times.size(); ++i) { + ss << times[i]; + 
if (i < times.size() - 1) { + ss << ","; + } + } + ss << "]"; + + return ss.str(); + } + + bool IsSubsetOf(const HBClock& other) const { + assert(IsSameLength(other)); + + for (int i = 0; i < times.size(); ++i) { + if (times[i] > other.times[i]) { + return false; + } + } + + return true; + } + + void UniteWith(const HBClock& other) { + assert(IsSameLength(other)); + + for (int i = 0; i < times.size(); ++i) { + times[i] = std::max(times[i], other.times[i]); + } + } + + void Increment(int threadId) { + assert(threadId >= 0 && threadId < times.size()); + times[threadId]++; + } + + private: + bool IsSameLength(const HBClock& other) const { + return times.size() == other.times.size(); + } + + std::vector times; +}; +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm/wmm.h b/runtime/include/wmm/wmm.h new file mode 100644 index 00000000..90a507b0 --- /dev/null +++ b/runtime/include/wmm/wmm.h @@ -0,0 +1,100 @@ +#pragma once + +#include + +#include "../logger.h" +#include "common.h" +#include "graph.h" + +namespace ltest::wmm { + +extern bool wmm_enabled; + +class ExecutionGraph { + public: + ExecutionGraph(const ExecutionGraph&) = delete; + ExecutionGraph& operator=(const ExecutionGraph&) = delete; + ExecutionGraph(ExecutionGraph&&) = delete; + ExecutionGraph& operator=(ExecutionGraph&&) = delete; + + static ExecutionGraph& getInstance() { + static ExecutionGraph instance; // Thread-safe in C++11 and later + return instance; + } + + // Empties graph events and sets new number of threads. + void Reset(int nThreads) { + log() << "Reset Graph: threads=" << nThreads << "\n"; + this->nThreads = nThreads; + this->nextLocationId = 0; + + graph.Reset(nThreads); + graph.Print(log()); + } + + // When new location is constructed, it registers itself in the wmm-graph + // in order to generate corresponding initialization event. 
+ template + int RegisterLocation(T value) { + int currentLocationId = nextLocationId++; + log() << "Register location: loc-" << currentLocationId + << ", init value=" << value << "\n"; + graph.AddWriteEvent(currentLocationId, WmmUtils::INIT_THREAD_ID, + MemoryOrder::SeqCst, value); + + graph.Print(log()); + return currentLocationId; + } + + template + T Load(int location, int threadId, MemoryOrder order) { + // TODO: if we now only do real atomics, then they should be stored in + // graph, I guess? + log() << "Load: loc-" << location << ", thread=" << threadId + << ", order=" << WmmUtils::OrderToString(order) << "\n"; + T readValue = graph.AddReadEvent(location, threadId, order); + + graph.Print(log()); + return readValue; + } + + template + void Store(int location, int threadId, MemoryOrder order, T value) { + log() << "Store: loc-" << location << ", thread=" << threadId + << ", order=" << WmmUtils::OrderToString(order) << ", value=" << value + << "\n"; + graph.AddWriteEvent(location, threadId, order, value); + + graph.Print(log()); + } + + // TODO: generalize to any other type of the RMW operation + // (which have different method signature from compare_and_set, e.g. + // fetch_add, etc.) + template + std::pair ReadModifyWrite(int location, int threadId, T* expected, + T desired, MemoryOrder success, + MemoryOrder failure) { + log() << "RMW: loc-" << location << ", thread=" << threadId + << ", expected=" << *expected << ", desired=" << desired + << ", success=" << WmmUtils::OrderToString(success) + << ", failure=" << WmmUtils::OrderToString(failure) << "\n"; + auto rmwResult = graph.AddRMWEvent(location, threadId, expected, desired, + success, failure); + graph.Print(log()); + log() << "RMW result: " << (rmwResult.first ? 
"MODIFY" : "READ") + << ", value=" << rmwResult.second << "\n"; + return rmwResult; + } + + private: + ExecutionGraph() = default; + ~ExecutionGraph() = default; + + int nThreads = 0; + int nextLocationId = 0; + Graph graph; + // TODO: here can add real atomic's name via clangpass +}; + +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/lib.cpp b/runtime/lib.cpp index e91e24de..f68ea37b 100644 --- a/runtime/lib.cpp +++ b/runtime/lib.cpp @@ -10,6 +10,7 @@ // See comments in the lib.h. Task this_coro{}; +int this_thread_id = -1; boost::context::fiber_context sched_ctx; std::optional coroutine_status; @@ -20,8 +21,9 @@ std::vector task_builders{}; Task CoroBase::GetPtr() { return shared_from_this(); } -void CoroBase::Resume() { +void CoroBase::Resume(int resumed_thread_id) { this_coro = this->GetPtr(); + this_thread_id = resumed_thread_id; assert(!this_coro->IsReturned() && this_coro->ctx); // debug(stderr, "name: %s\n", // std::string(this_coro->GetPtr()->GetName()).c_str()); @@ -38,6 +40,7 @@ void CoroBase::Resume() { }).resume(); } this_coro.reset(); + this_thread_id = -1; } int CoroBase::GetId() const { return id; } @@ -75,18 +78,18 @@ extern "C" void CoroutineStatusChange(char* name, bool start) { CoroYield(); } -void CoroBase::Terminate() { +void CoroBase::Terminate(int running_thread_id) { int tries = 0; while (!IsReturned()) { ++tries; - Resume(); + Resume(running_thread_id); assert(tries < 1000000 && "coroutine is spinning too long, possible wrong terminating order"); } } -void CoroBase::TryTerminate() { +void CoroBase::TryTerminate(int running_thread_id) { for (size_t i = 0; i < 1000 && !is_returned; ++i) { - Resume(); + Resume(running_thread_id); } } \ No newline at end of file diff --git a/runtime/pretty_printer.cpp b/runtime/pretty_printer.cpp deleted file mode 100644 index 36cffdbe..00000000 --- a/runtime/pretty_printer.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#include "include/pretty_print.h" - -PrettyPrinter::PrettyPrinter(size_t 
threads_num) : threads_num{threads_num} {} diff --git a/runtime/verifying.cpp b/runtime/verifying.cpp index 85f52ed2..0dff14b4 100644 --- a/runtime/verifying.cpp +++ b/runtime/verifying.cpp @@ -9,6 +9,10 @@ namespace ltest { +namespace wmm { +bool wmm_enabled = false; +} // namespace wmm + template <> std::string toString(const int &a) { return std::to_string(a); @@ -74,6 +78,9 @@ DEFINE_int32(exploration_runs, 15, "minimization step"); DEFINE_int32(minimization_runs, 15, "Number of minimization runs for smart minimizor"); +DEFINE_bool(wmm_enabled, false, + "Enable WMM graph usage (all atomic operations will be performed " + "via this graph"); DEFINE_int32(depth, 0, "How many tasks can be executed on one thread(Only for TLA)"); DEFINE_bool(verbose, false, "Verbosity"); @@ -91,6 +98,7 @@ void SetOpts(const DefaultOptions &def) { FLAGS_weights = def.weights; FLAGS_exploration_runs = def.exploration_runs; FLAGS_minimization_runs = def.minimization_runs; + FLAGS_wmm_enabled = def.wmm_enabled; } // Extracts required opts, returns the rest of args. 
@@ -104,6 +112,7 @@ Opts ParseOpts() { // scenarios with locks is not supported opts.exploration_runs = FLAGS_exploration_runs; opts.minimization_runs = FLAGS_minimization_runs; + opts.wmm_enabled = wmm_enabled = FLAGS_wmm_enabled; opts.verbose = FLAGS_verbose; opts.typ = FromLiteral(std::move(FLAGS_strategy)); opts.depth = FLAGS_depth; diff --git a/test/runtime/stackfulltask_mock.h b/test/runtime/stackfulltask_mock.h index 1fe6e0a7..381b7cd9 100644 --- a/test/runtime/stackfulltask_mock.h +++ b/test/runtime/stackfulltask_mock.h @@ -9,14 +9,14 @@ class MockTask : public CoroBase { public: MOCK_METHOD(Task, Restart, (void*), (override)); - MOCK_METHOD(void, Resume, (), ()); + MOCK_METHOD(void, Resume, (int), ()); MOCK_METHOD(bool, IsReturned, (), (const)); MOCK_METHOD(ValueWrapper, GetRetVal, (), (const, override)); MOCK_METHOD(std::string_view, GetName, (), (const, override)); MOCK_METHOD(std::vector, GetStrArgs, (), (const, override)); MOCK_METHOD(void*, GetArgs, (), (const, override)); MOCK_METHOD(bool, IsSuspended, (), (const)); - MOCK_METHOD(void, Terminate, (), ()); - MOCK_METHOD(void, TryTerminate, (), ()); + MOCK_METHOD(void, Terminate, (int), ()); + MOCK_METHOD(void, TryTerminate, (int), ()); virtual ~MockTask() { is_returned = true; } }; diff --git a/verifying/CMakeLists.txt b/verifying/CMakeLists.txt index 4d0f1ed9..ca6d48f7 100644 --- a/verifying/CMakeLists.txt +++ b/verifying/CMakeLists.txt @@ -29,8 +29,8 @@ function(verify_target_without_plugin target) COMMAND ${CLANG_TOOL_EXECUTABLE} -p=${CMAKE_BINARY_DIR}/compile_commands.json # passing compilation database, make sure CMAKE_EXPORT_COMPILE_COMMANDS flag is set --temp-prefix ${CLANG_TOOL_TMP_PREFIX} - --replace-names ::std::mutex,::std::shared_mutex,::std::condition_variable - --insert-names ltest::mutex,ltest::shared_mutex,ltest::condition_variable + --replace-names ::std::mutex,::std::shared_mutex,::std::condition_variable,::std::atomic + --insert-names 
ltest::mutex,ltest::shared_mutex,ltest::condition_variable,ltest::latomic ${CMAKE_CURRENT_SOURCE_DIR}/${source_name} DEPENDS ${CLANG_TOOL} COMMENT "Running Clang Pass Tool on ${source_name}" diff --git a/verifying/blocking/verifiers/buffered_channel_verifier.h b/verifying/blocking/verifiers/buffered_channel_verifier.h index 0bb894c5..ec6c5b50 100644 --- a/verifying/blocking/verifiers/buffered_channel_verifier.h +++ b/verifying/blocking/verifiers/buffered_channel_verifier.h @@ -46,8 +46,14 @@ struct BufferedChannelVerifier { return std::nullopt; } - size_t senders_; - size_t receivers_; - size_t size_; + void Reset() { + senders_ = 0; + receivers_ = 0; + size_ = 0; + } + + size_t senders_ = 0; + size_t receivers_ = 0; + size_t size_ = 0; }; } // namespace spec \ No newline at end of file diff --git a/verifying/blocking/verifiers/mutex_verifier.h b/verifying/blocking/verifiers/mutex_verifier.h index 30f12cc6..949caf06 100644 --- a/verifying/blocking/verifiers/mutex_verifier.h +++ b/verifying/blocking/verifiers/mutex_verifier.h @@ -36,6 +36,8 @@ struct MutexVerifier { return std::nullopt; } + void Reset() { status.clear(); } + // NOTE(kmitkin): we cannot just store number of thread that holds mutex // because Lock can finish before Unlock! 
std::unordered_map status; diff --git a/verifying/blocking/verifiers/shared_mutex_verifier.h b/verifying/blocking/verifiers/shared_mutex_verifier.h index 628fdbf0..928d48af 100644 --- a/verifying/blocking/verifiers/shared_mutex_verifier.h +++ b/verifying/blocking/verifiers/shared_mutex_verifier.h @@ -54,6 +54,8 @@ struct SharedMutexVerifier { return std::nullopt; } + void Reset() { status.clear(); } + std::unordered_map status; }; } // namespace spec diff --git a/verifying/targets/CMakeLists.txt b/verifying/targets/CMakeLists.txt index fa5e8a79..4d1cc646 100644 --- a/verifying/targets/CMakeLists.txt +++ b/verifying/targets/CMakeLists.txt @@ -6,6 +6,8 @@ set (SOURCE_TARGET_LIST nonlinear_set.cpp nonlinear_ms_queue.cpp nonlinear_treiber_stack.cpp + nonlinear_queue_with_custom_rounds.cpp + wmm.cpp ) set (SOURCE_TARGET_WITHOUT_PLUGIN_LIST @@ -75,6 +77,12 @@ add_integration_test("nonlinear_queue_pct" "verify" TRUE nonlinear_queue --rounds 10000 --strategy pct ) +# TODO: test should fail, but for some reason after commit '15f64f0 - feat: add more tests for blocking data structures&refactor blocking support' +# it takes too much time to find bugs, I wasn't able to wait until custom round detects a bug +add_integration_test("nonlinear_queue_with_custom_rounds_pct" "verify" FALSE + nonlinear_queue_with_custom_rounds --rounds 0 --exploration_runs 1000 --strategy pct +) + add_integration_test("unique_args" "verify" FALSE unique_args ) diff --git a/verifying/targets/nonlinear_queue_with_custom_rounds.cpp b/verifying/targets/nonlinear_queue_with_custom_rounds.cpp new file mode 100644 index 00000000..c3dc5862 --- /dev/null +++ b/verifying/targets/nonlinear_queue_with_custom_rounds.cpp @@ -0,0 +1,45 @@ +#include +#include + +#include "../specs/queue.h" +#include "runtime/include/verifying.h" +#include "runtime/include/verifying_macro.h" + +const int N = 100; + +// Implementation copy-pasted from `nonlinear_queue.cpp` +struct Queue { + Queue() {} + + non_atomic void Push(int v) { 
+ int pos = head.fetch_add(1); + a[pos] = v; + } + + non_atomic int Pop() { + int last = head.load(); + for (int i = 0; i < last; ++i) { + int e = a[i].load(); + if (e != 0 && a[i].compare_exchange_strong(e, 0)) { + return e; + } + } + return 0; + } + + std::atomic a[N]; + std::atomic head{}; +}; + +// Specify target structure and it's sequential specification. +using spec_t = + ltest::Spec, spec::QueueHash, spec::QueueEquals>; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(1), void, Queue, Push, int), + method_invocation(std::tuple(2), void, Queue, Push, int), + method_invocation(std::tuple(), int, Queue, Pop), + }, + {method_invocation(std::tuple(3), void, Queue, Push, int), + method_invocation(std::tuple(), int, Queue, Pop)}}); \ No newline at end of file diff --git a/verifying/targets/wmm.cpp b/verifying/targets/wmm.cpp new file mode 100644 index 00000000..7453a0c3 --- /dev/null +++ b/verifying/targets/wmm.cpp @@ -0,0 +1,325 @@ +#include +#include +#include +#include + +#include "runtime/include/verifying.h" +#include "runtime/include/verifying_macro.h" +#include "verifying/specs/register.h" + +struct WmmTest { + WmmTest() {} + WmmTest(const WmmTest&) {} + WmmTest& operator=(const WmmTest&) { + return *this; + } + + std::atomic x{0}, y{0}; + int r1 = -1, r2 = -1; + + // Example 1 + non_atomic void Exp1_A() { + r1 = y.load(std::memory_order_seq_cst); + x.store(1, std::memory_order_seq_cst); + std::string out = "r1 = " + std::to_string(r1) + "\n"; + std::cout << out; + + assert(!(r1 == 1 && r2 == 1)); + } + + non_atomic void Exp1_B() { + r2 = x.load(std::memory_order_seq_cst); + y.store(1, std::memory_order_seq_cst); + std::string out = "r2 = " + std::to_string(r2) + "\n"; + std::cout << out; + + assert(!(r1 == 1 && r2 == 1)); + } + + // Example 2 + non_atomic void Exp2_A() { + y.store(1, std::memory_order_relaxed); + x.store(2, std::memory_order_seq_cst); + } + + non_atomic void Exp2_B() { + if (x.load(std::memory_order_seq_cst) == 2) { + r1 = 
y.load(std::memory_order_relaxed); + assert(r1 == 1); + } + std::string out = "r1 = " + std::to_string(r1) + "\n"; + std::cout << out; + } + + // Example 3 + non_atomic void Exp3_A() { + y.store(20, std::memory_order_seq_cst); + x.store(10, std::memory_order_seq_cst); + } + + non_atomic void Exp3_B() { + if (x.load(std::memory_order_seq_cst) == 10) { + assert(y.load(std::memory_order_seq_cst) == 20); + y.store(10, std::memory_order_seq_cst); + } + } + + non_atomic void Exp3_C() { + if (y.load(std::memory_order_seq_cst) == 10) { + assert(x.load(std::memory_order_seq_cst) == 10); + } + } + + // Example 4 + non_atomic void Exp4_A() { + y.store(20, std::memory_order_relaxed); + x.store(10, std::memory_order_relaxed); + } + + non_atomic void Exp4_B() { + if (x.load(std::memory_order_relaxed) == 10) { + assert(y.load(std::memory_order_relaxed) == 20); // could fail + y.store(10, std::memory_order_relaxed); + } + } + + non_atomic void Exp4_C() { + if (y.load(std::memory_order_relaxed) == 10) { + assert(x.load(std::memory_order_relaxed) == 10); // could fail + } + } + + // Example 5 + non_atomic void Exp5_A() { + y.store(20, std::memory_order_release); + x.store(10, std::memory_order_release); + } + + non_atomic void Exp5_B() { + if (x.load(std::memory_order_acquire) == 10) { + assert(y.load(std::memory_order_acquire) == 20); + y.store(10, std::memory_order_release); + } + } + + non_atomic void Exp5_C() { + if (y.load(std::memory_order_acquire) == 10) { + assert(x.load(std::memory_order_acquire) == 10); + } + } + + // Example 6 (TODO: fix mixed memory order accesses, see https://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync) + // Note: Requires further investigation, because it seems that sc support is not sound right now. + // Fix it. 
+ non_atomic void Exp6_A() { + y.store(20, std::memory_order_relaxed); + x.store(10, std::memory_order_seq_cst); + } + + non_atomic void Exp6_B() { + if (x.load(std::memory_order_relaxed) == 10) { + assert(y.load(std::memory_order_seq_cst) == 20); + y.store(10, std::memory_order_relaxed); + } + } + + non_atomic void Exp6_C() { + if (y.load(std::memory_order_acquire) == 10) { + assert(x.load(std::memory_order_acquire) == 10); + } + } + + // Example 7 + non_atomic void Exp7_A() { + int expected = 0; + do { + expected = x.load(std::memory_order_seq_cst); + } while (!x.compare_exchange_weak(expected, expected + 1, std::memory_order_seq_cst)); + assert(expected == 0); + assert(x.load() == 1); + } + + non_atomic void Exp7_B() { + int r = x.load(std::memory_order_seq_cst); + std::cout << "r = " << r << "\n"; + assert(r >= 0 && r <= 1); + } + + // Example 8 + non_atomic void Exp8_A() { + int expected; + do { + expected = x.load(std::memory_order_seq_cst); + } while (!x.compare_exchange_weak(expected, expected + 1, std::memory_order_seq_cst)); + int r = x.load(std::memory_order_seq_cst); + assert(expected == 0 || expected == 1); + assert(r >= 1 && r <= 2); + } + + non_atomic void Exp8_B() { + x.store(1, std::memory_order_seq_cst); + int r = x.load(std::memory_order_seq_cst); + std::cout << "r = " << r << "\n"; + assert(r >= 1 && r <= 2); + } + + // Example 9 (weak rmw) + non_atomic void Exp9_A() { + int expected = 0; + while (!x.compare_exchange_strong(expected, 1, std::memory_order_relaxed, std::memory_order_relaxed)) { + expected = 0; + } + y.store(1, std::memory_order_relaxed); + } + + non_atomic void Exp9_B() { + int a = y.load(std::memory_order_relaxed); + int b = x.load(std::memory_order_relaxed); + std::stringstream ss; + ss << "a = " << a << ", b = " << b << "\n"; + std::cout << ss.str(); + assert(!(a == 1 && b == 0)); // Could fail + } +}; + +struct LinearWmmSpec { + using method_t = std::function; + + static auto GetMethods() { + method_t func = [](LinearWmmSpec 
*l, void *) -> ValueWrapper { return void_v; }; + + return std::map{ + {"Exp1_A", func}, + {"Exp1_B", func}, + + {"Exp2_A", func}, + {"Exp2_B", func}, + + {"Exp3_A", func}, + {"Exp3_B", func}, + {"Exp3_C", func}, + + {"Exp4_A", func}, + {"Exp4_B", func}, + {"Exp4_C", func}, + + {"Exp5_A", func}, + {"Exp5_B", func}, + {"Exp5_C", func}, + + {"Exp6_A", func}, + {"Exp6_B", func}, + {"Exp6_C", func}, + + {"Exp7_A", func}, + {"Exp7_B", func}, + + {"Exp8_A", func}, + {"Exp8_B", func}, + + {"Exp9_A", func}, + {"Exp9_B", func}, + }; + } +}; + +struct LinearWmmHash { + size_t operator()(const LinearWmmSpec &r) const { return 1; } +}; + +struct LinearWmmEquals { + bool operator()(const LinearWmmSpec &lhs, const LinearWmmSpec &rhs) const { + return true; + } +}; + +using spec_t = + ltest::Spec; + +// Uncomment required tests when needed +LTEST_ENTRYPOINT(spec_t, + { + { + method_invocation(std::tuple(), void, WmmTest, Exp1_A) + }, + { + method_invocation(std::tuple(), void, WmmTest, Exp1_B) + } + }, + // { + // { + // method_invocation(std::tuple(), void, WmmTest, Exp2_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp2_B) + // } + // }, + // { + // { + // method_invocation(std::tuple(), void, WmmTest, Exp3_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp3_B) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp3_C) + // } + // }, + // { // could fail + // { + // method_invocation(std::tuple(), void, WmmTest, Exp4_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp4_B) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp4_C) + // } + // }, + // { + // { + // method_invocation(std::tuple(), void, WmmTest, Exp5_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp5_B) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp5_C) + // } + // }, + // { // TODO: fails but should not, probably due to missing support for release sequences + // { + 
// method_invocation(std::tuple(), void, WmmTest, Exp6_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp6_B) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp6_C) + // } + // }, + // { + // { + // method_invocation(std::tuple(), void, WmmTest, Exp7_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp7_B) + // } + // }, + // { + // { + // method_invocation(std::tuple(), void, WmmTest, Exp8_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp8_B) + // } + // }, + // { + // { + // method_invocation(std::tuple(), void, WmmTest, Exp9_A) + // }, + // { + // method_invocation(std::tuple(), void, WmmTest, Exp9_B) + // } + // }, +); \ No newline at end of file