diff --git a/.github/workflows/run-tests.yaml b/.github/workflows/run-tests.yaml index c84e6f70..0c61cfd9 100644 --- a/.github/workflows/run-tests.yaml +++ b/.github/workflows/run-tests.yaml @@ -31,7 +31,7 @@ jobs: container: image: silkeh/clang:19 options: --user root - timeout-minutes: 20 + timeout-minutes: 30 steps: - name: Install deps run: | @@ -45,8 +45,32 @@ jobs: run: | cmake -G Ninja -B build -DCMAKE_BUILD_TYPE=RelWithAssert cmake --build build --target verify-targets verify-blocking - - name: "Tests" + - name: "Verifying tests" run: ctest --parallel 4 --test-dir build -L "verify" -V + litmus-test: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + container: + image: silkeh/clang:19 + options: --user root + timeout-minutes: 30 + steps: + - name: Install deps + run: | + apt update && apt install -y git ninja-build valgrind libgoogle-glog-dev libsnappy-dev protobuf-compiler libboost-context-dev pkg-config libcapstone-dev libclang-19-dev && \ + git clone https://github.com/Kirillog/syscall_intercept.git && \ + cmake syscall_intercept -G Ninja -B syscall_intercept/build -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=clang && \ + cmake --build syscall_intercept/build --target install + - name: Check out repository code + uses: actions/checkout@v4 + - name: Build + run: | + cmake -G Ninja -B build -DCMAKE_BUILD_TYPE=RelWithAssert + cmake --build build --target verify-litmus + - name: "Litmus tests" + run: ctest --parallel 4 --test-dir build -L "litmus" -V verifying-folly-release: runs-on: ubuntu-latest env: diff --git a/CMakeLists.txt b/CMakeLists.txt index fe508ba5..221844bd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,7 +7,7 @@ set(CMAKE_CXX_STANDARD 20) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) -set(APPLY_CLANG_TOOL ON) +set(APPLY_CLANG_TOOL OFF) set(CMAKE_CONFIGURATION_TYPES "Debug;Release;RelWithAssert" CACHE STRING "" FORCE) diff --git a/codegen/yieldpass.cpp b/codegen/yieldpass.cpp index 2b495f39..dec97693 100644 
--- a/codegen/yieldpass.cpp +++ b/codegen/yieldpass.cpp @@ -1,3 +1,11 @@ +#include +#include +#include + +#include "llvm/IR/Constants.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Passes/PassBuilder.h" @@ -119,7 +127,12 @@ struct YieldInserter { } } -#ifndef DEBUG +// This change was added to instrument methods recusively for some VK data +// structure, which had a lot of methods inside of it. This change allows to +// insert thread interleavings in the methods without marking them with +// `non_atomic` attribute. This flag is unset by default, because it might cause +// wmm tests to instrument the LTest code, which is leads to errors. +#if defined(LTEST_RECURSIVE_YIELDS) for (auto &B : F) { for (auto &I : B) { if (auto call = dyn_cast(&I)) { @@ -160,12 +173,426 @@ struct YieldInserter { std::set atomic{}; }; +static int StdOrderFromLlvm(AtomicOrdering ordering) { + switch (ordering) { + case AtomicOrdering::Monotonic: + return static_cast(std::memory_order_relaxed); + case AtomicOrdering::Acquire: + return static_cast(std::memory_order_acquire); + case AtomicOrdering::Release: + return static_cast(std::memory_order_release); + case AtomicOrdering::AcquireRelease: + return static_cast(std::memory_order_acq_rel); + case AtomicOrdering::SequentiallyConsistent: + return static_cast(std::memory_order_seq_cst); + case AtomicOrdering::NotAtomic: + case AtomicOrdering::Unordered: + default: + return static_cast(std::memory_order_relaxed); + } +} + +static std::string TypeSuffix(Type *ty) { + if (!ty->isIntegerTy()) { + return ""; + } + unsigned bits = ty->getIntegerBitWidth(); + switch (bits) { + case 1: + return "i1"; + case 8: + return "i8"; + case 16: + return "i16"; + case 32: + return "i32"; + case 64: + return "i64"; + default: + return ""; + } +} + +static bool IsSupportedType(Type *ty) { + return !TypeSuffix(ty).empty(); +} + +static 
FunctionCallee GetChoiceCallee(Module &M) { + auto &ctx = M.getContext(); + auto i32 = Type::getInt32Ty(ctx); + auto fty = FunctionType::get(i32, {i32}, false); + return M.getOrInsertFunction("__ltest_wmm_choice", fty); +} + +static FunctionCallee GetLoadCandidatesCallee(Module &M, Type *val_ty) { + auto &ctx = M.getContext(); + auto suffix = TypeSuffix(val_ty); + std::string name = "__ltest_wmm_load_candidates_" + suffix; + auto void_ptr = PointerType::get(ctx, 0); + auto order_ty = Type::getInt32Ty(ctx); + std::vector args = {void_ptr, order_ty}; + auto fty = FunctionType::get(Type::getInt32Ty(ctx), args, false); + return M.getOrInsertFunction(name, fty); +} + +static FunctionCallee GetLoadApplyCallee(Module &M, Type *val_ty) { + auto &ctx = M.getContext(); + auto suffix = TypeSuffix(val_ty); + std::string name = "__ltest_wmm_load_apply_" + suffix; + auto i32 = Type::getInt32Ty(ctx); + auto fty = FunctionType::get(val_ty, {i32}, false); + return M.getOrInsertFunction(name, fty); +} + +static FunctionCallee GetStoreCallee(Module &M, Type *val_ty) { + auto &ctx = M.getContext(); + auto suffix = TypeSuffix(val_ty); + std::string name = "__ltest_wmm_store_" + suffix; + auto void_ptr = PointerType::get(ctx, 0); + auto order_ty = Type::getInt32Ty(ctx); + std::vector args = {void_ptr, order_ty, val_ty}; + auto fty = FunctionType::get(Type::getVoidTy(ctx), args, false); + return M.getOrInsertFunction(name, fty); +} + +static FunctionCallee GetCmpXchgCallee(Module &M, Type *val_ty) { + auto &ctx = M.getContext(); + auto suffix = TypeSuffix(val_ty); + std::string name = "__ltest_wmm_cmpxchg_" + suffix; + auto void_ptr = PointerType::get(ctx, 0); + auto order_ty = Type::getInt32Ty(ctx); + auto i1 = Type::getInt1Ty(ctx); + std::vector args = {void_ptr, val_ty, val_ty, order_ty, order_ty, i1}; + auto ret_ty = StructType::get(ctx, {val_ty, i1}); + auto fty = FunctionType::get(ret_ty, args, false); + return M.getOrInsertFunction(name, fty); +} + +static Value 
*ToVoidPtr(IRBuilder<> &B, Value *ptr) { + auto *i8ptr = PointerType::get(B.getContext(), 0); + if (ptr->getType() == i8ptr) { + return ptr; + } + return B.CreateBitCast(ptr, i8ptr); +} + +static bool IsStdAtomicCall(Function *F) { + if (!F) { + return false; + } + auto name = F->getName(); + if (name.starts_with("__ltest_wmm_")) { + return false; + } + return name.contains("__atomic_base") || name.contains("atomic") || + name.contains("atomic_base"); +} + +static bool IsStdAtomicLoad(Function *F) { + if (!IsStdAtomicCall(F)) { + return false; + } + auto name = F->getName(); + return name.contains("load"); +} + +static bool IsStdAtomicStore(Function *F) { + if (!IsStdAtomicCall(F)) { + return false; + } + auto name = F->getName(); + return name.contains("store"); +} + +static bool IsStdAtomicCmpXchg(Function *F) { + if (!IsStdAtomicCall(F)) { + return false; + } + auto name = F->getName(); + return name.contains("compare_exchange_strong") || + name.contains("compare_exchange_weak"); +} + +static bool IsStdAtomicCmpXchgWeak(Function *F) { + if (!IsStdAtomicCmpXchg(F)) { + return false; + } + auto name = F->getName(); + return name.contains("compare_exchange_weak"); +} + +struct WmmAtomicInserter { + explicit WmmAtomicInserter(Module &M) : M(M) {} + + bool Run(const FunIndex &index) { + bool changed = false; + for (auto &F : M) { + if (!IsTarget(F.getName(), index)) { + continue; + } + changed |= InstrumentFunction(F); + } + return changed; + } + + private: + bool IsTarget(const StringRef fun_name, const FunIndex &index) { + return HasAttribute(index, fun_name, nonatomic_attr); + } + + bool InstrumentFunction(Function &F) { + SmallVector work; + for (auto &B : F) { + for (auto &I : B) { + if (auto *LI = dyn_cast(&I)) { + if (LI->isAtomic()) { + work.push_back(LI); + } + } else if (auto *SI = dyn_cast(&I)) { + if (SI->isAtomic()) { + work.push_back(SI); + } + } else if (auto *CX = dyn_cast(&I)) { + work.push_back(CX); + } else if (auto *RMW = dyn_cast(&I)) { + 
work.push_back(RMW); + } else if (auto *CI = dyn_cast(&I)) { + if (auto *CF = CI->getCalledFunction()) { + if (IsStdAtomicLoad(CF) || IsStdAtomicStore(CF) || + IsStdAtomicCmpXchg(CF)) { + work.push_back(CI); + } + } + } + } + } + + bool changed = false; + for (auto *I : work) { + if (auto *LI = dyn_cast(I)) { + changed |= InstrumentLoad(LI); + } else if (auto *SI = dyn_cast(I)) { + changed |= InstrumentStore(SI); + } else if (auto *CX = dyn_cast(I)) { + changed |= InstrumentCmpXchg(CX); + } else if (auto *RMW = dyn_cast(I)) { + changed |= InstrumentRmw(RMW); + } else if (auto *CI = dyn_cast(I)) { + changed |= InstrumentStdAtomicCall(CI); + } + } + + return changed; + } + + bool InstrumentLoad(LoadInst *LI) { + auto *val_ty = LI->getType(); + if (!IsSupportedType(val_ty)) { + return false; + } + + IRBuilder<> B(LI); + auto *order = B.getInt32(StdOrderFromLlvm(LI->getOrdering())); + auto *addr = ToVoidPtr(B, LI->getPointerOperand()); + auto load_candidates = GetLoadCandidatesCallee(M, val_ty); + auto *count = B.CreateCall(load_candidates, {addr, order}); + auto choice_callee = GetChoiceCallee(M); + auto *choice = B.CreateCall(choice_callee, {count}); + auto load_apply = GetLoadApplyCallee(M, val_ty); + auto *call = B.CreateCall(load_apply, {choice}); + + LI->replaceAllUsesWith(call); + LI->eraseFromParent(); + return true; + } + + bool InstrumentStore(StoreInst *SI) { + auto *val_ty = SI->getValueOperand()->getType(); + if (!IsSupportedType(val_ty)) { + return false; + } + + IRBuilder<> B(SI); + auto *order = B.getInt32(StdOrderFromLlvm(SI->getOrdering())); + auto *addr = ToVoidPtr(B, SI->getPointerOperand()); + auto *value = SI->getValueOperand(); + auto callee = GetStoreCallee(M, val_ty); + B.CreateCall(callee, {addr, order, value}); + SI->eraseFromParent(); + return true; + } + + bool InstrumentCmpXchg(AtomicCmpXchgInst *CX) { + auto *val_ty = CX->getCompareOperand()->getType(); + if (!IsSupportedType(val_ty)) { + return false; + } + + IRBuilder<> B(CX); + auto 
*order_success = B.getInt32(StdOrderFromLlvm(CX->getSuccessOrdering())); + auto *order_failure = B.getInt32(StdOrderFromLlvm(CX->getFailureOrdering())); + auto *addr = ToVoidPtr(B, CX->getPointerOperand()); + auto *expected = CX->getCompareOperand(); + auto *desired = CX->getNewValOperand(); + auto *is_weak = B.getInt1(CX->isWeak()); + + auto callee = GetCmpXchgCallee(M, val_ty); + auto *call = B.CreateCall( + callee, {addr, expected, desired, order_success, order_failure, is_weak}); + + auto *new_old = ExtractValueInst::Create(call, {0}, "", CX->getNextNode()); + auto *new_success = ExtractValueInst::Create(call, {1}, "", CX->getNextNode()); + + SmallVector to_remove; + for (auto *user : CX->users()) { + if (auto *EV = dyn_cast(user)) { + if (EV->getIndices()[0] == 0) { + EV->replaceAllUsesWith(new_old); + to_remove.push_back(EV); + } else if (EV->getIndices()[0] == 1) { + EV->replaceAllUsesWith(new_success); + to_remove.push_back(EV); + } + } + } + + for (auto *I : to_remove) { + I->eraseFromParent(); + } + + if (CX->use_empty()) { + CX->eraseFromParent(); + } + + return true; + } + + bool InstrumentRmw(AtomicRMWInst *RMW) { + // rmw is not fully supported by wmm graph + (void)RMW; + return false; + } + + bool InstrumentStdAtomicCall(CallInst *CI) { + auto *CF = CI->getCalledFunction(); + if (!CF) { + return false; + } + + if (IsStdAtomicLoad(CF)) { + if (CI->arg_size() < 1) { + return false; + } + auto *addr = CI->getArgOperand(0); + Value *order = nullptr; + if (CI->arg_size() >= 2) { + order = CI->getArgOperand(1); + } + auto *val_ty = CI->getType(); + if (!IsSupportedType(val_ty)) { + return false; + } + IRBuilder<> B(CI); + if (!order) { + // default std::atomic load uses seq_cst. 
+ order = B.getInt32(static_cast(std::memory_order_seq_cst)); + } + auto load_candidates = GetLoadCandidatesCallee(M, val_ty); + auto *count = B.CreateCall(load_candidates, {ToVoidPtr(B, addr), order}); + auto choice_callee = GetChoiceCallee(M); + auto *choice = B.CreateCall(choice_callee, {count}); + auto load_apply = GetLoadApplyCallee(M, val_ty); + auto *call = B.CreateCall(load_apply, {choice}); + CI->replaceAllUsesWith(call); + CI->eraseFromParent(); + return true; + } + + if (IsStdAtomicStore(CF)) { + if (CI->arg_size() < 2) { + return false; + } + auto *addr = CI->getArgOperand(0); + auto *value = CI->getArgOperand(1); + Value *order = nullptr; + if (CI->arg_size() >= 3) { + order = CI->getArgOperand(2); + } + auto *val_ty = value->getType(); + if (!IsSupportedType(val_ty)) { + return false; + } + IRBuilder<> B(CI); + if (!order) { + // default std::atomic store uses seq_cst + order = B.getInt32(static_cast(std::memory_order_seq_cst)); + } + auto callee = GetStoreCallee(M, val_ty); + B.CreateCall(callee, {ToVoidPtr(B, addr), order, value}); + CI->eraseFromParent(); + return true; + } + + if (IsStdAtomicCmpXchg(CF)) { + if (CI->arg_size() < 4) { + return false; + } + auto *addr = CI->getArgOperand(0); + auto *expected_ptr = CI->getArgOperand(1); + auto *desired = CI->getArgOperand(2); + auto *order_success = CI->getArgOperand(3); + Value *order_failure = nullptr; + if (CI->arg_size() >= 5) { + order_failure = CI->getArgOperand(4); + } + auto *val_ty = desired->getType(); + if (!IsSupportedType(val_ty)) { + return false; + } + + IRBuilder<> B(CI); + auto *expected_val = B.CreateLoad(val_ty, expected_ptr); + auto *is_weak = B.getInt1(IsStdAtomicCmpXchgWeak(CF)); + if (!order_failure) { + // overload with single memory_order + order_failure = order_success; + } + auto callee = GetCmpXchgCallee(M, val_ty); + auto *call = B.CreateCall( + callee, + {ToVoidPtr(B, addr), expected_val, desired, order_success, + order_failure, + is_weak}); + + Instruction *insert_pt = 
CI->getNextNode(); + if (!insert_pt) { + insert_pt = CI->getParent()->getTerminator(); + } + IRBuilder<> Bafter(insert_pt); + auto *new_old = Bafter.CreateExtractValue(call, {0}); + auto *new_success = Bafter.CreateExtractValue(call, {1}); + Bafter.CreateStore(new_old, expected_ptr); + + CI->replaceAllUsesWith(new_success); + CI->eraseFromParent(); + return true; + } + + return false; + } + + Module &M; +}; + namespace { struct YieldInsertPass final : public PassInfoMixin { PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) { auto fun_index = CreateFunIndex(M); + WmmAtomicInserter wmm{M}; + wmm.Run(fun_index); + YieldInserter gen{M}; gen.Run(fun_index); diff --git a/runtime/CMakeLists.txt b/runtime/CMakeLists.txt index 565ccc47..f5f0f91f 100644 --- a/runtime/CMakeLists.txt +++ b/runtime/CMakeLists.txt @@ -3,12 +3,13 @@ set (SOURCE_FILES lib.cpp lin_check.cpp logger.cpp - pretty_printer.cpp verifying.cpp generators.cpp minimization.cpp minimization_smart.cpp coro_ctx_guard.cpp + strategy_context.cpp + wmm_runtime.cpp ) add_library(runtime SHARED ${SOURCE_FILES}) diff --git a/runtime/include/custom_round.h b/runtime/include/custom_round.h new file mode 100644 index 00000000..23bbaf58 --- /dev/null +++ b/runtime/include/custom_round.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include "lib.h" + +struct CustomRound { + CustomRound(std::vector> threads_) + : threads(threads_) {} + + std::vector> threads; +}; \ No newline at end of file diff --git a/runtime/include/latomic.h b/runtime/include/latomic.h new file mode 100644 index 00000000..f0b9712e --- /dev/null +++ b/runtime/include/latomic.h @@ -0,0 +1,384 @@ +#pragma once + +#include + +#include "lib.h" +#include "wmm/wmm.h" + +namespace ltest { + +using namespace wmm; + +// This class is intended to be the entry point +// for the weak memory logic later. 
+template +class latomic { + std::atomic atomicValue; + int locationId = -1; + ExecutionGraph& wmmGraph = ExecutionGraph::getInstance(); + + public: +#if __cplusplus >= 201703L // C++17 + static constexpr bool is_always_lock_free = + std::atomic::is_always_lock_free; +#endif + + // Constructors + constexpr latomic() noexcept : latomic(T{}) {} + constexpr latomic(T desired) noexcept : atomicValue(desired) { + if (wmm_enabled) { + locationId = wmmGraph.RegisterLocation(desired); + } + } + latomic(const latomic&) = delete; + latomic& operator=(const latomic&) = delete; + latomic& operator=(const latomic&) volatile = delete; + + // operator= + T operator=(T desired) noexcept { + store(desired); + return desired; + } + + T operator=(T desired) volatile noexcept { + store(desired); + return desired; + } + + // is_lock_free + bool is_lock_free() const noexcept { return atomicValue.is_lock_free(); } + + bool is_lock_free() const volatile noexcept { + return atomicValue.is_lock_free(); + } + + // store + void store(T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + atomicValue.store(desired, order); + + if (wmm_enabled && this_coro && locationId >= 0 && this_thread_id >= 0) { + // std::cout << "Store: coro id=" << this_coro->GetId() << ", thread=" << + // this_thread_id + // << ", name=" << this_coro->GetName() << std::endl; + wmmGraph.Store(locationId, this_thread_id, WmmUtils::OrderFromStd(order), + desired); + } + } + + void store(T desired, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + store(desired, order); + } + + // load + T load(std::memory_order order = std::memory_order_seq_cst) const noexcept { + if (wmm_enabled && this_coro && locationId >= 0 && this_thread_id >= 0) { + // std::cout << "Load: coro id=" << this_coro->GetId() << ", thread=" << + // this_thread_id + // << ", name=" << this_coro->GetName() << std::endl; + T value = wmmGraph.Load(locationId, this_thread_id, + WmmUtils::OrderFromStd(order)); + 
return value; + } + + return atomicValue.load(order); + } + + T load(std::memory_order order = std::memory_order_seq_cst) const + volatile noexcept { + return load(order); + } + + // operator T() + operator T() const noexcept { return load(); } + + operator T() const volatile noexcept { return load(); } + + // TODO: add wmm support for exchange + // exchange + T exchange(T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.exchange(desired, order); + } + + T exchange(T desired, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.exchange(desired, order); + } + + // TODO: for CASes there more complex rule how to get 'failure' order from + // 'success', implement them instead of blind defaults compare_exchange_weak + bool compare_exchange_weak(T& expected, T desired, std::memory_order success, + std::memory_order failure) noexcept { + // we want to prevent actual atomics from overriding 'expected' value on rmw + // failure + T myExpected = expected; + bool value = atomicValue.compare_exchange_weak(myExpected, desired, success, + failure); + + if (wmm_enabled && this_coro && locationId >= 0 && this_thread_id >= 0) { + // std::cout << "Compare exchange weak: coro id=" << this_coro->GetId() << + // ", thread=" << this_thread_id + // << ", name=" << this_coro->GetName() << std::endl; + auto [rmwSuccess, readValue] = wmmGraph.ReadModifyWrite( + locationId, this_thread_id, &expected, desired, + WmmUtils::OrderFromStd(success), WmmUtils::OrderFromStd(failure)); + value = rmwSuccess; + } else { + // update expected only if we are not in a coroutine + expected = myExpected; + } + + return value; + } + + bool compare_exchange_weak(T& expected, T desired, std::memory_order success, + std::memory_order failure) volatile noexcept { + return compare_exchange_weak(expected, desired, success, failure); + } + + bool compare_exchange_weak( + T& expected, T desired, + std::memory_order order = 
std::memory_order_seq_cst) noexcept { + return compare_exchange_weak(expected, desired, order, + std::memory_order_seq_cst); + } + + bool compare_exchange_weak( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) volatile noexcept { + return compare_exchange_weak(expected, desired, order, + std::memory_order_seq_cst); + } + + // compare_exchange_strong + bool compare_exchange_strong(T& expected, T desired, + std::memory_order success, + std::memory_order failure) noexcept { + // we want to prevent actual atomics from overriding 'expected' value on rmw + // failure + T myExpected = expected; + bool value = atomicValue.compare_exchange_strong(myExpected, desired, + success, failure); + + if (wmm_enabled && this_coro && locationId >= 0 && this_thread_id >= 0) { + auto [rmwSuccess, readValue] = wmmGraph.ReadModifyWrite( + locationId, this_thread_id, &expected, desired, + WmmUtils::OrderFromStd(success), WmmUtils::OrderFromStd(failure)); + value = rmwSuccess; + } else { + // update expected only if we are not in a coroutine + expected = myExpected; + } + + return value; + } + + bool compare_exchange_strong(T& expected, T desired, + std::memory_order success, + std::memory_order failure) volatile noexcept { + return compare_exchange_strong(expected, desired, success, failure); + } + + bool compare_exchange_strong( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return compare_exchange_strong(expected, desired, order, + std::memory_order_seq_cst); + } + + bool compare_exchange_strong( + T& expected, T desired, + std::memory_order order = std::memory_order_seq_cst) volatile noexcept { + return compare_exchange_strong(expected, desired, order, + std::memory_order_seq_cst); + } + + // TODO: operations below need to be implemented in WMM graph as well + +// wait +#if __cplusplus >= 202002L // C++20 + void wait(T old, std::memory_order order = + std::memory_order_seq_cst) const noexcept { + 
atomicValue.wait(old, order); + } + + void wait(T old, std::memory_order order = std::memory_order_seq_cst) const + volatile noexcept { + atomicValue.wait(old, order); + } + + // notify_one + void notify_one() noexcept { atomicValue.notify_one(); } + + void notify_one() volatile noexcept { atomicValue.notify_one(); } + + // notify all + void notify_all() noexcept { atomicValue.notify_all(); } + + void notify_all() volatile noexcept { atomicValue.notify_all(); } +#endif + + // fetch_add + T fetch_add(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_add(arg, order); + } + + T fetch_add(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_add(arg, order); + } + + // TODO: fix ambiguity with specialization for T* + // T* fetch_add(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) noexcept { + // return atomicValue.fetch_add(arg, order); + // } + + // T* fetch_add(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) volatile noexcept { + // return atomicValue.fetch_add(arg, order); + // } + + // fetch_sub + T fetch_sub(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_sub(arg, order); + } + + T fetch_sub(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_sub(arg, order); + } + + // TODO: fix ambiguity with specialization for T* + // T* fetch_sub(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) noexcept { + // return atomicValue.fetch_sub(arg, order); + // } + + // T* fetch_sub(std::ptrdiff_t arg, std::memory_order order = + // std::memory_order_seq_cst) volatile noexcept { + // return atomicValue.fetch_sub(arg, order); + // } + + // operator+= + T operator+=(T arg) noexcept { return atomicValue.operator+=(arg); } + + T operator+=(T arg) volatile noexcept { return 
atomicValue.operator+=(arg); } + + // TODO: fix ambiguity with specialization for T* + // T* operator+=(std::ptrdiff_t arg) noexcept { + // return atomicValue.operator+=(arg); + // } + + // T* operator+=(std::ptrdiff_t arg) volatile noexcept { + // return atomicValue.operator+=(arg); + // } + + // operator-= + T operator-=(T arg) noexcept { return atomicValue.operator-=(arg); } + + T operator-=(T arg) volatile noexcept { return atomicValue.operator-=(arg); } + + // TODO: fix ambiguity with specialization for T* + // T* operator-=(std::ptrdiff_t arg) noexcept { + // return atomicValue.operator-=(arg); + // } + + // T* operator-=(std::ptrdiff_t arg) volatile noexcept { + // return atomicValue.operator-=(arg); + // } + + // fetch_max + T fetch_max(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_max(arg, order); + } + + T fetch_max(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_max(arg, order); + } + + // fetch_min + T fetch_min(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_min(arg, order); + } + + T fetch_min(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_min(arg, order); + } + + // operator++ + T operator++() noexcept { return atomicValue.operator++(); } + + T operator++() volatile noexcept { return atomicValue.operator++(); } + + T operator++(int) noexcept { return atomicValue.operator++(0); } + + T operator++(int) volatile noexcept { return atomicValue.operator++(0); } + + // operator-- + T operator--() noexcept { return atomicValue.operator--(); } + + T operator--() volatile noexcept { return atomicValue.operator--(); } + + T operator--(int) noexcept { return atomicValue.operator--(0); } + + T operator--(int) volatile noexcept { return atomicValue.operator--(0); } + + // fetch_and + T fetch_and(T arg, + std::memory_order order = 
std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_and(arg, order); + } + + T fetch_and(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_and(arg, order); + } + + // fetch_or + T fetch_or(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_or(arg, order); + } + + T fetch_or(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_or(arg, order); + } + + // fetch_xor + T fetch_xor(T arg, + std::memory_order order = std::memory_order_seq_cst) noexcept { + return atomicValue.fetch_xor(arg, order); + } + + T fetch_xor(T arg, std::memory_order order = + std::memory_order_seq_cst) volatile noexcept { + return atomicValue.fetch_xor(arg, order); + } + + // operator&= + T operator&=(T arg) noexcept { return atomicValue.operator&=(arg); } + + T operator&=(T arg) volatile noexcept { return atomicValue.operator&=(arg); } + + // operator|= + T operator|=(T arg) noexcept { return atomicValue.operator|=(arg); } + + T operator|=(T arg) volatile noexcept { return atomicValue.operator|=(arg); } + + // operator^= + T operator^=(T arg) noexcept { return atomicValue.operator^=(arg); } + + T operator^=(T arg) volatile noexcept { return atomicValue.operator^=(arg); } +}; + +} // namespace ltest \ No newline at end of file diff --git a/runtime/include/lib.h b/runtime/include/lib.h index c3d3b99a..a21e8ccb 100644 --- a/runtime/include/lib.h +++ b/runtime/include/lib.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ struct CoroutineStatus; // Current executing coroutine. 
extern std::shared_ptr this_coro; +extern int this_thread_id; // Scheduler context extern boost::context::fiber_context sched_ctx; @@ -31,6 +33,21 @@ struct CoroutineStatus { bool has_started; }; +namespace ltest { +struct TestFailure : public std::exception { + explicit TestFailure(std::string message) : msg(std::move(message)) {} + const char* what() const noexcept override { return msg.c_str(); } + + private: + std::string msg; +}; + +void SetTestFailure(std::string message); +bool HasTestFailure(); +const std::string& GetTestFailureMessage(); +void ClearTestFailure(); +} // namespace ltest + extern "C" void CoroYield(); extern "C" void CoroutineStatusChange(char* coroutine, bool start); @@ -45,7 +62,7 @@ struct CoroBase : public std::enable_shared_from_this { virtual std::shared_ptr Restart(void* this_ptr) = 0; // Resume the coroutine to the next yield. - void Resume(); + void Resume(int resumed_thread_id); // Check if the coroutine is returned. bool IsReturned() const; @@ -70,10 +87,10 @@ struct CoroBase : public std::enable_shared_from_this { std::shared_ptr GetPtr(); // Try to terminate the coroutine. - void TryTerminate(); + void TryTerminate(int running_thread_id); // Terminate the coroutine. - void Terminate(); + void Terminate(int running_thread_id); void SetBlocked(const BlockState& state) { fstate = state; @@ -152,7 +169,14 @@ struct Coro final : public CoroBase { reinterpret_cast*>(c->args.get()); auto this_arg = std::tuple{reinterpret_cast(c->this_ptr)}; - c->ret = std::apply(c->func, std::tuple_cat(this_arg, *real_args)); + try { + c->ret = std::apply(c->func, std::tuple_cat(this_arg, *real_args)); + } catch (const ltest::TestFailure& ex) { + ltest::SetTestFailure(ex.what()); + c->ret = void_v; + } catch (...) 
{ + throw; + } c->is_returned = true; return std::move(ctx); }); diff --git a/runtime/include/pct_strategy.h b/runtime/include/pct_strategy.h index 02569b88..f7e12237 100644 --- a/runtime/include/pct_strategy.h +++ b/runtime/include/pct_strategy.h @@ -166,19 +166,7 @@ struct PctStrategy : public BaseStrategyWithThreads { } void StartNextRound() override { - this->new_task_id = 0; - // log() << "depth: " << current_depth << "\n"; - // Reconstruct target as we start from the beginning. - this->TerminateTasks(); - for (auto& thread : this->threads) { - // We don't have to keep references alive - while (thread.size() > 0) { - thread.pop_back(); - } - thread = StableVector(); - } - // this->state.Reset(); - + BaseStrategyWithThreads::StartNextRound(); UpdateStatistics(); } @@ -187,6 +175,31 @@ struct PctStrategy : public BaseStrategyWithThreads { UpdateStatistics(); } + void SetCustomRound(CustomRound& custom_round) override { + BaseStrategyWithThreads::SetCustomRound(custom_round); + // reinitialize pct internals to avoid oob + current_schedule_length = 0; + count_chosen_same = 0; + fair_stage = 0; + fair_start = 0; + last_chosen = 0; + PrepareForDepth(current_depth, 1); + } + + size_t ChooseCandidate(size_t candidates_count) override { + // uniform choice for wmm candidates + if (candidates_count == 0) { + return 0; + } + std::uniform_int_distribution dist(0, candidates_count - 1); + return dist(this->choice_rng); + } + + void SetSeed(uint64_t seed) override { + BaseStrategyWithThreads::SetSeed(seed); + rng.seed(seed); + } + ~PctStrategy() { this->TerminateTasks(); } private: @@ -210,7 +223,7 @@ struct PctStrategy : public BaseStrategyWithThreads { void PrepareForDepth(size_t depth, size_t k) { // Generates priorities - priorities = std::vector(this->threads_count); + priorities = std::vector(this->threads.size()); for (size_t i = 0; i < priorities.size(); ++i) { priorities[i] = current_depth + i; } diff --git a/runtime/include/pick_strategy.h 
b/runtime/include/pick_strategy.h index 8a21045a..d3a8aba5 100644 --- a/runtime/include/pick_strategy.h +++ b/runtime/include/pick_strategy.h @@ -34,18 +34,6 @@ struct PickStrategy : public BaseStrategyWithThreads { is_new, current_thread}; } - void StartNextRound() override { - this->new_task_id = 0; - - this->TerminateTasks(); - for (auto& thread : this->threads) { - // We don't have to keep references alive - while (thread.size() > 0) { - thread.pop_back(); - } - } - } - ~PickStrategy() { this->TerminateTasks(); } protected: diff --git a/runtime/include/pretty_print.h b/runtime/include/pretty_print.h index 9504fa59..baf9fc8c 100644 --- a/runtime/include/pretty_print.h +++ b/runtime/include/pretty_print.h @@ -14,7 +14,7 @@ using std::to_string; using FullHistoryWithThreads = std::vector, CoroutineStatus>>>; struct PrettyPrinter { - PrettyPrinter(size_t threads_num); + explicit PrettyPrinter() = default; /* Prints like this: @@ -31,7 +31,7 @@ struct PrettyPrinter { */ template void PrettyPrint(const std::vector>& result, - Out_t& out) { + int threads_num, Out_t& out) { auto get_thread_num = [](const std::variant& v) { // Crutch. if (v.index() == 0) { @@ -42,7 +42,7 @@ struct PrettyPrinter { int cell_width = 50; // Up it if necessary. Enough for now. - auto print_separator = [&out, this, cell_width]() { + auto print_separator = [threads_num, &out, this, cell_width]() { out << "*"; for (int i = 0; i < threads_num; ++i) { for (int j = 0; j < cell_width; ++j) { @@ -121,10 +121,11 @@ struct PrettyPrinter { // Helps to debug full histories. template - void PrettyPrint(FullHistoryWithThreads& result, Out_t& out) { + void PrettyPrint(FullHistoryWithThreads& result, int threads_num, + Out_t& out) { int cell_width = 20; // Up it if necessary. Enough for now. 
- auto print_separator = [&out, this, cell_width]() { + auto print_separator = [threads_num, &out, this, cell_width]() { out << "*"; for (int i = 0; i < threads_num; ++i) { for (int j = 0; j < cell_width; ++j) { @@ -178,7 +179,7 @@ struct PrettyPrinter { index[base] = sz; } int length = std::to_string(index[base]).size(); - std::cout << index[base]; + out << index[base]; assert(spaces - length >= 0); print_spaces(7 - length); out << "|"; @@ -247,5 +248,4 @@ struct PrettyPrinter { out << msg; } }; - size_t threads_num; }; diff --git a/runtime/include/random_strategy.h b/runtime/include/random_strategy.h index 9f144f7f..152b9bdb 100644 --- a/runtime/include/random_strategy.h +++ b/runtime/include/random_strategy.h @@ -80,6 +80,23 @@ struct RandomStrategy : PickStrategy { return std::nullopt; } + size_t ChooseCandidate(size_t candidates_count) override { + // uniform choice across candidates for random strategy + if (candidates_count == 0) { + return 0; + } + std::uniform_int_distribution dist(0, candidates_count - 1); + return dist(this->choice_rng); + } + + void SetCustomRound(CustomRound &custom_round) override { + PickStrategy::SetCustomRound(custom_round); + + size_t custom_threads_count = custom_round.threads.size(); + weights.resize(custom_threads_count, 1); + pick_weights.clear(); + } + private: std::vector weights; std::vector pick_weights; diff --git a/runtime/include/round_robin_strategy.h b/runtime/include/round_robin_strategy.h index 7045308e..1f3c2ca5 100644 --- a/runtime/include/round_robin_strategy.h +++ b/runtime/include/round_robin_strategy.h @@ -39,5 +39,15 @@ struct RoundRobinStrategy : PickStrategy { return std::nullopt; } + size_t ChooseCandidate(size_t candidates_count) override { + if (candidates_count == 0) { + return 0; + } + size_t choice = next_candidate % candidates_count; + next_candidate++; + return choice; + } + size_t next_task; + size_t next_candidate{0}; }; diff --git a/runtime/include/scheduler.h b/runtime/include/scheduler.h index 
86420377..23227877 100644 --- a/runtime/include/scheduler.h +++ b/runtime/include/scheduler.h @@ -9,6 +9,7 @@ #include #include +#include "custom_round.h" #include "lib.h" #include "lincheck.h" #include "logger.h" @@ -17,6 +18,9 @@ #include "pretty_print.h" #include "scheduler_fwd.h" #include "stable_vector.h" +#include "wmm/wmm.h" + +using namespace ltest::wmm; struct TaskWithMetaData { Task& task; @@ -35,12 +39,18 @@ concept StrategyTaskVerifier = requires(T a) { } -> std::same_as; { a.OnFinished(std::declval(), size_t()) } -> std::same_as; { a.ReleaseTask(size_t()) } -> std::same_as>; + { a.Reset() } -> std::same_as; }; // Strategy is the general strategy interface which decides which task // will be the next one it can be implemented by different strategies, such as: // randomized/tla/fair struct Strategy { + Strategy() { + std::random_device dev; + choice_rng.seed(dev()); + } + virtual std::optional NextThreadId() = 0; virtual std::optional Next() = 0; @@ -78,6 +88,11 @@ struct Strategy { // Resets the state of all created tasks in the strategy. virtual void ResetCurrentRound() = 0; + // Sets custom round provided by the user for execution. + // The round should be executed via `Scheduler::ExploreRound` method + // instead of `Scheduler::GenerateAndRunRound`. 
+ virtual void SetCustomRound(CustomRound& custom_round) = 0; + // Returns the number of non-removed tasks virtual int GetValidTasksCount() const = 0; @@ -92,6 +107,12 @@ struct Strategy { // BaseStrategyWithThreads knows about the Verifier and will delegate to that) virtual void OnVerifierTaskFinish(Task& task, size_t thread_id) = 0; + // Chooses read candidate index in [0, candidates_count) + virtual size_t ChooseCandidate(size_t candidates_count) = 0; + + // Seeds choice rng for reproducible scheduling and wmm choices + virtual void SetSeed(uint64_t seed) { choice_rng.seed(seed); } + virtual ~Strategy() = default; protected: @@ -99,6 +120,15 @@ struct Strategy { // than `round_schedule[thread]` or the same index if the task is not finished virtual int GetNextTaskInThread(int thread_index) const = 0; + // Rng used by strategy choice; derived strategies can use it directly + std::mt19937 choice_rng; + + void ResetWmmGraph(int threads_count) { + if (wmm_enabled) { + wmm_graph.Reset(threads_count); + } + } + // id of next generated task int new_task_id = 0; // stores task ids that are removed during the round minimization @@ -106,16 +136,18 @@ struct Strategy { // when generated round is explored this vector stores indexes of tasks // that will be invoked next in each thread std::vector round_schedule; + ExecutionGraph& wmm_graph = ExecutionGraph::getInstance(); }; template struct BaseStrategyWithThreads : public Strategy { BaseStrategyWithThreads(size_t threads_count, std::vector constructors) - : state(std::make_unique()), - threads_count(threads_count), - constructors(std::move(constructors)) { + : threads_count(threads_count), constructors(std::move(constructors)) { + // must be called before instantiating `TargetObj` + ResetWmmGraph(this->threads_count); round_schedule.resize(threads_count, -1); + state = std::make_unique(); constructors_distribution = std::uniform_int_distribution( @@ -130,6 +162,11 @@ struct BaseStrategyWithThreads : public Strategy { rng = 
std::mt19937(dev()); } + void SetSeed(uint64_t seed) override { + Strategy::SetSeed(seed); + rng.seed(seed); + } + std::optional> GetTask(int task_id) override { // TODO: can this be optimized? int thread_id = 0; @@ -153,6 +190,32 @@ struct BaseStrategyWithThreads : public Strategy { return threads; } + void StartNextRound() override { + this->new_task_id = 0; + // also resets the state + this->TerminateTasks(); // TODO: what about different threads count for + // wmm_graph? + + // this could happen if we run custom scenarios + // (which could have arbitrary number of threads) + if (this->threads.size() != this->threads_count) { + this->threads.clear(); + for (size_t i = 0; i < this->threads_count; ++i) { + this->threads.emplace_back(); + } + } else { + // more optimal allocations-wise implementation + for (auto& thread : this->threads) { + // We don't have to keep references alive + while (thread.size() > 0) { + thread.pop_back(); + } + } + } + + this->round_schedule.resize(this->threads_count, -1); + } + void ResetCurrentRound() override { TerminateTasks(); for (auto& thread : threads) { @@ -165,6 +228,29 @@ struct BaseStrategyWithThreads : public Strategy { } } + void SetCustomRound(CustomRound& custom_round) override { + size_t custom_threads_count = custom_round.threads.size(); + + // custom round threads count might differ from the generated rounds + this->threads.resize(custom_threads_count); + this->round_schedule.resize(custom_threads_count, -1); + this->sched_checker.Reset(); + ResetWmmGraph(custom_threads_count); + this->state = std::make_unique(); + + for (size_t current_thread = 0; current_thread < custom_threads_count; + ++current_thread) { + auto& builders = custom_round.threads[current_thread]; + StableVector thread_tasks; + for (auto& task_builder : builders) { + auto task = + task_builder.Build(state.get(), current_thread, new_task_id++); + thread_tasks.emplace_back(task); + } + this->threads[current_thread] = std::move(thread_tasks); + } + } + 
int GetValidTasksCount() const override { int non_removed_tasks = 0; for (auto& thread : threads) { @@ -274,7 +360,7 @@ struct BaseStrategyWithThreads : public Strategy { auto& task = thread[task_index]; has_nonterminated_threads = true; // do a single step in this task - task->Resume(); + task->Resume(thread_index); if (task->IsReturned()) { OnVerifierTaskFinish(task, thread_index); debug(stderr, "Terminated: %ld\n", thread_index); @@ -282,6 +368,11 @@ struct BaseStrategyWithThreads : public Strategy { } } } + // must appear before state reset, so that constructors of atomics in + // data structure under test register themselves in the new execution graph + // TODO: for custom scenarios threads number might differ, check for places + // where `threads.size()` cannot be used + ResetWmmGraph(threads.size()); state.reset(new TargetObj{}); } @@ -313,6 +404,10 @@ struct BaseStrategyWithThreads : public Strategy { std::mt19937 rng; }; +// Active strategy context for wmm choice +void SetActiveStrategy(Strategy* strategy); +Strategy* GetActiveStrategy(); + // StrategyScheduler generates different sequential histories (using Strategy) // and then checks them with the ModelChecker template @@ -320,11 +415,13 @@ struct StrategyScheduler : public SchedulerWithReplay { // max_switches represents the maximal count of switches. 
After this count // scheduler will end execution of the Run function StrategyScheduler(Strategy& sched_class, ModelChecker& checker, + std::vector custom_rounds, PrettyPrinter& pretty_printer, size_t max_tasks, size_t max_rounds, bool minimize, size_t exploration_runs, size_t minimization_runs) : strategy(sched_class), checker(checker), + custom_rounds(std::move(custom_rounds)), pretty_printer(pretty_printer), max_tasks(max_tasks), max_rounds(max_rounds), @@ -336,28 +433,40 @@ struct StrategyScheduler : public SchedulerWithReplay { // history is a history with all events, where each element in the vector is a // Resume operation on the corresponding task Scheduler::Result Run() override { - for (size_t i = 0; i < max_rounds; ++i) { - log() << "run round: " << i << "\n"; - auto histories = RunRound(); + for (size_t j = 0; j < custom_rounds.size() + max_rounds; ++j) { + bool is_running_custom_scenarios = (j < custom_rounds.size()); + Result histories; + + if (is_running_custom_scenarios) { + log() << "explore custom round: " << j << "\n"; + strategy.SetCustomRound(custom_rounds[j]); + histories = ExploreRound(exploration_runs, true); + } else { + size_t i = j - custom_rounds.size(); + log() << "run round: " << i << "\n"; + debug(stderr, "run round: %zu\n", i); + histories = RunRound(); + } if (histories.has_value()) { auto& [full_history, sequential_history, reason] = histories.value(); + int threads_num = GetStartegyThreadsCount(); if (should_minimize_history) { log() << "Full nonlinear scenario: \n"; - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, threads_num, log()); log() << "Minimizing same interleaving...\n"; Minimize(histories.value(), SameInterleavingMinimizor()); log() << "Minimized to:\n"; - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, threads_num, log()); log() << "Minimizing with rescheduling (exploration runs: " << exploration_runs << 
")...\n"; Minimize(histories.value(), StrategyExplorationMinimizor(exploration_runs)); log() << "Minimized to:\n"; - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, threads_num, log()); log() << "Minimizing with smart minimizor (exploration runs: " << exploration_runs @@ -377,6 +486,10 @@ struct StrategyScheduler : public SchedulerWithReplay { return std::nullopt; } + int GetStartegyThreadsCount() const override { + return strategy.GetThreadsCount(); + } + protected: // Runs a round with some interleaving while generating it Result RunRound() override { @@ -401,7 +514,7 @@ struct StrategyScheduler : public SchedulerWithReplay { } full_history.emplace_back(next_task); - next_task->Resume(); + next_task->Resume(thread_id); if (next_task->IsReturned()) { finished_tasks++; strategy.OnVerifierTaskFinish(next_task, thread_id); @@ -412,7 +525,8 @@ struct StrategyScheduler : public SchedulerWithReplay { } } - pretty_printer.PrettyPrint(sequential_history, log()); + pretty_printer.PrettyPrint(sequential_history, GetStartegyThreadsCount(), + log()); if (deadlock_detected) { return NonLinearizableHistory(full_history, sequential_history, @@ -429,7 +543,7 @@ struct StrategyScheduler : public SchedulerWithReplay { } // Runs different interleavings of the current round - Result ExploreRound(int runs) override { + Result ExploreRound(int runs, bool log_each_interleaving) override { for (int i = 0; i < runs; ++i) { // log() << "Run " << i + 1 << "/" << runs << "\n"; strategy.ResetCurrentRound(); @@ -452,7 +566,7 @@ struct StrategyScheduler : public SchedulerWithReplay { } full_history.emplace_back(next_task); - next_task->Resume(); + next_task->Resume(thread_id); if (next_task->IsReturned()) { tasks_to_run--; strategy.OnVerifierTaskFinish(next_task, thread_id); @@ -463,6 +577,12 @@ struct StrategyScheduler : public SchedulerWithReplay { } } + if (log_each_interleaving) { + pretty_printer.PrettyPrint(sequential_history, + 
GetStartegyThreadsCount(), log()); + log() << "\n"; + } + if (deadlock_detected) { return NonLinearizableHistory(full_history, sequential_history, NonLinearizableHistory::Reason::DEADLOCK); @@ -470,7 +590,8 @@ struct StrategyScheduler : public SchedulerWithReplay { if (!checker.Check(sequential_history)) { // log() << "New nonlinearized scenario:\n"; - // pretty_printer.PrettyPrint(sequential_history, log()); + // pretty_printer.PrettyPrint(sequential_history, + // GetStartegyThreadsCount(), log()); return NonLinearizableHistory( full_history, sequential_history, NonLinearizableHistory::Reason::NON_LINEARIZABLE_HISTORY); @@ -519,10 +640,10 @@ struct StrategyScheduler : public SchedulerWithReplay { // if this is the last time this task appears in `tasks_ordering`, then // complete it fully. if (resumes_count[next_task_id] == 0) { - next_task->Terminate(); + next_task->Terminate(thread_id); } else { resumes_count[next_task_id]--; - next_task->Resume(); + next_task->Resume(thread_id); } if (next_task->IsReturned()) { @@ -554,6 +675,7 @@ struct StrategyScheduler : public SchedulerWithReplay { private: Strategy& strategy; ModelChecker& checker; + std::vector custom_rounds; PrettyPrinter& pretty_printer; size_t max_tasks; size_t max_rounds; @@ -614,6 +736,8 @@ struct TLAScheduler : Scheduler { struct Frame { // Pointer to the in task thread. Task* task{}; + // Id of the thread in which the task is running. + int thread_id{}; // Is true if the task was created at this step. 
bool is_new{}; }; @@ -624,11 +748,11 @@ struct TLAScheduler : Scheduler { // cancel() func takes care for graceful shutdown void TerminateTasks() { cancel(); - for (size_t i = 0; i < threads.size(); ++i) { - for (size_t j = 0; j < threads[i].tasks.size(); ++j) { - auto& task = threads[i].tasks[j]; + for (size_t thread_id = 0; thread_id < threads.size(); ++thread_id) { + for (size_t j = 0; j < threads[thread_id].tasks.size(); ++j) { + auto& task = threads[thread_id].tasks[j]; if (!task->IsReturned()) { - task->Terminate(); + task->Terminate(thread_id); } } } @@ -651,7 +775,7 @@ struct TLAScheduler : Scheduler { } else { // It was a not new task, hence, we recreated in early. } - (*task)->Resume(); + (*task)->Resume(frame.thread_id); } coroutine_status.reset(); } @@ -698,6 +822,7 @@ struct TLAScheduler : Scheduler { } auto& task = thread.tasks.back(); frame.task = &task; + frame.thread_id = thread_id; thread_id_history.push_back(thread_id); if (is_new) { @@ -705,7 +830,7 @@ struct TLAScheduler : Scheduler { } assert(!task->IsBlocked()); - task->Resume(); + task->Resume(thread_id); UpdateFullHistory(thread_id, task, is_new); bool is_finished = task->IsReturned(); if (is_finished) { @@ -724,7 +849,8 @@ struct TLAScheduler : Scheduler { } } else { log() << "run round: " << finished_rounds << "\n"; - pretty_printer.PrettyPrint(full_history, log()); + pretty_printer.PrettyPrint(full_history, GetStartegyThreadsCount(), + log()); log() << "===============================================\n\n"; log().flush(); // Stop, check if the the generated history is linearizable. 
@@ -813,7 +939,7 @@ struct TLAScheduler : Scheduler { } frame.is_new = true; auto size_before = tasks.size(); - tasks.emplace_back(cons.Build(&state, i, -1/* TODO: fix task id for tla, because it is Scheduler and not Strategy class for some reason */)); + tasks.emplace_back(cons.Build(state.get(), i, -1/* TODO: fix task id for tla, because it is Scheduler and not Strategy class for some reason */)); started_tasks++; auto [is_over, res] = ResumeTask(frame, step, switches, thread, true); if (is_over || res.has_value()) { @@ -834,6 +960,8 @@ struct TLAScheduler : Scheduler { return {false, {}}; } + int GetStartegyThreadsCount() const override { return threads.size(); } + PrettyPrinter& pretty_printer; size_t max_tasks; size_t max_rounds; diff --git a/runtime/include/scheduler_fwd.h b/runtime/include/scheduler_fwd.h index 5e0900f1..0b51f7f2 100644 --- a/runtime/include/scheduler_fwd.h +++ b/runtime/include/scheduler_fwd.h @@ -23,6 +23,8 @@ struct Scheduler { virtual Result Run() = 0; + virtual int GetStartegyThreadsCount() const = 0; + virtual ~Scheduler() = default; }; @@ -35,7 +37,7 @@ struct SchedulerWithReplay : Scheduler { virtual Result RunRound() = 0; - virtual Result ExploreRound(int runs) = 0; + virtual Result ExploreRound(int runs, bool log_each_interleaving = false) = 0; virtual Result ReplayRound(const std::vector& tasks_ordering) = 0; diff --git a/runtime/include/strategy_verifier.h b/runtime/include/strategy_verifier.h index 75edf5ae..22f2ef2c 100644 --- a/runtime/include/strategy_verifier.h +++ b/runtime/include/strategy_verifier.h @@ -9,4 +9,6 @@ struct DefaultStrategyTaskVerifier { inline std::optional ReleaseTask(size_t thread_id) { return std::nullopt; } + + inline void Reset() {} }; diff --git a/runtime/include/verifying.h b/runtime/include/verifying.h index 10a9c3d4..fe3336ad 100644 --- a/runtime/include/verifying.h +++ b/runtime/include/verifying.h @@ -6,6 +6,8 @@ #include #include "blocking_primitives.h" +#include "custom_round.h" +#include 
"latomic.h" #include "lib.h" #include "lincheck_recursive.h" #include "logger.h" @@ -45,9 +47,11 @@ struct Opts { size_t tasks; size_t switches; size_t rounds; + int64_t seed; bool minimize; size_t exploration_runs; size_t minimization_runs; + bool wmm_enabled; size_t depth; bool forbid_all_same; bool verbose; @@ -62,12 +66,14 @@ struct DefaultOptions { size_t switches; size_t rounds; size_t depth; + int64_t seed; bool forbid_all_same; bool verbose; const char *strategy; const char *weights; size_t minimization_runs; size_t exploration_runs; + bool wmm_enabled; }; void SetOpts(const DefaultOptions &def); @@ -81,8 +87,12 @@ std::unique_ptr MakeStrategy(Opts &opts, std::vector l) { switch (opts.typ) { case RR: { std::cout << "round-robin\n"; - return std::make_unique>( + auto strategy = std::make_unique>( opts.threads, std::move(l)); + if (opts.seed >= 0) { + strategy->SetSeed(static_cast(opts.seed)); + } + return strategy; } case RND: { std::cout << "random\n"; @@ -94,13 +104,21 @@ std::unique_ptr MakeStrategy(Opts &opts, std::vector l) { throw std::invalid_argument{ "number of threads not equal to number of weights"}; } - return std::make_unique>( + auto strategy = std::make_unique>( opts.threads, std::move(l), std::move(weights)); + if (opts.seed >= 0) { + strategy->SetSeed(static_cast(opts.seed)); + } + return strategy; } case PCT: { std::cout << "pct\n"; - return std::make_unique>(opts.threads, - std::move(l)); + auto strategy = std::make_unique>( + opts.threads, std::move(l)); + if (opts.seed >= 0) { + strategy->SetSeed(static_cast(opts.seed)); + } + return strategy; } default: assert(false && "unexpected type"); @@ -112,13 +130,19 @@ std::unique_ptr MakeStrategy(Opts &opts, std::vector l) { template struct StrategySchedulerWrapper : StrategyScheduler { StrategySchedulerWrapper(std::unique_ptr strategy, - ModelChecker &checker, PrettyPrinter &pretty_printer, - size_t max_tasks, size_t max_rounds, bool minimize, + ModelChecker &checker, + std::vector 
custom_rounds, + PrettyPrinter &pretty_printer, size_t max_tasks, + size_t max_rounds, bool minimize, size_t exploration_runs, size_t minimization_runs) : strategy(std::move(strategy)), - StrategyScheduler(*strategy.get(), checker, pretty_printer, + StrategyScheduler(*strategy.get(), checker, + std::move(custom_rounds), pretty_printer, max_tasks, max_rounds, minimize, - exploration_runs, minimization_runs) {}; + exploration_runs, minimization_runs) { + // active strategy pointer for wmm choice + SetActiveStrategy(this->strategy.get()); + } private: std::unique_ptr strategy; @@ -127,6 +151,7 @@ struct StrategySchedulerWrapper : StrategyScheduler { template std::unique_ptr MakeScheduler(ModelChecker &checker, Opts &opts, const std::vector &l, + std::vector custom_rounds, PrettyPrinter &pretty_printer, const std::function &cancel) { std::cout << "strategy = "; @@ -136,8 +161,9 @@ std::unique_ptr MakeScheduler(ModelChecker &checker, Opts &opts, case RND: { auto strategy = MakeStrategy(opts, std::move(l)); auto scheduler = std::make_unique>( - std::move(strategy), checker, pretty_printer, opts.tasks, opts.rounds, - opts.minimize, opts.exploration_runs, opts.minimization_runs); + std::move(strategy), checker, std::move(custom_rounds), + pretty_printer, opts.tasks, opts.rounds, opts.minimize, + opts.exploration_runs, opts.minimization_runs); return scheduler; } case TLA: { @@ -155,16 +181,23 @@ std::unique_ptr MakeScheduler(ModelChecker &checker, Opts &opts, inline int TrapRun(std::unique_ptr &&scheduler, PrettyPrinter &pretty_printer) { + ltest::ClearTestFailure(); auto result = scheduler->Run(); + if (ltest::HasTestFailure()) { + std::cout << ltest::GetTestFailureMessage() << "\n"; + return 1; + } if (result.has_value()) { if (result->reason == Scheduler::NonLinearizableHistory::Reason::DEADLOCK) { std::cout << "deadlock detected:\n"; - pretty_printer.PrettyPrint(result->seq, std::cout); + pretty_printer.PrettyPrint( + result->seq, scheduler->GetStartegyThreadsCount(), 
std::cout); return 4; // see https://tldp.org/LDP/abs/html/exitcodes.html } else if (result->reason == Scheduler::NonLinearizableHistory::Reason:: NON_LINEARIZABLE_HISTORY) { std::cout << "non linearized:\n"; - pretty_printer.PrettyPrint(result->seq, std::cout); + pretty_printer.PrettyPrint( + result->seq, scheduler->GetStartegyThreadsCount(), std::cout); return 3; } else { std::abort(); @@ -177,7 +210,7 @@ inline int TrapRun(std::unique_ptr &&scheduler, template -int Run(int argc, char *argv[]) { +int Run(int argc, char *argv[], std::vector custom_rounds = {}) { if constexpr (!std::is_same_v) { SetOpts(Spec::options_override_t::GetOptions()); @@ -191,14 +224,16 @@ int Run(int argc, char *argv[]) { std::cout << "tasks = " << opts.tasks << "\n"; std::cout << "switches = " << opts.switches << "\n"; std::cout << "rounds = " << opts.rounds << "\n"; + std::cout << "seed = " << opts.seed << "\n"; std::cout << "minimize = " << std::boolalpha << opts.minimize << "\n"; if (opts.minimize) { std::cout << "exploration runs = " << opts.exploration_runs << "\n"; std::cout << "minimization runs = " << opts.minimization_runs << "\n"; } + std::cout << "wmm enabled = " << std::boolalpha << opts.wmm_enabled << "\n"; std::cout << "targets = " << task_builders.size() << "\n"; - PrettyPrinter pretty_printer{opts.threads}; + PrettyPrinter pretty_printer; using lchecker_t = LinearizabilityCheckerRecursive( - checker, opts, std::move(task_builders), pretty_printer, - &Spec::cancel_t::Cancel); + checker, opts, std::move(task_builders), std::move(custom_rounds), + pretty_printer, &Spec::cancel_t::Cancel); std::cout << "\n\n"; std::cout.flush(); return TrapRun(std::move(scheduler), pretty_printer); @@ -217,12 +252,28 @@ int Run(int argc, char *argv[]) { } // namespace ltest -#define LTEST_ENTRYPOINT_CONSTRAINT(spec_obj_t, strategy_verifier) \ - int main(int argc, char *argv[]) { \ - return ltest::Run(argc, argv); \ +// `...` is used instead of named argument in order to allow +// user to 
specify custom rounds without wrapping them into +// parenthesis `()` manually +#define LTEST_ENTRYPOINT(spec_obj_t, ...) \ + int main(int argc, char *argv[]) { \ + std::vector custom_rounds; \ + __VA_OPT__(std::vector>> builders = \ + {__VA_ARGS__}; \ + for (auto &v : builders) { \ + custom_rounds.emplace_back(std::move(v)); \ + }) \ + return ltest::Run(argc, argv, std::move(custom_rounds)); \ } -#define LTEST_ENTRYPOINT(spec_obj_t) \ - int main(int argc, char *argv[]) { \ - return ltest::Run(argc, argv); \ - }\ +#define LTEST_ENTRYPOINT_CONSTRAINT(spec_obj_t, strategy_verifier, ...) \ + int main(int argc, char *argv[]) { \ + std::vector custom_rounds; \ + __VA_OPT__(std::vector>> builders = \ + {__VA_ARGS__}; \ + for (auto &v : builders) { \ + custom_rounds.emplace_back(std::move(v)); \ + }) \ + return ltest::Run( \ + argc, argv, std::move(custom_rounds)); \ + } \ No newline at end of file diff --git a/runtime/include/verifying_macro.h b/runtime/include/verifying_macro.h index 466f2d7b..88b5ba1d 100644 --- a/runtime/include/verifying_macro.h +++ b/runtime/include/verifying_macro.h @@ -1,6 +1,7 @@ // Keeps as separated file because use in regression tests. #pragma once #include +#include #include #include "generators.h" @@ -11,6 +12,12 @@ namespace ltest { extern std::vector task_builders; +inline void LtestFail(const char *expr, const char *file, int line) { + std::ostringstream oss; + oss << "test failed: " << expr << " at " << file << ":" << line; + throw ltest::TestFailure(oss.str()); +} + } // namespace ltest // Adds an attribute. @@ -25,6 +32,9 @@ extern std::vector task_builders; // standard library will not be inserted. #define as_atomic attr(ltest_atomic) +#define rassert(expr) \ + ((expr) ? 
(void)0 : ltest::LtestFail(#expr, __FILE__, __LINE__)) + namespace ltest { template @@ -78,6 +88,26 @@ struct TargetMethod { } }; +template +struct MethodInvocation { + using Method = std::function; + + inline static TaskBuilder GetTaskBuilder(std::string_view method_name, + std::tuple params, + Method method) { + auto builder = + [method_name, params = std::move(params), method = std::move(method)]( + void *this_ptr, size_t unused_thread_num, int task_id) -> Task { + auto args = std::shared_ptr(new std::tuple(params)); + auto coro = Coro::New(method, this_ptr, args, + <est::toStringArgs, + method_name, task_id); + return coro; + }; + return TaskBuilder(std::string(method_name), builder); + } +}; + template struct TargetMethod { using Method = std::function; @@ -102,6 +132,31 @@ struct TargetMethod { } }; +template +struct MethodInvocation { + using Method = std::function; + + inline static TaskBuilder GetTaskBuilder(std::string_view method_name, + std::tuple params, + Method method) { + auto builder = + [method_name, params = std::move(params), method = std::move(method)]( + void *this_ptr, size_t unused_thread_num, int task_id) -> Task { + // auto wrapper = Wrapper{method}; + auto wrapper = [f = std::move(method)](void *this_ptr, Args &&...args) { + f(reinterpret_cast(this_ptr), std::forward(args)...); + return void_v; + }; + auto args = std::shared_ptr(new std::tuple(params)); + auto coro = Coro::New(wrapper, this_ptr, args, + <est::toStringArgs, + method_name, task_id); + return coro; + }; + return TaskBuilder(std::string(method_name), builder); + } +}; + } // namespace ltest #define declare_task_name(symbol) \ @@ -111,3 +166,11 @@ struct TargetMethod { declare_task_name(symbol); \ ltest::TargetMethod \ symbol##_ltest_method_cls{symbol##_task_name, gen, &cls::symbol}; + +#define stringify_detail(x) #x +#define stringify(x) stringify_detail(x) +#define method_invocation(params, ret, cls, symbol, ...) 
\ + ltest::MethodInvocation< \ + ret, cls __VA_OPT__(, ) __VA_ARGS__>::GetTaskBuilder(stringify(symbol), \ + params, \ + &cls::symbol) \ No newline at end of file diff --git a/runtime/include/wmm/common.h b/runtime/include/wmm/common.h new file mode 100644 index 00000000..e688bfa4 --- /dev/null +++ b/runtime/include/wmm/common.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include + +namespace ltest::wmm { + +using EventId = int; +using EdgeId = int; + +enum class MemoryOrder { Relaxed, Acquire, Release, AcqRel, SeqCst }; + +enum class EventType { DUMMY, READ, WRITE, RMW }; + +enum class EdgeType { + PO, // program order / sequenced before + SC, // seq-cst edge + RF, // reads-from + // TODO: do we need it? since we have hb-clocks already + // HB, // happens-before + MO, // modification order + // TODO: do we need it explicitly? hb-clocks can give the same basically + // SW, // synchronized-with +}; + +struct WmmUtils { + inline static MemoryOrder OrderFromStd(std::memory_order order) { + switch (order) { + case std::memory_order_relaxed: + return MemoryOrder::Relaxed; + case std::memory_order_acquire: + return MemoryOrder::Acquire; + case std::memory_order_release: + return MemoryOrder::Release; + case std::memory_order_acq_rel: + return MemoryOrder::AcqRel; + case std::memory_order_seq_cst: + return MemoryOrder::SeqCst; + default: + throw std::invalid_argument("Unsupported memory order"); + } + } + + inline static std::string OrderToString(MemoryOrder order) { + switch (order) { + case MemoryOrder::Relaxed: + return "Relaxed"; + case MemoryOrder::Acquire: + return "Acquire"; + case MemoryOrder::Release: + return "Release"; + case MemoryOrder::AcqRel: + return "AcqRel"; + case MemoryOrder::SeqCst: + return "SeqCst"; + } + } + + inline static std::string EventTypeToString(EventType type) { + switch (type) { + case EventType::DUMMY: + return "D"; + case EventType::READ: + return "R"; + case EventType::WRITE: + return "W"; + case EventType::RMW: + return "RMW"; + } + 
} + + inline static std::string EdgeTypeToString(EdgeType type) { + switch (type) { + case EdgeType::PO: + return "po"; + case EdgeType::SC: + return "sc"; + case EdgeType::RF: + return "rf"; + // case EdgeType::HB: return "hb"; + // case EdgeType::SW: return "sw"; + case EdgeType::MO: + return "mo"; + } + } + + // thread id to which all initalization events (constructors of atomics) will + // belong to + inline static int INIT_THREAD_ID = 0; +}; + +} // namespace ltest::wmm diff --git a/runtime/include/wmm/edge.h b/runtime/include/wmm/edge.h new file mode 100644 index 00000000..976902a8 --- /dev/null +++ b/runtime/include/wmm/edge.h @@ -0,0 +1,14 @@ +#pragma once + +#include "common.h" + +namespace ltest::wmm { + +struct Edge { + EdgeId id; + EdgeType type; + EventId from; + EventId to; +}; + +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm/event.h b/runtime/include/wmm/event.h new file mode 100644 index 00000000..867edef0 --- /dev/null +++ b/runtime/include/wmm/event.h @@ -0,0 +1,338 @@ +#pragma once + +#include "common.h" +#include "hbclock.h" + +namespace ltest::wmm { + +struct Event { + protected: + Event(EventId id, EventType type, int nThreads, int location, int threadId, + MemoryOrder order) + : id(id), + type(type), + location(location), + threadId(threadId), + order(order), + clock(nThreads) {} + + public: + EventId id; + EventType type; + int location; + int threadId; + MemoryOrder order; + HBClock clock; + std::vector edges; // outgoing edges (e.g. 
`edge.to == this`) + + virtual ~Event() = default; + + virtual std::string AsString() const { + std::stringstream ss; + + ss << id << ":" << WmmUtils::EventTypeToString(type) << ":T" << threadId + << ":L" << location << ":" << WmmUtils::OrderToString(order) << ":" + << clock.AsString(); + + return ss.str(); + } + + virtual void SetReadFromEvent(Event* event) { + assert(false && "'SetReadFromEvent' can only be called on read/rmw events"); + } + + virtual Event* GetReadFromEvent() const { + assert(false && "'GetReadFromEvent' can only be called on read/rmw events"); + return nullptr; + } + + bool HappensBefore(Event* other) const { + return clock.IsSubsetOf(other->clock); + } + + bool IsDummy() const { return type == EventType::DUMMY; } + + bool IsWrite() const { return type == EventType::WRITE; } + + bool IsRead() const { return type == EventType::READ; } + + bool IsRMW() const { return type == EventType::RMW; } + + bool IsWriteOrRMW() const { return IsWrite() || IsRMW(); } + + bool IsReadOrRMW() const { return IsRead() || IsRMW(); } + + virtual bool IsSeqCst() const { return order == MemoryOrder::SeqCst; } + + virtual bool IsAcqRel() const { return order == MemoryOrder::AcqRel; } + + virtual bool IsAcquire() const { return order == MemoryOrder::Acquire; } + + virtual bool IsRelease() const { return order == MemoryOrder::Release; } + + virtual bool IsRelaxed() const { return order == MemoryOrder::Relaxed; } + + virtual bool IsAtLeastAcquire() const { + return order >= MemoryOrder::Acquire && order != MemoryOrder::Release; + } + + virtual bool IsAtLeastRelease() const { + return order >= MemoryOrder::Release; + } + + // Returns true if this event is a RMW and it is resolved to a READ state + virtual bool IsReadRWM() const { return false; } + + // Returns true if this event is a RMW and it is resolved to a MODIFY state + virtual bool IsModifyRMW() const { return false; } + + template + static T GetWrittenValue(Event* event); + + template + static T GetReadValue(Event* 
event); +}; + +struct DummyEvent : Event { + DummyEvent(EventId id, int nThreads, int threadId) + : Event(id, EventType::DUMMY, nThreads, -1 /* non-existing location */, + threadId, MemoryOrder::Relaxed) {} +}; + +template +struct WriteEvent : Event { + WriteEvent(EventId id, int nThreads, int location, int threadId, + MemoryOrder order, T value) + : Event(id, EventType::WRITE, nThreads, location, threadId, order), + value(std::move(value)) {} // , moBefore(-1) + + virtual std::string AsString() const { + std::stringstream ss; + + ss << id << ":" << WmmUtils::EventTypeToString(type) << "(" << value << ")" + << ":T" << threadId << ":L" << location << ":" + << WmmUtils::OrderToString(order) << ":" << clock.AsString(); + + return ss.str(); + } + + T GetWrittenValue() const { return value; } + + T value; +}; + +template +struct ReadEvent : Event { + ReadEvent(EventId id, int nThreads, int location, int threadId, + MemoryOrder order) + : Event(id, EventType::READ, nThreads, location, threadId, order), + readFrom(nullptr) {} + + virtual void SetReadFromEvent(Event* event) override { + assert((event == nullptr || event->IsWriteOrRMW()) && + "Read event must read from write/rmw event"); + readFrom = event; + } + + virtual Event* GetReadFromEvent() const override { return readFrom; } + + T GetReadValue() const { + assert(readFrom != nullptr && + "Read event hasn't set its 'readFrom' write-event"); + return Event::GetWrittenValue(readFrom); + } + + // points to write-event which we read from + Event* readFrom; +}; + +enum RMWState { READ, MODIFY, UNSET }; + +template +struct RMWEvent : Event { + RMWEvent(EventId id, int nThreads, int location, int threadId, T* expected, + T desired, MemoryOrder successOrder, MemoryOrder failureOrder) + : Event(id, EventType::RMW, nThreads, location, threadId, successOrder), + // TODO: expcted might be uninitialized, so we should not dereference it + // blindly, fix it + initialExpectedValue(*expected), + cachedExpectedValue(*expected), + 
expected(expected), + desired(desired), + failureOrder(failureOrder), + readFrom(nullptr) {} + + virtual std::string AsString() const override { + std::stringstream ss; + + ss << id << ":" << WmmUtils::EventTypeToString(type) + << "(written_expected=" << cachedExpectedValue + << ", init_expected=" << initialExpectedValue << ", desired=" << desired + << ", " << StateAsString(state) << ")" + << ":T" << threadId << ":L" << location << ":" + << "succ=" << WmmUtils::OrderToString(order) + << ", fail=" << WmmUtils::OrderToString(failureOrder) << ":" + << clock.AsString(); + + return ss.str(); + } + + /* + All methods below will use a correct memory order of this RMW event, + meaning that for 'state == READ' it will use 'failureOrder', and for + 'state == MODIFY' it will use 'order' (successOrder). + */ + virtual bool IsSeqCst() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::SeqCst; + } + + virtual bool IsAcqRel() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::AcqRel; + } + + virtual bool IsAcquire() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::Acquire; + } + + virtual bool IsRelease() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? 
this->order : failureOrder; + return order == MemoryOrder::Release; + } + + virtual bool IsRelaxed() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order == MemoryOrder::Relaxed; + } + + virtual bool IsAtLeastAcquire() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order >= MemoryOrder::Acquire && order != MemoryOrder::Release; + } + + virtual bool IsAtLeastRelease() const override { + assert(state != RMWState::UNSET && + "RMW event's memory order is queried for its resolving"); + auto order = (state == RMWState::MODIFY) ? this->order : failureOrder; + return order >= MemoryOrder::Release; + } + + virtual bool IsReadRWM() const override { return state == RMWState::READ; } + + virtual bool IsModifyRMW() const override { + return state == RMWState::MODIFY; + } + + virtual void SetReadFromEvent(Event* event) override { + assert((event == nullptr || event->IsWriteOrRMW()) && + "Read event must read from write/rmw event"); + readFrom = event; + + if (readFrom == nullptr) { + state = RMWState::UNSET; + *expected = initialExpectedValue; // reset expected value + cachedExpectedValue = initialExpectedValue; + } else { + T readValue = Event::GetWrittenValue(readFrom); + assert(*expected == initialExpectedValue && + "Expected value must be equal to initial expected value on RMW " + "resolving"); + if (readValue == initialExpectedValue) { + // in case of MODIFY we do not change expected value + state = RMWState::MODIFY; + } else { + // in case of READ we set expected to the actually read value + state = RMWState::READ; + *expected = readValue; + cachedExpectedValue = readValue; + } + } + } + + virtual Event* GetReadFromEvent() const override { return readFrom; } + + T GetReadValue() 
const { + assert(readFrom != nullptr && + "RMW event hasn't set its 'readFrom' write-event"); + return Event::GetWrittenValue(readFrom); + } + + T GetWrittenValue() const { + assert(state != RMWState::UNSET && + "RMW event must have a resolved state (not UNSET)"); + if (state == RMWState::MODIFY) { + // in case of MODIFY we return desired value + return desired; + } else { + // in case of READ we assume that RMW writes the value that it reads from + // its own 'readFrom' event (which is saved to the *expected pointer) + return *expected; + } + } + + RMWState state = UNSET; // current state of the RMW operation + T initialExpectedValue; + // TODO: I assume, that boost stack manipulation causes a failure when + // dereferencing of '*expected' from different thread, and it causes sanitizer + // to fails. Requires inverstigation to debug it. + T cachedExpectedValue; + T* expected; + T desired; + MemoryOrder failureOrder; + // points to write/rmw-event which we read from + Event* readFrom; + + private: + inline static std::string StateAsString(RMWState state) { + switch (state) { + case RMWState::READ: + return "READ"; + case RMWState::MODIFY: + return "MODIFY"; + case RMWState::UNSET: + return "UNSET"; + } + } +}; + +template +T Event::GetWrittenValue(Event* event) { + assert(event != nullptr && "Event must not be null"); + assert(event->IsWriteOrRMW() && "Only write/rmw events can write values"); + + if (event->IsWrite()) { + auto writeEvent = static_cast*>(event); + return writeEvent->GetWrittenValue(); + } else { + auto rmwEvent = static_cast*>(event); + return rmwEvent->GetWrittenValue(); + } +} + +template +T Event::GetReadValue(Event* event) { + assert(event->IsReadOrRMW() && "Event must be a read/rmw event"); + if (event->IsRead()) { + ReadEvent* readEvent = static_cast*>(event); + return readEvent->GetReadValue(); + } else { + RMWEvent* rmwEvent = static_cast*>(event); + return rmwEvent->GetReadValue(); + } +} + +} // namespace ltest::wmm \ No newline at end of 
file diff --git a/runtime/include/wmm/graph.h b/runtime/include/wmm/graph.h new file mode 100644 index 00000000..54e25e9d --- /dev/null +++ b/runtime/include/wmm/graph.h @@ -0,0 +1,762 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "../logger.h" +#include "common.h" +#include "edge.h" +#include "event.h" + +namespace ltest::wmm { + +class Graph { + public: + Graph() {} + ~Graph() { Clean(); } + + template + struct ReadCandidate { + // тут же можно хранить различные метрики + Event* read_event; + Event* write_event; + T value; + }; + + void Reset(int nThreads) { + Clean(); + InitThreads(nThreads); + } + + // TODO: add `ExecutionPolicy` or other way of specifying how to create edges + // (Random, BoundedModelChecker, etc.) + template + T AddReadEvent(int location, int threadId, MemoryOrder order) { + EventId eventId = events.size(); + auto event = new ReadEvent(eventId, nThreads, location, threadId, order); + + // establish po-edge + CreatePoEdgeToEvent(event); // prevInThread --po--> event + + auto shuffledEvents = GetShuffledReadFromCandidates(event); + for (auto readFromEvent : shuffledEvents) { + // try reading from `readFromEvent` + if (TryCreateRfEdge(readFromEvent, event)) { + log() << "Read event " << event->AsString() << " now reads from " + << readFromEvent->AsString() << "\n"; + break; + } + } + + assert(event->readFrom != nullptr && + "Read event must have appropriate write event to read from"); + assert((event->readFrom->IsWrite() || event->readFrom->IsModifyRMW()) && + "Read event must read from write or modifying rmw event"); + return Event::GetReadValue(event); + } + + // creates a read event and returns all consistent read-from candidates + // caller is responsible for choose + template + std::vector> GetReadFromCandidates(int location, + int threadId, + MemoryOrder order) { + EventId eventId = events.size(); + auto event = new ReadEvent(eventId, nThreads, location, threadId, order); + + 
CreatePoEdgeToEvent(event); + + std::vector> candidates; + auto readFromEvents = GetReadFromCandidatesForEvent(event); + for (auto writeEvent : readFromEvents) { + if (TryCreateRfEdgeWithCommit(writeEvent, event, false)) { + candidates.push_back(ReadCandidate{ + event, + writeEvent, + Event::GetWrittenValue(writeEvent), + }); + } + } + + return candidates; + } + + // applies chosen candidate and returns read value + template + T ApplyReadCandidate(const ReadCandidate& candidate) { + bool ok = + TryCreateRfEdgeWithCommit(candidate.write_event, candidate.read_event, + true); + assert(ok && "Chosen read candidate must be consistent"); + return Event::GetReadValue(candidate.read_event); + } + + template + void AddWriteEvent(int location, int threadId, MemoryOrder order, T value) { + EventId eventId = events.size(); + auto event = + new WriteEvent(eventId, nThreads, location, threadId, order, value); + + // establish po-edge + CreatePoEdgeToEvent(event); // prevInThread --po--> event + + if (order == MemoryOrder::SeqCst) { + // establish sc-edge between last sc-write and event + CreateScEdgeToEvent(event); // prevScCstWrite --sc--> event + + // Seq-Cst / MO Consistency (if locations match) + CreateSeqCstConsistencyEdges(event); // prevScCstWrite --mo--> event + + // update last seq_cst write + lastSeqCstWriteEvents[event->location] = event->id; + } + + // Read-Write Coherence + CreateReadWriteCoherenceEdges(event); + + // Write-Write Coherence + CreateWriteWriteCoherenceEdges(event); + } + + template + std::pair AddRMWEvent(int location, int threadId, T* expected, + T desired, MemoryOrder successOrder, + MemoryOrder failureOrder) { + EventId eventId = events.size(); + auto event = new RMWEvent(eventId, nThreads, location, threadId, + expected, desired, successOrder, failureOrder); + + // establish po-edge + CreatePoEdgeToEvent(event); // prevInThread --po--> event + + auto shuffledEvents = GetShuffledReadFromCandidates(event); + for (auto readFromEvent : shuffledEvents) 
{ + // try reading from `readFromEvent` + if (TryCreateRfEdge(readFromEvent, event)) { + log() << "RMW event " << event->AsString() << " now reads from " + << readFromEvent->AsString() << "\n"; + break; + } + } + + assert(event->readFrom != nullptr && + "RMW event must have appropriate write event to read from"); + assert((event->readFrom->IsWrite() || event->readFrom->IsModifyRMW()) && + "RMW event must read from write or modifying rmw event"); + return { + event->IsModifyRMW(), // true if RMW is resolved to MODIFY state, false + // if rmw failed and resolve to READ state + Event::GetReadValue(event)}; + } + + template + void Print(Out& os) const { + os << "Graph edges:" << "\n"; + if (edges.empty()) + os << ""; + else { + for (const auto& edge : edges) { + os << events[edge.from]->AsString() << " ->" + << WmmUtils::EdgeTypeToString(edge.type) << " " + << events[edge.to]->AsString() << "\n"; + } + } + os << "\n"; + } + + private: + std::vector GetReadFromCandidatesForEvent(Event* event) { + auto filteredEventsView = + events | std::views::filter([event](Event* e) { + return ((e->IsWrite() || e->IsModifyRMW()) && + e->location == event->location && e != event); + }); + return {filteredEventsView.begin(), filteredEventsView.end()}; + } + + std::vector GetShuffledReadFromCandidates(Event* event) { + // Shuffle events to randomize the order of read-from edges + // and allow for more non-sc behaviours + auto filteredEventsView = + events | std::views::filter([event](Event* e) { + return ((e->IsWrite() || e->IsModifyRMW()) && + e->location == event->location && e != event); + }); + std::vector shuffledEvents(filteredEventsView.begin(), + filteredEventsView.end()); + std::ranges::shuffle(shuffledEvents, gen); + + return shuffledEvents; + } + + // Tries to create a read-from edge between `write` and `read` events (write + // --rf--> read). Returns `true` if edge was created, `false` otherwise. 
+ bool TryCreateRfEdge(Event* write, Event* read) { + return TryCreateRfEdgeWithCommit(write, read, true); + } + + // Tries to create a read-from edge. If commit is false, all changes are + // discarded and graph stays intact. + bool TryCreateRfEdgeWithCommit(Event* write, Event* read, bool commit) { + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and Read events must be of correct type"); + assert(write->location == read->location && + "Write and Read events must be of the same location"); + + StartSnapshot(); + + // establish rf-edge + AddEdge(EdgeType::RF, write->id, read->id); + // remember the event we read from + read->SetReadFromEvent(write); + + // update the clock if synchronized-with relation has appeared + // and save the old clock in case snapshot is discarded + HBClock oldClock; + bool isClockUpdated = false; + + // update the last seq-cst write event in case of successful RMW event + // and save the old value in case snapshot is discarded + EventId oldLastSeqCstWriteEvent = lastSeqCstWriteEvents[read->location]; + + // Applying rel-acq semantics via establishing synchronized-with (SW) + // relation + if (read->IsAtLeastAcquire() && write->IsAtLeastRelease() && + // TODO: maybe somehow get rid of the sc-edges instead? 
+ !(read->IsSeqCst() && + write->IsSeqCst()) // this case no need to consider, because we have + // sc edges instead + ) { + isClockUpdated = true; + // save the old clock + oldClock = read->clock; + // instantitate a SW (synchronized-with) relation + read->clock.UniteWith(write->clock); + } + + if (read->IsRead() || read->IsReadRWM()) { + // in case of 'read' event has actually type RMW but is resolved to READ + // state (basically the comparison of read value with `expected` one + // failed), then we treat it as a regular READ event + if (read->IsSeqCst()) { + // establish sc-edge + CreateScEdgeToEvent(read); // prevScCstWrite --sc--> event + // Seq-Cst Write-Read Coherence + CreateSeqCstReadWriteCoherenceEdges(write, read); + } + + // Write-Read Coherence + CreateWriteReadCoherenceEdges(write, read); + + // Read-Read Coherence + CreateReadReadCoherenceEdges(write, read); + } else { + // otherwise, if the RWM event is resolved to MODIFY state + // then we treat that as RMW event and apply rules from reads and writes + // as well + assert(read->IsModifyRMW() && + "Read event must be a successful RMW event in this code branch"); + + // Apply RMW-specific rules + // RMW / MO Consistency + CreateRmwConsistencyEdges(write, read); + + // RMW Atomicity + CreateRmwAtomicityEdges(write, read); + + // Applying rules both from READs and WRITEs + if (read->IsSeqCst()) { + // establish sc-edge + CreateScEdgeToEvent(read); // prevScCstWrite --sc--> event + // Seq-Cst Write-Read Coherence + CreateSeqCstReadWriteCoherenceEdges(write, read); + // Seq-Cst / MO Consistency (if locations match) + CreateSeqCstConsistencyEdges(read); // prevScCstWrite --mo--> event + + // update last seq_cst write (will be restored in case of snapshot + // discard) + lastSeqCstWriteEvents[read->location] = read->id; + } + + // Write-Read Coherence + CreateWriteReadCoherenceEdges(write, read); + + // Read-Read Coherence + CreateReadReadCoherenceEdges(write, read); + + // Read-Write Coherence + 
CreateReadWriteCoherenceEdges(read); + + // Write-Write Coherence + CreateWriteWriteCoherenceEdges(read); + } + + bool isConsistent = IsConsistent(); + if (isConsistent && commit) { + log() << "Consistent graph:" << "\n"; + Print(log()); + // preserve added edges and other modifications + ApplySnapshot(); + return true; + } + + // Non-commit or inconsistent path: rollback state. + log() << (isConsistent ? "Candidate accepted (no-commit)" : + "Not consistent graph") + << ":" << "\n"; + Print(log()); + // removes all added edges + log() << "Discarding snapshot" << "\n"; + DiscardSnapshot(); + // remove rf-edge + read->SetReadFromEvent(nullptr); + // restore old clock if necessary + if (isClockUpdated) { + read->clock = oldClock; + } + // restore last seq-cst write event + lastSeqCstWriteEvents[read->location] = oldLastSeqCstWriteEvent; + Print(log()); + + return isConsistent; + } + + // ===== Methods to create general graph edges ===== + + // Creates a po-edge between last event in the same thread as `event` + void CreatePoEdgeToEvent(Event* event) { + int threadId = event->threadId; + EventId eventId = event->id; + + // connect prev event in the same thread with new event via PO edge + auto lastEventInSameThread = events[eventsPerThread[threadId].back()]; + AddEdge(EdgeType::PO, lastEventInSameThread->id, eventId); + + // update last event in thread + eventsPerThread[threadId].push_back(eventId); + + // insert in all-events vector + events.push_back(event); + + // set correct hb-clocks for new event + event->clock = lastEventInSameThread->clock; + event->clock.Increment(threadId); + } + + // Adds an sc-edge between prev sc-write (to the same location as `event`) and + // `event` + void CreateScEdgeToEvent(Event* event) { + assert(event->IsSeqCst() && "Event must be an SC access"); + + // last seq_cst write should appear before us + EventId lastSeqCstWriteEvent = GetLastSeqCstWriteEventId(event->location); + if (lastSeqCstWriteEvent != -1) { + auto lastSeqCstWrite = 
events[lastSeqCstWriteEvent]; + assert((lastSeqCstWrite->IsWrite() || lastSeqCstWrite->IsModifyRMW()) && + "Prev scq-cst event must be a write or modifying rmw"); + AddEdge(EdgeType::SC, lastSeqCstWrite->id, event->id); + + // unite current hb-clock with last seq-cst write + event->clock.UniteWith(lastSeqCstWrite->clock); + } + } + + // ===== Methods to create mo edges ===== + + // Applies Seq-Cst Write-Read Coherence rules: establishes mo-edge between + // last sc-write to the same location and `write` event + // W'_x --sc--> R_x W'_x --sc--> R_x + // ^ \ ^ + // | \ | + // rf => \ rf + // | \ | + // W_x --mo--> W_x + void CreateSeqCstReadWriteCoherenceEdges(Event* write, Event* read) { + // TODO: in all such assertions do I need to check for correct RMW type? + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and read events must be of correct type"); + assert(write->location == read->location && + "Write and read events must be of the same location"); + assert(read->GetReadFromEvent() == write && + "Read event must read-from the write event"); + + EventId lastSeqCstWriteEvent = GetLastSeqCstWriteEventId(read->location); + // no such event, no need to create mo edge + if (lastSeqCstWriteEvent == -1) return; + + // create mo-edge between last sc-write and `write` that `read` event + // reads-from + auto lastSeqCstWrite = events[lastSeqCstWriteEvent]; + assert((lastSeqCstWrite->IsWrite() || lastSeqCstWrite->IsModifyRMW()) && + "Last sc-write event must be a write or modifying rmw"); + assert(lastSeqCstWrite->location == read->location && + "Last sc-write event must have the same location as read event"); + + if (lastSeqCstWrite->id == write->id) + return; // no need to create edge to itself + + // TODO: check that sc-edge between lastSeqCstWrite and read exists + AddEdge(EdgeType::MO, lastSeqCstWrite->id, write->id); + } + + // Applies Write-Read Coherence rules: establishes mo-edges between + // stores (which happened-before `read`) and `write` + // 
W'_x --hb--> R_x W'_x --hb--> R_x + // ^ \ ^ + // | \ | + // rf => \ rf + // | \ | + // W_x --mo--> W_x + void CreateWriteReadCoherenceEdges(Event* write, Event* read) { + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and read events must be of correct type"); + assert(write->location == read->location && + "Write and read events must be of the same location"); + assert(read->GetReadFromEvent() == write && + "Read event must read-from the write event"); + + IterateThroughMostRecentEventsByPredicate( + [write, read](Event* otherEvent) -> bool { + return (read->id != otherEvent->id && + write->id != otherEvent->id && // not the same events + (otherEvent->IsWrite() || otherEvent->IsModifyRMW()) && + otherEvent->location == read->location && + otherEvent->HappensBefore(read)); + }, + [this, write](Event* otherEvent) -> void { + // establish mo edge + AddEdge(EdgeType::MO, otherEvent->id, write->id); + }); + } + + // Applies Read-Read Coherence rules: establishes mo-edges between + // `write` and write events from which other read events + // (which happened-before `read`) reaf-from. 
+ // W'_x --rf--> R'_x W'_x --rf--> R'_x + // | | | + // hb => mo hb + // | | | + // v v v + // W_x --rf--> R_x W_x --rf--> R_x + void CreateReadReadCoherenceEdges(Event* write, Event* read) { + assert(write->IsWriteOrRMW() && read->IsReadOrRMW() && + "Write and read events must be of correct type"); + assert(write->location == read->location && + "Write and read events must be of the same location"); + assert(read->GetReadFromEvent() == write && + "Read event must read-from the write event"); + + IterateThroughMostRecentEventsByPredicate( + [write, read](Event* otherEvent) -> bool { + return (read->id != otherEvent->id && + write->id != otherEvent->id && // not the same events + otherEvent->IsReadOrRMW() && + otherEvent->location == read->location && + otherEvent->HappensBefore(read) && + otherEvent->GetReadFromEvent() != nullptr && + otherEvent->GetReadFromEvent() != + write // R'_x does not read-from `write` + ); + }, + [this, write](Event* otherRead) -> void { + auto otherWrite = otherRead->GetReadFromEvent(); + // establish mo-edge + AddEdge(EdgeType::MO, otherWrite->id, write->id); + }); + } + + // TODO: instead of sc-edges, add reads-from from "Repairing Sequential + // Consistency in C/C++11"? 
Applies Seq-Cst / MO Consistency rules: + // establishes mo-edge between last sc-write and current sc-write-event if + // their locations match W'_x --sc--> W_x => W'_x --mo--> W_x + void CreateSeqCstConsistencyEdges(Event* event) { + assert(event->IsWriteOrRMW() && event->IsSeqCst() && + "Event must be a write/rmw with seq-cst order"); + + EventId lastSeqCstWriteEvent = GetLastSeqCstWriteEventId(event->location); + if (lastSeqCstWriteEvent == -1) return; + auto lastSeqCstWrite = events[lastSeqCstWriteEvent]; + assert((lastSeqCstWrite->IsWrite() || lastSeqCstWrite->IsModifyRMW()) && + "Last sc-write event must be a write or modifying rmw"); + assert(lastSeqCstWrite->location == event->location && + "Last sc-write event must have the same location as event"); + // TODO: check that sc-edge between lastSeqCstWrite and read exists + AddEdge(EdgeType::MO, lastSeqCstWrite->id, event->id); + } + + // Applies Write-Write Coherence rules: establishes mo-edges between + // other writes that happened before `event` only for the same location + // W'_x --hb--> W_x => W'_x --mo--> W_x + void CreateWriteWriteCoherenceEdges(Event* event) { + assert(event->IsWriteOrRMW()); + + IterateThroughMostRecentEventsByPredicate( + [event](Event* otherEvent) -> bool { + return (event->id != otherEvent->id && // not the same event + (otherEvent->IsWrite() || otherEvent->IsModifyRMW()) && + otherEvent->location == event->location && // same location + otherEvent->HappensBefore(event)); + }, + [this, event](Event* otherEvent) -> void { + // establish mo edge + AddEdge(EdgeType::MO, otherEvent->id, event->id); + }); + } + + // Applies Read-Write Coherence rules: establishes mo-edges between + // stores that are read-from by reads which happened-before our write `event` + // W'_x --rf--> R'_x W'_x --rf--> R'_x + // | \ | + // hb => \ hb + // | \ | + // v \ v + // W_x --mo--> W_x + void CreateReadWriteCoherenceEdges(Event* event) { + assert(event->IsWriteOrRMW()); + + 
IterateThroughMostRecentEventsByPredicate( + [event](Event* otherEvent) -> bool { + return (event->id != otherEvent->id && // not the same event + otherEvent->IsReadOrRMW() && + otherEvent->location == event->location && // same location + otherEvent->HappensBefore(event)); + }, + [this, event](Event* otherEvent) -> void { + assert(otherEvent->GetReadFromEvent() != nullptr && + "Read event must have read-from event"); + auto writeEvent = otherEvent->GetReadFromEvent(); + // establish mo edge + AddEdge(EdgeType::MO, writeEvent->id, event->id); + }); + } + + // Applies RMW Consitency rules: establishes mo-edge between + // store that is read-from by the current RMW event and the RMW event itself. + // W_x --rf--> RMW_x => W_x --mo--> RMW_x + void CreateRmwConsistencyEdges(Event* write, Event* rmw) { + assert(write->IsWriteOrRMW() && rmw->IsModifyRMW() && + "Write and RMW events must be of correct type"); + assert(write->location == rmw->location && + "Write and RMW events must be of the same location"); + assert(rmw->GetReadFromEvent() == write && + "RMW event must read-from the Write event"); + + // establish mo-edge + AddEdge(EdgeType::MO, write->id, rmw->id); + } + + // Applies RMW Atomicity rules: establishes mo-edges between + // RMW event and all writes that are mo-after `write` event + // (from which RMW reads-from) + // RMW_x <--rf-- W_x RMW_x <--rf-- W_x + // | \ | + // mo => \ mo + // | \ | + // v \ v + // W'_x --mo--> W'_x + void CreateRmwAtomicityEdges(Event* write, Event* rmw) { + assert(write->IsWriteOrRMW() && rmw->IsModifyRMW() && + "Write and RMW events must be of correct type"); + assert(write->location == rmw->location && + "Write and RMW events must be of the same location"); + assert(rmw->GetReadFromEvent() == write && + "RMW event must read-from the Write event"); + + for (EdgeId edgeId : write->edges) { + Edge& edge = edges[edgeId]; + if (edge.type == EdgeType::MO && edge.to != rmw->id) { + // establish mo-edge + AddEdge(EdgeType::MO, rmw->id, 
edge.to); + } + } + } + + // ===== Helper methods ===== + + template + requires requires(Predicate p, Event* event) { + { p(event) } -> std::same_as; + } && requires(Callback cb, Event* event) { + { cb(event) } -> std::same_as; + } + void IterateThroughMostRecentEventsByPredicate(Predicate&& predicate, + Callback&& callback) { + // iterate through each thread and find last write-event that hb `event` + for (int t = 0; t < nThreads; ++t) { + // iterate from most recent to earliest events in thread `t` + const auto& threadEvents = eventsPerThread[t]; + for (auto it = threadEvents.rbegin(); it != threadEvents.rend(); ++it) { + Event* otherEvent = events[*it]; + if (!std::forward(predicate)(otherEvent)) continue; + // invoke `callback` on the most recent event in the thread `t` + std::forward(callback)(otherEvent); + break; // no need to invoke `callback` with earlier events from this + // thread + } + } + } + + EventId GetLastSeqCstWriteEventId(int location) const { + if (lastSeqCstWriteEvents.contains(location)) { + return lastSeqCstWriteEvents.at(location); + } + return -1; + } + + void AddEdge(EdgeType type, EventId from, EventId to) { + // for mo edges we might add duplicates, so we need to check that such + // mo-edge does not exist + if (type == EdgeType::MO && ExistsEdge(type, from, to)) { + // log() << "Edge already exists: " << events[from]->AsString() << " + // --" << WmmUtils::EdgeTypeToString(type) << "--> " + // << events[to]->AsString() << "\n"; + return; + } + + auto& from_edges = events[from]->edges; + EdgeId eId = edges.size(); + Edge e = {eId, type, from, to}; + edges.push_back(e); + from_edges.push_back(eId); + + if (inSnapshotMode) { + snapshotEdges.insert(eId); + } + } + + bool ExistsEdge(EdgeType type, EventId from, EventId to) const { + const auto& from_edges = events[from]->edges; + auto it = + std::ranges::find_if(from_edges, [this, from, to, type](EdgeId eId) { + auto& edge = edges[eId]; + return edge.from == from && edge.to == to && 
edge.type == type; + }); + return it != from_edges.end(); + } + + // Check execution graph for consistency createria: + // * modification order is acyclic + bool IsConsistent() { + // TODO: should consistency criteria be taken from paper "Repairing + // Sequential Consistency in C/C++11"? + enum { NOT_VISITED = 0, IN_STACK = 1, VISITED = 2 }; + std::vector colors(events.size(), + NOT_VISITED); // each event is colored 0 (not + // visited), 1 (entered), 2 (visited) + std::vector> stack; + + for (auto e : events) { + assert(colors[e->id] != IN_STACK && + "Should not be possible, invalid cycle detection"); + if (colors[e->id] == VISITED) continue; + stack.push_back({e, false}); + + while (!stack.empty()) { + auto [event, considred] = stack.back(); + EventId eventId = event->id; + + stack.pop_back(); + if (considred) { + colors[eventId] = VISITED; + continue; + } + stack.push_back( + {event, + true}); // next time we take it out, we do not traverse its edges + + for (auto edgeId : event->edges) { + Edge& edge = edges[edgeId]; + if (edge.type != EdgeType::MO) continue; + if (colors[edge.to] == NOT_VISITED) { + stack.push_back({events[edge.to], false}); + colors[edge.to] = IN_STACK; + } else if (colors[edge.to] == IN_STACK) { + // cycle detected + return false; + } + } + } + } + + return true; + } + + void StartSnapshot() { + assert(!inSnapshotMode && "Snapshot started twice"); + inSnapshotMode = true; + } + + void ApplySnapshot() { + assert(inSnapshotMode && "Applying snapshot not in snapshot mode"); + inSnapshotMode = false; + snapshotEdges.clear(); + } + + void DiscardSnapshot() { + assert(inSnapshotMode && "Discarding snapshot not in snapshot mode"); + // clearing all added edges from the graph + // TODO: make sure below 'note' is true + // Note: all appended edges will be in the suffixes of all edges arrays + // 1. removing from edges vector + while (!edges.empty() && snapshotEdges.contains(edges.back().id)) { + edges.pop_back(); + } + + // 2. 
removing from events edges + for (auto event : events) { + auto& eventEdges = event->edges; + while (!eventEdges.empty() && snapshotEdges.contains(eventEdges.back())) { + eventEdges.pop_back(); + } + } + + // reset snapshot state + inSnapshotMode = false; + snapshotEdges.clear(); + } + + void Clean() { + edges.clear(); + for (auto event : events) { + delete event; + } + events.clear(); + eventsPerThread.clear(); + lastSeqCstWriteEvents.clear(); + } + + void InitThreads(int nThreads) { + this->nThreads = nThreads; + eventsPerThread.resize(nThreads); + + // insert dummy events (with all-zero hbClocks), + // which will be the first event in each thread + for (int t = 0; t < nThreads; ++t) { + // TODO: DummyEvents are all ?seq-cst? (now rlx) writes, do I need to add + // proper ?sc?-egdes between them? For now I don't + int eventId = events.size(); + auto dummyEvent = new DummyEvent(eventId, nThreads, t); + events.push_back(dummyEvent); + eventsPerThread[t].push_back(eventId); + } + } + + std::vector edges; + std::vector events; + std::vector> eventsPerThread; + std::map lastSeqCstWriteEvents; + std::unordered_set + snapshotEdges; // edges that are part of the snapshot (which case be + // discarded or applied, which is usefull when adding + // rf-edge) + std::mt19937 gen{std::random_device{}()}; // random number generator for + // randomized rf-edge selection + bool inSnapshotMode = false; + int nThreads = 0; +}; + +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm/hbclock.h b/runtime/include/wmm/hbclock.h new file mode 100644 index 00000000..d1d6e013 --- /dev/null +++ b/runtime/include/wmm/hbclock.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace ltest::wmm { +struct HBClock { + HBClock() = default; + HBClock(int nThreads) : times(nThreads, 0) {} + + std::string AsString() const { + std::stringstream ss; + + ss << "["; + for (size_t i = 0; i < times.size(); ++i) { + ss << times[i]; + 
if (i < times.size() - 1) { + ss << ","; + } + } + ss << "]"; + + return ss.str(); + } + + bool IsSubsetOf(const HBClock& other) const { + assert(IsSameLength(other)); + + for (int i = 0; i < times.size(); ++i) { + if (times[i] > other.times[i]) { + return false; + } + } + + return true; + } + + void UniteWith(const HBClock& other) { + assert(IsSameLength(other)); + + for (int i = 0; i < times.size(); ++i) { + times[i] = std::max(times[i], other.times[i]); + } + } + + void Increment(int threadId) { + assert(threadId >= 0 && threadId < times.size()); + times[threadId]++; + } + + private: + bool IsSameLength(const HBClock& other) const { + return times.size() == other.times.size(); + } + + std::vector times; +}; +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm/wmm.h b/runtime/include/wmm/wmm.h new file mode 100644 index 00000000..639b0fde --- /dev/null +++ b/runtime/include/wmm/wmm.h @@ -0,0 +1,147 @@ +#pragma once + +#include +#include +#include + +#include "../logger.h" +#include "common.h" +#include "graph.h" + +namespace ltest::wmm { + +extern bool wmm_enabled; + +class ExecutionGraph { + public: + ExecutionGraph(const ExecutionGraph&) = delete; + ExecutionGraph& operator=(const ExecutionGraph&) = delete; + ExecutionGraph(ExecutionGraph&&) = delete; + ExecutionGraph& operator=(ExecutionGraph&&) = delete; + + static ExecutionGraph& getInstance() { + static ExecutionGraph instance; // Thread-safe in C++11 and later + return instance; + } + + // Empties graph events and sets new number of threads. 
+ void Reset(int nThreads) { + log() << "Reset Graph: threads=" << nThreads << "\n"; + this->nThreads = nThreads; + this->nextLocationId = 0; + + // for lazy addr-location registration we bind init-write to the + // registering worker thread + WmmUtils::INIT_THREAD_ID = 0; + + locationByAddr.clear(); + + graph.Reset(nThreads); + graph.Print(log()); + } + + // When new location is constructed, it registers itself in the wmm-graph + // in order to generate corresponding initialization event. + template + int RegisterLocation(T value, int registrarThreadId) { + if (registrarThreadId < 0 || registrarThreadId >= nThreads) { + registrarThreadId = 0; + } + int currentLocationId = nextLocationId++; + log() << "Register location: loc-" << currentLocationId + << ", init value=" << value + << ", registrar thread=" << registrarThreadId << "\n"; + graph.AddWriteEvent(currentLocationId, registrarThreadId, + MemoryOrder::SeqCst, value); + + graph.Print(log()); + return currentLocationId; + } + + template + int GetOrRegisterLocation(void* addr, T initial_value, int threadId) { + auto it = locationByAddr.find(addr); + if (it != locationByAddr.end()) { + return it->second; + } + int locationId = RegisterLocation(initial_value, threadId); + locationByAddr[addr] = locationId; + return locationId; + } + + template + T Load(int location, int threadId, MemoryOrder order) { + // TODO: if we now only do real atomics, then they should be stored in + // graph, I guess? 
+ log() << "Load: loc-" << location << ", thread=" << threadId + << ", order=" << WmmUtils::OrderToString(order) << "\n"; + T readValue = graph.AddReadEvent(location, threadId, order); + + graph.Print(log()); + return readValue; + } + + // all consistent read-from candidates for the requested load + template + std::vector> LoadCandidates( + int location, int threadId, MemoryOrder order) { + log() << "Load candidates: loc-" << location << ", thread=" << threadId + << ", order=" << WmmUtils::OrderToString(order) << "\n"; + auto candidates = graph.GetReadFromCandidates(location, threadId, order); + + graph.Print(log()); + return candidates; + } + + // applies chosen candidate and returns the read value + template + T ApplyReadCandidate( + const typename Graph::template ReadCandidate& candidate) { + log() << "Apply read candidate: loc-" << candidate.read_event->location + << ", thread=" << candidate.read_event->threadId << "\n"; + T readValue = graph.ApplyReadCandidate(candidate); + graph.Print(log()); + return readValue; + } + + template + void Store(int location, int threadId, MemoryOrder order, T value) { + log() << "Store: loc-" << location << ", thread=" << threadId + << ", order=" << WmmUtils::OrderToString(order) << ", value=" << value + << "\n"; + graph.AddWriteEvent(location, threadId, order, value); + + graph.Print(log()); + } + + // TODO: generalize to any other type of the RMW operation + // (which have different method signature from compare_and_set, e.g. + // fetch_add, etc.) 
+ template + std::pair ReadModifyWrite(int location, int threadId, T* expected, + T desired, MemoryOrder success, + MemoryOrder failure) { + log() << "RMW: loc-" << location << ", thread=" << threadId + << ", expected=" << *expected << ", desired=" << desired + << ", success=" << WmmUtils::OrderToString(success) + << ", failure=" << WmmUtils::OrderToString(failure) << "\n"; + auto rmwResult = graph.AddRMWEvent(location, threadId, expected, desired, + success, failure); + graph.Print(log()); + log() << "RMW result: " << (rmwResult.first ? "MODIFY" : "READ") + << ", value=" << rmwResult.second << "\n"; + return rmwResult; + } + + private: + ExecutionGraph() = default; + ~ExecutionGraph() = default; + + int nThreads = 0; + int nextLocationId = 0; + std::unordered_map locationByAddr; + Graph graph; + // TODO: here can add real atomic's name via clangpass +}; + +} // namespace ltest::wmm \ No newline at end of file diff --git a/runtime/include/wmm_runtime.h b/runtime/include/wmm_runtime.h new file mode 100644 index 00000000..36f0b608 --- /dev/null +++ b/runtime/include/wmm_runtime.h @@ -0,0 +1,130 @@ +#pragma once + +#include + +// коды операций rmw, которые llvmpass передаёт в runtime +// нужны, чтобы одна функция __ltest_wmm_rmw_ обслуживала все rmw +// соответствие задаётся в yieldpass.cpp - AtomicRMWInst::getOperation() +enum LTestRmwOp : int { + LTEST_RMW_ADD = 0, + LTEST_RMW_SUB = 1, + LTEST_RMW_AND = 2, + LTEST_RMW_OR = 3, + LTEST_RMW_XOR = 4, + LTEST_RMW_XCHG = 5, + LTEST_RMW_MAX = 6, + LTEST_RMW_MIN = 7, + LTEST_RMW_UMAX = 8, + LTEST_RMW_UMIN = 9, +}; + +#ifdef __cplusplus +extern "C" { +#endif + +// strategy's choice for read candidates +// index in [0, candidates_count) +int __ltest_wmm_choice(int candidates_count); + +bool __ltest_wmm_load_i1(void* addr, int order); +int8_t __ltest_wmm_load_i8(void* addr, int order); +int16_t __ltest_wmm_load_i16(void* addr, int order); +int32_t __ltest_wmm_load_i32(void* addr, int order); +int64_t __ltest_wmm_load_i64(void* 
addr, int order); + +// candidate's load api for llvmpass +int __ltest_wmm_load_candidates_i1(void* addr, int order); +int __ltest_wmm_load_candidates_i8(void* addr, int order); +int __ltest_wmm_load_candidates_i16(void* addr, int order); +int __ltest_wmm_load_candidates_i32(void* addr, int order); +int __ltest_wmm_load_candidates_i64(void* addr, int order); + +bool __ltest_wmm_load_apply_i1(int choice_index); +int8_t __ltest_wmm_load_apply_i8(int choice_index); +int16_t __ltest_wmm_load_apply_i16(int choice_index); +int32_t __ltest_wmm_load_apply_i32(int choice_index); +int64_t __ltest_wmm_load_apply_i64(int choice_index); + + +void __ltest_wmm_store_i1(void* addr, int order, bool value); +void __ltest_wmm_store_i8(void* addr, int order, int8_t value); +void __ltest_wmm_store_i16(void* addr, int order, int16_t value); +void __ltest_wmm_store_i32(void* addr, int order, int32_t value); +void __ltest_wmm_store_i64(void* addr, int order, int64_t value); + + +struct LTestCmpXchgResult_i1 { + bool old; + bool success; +}; + +struct LTestCmpXchgResult_i8 { + int8_t old; + bool success; +}; + +struct LTestCmpXchgResult_i16 { + int16_t old; + bool success; +}; + +struct LTestCmpXchgResult_i32 { + int32_t old; + bool success; +}; + +struct LTestCmpXchgResult_i64 { + int64_t old; + bool success; +}; + + +int8_t __ltest_wmm_rmw_i8(void* addr, int order, int op, int8_t operand); +int16_t __ltest_wmm_rmw_i16(void* addr, int order, int op, int16_t operand); +int32_t __ltest_wmm_rmw_i32(void* addr, int order, int op, int32_t operand); +int64_t __ltest_wmm_rmw_i64(void* addr, int order, int op, int64_t operand); + +LTestCmpXchgResult_i1 __ltest_wmm_cmpxchg_i1( + void* addr, + bool expected, + bool desired, + int success_order, + int failure_order, + bool is_weak); + +LTestCmpXchgResult_i8 __ltest_wmm_cmpxchg_i8( + void* addr, + int8_t expected, + int8_t desired, + int success_order, + int failure_order, + bool is_weak); + +LTestCmpXchgResult_i16 __ltest_wmm_cmpxchg_i16( + void* 
addr, + int16_t expected, + int16_t desired, + int success_order, + int failure_order, + bool is_weak); + +LTestCmpXchgResult_i32 __ltest_wmm_cmpxchg_i32( + void* addr, + int32_t expected, + int32_t desired, + int success_order, + int failure_order, + bool is_weak); + +LTestCmpXchgResult_i64 __ltest_wmm_cmpxchg_i64( + void* addr, + int64_t expected, + int64_t desired, + int success_order, + int failure_order, + bool is_weak); + + +#ifdef __cplusplus +} +#endif diff --git a/runtime/lib.cpp b/runtime/lib.cpp index e91e24de..7ee1dd06 100644 --- a/runtime/lib.cpp +++ b/runtime/lib.cpp @@ -1,6 +1,7 @@ #include "include/lib.h" #include +#include #include #include @@ -10,6 +11,7 @@ // See comments in the lib.h. Task this_coro{}; +int this_thread_id = -1; boost::context::fiber_context sched_ctx; std::optional coroutine_status; @@ -18,10 +20,33 @@ namespace ltest { std::vector task_builders{}; } +// Test failure tracking for litmus tests, which could expectedly fail. +namespace ltest { +namespace { +bool test_failed{false}; +std::string test_failure_message{}; +} // namespace + +void SetTestFailure(std::string message) { + test_failure_message = std::move(message); + test_failed = true; +} + +bool HasTestFailure() { return test_failed; } + +const std::string& GetTestFailureMessage() { return test_failure_message; } + +void ClearTestFailure() { + test_failed = false; + test_failure_message.clear(); +} +} // namespace ltest + Task CoroBase::GetPtr() { return shared_from_this(); } -void CoroBase::Resume() { +void CoroBase::Resume(int resumed_thread_id) { this_coro = this->GetPtr(); + this_thread_id = resumed_thread_id; assert(!this_coro->IsReturned() && this_coro->ctx); // debug(stderr, "name: %s\n", // std::string(this_coro->GetPtr()->GetName()).c_str()); @@ -38,6 +63,7 @@ void CoroBase::Resume() { }).resume(); } this_coro.reset(); + this_thread_id = -1; } int CoroBase::GetId() const { return id; } @@ -75,18 +101,18 @@ extern "C" void CoroutineStatusChange(char* name, bool 
start) { CoroYield(); } -void CoroBase::Terminate() { +void CoroBase::Terminate(int running_thread_id) { int tries = 0; while (!IsReturned()) { ++tries; - Resume(); + Resume(running_thread_id); assert(tries < 1000000 && "coroutine is spinning too long, possible wrong terminating order"); } } -void CoroBase::TryTerminate() { +void CoroBase::TryTerminate(int running_thread_id) { for (size_t i = 0; i < 1000 && !is_returned; ++i) { - Resume(); + Resume(running_thread_id); } } \ No newline at end of file diff --git a/runtime/pretty_printer.cpp b/runtime/pretty_printer.cpp deleted file mode 100644 index 36cffdbe..00000000 --- a/runtime/pretty_printer.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#include "include/pretty_print.h" - -PrettyPrinter::PrettyPrinter(size_t threads_num) : threads_num{threads_num} {} diff --git a/runtime/strategy_context.cpp b/runtime/strategy_context.cpp new file mode 100644 index 00000000..63738cbb --- /dev/null +++ b/runtime/strategy_context.cpp @@ -0,0 +1,14 @@ +#include "include/scheduler.h" + +namespace { +Strategy* g_active_strategy = nullptr; +} + +void SetActiveStrategy(Strategy* strategy) { + // store pointer to current strategy for WMM choice callbacks + g_active_strategy = strategy; +} + +Strategy* GetActiveStrategy() { + return g_active_strategy; +} diff --git a/runtime/verifying.cpp b/runtime/verifying.cpp index 85f52ed2..55232328 100644 --- a/runtime/verifying.cpp +++ b/runtime/verifying.cpp @@ -9,6 +9,10 @@ namespace ltest { +namespace wmm { +bool wmm_enabled = false; +} // namespace wmm + template <> std::string toString(const int &a) { return std::to_string(a); @@ -68,12 +72,16 @@ DEFINE_int32(threads, 2, "Number of threads"); DEFINE_int32(tasks, 15, "Number of tasks"); DEFINE_int32(switches, 100000000, "Number of switches"); DEFINE_int32(rounds, 5, "Number of rounds"); +DEFINE_int64(seed, -1, "Seed for strategy choice and scheduling"); DEFINE_bool(minimize, false, "Minimize nonlinear scenario"); DEFINE_int32(exploration_runs, 15, "Number 
of attempts to find nonlinearized round during each " "minimization step"); DEFINE_int32(minimization_runs, 15, "Number of minimization runs for smart minimizor"); +DEFINE_bool(wmm_enabled, false, + "Enable WMM graph usage (all atomic operations will be performed " + "via this graph"); DEFINE_int32(depth, 0, "How many tasks can be executed on one thread(Only for TLA)"); DEFINE_bool(verbose, false, "Verbosity"); @@ -85,12 +93,14 @@ void SetOpts(const DefaultOptions &def) { FLAGS_tasks = def.tasks; FLAGS_switches = def.switches; FLAGS_rounds = def.rounds; + FLAGS_seed = def.seed; FLAGS_depth = def.depth; FLAGS_verbose = def.verbose; FLAGS_strategy = def.strategy; FLAGS_weights = def.weights; FLAGS_exploration_runs = def.exploration_runs; FLAGS_minimization_runs = def.minimization_runs; + FLAGS_wmm_enabled = def.wmm_enabled; } // Extracts required opts, returns the rest of args. @@ -100,10 +110,12 @@ Opts ParseOpts() { opts.tasks = FLAGS_tasks; opts.switches = FLAGS_switches; opts.rounds = FLAGS_rounds; + opts.seed = FLAGS_seed; opts.minimize = FLAGS_minimize; // NOTE(dartiukhov) minimization for // scenarios with locks is not supported opts.exploration_runs = FLAGS_exploration_runs; opts.minimization_runs = FLAGS_minimization_runs; + opts.wmm_enabled = wmm_enabled = FLAGS_wmm_enabled; opts.verbose = FLAGS_verbose; opts.typ = FromLiteral(std::move(FLAGS_strategy)); opts.depth = FLAGS_depth; diff --git a/runtime/wmm_runtime.cpp b/runtime/wmm_runtime.cpp new file mode 100644 index 00000000..7cb067db --- /dev/null +++ b/runtime/wmm_runtime.cpp @@ -0,0 +1,426 @@ +#include "include/wmm_runtime.h" + +#include +#include +#include +#include + +#include "include/lib.h" +#include "include/scheduler.h" +#include "include/wmm/wmm.h" + +using ltest::wmm::WmmUtils; + +namespace { + +template +struct CandidateStorage { + bool use_graph = false; + std::vector> graph; + std::vector direct; +}; + +template +CandidateStorage& GetCandidateStorageForThread(int thread_id) { + static 
std::vector> storage; + if (thread_id < 0) { + thread_id = 0; + } + if (storage.size() <= static_cast(thread_id)) { + storage.resize(thread_id + 1); + } + return storage[thread_id]; +} + +template +int LoadCandidatesImpl(void* addr, int order); + +template +T ApplyLoadImpl(int choice_index); + +template +int GetLocationId(void* addr, T initial_value) { + auto& graph = ltest::wmm::ExecutionGraph::getInstance(); + (void)initial_value; + auto* raw = reinterpret_cast(addr); + T init_value = __atomic_load_n(raw, __ATOMIC_RELAXED); + return graph.GetOrRegisterLocation(addr, init_value, this_thread_id); +} + +template +T LoadImpl(void* addr, int order) { + int candidates_count = LoadCandidatesImpl(addr, order); + int choice_index = __ltest_wmm_choice(candidates_count); + return ApplyLoadImpl(choice_index); +} + +template +int LoadCandidatesImpl(void* addr, int order) { + auto& storage = GetCandidateStorageForThread(this_thread_id); + storage.graph.clear(); + storage.direct.clear(); + + if (!ltest::wmm::wmm_enabled || !this_coro) { + // read the real atomic directly + auto* atom = reinterpret_cast*>(addr); + storage.direct.push_back( + atom->load(static_cast(order))); + log() << "[WMM][candidates][direct] thread=" << this_thread_id + << " addr=" << addr << " order=" << order + << " count=" << storage.direct.size() + << " value0=" << storage.direct.front() << "\n"; + storage.use_graph = false; + return static_cast(storage.direct.size()); + } + + auto& graph = ltest::wmm::ExecutionGraph::getInstance(); + int location_id = GetLocationId(addr, T{}); + storage.graph = graph.LoadCandidates( + location_id, + this_thread_id, + WmmUtils::OrderFromStd(static_cast(order))); + storage.use_graph = true; + assert(!storage.graph.empty() && "WMM graph returned no read candidates"); + + log() << "[WMM][candidates][graph] thread=" << this_thread_id + << " loc=" << location_id << " addr=" << addr << " order=" + << order << " count=" << storage.graph.size() << "\n"; + for (size_t i = 0; i < 
storage.graph.size(); ++i) { + const auto& c = storage.graph[i]; + log() << " [" << i << "] value=" << c.value + << " write=" << c.write_event->AsString() + << " read=" << c.read_event->AsString() << "\n"; + } + + return static_cast(storage.graph.size()); +} + +template +T ApplyLoadImpl(int choice_index) { + auto& storage = GetCandidateStorageForThread(this_thread_id); + if (storage.use_graph) { + if (choice_index < 0 || + static_cast(choice_index) >= storage.graph.size()) { + choice_index = 0; + } + auto& graph = ltest::wmm::ExecutionGraph::getInstance(); + T value = graph.ApplyReadCandidate(storage.graph[choice_index]); + storage.graph.clear(); + storage.use_graph = false; + return value; + } + + if (storage.direct.empty()) { + return T{}; + } + if (choice_index < 0 || + static_cast(choice_index) >= storage.direct.size()) { + choice_index = 0; + } + T value = storage.direct[choice_index]; + storage.direct.clear(); + storage.use_graph = false; + return value; +} + +template +void StoreImpl(void* addr, int order, T value) { + if (ltest::wmm::wmm_enabled && this_coro) { + auto& graph = ltest::wmm::ExecutionGraph::getInstance(); + int location_id = GetLocationId(addr, value); + graph.Store( + location_id, + this_thread_id, + WmmUtils::OrderFromStd(static_cast(order)), + value); + return; + } + auto* atom = reinterpret_cast*>(addr); + atom->store(value, static_cast(order)); +} + +template +struct CmpXchgResult { + T old; + bool success; +}; + +template +CmpXchgResult CmpXchgImpl( + void* addr, + T expected, + T desired, + int success_order, + int failure_order, + bool is_weak) { + if (ltest::wmm::wmm_enabled && this_coro) { + auto& graph = ltest::wmm::ExecutionGraph::getInstance(); + int location_id = GetLocationId(addr, expected); + T expected_copy = expected; + auto [rmw_success, read_value] = graph.ReadModifyWrite( + location_id, + this_thread_id, + &expected_copy, + desired, + WmmUtils::OrderFromStd(static_cast(success_order)), + 
WmmUtils::OrderFromStd(static_cast(failure_order))); + return CmpXchgResult{read_value, rmw_success}; + } + + auto* atom = reinterpret_cast*>(addr); + T expected_copy = expected; + bool success = is_weak + ? atom->compare_exchange_weak( + expected_copy, desired, + static_cast(success_order), + static_cast(failure_order)) + : atom->compare_exchange_strong( + expected_copy, desired, + static_cast(success_order), + static_cast(failure_order)); + return CmpXchgResult{expected_copy, success}; +} + +template +T AtomicRmwImpl(void* addr, int order, int op, T operand) { + // wmm graph currently models only compare_exchange rmw + // for other rmw ops fallback to real atomics + auto* atom = reinterpret_cast*>(addr); + auto order_mo = static_cast(order); + switch (op) { + case LTEST_RMW_ADD: return atom->fetch_add(operand, order_mo); + case LTEST_RMW_SUB: return atom->fetch_sub(operand, order_mo); + case LTEST_RMW_AND: return atom->fetch_and(operand, order_mo); + case LTEST_RMW_OR: return atom->fetch_or(operand, order_mo); + case LTEST_RMW_XOR: return atom->fetch_xor(operand, order_mo); + case LTEST_RMW_XCHG: return atom->exchange(operand, order_mo); + default: { + // max/min/umax/umin через cas + T cur = atom->load(order_mo); + while (true) { + T next = cur; + switch (op) { + case LTEST_RMW_MAX: next = (cur > operand) ? cur : operand; break; + case LTEST_RMW_MIN: next = (cur < operand) ? cur : operand; break; + case LTEST_RMW_UMAX: { + using U = std::make_unsigned_t; + next = (static_cast(cur) > static_cast(operand)) ? cur : operand; + break; + } + case LTEST_RMW_UMIN: { + using U = std::make_unsigned_t; + next = (static_cast(cur) < static_cast(operand)) ? 
cur : operand; + break; + } + default: next = operand; break; + } + if (atom->compare_exchange_weak(cur, next, order_mo, order_mo)) { + return cur; + } + } + } + } +} + +} // namespace + +extern "C" bool __ltest_wmm_load_i1(void* addr, int order) { + return LoadImpl(addr, order); +} + +extern "C" int8_t __ltest_wmm_load_i8(void* addr, int order) { + return LoadImpl(addr, order); +} + +extern "C" int16_t __ltest_wmm_load_i16(void* addr, int order) { + return LoadImpl(addr, order); +} + +extern "C" int32_t __ltest_wmm_load_i32(void* addr, int order) { + return LoadImpl(addr, order); +} + +extern "C" int64_t __ltest_wmm_load_i64(void* addr, int order) { + return LoadImpl(addr, order); +} + +extern "C" int __ltest_wmm_load_candidates_i1(void* addr, int order) { + return LoadCandidatesImpl(addr, order); +} + +extern "C" int __ltest_wmm_load_candidates_i8(void* addr, int order) { + return LoadCandidatesImpl(addr, order); +} + +extern "C" int __ltest_wmm_load_candidates_i16(void* addr, int order) { + return LoadCandidatesImpl(addr, order); +} + +extern "C" int __ltest_wmm_load_candidates_i32(void* addr, int order) { + return LoadCandidatesImpl(addr, order); +} + +extern "C" int __ltest_wmm_load_candidates_i64(void* addr, int order) { + return LoadCandidatesImpl(addr, order); +} + +extern "C" bool __ltest_wmm_load_apply_i1(int choice_index) { + return ApplyLoadImpl(choice_index); +} + +extern "C" int8_t __ltest_wmm_load_apply_i8(int choice_index) { + return ApplyLoadImpl(choice_index); +} + +extern "C" int16_t __ltest_wmm_load_apply_i16(int choice_index) { + return ApplyLoadImpl(choice_index); +} + +extern "C" int32_t __ltest_wmm_load_apply_i32(int choice_index) { + return ApplyLoadImpl(choice_index); +} + +extern "C" int64_t __ltest_wmm_load_apply_i64(int choice_index) { + return ApplyLoadImpl(choice_index); +} + + +extern "C" void __ltest_wmm_store_i1(void* addr, int order, bool value) { + StoreImpl(addr, order, value); +} + +extern "C" void __ltest_wmm_store_i8(void* 
addr, int order, int8_t value) { + StoreImpl(addr, order, value); +} + +extern "C" void __ltest_wmm_store_i16(void* addr, int order, int16_t value) { + StoreImpl(addr, order, value); +} + +extern "C" void __ltest_wmm_store_i32(void* addr, int order, int32_t value) { + StoreImpl(addr, order, value); +} + +extern "C" void __ltest_wmm_store_i64(void* addr, int order, int64_t value) { + StoreImpl(addr, order, value); +} + + +extern "C" LTestCmpXchgResult_i1 __ltest_wmm_cmpxchg_i1( + void* addr, + bool expected, + bool desired, + int success_order, + int failure_order, + bool is_weak) { + auto result = CmpXchgImpl( + addr, + expected, + desired, + success_order, + failure_order, + is_weak); + return {result.old, result.success}; +} + +extern "C" LTestCmpXchgResult_i8 __ltest_wmm_cmpxchg_i8( + void* addr, + int8_t expected, + int8_t desired, + int success_order, + int failure_order, + bool is_weak) { + auto result = CmpXchgImpl( + addr, + expected, + desired, + success_order, + failure_order, + is_weak); + return {result.old, result.success}; +} + +extern "C" LTestCmpXchgResult_i16 __ltest_wmm_cmpxchg_i16( + void* addr, + int16_t expected, + int16_t desired, + int success_order, + int failure_order, + bool is_weak) { + auto result = CmpXchgImpl( + addr, + expected, + desired, + success_order, + failure_order, + is_weak); + return {result.old, result.success}; +} + +extern "C" LTestCmpXchgResult_i32 __ltest_wmm_cmpxchg_i32( + void* addr, + int32_t expected, + int32_t desired, + int success_order, + int failure_order, + bool is_weak) { + auto result = CmpXchgImpl( + addr, + expected, + desired, + success_order, + failure_order, + is_weak); + return {result.old, result.success}; +} + +extern "C" LTestCmpXchgResult_i64 __ltest_wmm_cmpxchg_i64( + void* addr, + int64_t expected, + int64_t desired, + int success_order, + int failure_order, + bool is_weak) { + auto result = CmpXchgImpl( + addr, + expected, + desired, + success_order, + failure_order, + is_weak); + return 
{result.old, result.success}; +} + + +extern "C" int8_t __ltest_wmm_rmw_i8( + void* addr, int order, int op, int8_t operand) { + return AtomicRmwImpl(addr, order, op, operand); +} + +extern "C" int16_t __ltest_wmm_rmw_i16( + void* addr, int order, int op, int16_t operand) { + return AtomicRmwImpl(addr, order, op, operand); +} + +extern "C" int32_t __ltest_wmm_rmw_i32( + void* addr, int order, int op, int32_t operand) { + return AtomicRmwImpl(addr, order, op, operand); +} + +extern "C" int64_t __ltest_wmm_rmw_i64( + void* addr, int order, int op, int64_t operand) { + return AtomicRmwImpl(addr, order, op, operand); +} + +extern "C" int __ltest_wmm_choice(int candidates_count) { + // this is the only place where we give choice to the strategy + auto* strategy = GetActiveStrategy(); + if (!strategy || candidates_count <= 0) { + return 0; + } + if (candidates_count == 1) { + return 0; + } + return static_cast(strategy->ChooseCandidate( + static_cast(candidates_count))); +} diff --git a/test/runtime/stackfulltask_mock.h b/test/runtime/stackfulltask_mock.h index 1fe6e0a7..381b7cd9 100644 --- a/test/runtime/stackfulltask_mock.h +++ b/test/runtime/stackfulltask_mock.h @@ -9,14 +9,14 @@ class MockTask : public CoroBase { public: MOCK_METHOD(Task, Restart, (void*), (override)); - MOCK_METHOD(void, Resume, (), ()); + MOCK_METHOD(void, Resume, (int), ()); MOCK_METHOD(bool, IsReturned, (), (const)); MOCK_METHOD(ValueWrapper, GetRetVal, (), (const, override)); MOCK_METHOD(std::string_view, GetName, (), (const, override)); MOCK_METHOD(std::vector, GetStrArgs, (), (const, override)); MOCK_METHOD(void*, GetArgs, (), (const, override)); MOCK_METHOD(bool, IsSuspended, (), (const)); - MOCK_METHOD(void, Terminate, (), ()); - MOCK_METHOD(void, TryTerminate, (), ()); + MOCK_METHOD(void, Terminate, (int), ()); + MOCK_METHOD(void, TryTerminate, (int), ()); virtual ~MockTask() { is_returned = true; } }; diff --git a/verifying/CMakeLists.txt b/verifying/CMakeLists.txt index 4d0f1ed9..ca6d48f7 
100644 --- a/verifying/CMakeLists.txt +++ b/verifying/CMakeLists.txt @@ -29,8 +29,8 @@ function(verify_target_without_plugin target) COMMAND ${CLANG_TOOL_EXECUTABLE} -p=${CMAKE_BINARY_DIR}/compile_commands.json # passing compilation database, make sure CMAKE_EXPORT_COMPILE_COMMANDS flag is set --temp-prefix ${CLANG_TOOL_TMP_PREFIX} - --replace-names ::std::mutex,::std::shared_mutex,::std::condition_variable - --insert-names ltest::mutex,ltest::shared_mutex,ltest::condition_variable + --replace-names ::std::mutex,::std::shared_mutex,::std::condition_variable,::std::atomic + --insert-names ltest::mutex,ltest::shared_mutex,ltest::condition_variable,ltest::latomic ${CMAKE_CURRENT_SOURCE_DIR}/${source_name} DEPENDS ${CLANG_TOOL} COMMENT "Running Clang Pass Tool on ${source_name}" diff --git a/verifying/blocking/verifiers/buffered_channel_verifier.h b/verifying/blocking/verifiers/buffered_channel_verifier.h index 0bb894c5..ec6c5b50 100644 --- a/verifying/blocking/verifiers/buffered_channel_verifier.h +++ b/verifying/blocking/verifiers/buffered_channel_verifier.h @@ -46,8 +46,14 @@ struct BufferedChannelVerifier { return std::nullopt; } - size_t senders_; - size_t receivers_; - size_t size_; + void Reset() { + senders_ = 0; + receivers_ = 0; + size_ = 0; + } + + size_t senders_ = 0; + size_t receivers_ = 0; + size_t size_ = 0; }; } // namespace spec \ No newline at end of file diff --git a/verifying/blocking/verifiers/mutex_verifier.h b/verifying/blocking/verifiers/mutex_verifier.h index 30f12cc6..949caf06 100644 --- a/verifying/blocking/verifiers/mutex_verifier.h +++ b/verifying/blocking/verifiers/mutex_verifier.h @@ -36,6 +36,8 @@ struct MutexVerifier { return std::nullopt; } + void Reset() { status.clear(); } + // NOTE(kmitkin): we cannot just store number of thread that holds mutex // because Lock can finish before Unlock! 
std::unordered_map status; diff --git a/verifying/blocking/verifiers/shared_mutex_verifier.h b/verifying/blocking/verifiers/shared_mutex_verifier.h index 628fdbf0..928d48af 100644 --- a/verifying/blocking/verifiers/shared_mutex_verifier.h +++ b/verifying/blocking/verifiers/shared_mutex_verifier.h @@ -54,6 +54,8 @@ struct SharedMutexVerifier { return std::nullopt; } + void Reset() { status.clear(); } + std::unordered_map status; }; } // namespace spec diff --git a/verifying/specs/unique_args.h b/verifying/specs/unique_args.h index afa13996..e32e822d 100644 --- a/verifying/specs/unique_args.h +++ b/verifying/specs/unique_args.h @@ -51,6 +51,7 @@ struct UniqueArgsOptionsOverride { .tasks = limit, .switches = 100000000, .rounds = 10000, + .seed = -1, .depth = 1, .forbid_all_same = false, .verbose = false, diff --git a/verifying/targets/CMakeLists.txt b/verifying/targets/CMakeLists.txt index fa5e8a79..ec1f9ee5 100644 --- a/verifying/targets/CMakeLists.txt +++ b/verifying/targets/CMakeLists.txt @@ -6,6 +6,7 @@ set (SOURCE_TARGET_LIST nonlinear_set.cpp nonlinear_ms_queue.cpp nonlinear_treiber_stack.cpp + nonlinear_queue_with_custom_rounds.cpp ) set (SOURCE_TARGET_WITHOUT_PLUGIN_LIST @@ -75,10 +76,18 @@ add_integration_test("nonlinear_queue_pct" "verify" TRUE nonlinear_queue --rounds 10000 --strategy pct ) +# TODO: test should fail, but for some reason after commit '15f64f0 - feat: add more tests for blocking data structures&refactor blocking support' +# it takes too much time to find bugs, I wasn't able to wait until custom round detects a bug +add_integration_test("nonlinear_queue_with_custom_rounds_pct" "verify" FALSE + nonlinear_queue_with_custom_rounds --rounds 0 --exploration_runs 1000 --strategy pct +) + add_integration_test("unique_args" "verify" FALSE unique_args ) add_integration_test("counique_args" "verify" FALSE counique_args -) \ No newline at end of file +) + +add_subdirectory(wmm_litmus) \ No newline at end of file diff --git 
a/verifying/targets/nonlinear_queue_with_custom_rounds.cpp b/verifying/targets/nonlinear_queue_with_custom_rounds.cpp new file mode 100644 index 00000000..c3dc5862 --- /dev/null +++ b/verifying/targets/nonlinear_queue_with_custom_rounds.cpp @@ -0,0 +1,45 @@ +#include +#include + +#include "../specs/queue.h" +#include "runtime/include/verifying.h" +#include "runtime/include/verifying_macro.h" + +const int N = 100; + +// Implementation copy-pasted from `nonlinear_queue.cpp` +struct Queue { + Queue() {} + + non_atomic void Push(int v) { + int pos = head.fetch_add(1); + a[pos] = v; + } + + non_atomic int Pop() { + int last = head.load(); + for (int i = 0; i < last; ++i) { + int e = a[i].load(); + if (e != 0 && a[i].compare_exchange_strong(e, 0)) { + return e; + } + } + return 0; + } + + std::atomic a[N]; + std::atomic head{}; +}; + +// Specify target structure and it's sequential specification. +using spec_t = + ltest::Spec, spec::QueueHash, spec::QueueEquals>; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(1), void, Queue, Push, int), + method_invocation(std::tuple(2), void, Queue, Push, int), + method_invocation(std::tuple(), int, Queue, Pop), + }, + {method_invocation(std::tuple(3), void, Queue, Push, int), + method_invocation(std::tuple(), int, Queue, Pop)}}); \ No newline at end of file diff --git a/verifying/targets/wmm_litmus/CMakeLists.txt b/verifying/targets/wmm_litmus/CMakeLists.txt new file mode 100644 index 00000000..ea23a89c --- /dev/null +++ b/verifying/targets/wmm_litmus/CMakeLists.txt @@ -0,0 +1,29 @@ +set(WMM_TARGET_LIST) + +function(add_wmm_litmus name expect_fail) + set(source_name "${name}.cpp") + verify_target(${name}) + list(APPEND WMM_TARGET_LIST ${name}) + set(WMM_TARGET_LIST ${WMM_TARGET_LIST} PARENT_SCOPE) + add_integration_test(${name} "litmus" ${expect_fail} + ${name} --wmm_enabled --rounds 0 --exploration_runs 10000 --strategy random ${ARGN} + ) + set_tests_properties("litmus_${name}" + PROPERTIES LABELS "litmus;wmm" + ) 
+endfunction() + +add_wmm_litmus(litmus1_sc FALSE) +add_wmm_litmus(litmus2_relaxed_sc FALSE) +add_wmm_litmus(litmus3_sc FALSE) +add_wmm_litmus(litmus4_relaxed TRUE) +add_wmm_litmus(litmus5_ra FALSE) +# add_wmm_litmus(litmus6_mixed TRUE) # TODO: requires a fix, see comment in the file +add_wmm_litmus(litmus7_rmw_sc FALSE) +add_wmm_litmus(litmus8_rmw_sc FALSE) +add_wmm_litmus(litmus9_weak_rmw TRUE) + +add_custom_target(verify-litmus + DEPENDS + ${WMM_TARGET_LIST} +) diff --git a/verifying/targets/wmm_litmus/litmus1_sc.cpp b/verifying/targets/wmm_litmus/litmus1_sc.cpp new file mode 100644 index 00000000..e0e87b5f --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus1_sc.cpp @@ -0,0 +1,44 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +struct Exp1Test { + std::atomic x{0}, y{0}; + int r1 = -1, r2 = -1; + + non_atomic void A() { + r1 = y.load(std::memory_order_seq_cst); + x.store(1, std::memory_order_seq_cst); + rassert(!(r1 == 1 && r2 == 1)); + } + + non_atomic void B() { + r2 = x.load(std::memory_order_seq_cst); + y.store(1, std::memory_order_seq_cst); + rassert(!(r1 == 1 && r2 == 1)); + } +}; + +struct Exp1Spec { + using method_t = std::function; + static auto GetMethods() { + method_t func = [](Exp1Spec *, void *) -> ValueWrapper { return void_v; }; + return std::map{ + {"A", func}, + {"B", func}, + }; + } +}; + +using spec_t = ltest::Spec; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp1Test, A), + }, + { + method_invocation(std::tuple(), void, Exp1Test, B), + }}); diff --git a/verifying/targets/wmm_litmus/litmus2_relaxed_sc.cpp b/verifying/targets/wmm_litmus/litmus2_relaxed_sc.cpp new file mode 100644 index 00000000..f43a409a --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus2_relaxed_sc.cpp @@ -0,0 +1,44 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +struct Exp2Test { + std::atomic x{0}, y{0}; + int r1 = -1; + + 
non_atomic void A() { + y.store(1, std::memory_order_relaxed); + x.store(2, std::memory_order_seq_cst); + } + + non_atomic void B() { + if (x.load(std::memory_order_seq_cst) == 2) { + r1 = y.load(std::memory_order_relaxed); + rassert(r1 == 1); + } + } +}; + +struct Exp2Spec { + using method_t = std::function; + static auto GetMethods() { + method_t func = [](Exp2Spec *, void *) -> ValueWrapper { return void_v; }; + return std::map{ + {"A", func}, + {"B", func}, + }; + } +}; + +using spec_t = ltest::Spec; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp2Test, A), + }, + { + method_invocation(std::tuple(), void, Exp2Test, B), + }}); diff --git a/verifying/targets/wmm_litmus/litmus3_sc.cpp b/verifying/targets/wmm_litmus/litmus3_sc.cpp new file mode 100644 index 00000000..48415e85 --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus3_sc.cpp @@ -0,0 +1,53 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +struct Exp3Test { + std::atomic x{0}, y{0}; + + non_atomic void A() { + y.store(20, std::memory_order_seq_cst); + x.store(10, std::memory_order_seq_cst); + } + + non_atomic void B() { + if (x.load(std::memory_order_seq_cst) == 10) { + rassert(y.load(std::memory_order_seq_cst) == 20); + y.store(10, std::memory_order_seq_cst); + } + } + + non_atomic void C() { + if (y.load(std::memory_order_seq_cst) == 10) { + rassert(x.load(std::memory_order_seq_cst) == 10); + } + } +}; + +struct Exp3Spec { + using method_t = std::function; + static auto GetMethods() { + method_t func = [](Exp3Spec *, void *) -> ValueWrapper { return void_v; }; + return std::map{ + {"A", func}, + {"B", func}, + {"C", func}, + }; + } +}; + +using spec_t = ltest::Spec; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp3Test, A), + }, + { + method_invocation(std::tuple(), void, Exp3Test, B), + }, + { + method_invocation(std::tuple(), void, Exp3Test, C), + }}); diff --git 
a/verifying/targets/wmm_litmus/litmus4_relaxed.cpp b/verifying/targets/wmm_litmus/litmus4_relaxed.cpp new file mode 100644 index 00000000..f53401d7 --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus4_relaxed.cpp @@ -0,0 +1,53 @@ +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +// The test might fail and it is expected +struct Exp4Test { + std::atomic x{0}, y{0}; + + non_atomic void A() { + y.store(20, std::memory_order_relaxed); + x.store(10, std::memory_order_relaxed); + } + + non_atomic void B() { + if (x.load(std::memory_order_relaxed) == 10) { + rassert(y.load(std::memory_order_relaxed) == 20); // could fail + y.store(10, std::memory_order_relaxed); + } + } + + non_atomic void C() { + if (y.load(std::memory_order_relaxed) == 10) { + rassert(x.load(std::memory_order_relaxed) == 10); // could fail + } + } +}; + +struct Exp4Spec { + using method_t = std::function; + static auto GetMethods() { + method_t func = [](Exp4Spec *, void *) -> ValueWrapper { return void_v; }; + return std::map{ + {"A", func}, + {"B", func}, + {"C", func}, + }; + } +}; + +using spec_t = ltest::Spec; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp4Test, A), + }, + { + method_invocation(std::tuple(), void, Exp4Test, B), + }, + { + method_invocation(std::tuple(), void, Exp4Test, C), + }}); diff --git a/verifying/targets/wmm_litmus/litmus5_ra.cpp b/verifying/targets/wmm_litmus/litmus5_ra.cpp new file mode 100644 index 00000000..1c997fd8 --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus5_ra.cpp @@ -0,0 +1,53 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +struct Exp5Test { + std::atomic x{0}, y{0}; + + non_atomic void A() { + y.store(20, std::memory_order_release); + x.store(10, std::memory_order_release); + } + + non_atomic void B() { + if (x.load(std::memory_order_acquire) == 10) { + rassert(y.load(std::memory_order_acquire) == 20); + y.store(10, 
std::memory_order_release); + } + } + + non_atomic void C() { + if (y.load(std::memory_order_acquire) == 10) { + rassert(x.load(std::memory_order_acquire) == 10); + } + } +}; + +struct Exp5Spec { + using method_t = std::function; + static auto GetMethods() { + method_t func = [](Exp5Spec *, void *) -> ValueWrapper { return void_v; }; + return std::map{ + {"A", func}, + {"B", func}, + {"C", func}, + }; + } +}; + +using spec_t = ltest::Spec; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp5Test, A), + }, + { + method_invocation(std::tuple(), void, Exp5Test, B), + }, + { + method_invocation(std::tuple(), void, Exp5Test, C), + }}); diff --git a/verifying/targets/wmm_litmus/litmus6_mixed.cpp b/verifying/targets/wmm_litmus/litmus6_mixed.cpp new file mode 100644 index 00000000..af43a6c6 --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus6_mixed.cpp @@ -0,0 +1,57 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +// Example 6 (TODO: fix mixed memory order accesses, see +// https://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync) Note: Requires further +// investigation, because it seems that sc support is not sound right now. +// Fix it. 
struct Exp6Test {
  std::atomic<int> x{0}, y{0};

  // Writer: relaxed store to y, then seq_cst store to x.
  non_atomic void A() {
    y.store(20, std::memory_order_relaxed);
    x.store(10, std::memory_order_seq_cst);
  }

  // NOTE(review): x is read relaxed here but written seq_cst in A, while y is
  // read seq_cst but written relaxed — this mixed-order combination is what
  // the file's TODO flags as unsound/unsupported; confirm intended semantics
  // before relying on either rassert below.
  non_atomic void B() {
    if (x.load(std::memory_order_relaxed) == 10) {
      rassert(y.load(std::memory_order_seq_cst) == 20);
      y.store(10, std::memory_order_relaxed);
    }
  }

  // Acquire-reads y (set to 10 only by B) and asserts A's x = 10 is visible.
  non_atomic void C() {
    if (y.load(std::memory_order_acquire) == 10) {
      rassert(x.load(std::memory_order_acquire) == 10);
    }
  }
};
+LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp7Test, A), + }, + { + method_invocation(std::tuple(), void, Exp7Test, B), + }}); diff --git a/verifying/targets/wmm_litmus/litmus8_rmw_sc.cpp b/verifying/targets/wmm_litmus/litmus8_rmw_sc.cpp new file mode 100644 index 00000000..26680e33 --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus8_rmw_sc.cpp @@ -0,0 +1,48 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +struct Exp8Test { + std::atomic x{0}; + + non_atomic void A() { + int expected; + do { + expected = x.load(std::memory_order_seq_cst); + } while (!x.compare_exchange_weak(expected, expected + 1, + std::memory_order_seq_cst)); + int r = x.load(std::memory_order_seq_cst); + rassert(expected == 0 || expected == 1); + rassert(r >= 1 && r <= 2); + } + + non_atomic void B() { + x.store(1, std::memory_order_seq_cst); + int r = x.load(std::memory_order_seq_cst); + rassert(r >= 1 && r <= 2); + } +}; + +struct Exp8Spec { + using method_t = std::function; + static auto GetMethods() { + method_t func = [](Exp8Spec *, void *) -> ValueWrapper { return void_v; }; + return std::map{ + {"A", func}, + {"B", func}, + }; + } +}; + +using spec_t = ltest::Spec; + +LTEST_ENTRYPOINT(spec_t, + {{ + method_invocation(std::tuple(), void, Exp8Test, A), + }, + { + method_invocation(std::tuple(), void, Exp8Test, B), + }}); diff --git a/verifying/targets/wmm_litmus/litmus9_weak_rmw.cpp b/verifying/targets/wmm_litmus/litmus9_weak_rmw.cpp new file mode 100644 index 00000000..5a19613b --- /dev/null +++ b/verifying/targets/wmm_litmus/litmus9_weak_rmw.cpp @@ -0,0 +1,47 @@ +#include +#include +#include +#include + +#include "verifying/targets/wmm_litmus/litmus_common.h" + +// The test might fail and it is expected +struct Exp9Test { + std::atomic x{0}, y{0}; + + non_atomic void A() { + int expected = 0; + while (!x.compare_exchange_strong(expected, 1, std::memory_order_relaxed, + std::memory_order_relaxed)) { + 
// Trivial hash/equality functors for litmus targets: every state hashes
// to the same constant and compares equal, so histories are never
// distinguished by state.
struct LinearWmmHash {
  template <typename State>
  size_t operator()(const State &) const {
    return 1;
  }
};

struct LinearWmmEquals {
  template <typename State>
  bool operator()(const State &, const State &) const {
    return true;
  }
};